http://www.cnblogs.com/lidabo/archive/2012/07/19/2599745.html http://blog.csdn.net/sdfgh2046/article/details/5830774
#!/usr/bin/python
# -*- coding: utf-8 -*-
import sys
from PIL import Image
from PIL import ImageDraw
from PIL import ImageFont

line_list = ['龙头摆尾', "新高资改", "棋力三段", "方可投机"]
idx = 0
x = 600
y = 300
font = ImageFont.truetype('simsun.ttc', 36, index=0)
im = Image.new("RGBA", (1366, 768), (0, 0, 0))
draw = ImageDraw.Draw(im)

for line in line_list:
    pos = (x, y + 50 * idx)
    print pos
    draw.text(pos, unicode(line, 'UTF-8'), font=font)
    idx += 1

del draw
im.save(r"d:/test.png")
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###################################################
# Teach wisdom to my machine, please call me Croco
# Periodically rewrite /etc/hosts so that, without touching the program itself,
# "curl url" resolves to an internal IP.
###################################################
from DebugLogger import *
Logger.init("updatehost")
import os
import sys
import time
import json
import random

special_host = "chelun.eclicks.cn"
last_data = ""
last_line = ""

def selectAnotherIp(iplist, last_ip):
    if last_ip in iplist:
        iplist.remove(last_ip)
    if len(iplist) == 0:
        return ""
    return random.choice(iplist)

def readWebConfAndGetNewIp(last_ip):
    file_name = "web.conf"
    if not os.path.exists(file_name):
        log("no such file:{0}".format(file_name))
        sys.exit(0)
        return ""
    old_data = ""
    with open(file_name, "r") as f:
        old_data = f.read()
    if not old_data:
        log("got empty file:{0}".format(file_name))
        return ""
    iplist = []
    try:
        js_obj = json.loads(old_data)
        for item in js_obj["servers"]:
            if item["status"] == "online":
                iplist.append(item["host"])
    except:
        log("read failed from webserver_conf:" + file_name + " data:" + old_data)
        return ""
    return selectAnotherIp(iplist, last_ip)

def task():
    global last_data
    global special_host
    file_name = "hosts.conf"
    file_bak_name = "hosts2.conf"
    if not os.path.exists(file_name):
        log("no such file:{0}".format(file_name))
        sys.exit(0)
        return
    old_data = ""
    with open(file_name, "r") as f:
        old_data = f.readlines()
    if not old_data:
        log("got empty file:{0}".format(file_name))
        return
    if last_data == old_data:
        return
    line_list1 = []
    line_list2 = []
    for line in old_data:
        line = line.strip()
        if not line:
            continue
        if "#" in line:
            line_list1.append(line)
            line_list2.append(line)
            continue
        if special_host not in line:
            line_list1.append(line)
            line_list2.append(line)
            continue
        line_list1.append(line)
        arr = line.split(" ")
        if len(arr) != 2:
            log("error line:{0}".format(line))
            sys.exit(1)
            return
        ipaddr = arr[0]
        newipaddr = readWebConfAndGetNewIp(ipaddr)
        if not newipaddr:
            log("no need to change")
            return
        line = newipaddr + " " + special_host
        line_list2.append(line)
        log("change ipaddr:{0} -> newipaddr:{1}".format(ipaddr, newipaddr))

    if os.path.exists(file_bak_name):
        os.remove(file_bak_name)
    with open(file_bak_name, "w") as f:
        for line in line_list1:
            f.write(line)
            f.write("\n")
    os.remove(file_name)
    with open(file_name, "w") as f:
        for line in line_list2:
            f.write(line)
            f.write("\n")
    if not os.path.exists(file_name):
        log("update failed, file:{0}".format(file_name), 2)
        return
    log("update succ, file:{0}".format(file_name))
    with open(file_name, "r") as f:
        last_data = f.readlines()

def main():
    uid = os.getuid()
    if uid != 0:
        log("os.getuid() != 0, current uid:{0}".format(uid))
        return
    while True:
        task()
        time.sleep(60)

if __name__ == '__main__':
    task()
    #main()
1. Get fluent in Python and turn it into a sharp tool that cuts through every obstacle on the road to internet freedom. To that end, I must master as much as possible of the skill set a top hacker needs.
2. Building on natural language processing, get familiar with data collection, storage, computation, classification, visualization and the other data-processing techniques. After that, I will do independent research and development in the service of quantitative analysis for financial investment.
GCC -O optimization level | Compile time | Output size (bytes) | Run time
No -O option set | real 0m20.079s user 0m18.943s sys 0m1.030s | 1590048 | real 0m13.143s user 0m12.996s sys 0m0.124s
-O0: the letter "O" followed by a zero turns off all optimization; it is also the default when no -O level is set in CFLAGS/CXXFLAGS. The code is not optimized at all, which is usually not what we want. | real 0m19.957s user 0m18.521s sys 0m1.406s | 1590048 | real 0m12.816s user 0m12.661s sys 0m0.138s
-O1: the most basic optimization level. The compiler tries to produce faster, smaller code without spending much compile time. The optimizations are very basic, but they normally get the job done. | real 0m22.782s user 0m21.750s sys 0m0.978s | 599306 | real 0m5.074s user 0m4.951s sys 0m0.115s
-O2: a step up from -O1 and the recommended level unless you have special needs. -O2 enables a few more flags than -O1; the compiler tries to improve performance without growing the binary or using a great deal of compile time. | real 0m26.258s user 0m25.313s sys 0m0.887s | 594456 | real 0m4.621s user 0m4.491s sys 0m0.123s
-O3: the highest and most dangerous level. It lengthens compile time and should not be enabled globally on gcc 4.x systems. GCC's behavior has changed a lot since 3.x: with 3.x, -O3 code was only slightly faster than -O2, and with gcc 4.x it may not be faster at all. Building every package with -O3 yields larger, more memory-hungry binaries and greatly raises the odds of build failures or unpredictable behavior (including bugs). It is not worth it; using -O3 with gcc 4.x is not recommended. | real 0m29.642s user 0m28.671s sys 0m0.852s | 659664 | real 0m4.669s user 0m4.521s sys 0m0.141s
-O4 is equivalent to -O3, -O5 is equivalent to -O3, and so on. | real 0m30.264s user 0m29.236s sys 0m0.969s | 659664 | real 0m4.481s user 0m4.331s sys 0m0.144s
-Os: optimizes for code size; it enables those -O2 code-generation options that do not grow the binary. Useful on machines that are very short of disk space or have small CPU caches, but it can cause occasional problems, so most ebuilds in the tree filter this level out. Using -Os is not recommended. | real 0m24.206s user 0m23.285s sys 0m0.855s | 519903 | real 0m5.188s user 0m5.050s sys 0m0.132s
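The table above records one particular measurement run. A small driver like the following can repeat the comparison on your own code; it is only a sketch (Python is used here as in the rest of these notes, gcc is assumed to be on PATH, and test.c / test_bin are placeholder names, not files from the original post).

# Rough sketch: time gcc at several -O levels and report the binary size.
import os
import subprocess
import time

levels = ["", "-O0", "-O1", "-O2", "-O3", "-Os"]

for opt in levels:
    out = "test_bin"
    cmd = ["gcc", "test.c", "-o", out]
    if opt:
        cmd.insert(1, opt)
    t1 = time.time()
    subprocess.check_call(cmd)           # compile
    build_time = time.time() - t1
    size = os.path.getsize(out)          # size of the produced binary in bytes
    t2 = time.time()
    subprocess.check_call(["./" + out])  # run the binary and time it
    run_time = time.time() - t2
    print("%-4s build=%.2fs size=%d run=%.2fs" % (opt or "none", build_time, size, run_time))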
# -*- coding: utf-8 -*-
# HOW TO MAKE PASSWORD
# BY ZHANGTAO
import random, sys, time

def main():
    if len(sys.argv) != 2:
        print "usage: python {0} length_of_password".format(sys.argv[0])
        return
    c = sys.argv[1]
    c = int(c)
    a1 = "123456789"
    a2 = "qwertyuipasdfghjklzxcvbnm"
    a3 = "QWERTYUIOPADFGHJKLZXCBNM"
    a4 = "@$&"
    aList = [a1, a2, a3, a4]
    s = ""
    aLast = 5

    for i in xrange(c):
        while True:
            a = random.choice(aList)
            if len(a) != aLast:
                aLast = len(a)
                if aLast == len(a4):
                    aList = [a1, a2, a3]
                break
            time.sleep(0.1)
        s += random.choice(a)

    print "make password:", s
    print "len :", len(s)

if __name__ == "__main__":
    main()
#!/usr/bin/env python
# encoding: utf-8
# zhangtao 2016/07-28
# Monitor file changes; if a file is modified, send an HTTP GET request to the
# service so that the service reloads its config.
#############################################
import os
import urllib2
from urllib2 import URLError, HTTPError
from pyinotify import WatchManager, Notifier, ProcessEvent, IN_DELETE, IN_CREATE, IN_MODIFY
import threading
from debuglogger import *

config_urls = ["http://127.0.0.1:6001/chelun?query=reload1",
               "http://127.0.0.1:6001/chelun?query=reload2",
               "http://127.0.0.1:6001/chelun?query=reload3"]
config_files = ["service:dispatch.conf", "service:proxy.conf", "service:qc.conf"]
config_flags = [0, 0, 0]
config_timer = 0
pid_file = "run/daemon.pid"

def doGet(url):
    request = urllib2.Request(url)
    try:
        response = urllib2.urlopen(request, timeout=10)
        page = response.read()
        return page
    except URLError, e:
        if hasattr(e, 'code'):
            s = 'The server couldn\'t fulfill the request. errorcode:{0} url:{1}'.format(e.code, url)
            return s
        elif hasattr(e, 'reason'):
            s = 'We failed to reach a server. reason:{0} url:{1}'.format(e.reason, url)
            return s
    return "Error"

def func():
    global config_files
    global config_flags
    global config_urls
    global config_timer
    config_timer = 0
    for i in xrange(3):
        if 1 == config_flags[i]:
            result = doGet(config_urls[i])
            config_flags[i] = 0
            s = "fileChange:{0} triggerUrl:{1} rspResult:{2}".format(config_files[i], config_urls[i], result)
            Logger.dumplog(s)

def startTimer():
    global config_timer
    if config_timer != 0:
        return
    config_timer = 1
    timer = threading.Timer(5, func)
    timer.start()

class EventHandler(ProcessEvent):
    """Event handler"""
    def process_IN_CREATE(self, event):
        #print "Create file: %s " % os.path.join(event.path, event.name)
        pass

    def process_IN_DELETE(self, event):
        #print "Delete file: %s " % os.path.join(event.path, event.name)
        pass

    def process_IN_MODIFY(self, event):
        #print "Modify file: %s " % os.path.join(event.path, event.name)
        if "service:dispatch.conf" == event.name:
            config_flags[0] = 1
            startTimer()
        elif "service:proxy.conf" == event.name:
            config_flags[1] = 1
            startTimer()
        elif "service:qc.conf" == event.name:
            config_flags[2] = 1
            startTimer()

def FSMonitor(path='.'):
    wm = WatchManager()
    #mask = IN_DELETE | IN_CREATE | IN_MODIFY
    mask = IN_MODIFY
    notifier = Notifier(wm, EventHandler())
    wm.add_watch(path, mask, auto_add=True, rec=True)
    s = 'now starting monitor %s' % (path)
    Logger.dumplog(s)
    pid = str(os.getpid())
    with open(pid_file, "w") as f:
        f.write(pid)
    s = "pid:{0}".format(pid)
    Logger.dumplog(s)

    while True:
        try:
            notifier.process_events()
            if notifier.check_events():
                notifier.read_events()
        except KeyboardInterrupt:
            notifier.stop()
            break

if __name__ == "__main__":
    FSMonitor('/data/config/')
# -*- coding: utf-8 -*-
# http://blog.csdn.net/zcyhappy1314/article/details/8283717
import MySQLdb
import sys
import os
import time

mysql = r"/usr/bin/mysql"
mysqldump = r"/usr/bin/mysqldump"
mysql_host = "127.0.0.1"
mysql_port = 3306
mysql_user = "root"
mysql_pwd = ""
save_dir = "temp"

def mkdir(path):
    path = path.strip()
    path = path.rstrip("\\")
    if not os.path.exists(path):
        os.makedirs(path)
    return path

def pause():
    raw_input("press enter to continue!")
    print "doing.."

# dump a database, structure only
def dumpDatabaseOnlyStruct(dbName="", dbCount=0):
    print "dumpDatabaseOnlyStruct:", dbName, dbCount
    databases = ""
    if dbCount == 0:
        databases = dbName
    else:
        for i in xrange(dbCount):
            databases += "{0}_{1} ".format(dbName, i)
    dbFile = r"{0}/{1}.sql".format(save_dir, dbName)
    if os.path.exists(dbFile):
        print "remove file:", dbFile
        os.remove(dbFile)
    ########################
    cmdStr = "{0} -h{1} -u{2} -p{3} -d -B {4} > {5}".format(mysqldump, mysql_host, mysql_user, mysql_pwd, databases, dbFile)
    print "------------------dumpDatabaseOnlyStruct---------------"
    print cmdStr
    pause()
    t1 = time.time()
    result = os.system(cmdStr)
    t2 = time.time()
    t3 = t2 - t1
    print "dumpDatabaseOnlyStruct cmd:{0} \n>>result:{1} useTime:{2} file:{3}".format(cmdStr, result, t3, dbFile)

# dump a database, structure + data
# db_im_msg_record
def dumpDatabaseStructAndData(dbName="", dbCount=0):
    databases = ""
    if dbCount == 0:
        databases = dbName
    else:
        for i in xrange(dbCount):
            databases += "{0}_{1} ".format(dbName, i)
    dbFile = r"{0}/{1}.sql".format(save_dir, dbName)
    if os.path.exists(dbFile):
        print "remove file:", dbFile
        os.remove(dbFile)
    ########################
    cmdStr = "{0} -h{1} -u{2} -p{3} -B {4} > {5}".format(mysqldump, mysql_host, mysql_user, mysql_pwd, databases, dbFile)
    print "------------------dumpDatabaseStructAndData---------------"
    print cmdStr
    pause()
    t1 = time.time()
    result = os.system(cmdStr)
    t2 = time.time()
    t3 = t2 - t1
    print "dumpDatabaseStructAndData cmd:{0} \n>>result:{1} useTime:{2} file:{3}".format(cmdStr, result, t3, dbFile)

# recover from a mysqldump .sql file
def recoverMysqlFromDumpfile(dbName):
    dbFile = r"{0}/{1}.sql".format(save_dir, dbName)
    if not os.path.exists(dbFile):
        print "recoverMysqlFromDumpfile, but cannot find file:{0}".format(dbFile)
        sys.exit(-1)
    cmdStr = '{0} -h{1} -u{2} -p{3} -e "source {4}"'.format(mysql, mysql_host, mysql_user, mysql_pwd, dbFile)
    print "------------------recoverMysqlFromDumpfile---------------"
    print cmdStr
    pause()
    t1 = time.time()
    result = os.system(cmdStr)
    t2 = time.time()
    t3 = t2 - t1
    print "recoverMysqlFromDumpfile cmd:{0} result:{1} useTime:{2}".format(cmdStr, result, t3)

def dumpMysqlDB():
    dumpDatabaseOnlyStruct("db_im_msg_record", 8)
    dumpDatabaseOnlyStruct("db_im_user_msg", 4)
    dumpDatabaseOnlyStruct("db_im_group", 8)
    #dumpDatabaseStructAndData("db_im_msg_record", 8)

def recoverMysqlDB():
    recoverMysqlFromDumpfile("db_im_msg_record")
    recoverMysqlFromDumpfile("db_im_user_msg")
    recoverMysqlFromDumpfile("db_im_group")

if __name__ == "__main__":
    print "-------begin--------"
    mkdir(save_dir)
    dumpMysqlDB()
    # recoverMysqlDB()
    print "-------done---------"
# -*- coding: utf-8 -*-
# Give wisdom to the machine, by ChangShouMeng
# Defines the transfer context shared by client and server.
import time
import os, sys, traceback

class FileTransferContext(object):
    TRANSFER_TYPE_SEND = 0
    TRANSFER_TYPE_RECV = 1

    def __init__(self):
        self.transferType = 0
        self.filePath = ""          # absolute path, send side
        self.fileTotalSize = 0
        self.fileChunkSize = 1024
        self.contextName = "-"
        self.fileBaseName = ""      # file name only, recv side
        self.fileHandle = None
        self.transferedSize = 0
        self.transferMsgId = 0
        self.transferSeqNum = 0
        self.transferSeqNumUpLimit = 0
        self.transferSeqChunkSize = 10
        self.transferBeginTime = 0
        self.tmpdir = "temp"
        self.workStatus = 0

    def __del__(self):
        self.finiTransInfo()

    def initTransferInfo(self):
        print "---begin----init transfer info"
        try:
            if self.transferType == FileTransferContext.TRANSFER_TYPE_SEND:
                self.contextName = "snd>>"
                self.fileTotalSize = os.path.getsize(self.filePath)
                self.fileBaseName = os.path.basename(self.filePath)
                self.fileHandle = open(self.filePath, 'rb')
            else:
                self.contextName = "rcv<<"
                tmp_fn = r"{0}/{1}_tmp".format(self.tmpdir, self.fileBaseName)
                self.fileHandle = open(tmp_fn, 'wb')
                print tmp_fn
            assert self.fileHandle
            self.transferBeginTime = int(time.time())
            self.transferedSize = 0
            self.transferSeqNumUpLimit = self.fileTotalSize / self.fileChunkSize + 1
            self.workStatus = 1
            print "initTransferInfo {0} fileName:{1} fileSize:{2}".format(self.contextName, self.fileBaseName, self.fileTotalSize)
            return True
        except:
            print "initTransferInfo failed:", traceback.print_exc()
            print "init filepath is:", self.filePath
            return False

    def finiTransInfo(self):
        if not self.fileHandle:
            return
        try:
            if self.transferType == FileTransferContext.TRANSFER_TYPE_RECV:
                self.fileHandle.flush()
            self.fileHandle.close()
            self.fileHandle = None
        except:
            print "finiTransInfo failed:", traceback.print_exc()

    def traceTransferInfo(self):
        if self.transferedSize == self.fileTotalSize:
            self.workStatus = 2
        if self.transferSeqNum > 5 and (self.transferSeqNum % self.transferSeqChunkSize) != 0:
            return
        useTime = int(time.time()) - self.transferBeginTime
        speed = ""
        progress = float(self.transferedSize) / float(self.fileTotalSize)
        if useTime > 0:
            speedint = float(self.transferedSize) / float(useTime * 1024)
            if speedint >= 1024:
                speedint = float(speedint) / float(1024)
                speed = "{:.2f}mb/s".format(speedint)
            else:
                speed = "{:.2f}kb/s".format(speedint)
        print "{0}msgid:{1} total:{2}byte tranfered:{3}byte speed:{4} progress:{5:.2f} use-time:{6}s".format(
            self.contextName, self.transferMsgId, self.fileTotalSize, self.transferedSize, speed, progress, useTime)

    def readBuffer(self):
        try:
            return self.fileHandle.read(self.fileChunkSize)
        except:
            print "readBuffer err:", traceback.print_exc()
            return ""

    def writeBuffer(self, buffer):
        try:
            self.fileHandle.write(buffer)
            if (self.transferSeqNum % self.transferSeqChunkSize) == 0:
                self.fileHandle.flush()
            self.transferedSize += len(buffer)
            #print self.transferedSize, self.fileTotalSize
            if self.transferedSize == self.fileTotalSize:
                self.fileHandle.flush()
                self.fileHandle.close()
                self.fileHandle = None
                tmp_fn = "{0}/{1}_tmp".format(self.tmpdir, self.fileBaseName)
                real_fn = "{0}/{1}".format(self.tmpdir, self.fileBaseName)
                print "-" * 80
                print "tmp_fn:", tmp_fn
                print "real_fn:", real_fn
                if os.path.exists(real_fn):
                    os.remove(real_fn)
                os.rename(tmp_fn, real_fn)
                self.workStatus = 2
        except:
            print "writeBuffer err:", traceback.print_exc()
            return

    def isFinished(self):
        if self.workStatus <= 1:
            return False
        return True
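A rough send-side usage sketch of the context class above. The wiring is an assumption on my part (the real client/server loop lives in the project linked below), and demo.bin is a placeholder file name.

# Assumed send-side usage: open a file, read it chunk by chunk, track progress.
ctx = FileTransferContext()
ctx.transferType = FileTransferContext.TRANSFER_TYPE_SEND
ctx.filePath = "demo.bin"

if ctx.initTransferInfo():
    while not ctx.isFinished():
        chunk = ctx.readBuffer()          # read at most fileChunkSize bytes
        if not chunk:
            break
        # here the real client would send `chunk` over its socket protocol
        ctx.transferedSize += len(chunk)
        ctx.transferSeqNum += 1
        ctx.traceTransferInfo()           # periodic progress/speed logging
    ctx.finiTransInfo()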
-- Code download: https://github.com/changshoumeng/python_local-network_file-transfer-service --
# -*- coding: utf-8 -*-
# zhangtao 2016/06/14
# QQ 406878851

from PIL import *
import Image
import numpy as np
import os

########################################################################################
########################################################################################
class PixHelper(object):
    def __init__(self, (r, g, b)):
        self.r = r
        self.g = g
        self.b = b

    def grayVal(self):
        return (self.r * 30 + self.g * 59 + self.b * 11) / 100

    def validPix(self):
        if self.isBackgroundPix():
            if self.grayVal() != 255:
                pass  # print self.grayVal()
            return 255
        return 0

    def isBackgroundPix(self):
        background = PixHelper((150, 150, 150))
        background_grayVal = background.grayVal()
        return self.grayVal() > background_grayVal

    def isNeetPix(self):
        return False == self.isBackgroundPix()

########################################################################################
########################################################################################
class ImageHelper(object):
    model_file = "svm.model"

    def __init__(self, image):
        self.image = image
        self.width = image.size[0]
        self.height = image.size[1]
        self.subWidth = 16
        self.subHeight = self.height

    # binarize the image
    def convertGrayImage(self):
        for x in xrange(self.width):
            for y in xrange(self.height):
                pix = self.image.getpixel((x, y))
                pixhelper = PixHelper(pix)
                pix2 = pixhelper.validPix()
                self.image.putpixel((x, y), (pix2, pix2, pix2))
def fixCropSubImages(self): boxList = [] box0 = (7, 0, 20, self.height) box1 = (20, 0, 34, self.height) box2 = (34, 0, 46, self.height) box3 = (46, 0, 59, self.height) box4 = (59, 0, 72, self.height) box5 = (72, 0, 88, self.height) boxList.append(box0) boxList.append(box1) boxList.append(box2) boxList.append(box3) boxList.append(box4) boxList.append(box5) subList = [] for box in boxList: sub = self.image.crop(box) subList.append(sub) return subList
def getRect(self,block): minx = self.width maxx = 0 miny = self.height maxy = 0 for p in block: # print p minx = min(minx, p[0]) maxx = max(maxx, p[0]) miny = min(miny, p[1]) maxy = max(maxy, p[1]) return (minx, miny, maxx - minx + 1, maxy - miny + 1)
def getValidPoints(self): points = list() for x in xrange(self.width): for y in xrange(self.height): p = (x, y) pix = self.image.getpixel(p) pixhelper = PixHelper(pix) if pixhelper.isNeetPix(): points.append(p) pass return points
def getDataListWithOffset(self): points = self.getValidPoints() (left, top, width, height) = self.getRect(points) dataList = list()
# if 1 == 0: # data = self.getData() # dataList.append(data) # return dataList
xBegin = 0 - left xEnd = self.subWidth - (left + width) yBegin = 0 - top yEnd = self.subHeight - (top + height)
h_i = 0 w_i = 0 # print "######################" for h_i in xrange(yBegin, yEnd, 1): for w_i in xrange(xBegin, xEnd, 1): data = [0] * (self.subWidth * self.subHeight) for p in points: (x, y) = p (x2, y2) = (x + w_i, y + h_i) if x2 >= self.subWidth or y2 >= self.subHeight: continue idx = x2 + y2 * self.subWidth data[idx] = 1 dataList.append(data)
return dataList
    def getData(self):
        points = self.getValidPoints()
        (left, top, width, height) = self.getRect(points)
        data = [0] * (self.subWidth * self.subHeight)
        for p in points:
            (x, y) = p
            idx = x + y * self.subWidth
            data[idx] = 1
        return data

----

# -*- coding: utf-8 -*-
# zhangtao 2016/06/14
# QQ 406878851
import os
import numpy as np
import mlpy
import sys
import time
from PIL import Image
from ImageHelper import *
from common import *

def checkSampleSucc(train_label, test_result):
    if len(train_label) != len(test_result):
        print "checkSampleSucc shape no match"
        return
    total = len(train_label)
    succ = 0
    for i in xrange(total):
        item1 = test_result[i]
        item2 = train_label[i]
        if int(item1) == int(item2):
            succ += 1
    succRate = float(succ) / float(total)
    print "checkSampleSucc : ", (succ, total), succRate

def getDataListFromFile(srcfilepath):
    im = Image.open(srcfilepath).convert('RGB')
    helper = ImageHelper(im)
    return helper.getDataListWithOffset()

def loadSampleFromDir(label, srcdir, x, y):
    print "load ..", srcdir
    srcfiles = os.listdir(srcdir)
    tm = int(time.time())
    k = 0
    for srcfilename in srcfiles:
        srcfilepath = os.path.join(srcdir, srcfilename)
        if srcfilename[0] == 'a':
            pass  # continue
        # print srcfilename
        dataList = getDataListFromFile(srcfilepath)
        idx = 0
        for data in dataList:
            x.append(data)
            y.append(label)
            # createImageFromData(srcdir, data, idx)
            idx += 1
        newfilename = "{0}\\{1}.png".format(srcdir, k + tm)
        os.rename(srcfilepath, newfilename)
        k += 1

def loadSampleFiles():
    x = []
    y = []
    label_count = 0
    for c in xrange(ord('a'), ord('z') + 1, 1):
        if c == ord('i'):
            pass  # continue
        srcdir = r'sample\{0}'.format(chr(c))
        # mkdir(srcdir)
        loadSampleFromDir(c, srcdir, x, y)
        label_count += 1
    for i in xrange(1, 10, 1):
        srcdir = r'sample\{0}'.format(i)
        # mkdir(srcdir)
        loadSampleFromDir(i, srcdir, x, y)
        label_count += 1
    print "label_count:", label_count
    return (x, y)

def train():
    (x, y) = loadSampleFiles()
    train_data = np.array(x, dtype='float32')
    train_label = np.array(y, dtype='int32')
    svm = mlpy.LibSvm(svm_type='c_svc', kernel_type='poly', gamma=10, probability=True)
    print "svm learn ."
    print "len(x):", len(x)
    svm.learn(train_data, train_label)
    print "svm learn done!"
    result = svm.pred(train_data)
    print result
    checkSampleSucc(y, result)
    svm.save_model(ImageHelper.model_file)
    # print result

def main():
    begin = time.time()
    train()
    #test()
    end = time.time()
    t = end - begin
    print "done:", t, t / 60
    # test2()

if __name__ == "__main__":
    print __file__
    main()

----

# -*- coding: utf-8 -*-
# zhangtao 2016/06/14
# QQ 406878851
import os import numpy as np import mlpy import sys import time from PIL import Image from ImageHelper import * from common import *
testDir="test"
def processOne(i): dataList=[] fn=r'img/{0}.jpg'.format(i) fn2=r'img2/{0}.bmp'.format(i) im = Image.open(fn) helper = ImageHelper(im) helper.convertGrayImage() im.save(fn2)
subList=helper.fixCropSubImages() count = len(subList) for j in xrange(count): fn3=r'img3/{0}_{1}.bmp'.format(i,j) sub = subList[j] sub.save(fn3) subHelper=ImageHelper(sub) data=subHelper.getData() dataList.append(data) return dataList
def savePredictResult(result_list): global testDir print "savePredictResult" count=len(result_list) if count > 0: with open("data.txt",'w') as file: for i in xrange(0,count,1): item=result_list[i] file.write(str(i)+":") file.write(item) file.write("\n") #print i, item lines1=[] lines2=[] with open("data.txt",'r') as f: lines1=f.readlines() with open(r"{0}/data.txt".format(testDir),'r') as f: lines2=f.readlines() count1=len(lines1) count2=len(lines2) if count1 != count2 : print "no match :",count1,count2 return succ=0 for i in xrange(count1): line1=lines1[i].strip() line2=lines2[i].strip() if line1 != line2: print "!=",line1,line2 else: succ += 1 rate= float(succ)/float(count1) print "result:",(succ,count1),rate
def main(): print "-----------test---------------------" if False == os.path.exists(ImageHelper.model_file): print "s.path.exists ,faild ", ImageHelper.model_file return svm = mlpy.LibSvm.load_model(ImageHelper.model_file) #print 'nclasses:', svm.nclasses() #print 'labels:', svm.labels() test_list=[] for i in xrange(100): dataList=processOne(i) labels=[] for data in dataList: input_data = np.array(data, dtype='float32') result_label = int(svm.pred(input_data)) labels.append(result_label) result_string=parseLabels(labels) print "{0}:{1}".format(i,result_string) test_list.append(result_string) savePredictResult(test_list)
if __name__ == "__main__": print __file__ main()
--- ---
/*
 Summary: two wrappers around libzip. compressString compresses a string,
 uncompressString decompresses one.
 zhangtao / 2016/06/13, contact 406878851@qq.com

 Test code:
 char inbuf[]="HELLOWORLD";
 int inLen=strlen(inbuf);
 char outbuf[8192];
 int outLen=0;
 compressString(inbuf,inLen,outbuf,8192,&outLen);

 char outbuf2[8192];
 int outLen2=0;
 uncompressString(outbuf,outLen,outbuf2,8192,&outLen2);
 printf("uncompressString %d %s\n",outLen2,outbuf2);
*/
static const char* ziparchive="data.zip";
static const char* archive="data";
static void saveZip(char* apOutBuf,int auOutBufSize) { FILE* fp; if ((fp=fopen(ziparchive, "wb")) == NULL) { fprintf(stderr, "fopen failed: %s\n", strerror(errno)); return ; } printf("fwrite size:%d\n",auOutBufSize); if (fwrite(apOutBuf, auOutBufSize, 1, fp) < 1) { fprintf(stderr, "fwrite failed: %s\n", strerror(errno)); fclose(fp); return; }
if (fclose(fp) != 0) { fprintf(stderr, "fclose failed: %s\n", strerror(errno)); return; } }
static int compressString(const char* apData,int auDataSize,char* apOutBuf,int auOutBufSize,int* apOutBufLen) { int ret=-1; *apOutBufLen=0; zip_t *za; zip_source_t *zs; zip_stat_t zst; struct stat st; zip_source_t *src; zip_error_t error; int err;
do { src = zip_source_buffer_create(NULL,0, 0, &error); if (src == NULL) { err = zip_error_code_zip(&error); errno = zip_error_code_system(&error); fprintf(stderr, "zip_source_buffer_create faild: %d\n",err); break; }
za = zip_open_from_source(src, 1, &error); if (za == NULL) { err = zip_error_code_zip(&error); errno = zip_error_code_system(&error); fprintf(stderr, "zip_open_from_source faild: %d\n",err); break; } zip_source_keep(src);
if ((zs=zip_source_buffer(za, apData, auDataSize, 0)) == NULL) { fprintf(stderr, "can't create zip_source from buffer: %s\n", zip_strerror(za)); break; }
if (zip_add(za, archive, zs) == -1) { fprintf(stderr, "can't add file '%s': %s\n", archive, zip_strerror(za)); break; }
if (zip_close(za) == -1) { fprintf(stderr, "can't close zip archive '%s': %s\n", archive, zip_strerror(za)); break; }
za=NULL;
if (zip_source_stat(src, &zst) < 0) { fprintf(stderr, "zip_source_stat on buffer failed: %s\n", zip_error_strerror(zip_source_error(src))); break; }
if (zst.size <=0){ printf(" size error 000\n"); break; }
if (zst.size >= auOutBufSize){ printf(" size error 111\n"); break; } if (zip_source_open(src) < 0) { if (zip_error_code_zip(zip_source_error(src)) == ZIP_ER_DELETED) { if (unlink(archive) < 0 && errno != ENOENT) { fprintf(stderr, "unlink failed: %s\n", strerror(errno)); break; } break; } fprintf(stderr, "zip_source_open on buffer failed: %s\n", zip_error_strerror(zip_source_error(src))); break; }
if (zip_source_read(src, apOutBuf, zst.size) < (zip_int64_t)zst.size) { fprintf(stderr, "zip_source_read on buffer failed: %s\n", zip_error_strerror(zip_source_error(src))); zip_source_close(src); break; } zip_source_close(src); *apOutBufLen = (int)(zst.size); ret=0;
//saveZip(apOutBuf,*apOutBufLen );
} while (0);
if (NULL != src) { zip_source_free(src); src=NULL; }
if (NULL != za) { zip_close(za); za=NULL; }
return ret; }
static int uncompressString(const char* apData,int auDataSize,char* apOutBuf,int auOutBufSize,int* apOutBufLen) { int ret=-1;
*apOutBufLen=0; zip_error_t error; int err=0; char* buf=apOutBuf; int totalSize=0; zip_int64_t n = 0; zip_source_t *src=NULL; zip_t *za=NULL; struct zip_file *f=NULL;
do { zip_error_init(&error); /* create source from buffer */ if ((src = zip_source_buffer_create(apData, auDataSize, 1, &error)) == NULL) { fprintf(stderr, "can't create source: %s\n", zip_error_strerror(&error)); zip_error_fini(&error); break; }
/* open zip archive from source */ if ((za = zip_open_from_source(src, 0, &error)) == NULL) { fprintf(stderr, "can't open zip from source: %s\n", zip_error_strerror(&error)); zip_error_fini(&error); break; }
zip_error_fini(&error); zip_source_keep(src);
zip_int64_t c = zip_get_num_entries(za, ZIP_FL_UNCHANGED); if ( c != 1) { printf("zip_get_num_entries 0 \n"); break; }
const char * name = zip_get_name(za, 0, ZIP_FL_ENC_GUESS); if (NULL == name) { printf("zip_get_name 0 \n"); break; }
f = zip_fopen(za, name, 0); if (NULL == f) { printf("zip_fopen 0 \n"); break; } if ( auOutBufSize < 4096) { printf("auOutBufSize < 4096 \n"); break; } totalSize=0; while( totalSize < auOutBufSize) { buf = apOutBuf+ totalSize; n = zip_fread(f, buf, 4096); if (n <=0 ) { break; }
totalSize += n; }
if (totalSize >= auOutBufSize) { printf("totalSize too big \n"); break; }
*apOutBufLen=totalSize; ret=0;
} while (0);
if (NULL != f) { zip_fclose(f); f=NULL; }
if (NULL != za) { //lt-in-memory: free(): invalid pointer: 0x00007fff9c75c6d0 *** //zip_close(za); za=NULL; }
if (NULL != src) { zip_source_free(src); src=NULL; }
return ret; }
#coding: utf8
import sys
from Crypto.Cipher import AES
from binascii import b2a_hex, a2b_hex
import base64

def printMemLog2(mem):
    byteData = bytearray(mem)
    byteLen = len(byteData)
    print "printMemLog begin .byteLen:{0}".format(byteLen)
    i = 0
    while i <= byteLen - 1:
        a = byteData[i]
        if i == byteLen - 1:
            print "%02x " % (a)
            break
        i += 1
        b = byteData[i]
        print "%02x%02x " % (a, b),
        i += 1
        if i % 64 == 0:
            print
    print
    print "*" * 10

# Pad text to a multiple of byteAlignLen bytes; each pad byte is the pad length itself.
def bytePad(text, byteAlignLen=16):
    count = len(text)
    mod_num = count % byteAlignLen
    if mod_num == 0:
        return text
    add_num = byteAlignLen - mod_num
    print "bytePad:", add_num
    return text + chr(add_num) * add_num

def byteUnpad(text, byteAlignLen=16):
    count = len(text)
    print "byteUnpad:", count
    mod_num = count % byteAlignLen
    assert mod_num == 0
    lastChar = text[-1]
    lastLen = ord(lastChar)
    lastChunk = text[-lastLen:]
    if lastChunk == chr(lastLen) * lastLen:
        #print "byteUnpad"
        return text[:-lastLen]
    return text

class prpcrypt():
    def __init__(self, key):
        self.key = key
        self.mode = AES.MODE_ECB
        self.iv = '\0' * 16

    # Encrypt. AES needs the plaintext length to be a multiple of 16 bytes,
    # so pad it up to a 16-byte boundary first.
    def encrypt(self, text):
        cryptor = AES.new(self.key, self.mode, self.iv)
        # The key length must be 16 (AES-128), 24 (AES-192) or 32 (AES-256) bytes;
        # AES-128 is enough for now.
        #length = 16
        text = bytePad(text, 16)
        #add = length - (count % length)
        #text = text + ('\0' * add)
        self.ciphertext = cryptor.encrypt(text)
        # The raw ciphertext is not necessarily printable ASCII, which can cause
        # problems when printing or saving it; convert to a hex string if needed.
        return self.ciphertext
        #return b2a_hex(self.ciphertext)

    # Decrypt, then strip the padding that encrypt() added.
    def decrypt(self, text):
        cryptor = AES.new(self.key, self.mode, self.iv)
        #plain_text = cryptor.decrypt(a2b_hex(text))
        plain_text = cryptor.decrypt(text)
        #printMemLog2(plain_text)
        #print plain_text
        return byteUnpad(plain_text)
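A quick round-trip sketch for the prpcrypt class above; the 16-byte key and the plaintext are made-up illustration values, and PyCrypto is assumed to be installed.

# Assumed usage example: encrypt, decrypt, and compare with the original text.
if __name__ == "__main__":
    pc = prpcrypt('keyskeyskeyskeys')          # 16-byte key -> AES-128
    secret = pc.encrypt("hello aes padding")   # padded up to a 16-byte boundary
    plain = pc.decrypt(secret)                 # unpadded back to the original
    print "plain == original:", plain == "hello aes padding"
    printMemLog2(secret)                       # hex dump of the ciphertext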
#ifndef __GIF_WRAPPER_H__
#define __GIF_WRAPPER_H__
// C++ wrapper around giflib
#include "gif_lib.h"
#include "rectimage.h"
#include <vector>
using namespace std;

class CGifImage
{
public:
    CGifImage();
    ~CGifImage();
    bool open(const char* gif_path);
    void close();
    const vector<RECT_IMAGE*>& getFrames() const { return frameArray; }
private:
    void dumpCurrentFrame();
    GifFileType *mpGifFile;       // handle to the gif file
    GifRowType *mpScreenBuffer;   // GifRowType is effectively a char*; ScreenBuffer[0] points to the pixels of row 0, ScreenBuffer[0][2] is pixel 2 of row 0
    vector<RECT_IMAGE*> frameArray;
    ColorMapObject *ColorMap;
};
/* For a usage example, see main in the cpp file. */
#endif//__GIF_WRAPPER_H__ // g++ gif_wrapper.cpp -o testgif -L./lib -lgif -I./lib -I./ #include <stdlib.h> #include <stdio.h> #include <ctype.h> #include <string.h> #include <stdbool.h> #include <fcntl.h> #include "gif_wrapper.h" CGifImage::CGifImage():mpGifFile(NULL),mpScreenBuffer(NULL),ColorMap(NULL) { } CGifImage::~CGifImage() { close(); } void CGifImage::close() { if (NULL != mpScreenBuffer) { (void)free(mpScreenBuffer); mpScreenBuffer=NULL; } if (NULL != mpGifFile) { int Error=0; DGifCloseFile(mpGifFile); mpGifFile=NULL; } int count = frameArray.size(); for (int i=0 ;i < count;++i) { RECT_IMAGE* p = frameArray[i]; if (NULL != p) { delete p; p = NULL; } } frameArray.clear() ; } void CGifImage::dumpCurrentFrame() { RECT_IMAGE* rectImage = new RECT_IMAGE(); if (NULL == rectImage) { return ; } rectImage->init( mpGifFile->SWidth,mpGifFile->SHeight ,NULL); GifRowType GifRow; GifColorType *ColorMapEntry; for (int i = 0; i < mpGifFile->SHeight; i++) { GifRow = mpScreenBuffer[i]; for (int j = 0; j < mpGifFile->SWidth; j++) { ColorMapEntry = &ColorMap->Colors[GifRow[j]]; rectImage->putPixel(j,i, __RGB(ColorMapEntry->Red,ColorMapEntry->Green,ColorMapEntry->Blue)); } } frameArray.push_back(rectImage); } bool CGifImage::open( const char* gif_path ) { GifFileType *GifFile=NULL; GifRowType * ScreenBuffer=NULL; if (NULL != mpGifFile) { close(); } int i, j, Size, Row, Col, Width, Height, ExtCode, Count,Error; GifRecordType RecordType; GifByteType *Extension; int ImageNum = 0; int InterlacedOffset[] = { 0, 4, 2, 1 }; /* The way Interlaced image should. */ int InterlacedJumps[] = { 8, 8, 4, 2 }; /* be read - offsets and jumps... */ if ((GifFile = DGifOpenFileName(gif_path)) == NULL) { close(); return false; } //printf("GifFile->ImageCount : %d GifFile->Image.Interlace:%d\n",GifFile->ImageCount,GifFile->Image.Interlace); ////* ////* * Allocate the screen as vector of column of rows. Note this ////* * screen is device independent - it's the screen defined by the ////* * GIF file parameters. ////* */ if ((ScreenBuffer = (GifRowType *)malloc(GifFile->SHeight * sizeof(GifRowType))) == NULL) { //GIF_EXIT("Failed to allocate memory required, aborted."); close(); return false; } Size = GifFile->SWidth * sizeof(GifPixelType);/* Size in bytes one row.*/ if ((ScreenBuffer[0] = (GifRowType) malloc(Size)) == NULL) /* First row. */ { //GIF_EXIT("Failed to allocate memory required, aborted."); close(); return false; } for (i = 0; i < GifFile->SWidth; i++) /* Set its color to BackGround. */ ScreenBuffer[0][i] = GifFile->SBackGroundColor; for (i = 1; i < GifFile->SHeight; i++) { /* Allocate the other rows, and set their color to background too: */ if ((ScreenBuffer[i] = (GifRowType) malloc(Size)) == NULL) { //GIF_EXIT("Failed to allocate memory required, aborted."); close(); return false; } memcpy(ScreenBuffer[i], ScreenBuffer[0], Size); } //////* Scan the content of the GIF file and load the image(s) in: */ mpGifFile=GifFile; mpScreenBuffer=ScreenBuffer; ColorMap = (mpGifFile->Image.ColorMap? mpGifFile->Image.ColorMap: mpGifFile->SColorMap); if (NULL == ColorMap) { close(); return false; } do { if (DGifGetRecordType(GifFile, &RecordType) == GIF_ERROR) { //PrintGifError(GifFile->Error); //exit(EXIT_FAILURE); close(); return false; } switch (RecordType) { case IMAGE_DESC_RECORD_TYPE: if (DGifGetImageDesc(GifFile) == GIF_ERROR) { //PrintGifError(GifFile->Error); //exit(EXIT_FAILURE); close(); return false; } Row = GifFile->Image.Top; /* Image Position relative to Screen. 
*/ Col = GifFile->Image.Left; Width = GifFile->Image.Width; Height = GifFile->Image.Height; //printf("\n%s: Image %d at (%d, %d) [%dx%d]: ", "record ", count, Col, Row, Width, Height); if (GifFile->Image.Left + GifFile->Image.Width > GifFile->SWidth || GifFile->Image.Top + GifFile->Image.Height > GifFile->SHeight) { //fprintf(stderr, "Image %d is not confined to screen dimension, aborted.\n",ImageNum); //exit(EXIT_FAILURE); close(); return false; } if (GifFile->Image.Interlace) { /* Need to perform 4 passes on the images: */ for (Count = i = 0; i < 4; i++) for (j = Row + InterlacedOffset[i]; j < Row + Height;j += InterlacedJumps[i]) { //GifQprintf("\b\b\b\b%-4d", Count++); if (DGifGetLine(GifFile, &ScreenBuffer[j][Col],Width) == GIF_ERROR) { //PrintGifError(GifFile->Error); //exit(EXIT_FAILURE); close(); return false; } } } else { for (i = 0; i < Height; i++) { //GifQprintf("\b\b\b\b%-4d", i); if (DGifGetLine(GifFile, &ScreenBuffer[Row++][Col],Width) == GIF_ERROR) { //PrintGifError(GifFile->Error); //exit(EXIT_FAILURE); close(); return false; } } } dumpCurrentFrame(); //count++ ; break; case EXTENSION_RECORD_TYPE: /* Skip any extension blocks in file: */ if (DGifGetExtension(GifFile, &ExtCode, &Extension) == GIF_ERROR) { //PrintGifError(GifFile->Error); //exit(EXIT_FAILURE); close(); return false; } while (Extension != NULL) { if (DGifGetExtensionNext(GifFile, &Extension) == GIF_ERROR) { //PrintGifError(GifFile->Error); //exit(EXIT_FAILURE); close(); return false; } } break; case TERMINATE_RECORD_TYPE: break; default: /* Should be trapped by DGifGetRecordType. */ break; } } while (RecordType != TERMINATE_RECORD_TYPE); //printf("open finish:GifFile=%x width=%d,height=%d ScreenBuffer=%x\n",GifFile,GifFile->SWidth,GifFile->SHeight,ScreenBuffer); //printf("Extension:%p \n",Extension); //printf("count:%d\n",count); //printf("SColorResolution=%d,SBackGroundColor=%d SavedImages=%p\n",GifFile->SColorResolution,GifFile->SBackGroundColor,GifFile->SavedImages); //printf("Left=%d Top=%d Width=%d Height=%d Interlace=%d \n",GifFile->Image.Left,GifFile->Image.Top,GifFile->Image.Width,GifFile->Image.Height,GifFile->Image.Interlace); return true; }
#-*-coding:utf-8-*-
# 2016-04-18: parse a log file, compare it against the data in redis, and update redis.
import os
import redis
import sys
reload(sys)
sys.setdefaultencoding('utf-8')

r = None

def byRedis(groupId, msgId):
    global r
    if r == None:
        r = redis.Redis(host='192.168.1.227', port=6382, db=0)
        print "-------------------------------------->redis"
    if not r.ping():
        print "cannot ping redis"
        return
    key = 'groupmsgcounter_{0}'.format(groupId)
    oldmsgId = r.get(key)
    if oldmsgId == None:
        oldmsgId = 0
        #print "redis error:", key, oldmsgId, msgId
    oldmsgId = long(oldmsgId)
    #print "redis:", key, oldmsgId, msgId
    if oldmsgId < msgId:
        print "less:", key, oldmsgId, msgId
        return
    newmsgId = max(msgId, oldmsgId)
    if newmsgId == 0:
        return
    print newmsgId
    #r.set(key, newmsgId)

'''
Sample line:
cli-groupmsgid:124 svr-groupmsgid:124 groupid:72057594037938371
'''

def parse1(lineData):
    flag1 = 'cli-groupmsgid:'
    flag2 = 'svr-groupmsgid:'
    flag3 = 'lpContext:'
    flag4 = 'groupid:'
    flag5 = 'grouprealid:'
    s = lineData
    n1 = s.find(flag1)
    n2 = s.find(flag2)
    n3 = s.find(flag3)
    n4 = s.find(flag4)
    n5 = s.find(flag5)
    #print lineData
    a = s[n1 + len(flag1):n2]
    b = s[n2 + len(flag2):n3]
    c = s[n4 + len(flag4):n5]
    a = long(a)
    b = long(b)
    c = long(c)
    return (c, max(a, b))
    #print n1, n2, n3

def paseFile(fileName):
    result = dict()
    with open(fileName) as f:
        lines = f.readlines()
        for line in lines:
            lineData = line.strip()
            (groupId, msgId) = parse1(lineData)
            msgId2 = msgId
            if groupId in result:
                msgId2 = max(result[groupId], msgId)
            result[groupId] = msgId2
            # break
    return result

def testTwoDict(result1, result2):
    for (d, x) in result1.items():
        msgId = x
        if d in result2:
            if msgId < result2[d]:
                msgId = result2[d]
        #print "find:", d, x, msgId
        byRedis(d, msgId)

def main():
    result1 = paseFile("test1.txt")
    result2 = paseFile("test2.txt")
    result3 = dict()
    testTwoDict(result1, result2)
    testTwoDict(result2, result1)

main()
inline void image2text2( RECT_IMAGE* rawImage,svm_model *model,char* outbuf) { static int g_index=0;
RECT_IMAGE tempImage; tempImage.init(rawImage->w,rawImage->h,NULL); RECT_IMAGE* grayImage = &tempImage;
convert2GrayImage(rawImage,grayImage); //writeRectImage("gray.bmp",grayImage);
std::list< std::set<XYPOINT> > connGraps; getConnectedGraphs(grayImage,connGraps); if ( connGraps.size()==0){ errlog(" connGraps.size() is 0"); return; }
l_uint32 width=55; l_uint32 height=55;
int i=0;
std::list< std::set<XYPOINT> >::iterator it = connGraps.begin(); for ( ; it != connGraps.end() ;++it) { RECT_IMAGE subImage; subImage.init(width,height,NULL);
std::set<XYPOINT>& connGrap = *it; std::vector<XYPOINT> conList(connGrap.begin(),connGrap.end()); std::vector<XYPOINT> outList/*(connGrap.begin(),connGrap.end())*/;
// strip noise points along the left edge
processSubImage(rawImage,conList,outList);
l_uint32 x0=0;l_uint32 y0=0; getOffsetPos(outList,x0,y0);
// align to the top-left corner
std::vector<XYPOINT>::iterator pos = outList.begin();
for ( ; pos != outList.end() ; ++pos){
    l_uint32 x = pos->x;
    l_uint32 y = pos->y;
l_uint32 color = rawImage->getPixel(x,y);
l_uint32 x1 = x-x0; l_uint32 y1 = y-y0;
if (x1 >= width){ x1= width-1; } if( y1 >= height){ y1= height-1 ; }
subImage.putPixel(x1,y1,color); } //char fn[512]; //sprintf(fn,"result\\%d.bmp",g_index++); //writeRectImage(fn,&subImage); //return;
// run the SVM prediction to get this glyph's label
svm_problem prob;
pickupFeatureDataFromRectImage(prob,&subImage);
double v = svm_predict(model, prob.x[0]);
svm_problem_release(&prob);
sprintf(outbuf+i,"%c",int(v)); i++; }
}
<?php $content = file_get_contents('http://www.baidu.com/s?wd=zhongguoyihang&cl=3'); //shangzhengzhishu //zhongguoyinhang ?> <!DOCTYPE html> <html> <head lang="en"> <meta charset="UTF-8"> <meta content="text/html; charset=utf-8" http-equiv="Content-Type"/> <meta content="no-cache,must-revalidate" http-equiv="Cache-Control"> <meta content="no-cache" http-equiv="pragma"> <meta content="0" http-equiv="expires"> <title>zthome</title> <script type="text/javascript" src="resources/jquery/jquery-2.1.4.min.js"></script>
<script> $(document).ready(function () { $("#date").text((new Date()).toString()); setInterval("startRequest()",3000); }); function startRequest() { window.location.reload(); } </script> <style type="text/css"> #gupiao { width :200px; } </style>
</head> <body> <div id="date"> </div> <div id="gupiao"> <?php echo $content ; ?> </div> </body> </html>
# -*- coding: utf-8 -*-
from HttpRequestModule import *
import os import json import traceback
import codecs from lxml import etree import StringIO, gzip import sys reload(sys) sys.setdefaultencoding('utf-8')
def write_file(file_name,file_data,encoding): if len(file_data) == 0 : print "file_data is zero" return file_dir = r"D:\fs\test_data\qqzone" file_path=os.path.join(file_dir,file_name) print file_path # fp=open(file_path,"w") # fp.write(file_data) # fp.flush() # fp.close() with codecs.open(file_path,"w",encoding) as f: f.write(file_data)
def decodeJson(json_string): decode_json=None try: decode_json=json.loads(json_string) return decode_json except (TypeError, ValueError) as err: print( 'TypeError or ValueError:{0}'.format(err) ) except Exception,e: print( traceback.format_exc() ) pass return decode_json
def getUserBlogList(): blog_list=[] diray_url=''' http://b1.qzone.qq.com/cgi-bin/blognew/get_abs?hostUin=859226880&blogType=0&cateName=&cateHex=&statYear=2015&reqInfo=7&pos=0&num=15&sortType=0&absType=0&source=0&rand=0.6346770680975169&ref=qzone&g_tk=1611717761&verbose=1 ''' data=doGet(diray_url) data_len = len(data) if data_len == 0 : print "data len is 0" return blog_list data_json = data[10:data_len-2] #write_file('bloglist.txt',data_json,'utf-8') decode_json=decodeJson(data_json.decode("gbk")) if decode_json == None : print "decode_json is None" return [] if decode_json['code'] != 0: print "server response code is "+decode_json['code'] return [] data =decode_json['data'] if data['totalNum'] <=0 : print "server response totalnum is "+data['totalNum'] return [] blog_list=data['list'] return blog_list
def getUserBlog(uin,blogid): url=''' http://b1.qzone.qq.com/cgi-bin/blognew/blog_output_data?uin=%(uin)s&blogid=%(blogid)s&styledm=ctc.qzonestyle.gtimg.cn&imgdm=ctc.qzs.qq.com&bdm=b.qzone.qq.com&mode=2&numperpage=15×tamp=1437033537&dprefix=&inCharset=gb2312&outCharset=gb2312&ref=qzone '''%{'uin':uin,'blogid':blogid} my_headers={ "Accept-Encoding":"gzip,deflate,sdch", "Accept-Language": "zh-CN,zh;q=0.8,en;q=0.6" , "User-Agent": "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/38.0.2125.104 Safari/537.36" , "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8" , "Referer": "http://ctc.qzs.qq.com/qzone/newblog/blogcanvas.html" } request = urllib2.Request(url,headers=my_headers) try: response = urllib2.urlopen(request) except URLError,e: if hasattr(e, 'code'): print('The server couldn\'t fulfill the request. errorcode:{0}'.format(e.code )) elif hasattr(e, 'reason'): print('We failed to reach a server. reason:{0}'.format(e.reason )) else: page = response.read() return page return ""
def getText(elem): rc = [] for node in elem.itertext(): rc.append(node.strip()) return ''.join(rc)
def gzdecode(data) : compressedstream = StringIO.StringIO(data) gziper = gzip.GzipFile(fileobj=compressedstream) data2 = gziper.read() # 读取解压缩后数据 return data2 def test(blogid): print blogid blog_data=getUserBlog('859226880',blogid) blog_data=gzdecode(blog_data) #write_file( blogid+'.html',blog_data ) #return try: content=blog_data.decode('utf-8') tree=etree.HTML(content) node=tree.xpath("//div[@id='blogDetailDiv']")[0] tgt_data=getText(node) print "*"*30 print tgt_data write_file( blogid+'.txt',tgt_data, 'gbk') return except Exception,ex : print "111",Exception,":",ex try: content=blog_data.decode('gbk') tree=etree.HTML(content) node=tree.xpath("//div[@id='blogDetailDiv']")[0] tgt_data=getText(node) print "_"*30 print tgt_data write_file( blogid+'.txt',tgt_data ,'utf-8') except Exception,ex : print "222",Exception,":",ex def main(): print "main" test("1288281044") #return blog_list=getUserBlogList() for blog_item in blog_list: blogId=blog_item['blogId'] print blogId test( str(blogId) ) pass
main()
#!/bin/bash #zhangtao PROCESS_NAME="msgdbgate" LOG_FILE="run.log" PID_FILE="$PROCESS_NAME.pid" BASE_DIR="" RUN_DIR="" LIB_DIR="" function timeStamp(){ date +'%Y/%m/%d %H:%M:%S' } function logMessage(){ echo $(timeStamp) $@ echo $(timeStamp) $@>>$RUN_DIR/$LOG_FILE } function setEnv(){ if [ -z "$BASE_DIR" ] ; then PRG="$0" while [ -h "$PRG" ] ; do ls=`ls -ld "$PRG"` link=`expr "$ls" : '.*-> \(.*\)$'` if expr "$link" : '/.*' > /dev/ null; then PRG="$link" else PRG="`dirname "$PRG"`/$link" fi done BASE_DIR=`dirname "$PRG"`/.. BASE_DIR=`cd "$BASE_DIR" && pwd` fi RUN_DIR=$BASE_DIR/bin/run BIN_DIR=$BASE_DIR/bin LIB_DIR=$BASE_DIR/lib mkdir -p $RUN_DIR } function excuteCmdAndreportLog(){ `eval $@` logMessage $@ } function running(){ if [ -f "$RUN_DIR/$PID_FILE" ]; then pid=$(cat "$RUN_DIR/$PID_FILE") #logMessage "check : pid=$pid " process=`ps aux | grep " $pid " | grep -v grep`; #logMessage "result: $process" if [ "$process" == "" ]; then return 1; else return 0; fi else return 1 fi } function start_server(){ if running ;then logMessage "$PROCESS_NAME is running " exit 1 fi logMessage "----------------------------------> start_server $PROCESS_NAME " logMessage "nohup $BIN_DIR/$PROCESS_NAME 2>&1 1>&/dev/null &" export LD_LIBRARY_PATH=../lib:/usr/local/mysql/lib/:$LD_LIBRARY_PATH chmod a+x $BIN_DIR/$PROCESS_NAME nohup $BIN_DIR/$PROCESS_NAME 2>&1 1>&/dev/ null & echo $! >$RUN_DIR/$PID_FILE excuteCmdAndreportLog "chmod 755 $RUN_DIR/$PID_FILE" sleep 2 if running ;then logMessage "$PROCESS_NAME is running " exit 1 fi logMessage "$PROCESS_NAME start failed !" #tail -f $LOG_DIR/$LOG_FILE } function stop_server(){ if ! running;then logMessage "$PROCESS_NAME was not running" exit 1 fi count=0 pid=$( cat $RUN_DIR/$PID_FILE) while running; do let count=$count+1 logMessage "stopping $PROCESS_NAME $count times !!!" if [ $count -gt 5 ] ;then excuteCmdAndreportLog "kill -9 $pid" else sleep 1 excuteCmdAndreportLog "kill $pid" fi sleep 2 done logMessage "-----------> stop $PROCESS_NAME successfully <------------" excuteCmdAndreportLog "rm $RUN_DIR/$PID_FILE" } function monit_server(){ if running ;then exit 1 fi chmod a+x $BIN_DIR/$PROCESS_NAME nohup $BIN_DIR/$PROCESS_NAME 2>&1 1>&/dev/ null & echo $! >$RUN_DIR/$PID_FILE chmod 755 $RUN_DIR/$PID_FILE } function status(){ if running; then logMessage "$PROCESS_NAME is running."; exit 0; else logMessage "$PROCESS_NAME was stopped."; exit 1; fi } SERVER_NAME=$PROCESS_NAME function help() { echo "------------------------------------------------------------------------------" echo "Usage: server.sh {start|status|stop|restart|logback}" >&2 echo " start: start the $SERVER_NAME server" echo " stop: stop the $SERVER_NAME server" echo " restart: restart the $SERVER_NAME server" echo " logback: reload logback config file" echo " status: get $SERVER_NAME current status,running or stopped." echo "-----------------------------------------------------------------------------" } function getOpts(){ command=$1 shift 1 case $command in start) start_server $@; ;; stop) stop_server $@; ;; logback) reload_logback_config $@; ;; status) status $@; ;; monit) monit_server $@; ;; restart) $0 stop $@ $0 start $@ ;; help) help; ;; *) help; exit 1; ;; esac } function main(){ #logMessage "--------> begin <-------------" setEnv $@ #logMessage "BASE_DIR:$BASE_DIR" getOpts $@ #logMessage "--------> end " } main $@
<script>
(function ($, undefined) {
    var defaults = {
        'type': 'GET',
        'url': '',
        'data': {},
        'timeout': 60 * 1000,  // 60s timeout; your server-side interval should normally be shorter than this
        'xhrFields': {
            'withCredentials': true  // needed for the cross-domain case
        }
    }
    // options keeps the same api as $.ajax
    $.poll = function (options, fn) {
        function onMessage (data) {
            alert("onMessage");
            fn(data)
            $.poll(options, fn)
        }
        function onError () {
            alert("onError");
            // on error, retry after two seconds
            setTimeout(function () {
                $.poll(options, fn)
            }, 2000)
        }
        $.ajax(options)
            .done(onMessage).fail(onError)
    }
})(jQuery)

//
// Usage:
// $.poll({'url': '/server'}, function (data) {
//     process(data)
// })
</script>
Test goal: check how sessions are allocated relative to the browser.
Test code:

@RequestMapping(value ={"index.html"})
public String index2(HttpServletRequest request, ModelMap model) {
    String remoteIp = UserAddrUtils.getRemoteIp(request);
    HttpSession oldSession = request.getSession(false);
    HttpSession newSession = request.getSession();
    String oldSessionId = (null==oldSession)?"null":oldSession.getId();
    String newSessionId = (null==newSession)?"null":newSession.getId();
    System.out.println(" oldSessiondId:"+oldSessionId);
    System.out.println(" newSessionId:"+newSessionId);
    model.addAttribute("oldSessiondId",oldSessionId);
    model.addAttribute("newSessionId",newSessionId);
    model.addAttribute("remoteIp",remoteIp);
    logger.log(Level.INFO,"index.html remoteIp:"+remoteIp);
    return "index";
}
Test results:
Step | oldSessionId | newSessionId
Start one IE instance and open the url | null | 85731566EA8FBE5C14E5C339F9C77439
Press F5 to refresh | 85731566EA8FBE5C14E5C339F9C77439 | 85731566EA8FBE5C14E5C339F9C77439
Open the url in a new tab | 85731566EA8FBE5C14E5C339F9C77439 | 85731566EA8FBE5C14E5C339F9C77439
Close all IE windows, start IE again, open the url | null | E122FD375611B2CBEB734B6FAFA3FA2F
Start one Chrome instance and open the url | null | D180295DFAED11C2639EBD9D897BCEF1
Open the url in a new Chrome window | D180295DFAED11C2639EBD9D897BCEF1 | D180295DFAED11C2639EBD9D897BCEF1
Close all Chrome windows, start Chrome again, open the url | null | A433F39D84685FF1E10BE194F834B535

Conclusion: when a browser is opened and visits the url, the server creates one session, and that session is only released after every process of that browser has exited; opening the browser again and visiting the url then gets a brand-new session.
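The same behaviour can be checked from a script rather than by hand: a client that keeps its cookie jar reuses one JSESSIONID, while a client without cookies looks like a freshly started browser on every request. This is only a sketch; the URL is a placeholder for the test page above.

# Minimal sketch, Python 2 as in the rest of these notes.
import urllib2
from cookielib import CookieJar

url = "http://localhost:8080/index.html"   # placeholder for the Spring MVC test page

jar = CookieJar()
keeper = urllib2.build_opener(urllib2.HTTPCookieProcessor(jar))
keeper.open(url)                      # first hit: the server sets JSESSIONID
keeper.open(url)                      # second hit: same cookie jar -> same session

forgetter = urllib2.build_opener()    # no cookie handling at all
forgetter.open(url)                   # every hit looks like a brand-new browser

for c in jar:
    print c.name, c.value             # expect a single JSESSIONID entry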
Note: the test method and the reasoning below may be flawed; criticism and corrections are welcome.
MongoDB version: running mongod --version reports "version": "3.0.1".
1. MongoDB version: v3.0.1

2. MongoDB user and permission setup

// create the super administrator user
use admin
db.createUser(
  {
    user: "superuser",
    pwd: "superuser@nMhU3x",
    roles: ["root"]
  }
)

Reference: http://demo.netfoucs.com/xuzheng_java/article/details/42550653

3. On its first startup, mongodb grabbed 20 GB of disk space. This pre-allocation is done to improve write performance.
Reason: oplogSize sets the maximum size of the replication operation log (oplog). mongod sizes the oplog from the maximum available space; on 64-bit systems the oplog is normally 5% of free disk. Once mongod has created the oplog for the first time, changing oplogSize no longer affects its size.
With oplogSize set to 1G, initialization created a local.1 file of 1 GB.

4. System-level tuning for mongodb's startup warnings:
Step 1, as root:
echo "never" > /sys/kernel/mm/transparent_hugepage/enabled
echo "never" > /sys/kernel/mm/transparent_hugepage/defrag

Step 2, as root:
vim /etc/profile

and add the following commands:

ulimit -f unlimited
ulimit -t unlimited
ulimit -v unlimited
ulimit -n 64000
ulimit -m unlimited
ulimit -u 32000

5. Master-slave replication worked fine without --auth. After enabling --auth on the master, the slave ran into authentication problems and stopped syncing. In that configuration the experiment failed and the problem is still unresolved.

6. After initialization the data directory occupied 23 GB of physical space; after inserting 10 million rows it was 29 GB, and after 20 million rows about 30 GB.
30601 LBSUser 20 0 56.9g(VIRT) 2.8g(RES) 2.4g S 0.0 9.0 0:14.47 mongod

7. The master-slave replication config files are in: mongodb-master-slave.tar
Notes on the config files: see http://www.it165.net/database/html/201402/5303.html

8. The replica-set config files are in: mongodb-replset.tar.gz
(mongodb-master-slave.tar.zip holds the master-slave config files.)
Question: the leftmost-prefix rule for MySQL multi-column indexes
Reference: http://blog.sina.com.cn/s/blog_4deb16580100i8v2.html
Conclusion: the index file is sorted by the leftmost column first, and lookups are done with binary search on that ordering.

Question: index types (ordinary index, unique index, primary key, full-text index)
Reference: http://opq149766320.iteye.com/blog/561183
Conclusion: (none recorded)

Question: the difference between MySQL hash indexes and B-tree indexes
Reference: http://database.51cto.com/art/201010/229525.htm
Conclusion: the drawbacks come from the hash mapping itself and from hash collisions.
The hard part of an IM architecture is not forwarding messages; it is 1. maintaining user presence/state and 2. building the users' social graph.
Problem: an unordered sequence contains exactly one value that appears twice. How do you find that duplicate in O(N)?

0,1,2,..x,x,..98,99  is the observed sequence s1, where x appears twice and the duplicate copy of x has overwritten some value y
0,1,2,..x,y,..98,99  is the correct sequence s2

From:
sum(s1) - sum(s2) = x - y
product(s1) / product(s2) = x / y
we can solve for x and y.

Then a single pass over s1:
for item in s1:
    if x == item: x is the duplicate; return
    if y == item: y is the duplicate; return
Complexity: O(N)
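A small sketch of the idea in Python. It deliberately uses the sum together with the sum of squares instead of the product (with a sequence that starts at 0, the product of the correct sequence is 0, so the ratio form above breaks down); the two differences still pin down x and y in O(N).

# Find the duplicated value x (and the value y it overwrote) in O(N) time, O(1) extra space.
# s1 is the observed sequence; the correct sequence is assumed to be 0..n-1.
def find_duplicate(s1):
    n = len(s1)
    correct = range(n)                                          # 0, 1, ..., n-1
    d1 = sum(s1) - sum(correct)                                 # x - y
    d2 = sum(v * v for v in s1) - sum(v * v for v in correct)   # x^2 - y^2
    x_plus_y = d2 // d1                                         # (x^2 - y^2) / (x - y) = x + y
    x = (x_plus_y + d1) // 2                                    # the duplicated value
    y = (x_plus_y - d1) // 2                                    # the value that was overwritten
    return x, y

# Example: 0..9 with 7 written over the 3.
print(find_duplicate([0, 1, 2, 7, 4, 5, 6, 7, 8, 9]))   # -> (7, 3)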
static bool allowLocalNetworksegment(uint32 uSourceIP )
{
    // allow the 192.168.x.x segment
    // 43200 is the low 16 bits of the integer value returned by inet_addr( "192.168" )
    if ( 43200 == (uSourceIP&0x0000ffff) )
    {
        return true;
    }
return false; }
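The magic constant 43200 can be re-derived in a few lines. The sketch below assumes the address is held exactly the way the C code holds it, i.e. the network-byte-order value from inet_addr() stored in a little-endian uint32 (as on x86).

# Reproduce the 43200 == (ip & 0xffff) test for the 192.168.x.x range.
import socket
import struct

def is_local_segment(ip_str):
    # inet_aton gives the 4 bytes in network order; "<I" reads them as a
    # little-endian uint32, matching the C code's uint32 on x86.
    ip_val = struct.unpack("<I", socket.inet_aton(ip_str))[0]
    return (ip_val & 0x0000ffff) == 43200      # 0xA8C0, i.e. bytes 192,168

print(is_local_segment("192.168.1.10"))   # True
print(is_local_segment("10.0.0.1"))       # False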
static bool allowSpecialNetWorksegment(uint32 uSourceIP )
{
    // allow the network segments listed in the config file
    for ( uint32 i=0; i < CConfigFile::instance()->m_uIPNum ; ++i)
    {
        if (uSourceIP == CConfigFile::instance()->m_uIP[i] )
        {
            return true;
        }
    }
return false; }
import socket import struct
def IpStr2NetInt(IpStr):
    return struct.unpack("I", socket.inet_aton(IpStr))[0]

def IpStr2HostInt(IpStr):
    return socket.ntohl(IpStr2NetInt(IpStr))

def NetInt2IpStr(NetInt):
    #return HostInt2IpStr( socket.ntohl(NetInt) )
    return socket.inet_ntoa(struct.pack('I', NetInt))

def HostInt2IpStr(HostInt):
    return NetInt2IpStr(socket.htonl(HostInt))

def main():
    ip = "127.0.0.1"
    print IpStr2NetInt(ip)
    print IpStr2HostInt(ip)
    print NetInt2IpStr(16777343)
    print HostInt2IpStr(2130706433)

main()
#!/bin/bash #date:2015-01-29 #author:zhangtao #desc:stop the service by given service-name ###################################################### SERVICE_NAME=imgate WATCHDOG_NAME=startimgate.sh ####################################### function stopService() { PROCESS_NAME=$1 PIDS=`ps axu | grep $PROCESS_NAME | grep -v "grep" | awk '{print $2}'` PIDNUM=0 for PID in $PIDS do echo "IsKilled : $PROCESS_NAME : $PID" kill -9 $PID let PIDNUM=$PIDNUM+1 done if [ $PIDNUM == 0 ] ;then echo "NotFound : $PROCESS_NAME" else echo "FoundNum : $PROCESS_NAME : $PIDNUM" fi echo "-------" return} #################################### function printPrompt() { read -p ">>>:you must input 'yes' to continue:" choice [ "$choice" != "yes" ] && echo ">>>: not yes,so exit" && exit 0 echo ">>>:yes,continue " } ##################################### function main() { printPrompt #set -x echo " " PROCESS_ARRAY=($WATCHDOG_NAME $SERVICE_NAME) echo "length of process_array :${#PROCESS_ARRAY[@]}" echo "============" for PROCESS in ${PROCESS_ARRAY[@]}; do stopService $PROCESS done echo "---->finished" } main
Test machine: a two-core AMD box; client and server both run on the same host.

Network layer: type-A epoll threads handle passively accepted connections; type-B epoll threads handle actively established (connect) connections; type-C epoll threads take the connections produced by A or B, assign each one to a thread, and do all send/recv work; a type-D thread pool processes the data handed over by C and posts the results back to C for sending.

Short connections, tested with ab:
ab -n 1000 -c 150 http://127.0.0.1:8090/
After 20 runs the peak was tps = 6000.

Long connections, tested with a home-grown echo stress tool: the client sends 64 bytes, echosvrd returns 64 bytes, and the client sends 64 bytes again.

With every network payload under 64 bytes:
./stree_client 127.0.0.1 8090 100    -> tps = 13851
./stree_client 127.0.0.1 8090 1000   -> tps = 16641
./stree_client 127.0.0.1 8090 10000  -> tps = 16600
The service CPU reached about 90% and stayed below it; stree_client used roughly 32% CPU.
./stree_client 127.0.0.1 8090 20000  -> ended with stree_client pegged at 100% CPU
./stree_client 127.0.0.1 8090 15000  -> ended with stree_client pegged at 100% CPU

With the log level raised to 1 (essentially no log output):
./stree_client 127.0.0.1 8090 10000  -> tps = 29436
Service CPU around 70%; stree_client around 48%.
./stree_client 127.0.0.1 8090 15000  -> ended with stree_client pegged at 100% CPU

With the payload raised to about 128 bytes:
./stree_client 127.0.0.1 8090 10000  -> tps = 29179
Service CPU around 68%; stree_client around 45%.

Conclusion: testing on the same host ignores network IO latency. When the network engine's CPU hits 100%, the tps tops out at (or not far from) 30000, with about 10000 concurrent connections at that point.
Random thoughts on algorithms, excerpted (with omissions) from The Way of Algorithms. In truth, algorithms are like life, and life is like an algorithm. It is made up of many processes and is changed by those processes. Even when the result cannot be guaranteed correct, the process of solving can still be sound. Through the essence of algorithms we can see the crowd of ordinary people, faces of every kind, the many colors of life. If they are willing, everyone can look for the unique "algorithm" of their own lifetime and draw for the future a blueprint that is simple or complex, monotonous or repetitive, direct or recursive, serial or parallel, randomized or deterministic, successful or failed (if success and failure exist at all in this world).

If life is a hard mathematical problem, then everyone is looking for an algorithm that suits them. Some are born to "traverse": they cross a thousand mountains and rivers, taste every kind of experience, play every role, hoping for a rich and colorful life. Some are "greedy" all their lives: narrow of vision, taking pleasure where it is found. Some are destined for "exhaustive search": diligent and hardworking for a whole lifetime, giving much and harvesting little. Some are good at "time-space trade-offs", doing the most work in the least time; shrewd indeed. Some know how to "divide and conquer", and no problem is too big for them. Some are forever "backtracking": too many mistakes, too many regrets. Some have no algorithm at all; they live blindly, act blindly, and end with almost nothing. And some do "dynamic programming", accumulating small gains into large ones.

The wise want to plan holistically and design an optimal life. Facing a great project, they divide and conquer, then merge the results: do a part today, a part tomorrow, and eventually it gets done. It may not always be efficient, but it beats leaving the work untouched and sighing over it. When a problem's optimal solution contains the optimal solutions of its subproblems, they choose dynamic programming, building the optimal answer to the original problem out of the optimal answers to the subproblems.

The greedy hope their luck will last; they narrow every decision down to a greedy choice. With luck it may succeed; most of the time it only leads them astray.

Even the wise slip once in a thousand thoughts. If you suddenly discover you made the wrong choice, don't panic; try backtracking: go back and start over. At the next choice, remember to use branch and bound, drawing experience from some paths to judge which ones are bad. We can also pick a few paths at random and try them; it may pay off at once. But on the whole, backtracking is not a cure-all: some roads in life, once walked, cannot be walked back. Life is finite, and if you backtrack and redo your choices too often, you will only mark time in place. At a fork, making the right choice is genuinely hard; between right and wrong, yes and no, spirit and flesh, persistence and betrayal, effort and surrender, we need care. But you must take the next step. At the very least, you will then know the answer is not here.

Sometimes life holds a little helplessness: however much strength you spend, however strong your will, nothing comes of it. That is because many things, even when feasible, are not necessarily meaningful. The clever use intention to change reality, wisdom to tell the feasible from the infeasible, and conviction to tell the meaningful from the meaningless. If life feels too tiring, with too many steps, try a skip list; perhaps some of the steps are not necessary. If some step in life costs too much, try amortized analysis; perhaps the other steps have been too easy. If time is precious, try perfect hashing and map yourself straight onto the goal. But remember, perfect hashing is not perfect; its high price may be more than your soul can bear.
The wise use asymptotic analysis to grasp a problem's intrinsic complexity. Once you know the lower bound on a problem's computation time, you can judge the efficiency of the algorithms that solve it and see how much room for improvement remains. If it is a problem in P, press on; if you run into an NP-complete one, settle for an approximate optimum. It is like the mate-finding problem most people cannot (or will not) avoid: everyone carries a benchmark in their heart (the prince or the princess), and finding a perfect match is not easy, yet you cannot prove such a person does not exist. Isn't that NP-complete? What to do? Look for an approximate solution, and with each love move a step closer to the benchmark.

Algorithms demand utmost simplicity. Over a long life, surely everyone carries an innate dream; this is the soul of the algorithm, the loop invariant. It may shift as our experience shifts, but its place in the heart does not change. The place may be near or far, the road there smooth or rugged; what does that matter? When you realize that first dream, every loop finally lands on its most beautiful instant: the belief that never changes.

As we grow up, the muddled live muddled lives, not knowing whether they are drawing nearer to that place in their heart or drifting farther away. The clever run algorithms that are vast yet exquisite, because they constantly reflect on themselves, check that invariant, keep simplifying, and with every step march firmly toward simplicity, even when it means walking on alone, "one more cup of wine, for west of the Yang Pass there are no old friends."

Algorithms ultimately return to the eternal. Life's algorithms come in every kind. Some are born wise and beautiful, others slow and plain. You need not be thankful or proud for it, nor sad or discouraged, nor complain that heaven is unfair. That is useless and unnecessary, because if an algorithm is robust enough, the influence of its initial conditions is negligible. What leaves us exhausted, drained and joyless is usually not a burdensome life but a life without meaning or hope, for "an algorithm without a goal cannot advance correctly."

Different algorithms play out different lives, and their influence is not limited to this world but reaches into another dimension. Worldly splendor fades before the eyes; every charm passes away. Realizing this, we suddenly find that many of the activities, goals, pursuits, even problems we used to value seem trivial and not worth mentioning. We re-order our priorities and aim the algorithm at preparing for eternity, refining the soul, because this algorithm will be our final answer sheet before the Creator.

A flower is a world; a leaf is a bodhi. An algorithm, a magical stretch of code, plays out a legendary life and locks in an eternal trace. Understand algorithms and take hold of life: let us use an algorithmic lifetime to engrave the algorithm of our lifetime, cycling forever in a starry sky without time, leaving the indelible mark of "fret not that the road ahead holds no friend; who under heaven does not know you!" This is the realm of seeking utmost simplicity and returning to the eternal...
Abstract: in order to embed an HTTP monitoring service that exposes status data inside an existing service, I wrapped a small select-based IO server.
First, use the command below to get the list of IPs currently connected to this service:
netstat -anp|egrep 80|egrep "EST"|awk '{print $(NF-2)}' |cut -f 1 -d ":"
Then write a small Python script that resolves each IP to its geographic location.
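A rough sketch of that second step: parse the same netstat output and query a geolocation service for each remote IP. The use of ip-api.com here is my assumption (any IP-to-location service or a local GeoIP database would do), and port 80 is assumed as in the pipeline above.

# Sketch: collect remote IPs from netstat and look up where they come from.
import json
import subprocess
import urllib2

def connected_ips(port="80"):
    # Same idea as the shell pipeline: keep ESTABLISHED connections whose local
    # side is the service port, then take the remote IP.
    out = subprocess.check_output("netstat -anp | grep EST", shell=True)
    ips = set()
    for line in out.splitlines():
        cols = line.split()
        if len(cols) < 5 or not cols[3].endswith(":" + port):
            continue
        ips.add(cols[4].rsplit(":", 1)[0])   # foreign address column, "ip:port"
    return ips

def lookup(ip):
    rsp = urllib2.urlopen("http://ip-api.com/json/" + ip, timeout=5).read()
    info = json.loads(rsp)
    return info.get("country", ""), info.get("city", "")

for ip in connected_ips():
    print ip, lookup(ip)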