Utils.py

'''
Created on 2018-12-20
@author: User
'''
import numpy as np
import re
import gensim
from keras import backend as K
import os,sys
import time
import traceback
from threading import RLock
# from pai_tf_predict_proto import tf_predict_pb2
import requests
model_w2v = None
lock_model_w2v = RLock()
USE_PAI_EAS = False
Lazy_load = False
# API_URL = "http://192.168.2.103:8802"
API_URL = "http://127.0.0.1:888"
# USE_API = True
USE_API = False
def getCurrent_date(format="%Y-%m-%d %H:%M:%S"):
    _time = time.strftime(format,time.localtime())
    return _time
def getw2vfilepath():
    filename = "wiki_128_word_embedding_new.vector"
    w2vfile = getFileFromSysPath(filename)
    if w2vfile is not None:
        return w2vfile
    return filename
def getLazyLoad():
    global Lazy_load
    return Lazy_load
def getFileFromSysPath(filename):
    for _path in sys.path:
        if os.path.isdir(_path):
            for _file in os.listdir(_path):
                _abspath = os.path.join(_path,_file)
                if os.path.isfile(_abspath):
                    if _file==filename:
                        return _abspath
    return None
model_word_file = os.path.dirname(__file__)+"/../singlew2v_model.vector"
model_word = None
lock_model_word = RLock()
from decimal import Decimal
import logging
logging.basicConfig(level = logging.INFO,format = '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
import pickle
import os
import json
# custom JSONEncoder that serializes numpy and bytes values
class MyEncoder(json.JSONEncoder):
    def default(self, obj):
        if isinstance(obj, np.ndarray):
            return obj.tolist()
        elif isinstance(obj, bytes):
            return str(obj, encoding='utf-8')
        elif isinstance(obj, (np.float_, np.float16, np.float32,
                              np.float64)):
            return float(obj)
        elif isinstance(obj,(np.int64,np.int32)):
            return int(obj)
        return json.JSONEncoder.default(self, obj)
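# Illustrative usage (added for clarity, not part of the original code): with MyEncoder,
# json.dumps can serialize numpy values directly, e.g.
#   json.dumps({"scores": np.arange(3), "ratio": np.float32(0.5)}, cls=MyEncoder)
# should produce '{"scores": [0, 1, 2], "ratio": 0.5}'.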
vocab_word = None
vocab_words = None
file_vocab_word = "vocab_word.pk"
file_vocab_words = "vocab_words.pk"
selffool_authorization = "NjlhMWFjMjVmNWYyNzI0MjY1OGQ1M2Y0ZmY4ZGY0Mzg3Yjc2MTVjYg=="
selffool_url = "http://pai-eas-vpc.cn-beijing.aliyuncs.com/api/predict/selffool_gpu"
selffool_seg_authorization = "OWUwM2Q0ZmE3YjYxNzU4YzFiMjliNGVkMTA3MzJkNjQ2MzJiYzBhZg=="
selffool_seg_url = "http://pai-eas-vpc.cn-beijing.aliyuncs.com/api/predict/selffool_seg_gpu"
codename_authorization = "Y2M5MDUxMzU1MTU4OGM3ZDk2ZmEzYjkxYmYyYzJiZmUyYTgwYTg5NA=="
codename_url = "http://pai-eas-vpc.cn-beijing.aliyuncs.com/api/predict/codename_gpu"
form_item_authorization = "ODdkZWY1YWY0NmNhNjU2OTI2NWY4YmUyM2ZlMDg1NTZjOWRkYTVjMw=="
form_item_url = "http://pai-eas-vpc.cn-beijing.aliyuncs.com/api/predict/form"
person_authorization = "N2I2MDU2N2Q2MGQ0ZWZlZGM3NDkyNTA1Nzc4YmM5OTlhY2MxZGU1Mw=="
person_url = "http://pai-eas-vpc.cn-beijing.aliyuncs.com/api/predict/person"
role_authorization = "OWM1ZDg5ZDEwYTEwYWI4OGNjYmRlMmQ1NzYwNWNlZGZkZmRmMjE4OQ=="
role_url = "http://pai-eas-vpc.cn-beijing.aliyuncs.com/api/predict/role"
money_authorization = "MDQyNjc2ZDczYjBhYmM4Yzc4ZGI4YjRmMjc3NGI5NTdlNzJiY2IwZA=="
money_url = "http://pai-eas-vpc.cn-beijing.aliyuncs.com/api/predict/money"
codeclasses_authorization = "MmUyNWIxZjQ2NjAzMWJlMGIzYzkxMjMzNWY5OWI3NzJlMWQ1ZjY4Yw=="
codeclasses_url = "http://pai-eas-vpc.cn-beijing.aliyuncs.com/api/predict/codeclasses"
def viterbi_decode(score, transition_params):
    """Decode the highest scoring sequence of tags outside of TensorFlow.

    This should only be used at test time.

    Args:
      score: A [seq_len, num_tags] matrix of unary potentials.
      transition_params: A [num_tags, num_tags] matrix of binary potentials.

    Returns:
      viterbi: A [seq_len] list of integers containing the highest scoring tag
          indices.
      viterbi_score: A float containing the score for the Viterbi sequence.
    """
    trellis = np.zeros_like(score)
    backpointers = np.zeros_like(score, dtype=np.int32)
    trellis[0] = score[0]
    for t in range(1, score.shape[0]):
        v = np.expand_dims(trellis[t - 1], 1) + transition_params
        trellis[t] = score[t] + np.max(v, 0)
        backpointers[t] = np.argmax(v, 0)
    viterbi = [np.argmax(trellis[-1])]
    for bp in reversed(backpointers[1:]):
        viterbi.append(bp[viterbi[-1]])
    viterbi.reverse()
    viterbi_score = np.max(trellis[-1])
    return viterbi, viterbi_score
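# Illustrative example (added for clarity): with a 2-step, 2-tag score matrix and zero transitions,
#   viterbi_decode(np.array([[1.0, 0.0], [0.0, 1.0]]), np.zeros((2, 2)))
# should return the tag path [0, 1] with a Viterbi score of 2.0.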
def limitRun(sess,list_output,feed_dict,MAX_BATCH=1024):
    len_sample = 0
    if len(feed_dict.keys())>0:
        len_sample = len(feed_dict[list(feed_dict.keys())[0]])
    if len_sample>MAX_BATCH:
        list_result = [[] for _ in range(len(list_output))]
        _begin = 0
        while(_begin<len_sample):
            new_dict = dict()
            for _key in feed_dict.keys():
                if isinstance(feed_dict[_key],(float,int,np.int32,np.float_,np.float16,np.float32,np.float64)):
                    new_dict[_key] = feed_dict[_key]
                else:
                    new_dict[_key] = feed_dict[_key][_begin:_begin+MAX_BATCH]
            _output = sess.run(list_output,feed_dict=new_dict)
            for _index in range(len(list_output)):
                list_result[_index].extend(_output[_index])
            _begin += MAX_BATCH
    else:
        list_result = sess.run(list_output,feed_dict=feed_dict)
    return list_result
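# Sketch of intended usage (added; the session, ops and placeholder names are hypothetical):
#   logits, lengths = limitRun(sess, [logits_op, length_op], {input_ph: big_input_array}, MAX_BATCH=512)
# Oversized feeds are split into MAX_BATCH-sized slices and the per-op outputs are concatenated;
# scalar feed values are passed through unchanged to every slice.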
def get_values(response,output_name):
    """
    Get the value of a specified output tensor
    :param output_name: name of the output tensor
    :return: the content of the output tensor
    """
    # NOTE: relies on tf_predict_pb2, whose import is commented out at the top of this file
    output = response.outputs[output_name]
    if output.dtype == tf_predict_pb2.DT_FLOAT:
        _value = output.float_val
    elif output.dtype == tf_predict_pb2.DT_INT8 or output.dtype == tf_predict_pb2.DT_INT16 or \
            output.dtype == tf_predict_pb2.DT_INT32:
        _value = output.int_val
    elif output.dtype == tf_predict_pb2.DT_INT64:
        _value = output.int64_val
    elif output.dtype == tf_predict_pb2.DT_DOUBLE:
        _value = output.double_val
    elif output.dtype == tf_predict_pb2.DT_STRING:
        _value = output.string_val
    elif output.dtype == tf_predict_pb2.DT_BOOL:
        _value = output.bool_val
    return np.array(_value).reshape(response.outputs[output_name].array_shape.dim)
def vpc_requests(url,authorization,request_data,list_outputs):
    headers = {"Authorization": authorization}
    dict_outputs = dict()
    resp = requests.post(url, data=request_data, headers=headers)
    if resp.status_code != 200:
        print(resp.status_code,resp.content)
        log("error calling the pai-eas API, authorization:"+str(authorization))
        return None
    else:
        response = tf_predict_pb2.PredictResponse()
        response.ParseFromString(resp.content)
        for _output in list_outputs:
            dict_outputs[_output] = get_values(response, _output)
        return dict_outputs
def encodeInput(data,word_len,word_flag=True,userFool=False):
    result = []
    out_index = 0
    for item in data:
        if out_index in [0]:
            list_word = item[-word_len:]
        else:
            list_word = item[:word_len]
        temp = []
        if word_flag:
            for word in list_word:
                if userFool:
                    temp.append(getIndexOfWord_fool(word))
                else:
                    temp.append(getIndexOfWord(word))
            list_append = []
            temp_len = len(temp)
            while(temp_len<word_len):
                if userFool:
                    list_append.append(0)
                else:
                    list_append.append(getIndexOfWord("<pad>"))
                temp_len += 1
            if out_index in [0]:
                temp = list_append+temp
            else:
                temp = temp+list_append
        else:
            for words in list_word:
                temp.append(getIndexOfWords(words))
            list_append = []
            temp_len = len(temp)
            while(temp_len<word_len):
                list_append.append(getIndexOfWords("<pad>"))
                temp_len += 1
            if out_index in [0,1]:
                temp = list_append+temp
            else:
                temp = temp+list_append
        result.append(temp)
        out_index += 1
    return result
def encodeInput_form(input,MAX_LEN=30):
    x = np.zeros([MAX_LEN])
    for i in range(len(input)):
        if i>=MAX_LEN:
            break
        x[i] = getIndexOfWord(input[i])
    return x
def getVocabAndMatrix(model,Embedding_size = 60):
    '''
    @summary: get the vocabulary and embedding matrix of the vector model
    '''
    vocab = ["<pad>"]+model.index2word
    embedding_matrix = np.zeros((len(vocab),Embedding_size))
    for i in range(1,len(vocab)):
        embedding_matrix[i] = model[vocab[i]]
    return vocab,embedding_matrix
def getIndexOfWord(word):
    global vocab_word,file_vocab_word
    if vocab_word is None:
        if os.path.exists(file_vocab_word):
            vocab = load(file_vocab_word)
            vocab_word = dict((w, i) for i, w in enumerate(np.array(vocab)))
        else:
            model = getModel_word()
            vocab,_ = getVocabAndMatrix(model, Embedding_size=60)
            vocab_word = dict((w, i) for i, w in enumerate(np.array(vocab)))
            save(vocab,file_vocab_word)
    if word in vocab_word.keys():
        return vocab_word[word]
    else:
        return vocab_word['<pad>']
def changeIndexFromWordToWords(tokens,word_index):
    '''
    @summary: convert a character offset into the corresponding token offset
    '''
    before_index = 0
    after_index = 0
    for i in range(len(tokens)):
        after_index = after_index+len(tokens[i])
        if before_index<=word_index and after_index>word_index:
            return i
        before_index = after_index
    return i+1
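# Illustrative example (added): with tokens = ["招标", "公告"], character offset 2 falls inside the
# second token, so changeIndexFromWordToWords(["招标", "公告"], 2) should return 1.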
def getIndexOfWords(words):
    global vocab_words,file_vocab_words
    if vocab_words is None:
        if os.path.exists(file_vocab_words):
            vocab = load(file_vocab_words)
            vocab_words = dict((w, i) for i, w in enumerate(np.array(vocab)))
        else:
            model = getModel_w2v()
            vocab,_ = getVocabAndMatrix(model, Embedding_size=128)
            vocab_words = dict((w, i) for i, w in enumerate(np.array(vocab)))
            save(vocab,file_vocab_words)
    if words in vocab_words.keys():
        return vocab_words[words]
    else:
        return vocab_words["<pad>"]
def log(msg):
    '''
    @summary: log an info message
    '''
    logger.info(msg)
def debug(msg):
    '''
    @summary: log a debug message
    '''
    logger.debug(msg)
def save(object_to_save, path):
    '''
    Save an object with pickle
    @Args:
        object_to_save: the object to save
    @Return:
        the path it was saved to
    '''
    with open(path, 'wb') as f:
        pickle.dump(object_to_save, f)
def load(path):
    '''
    Load a pickled object
    @Args:
        path: the path to read from
    @Return:
        the loaded object
    '''
    with open(path, 'rb') as f:
        object1 = pickle.load(f)
        return object1
fool_char_to_id = load(os.path.dirname(__file__)+"/fool_char_to_id.pk")
def getIndexOfWord_fool(word):
    if word in fool_char_to_id.keys():
        return fool_char_to_id[word]
    else:
        return fool_char_to_id["[UNK]"]
def find_index(list_tofind,text):
    '''
    @summary: find the first occurrence position of each word in the string
    @param:
        list_tofind: the words to look for
        text: the string to search in
    @return: list, the first occurrence index of each word (-1 if not found)
    '''
    result = []
    for item in list_tofind:
        index = text.find(item)
        if index>=0:
            result.append(index)
        else:
            result.append(-1)
    return result
def combine(list1,list2):
    '''
    @summary: concatenate every string in list1 with every string in list2
    @param:
        list1: list of strings
        list2: list of strings
    @return: list of concatenated strings
    '''
    result = []
    for item1 in list1:
        for item2 in list2:
            result.append(str(item1)+str(item2))
    return result
def getDigitsDic(unit):
    '''
    @summary: map a Chinese digit character to its numeric value
    '''
    DigitsDic = {"零":0, "壹":1, "贰":2, "叁":3, "肆":4, "伍":5, "陆":6, "柒":7, "捌":8, "玖":9,
                 "〇":0, "一":1, "二":2, "三":3, "四":4, "五":5, "六":6, "七":7, "八":8, "九":9}
    return DigitsDic.get(unit)
def getMultipleFactor(unit):
    '''
    @summary: map a Chinese unit character to its multiplier
    '''
    MultipleFactor = {"兆":Decimal(1000000000000),"亿":Decimal(100000000),"万":Decimal(10000),"仟":Decimal(1000),"千":Decimal(1000),"佰":Decimal(100),"百":Decimal(100),"拾":Decimal(10),"十":Decimal(10),"元":Decimal(1),"圆":Decimal(1),"角":round(Decimal(0.1),1),"分":round(Decimal(0.01),2)}
    return MultipleFactor.get(unit)
def getUnifyMoney(money):
    '''
    @summary: convert a Chinese money string into a numeric amount
    @param:
        money: Chinese money string
    @return: Decimal, the numeric amount
    '''
    MAX_MONEY = 1000000000000
    MAX_NUM = 12
    # strip commas
    money = re.sub("[,,]","",money)
    money = re.sub("[^0-9.零壹贰叁肆伍陆柒捌玖拾佰仟萬億圆十百千万亿元角分]","",money)
    result = Decimal(0)
    chnDigits = ["零", "壹", "贰", "叁", "肆", "伍", "陆", "柒", "捌", "玖"]
    # chnFactorUnits = ["兆", "亿", "万", "仟", "佰", "拾","圆","元","角","分"]
    chnFactorUnits = ["圆", "元","兆", "亿", "万", "仟", "佰", "拾", "角", "分", '十', '百', '千']
    LowMoneypattern = re.compile("^[\d,]+(\.\d+)?$")
    BigMoneypattern = re.compile("^零?(?P<BigMoney>[%s])$"%("".join(chnDigits)))
    try:
        if re.search(LowMoneypattern,money) is not None:
            return Decimal(money)
        elif re.search(BigMoneypattern,money) is not None:
            return getDigitsDic(re.search(BigMoneypattern,money).group("BigMoney"))
        for factorUnit in chnFactorUnits:
            if re.search(re.compile(".*%s.*"%(factorUnit)),money) is not None:
                subMoneys = re.split(re.compile("%s(?!.*%s.*)"%(factorUnit,factorUnit)),money)
                if re.search(re.compile("^(\d+)(\.\d+)?$"),subMoneys[0]) is not None:
                    if MAX_MONEY/getMultipleFactor(factorUnit)<Decimal(subMoneys[0]):
                        return Decimal(0)
                    result += Decimal(subMoneys[0])*(getMultipleFactor(factorUnit))
                elif len(subMoneys[0])==1:
                    if re.search(re.compile("^[%s]$"%("".join(chnDigits))),subMoneys[0]) is not None:
                        result += Decimal(getDigitsDic(subMoneys[0]))*(getMultipleFactor(factorUnit))
                # subMoneys[0] contains no money unit and cannot be split further
                elif subMoneys[0]=="":
                    result += 0
                elif re.search(re.compile("[%s]"%("".join(chnFactorUnits))),subMoneys[0]) is None:
                    # print(subMoneys)
                    # subMoneys[0] = subMoneys[0][0]
                    result += Decimal(getUnifyMoney(subMoneys[0])) * (getMultipleFactor(factorUnit))
                else:
                    result += Decimal(getUnifyMoney(subMoneys[0]))*(getMultipleFactor(factorUnit))
                if len(subMoneys)>1:
                    if re.search(re.compile("^(\d+(,)?)+(\.\d+)?[百千万亿]?\s?(元)?$"),subMoneys[1]) is not None:
                        result += Decimal(subMoneys[1])
                    elif len(subMoneys[1])==1:
                        if re.search(re.compile("^[%s]$"%("".join(chnDigits))),subMoneys[1]) is not None:
                            result += Decimal(getDigitsDic(subMoneys[1]))
                    else:
                        result += Decimal(getUnifyMoney(subMoneys[1]))
                break
    except Exception as e:
        # traceback.print_exc()
        return Decimal(0)
    return result
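# Illustrative examples (added, based on the rules above):
#   getUnifyMoney("壹万元")  should give a Decimal value of 10000
#   getUnifyMoney("3.5万元") should give a Decimal value of 35000
# Any parsing failure is swallowed and reported as Decimal(0).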
def getModel_w2v():
    '''
    @summary: load the word-level embedding model
    '''
    global model_w2v,lock_model_w2v
    with lock_model_w2v:
        if model_w2v is None:
            model_w2v = gensim.models.KeyedVectors.load_word2vec_format(getw2vfilepath(),binary=True)
        return model_w2v
def getModel_word():
    '''
    @summary: load the character-level embedding model
    '''
    global model_word,lock_model_word
    with lock_model_word:
        if model_word is None:
            model_word = gensim.models.KeyedVectors.load_word2vec_format(model_word_file,binary=True)
        return model_word
# getModel_w2v()
# getModel_word()
def findAllIndex(substr,wholestr):
    '''
    @summary: find all begin indexes of a substring within a string
    @param:
        substr: the substring
        wholestr: the full string that contains the substring
    @return: list of all begin indexes of the substring
    '''
    copystr = wholestr
    result = []
    indexappend = 0
    while(True):
        index = copystr.find(substr)
        if index<0:
            break
        else:
            result.append(indexappend+index)
            indexappend += index+len(substr)
            copystr = copystr[index+len(substr):]
    return result
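# Illustrative example (added): findAllIndex("ab", "abcab") should return [0, 3].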
def spanWindow(tokens,begin_index,end_index,size,center_include=False,word_flag = False,use_text = False,text = None):
    '''
    @summary: get the context tokens around an entity
    @param:
        tokens: tokenized sentence (list)
        begin_index: begin index of the entity
        end_index: end index of the entity
        size: number of tokens to take on each side
        center_include: whether to include the entity itself
        word_flag: word or character level, defaults to word
    @return: list, the context of the entity
    '''
    if use_text:
        assert text is not None
    length_tokens = len(tokens)
    if begin_index>size:
        begin = begin_index-size
    else:
        begin = 0
    if end_index+size<length_tokens:
        end = end_index+size+1
    else:
        end = length_tokens
    result = []
    if not word_flag:
        result.append(tokens[begin:begin_index])
        if center_include:
            if use_text:
                result.append(text)
            else:
                result.append(tokens[begin_index:end_index+1])
        result.append(tokens[end_index+1:end])
    else:
        result.append("".join(tokens[begin:begin_index]))
        if center_include:
            if use_text:
                result.append(text)
            else:
                result.append("".join(tokens[begin_index:end_index+1]))
        result.append("".join(tokens[end_index+1:end]))
    #print(result)
    return result
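# Illustrative example (added): with word_flag=True and center_include=True,
#   spanWindow(tokens, begin_index=5, end_index=6, size=2, center_include=True, word_flag=True)
# returns [left context, entity, right context] as joined strings; with word_flag=False the same
# pieces come back as token lists.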
def get_context(sentence_text, begin_index, end_index, size=20, center_include=False):
    '''
    Return the context around an entity
    :param sentence_text: sentence text
    :param begin_index: character begin index of the entity
    :param end_index: character end index of the entity
    :param size: character offset taken on each side
    :param center_include:
    :return:
    '''
    result = []
    begin = begin_index - size if begin_index>size else 0
    end = end_index + size
    result.append(sentence_text[begin: begin_index])
    if center_include:
        result.append(sentence_text[begin_index: end_index])
    result.append(sentence_text[end_index: end])
    return result
# complete unmatched brackets around a code or name according to simple rules
def fitDataByRule(data):
    symbol_dict = {"(":")",
                   "(":")",
                   "[":"]",
                   "【":"】",
                   ")":"(",
                   ")":"(",
                   "]":"[",
                   "】":"【"}
    leftSymbol_pattern = re.compile("[\((\[【]")
    rightSymbol_pattern = re.compile("[\))\]】]")
    leftfinds = re.findall(leftSymbol_pattern,data)
    rightfinds = re.findall(rightSymbol_pattern,data)
    result = data
    if len(leftfinds)+len(rightfinds)==0:
        return data
    elif len(leftfinds)==len(rightfinds):
        return data
    elif abs(len(leftfinds)-len(rightfinds))==1:
        if len(leftfinds)>len(rightfinds):
            if symbol_dict.get(data[0]) is not None:
                result = data[1:]
            else:
                #print(symbol_dict.get(leftfinds[0]))
                result = data+symbol_dict.get(leftfinds[0])
        else:
            if symbol_dict.get(data[-1]) is not None:
                result = data[:-1]
            else:
                result = symbol_dict.get(rightfinds[0])+data
    result = re.sub("[。]","",result)
    return result
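# Illustrative examples (added): a single unmatched bracket is balanced or dropped, e.g.
#   fitDataByRule("项目(一标段") should give "项目(一标段)"
#   fitDataByRule("一标段)")     should give "一标段"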
from datetime import date
# check whether a date is valid
def isValidDate(year, month, day):
    try:
        date(year, month, day)
    except:
        return False
    else:
        return True
time_format_pattern = re.compile("((?P<year>20\d{2}|\d{2}|二[零〇0][零〇一二三四五六七八九0]{2})\s*[-/年.]\s*(?P<month>\d{1,2}|[一二三四五六七八九十]{1,3})\s*[-/月.]\s*(?P<day>\d{1,2}|[一二三四五六七八九十]{1,3}))")
from BiddingKG.dl.ratio.re_ratio import getUnifyNum
def timeFormat(_time):
    current_year = time.strftime("%Y",time.localtime())
    all_match = re.finditer(time_format_pattern,_time)
    for _match in all_match:
        if len(_match.group())>0:
            legal = True
            year = ""
            month = ""
            day = ""
            for k,v in _match.groupdict().items():
                if k=="year":
                    year = v
                if k=="month":
                    month = v
                if k=="day":
                    day = v
            if year!="":
                if re.search("^\d+$",year):
                    if len(year)==2:
                        year = "20"+year
                    if int(year)-int(current_year)>10:
                        legal = False
                else:
                    _year = ""
                    for word in year:
                        if word == '0':
                            _year += word
                        else:
                            _year += str(getDigitsDic(word))
                    year = _year
            else:
                legal = False
            if month!="":
                if re.search("^\d+$", month):
                    if int(month)>12:
                        legal = False
                else:
                    month = int(getUnifyNum(month))
                    if month>=1 and month<=12:
                        month = str(month)
                    else:
                        legal = False
            else:
                legal = False
            if day!="":
                if re.search("^\d+$", day):
                    if int(day)>31:
                        legal = False
                else:
                    day = int(getUnifyNum(day))
                    if day >= 1 and day <= 31:
                        day = str(day)
                    else:
                        legal = False
            else:
                legal = False
            # print(year,month,day)
            if not isValidDate(int(year),int(month),int(day)):
                legal = False
            if legal:
                return "%s-%s-%s"%(year,month.rjust(2,"0"),day.rjust(2,"0"))
    return ""
def embedding(datas,shape):
    '''
    @summary: look up the word embeddings for the given tokens
    @param:
        datas: list of token lists
        shape: shape of the result
    @return: array, word embeddings with the given shape
    '''
    model_w2v = getModel_w2v()
    embed = np.zeros(shape)
    length = shape[1]
    out_index = 0
    #print(datas)
    for data in datas:
        index = 0
        for item in data:
            item_not_space = re.sub("\s*","",item)
            if index>=length:
                break
            if item_not_space in model_w2v.vocab:
                embed[out_index][index] = model_w2v[item_not_space]
                index += 1
            else:
                #embed[out_index][index] = model_w2v['unk']
                index += 1
        out_index += 1
    return embed
def embedding_word(datas,shape):
    '''
    @summary: look up the character embeddings (last shape[1] characters of each item)
    @param:
        datas: list of strings
        shape: shape of the result
    @return: array, character embeddings with the given shape
    '''
    model_w2v = getModel_word()
    embed = np.zeros(shape)
    length = shape[1]
    out_index = 0
    #print(datas)
    for data in datas:
        index = 0
        for item in str(data)[-shape[1]:]:
            if index>=length:
                break
            if item in model_w2v.vocab:
                embed[out_index][index] = model_w2v[item]
                index += 1
            else:
                # embed[out_index][index] = model_w2v['unk']
                index += 1
        out_index += 1
    return embed
def embedding_word_forward(datas,shape):
    '''
    @summary: look up the character embeddings (first shape[1] characters of each item)
    @param:
        datas: list of strings
        shape: shape of the result
    @return: array, character embeddings with the given shape
    '''
    model_w2v = getModel_word()
    embed = np.zeros(shape)
    length = shape[1]
    out_index = 0
    #print(datas)
    for data in datas:
        index = 0
        for item in str(data)[:shape[1]]:
            if index>=length:
                break
            if item in model_w2v.vocab:
                embed[out_index][index] = model_w2v[item]
                index += 1
            else:
                # embed[out_index][index] = model_w2v['unk']
                index += 1
        out_index += 1
    return embed
def formEncoding(text,shape=(100,60),expand=False):
    embedding = np.zeros(shape)
    word_model = getModel_word()
    for i in range(len(text)):
        if i>=shape[0]:
            break
        if text[i] in word_model.vocab:
            embedding[i] = word_model[text[i]]
    if expand:
        embedding = np.expand_dims(embedding,0)
    return embedding
def partMoney(entity_text,input2_shape = [7]):
    '''
    @summary: bucket a money amount into one-hot bins
    @param:
        entity_text: numeric money amount
        input2_shape: number of bins
    @return: array, one-hot encoding of the bucket
    '''
    money = float(entity_text)
    parts = np.zeros(input2_shape)
    if money<100:
        parts[0] = 1
    elif money<1000:
        parts[1] = 1
    elif money<10000:
        parts[2] = 1
    elif money<100000:
        parts[3] = 1
    elif money<1000000:
        parts[4] = 1
    elif money<10000000:
        parts[5] = 1
    else:
        parts[6] = 1
    return parts
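# Illustrative example (added): partMoney("50000") falls into the fourth bucket (1e4 <= money < 1e5),
# so it returns array([0., 0., 0., 1., 0., 0., 0.]).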
def uniform_num(num):
    d1 = {'一': '1', '二': '2', '三': '3', '四': '4', '五': '5', '六': '6', '七': '7', '八': '8', '九': '9', '十': '10'}
    # d2 = {'A': '1', 'B': '2', 'C': '3', 'D': '4', 'E': '5', 'F': '6', 'G': '7', 'H': '8', 'I': '9', 'J': '10'}
    d3 = {'Ⅰ': '1', 'Ⅱ': '2', 'Ⅲ': '3', 'Ⅳ': '4', 'Ⅴ': '5', 'Ⅵ': '6', 'Ⅶ': '7'}
    if num.isdigit():
        if re.search('^0[\d]$', num):
            num = num[1:]
        return num
    elif re.search('^[一二三四五六七八九十]+$', num):
        _digit = re.search('^[一二三四五六七八九十]+$', num).group(0)
        if len(_digit) == 1:
            num = d1[_digit]
        elif len(_digit) == 2 and _digit[0] == '十':
            num = '1'+ d1[_digit[1]]
        elif len(_digit) == 2 and _digit[1] == '十':
            num = d1[_digit[0]] + '0'
        elif len(_digit) == 3 and _digit[1] == '十':
            num = d1[_digit[0]] + d1[_digit[2]]
    elif re.search('[ⅠⅡⅢⅣⅤⅥⅦ]', num):
        num = re.search('[ⅠⅡⅢⅣⅤⅥⅦ]', num).group(0)
        num = d3[num]
    return num
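# Illustrative examples (added): uniform_num("03") -> "3", uniform_num("十二") -> "12",
# uniform_num("二十") -> "20", uniform_num("Ⅲ") -> "3".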
def uniform_package_name(package_name):
    '''
    Normalize a bid package number: numerals are unified to Arabic digits, letters to upper case,
    and keywords such as 施工/监理 are pulled to the front, e.g. A包监理一标段 becomes 监理A1; 包Ⅱ becomes 2
    :param package_name: package number string
    :return:
    '''
    package_name_raw = package_name
    package_name = re.sub('pdf|doc|docs|xlsx|rar|\d{4}年', ' ', package_name)
    package_name = package_name.replace('标段(包)', '标段').replace('№', '')
    package_name = re.sub('\[|【', '', package_name)
    kw = re.search('(施工|监理|监测|勘察|设计|劳务)', package_name)
    name = ""
    if kw:
        name += kw.group(0)
    if re.search('^[a-zA-Z0-9-]{5,}$', package_name):  # identifiers of five or more characters
        _digit = re.search('^[a-zA-Z0-9-]{5,}$', package_name).group(0).upper()
        # print('规范化包号1', _digit)
        name += _digit
    elif re.search('(?P<eng>[a-zA-Z])包[:)]?第?(?P<num>([0-9]{1,4}|[一二三四五六七八九十]{1,4}|[ⅠⅡⅢⅣⅤⅥⅦ]{1,4}))标段?', package_name):  # handle forms like A包2标段
        ser = re.search('(?P<eng>[a-zA-Z])包[:)]?第?(?P<num>([0-9]{1,4}|[一二三四五六七八九十]{1,4}|[ⅠⅡⅢⅣⅤⅥⅦ]{1,4}))标段?', package_name)
        # print('规范化包号2', ser.group(0))
        _char = ser.groupdict().get('eng')
        if _char:
            _char = _char.upper()
        _digit = ser.groupdict().get('num')
        _digit = uniform_num(_digit)
        name += _char.upper() + _digit
    elif re.search('第?(?P<eng>[0-9a-zA-Z-]{1,4})?(?P<num>([0-9]{1,4}|[一二三四五六七八九十]{1,4}|[ⅠⅡⅢⅣⅤⅥⅦ]{1,4}))(标[段号的包项]?|合同[包段]|([分子]?[包标]))', package_name):  # handle forms like A包2标段
        ser = re.search('第?(?P<eng>[0-9a-zA-Z-]{1,4})?(?P<num>([0-9]{1,4}|[一二三四五六七八九十]{1,4}|[ⅠⅡⅢⅣⅤⅥⅦ]{1,4}))(标[段号的包项]?|合同[包段]|([分子]?[包标]))', package_name)
        # print('规范化包号3', ser.group(0))
        _char = ser.groupdict().get('eng')
        if _char:
            _char = _char.upper()
        _digit = ser.groupdict().get('num')
        _digit = uniform_num(_digit)
        if _char:
            name += _char.upper()
        name += _digit
    elif re.search('(标[段号的包项]?|项目|子项目?|([分子]?包|包[组件号]))编?号?[::]?(?P<eng>[0-9a-zA-Z-]{1,4})?(?P<num>([0-9]{1,4}|[一二三四五六七八九十]{1,4}|[ⅠⅡⅢⅣⅤⅥⅦ]{1,4}))', package_name):  # unify numerals to Arabic digits
        ser = re.search('(标[段号的包项]?|项目|子项目?|([分子]?包|包[组件号]))编?号?[::]?(?P<eng>[0-9a-zA-Z-]{1,4})?(?P<num>([0-9]{1,4}|[一二三四五六七八九十]{1,4}|[ⅠⅡⅢⅣⅤⅥⅦ]{1,4}))',package_name)
        # print('规范化包号4', ser.group(0))
        _char = ser.groupdict().get('eng')
        if _char:
            _char = _char.upper()
        _digit = ser.groupdict().get('num')
        _digit = uniform_num(_digit)
        if _char:
            name += _char.upper()
        name += _digit
    elif re.search('(标[段号的包项]|([分子]?包|包[组件号]))编?号?[::]?(?P<eng>[a-zA-Z-]{1,5})', package_name):  # letter-only package codes
        _digit = re.search('(标[段号的包项]|([分子]?包|包[组件号]))编?号?[::]?(?P<eng>[a-zA-Z-]{1,5})', package_name).group('eng').upper()
        # print('规范化包号5', _digit)
        name += _digit
    elif re.search('(?P<eng>[a-zA-Z]{1,4})(标[段号的包项]|([分子]?[包标]|包[组件号]))', package_name):  # letter-only package codes
        _digit = re.search('(?P<eng>[a-zA-Z]{1,4})(标[段号的包项]|([分子]?[包标]|包[组件号]))', package_name).group('eng').upper()
        # print('规范化包号6', _digit)
        name += _digit
    elif re.search('^([0-9]{1,4}|[一二三四五六七八九十]{1,4}|[ⅠⅡⅢⅣⅤⅥⅦ]{1,4})$', package_name):  # bare numbers, unified to Arabic digits
        _digit = re.search('^([0-9]{1,4}|[一二三四五六七八九十]{1,4}|[ⅠⅡⅢⅣⅤⅥⅦ]{1,4})$', package_name).group(0)
        # print('规范化包号7', _digit)
        _digit = uniform_num(_digit)
        name += _digit
    elif re.search('^[a-zA-Z0-9-]+$', package_name):
        _char = re.search('^[a-zA-Z0-9-]+$', package_name).group(0)
        # print('规范化包号8', _char)
        name += _char.upper()
    if name == "":
        return package_name_raw
    else:
        if name.isdigit():
            name = str(int(name))
        # print('原始包号:%s, 处理后:%s'%(package_name, name))
        return name
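# Illustrative examples (added, based on the branches above):
#   uniform_package_name("包Ⅱ")     should give "2"
#   uniform_package_name("第2标段")  should give "2"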
def money_process(money_text, header):
    '''
    Given a money text and the header of its money column, return the normalized numeric amount and its unit
    :param money_text: money string
    :param header: header of the money column, used to extract the unit
    :return:
    '''
    money = 0
    money_unit = ""
    # re_price = re.search("[零壹贰叁肆伍陆柒捌玖拾佰仟萬億圆十百千万亿元角分]{3,}|\d[\d,]*(?:\.\d+)?[((]?万?", money_text)
    re_price = re.search("[零壹贰叁肆伍陆柒捌玖拾佰仟萬億圆十百千万亿元角分]{3,}|\d{1,3}([,,]\d{3})+(\.\d+)?|\d+(\.\d+)?[((]?万?", money_text)
    if re_price:
        money_text = re_price.group(0)
        if re.search('万元|[((]万[))]', header) and '万' not in money_text:  # fix 37797825: headers like 控制价(万)
            money_text += '万元'
        # money = float(getUnifyMoney(money_text))
        money = float(getUnifyMoney(money_text))
        if money > 10000000000000:  # drop amounts greater than one trillion (万亿)
            money = 0
        money_unit = '万元' if '万' in money_text else '元'
    return (money, money_unit)
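# Illustrative example (added): money_process("120万元", "中标金额") should return (1200000.0, "万元");
# the unit is taken from the money text itself or, failing that, from the column header.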
def recall(y_true, y_pred):
    '''
    Compute recall
    @Args:
        y_true: ground-truth labels
        y_pred: predicted labels
    @Return
        recall
    '''
    c1 = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    c3 = K.sum(K.round(K.clip(y_true, 0, 1)))
    # add K.epsilon() so the division stays safe when there are no positive labels
    # (a plain `if c3 == 0` check does not work on symbolic tensors)
    recall = c1 / (c3 + K.epsilon())
    return recall
def f1_score(y_true, y_pred):
    '''
    Compute F1
    @Args:
        y_true: ground-truth labels
        y_pred: predicted labels
    @Return
        F1 score
    '''
    c1 = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    c2 = K.sum(K.round(K.clip(y_pred, 0, 1)))
    c3 = K.sum(K.round(K.clip(y_true, 0, 1)))
    precision = c1 / (c2 + K.epsilon())
    recall = c1 / (c3 + K.epsilon())
    f1_score = 2 * (precision * recall) / (precision + recall + K.epsilon())
    return f1_score
def precision(y_true, y_pred):
    '''
    Compute precision
    @Args:
        y_true: ground-truth labels
        y_pred: predicted labels
    @Return
        precision
    '''
    c1 = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    c2 = K.sum(K.round(K.clip(y_pred, 0, 1)))
    precision = c1 / (c2 + K.epsilon())
    return precision
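# Sketch of intended usage (added; the model itself is assumed to be built elsewhere):
#   model.compile(optimizer="adam", loss="binary_crossentropy",
#                 metrics=[precision, recall, f1_score])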
# def print_metrics(history):
#     '''
#     Plot how each metric changes over the training epochs
#
#     @Args:
#         history: the history object returned by model training
#     '''
#     import matplotlib.pyplot as plt
#
#     # loss plot
#     loss = history.history['loss']
#     val_loss = history.history['val_loss']
#     epochs = range(1, len(loss) + 1)
#     plt.subplot(2, 2, 1)
#     plt.plot(epochs, loss, 'bo', label='Training loss')
#     plt.plot(epochs, val_loss, 'b', label='Validation loss')
#     plt.title('Training and validation loss')
#     plt.xlabel('Epochs')
#     plt.ylabel('Loss')
#     plt.legend()
#
#     # f1 plot
#     f1 = history.history['f1_score']
#     val_f1 = history.history['val_f1_score']
#     plt.subplot(2, 2, 2)
#     plt.plot(epochs, f1, 'bo', label='Training f1')
#     plt.plot(epochs, val_f1, 'b', label='Validation f1')
#     plt.title('Training and validation f1')
#     plt.xlabel('Epochs')
#     plt.ylabel('F1')
#     plt.legend()
#
#     # precision plot
#     prec = history.history['precision']
#     val_prec = history.history['val_precision']
#     plt.subplot(2, 2, 3)
#     plt.plot(epochs, prec, 'bo', label='Training precision')
#     plt.plot(epochs, val_prec, 'b', label='Validation precision')
#     plt.title('Training and validation precision')
#     plt.xlabel('Epochs')
#     plt.ylabel('Precision')
#     plt.legend()
#
#     # recall plot
#     recall = history.history['recall']
#     val_recall = history.history['val_recall']
#     plt.subplot(2, 2, 4)
#     plt.plot(epochs, recall, 'bo', label='Training recall')
#     plt.plot(epochs, val_recall, 'b', label='Validation recall')
#     plt.title('Training and validation recall')
#     plt.xlabel('Epochs')
#     plt.ylabel('Recall')
#     plt.legend()
#
#     plt.show()
if __name__=="__main__":
    # print(fool_char_to_id[">"])
    print(getUnifyMoney('伍仟贰佰零壹拾伍万零捌佰壹拾元陆角伍分'))
    # model = getModel_w2v()
    # vocab,matrix = getVocabAndMatrix(model, Embedding_size=128)
    # save([vocab,matrix],"vocabMatrix_words.pk")