Utils.py 33 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994
  1. '''
  2. Created on 2018年12月20日
  3. @author: User
  4. '''
  5. import numpy as np
  6. import re
  7. import gensim
  8. from keras import backend as K
  9. import os,sys
  10. import time
  11. import traceback
  12. from threading import RLock
  13. # from pai_tf_predict_proto import tf_predict_pb2
  14. import requests
# Lazily-initialized word2vec model shared across the module; guarded by lock_model_w2v.
model_w2v = None
lock_model_w2v = RLock()
# Deployment switches: serve predictions from PAI-EAS, lazy model loading, HTTP API endpoint.
USE_PAI_EAS = False
Lazy_load = False
# API_URL = "http://192.168.2.103:8802"
API_URL = "http://127.0.0.1:888"
# USE_API = True
USE_API = False
  23. def getCurrent_date(format="%Y-%m-%d %H:%M:%S"):
  24. _time = time.strftime(format,time.localtime())
  25. return _time
  26. def getw2vfilepath():
  27. filename = "wiki_128_word_embedding_new.vector"
  28. w2vfile = getFileFromSysPath(filename)
  29. if w2vfile is not None:
  30. return w2vfile
  31. return filename
  32. def getLazyLoad():
  33. global Lazy_load
  34. return Lazy_load
  35. def getFileFromSysPath(filename):
  36. for _path in sys.path:
  37. if os.path.isdir(_path):
  38. for _file in os.listdir(_path):
  39. _abspath = os.path.join(_path,_file)
  40. if os.path.isfile(_abspath):
  41. if _file==filename:
  42. return _abspath
  43. return None
# Path of the single-character embedding vectors, resolved relative to this module.
model_word_file = os.path.dirname(__file__)+"/../singlew2v_model.vector"
# Lazily-initialized character-embedding model; guarded by lock_model_word.
model_word = None
lock_model_word = RLock()
from decimal import Decimal
import logging
# Module-wide logging: timestamped records, INFO level by default.
logging.basicConfig(level = logging.INFO,format = '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
import pickle
import os
import json
  54. #自定义jsonEncoder
  55. class MyEncoder(json.JSONEncoder):
  56. def __init__(self):
  57. import numpy as np
  58. global np
  59. def default(self, obj):
  60. if isinstance(obj, np.ndarray):
  61. return obj.tolist()
  62. elif isinstance(obj, bytes):
  63. return str(obj, encoding='utf-8')
  64. elif isinstance(obj, (np.float_, np.float16, np.float32,
  65. np.float64)):
  66. return float(obj)
  67. elif isinstance(obj,(np.int64,np.int32)):
  68. return int(obj)
  69. return json.JSONEncoder.default(self, obj)
# Cached word→index dictionaries (built lazily) and the pickle files backing them.
vocab_word = None
vocab_words = None
file_vocab_word = "vocab_word.pk"
file_vocab_words = "vocab_words.pk"
# Authorization tokens and endpoints for the PAI-EAS prediction services.
selffool_authorization = "NjlhMWFjMjVmNWYyNzI0MjY1OGQ1M2Y0ZmY4ZGY0Mzg3Yjc2MTVjYg=="
selffool_url = "http://pai-eas-vpc.cn-beijing.aliyuncs.com/api/predict/selffool_gpu"
selffool_seg_authorization = "OWUwM2Q0ZmE3YjYxNzU4YzFiMjliNGVkMTA3MzJkNjQ2MzJiYzBhZg=="
selffool_seg_url = "http://pai-eas-vpc.cn-beijing.aliyuncs.com/api/predict/selffool_seg_gpu"
codename_authorization = "Y2M5MDUxMzU1MTU4OGM3ZDk2ZmEzYjkxYmYyYzJiZmUyYTgwYTg5NA=="
codename_url = "http://pai-eas-vpc.cn-beijing.aliyuncs.com/api/predict/codename_gpu"
form_item_authorization = "ODdkZWY1YWY0NmNhNjU2OTI2NWY4YmUyM2ZlMDg1NTZjOWRkYTVjMw=="
form_item_url = "http://pai-eas-vpc.cn-beijing.aliyuncs.com/api/predict/form"
person_authorization = "N2I2MDU2N2Q2MGQ0ZWZlZGM3NDkyNTA1Nzc4YmM5OTlhY2MxZGU1Mw=="
person_url = "http://pai-eas-vpc.cn-beijing.aliyuncs.com/api/predict/person"
role_authorization = "OWM1ZDg5ZDEwYTEwYWI4OGNjYmRlMmQ1NzYwNWNlZGZkZmRmMjE4OQ=="
role_url = "http://pai-eas-vpc.cn-beijing.aliyuncs.com/api/predict/role"
money_authorization = "MDQyNjc2ZDczYjBhYmM4Yzc4ZGI4YjRmMjc3NGI5NTdlNzJiY2IwZA=="
money_url = "http://pai-eas-vpc.cn-beijing.aliyuncs.com/api/predict/money"
codeclasses_authorization = "MmUyNWIxZjQ2NjAzMWJlMGIzYzkxMjMzNWY5OWI3NzJlMWQ1ZjY4Yw=="
codeclasses_url = "http://pai-eas-vpc.cn-beijing.aliyuncs.com/api/predict/codeclasses"
  90. def viterbi_decode(score, transition_params):
  91. """Decode the highest scoring sequence of tags outside of TensorFlow.
  92. This should only be used at test time.
  93. Args:
  94. score: A [seq_len, num_tags] matrix of unary potentials.
  95. transition_params: A [num_tags, num_tags] matrix of binary potentials.
  96. Returns:
  97. viterbi: A [seq_len] list of integers containing the highest scoring tag
  98. indices.
  99. viterbi_score: A float containing the score for the Viterbi sequence.
  100. """
  101. trellis = np.zeros_like(score)
  102. backpointers = np.zeros_like(score, dtype=np.int32)
  103. trellis[0] = score[0]
  104. for t in range(1, score.shape[0]):
  105. v = np.expand_dims(trellis[t - 1], 1) + transition_params
  106. trellis[t] = score[t] + np.max(v, 0)
  107. backpointers[t] = np.argmax(v, 0)
  108. viterbi = [np.argmax(trellis[-1])]
  109. for bp in reversed(backpointers[1:]):
  110. viterbi.append(bp[viterbi[-1]])
  111. viterbi.reverse()
  112. viterbi_score = np.max(trellis[-1])
  113. return viterbi, viterbi_score
  114. def limitRun(sess,list_output,feed_dict,MAX_BATCH=1024):
  115. len_sample = 0
  116. if len(feed_dict.keys())>0:
  117. len_sample = len(feed_dict[list(feed_dict.keys())[0]])
  118. if len_sample>MAX_BATCH:
  119. list_result = [[] for _ in range(len(list_output))]
  120. _begin = 0
  121. while(_begin<len_sample):
  122. new_dict = dict()
  123. for _key in feed_dict.keys():
  124. if isinstance(feed_dict[_key],(float,int,np.int32,np.float_,np.float16,np.float32,np.float64)):
  125. new_dict[_key] = feed_dict[_key]
  126. else:
  127. new_dict[_key] = feed_dict[_key][_begin:_begin+MAX_BATCH]
  128. _output = sess.run(list_output,feed_dict=new_dict)
  129. for _index in range(len(list_output)):
  130. list_result[_index].extend(_output[_index])
  131. _begin += MAX_BATCH
  132. else:
  133. list_result = sess.run(list_output,feed_dict=feed_dict)
  134. return list_result
def get_values(response, output_name):
    """
    Get the value of a specified output tensor
    :param output_name: name of the output tensor
    :return: the content of the output tensor, reshaped to its declared dims
    """
    # NOTE(review): tf_predict_pb2 is only available when the
    # pai_tf_predict_proto import at the top of the file is re-enabled;
    # calling this while it is commented out raises NameError.
    output = response.outputs[output_name]
    if output.dtype == tf_predict_pb2.DT_FLOAT:
        _value = output.float_val
    elif output.dtype == tf_predict_pb2.DT_INT8 or output.dtype == tf_predict_pb2.DT_INT16 or \
            output.dtype == tf_predict_pb2.DT_INT32:
        _value = output.int_val
    elif output.dtype == tf_predict_pb2.DT_INT64:
        _value = output.int64_val
    elif output.dtype == tf_predict_pb2.DT_DOUBLE:
        _value = output.double_val
    elif output.dtype == tf_predict_pb2.DT_STRING:
        _value = output.string_val
    elif output.dtype == tf_predict_pb2.DT_BOOL:
        _value = output.bool_val
    # NOTE(review): _value is unbound for any other dtype, so the next line
    # would raise NameError — presumably all service outputs use the dtypes above.
    return np.array(_value).reshape(response.outputs[output_name].array_shape.dim)
def vpc_requests(url, authorization, request_data, list_outputs):
    """POST a serialized predict request to a PAI-EAS endpoint and decode its outputs.

    :param url: service endpoint
    :param authorization: token placed in the Authorization header
    :param request_data: serialized PredictRequest payload
    :param list_outputs: names of output tensors to extract
    :return: dict mapping each output name to its ndarray, or None on HTTP failure
    """
    headers = {"Authorization": authorization}
    dict_outputs = dict()
    response = tf_predict_pb2.PredictResponse()
    resp = requests.post(url, data=request_data, headers=headers)
    if resp.status_code != 200:
        print(resp.status_code, resp.content)
        log("调用pai-eas接口出错,authorization:"+str(authorization))
        return None
    else:
        response = tf_predict_pb2.PredictResponse()
        response.ParseFromString(resp.content)
        for _output in list_outputs:
            dict_outputs[_output] = get_values(response, _output)
        return dict_outputs
  171. def encodeInput(data,word_len,word_flag=True,userFool=False):
  172. result = []
  173. out_index = 0
  174. for item in data:
  175. if out_index in [0]:
  176. list_word = item[-word_len:]
  177. else:
  178. list_word = item[:word_len]
  179. temp = []
  180. if word_flag:
  181. for word in list_word:
  182. if userFool:
  183. temp.append(getIndexOfWord_fool(word))
  184. else:
  185. temp.append(getIndexOfWord(word))
  186. list_append = []
  187. temp_len = len(temp)
  188. while(temp_len<word_len):
  189. if userFool:
  190. list_append.append(0)
  191. else:
  192. list_append.append(getIndexOfWord("<pad>"))
  193. temp_len += 1
  194. if out_index in [0]:
  195. temp = list_append+temp
  196. else:
  197. temp = temp+list_append
  198. else:
  199. for words in list_word:
  200. temp.append(getIndexOfWords(words))
  201. list_append = []
  202. temp_len = len(temp)
  203. while(temp_len<word_len):
  204. list_append.append(getIndexOfWords("<pad>"))
  205. temp_len += 1
  206. if out_index in [0,1]:
  207. temp = list_append+temp
  208. else:
  209. temp = temp+list_append
  210. result.append(temp)
  211. out_index += 1
  212. return result
  213. def encodeInput_form(input,MAX_LEN=30):
  214. x = np.zeros([MAX_LEN])
  215. for i in range(len(input)):
  216. if i>=MAX_LEN:
  217. break
  218. x[i] = getIndexOfWord(input[i])
  219. return x
  220. def getVocabAndMatrix(model,Embedding_size = 60):
  221. '''
  222. @summary:获取子向量的词典和子向量矩阵
  223. '''
  224. vocab = ["<pad>"]+model.index2word
  225. embedding_matrix = np.zeros((len(vocab),Embedding_size))
  226. for i in range(1,len(vocab)):
  227. embedding_matrix[i] = model[vocab[i]]
  228. return vocab,embedding_matrix
  229. def getIndexOfWord(word):
  230. global vocab_word,file_vocab_word
  231. if vocab_word is None:
  232. if os.path.exists(file_vocab_word):
  233. vocab = load(file_vocab_word)
  234. vocab_word = dict((w, i) for i, w in enumerate(np.array(vocab)))
  235. else:
  236. model = getModel_word()
  237. vocab,_ = getVocabAndMatrix(model, Embedding_size=60)
  238. vocab_word = dict((w, i) for i, w in enumerate(np.array(vocab)))
  239. save(vocab,file_vocab_word)
  240. if word in vocab_word.keys():
  241. return vocab_word[word]
  242. else:
  243. return vocab_word['<pad>']
  244. def changeIndexFromWordToWords(tokens,word_index):
  245. '''
  246. @summary:转换某个字的字偏移为词偏移
  247. '''
  248. before_index = 0
  249. after_index = 0
  250. for i in range(len(tokens)):
  251. after_index = after_index+len(tokens[i])
  252. if before_index<=word_index and after_index>word_index:
  253. return i
  254. before_index = after_index
  255. def getIndexOfWords(words):
  256. global vocab_words,file_vocab_words
  257. if vocab_words is None:
  258. if os.path.exists(file_vocab_words):
  259. vocab = load(file_vocab_words)
  260. vocab_words = dict((w, i) for i, w in enumerate(np.array(vocab)))
  261. else:
  262. model = getModel_w2v()
  263. vocab,_ = getVocabAndMatrix(model, Embedding_size=128)
  264. vocab_words = dict((w, i) for i, w in enumerate(np.array(vocab)))
  265. save(vocab,file_vocab_words)
  266. if words in vocab_words.keys():
  267. return vocab_words[words]
  268. else:
  269. return vocab_words["<pad>"]
def log(msg):
    '''
    @summary: log a message at INFO level via the module logger
    '''
    logger.info(msg)
def debug(msg):
    '''
    @summary: log a message at DEBUG level via the module logger
    '''
    logger.debug(msg)
  280. def save(object_to_save, path):
  281. '''
  282. 保存对象
  283. @Arugs:
  284. object_to_save: 需要保存的对象
  285. @Return:
  286. 保存的路径
  287. '''
  288. with open(path, 'wb') as f:
  289. pickle.dump(object_to_save, f)
  290. def load(path):
  291. '''
  292. 读取对象
  293. @Arugs:
  294. path: 读取的路径
  295. @Return:
  296. 读取的对象
  297. '''
  298. with open(path, 'rb') as f:
  299. object1 = pickle.load(f)
  300. return object1
# Character→id table for the "fool" tokenizer, loaded eagerly at import time
# (import fails if fool_char_to_id.pk is missing next to this module).
fool_char_to_id = load(os.path.dirname(__file__)+"/fool_char_to_id.pk")
  302. def getIndexOfWord_fool(word):
  303. if word in fool_char_to_id.keys():
  304. return fool_char_to_id[word]
  305. else:
  306. return fool_char_to_id["[UNK]"]
  307. def find_index(list_tofind,text):
  308. '''
  309. @summary: 查找所有词汇在字符串中第一次出现的位置
  310. @param:
  311. list_tofind:待查找词汇
  312. text:字符串
  313. @return: list,每个词汇第一次出现的位置
  314. '''
  315. result = []
  316. for item in list_tofind:
  317. index = text.find(item)
  318. if index>=0:
  319. result.append(index)
  320. else:
  321. result.append(-1)
  322. return result
  323. def combine(list1,list2):
  324. '''
  325. @summary:将两个list中的字符串两两拼接
  326. @param:
  327. list1:字符串list
  328. list2:字符串list
  329. @return:拼接结果list
  330. '''
  331. result = []
  332. for item1 in list1:
  333. for item2 in list2:
  334. result.append(str(item1)+str(item2))
  335. return result
  336. def getDigitsDic(unit):
  337. '''
  338. @summary:拿到中文对应的数字
  339. '''
  340. DigitsDic = {"零":0, "壹":1, "贰":2, "叁":3, "肆":4, "伍":5, "陆":6, "柒":7, "捌":8, "玖":9,
  341. "〇":0, "一":1, "二":2, "三":3, "四":4, "五":5, "六":6, "七":7, "八":8, "九":9}
  342. return DigitsDic.get(unit)
  343. def getMultipleFactor(unit):
  344. '''
  345. @summary:拿到单位对应的值
  346. '''
  347. MultipleFactor = {"兆":Decimal(1000000000000),"亿":Decimal(100000000),"万":Decimal(10000),"仟":Decimal(1000),"千":Decimal(1000),"佰":Decimal(100),"百":Decimal(100),"拾":Decimal(10),"十":Decimal(10),"元":Decimal(1),"圆":Decimal(1),"角":round(Decimal(0.1),1),"分":round(Decimal(0.01),2)}
  348. return MultipleFactor.get(unit)
def getUnifyMoney(money):
    '''
    @summary: convert a Chinese money string (mixed digits/numerals) into a Decimal amount
    @param:
        money: money string, e.g. "伍仟贰佰零壹拾伍万元"
    @return: Decimal amount; Decimal(0) on overflow or any parse failure
    '''
    MAX_MONEY = 1000000000000
    MAX_NUM = 12  # NOTE(review): unused — presumably a digit-count cap that was never enforced
    # strip thousands separators, then every character that is not a digit,
    # decimal point, or a known Chinese numeral/unit
    money = re.sub("[,,]","",money)
    money = re.sub("[^0-9.零壹贰叁肆伍陆柒捌玖拾佰仟萬億圆十百千万亿元角分]","",money)
    result = Decimal(0)
    chnDigits = ["零", "壹", "贰", "叁", "肆", "伍", "陆", "柒", "捌", "玖"]
    # chnFactorUnits = ["兆", "亿", "万", "仟", "佰", "拾","圆","元","角","分"]
    # units tried in this order; for each, the string is split at its LAST occurrence
    chnFactorUnits = ["圆", "元","兆", "亿", "万", "仟", "佰", "拾", "角", "分", '十', '百', '千']
    LowMoneypattern = re.compile("^[\d,]+(\.\d+)?$")  # plain Arabic amount
    BigMoneypattern = re.compile("^零?(?P<BigMoney>[%s])$"%("".join(chnDigits)))  # single Chinese digit
    try:
        if re.search(LowMoneypattern,money) is not None:
            return Decimal(money)
        elif re.search(BigMoneypattern,money) is not None:
            return getDigitsDic(re.search(BigMoneypattern,money).group("BigMoney"))
        for factorUnit in chnFactorUnits:
            if re.search(re.compile(".*%s.*"%(factorUnit)),money) is not None:
                # split on the last occurrence of the unit: left part is this unit's
                # multiplier, right part is the remainder of the amount
                subMoneys = re.split(re.compile("%s(?!.*%s.*)"%(factorUnit,factorUnit)),money)
                if re.search(re.compile("^(\d+)(\.\d+)?$"),subMoneys[0]) is not None:
                    # overflow guard: reject amounts beyond MAX_MONEY
                    if MAX_MONEY/getMultipleFactor(factorUnit)<Decimal(subMoneys[0]):
                        return Decimal(0)
                    result += Decimal(subMoneys[0])*(getMultipleFactor(factorUnit))
                elif len(subMoneys[0])==1:
                    if re.search(re.compile("^[%s]$"%("".join(chnDigits))),subMoneys[0]) is not None:
                        result += Decimal(getDigitsDic(subMoneys[0]))*(getMultipleFactor(factorUnit))
                elif subMoneys[0]=="":
                    result += 0
                # subMoneys[0] contains no money unit and cannot be split any further
                elif re.search(re.compile("[%s]"%("".join(chnFactorUnits))),subMoneys[0]) is None:
                    # print(subMoneys)
                    # subMoneys[0] = subMoneys[0][0]
                    result += Decimal(getUnifyMoney(subMoneys[0])) * (getMultipleFactor(factorUnit))
                else:
                    # left part still contains smaller units → recurse on it
                    result += Decimal(getUnifyMoney(subMoneys[0]))*(getMultipleFactor(factorUnit))
                if len(subMoneys)>1:
                    if re.search(re.compile("^(\d+(,)?)+(\.\d+)?[百千万亿]?\s?(元)?$"),subMoneys[1]) is not None:
                        result += Decimal(subMoneys[1])
                    elif len(subMoneys[1])==1:
                        if re.search(re.compile("^[%s]$"%("".join(chnDigits))),subMoneys[1]) is not None:
                            result += Decimal(getDigitsDic(subMoneys[1]))
                    else:
                        # remainder still contains units → recurse on it
                        result += Decimal(getUnifyMoney(subMoneys[1]))
                break
    except Exception as e:
        # traceback.print_exc()
        # malformed input yields zero instead of raising
        return Decimal(0)
    return result
def getModel_w2v():
    '''
    @summary: lazily load the binary word2vec word-embedding model (thread-safe)
    '''
    global model_w2v,lock_model_w2v
    with lock_model_w2v:
        if model_w2v is None:
            model_w2v = gensim.models.KeyedVectors.load_word2vec_format(getw2vfilepath(),binary=True)
        return model_w2v
  413. def getModel_word():
  414. '''
  415. @summary:加载字向量
  416. '''
  417. global model_word,lock_model_w2v
  418. with lock_model_word:
  419. if model_word is None:
  420. model_word = gensim.models.KeyedVectors.load_word2vec_format(model_word_file,binary=True)
  421. return model_word
  422. # getModel_w2v()
  423. # getModel_word()
  424. def findAllIndex(substr,wholestr):
  425. '''
  426. @summary: 找到字符串的子串的所有begin_index
  427. @param:
  428. substr:子字符串
  429. wholestr:子串所在完整字符串
  430. @return: list,字符串的子串的所有begin_index
  431. '''
  432. copystr = wholestr
  433. result = []
  434. indexappend = 0
  435. while(True):
  436. index = copystr.find(substr)
  437. if index<0:
  438. break
  439. else:
  440. result.append(indexappend+index)
  441. indexappend += index+len(substr)
  442. copystr = copystr[index+len(substr):]
  443. return result
  444. def spanWindow(tokens,begin_index,end_index,size,center_include=False,word_flag = False,use_text = False,text = None):
  445. '''
  446. @summary:取得某个实体的上下文词汇
  447. @param:
  448. tokens:句子分词list
  449. begin_index:实体的开始index
  450. end_index:实体的结束index
  451. size:左右两边各取多少个词
  452. center_include:是否包含实体
  453. word_flag:词/字,默认是词
  454. @return: list,实体的上下文词汇
  455. '''
  456. if use_text:
  457. assert text is not None
  458. length_tokens = len(tokens)
  459. if begin_index>size:
  460. begin = begin_index-size
  461. else:
  462. begin = 0
  463. if end_index+size<length_tokens:
  464. end = end_index+size+1
  465. else:
  466. end = length_tokens
  467. result = []
  468. if not word_flag:
  469. result.append(tokens[begin:begin_index])
  470. if center_include:
  471. if use_text:
  472. result.append(text)
  473. else:
  474. result.append(tokens[begin_index:end_index+1])
  475. result.append(tokens[end_index+1:end])
  476. else:
  477. result.append("".join(tokens[begin:begin_index]))
  478. if center_include:
  479. if use_text:
  480. result.append(text)
  481. else:
  482. result.append("".join(tokens[begin_index:end_index+1]))
  483. result.append("".join(tokens[end_index+1:end]))
  484. #print(result)
  485. return result
  486. #根据规则补全编号或名称两边的符号
  487. def fitDataByRule(data):
  488. symbol_dict = {"(":")",
  489. "(":")",
  490. "[":"]",
  491. "【":"】",
  492. ")":"(",
  493. ")":"(",
  494. "]":"[",
  495. "】":"【"}
  496. leftSymbol_pattern = re.compile("[\((\[【]")
  497. rightSymbol_pattern = re.compile("[\))\]】]")
  498. leftfinds = re.findall(leftSymbol_pattern,data)
  499. rightfinds = re.findall(rightSymbol_pattern,data)
  500. result = data
  501. if len(leftfinds)+len(rightfinds)==0:
  502. return data
  503. elif len(leftfinds)==len(rightfinds):
  504. return data
  505. elif abs(len(leftfinds)-len(rightfinds))==1:
  506. if len(leftfinds)>len(rightfinds):
  507. if symbol_dict.get(data[0]) is not None:
  508. result = data[1:]
  509. else:
  510. #print(symbol_dict.get(leftfinds[0]))
  511. result = data+symbol_dict.get(leftfinds[0])
  512. else:
  513. if symbol_dict.get(data[-1]) is not None:
  514. result = data[:-1]
  515. else:
  516. result = symbol_dict.get(rightfinds[0])+data
  517. result = re.sub("[。]","",result)
  518. return result
  519. time_format_pattern = re.compile("((?P<year>\d{4}|\d{2})\s*[-\/年\.]\s*(?P<month>\d{1,2})\s*[-\/月\.]\s*(?P<day>\d{1,2}))")
  520. def timeFormat(_time):
  521. current_year = time.strftime("%Y",time.localtime())
  522. all_match = re.finditer(time_format_pattern,_time)
  523. for _match in all_match:
  524. if len(_match.group())>0:
  525. legal = True
  526. year = ""
  527. month = ""
  528. day = ""
  529. for k,v in _match.groupdict().items():
  530. if k=="year":
  531. year = v
  532. if k=="month":
  533. month = v
  534. if k=="day":
  535. day = v
  536. if year!="":
  537. if len(year)==2:
  538. year = "20"+year
  539. if int(year)>int(current_year):
  540. legal = False
  541. else:
  542. legal = False
  543. if month!="":
  544. if int(month)>12:
  545. legal = False
  546. else:
  547. legal = False
  548. if day!="":
  549. if int(day)>31:
  550. legal = False
  551. else:
  552. legal = False
  553. if legal:
  554. return "%s-%s-%s"%(year,month.rjust(2,"0"),day.rjust(2,"0"))
  555. return ""
  556. def embedding(datas,shape):
  557. '''
  558. @summary:查找词汇对应的词向量
  559. @param:
  560. datas:词汇的list
  561. shape:结果的shape
  562. @return: array,返回对应shape的词嵌入
  563. '''
  564. model_w2v = getModel_w2v()
  565. embed = np.zeros(shape)
  566. length = shape[1]
  567. out_index = 0
  568. #print(datas)
  569. for data in datas:
  570. index = 0
  571. for item in data:
  572. item_not_space = re.sub("\s*","",item)
  573. if index>=length:
  574. break
  575. if item_not_space in model_w2v.vocab:
  576. embed[out_index][index] = model_w2v[item_not_space]
  577. index += 1
  578. else:
  579. #embed[out_index][index] = model_w2v['unk']
  580. index += 1
  581. out_index += 1
  582. return embed
  583. def embedding_word(datas,shape):
  584. '''
  585. @summary:查找词汇对应的词向量
  586. @param:
  587. datas:词汇的list
  588. shape:结果的shape
  589. @return: array,返回对应shape的词嵌入
  590. '''
  591. model_w2v = getModel_word()
  592. embed = np.zeros(shape)
  593. length = shape[1]
  594. out_index = 0
  595. #print(datas)
  596. for data in datas:
  597. index = 0
  598. for item in str(data)[-shape[1]:]:
  599. if index>=length:
  600. break
  601. if item in model_w2v.vocab:
  602. embed[out_index][index] = model_w2v[item]
  603. index += 1
  604. else:
  605. # embed[out_index][index] = model_w2v['unk']
  606. index += 1
  607. out_index += 1
  608. return embed
  609. def embedding_word_forward(datas,shape):
  610. '''
  611. @summary:查找词汇对应的词向量
  612. @param:
  613. datas:词汇的list
  614. shape:结果的shape
  615. @return: array,返回对应shape的词嵌入
  616. '''
  617. model_w2v = getModel_word()
  618. embed = np.zeros(shape)
  619. length = shape[1]
  620. out_index = 0
  621. #print(datas)
  622. for data in datas:
  623. index = 0
  624. for item in str(data)[:shape[1]]:
  625. if index>=length:
  626. break
  627. if item in model_w2v.vocab:
  628. embed[out_index][index] = model_w2v[item]
  629. index += 1
  630. else:
  631. # embed[out_index][index] = model_w2v['unk']
  632. index += 1
  633. out_index += 1
  634. return embed
  635. def formEncoding(text,shape=(100,60),expand=False):
  636. embedding = np.zeros(shape)
  637. word_model = getModel_word()
  638. for i in range(len(text)):
  639. if i>=shape[0]:
  640. break
  641. if text[i] in word_model.vocab:
  642. embedding[i] = word_model[text[i]]
  643. if expand:
  644. embedding = np.expand_dims(embedding,0)
  645. return embedding
  646. def partMoney(entity_text,input2_shape = [7]):
  647. '''
  648. @summary:对金额分段
  649. @param:
  650. entity_text:数值金额
  651. input2_shape:分类数
  652. @return: array,分段之后的独热编码
  653. '''
  654. money = float(entity_text)
  655. parts = np.zeros(input2_shape)
  656. if money<100:
  657. parts[0] = 1
  658. elif money<1000:
  659. parts[1] = 1
  660. elif money<10000:
  661. parts[2] = 1
  662. elif money<100000:
  663. parts[3] = 1
  664. elif money<1000000:
  665. parts[4] = 1
  666. elif money<10000000:
  667. parts[5] = 1
  668. else:
  669. parts[6] = 1
  670. return parts
  671. def uniform_num(num):
  672. d1 = {'一': '1', '二': '2', '三': '3', '四': '4', '五': '5', '六': '6', '七': '7', '八': '8', '九': '9', '十': '10'}
  673. # d2 = {'A': '1', 'B': '2', 'C': '3', 'D': '4', 'E': '5', 'F': '6', 'G': '7', 'H': '8', 'I': '9', 'J': '10'}
  674. d3 = {'Ⅰ': '1', 'Ⅱ': '2', 'Ⅲ': '3', 'Ⅳ': '4', 'Ⅴ': '5', 'Ⅵ': '6', 'Ⅶ': '7'}
  675. if num.isdigit():
  676. if re.search('^0[\d]$', num):
  677. num = num[1:]
  678. return num
  679. elif re.search('^[一二三四五六七八九十]+$', num):
  680. _digit = re.search('^[一二三四五六七八九十]+$', num).group(0)
  681. if len(_digit) == 1:
  682. num = d1[_digit]
  683. elif len(_digit) == 2 and _digit[0] == '十':
  684. num = '1'+ d1[_digit[1]]
  685. elif len(_digit) == 2 and _digit[1] == '十':
  686. num = d1[_digit[0]] + '0'
  687. elif len(_digit) == 3 and _digit[1] == '十':
  688. num = d1[_digit[0]] + d1[_digit[2]]
  689. elif re.search('[ⅠⅡⅢⅣⅤⅥⅦ]', num):
  690. num = re.search('[ⅠⅡⅢⅣⅤⅥⅦ]', num).group(0)
  691. num = d3[num]
  692. return num
def uniform_package_name(package_name):
    '''
    Normalize a bid-package label: numerals become Arabic digits, letters are
    uppercased, and a role keyword (construction/supervision/...) is moved to
    the front — e.g. "A包监理一标段" → "监理A1"; "包Ⅱ" → "2".
    :param package_name: package label string
    :return: normalized label, or the raw input when nothing matches
    '''
    package_name_raw = package_name
    # strip file-extension noise and 4-digit years that pollute labels
    package_name = re.sub('pdf|doc|docs|xlsx|rar|\d{4}年', ' ', package_name)
    kw = re.search('(施工|监理|监测|勘察|设计|劳务)', package_name)
    name = ""
    if kw:
        name += kw.group(0)
    if re.search('^[a-zA-Z0-9-]{5,}$', package_name):  # codes of 5+ alphanumeric characters pass through uppercased
        _digit = re.search('^[a-zA-Z0-9-]{5,}$', package_name).group(0).upper()
        name += _digit
    elif re.search('(?P<eng>[a-zA-Z])包[:)]?第?(?P<num>([0-9]{1,4}|[一二三四五六七八九十]{1,4}|[ⅠⅡⅢⅣⅤⅥⅦ]{1,4}))标段?', package_name):  # e.g. "A包2标段"
        ser = re.search('(?P<eng>[a-zA-Z])包[:)]?第?(?P<num>([0-9]{1,4}|[一二三四五六七八九十]{1,4}|[ⅠⅡⅢⅣⅤⅥⅦ]{1,4}))标段?', package_name)
        _char = ser.groupdict().get('eng')
        if _char:
            _char = _char.upper()
        _digit = ser.groupdict().get('num')
        _digit = uniform_num(_digit)
        name += _char.upper() + _digit
    elif re.search('第?(?P<eng>[a-zA-Z]{1,4})?(?P<num>([0-9]{1,4}|[一二三四五六七八九十]{1,4}|[ⅠⅡⅢⅣⅤⅥⅦ]{1,4}))(标[段号的包项]?|([分子]?[包标]))', package_name):  # e.g. "第2标段" / "A2包"
        ser = re.search('第?(?P<eng>[a-zA-Z]{1,4})?(?P<num>([0-9]{1,4}|[一二三四五六七八九十]{1,4}|[ⅠⅡⅢⅣⅤⅥⅦ]{1,4}))(标[段号的包项]?|([分子]?[包标]))', package_name)
        _char = ser.groupdict().get('eng')
        if _char:
            _char = _char.upper()
        _digit = ser.groupdict().get('num')
        _digit = uniform_num(_digit)
        if _char:
            name += _char.upper()
        name += _digit
    elif re.search('(标[段号的包项]|([分子]?包|包[组件号]))编?号?[::]?(?P<eng>[a-zA-Z]{1,4})?(?P<num>([0-9]{1,4}|[一二三四五六七八九十]{1,4}|[ⅠⅡⅢⅣⅤⅥⅦ]{1,4}))', package_name):  # unit keyword followed by an optional letter and a number
        ser = re.search('(标[段号的包项]|([分子]?包|包[组件号]))编?号?[::]?(?P<eng>[a-zA-Z]{1,4})?(?P<num>([0-9]{1,4}|[一二三四五六七八九十]{1,4}|[ⅠⅡⅢⅣⅤⅥⅦ]{1,4}))',package_name)
        _char = ser.groupdict().get('eng')
        if _char:
            _char = _char.upper()
        _digit = ser.groupdict().get('num')
        _digit = uniform_num(_digit)
        if _char:
            name += _char.upper()
        name += _digit
    elif re.search('(标[段号的包项]|([分子]?包|包[组件号]))编?号?[::]?(?P<eng>[a-zA-Z]{1,4})', package_name):  # unit keyword followed by letters only
        _digit = re.search('(标[段号的包项]|([分子]?包|包[组件号]))编?号?[::]?(?P<eng>[a-zA-Z]{1,4})', package_name).group('eng').upper()
        name += _digit
    elif re.search('(?P<eng>[a-zA-Z]{1,4})(标[段号的包项]|([分子]?[包标]|包[组件号]))', package_name):  # letters preceding the unit keyword
        _digit = re.search('(?P<eng>[a-zA-Z]{1,4})(标[段号的包项]|([分子]?[包标]|包[组件号]))', package_name).group('eng').upper()
        name += _digit
    elif re.search('^([0-9]{1,4}|[一二三四五六七八九十]{1,4}|[ⅠⅡⅢⅣⅤⅥⅦ]{1,4})$', package_name):  # a bare number in any supported numeral system
        _digit = re.search('^([0-9]{1,4}|[一二三四五六七八九十]{1,4}|[ⅠⅡⅢⅣⅤⅥⅦ]{1,4})$', package_name).group(0)
        _digit = uniform_num(_digit)
        name += _digit
    elif re.search('^[a-zA-Z0-9-]+$', package_name):  # short alphanumeric code
        _char = re.search('^[a-zA-Z0-9-]+$', package_name).group(0)
        name += _char.upper()
    if name == "":
        return package_name_raw
    else:
        # print('原始包号:%s, 处理后:%s'%(package_name, name))
        return name
  754. def money_process(money_text, header):
  755. '''
  756. 输入金额文本及金额列表头,返回统一数字化金额及金额单位
  757. :param money_text:金额字符串
  758. :param header:金额列表头,用于提取单位
  759. :return:
  760. '''
  761. money = 0
  762. money_unit = ""
  763. re_price = re.search("[零壹贰叁肆伍陆柒捌玖拾佰仟萬億圆十百千万亿元角分]{3,}|\d[\d,]*(?:\.\d+)?万?", money_text)
  764. if re_price:
  765. money_text = re_price.group(0)
  766. if '万元' in header and '万' not in money_text:
  767. money_text += '万元'
  768. money = float(getUnifyMoney(money_text))
  769. if money > 10000000000000: # 大于万亿的去除
  770. money = 0
  771. money_unit = '万元' if '万' in money_text else '元'
  772. return (money, money_unit)
def recall(y_true, y_pred):
    '''
    Compute recall as a Keras training metric.
    @Argus:
        y_true: ground-truth labels
        y_pred: predicted labels
    @Return
        recall value (backend tensor)
    '''
    # true positives: prediction and label both round to 1
    c1 = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    # all actual positives
    c3 = K.sum(K.round(K.clip(y_true, 0, 1)))
    # NOTE(review): `c3 == 0` compares a backend tensor with 0 — this guard only
    # behaves as intended under eager execution; confirm against the Keras
    # version in use.
    if c3 == 0:
        return 0
    recall = c1 / c3
    return recall
def f1_score(y_true, y_pred):
    '''
    Compute the F1 score as a Keras training metric.
    @Argus:
        y_true: ground-truth labels
        y_pred: predicted labels
    @Return
        F1 value (backend tensor)
    '''
    c1 = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))  # true positives
    c2 = K.sum(K.round(K.clip(y_pred, 0, 1)))           # predicted positives
    c3 = K.sum(K.round(K.clip(y_true, 0, 1)))           # actual positives
    precision = c1 / c2
    # NOTE(review): same tensor-vs-0 caveat as recall(); additionally, when
    # precision + recall is 0 the final division yields NaN/Inf rather than 0 —
    # confirm whether callers rely on that.
    if c3 == 0:
        recall = 0
    else:
        recall = c1 / c3
    f1_score = 2 * (precision * recall) / (precision + recall)
    return f1_score
def precision(y_true, y_pred):
    '''
    Compute precision as a Keras training metric.
    @Argus:
        y_true: ground-truth labels
        y_pred: predicted labels
    @Return
        precision value (backend tensor)
    '''
    c1 = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))  # true positives
    c2 = K.sum(K.round(K.clip(y_pred, 0, 1)))           # predicted positives
    # NOTE(review): divides by c2 with no zero guard — NaN/Inf when there are
    # no predicted positives.
    precision = c1 / c2
    return precision
  820. # def print_metrics(history):
  821. # '''
  822. # 制作每次迭代的各metrics变化图片
  823. #
  824. # @Arugs:
  825. # history: 模型训练迭代的历史记录
  826. # '''
  827. # import matplotlib.pyplot as plt
  828. #
  829. # # loss图
  830. # loss = history.history['loss']
  831. # val_loss = history.history['val_loss']
  832. # epochs = range(1, len(loss) + 1)
  833. # plt.subplot(2, 2, 1)
  834. # plt.plot(epochs, loss, 'bo', label='Training loss')
  835. # plt.plot(epochs, val_loss, 'b', label='Validation loss')
  836. # plt.title('Training and validation loss')
  837. # plt.xlabel('Epochs')
  838. # plt.ylabel('Loss')
  839. # plt.legend()
  840. #
  841. # # f1图
  842. # f1 = history.history['f1_score']
  843. # val_f1 = history.history['val_f1_score']
  844. # plt.subplot(2, 2, 2)
  845. # plt.plot(epochs, f1, 'bo', label='Training f1')
  846. # plt.plot(epochs, val_f1, 'b', label='Validation f1')
  847. # plt.title('Training and validation f1')
  848. # plt.xlabel('Epochs')
  849. # plt.ylabel('F1')
  850. # plt.legend()
  851. #
  852. # # precision图
  853. # prec = history.history['precision']
  854. # val_prec = history.history['val_precision']
  855. # plt.subplot(2, 2, 3)
  856. # plt.plot(epochs, prec, 'bo', label='Training precision')
  857. # plt.plot(epochs, val_prec, 'b', label='Validation pecision')
  858. # plt.title('Training and validation precision')
  859. # plt.xlabel('Epochs')
  860. # plt.ylabel('Precision')
  861. # plt.legend()
  862. #
  863. # # recall图
  864. # recall = history.history['recall']
  865. # val_recall = history.history['val_recall']
  866. # plt.subplot(2, 2, 4)
  867. # plt.plot(epochs, recall, 'bo', label='Training recall')
  868. # plt.plot(epochs, val_recall, 'b', label='Validation recall')
  869. # plt.title('Training and validation recall')
  870. # plt.xlabel('Epochs')
  871. # plt.ylabel('Recall')
  872. # plt.legend()
  873. #
  874. # plt.show()
if __name__=="__main__":
    # print(fool_char_to_id[">"])
    # Smoke test: convert a fully-written Chinese money string to its numeric value.
    print(getUnifyMoney('伍仟贰佰零壹拾伍万零捌佰壹拾元陆角伍分'))
    # model = getModel_w2v()
    # vocab,matrix = getVocabAndMatrix(model, Embedding_size=128)
    # save([vocab,matrix],"vocabMatrix_words.pk")