# extract.py
  1. '''
  2. Created on 2019年1月4日
  3. @author: User
  4. '''
  5. import os
  6. from bs4 import BeautifulSoup, Comment
  7. import copy
  8. import re
  9. import sys
  10. import os
  11. import codecs
  12. import requests
  13. import time
  14. _time1 = time.time()
  15. sys.path.append(os.path.abspath("../.."))
  16. from BiddingKG.dl.common.Utils import *
  17. import BiddingKG.dl.entityLink.entityLink as entityLink
  18. import BiddingKG.dl.interface.predictor as predictor
  19. import BiddingKG.dl.interface.Preprocessing as Preprocessing
  20. import BiddingKG.dl.interface.getAttributes as getAttributes
  21. import BiddingKG.dl.complaint.punish_predictor as punish_rule
  22. import json
  23. from BiddingKG.dl.money.re_money_total_unit import extract_total_money, extract_unit_money
  24. from BiddingKG.dl.ratio.re_ratio import extract_ratio
  25. # 自定义jsonEncoder
  26. class MyEncoder(json.JSONEncoder):
  27. def default(self, obj):
  28. if isinstance(obj, np.ndarray):
  29. return obj.tolist()
  30. elif isinstance(obj, bytes):
  31. return str(obj, encoding='utf-8')
  32. elif isinstance(obj, (np.float_, np.float16, np.float32,
  33. np.float64)):
  34. return float(obj)
  35. elif isinstance(obj,str):
  36. return obj
  37. return json.JSONEncoder.default(self, obj)
def predict(doc_id,text,title="",page_time="",**kwargs):
    """Run the full extraction pipeline over one document and return a JSON string.

    Parameters:
        doc_id: document identifier, forwarded to preprocessing and predictors.
        text: raw document content (presumably HTML — see the __main__ driver).
        title: document title; used by the channel predictor and the
            supervision/design/survey money-relabel rule below.
        page_time: page publish time, forwarded to preprocessing.
        **kwargs: accepted but unused; kept for call-site compatibility.

    Returns:
        JSON string (serialized with MyEncoder) merging code/name, prem,
        channel, product-attribute results plus total/unit money and ratio
        lists, with per-stage timings under "cost_time" and "success": True.
    """
    cost_time = dict()
    start_time = time.time()
    # log("start process doc %s"%(str(doc_id)))
    list_articles,list_sentences,list_entitys,_cost_time = Preprocessing.get_preprocessed([[doc_id,text,"","",title,page_time]],useselffool=True)
    # log("get preprocessed done of doc_id%s"%(doc_id))
    cost_time["preprocess"] = round(time.time()-start_time,2)
    cost_time.update(_cost_time)

    # Depends on sentence order (channel predictor reads the first article's sentences).
    start_time = time.time()
    list_channel_dic = predictor.getPredictor("channel").predict(title=title, content=list_sentences[0])
    cost_time["channel"] = round(time.time()-start_time,2)

    start_time = time.time()
    codeName = predictor.getPredictor("codeName").predict(list_sentences,MAX_AREA=5000,list_entitys=list_entitys)
    # log("get codename done of doc_id%s"%(doc_id))
    cost_time["codename"] = round(time.time()-start_time,2)

    start_time = time.time()
    predictor.getPredictor("prem").predict(list_sentences,list_entitys)
    # log("get prem done of doc_id%s"%(doc_id))
    cost_time["prem"] = round(time.time()-start_time,2)

    start_time = time.time()
    predictor.getPredictor("product").predict(list_sentences,list_entitys)
    # log("get product done of doc_id%s"%(doc_id))
    cost_time["product"] = round(time.time()-start_time,2)

    start_time = time.time()
    product_attrs = predictor.getPredictor("product_attrs").predict(doc_id, text, page_time)
    # log("get product attributes done of doc_id%s"%(doc_id))
    cost_time["product_attrs"] = round(time.time()-start_time,2)

    start_time = time.time()
    predictor.getPredictor("roleRule").predict(list_articles,list_sentences, list_entitys,codeName)
    cost_time["rule"] = round(time.time()-start_time,2)

    start_time = time.time()
    predictor.getPredictor("epc").predict(list_sentences,list_entitys)
    # log("get epc done of doc_id%s"%(doc_id))
    cost_time["person"] = round(time.time()-start_time,2)

    start_time = time.time()
    predictor.getPredictor("time").predict(list_sentences, list_entitys)
    # log("get time done of doc_id%s"%(doc_id))
    cost_time["time"] = round(time.time()-start_time,2)

    # Must run after getPredictor("prem") and before getAttributes.getPREMs.
    # If the title mentions exactly one of supervision/construction/design/survey
    # and is not a construction/EPC notice, relabel matching fee-money entities
    # (label==2) to tender money (0) or bid-winning money (1) by channel.
    if len(re.findall('监理|施工|设计|勘察', title))==1 and re.search('施工|总承包|epc|EPC',title)==None:
        keyword = re.search('监理|设计|勘察', title).group(0)
        for list_entity in list_entitys:
            for _entity in list_entity:
                # print('keyword:',keyword, '_entity.notes :',_entity.notes)
                if _entity.entity_type == "money" and _entity.notes == keyword and _entity.label==2:
                    if list_channel_dic[0]['docchannel'] == "招标公告":
                        _entity.values[0] = 0.51
                        _entity.set_Money(0, _entity.values)  # 2021/11/18: switch fee to tender/bid-winning money by channel
                    else:
                        _entity.values[1] = 0.51
                        _entity.set_Money(1, _entity.values)

    # Added 2021-12-08: extract total price, unit price and ratios.
    total_money_list = []
    unit_money_list = []
    ratio_list = []
    for i in range(len(list_entitys)):
        list_entity = list_entitys[i]
        # Total price / unit price per money entity.
        for _entity in list_entity:
            if _entity.entity_type == 'money':
                word_of_sentence = list_sentences[i][_entity.sentence_index].sentence_text
                # Total price is looked for in bid-winning money entities (label == 1).
                if _entity.label == 1:
                    result = extract_total_money(word_of_sentence,
                                                 _entity.entity_text,
                                                 [_entity.wordOffset_begin, _entity.wordOffset_end])
                    if result:
                        total_money_list.append(result)
                # Unit price is looked for in the remaining money entities.
                else:
                    result = extract_unit_money(word_of_sentence,
                                                _entity.entity_text,
                                                [_entity.wordOffset_begin, _entity.wordOffset_end])
                    if result:
                        unit_money_list.append(result)
        # Ratios: scan the article's concatenated sentences.
        all_sentence = ""
        for sentence in list_sentences[i]:
            all_sentence += sentence.sentence_text + ","
        result = extract_ratio(all_sentence)
        if result:
            ratio_list.append(result)

    # Depends on sentence order.
    start_time = time.time()
    entityLink.link_entitys(list_entitys)
    prem = getAttributes.getPREMs(list_sentences,list_entitys,list_articles)
    # log("get attributes done of doc_id%s"%(doc_id))
    cost_time["attrs"] = round(time.time()-start_time,2)

    start_time = time.time()
    list_punish_dic = predictor.getPredictor("punish").get_punish_extracts(list_articles,list_sentences, list_entitys)
    cost_time["punish"] = round(time.time()-start_time,2)

    if len(product_attrs[1]['demand_info']['data'])>0:
        for d in product_attrs[1]['demand_info']['data']:
            for product in set(prem[0]['product']):
                if product in d['project_name']:
                    d['product'].append(product)  # add products that appear in the project name to the demand info

    # print(prem)
    # data_res = Preprocessing.union_result(Preprocessing.union_result(codeName, prem),list_punish_dic)[0]
    # data_res = Preprocessing.union_result(Preprocessing.union_result(Preprocessing.union_result(codeName, prem),list_punish_dic), list_channel_dic)[0]
    # NOTE(review): later dict keys overwrite earlier ones on collision — codeName
    # has lowest precedence, product_attrs[1] highest.
    data_res = dict(codeName[0], **prem[0], **list_channel_dic[0], **product_attrs[0], **product_attrs[1])
    data_res["cost_time"] = cost_time
    data_res["success"] = True
    data_res["total_money"] = total_money_list
    data_res["unit_money"] = unit_money_list
    data_res["ratio"] = ratio_list

    # for _article in list_articles:
    #     log(_article.content)
    #
    # for list_entity in list_entitys:
    #     for _entity in list_entity:
    #         log("type:%s,text:%s,label:%s,values:%s,sentence:%s,begin_index:%s,end_index:%s"%
    #             (str(_entity.entity_type),str(_entity.entity_text),str(_entity.label),str(_entity.values),str(_entity.sentence_index),
    #              str(_entity.begin_index),str(_entity.end_index)))
    return json.dumps(data_res,cls=MyEncoder,sort_keys=True,indent=4,ensure_ascii=False)
  153. def test(name,content):
  154. user = {
  155. "content": content,
  156. "id":name
  157. }
  158. myheaders = {'Content-Type': 'application/json'}
  159. _resp = requests.post("http://192.168.2.102:15030" + '/article_extract', json=user, headers=myheaders, verify=True)
  160. resp_json = _resp.content.decode("utf-8")
  161. # print(resp_json)
  162. return resp_json
  163. if __name__=="__main__":
  164. import pandas as pd
  165. t1 = time.time()
  166. # text = '中标人:广州中医药有限公司,招标人:广州市第一人民医院, 代理机构:希达招标代理有限公司。招标金额:100万元, 手续费:100元,总投资:1亿元。中标金额:50000元。合同金额:50000万元。'
  167. title = '打印机'
  168. # df = pd.read_excel('E:/公告金额/产品名称采购需求预算金额采购时间等要素公告.xlsx')
  169. # # df = pd.read_excel('E:/公告金额/产品数量单价.xlsx')
  170. # for i in range(30,50,1):
  171. # text = df.loc[i, 'dochtmlcon']
  172. # rs = json.loads(predict('', text, ''))
  173. # print(rs['demand_info'])
  174. # print(rs['product'])
  175. # print(rs['product_attrs'])
  176. # print(rs)
  177. with open('D:/html/138786703.html', 'r', encoding='utf-8') as f:
  178. text = f.read()
  179. print(predict('', text, title))
  180. # print(predict('',text,title))
  181. # df = pd.read_excel('E:/大网站规则识别/大网站要素提取结果2.xlsx')[:]
  182. # df = pd.read_excel('/data/python/lsm/datas_biddingkg/大网站要素提取结果20211115_2.xlsx')[:]
  183. # new_prem = []
  184. # for i in range(len(df)):
  185. # i = 530
  186. # doc_id = df.loc[i, 'docid']
  187. # text = df.loc[i, 'html']
  188. # # title = df.loc[i, 'doctitle']
  189. # rs = predict(doc_id,text)
  190. # rs = json.loads(rs)
  191. # prem = json.dumps(rs['prem'], ensure_ascii=False)
  192. # # print(rs)
  193. # new_prem.append(prem)
  194. # print(prem)
  195. # break
  196. # df['new_prem'] = pd.Series(new_prem)
  197. # print('耗时:', time.time()-t1)
  198. # # df.to_excel('E:/大网站规则识别/大网站要素提取结果20211115.xlsx')
  199. # df.to_excel('/data/python/lsm/datas_biddingkg/大网站要素提取结果20211115.xlsx')
  200. # # pass