# entityLink.py
  1. #coding:UTF8
  2. '''
  3. Created on 2019年5月21日
  4. @author: User
  5. '''
  6. import re
  7. import os
  8. import time
  9. import pandas as pd
  10. _time = time.time()
  11. from BiddingKG.dl.common.Utils import *
  12. from BiddingKG.dl.interface.Entitys import *
  13. import json
  14. def edit_distance(source,target):
  15. dp = [["" for i in range(len(source)+1)] for j in range(len(target)+1)]
  16. for i in range(len(dp)):
  17. for j in range(len(dp[i])):
  18. if i==0:
  19. dp[i][j] = j
  20. elif j==0:
  21. dp[i][j] = i
  22. else:
  23. if source[j-1]==target[i-1]:
  24. cost = 0
  25. else:
  26. cost = 2
  27. dp[i][j] = min([dp[i-1][j]+1,dp[i][j-1]+1,dp[i-1][j-1]+cost])
  28. return dp[-1][-1]
  29. def jaccard_score(source,target):
  30. source_set = set([s for s in source])
  31. target_set = set([s for s in target])
  32. if len(source_set)==0 or len(target_set)==0:
  33. return 0
  34. return max(len(source_set&target_set)/len(source_set),len(source_set&target_set)/len(target_set))
  35. def get_place_list():
  36. path = os.path.dirname(__file__) + '/../place_info.csv'
  37. place_df = pd.read_csv(path)
  38. place_list = []
  39. for index, row in place_df.iterrows():
  40. place_list.append(row[1])
  41. place_list.append('台湾')
  42. place_list.append('澳门')
  43. place_list.append('香港')
  44. # place_list.append('東莞')
  45. # place_list.append('廣州')
  46. # place_list.append('韩国')
  47. # place_list.append('德国')
  48. # place_list.append('英国')
  49. # place_list.append('日本')
  50. # place_list.append('意大利')
  51. # place_list.append('新加坡')
  52. # place_list.append('加拿大')
  53. # place_list.append('西班牙')
  54. # place_list.append('澳大利亚')
  55. # place_list.append('美国')
  56. place_list = list(set(place_list))
  57. return place_list
# Module-level cache of known place names, plus a regex alternation over
# them; both are used when comparing/normalising company names.
place_list = get_place_list()
place_pattern = "|".join(place_list)
def link_entitys(list_entitys, on_value=1):  # on_value=0.81
    """Normalise org/company entity texts within each document, in place.

    For every document's entity list, collects its org/company entities
    (capped at 1000) and then:
      1. if an entity text does not contain "公司", replaces it with a
         longer linked entity text that ends in "公司";
      2. (2021/12/21) replaces an entity text with the longest linked
         entity that was matched from the enterprise dictionary
         (``if_dict_match == 1``), provided both texts mention exactly the
         same place names.
    Mutates the Entity objects; returns None.
    """
    for list_entity in list_entitys:
        range_entity = []
        for _entity in list_entity:
            if _entity.entity_type in ["org", "company"]:
                range_entity.append(_entity)
        range_entity = range_entity[:1000]
        # The cross-entity linking below had problems and is disabled for now.
        # for first_i in range(len(range_entity)):
        #     _entity = range_entity[first_i]
        #     for second_i in range(first_i+1,len(range_entity)):
        #         _ent = range_entity[second_i]
        #         # 2021/5/21 update: skip when the two labels are mutually
        #         # exclusive (one tenderer, one agent) and the texts differ
        #         if _entity.entity_text != _ent.entity_text and _entity.label != _ent.label and _entity.label in [0,1] and _ent.label in [0, 1]:
        #             continue
        #         _score = jaccard_score(re.sub("%s|%s"%("股份|责任|有限|公司",place_pattern),"",_entity.entity_text), re.sub("%s|%s"%("股份|责任|有限|公司",place_pattern),"",_ent.entity_text))
        #         if _entity.entity_text!=_ent.entity_text and _score>=on_value:
        #             _entity.linked_entitys.append(_ent)
        #             _ent.linked_entitys.append(_entity)
        # Replace company names: prefer a longer linked name ending in "公司"
        for _entity in range_entity:
            if re.search("公司", _entity.entity_text) is None:
                for _ent in _entity.linked_entitys:
                    if re.search("公司$", _ent.entity_text) is not None:
                        if len(_ent.entity_text) > len(_entity.entity_text):
                            _entity.entity_text = _ent.entity_text
        # 2021/12/21: replace with the longest similar entity recognised via
        # the enterprise dictionary
        for _entity in range_entity:
            used_linked_entitys = []
            if not _entity.linked_entitys:
                continue
            # longest candidate first
            _entity.linked_entitys.sort(key=lambda x: len(x.entity_text), reverse=True)
            for _ent in _entity.linked_entitys:
                if _ent in used_linked_entitys:
                    break
                if _ent.if_dict_match == 1:
                    if len(_ent.entity_text) > len(_entity.entity_text):
                        # only replace when both texts mention exactly the same
                        # place names (compared in list order)
                        match_list_1, match_list_2 = [], []
                        for place in place_list:
                            if place in _entity.entity_text:
                                match_list_1.append(place)
                            if place in _ent.entity_text:
                                match_list_2.append(place)
                        if str(match_list_1) == str(match_list_2):
                            _entity.origin_entity_text = _entity.entity_text
                            _entity.entity_text = _ent.entity_text
                used_linked_entitys.append(_ent)
  112. # 用于去重的标题
  113. def doctitle_refine(doctitle):
  114. _doctitle_refine = re.sub(r'工程|服务|询价|比价|谈判|竞争性|磋商|结果|中标|招标|采购|的|公示|公开|成交|公告|评标|候选人|'
  115. r'交易|通知|废标|流标|终止|中止|一笔|预告|单一来源|竞价|合同', '', doctitle)
  116. return _doctitle_refine
  117. # 前100个公司实体
  118. def get_nlp_enterprise(list_entity):
  119. nlp_enterprise = []
  120. nlp_enterprise_attachment = []
  121. max_num = 100
  122. list_entity = sorted(list_entity,key=lambda x:(x.sentence_index,x.begin_index))
  123. for entity in list_entity:
  124. if entity.entity_type in ['org','company']:
  125. if not entity.in_attachment:
  126. if entity.entity_text not in nlp_enterprise:
  127. nlp_enterprise.append(entity.entity_text)
  128. else:
  129. if entity.entity_text not in nlp_enterprise_attachment:
  130. nlp_enterprise_attachment.append(entity.entity_text)
  131. return nlp_enterprise[:max_num],nlp_enterprise_attachment[:max_num]
  132. def getEnterprisePath():
  133. filename_huge = "LEGAL_ENTERPRISE_HUGE.txt"
  134. huge_path = getFileFromSysPath(filename_huge)
  135. if huge_path is None:
  136. if os.path.exists(huge_path):
  137. log("enterprise path:%s"%(huge_path))
  138. return huge_path
  139. else:
  140. log("enterprise path:%s"%(huge_path))
  141. return huge_path
  142. filename = "LEGAL_ENTERPRISE.txt"
  143. real_path = getFileFromSysPath(filename)
  144. if real_path is None:
  145. real_path = filename
  146. log("enterprise path:%s"%(real_path))
  147. return real_path
# Enterprise-name dictionary: maps the first 4 characters of a name to the
# set of possible remainders.  DICT_ENTERPRISE_DONE flips to True once the
# background loader (getDict_enterprise) has finished.
DICT_ENTERPRISE = {}
DICT_ENTERPRISE_DONE = False
def getDict_enterprise():
    """Load the enterprise dictionary from getEnterprisePath().

    Each line of the file is one enterprise name; names of length >= 4 are
    indexed by their first 4 characters, with the remainder stored in a
    set.  Sets the module flag DICT_ENTERPRISE_DONE = True when finished so
    that waiting consumers (match_enterprise_max_first) can proceed.

    Returns the populated DICT_ENTERPRISE mapping.
    """
    global DICT_ENTERPRISE, DICT_ENTERPRISE_DONE
    real_path = getEnterprisePath()
    with open(real_path, "r", encoding="UTF8") as f:
        for _e in f:
            if not _e:
                continue
            _e = _e.strip()
            if len(_e) >= 4:
                key_enter = _e[:4]  # 4-char prefix is the lookup key
                if key_enter not in DICT_ENTERPRISE:
                    DICT_ENTERPRISE[key_enter] = set()
                DICT_ENTERPRISE[key_enter].add(_e[4:])
    # NOTE(review): sys.getsizeof is shallow — this logs only the dict
    # object's own size, not the contained sets/strings.
    log("dict_enterprise takes memory:%dM"%(sys.getsizeof(DICT_ENTERPRISE)/1024/1024))
    DICT_ENTERPRISE_DONE = True
    return DICT_ENTERPRISE
import threading
import time
# Load the (large) enterprise dictionary on a background thread so that
# importing this module is not blocked; consumers busy-wait on
# DICT_ENTERPRISE_DONE (see match_enterprise_max_first).
load_enterprise_thread = threading.Thread(target=getDict_enterprise)
load_enterprise_thread.start()
# Longest enterprise name (in characters) the matcher will consider.
MAX_ENTERPRISE_LEN = 30
def match_enterprise_max_first(sentence):
    """Greedy longest-first dictionary match of enterprise names.

    Blocks (polling once per second) until the background dictionary load
    has finished, then scans *sentence* left to right: at each position the
    4-character prefix is looked up in DICT_ENTERPRISE and candidate
    suffixes are tried from the longest (MAX_ENTERPRISE_LEN) downwards.

    Returns a list of dicts with keys "entity_text", "begin_index" and
    "end_index" (character offsets into *sentence*).
    """
    # wait for the background loader thread (see load_enterprise_thread)
    while True:
        if not DICT_ENTERPRISE_DONE:
            time.sleep(1)
        else:
            break
    list_match = []
    begin_index = 0
    if len(sentence) > 4:
        while True:
            if begin_index + 4 < len(sentence):
                key_enter = sentence[begin_index:begin_index + 4]
                if key_enter in DICT_ENTERPRISE:
                    # try suffixes from longest to shortest -> max-first match
                    for _i in range(MAX_ENTERPRISE_LEN - 4 + 1):
                        enter_name = sentence[begin_index + 4:begin_index + MAX_ENTERPRISE_LEN - _i]
                        if enter_name in DICT_ENTERPRISE[key_enter]:
                            match_item = {"entity_text": "%s%s"%(key_enter, enter_name),
                                          "begin_index": begin_index,
                                          "end_index": begin_index + len(key_enter) + len(enter_name)}
                            list_match.append(match_item)
                            # jump past the match; -1 compensates the +=1 below
                            begin_index += (len(key_enter) + len(enter_name)) - 1
                            break
                begin_index += 1
            else:
                break
    return list_match
def calibrateEnterprise(list_articles, list_sentences, list_entitys):
    """Calibrate extracted org/company entities against the enterprise dictionary.

    For each article, runs match_enterprise_max_first over every sentence
    and reconciles the dictionary hits with the already-extracted entities:
    overlapping entities are renamed/re-typed, an entity that actually
    covers several consecutive dictionary matches is split into several
    company entities, and dictionary hits with no overlapping entity are
    added as new "company" entities.

    Side effects: mutates entities and entity lists in place and records
    the changes on ``_article.match_enterprise`` (deduplicated) and
    ``_article.match_enterprise_type`` (bit 1 = entities added,
    bit 2 = entities replaced).  Returns None.
    """
    for _article, list_sentence, list_entity in zip(list_articles, list_sentences, list_entitys):
        list_calibrate = []
        match_add = False
        match_replace = False
        # candidate entities to reconcile (capped around 1000)
        range_entity = []
        for p_entity in list_entity:
            if p_entity.entity_type in ("org", "company", "location"):
                range_entity.append(p_entity)
            if len(range_entity) > 1000:
                break
        for p_sentence in list_sentence:
            sentence = p_sentence.sentence_text
            # existing (text, begin, end) triples of this sentence, used to
            # avoid re-adding an identical entity
            sentence_entitys = [(ent.entity_text, ent.wordOffset_begin, ent.wordOffset_end) for ent in list_entity if ent.sentence_index == p_sentence.sentence_index and ent.entity_type in ['org', 'company']]
            list_match = match_enterprise_max_first(sentence)
            doc_id = p_sentence.doc_id
            sentence_index = p_sentence.sentence_index
            tokens = p_sentence.tokens
            list_match.sort(key=lambda x: x["begin_index"])
            for _match_index in range(len(list_match)):
                _match = list_match[_match_index]
                find_flag = False
                for p_entity in range_entity:
                    if p_entity.sentence_index != p_sentence.sentence_index:
                        continue
                    # a "location" whose text equals the dictionary hit is
                    # actually a company
                    if p_entity.entity_type == "location" and p_entity.entity_text == _match["entity_text"]:
                        find_flag = True
                        p_entity.entity_type = "company"
                        p_entity.if_dict_match = 1
                    if p_entity.entity_type not in ["location", "org", "company"]:
                        continue
                    if _match["entity_text"] == p_entity.entity_text:
                        p_entity.if_dict_match = 1
                    # --- overlap handling ---
                    # case 1: match fully contained in the entity span.
                    # Normally leave the entity alone, unless SEVERAL
                    # consecutive matches fit inside it (the entity holds
                    # several companies) -> replace with the first and add
                    # the rest as new entities.
                    if _match["begin_index"] >= p_entity.wordOffset_begin and _match["end_index"] <= p_entity.wordOffset_end:
                        find_flag = True
                        for _match_j in range(_match_index, len(list_match)):
                            if not list_match[_match_j]["end_index"] <= p_entity.wordOffset_end:
                                _match_j -= 1
                                break
                        if _match_j > _match_index:
                            match_replace = True
                            match_add = True
                            begin_index = changeIndexFromWordToWords(tokens, _match["begin_index"])
                            end_index = changeIndexFromWordToWords(tokens, _match["end_index"] - 1)
                            list_calibrate.append({"type": "update", "from": p_entity.entity_text, "to": _match["entity_text"]})
                            p_entity.entity_text = _match["entity_text"]
                            p_entity.wordOffset_begin = _match["begin_index"]
                            p_entity.wordOffset_end = _match["end_index"]
                            p_entity.begin_index = begin_index
                            p_entity.end_index = end_index
                            # this entity now comes from the dictionary
                            p_entity.if_dict_match = 1
                            for _match_h in range(_match_index + 1, _match_j + 1):
                                entity_text = list_match[_match_h]["entity_text"]
                                entity_type = "company"
                                begin_index = changeIndexFromWordToWords(tokens, list_match[_match_h]["begin_index"])
                                end_index = changeIndexFromWordToWords(tokens, list_match[_match_h]["end_index"] - 1)
                                entity_id = "%s_%d_%d_%d"%(doc_id, sentence_index, begin_index, end_index)
                                add_entity = Entity(p_sentence.doc_id, entity_id, entity_text, entity_type, sentence_index, begin_index, end_index, list_match[_match_h]["begin_index"], list_match[_match_h]["end_index"], in_attachment=p_sentence.in_attachment)
                                add_entity.if_dict_match = 1
                                list_entity.append(add_entity)
                                range_entity.append(add_entity)
                                list_calibrate.append({"type": "add", "from": "", "to": entity_text})
                            # NOTE(review): rebinding a range() loop variable has
                            # no effect on the outer iteration — the matches
                            # between _match_index and _match_j will be visited
                            # again; confirm this is intended.
                            _match_index = _match_j
                            break
                        continue
                    # case 2: match overlaps the entity's head
                    elif _match["begin_index"] <= p_entity.wordOffset_begin and _match["end_index"] > p_entity.wordOffset_begin:
                        find_flag = True
                        if _match["begin_index"] < p_entity.wordOffset_begin and _match["end_index"] <= p_entity.wordOffset_end:
                            if p_entity.entity_type in ("org", "company"):
                                # NOTE(review): with end_index <= wordOffset_end this
                                # slice is always empty, so the "分" (branch) guard
                                # below can never trigger here — confirm offsets.
                                _diff_text = sentence[p_entity.wordOffset_end:_match["end_index"]]
                                if re.search("分", _diff_text) is not None:
                                    pass
                                else:
                                    match_replace = True
                                    begin_index = changeIndexFromWordToWords(tokens, _match["begin_index"])
                                    end_index = changeIndexFromWordToWords(tokens, _match["end_index"] - 1)
                                    list_calibrate.append({"type": "update", "from": p_entity.entity_text, "to": _match["entity_text"]})
                                    p_entity.entity_text = _match["entity_text"]
                                    p_entity.wordOffset_begin = _match["begin_index"]
                                    p_entity.wordOffset_end = _match["end_index"]
                                    p_entity.begin_index = begin_index
                                    p_entity.end_index = end_index
                                    p_entity.if_dict_match = 1
                        elif _match["end_index"] >= p_entity.wordOffset_end:
                            # match covers the whole entity: replace, unless an
                            # identical entity already exists in this sentence
                            if (_match["entity_text"], _match["begin_index"], _match["end_index"]) not in sentence_entitys:
                                match_replace = True
                                begin_index = changeIndexFromWordToWords(tokens, _match["begin_index"])
                                end_index = changeIndexFromWordToWords(tokens, _match["end_index"] - 1)
                                list_calibrate.append({"type": "update", "from": p_entity.entity_text, "to": _match["entity_text"]})
                                p_entity.entity_text = _match["entity_text"]
                                p_entity.wordOffset_begin = _match["begin_index"]
                                p_entity.wordOffset_end = _match["end_index"]
                                p_entity.begin_index = begin_index
                                p_entity.end_index = end_index
                                p_entity.entity_type = "company"
                                p_entity.if_dict_match = 1
                    # case 3: match overlaps the entity's tail
                    elif _match["begin_index"] < p_entity.wordOffset_end and _match["end_index"] > p_entity.wordOffset_end:
                        find_flag = True
                        if p_entity.entity_type in ("org", "company"):
                            match_replace = True
                            begin_index = changeIndexFromWordToWords(tokens, _match["begin_index"])
                            end_index = changeIndexFromWordToWords(tokens, _match["end_index"] - 1)
                            list_calibrate.append({"type": "update", "from": p_entity.entity_text, "to": _match["entity_text"]})
                            p_entity.entity_text = _match["entity_text"]
                            p_entity.wordOffset_begin = _match["begin_index"]
                            p_entity.wordOffset_end = _match["end_index"]
                            p_entity.begin_index = begin_index
                            p_entity.end_index = end_index
                            p_entity.if_dict_match = 1
                if not find_flag:
                    # dictionary hit with no overlapping entity: add it
                    match_add = True
                    entity_text = _match["entity_text"]
                    entity_type = "company"
                    begin_index = changeIndexFromWordToWords(tokens, _match["begin_index"])
                    end_index = changeIndexFromWordToWords(tokens, _match["end_index"] - 1)
                    entity_id = "%s_%d_%d_%d"%(doc_id, sentence_index, begin_index, end_index)
                    add_entity = Entity(p_sentence.doc_id, entity_id, entity_text, entity_type, sentence_index, begin_index, end_index, _match["begin_index"], _match["end_index"], in_attachment=p_sentence.in_attachment)
                    list_entity.append(add_entity)
                    range_entity.append(add_entity)
                    list_calibrate.append({"type": "add", "from": "", "to": entity_text})
        # de-duplicate the calibration records by (from, to)
        set_calibrate = set()
        list_match_enterprise = []
        for _calibrate in list_calibrate:
            _from = _calibrate.get("from", "")
            _to = _calibrate.get("to", "")
            _key = _from + _to
            if _key not in set_calibrate:
                list_match_enterprise.append(_calibrate)
                set_calibrate.add(_key)
        match_enterprise_type = 0
        if match_add:
            match_enterprise_type += 1
        if match_replace:
            match_enterprise_type += 2
        _article.match_enterprise = list_match_enterprise
        _article.match_enterprise_type = match_enterprise_type
  347. def isLegalEnterprise(name):
  348. is_legal = True
  349. if re.search("^[省市区县]",name) is not None or re.search("^\**.{,3}(分(公司|行|支)|街道|中心|办事处|经营部|委员会|有限公司)$",name) or re.search("标段|标包|名称|联系人|联系方式|中标单位|中标人|测试单位|采购单位|采购人|代理人|代理机构|盖章|(主)",name) is not None:
  350. is_legal = False
  351. return is_legal
  352. def fix_LEGAL_ENTERPRISE():
  353. unlegal_enterprise = []
  354. _path = getEnterprisePath()
  355. _sum = 0
  356. set_enter = set()
  357. paths = [_path]
  358. for _p in paths:
  359. with open(_p,"r",encoding="utf8") as f:
  360. while True:
  361. line = f.readline()
  362. if not line:
  363. break
  364. line = line.strip()
  365. if isLegalEnterprise(line):
  366. set_enter.add(line)
  367. if line=="有限责任公司" or line=='设计研究院' or line=='限责任公司' or (re.search("^.{,4}(分公司|支行|分行)$",line) is not None and re.search("电信|移动|联通|建行|工行|农行|中行|交行",line) is None):
  368. print(line)
  369. if line in set_enter:
  370. set_enter.remove(line)
  371. with open("enter.txt","w",encoding="utf8") as fwrite:
  372. for line in list(set_enter):
  373. fwrite.write(line.replace("(","(").replace(")",")"))
  374. fwrite.write("\n")
  375. # if re.search("标段|地址|标包|名称",line) is not None:#\(|\)||
  376. # _count += 1
  377. # print("=",line)
  378. # print("%d/%d"%(_count,_sum))
  379. # a_list = []
  380. # with open("电信分公司.txt","r",encoding="utf8") as f:
  381. # while True:
  382. # _line = f.readline()
  383. # if not _line:
  384. # break
  385. # if _line.strip()!="":
  386. # a_list.append(_line.strip())
  387. # with open("enter.txt","a",encoding="utf8") as f:
  388. # for _line in a_list:
  389. # f.write(_line)
  390. # f.write("\n")
if __name__ == "__main__":
    # Ad-hoc manual smoke tests; most are kept here disabled.
    # edit_distance("GUMBO","GAMBOL")
    # print(jaccard_score("周口经济开发区陈营运粮河两岸拆迁工地土工布覆盖项目竞争性谈判公告","周口经济开发区陈营运粮河两岸拆迁工地土工布覆盖项目-成交公告"))
    # sentences = "广州比地数据科技有限公司比地数据科技有限公司1111111123沈阳南光工贸有限公司"
    # print(match_enterprise_max_first(sentences))
    # print("takes %d s"%(time.time()-_time))
    # fix_LEGAL_ENTERPRISE()
    print(jaccard_score("吉林省九台","吉林省建苑设计集团有限公司"))
    # print(match_enterprise_max_first("中国南方航空股份有限公司黑龙江分公司"))