'''
entityLink.py
Created on 2019-05-21
@author: User
'''
import re
import os
import time
_time = time.time()

from BiddingKG.dl.common.Utils import *
from BiddingKG.dl.interface.Entitys import *

import json

def edit_distance(source,target):
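    '''
    Character-level edit distance computed by dynamic programming.
    Insertions and deletions cost 1; substitutions cost 2, so replacing a
    character is never cheaper than deleting it and inserting another.
    '''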
    dp = [[0 for i in range(len(source)+1)] for j in range(len(target)+1)]
    for i in range(len(dp)):
        for j in range(len(dp[i])):
            if i==0:
                dp[i][j] = j
            elif j==0:
                dp[i][j] = i
            else:
                if source[j-1]==target[i-1]:
                    cost = 0
                else:
                    cost = 2
                dp[i][j] = min([dp[i-1][j]+1,dp[i][j-1]+1,dp[i-1][j-1]+cost])
    return dp[-1][-1]

def jaccard_score(source,target):
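    '''
    Character-overlap similarity of two strings: the size of the shared
    character set divided by the smaller of the two set sizes
    (returns 0 if either string is empty).
    '''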
    source_set = set([s for s in source])
    target_set = set([s for s in target])
    if len(source_set)==0 or len(target_set)==0:
        return 0
    return max(len(source_set&target_set)/len(source_set),len(source_set&target_set)/len(target_set))

def link_entitys(list_entitys,on_value=0.8):
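    '''
    Link org/company entities within each document whose entity_text values
    are similar (jaccard_score >= on_value), then replace an entity's text
    with a longer linked form ending in 公司 when the entity itself lacks
    that suffix.
    '''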
    for list_entity in list_entitys:
        range_entity = []
        for _entity in list_entity:
            if _entity.entity_type in ["org","company"]:
                range_entity.append(_entity)
        range_entity = range_entity[:1000]
        for first_i in range(len(range_entity)):
            _entity = range_entity[first_i]
            for second_i in range(first_i+1,len(range_entity)):
                _ent = range_entity[second_i]
                # 2021/5/21 update: skip when the two labels are mutually exclusive
                # (one is the tenderer, the other the agent) and entity_text differs
                if _entity.entity_text != _ent.entity_text and _entity.label != _ent.label and _entity.label in [0,1] and _ent.label in [0, 1]:
                    continue
                _score = jaccard_score(_entity.entity_text, _ent.entity_text)
                if _entity.entity_text!=_ent.entity_text and _score>=on_value:
                    _entity.linked_entitys.append(_ent)
                    _ent.linked_entitys.append(_entity)
        # replace company names with the fuller linked form
        for _entity in range_entity:
            if re.search("公司",_entity.entity_text) is None:
                for _ent in _entity.linked_entitys:
                    if re.search("公司$",_ent.entity_text) is not None:
                        if len(_ent.entity_text)>len(_entity.entity_text):
                            _entity.entity_text = _ent.entity_text

def getEnterprisePath():
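    '''
    Resolve the path of the enterprise-name dictionary LEGAL_ENTERPRISE.txt
    via getFileFromSysPath, falling back to the bare filename when it cannot
    be located.
    '''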
    filename = "LEGAL_ENTERPRISE.txt"
    real_path = getFileFromSysPath(filename)
    if real_path is None:
        real_path = filename
    return real_path

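# DICT_ENTERPRISE maps the first four characters of an enterprise name to the
# set of possible remainders; DICT_ENTERPRISE_DONE flags that loading finished.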
DICT_ENTERPRISE = {}
DICT_ENTERPRISE_DONE = False

def getDict_enterprise():
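    '''
    Load LEGAL_ENTERPRISE.txt into DICT_ENTERPRISE, keyed by the first four
    characters of each name, and set DICT_ENTERPRISE_DONE when finished.
    '''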
    global DICT_ENTERPRISE,DICT_ENTERPRISE_DONE
    real_path = getEnterprisePath()
    with open(real_path,"r",encoding="UTF8") as f:
        for _e in f:
            if not _e:
                continue
            _e = _e.strip()
            if len(_e)>=4:
                key_enter = _e[:4]
                if key_enter not in DICT_ENTERPRISE:
                    DICT_ENTERPRISE[key_enter] = set()
                DICT_ENTERPRISE[key_enter].add(_e[4:])
    # for _e in ["河南省柘源","建筑工程有限公司"]:
    #     if not _e:
    #         continue
    #     _e = _e.strip()
    #     if len(_e)>=4:
    #         key_enter = _e[:4]
    #         if key_enter not in DICT_ENTERPRISE:
    #             DICT_ENTERPRISE[key_enter] = set()
    #         DICT_ENTERPRISE[key_enter].add(_e[4:])
    DICT_ENTERPRISE_DONE = True
    return DICT_ENTERPRISE

import threading
import time
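# The enterprise dictionary is loaded in a background thread at import time;
# match_enterprise_max_first waits on DICT_ENTERPRISE_DONE before matching.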
load_enterprise_thread = threading.Thread(target=getDict_enterprise)
load_enterprise_thread.start()

MAX_ENTERPRISE_LEN = 30

def match_enterprise_max_first(sentence):
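    '''
    Greedy longest-match lookup of enterprise names in a sentence: each
    4-character window is used as a key into DICT_ENTERPRISE and the longest
    remainder (up to MAX_ENTERPRISE_LEN characters in total) found in that
    key's set is taken as a match. Returns a list of dicts with entity_text,
    begin_index and end_index. Blocks until the dictionary has been loaded.
    '''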
    while True:
        if not DICT_ENTERPRISE_DONE:
            time.sleep(1)
        else:
            break
    list_match = []
    begin_index = 0
    if len(sentence)>4:
        while True:
            if begin_index+4<len(sentence):
                key_enter = sentence[begin_index:begin_index+4]
                if key_enter in DICT_ENTERPRISE:
                    for _i in range(MAX_ENTERPRISE_LEN-4+1):
                        enter_name = sentence[begin_index+4:begin_index+MAX_ENTERPRISE_LEN-_i]
                        if enter_name in DICT_ENTERPRISE[key_enter]:
                            match_item = {"entity_text":"%s%s"%(key_enter,enter_name),"begin_index":begin_index,"end_index":begin_index+len(key_enter)+len(enter_name)}
                            list_match.append(match_item)
                            begin_index += (len(key_enter)+len(enter_name))-1
                            break
                begin_index += 1
            else:
                break
    return list_match

def calibrateEnterprise(list_articles,list_sentences,list_entitys):
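    '''
    Align extracted org/company/location entities with the dictionary matches
    found by match_enterprise_max_first: extend or replace overlapping
    entities, add matches that were missed entirely, and record the
    calibration actions on each article as match_enterprise and
    match_enterprise_type (1 = added, 2 = replaced, 3 = both).
    '''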
    for _article,list_sentence,list_entity in zip(list_articles,list_sentences,list_entitys):
        list_calibrate = []
        match_add = False
        match_replace = False
        range_entity = []
        for p_entity in list_entity:
            if p_entity.entity_type in ("org","company","location"):
                range_entity.append(p_entity)
            if len(range_entity)>1000:
                break
        for p_sentence in list_sentence:
            sentence = p_sentence.sentence_text
            list_match = match_enterprise_max_first(sentence)
            doc_id = p_sentence.doc_id
            sentence_index = p_sentence.sentence_index
            tokens = p_sentence.tokens
            list_match.sort(key=lambda x:x["begin_index"])
            for _match_index in range(len(list_match)):
                _match = list_match[_match_index]
                find_flag = False
                for p_entity in range_entity:
                    if p_entity.sentence_index!=p_sentence.sentence_index:
                        continue
                    if p_entity.entity_type=="location" and p_entity.entity_text==_match["entity_text"]:
                        find_flag = True
                        p_entity.entity_type = "company"
                    if p_entity.entity_type not in ["location","org","company"]:
                        continue
                    # overlap handling
                    # if the match is fully contained in the entity, leave the entity as is
                    if _match["begin_index"]>=p_entity.wordOffset_begin and _match["end_index"]<=p_entity.wordOffset_end:
                        find_flag = True
                        # check whether the entity actually covers several consecutive matches (multiple companies)
                        for _match_j in range(_match_index,len(list_match)):
                            if not list_match[_match_j]["end_index"]<=p_entity.wordOffset_end:
                                _match_j -= 1
                                break
                        if _match_j>_match_index:
                            match_replace = True
                            match_add = True
                            begin_index = changeIndexFromWordToWords(tokens,_match["begin_index"])
                            end_index = changeIndexFromWordToWords(tokens,_match["end_index"])
                            list_calibrate.append({"type":"update","from":p_entity.entity_text,"to":_match["entity_text"]})
                            p_entity.entity_text = _match["entity_text"]
                            p_entity.wordOffset_begin = _match["begin_index"]
                            p_entity.wordOffset_end = _match["end_index"]
                            p_entity.begin_index = begin_index
                            p_entity.end_index = end_index
                            for _match_h in range(_match_index+1,_match_j+1):
                                entity_text = list_match[_match_h]["entity_text"]
                                entity_type = "company"
                                begin_index = changeIndexFromWordToWords(tokens,list_match[_match_h]["begin_index"])
                                end_index = changeIndexFromWordToWords(tokens,list_match[_match_h]["end_index"])
                                entity_id = "%s_%d_%d_%d"%(doc_id,sentence_index,begin_index,end_index)
                                add_entity = Entity(p_sentence.doc_id,entity_id,entity_text,entity_type,sentence_index,begin_index,end_index,list_match[_match_h]["begin_index"],list_match[_match_h]["end_index"])
                                list_entity.append(add_entity)
                                range_entity.append(add_entity)
                                list_calibrate.append({"type":"add","from":"","to":entity_text})
                            _match_index = _match_j
                            break
                        continue
                    elif _match["begin_index"]<=p_entity.wordOffset_begin and _match["end_index"]>p_entity.wordOffset_begin:
                        find_flag = True
                        if _match["begin_index"]<p_entity.wordOffset_begin and _match["end_index"]<=p_entity.wordOffset_end:
                            if p_entity.entity_type in ("org","company"):
                                _diff_text = sentence[p_entity.wordOffset_end:_match["end_index"]]
                                if re.search("分",_diff_text) is not None:
                                    pass
                                else:
                                    match_replace = True
                                    begin_index = changeIndexFromWordToWords(tokens,_match["begin_index"])
                                    end_index = changeIndexFromWordToWords(tokens,_match["end_index"])
                                    list_calibrate.append({"type":"update","from":p_entity.entity_text,"to":_match["entity_text"]})
                                    p_entity.entity_text = _match["entity_text"]
                                    p_entity.wordOffset_begin = _match["begin_index"]
                                    p_entity.wordOffset_end = _match["end_index"]
                                    p_entity.begin_index = begin_index
                                    p_entity.end_index = end_index
                        elif _match["end_index"]>=p_entity.wordOffset_end:
                            match_replace = True
                            begin_index = changeIndexFromWordToWords(tokens,_match["begin_index"])
                            end_index = changeIndexFromWordToWords(tokens,_match["end_index"])
                            list_calibrate.append({"type":"update","from":p_entity.entity_text,"to":_match["entity_text"]})
                            p_entity.entity_text = _match["entity_text"]
                            p_entity.wordOffset_begin = _match["begin_index"]
                            p_entity.wordOffset_end = _match["end_index"]
                            p_entity.begin_index = begin_index
                            p_entity.end_index = end_index
                            p_entity.entity_type = "company"
                    elif _match["begin_index"]<p_entity.wordOffset_end and _match["end_index"]>p_entity.wordOffset_end:
                        find_flag = True
                        if p_entity.entity_type in ("org","company"):
                            match_replace = True
                            begin_index = changeIndexFromWordToWords(tokens,_match["begin_index"])
                            end_index = changeIndexFromWordToWords(tokens,_match["end_index"])
                            list_calibrate.append({"type":"update","from":p_entity.entity_text,"to":_match["entity_text"]})
                            p_entity.entity_text = _match["entity_text"]
                            p_entity.wordOffset_begin = _match["begin_index"]
                            p_entity.wordOffset_end = _match["end_index"]
                            p_entity.begin_index = begin_index
                            p_entity.end_index = end_index
                if not find_flag:
                    match_add = True
                    entity_text = _match["entity_text"]
                    entity_type = "company"
                    begin_index = changeIndexFromWordToWords(tokens,_match["begin_index"])
                    end_index = changeIndexFromWordToWords(tokens,_match["end_index"])
                    entity_id = "%s_%d_%d_%d"%(doc_id,sentence_index,begin_index,end_index)
                    add_entity = Entity(p_sentence.doc_id,entity_id,entity_text,entity_type,sentence_index,begin_index,end_index,_match["begin_index"],_match["end_index"])
                    list_entity.append(add_entity)
                    range_entity.append(add_entity)
                    list_calibrate.append({"type":"add","from":"","to":entity_text})
        # deduplicate the calibration records by (from, to)
        set_calibrate = set()
        list_match_enterprise = []
        for _calibrate in list_calibrate:
            _from = _calibrate.get("from","")
            _to = _calibrate.get("to","")
            _key = _from+_to
            if _key not in set_calibrate:
                list_match_enterprise.append(_calibrate)
                set_calibrate.add(_key)
        match_enterprise_type = 0
        if match_add:
            match_enterprise_type += 1
        if match_replace:
            match_enterprise_type += 2
        _article.match_enterprise = list_match_enterprise
        _article.match_enterprise_type = match_enterprise_type

def isLegalEnterprise(name):
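    '''
    Heuristic filter for dictionary entries: names that start with 省/市/区/县,
    that look like a short branch/office designation, or that contain
    标段/标包/名称 are not treated as legal enterprise names.
    '''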
    is_legal = True
    if re.search("^[省市区县]",name) is not None or re.search("^.{,3}(分(公司|行|支)|街道|中心|办事处|经营部)$",name) is not None or re.search("标段|标包|名称",name) is not None:
        is_legal = False
    return is_legal

def fix_LEGAL_ENTERPRISE():
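    '''
    Rewrite the enterprise dictionary into enter.txt, keeping only the names
    that pass isLegalEnterprise.
    '''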
    unlegal_enterprise = []
    _path = getEnterprisePath()
    _sum = 0
    with open("enter.txt","w",encoding="utf8") as fwrite:
        fwrite.write("大理市方向电脑经营部\n")
        with open(_path,"r",encoding="utf8") as f:
            while True:
                line = f.readline()
                if not line:
                    break
                _sum += 1
                line = line.strip()
                if isLegalEnterprise(line):
                    fwrite.write(line.replace("(","(").replace(")",")"))
                    fwrite.write("\n")
                # if re.search("标段|地址|标包|名称",line) is not None:#\(|\)||
                #     _count += 1
                #     print("=",line)
        # print("%d/%d"%(_count,_sum))

if __name__=="__main__":
    # edit_distance("GUMBO","GAMBOL")
    # print(jaccard_score("周口经济开发区陈营运粮河两岸拆迁工地土工布覆盖项目竞争性谈判公告","周口经济开发区陈营运粮河两岸拆迁工地土工布覆盖项目-成交公告"))
    #
    # sentences = "广州比地数据科技有限公司比地数据科技有限公司1111111123沈阳南光工贸有限公司"
    # print(match_enterprise_max_first(sentences))
    #
    # print("takes %d s"%(time.time()-_time))
    fix_LEGAL_ENTERPRISE()