# entityLink.py
#coding:UTF8
'''
Created on 2019-05-21
@author: User
'''
import re
import os
import time
import pandas as pd
# Timestamp taken before the heavy project imports below; referenced by the
# (commented-out) timing printout in the __main__ block.
_time = time.time()
from BiddingKG.dl.common.Utils import *
from BiddingKG.dl.interface.Entitys import *
import json
from BiddingKG.dl.common.constDict import ConstDict
  15. def edit_distance(source,target):
  16. dp = [["" for i in range(len(source)+1)] for j in range(len(target)+1)]
  17. for i in range(len(dp)):
  18. for j in range(len(dp[i])):
  19. if i==0:
  20. dp[i][j] = j
  21. elif j==0:
  22. dp[i][j] = i
  23. else:
  24. if source[j-1]==target[i-1]:
  25. cost = 0
  26. else:
  27. cost = 2
  28. dp[i][j] = min([dp[i-1][j]+1,dp[i][j-1]+1,dp[i-1][j-1]+cost])
  29. return dp[-1][-1]
  30. def jaccard_score(source,target):
  31. source_set = set([s for s in source])
  32. target_set = set([s for s in target])
  33. if len(source_set)==0 or len(target_set)==0:
  34. return 0
  35. return max(len(source_set&target_set)/len(source_set),len(source_set&target_set)/len(target_set))
  36. def get_place_list():
  37. path = os.path.dirname(__file__) + '/../place_info.csv'
  38. place_df = pd.read_csv(path)
  39. place_list = []
  40. for index, row in place_df.iterrows():
  41. place_list.append(row[1])
  42. place_list.append('台湾')
  43. place_list.append('澳门')
  44. place_list.append('香港')
  45. # place_list.append('東莞')
  46. # place_list.append('廣州')
  47. # place_list.append('韩国')
  48. # place_list.append('德国')
  49. # place_list.append('英国')
  50. # place_list.append('日本')
  51. # place_list.append('意大利')
  52. # place_list.append('新加坡')
  53. # place_list.append('加拿大')
  54. # place_list.append('西班牙')
  55. # place_list.append('澳大利亚')
  56. # place_list.append('美国')
  57. place_list = list(set(place_list))
  58. return place_list
# Place names are loaded once at import time; used for the same-region check in
# link_entitys() (and the disabled similarity code).
place_list = get_place_list()
# Regex alternation over every known place name.
place_pattern = "|".join(place_list)
def link_entitys(list_entitys,on_value=1):#on_value=0.81
    """Normalise linked org/company entities within each document.

    Two passes over the (capped) org/company entities:
    1. an entity whose text lacks "公司" takes the text of a longer linked
       entity ending in "公司";
    2. (2021/12/21) an entity takes the text of its longest dictionary-matched
       linked entity, provided both names mention exactly the same places.

    :param list_entitys: list of per-document entity lists (mutated in place)
    :param on_value: similarity threshold for the disabled jaccard linking step
    """
    for list_entity in list_entitys:
        # collect org/company entities only, capped at 1000 per document
        range_entity = []
        for _entity in list_entity:
            if _entity.entity_type in ["org","company"]:
                range_entity.append(_entity)
        range_entity = range_entity[:1000]
        # NOTE: the similarity-based linking below was found problematic and disabled.
        # for first_i in range(len(range_entity)):
        #     _entity = range_entity[first_i]
        #     for second_i in range(first_i+1,len(range_entity)):
        #         _ent = range_entity[second_i]
        #         # 2021/5/21 update: skip when the two labels are mutually exclusive
        #         # (one tenderer, one agent) and the texts differ
        #         if _entity.entity_text != _ent.entity_text and _entity.label != _ent.label and _entity.label in [0,1] and _ent.label in [0, 1]:
        #             continue
        #         _score = jaccard_score(re.sub("%s|%s"%("股份|责任|有限|公司",place_pattern),"",_entity.entity_text), re.sub("%s|%s"%("股份|责任|有限|公司",place_pattern),"",_ent.entity_text))
        #         if _entity.entity_text!=_ent.entity_text and _score>=on_value:
        #             _entity.linked_entitys.append(_ent)
        #             _ent.linked_entitys.append(_entity)
        # Pass 1: replace a name lacking "公司" with a longer linked name ending in "公司".
        for _entity in range_entity:
            if re.search("公司",_entity.entity_text) is None:
                for _ent in _entity.linked_entitys:
                    if re.search("公司$",_ent.entity_text) is not None:
                        if len(_ent.entity_text)>len(_entity.entity_text):
                            _entity.entity_text = _ent.entity_text
        # Pass 2 (2021/12/21): replace with the longest dictionary-matched linked
        # entity when both names reference the same set of places.
        for _entity in range_entity:
            used_linked_entitys = []
            if not _entity.linked_entitys:
                continue
            _entity.linked_entitys.sort(key=lambda x: len(x.entity_text), reverse=True)
            for _ent in _entity.linked_entitys:
                # NOTE(review): `break` (not `continue`) on the first already-seen
                # linked entity — confirm the early exit is intended.
                if _ent in used_linked_entitys:
                    break
                if _ent.if_dict_match == 1:
                    if len(_ent.entity_text) > len(_entity.entity_text):
                        # require both names to mention exactly the same places
                        match_list_1, match_list_2 = [], []
                        for place in place_list:
                            if place in _entity.entity_text:
                                match_list_1.append(place)
                            if place in _ent.entity_text:
                                match_list_2.append(place)
                        if str(match_list_1) == str(match_list_2):
                            _entity.origin_entity_text = _entity.entity_text
                            _entity.entity_text = _ent.entity_text
                # mark as processed (placement reconstructed from a flattened
                # listing — TODO confirm against the original indentation)
                used_linked_entitys.append(_ent)
  113. # 用于去重的标题
  114. def doctitle_refine(doctitle):
  115. _doctitle_refine = re.sub(r'工程|服务|询价|比价|谈判|竞争性|磋商|结果|中标|招标|采购|的|公示|公开|成交|公告|评标|候选人|'
  116. r'交易|通知|废标|流标|终止|中止|一笔|预告|单一来源|竞价|合同', '', doctitle)
  117. return _doctitle_refine
  118. # 前100个公司实体
  119. def get_nlp_enterprise(list_entity):
  120. nlp_enterprise = []
  121. nlp_enterprise_attachment = []
  122. max_num = 100
  123. list_entity = sorted(list_entity,key=lambda x:(x.sentence_index,x.begin_index))
  124. for entity in list_entity:
  125. if entity.entity_type in ['org','company']:
  126. if not entity.in_attachment:
  127. if entity.entity_text not in nlp_enterprise:
  128. nlp_enterprise.append(entity.entity_text)
  129. else:
  130. if entity.entity_text not in nlp_enterprise_attachment:
  131. nlp_enterprise_attachment.append(entity.entity_text)
  132. return nlp_enterprise[:max_num],nlp_enterprise_attachment[:max_num]
  133. ENTERPRISE_HUGE = None
  134. def getEnterprisePath():
  135. global ENTERPRISE_HUGE
  136. filename_huge = "LEGAL_ENTERPRISE_HUGE.txt"
  137. huge_path = getFileFromSysPath(filename_huge)
  138. if huge_path is None:
  139. if os.path.exists(filename_huge):
  140. log("enterprise path:%s"%(filename_huge))
  141. ENTERPRISE_HUGE = True
  142. return filename_huge,ENTERPRISE_HUGE
  143. else:
  144. log("enterprise path:%s"%(huge_path))
  145. ENTERPRISE_HUGE = True
  146. return huge_path,ENTERPRISE_HUGE
  147. filename = "LEGAL_ENTERPRISE.txt"
  148. real_path = getFileFromSysPath(filename)
  149. if real_path is None:
  150. real_path = filename
  151. log("ENTERPRISE path:%s"%(real_path))
  152. ENTERPRISE_HUGE = False
  153. return real_path,ENTERPRISE_HUGE
# Global state for the enterprise dictionary.
DICT_ENTERPRISE_DONE = False  # True once getDict_enterprise() has finished loading
POOL_REDIS = None  # lazily-created Redis connection pool (huge-dictionary mode)
ENTERPRISE_KEY_LEN = 3  # name-prefix length used as the first-pass match key
ENTERPRISE_PREFIX_LEN = 3
ENTERPRISE_TAIL_LEN = 3  # name-suffix length checked against SET_TAIL_ENTERPRISE
SET_ENTERPRISE = set()  # full names (small-dictionary mode only)
SET_PREFIX_ENTERPRISE = set()  # 3-char name prefixes
SET_TAIL_ENTERPRISE = set()  # 3-char name suffixes
# Pickled caches of the prefix/tail sets (huge-dictionary mode).
SET_PREFIX_ENTERPRISE_HUGE_FILE = "SET_PREFIX_ENTERPRISE_HUGE.pk"
SET_TAIL_ENTERPRISE_HUGE_FILE = "SET_TAIL_ENTERPRISE_HUGE.pk"
def getDict_enterprise():
    """Load the enterprise dictionary into the module-level sets.

    Small-dictionary mode fills SET_ENTERPRISE (full names) plus the
    prefix/tail sets; huge-dictionary mode keeps only the prefix/tail sets and
    caches them as pickles so later runs skip re-reading the big text file.
    Sets DICT_ENTERPRISE_DONE when finished (polled by
    match_enterprise_max_first); intended to run in a background thread.
    """
    global DICT_ENTERPRISE_DONE,SET_ENTERPRISE,SET_PREFIX_ENTERPRISE,SET_TAIL_ENTERPRISE
    real_path,is_huge = getEnterprisePath()
    _ok = False
    if is_huge:
        # try the pickled caches first
        if os.path.exists(SET_PREFIX_ENTERPRISE_HUGE_FILE) and os.path.exists(SET_TAIL_ENTERPRISE_HUGE_FILE):
            SET_PREFIX_ENTERPRISE = load(SET_PREFIX_ENTERPRISE_HUGE_FILE)
            SET_TAIL_ENTERPRISE = load(SET_TAIL_ENTERPRISE_HUGE_FILE)
            _ok = True
    if not _ok:
        with open(real_path,"r",encoding="UTF8") as f:
            for _e in f:
                if not _e:
                    continue
                _e = _e.strip()
                if len(_e)>=4:  # ignore implausibly short names
                    key_enter = _e[:ENTERPRISE_KEY_LEN]
                    SET_PREFIX_ENTERPRISE.add(key_enter)
                    SET_TAIL_ENTERPRISE.add(_e[-ENTERPRISE_TAIL_LEN:])
                    if not is_huge:
                        SET_ENTERPRISE.add(_e)
        # the cached pickles are written only in huge-file mode
        if is_huge:
            save(SET_PREFIX_ENTERPRISE,SET_PREFIX_ENTERPRISE_HUGE_FILE)
            save(SET_TAIL_ENTERPRISE,SET_TAIL_ENTERPRISE_HUGE_FILE)
    # NOTE: load/save/log/sys presumably come from the Utils star-import — TODO confirm
    log("SET_PREFIX_ENTERPRISE takes memory:%.2fM size:%d"%(sys.getsizeof(SET_PREFIX_ENTERPRISE)/1024/1024,len(SET_PREFIX_ENTERPRISE)))
    log("SET_TAIL_ENTERPRISE takes memory:%.2fM size:%d"%(sys.getsizeof(SET_TAIL_ENTERPRISE)/1024/1024,len(SET_TAIL_ENTERPRISE)))
    log("SET_ENTERPRISE takes memory:%.2fM size:%d"%(sys.getsizeof(SET_ENTERPRISE)/1024/1024,len(SET_ENTERPRISE)))
    DICT_ENTERPRISE_DONE = True
  202. def init_redis_pool():
  203. from BiddingKG.dl.common.pool import ConnectorPool
  204. from BiddingKG.dl.common.source import getConnect_redis_baseline
  205. global POOL_REDIS
  206. if POOL_REDIS is None:
  207. POOL_REDIS = ConnectorPool(init_num=1,max_num=10,method_init=getConnect_redis_baseline)
  208. # 插入 Redis
  209. # def add_redis(company_list):
  210. # global ENTERPRISE_HUGE,POOL_REDIS
  211. # if ENTERPRISE_HUGE:
  212. # _db = POOL_REDIS.getConnector()
  213. # for enterprise_name in company_list:
  214. # _v = _db.get(enterprise_name)
  215. # if _v is None:
  216. # if isLegalNewName(enterprise_name):
  217. # _db.set(enterprise_name,1)
  218. # 新实体合法判断
  219. def isLegalNewName(enterprise_name):
  220. # head_character_list = ["[",'【',"(",'(']
  221. # tail_character_list = ["]",'】',")",')']
  222. # 名称开头判断
  223. if re.search("^[\da-zA-Z][^\da-zA-Z]|"
  224. "^[^\da-zA-Z\u4e00-\u9fa5\[【((]|"
  225. "^[\[【((].{,1}[\]】))]|"
  226. "^[0〇]|"
  227. "^(20[0-2][0-9]|[0-2]?[0-9]年|[0-1]?[0-9]月|[0-3]?[0-9]日)",enterprise_name):
  228. return -1
  229. if len(re.findall("[\u4e00-\u9fa5]",enterprise_name))<2:
  230. return -1
  231. if re.search("╳|*|\*|×|xx|XX",enterprise_name):
  232. return -1
  233. if re.search("^(省|自治[县州区]|市|县|区|镇|乡|街道)",enterprise_name) and not re.search("^(镇江|乡宁|镇原|镇海|镇安|镇巴|镇坪|镇赉|镇康|镇沅|镇雄|镇远|镇宁|乡城|镇平|市中|市南|市北)",enterprise_name):
  234. return -1
  235. if re.search("\d{1,2}:\d{2}(:\d{2})?|(rar|xlsx|zip|png|jpg|swf|docx|txt|pdf|PDF|doc|xls|bmp|&?nbsp)",enterprise_name):
  236. return -1
  237. if re.search("(招标|代理)(人|机构)|联系(人|方式)|中标|候选|第.名",enterprise_name):
  238. return -1
  239. if re.search("[a-zA-Z\d]{1,2}(包|标段?)|第.批"):
  240. return 0
  241. return 1
# Drop entities whose Redis value is 0 (known-bad names); huge-dictionary mode only.
def enterprise_filter(entity_list):
    """Remove org/company entities flagged invalid (value 0) in Redis.

    No-op unless ENTERPRISE_HUGE mode is active.  Mutates entity_list in place
    and returns it.  Redis errors are logged and the batch is kept.
    """
    global ENTERPRISE_HUGE,SET_ENTERPRISE,POOL_REDIS
    if ENTERPRISE_HUGE:
        if POOL_REDIS is None:
            init_redis_pool()
        _db = POOL_REDIS.getConnector()
        remove_list = []
        try:
            for entity in entity_list:
                if entity.entity_type in ['company','org']:
                    _v = _db.get(entity.entity_text)
                    # NOTE(review): redis-py normally returns bytes/str, so `_v==0`
                    # only holds if the client decodes to ints — verify pool config.
                    if _v==0:
                        remove_list.append(entity)
        except Exception as e:
            traceback.print_exc()
        POOL_REDIS.putConnector(_db)
        for _entity in remove_list:
            entity_list.remove(_entity)
    return entity_list
  262. def is_enterprise_exist(enterprise_name):
  263. global ENTERPRISE_HUGE,SET_ENTERPRISE,POOL_REDIS
  264. # print("test",enterprise_name)
  265. if ENTERPRISE_HUGE:
  266. if POOL_REDIS is None:
  267. init_redis_pool()
  268. _db = POOL_REDIS.getConnector()
  269. try:
  270. _time = time.time()
  271. _v = _db.get(enterprise_name)
  272. POOL_REDIS.putConnector(_db)
  273. if _v is None:
  274. return False
  275. else:
  276. if _v:
  277. log("redis take %.5f of '%s' exists"%(time.time()-_time,enterprise_name))
  278. return True
  279. else:
  280. return False
  281. except Exception as e:
  282. traceback.print_exc()
  283. return False
  284. else:
  285. if enterprise_name in SET_ENTERPRISE:
  286. return True
  287. else:
  288. return False
import threading
import time
# Load the enterprise dictionary in the background so module import is not
# blocked; match_enterprise_max_first() polls DICT_ENTERPRISE_DONE before matching.
load_enterprise_thread = threading.Thread(target=getDict_enterprise)
load_enterprise_thread.start()
MAX_ENTERPRISE_LEN = 30  # longest enterprise name the matcher will consider
def match_enterprise_max_first(sentence):
    """Greedy longest-first dictionary matching of enterprise names.

    Blocks until the background dictionary load has finished, then scans the
    sentence: a 3-char window that is a known name prefix triggers candidate
    substrings tried from longest to shortest; a candidate whose 3-char tail is
    a known suffix is confirmed via is_enterprise_exist().

    :param sentence: plain text of one sentence
    :return: list of {"entity_text","begin_index","end_index"} dicts with
        character offsets into *sentence*
    """
    # wait for the background loader thread
    while True:
        if not DICT_ENTERPRISE_DONE:
            time.sleep(1)
        else:
            break
    list_match = []
    begin_index = 0
    if len(sentence)>4:
        while True:
            if begin_index+ENTERPRISE_KEY_LEN<len(sentence):
                key_enter = sentence[begin_index:begin_index+ENTERPRISE_KEY_LEN]
                if key_enter in SET_PREFIX_ENTERPRISE:
                    # candidates from longest to shortest, capped by MAX_ENTERPRISE_LEN
                    _len = min(MAX_ENTERPRISE_LEN-ENTERPRISE_KEY_LEN+1,len(sentence)-begin_index)
                    for _i in range(_len):
                        enter_name = sentence[begin_index:begin_index+_len-_i]
                        enter_tail = enter_name[-ENTERPRISE_TAIL_LEN:]
                        if enter_tail in SET_TAIL_ENTERPRISE:
                            if is_enterprise_exist(enter_name):
                                match_item = {"entity_text":"%s"%(enter_name),"begin_index":begin_index,"end_index":begin_index+len(enter_name)}
                                list_match.append(match_item)
                                # skip past the match (the unconditional +1 below
                                # completes the jump)
                                begin_index += len(enter_name)-1
                                break
                begin_index += 1
            else:
                break
    return list_match
def calibrateEnterprise(list_articles,list_sentences,list_entitys):
    """Reconcile model-extracted entities with dictionary matches per article.

    For each sentence, dictionary matches from match_enterprise_max_first()
    are compared against the existing org/company/location entities:
    overlapping entities are renamed/re-typed to the dictionary form, and
    dictionary hits with no overlap are appended as new company entities.
    Each article receives `match_enterprise` (the de-duplicated calibration
    log) and `match_enterprise_type` (bit 1 = additions, bit 2 = replacements).

    NOTE(review): indentation reconstructed from a flattened listing — the
    break/continue depths in the containment branch should be verified.
    """
    for _article,list_sentence,list_entity in zip(list_articles,list_sentences,list_entitys):
        list_calibrate = []  # log of {"type","from","to"} calibration records
        match_add = False  # any entity added from the dictionary
        match_replace = False  # any entity text replaced by the dictionary form
        # candidate entities, capped at ~1000 for performance
        range_entity = []
        for p_entity in list_entity:
            if p_entity.entity_type in ("org","company","location"):
                range_entity.append(p_entity)
            if len(range_entity)>1000:
                break
        for p_sentence in list_sentence:
            sentence = p_sentence.sentence_text
            # (text, word_begin, word_end) triples already present in this sentence
            sentence_entitys = [(ent.entity_text,ent.wordOffset_begin,ent.wordOffset_end) for ent in list_entity if ent.sentence_index==p_sentence.sentence_index and ent.entity_type in ['org','company']]
            list_match = match_enterprise_max_first(sentence)
            doc_id = p_sentence.doc_id
            sentence_index = p_sentence.sentence_index
            tokens = p_sentence.tokens
            list_match.sort(key=lambda x:x["begin_index"])
            for _match_index in range(len(list_match)):
                _match = list_match[_match_index]
                find_flag = False  # did this match overlap any existing entity?
                for p_entity in range_entity:
                    if p_entity.sentence_index!=p_sentence.sentence_index:
                        continue
                    # an exact-text location hit is promoted to a company
                    if p_entity.entity_type=="location" and p_entity.entity_text==_match["entity_text"]:
                        find_flag = True
                        p_entity.entity_type = "company"
                        p_entity.if_dict_match = 1
                    if p_entity.entity_type not in ["location","org","company"]:
                        continue
                    if _match["entity_text"] == p_entity.entity_text:
                        p_entity.if_dict_match = 1
                    # --- overlap handling ---
                    # case 1: the match lies fully inside the entity span
                    if _match["begin_index"]>=p_entity.wordOffset_begin and _match["end_index"]<=p_entity.wordOffset_end:
                        find_flag = True
                        # find how many consecutive matches fit inside this entity
                        for _match_j in range(_match_index,len(list_match)):
                            if not list_match[_match_j]["end_index"]<=p_entity.wordOffset_end:
                                _match_j -= 1
                                break
                        # NOTE(review): if the loop completes without break,
                        # _match_j stays at len(list_match)-1.
                        if _match_j>_match_index:
                            # the entity actually spans several companies: shrink it to
                            # the first match and append the rest as new entities
                            match_replace = True
                            match_add = True
                            begin_index = changeIndexFromWordToWords(tokens,_match["begin_index"])
                            end_index = changeIndexFromWordToWords(tokens,_match["end_index"]-1)
                            list_calibrate.append({"type":"update","from":p_entity.entity_text,"to":_match["entity_text"]})
                            p_entity.entity_text = _match["entity_text"]
                            p_entity.wordOffset_begin = _match["begin_index"]
                            p_entity.wordOffset_end = _match["end_index"]
                            p_entity.begin_index = begin_index
                            p_entity.end_index = end_index
                            # this entity now comes from the dictionary
                            p_entity.if_dict_match = 1
                            for _match_h in range(_match_index+1,_match_j+1):
                                entity_text = list_match[_match_h]["entity_text"]
                                entity_type = "company"
                                begin_index = changeIndexFromWordToWords(tokens,list_match[_match_h]["begin_index"])
                                end_index = changeIndexFromWordToWords(tokens,list_match[_match_h]["end_index"]-1)
                                entity_id = "%s_%d_%d_%d"%(doc_id,sentence_index,begin_index,end_index)
                                add_entity = Entity(p_sentence.doc_id,entity_id,entity_text,entity_type,sentence_index,begin_index,end_index,list_match[_match_h]["begin_index"],list_match[_match_h]["end_index"],in_attachment=p_sentence.in_attachment)
                                add_entity.if_dict_match = 1
                                list_entity.append(add_entity)
                                range_entity.append(add_entity)
                                list_calibrate.append({"type":"add","from":"","to":entity_text})
                            # NOTE(review): reassigning the range() loop variable has no
                            # effect on later iterations of the _match_index loop.
                            _match_index = _match_j
                            break
                        continue
                    # case 2: the match starts at/before the entity head and overlaps it
                    elif _match["begin_index"]<=p_entity.wordOffset_begin and _match["end_index"]>p_entity.wordOffset_begin:
                        find_flag = True
                        if _match["begin_index"]<p_entity.wordOffset_begin and _match["end_index"]<=p_entity.wordOffset_end:
                            if p_entity.entity_type in ("org","company"):
                                # NOTE(review): with end_index<=wordOffset_end this slice is
                                # always empty — bounds possibly reversed; verify intent.
                                _diff_text = sentence[p_entity.wordOffset_end:_match["end_index"]]
                                if re.search("分",_diff_text) is not None:
                                    # keep branch-office ("分...") suffixes untouched
                                    pass
                                else:
                                    match_replace = True
                                    begin_index = changeIndexFromWordToWords(tokens,_match["begin_index"])
                                    end_index = changeIndexFromWordToWords(tokens,_match["end_index"]-1)
                                    list_calibrate.append({"type":"update","from":p_entity.entity_text,"to":_match["entity_text"]})
                                    p_entity.entity_text = _match["entity_text"]
                                    p_entity.wordOffset_begin = _match["begin_index"]
                                    p_entity.wordOffset_end = _match["end_index"]
                                    p_entity.begin_index = begin_index
                                    p_entity.end_index = end_index
                                    p_entity.if_dict_match = 1
                        elif _match["end_index"]>=p_entity.wordOffset_end:
                            # the match fully covers the entity; skip if an identical
                            # entity already exists in this sentence
                            if (_match["entity_text"],_match["begin_index"],_match["end_index"]) not in sentence_entitys:
                                match_replace = True
                                begin_index = changeIndexFromWordToWords(tokens,_match["begin_index"])
                                end_index = changeIndexFromWordToWords(tokens,_match["end_index"]-1)
                                list_calibrate.append({"type":"update","from":p_entity.entity_text,"to":_match["entity_text"]})
                                p_entity.entity_text = _match["entity_text"]
                                p_entity.wordOffset_begin = _match["begin_index"]
                                p_entity.wordOffset_end = _match["end_index"]
                                p_entity.begin_index = begin_index
                                p_entity.end_index = end_index
                                p_entity.entity_type = "company"
                                p_entity.if_dict_match = 1
                    # case 3: the match overlaps the entity tail and extends past it
                    elif _match["begin_index"]<p_entity.wordOffset_end and _match["end_index"]>p_entity.wordOffset_end:
                        find_flag = True
                        if p_entity.entity_type in ("org","company"):
                            match_replace = True
                            begin_index = changeIndexFromWordToWords(tokens,_match["begin_index"])
                            end_index = changeIndexFromWordToWords(tokens,_match["end_index"]-1)
                            list_calibrate.append({"type":"update","from":p_entity.entity_text,"to":_match["entity_text"]})
                            p_entity.entity_text = _match["entity_text"]
                            p_entity.wordOffset_begin = _match["begin_index"]
                            p_entity.wordOffset_end = _match["end_index"]
                            p_entity.begin_index = begin_index
                            p_entity.end_index = end_index
                            p_entity.if_dict_match = 1
                if not find_flag:
                    # no overlap with any known entity: add a brand-new company entity
                    match_add = True
                    entity_text = _match["entity_text"]
                    entity_type = "company"
                    begin_index = changeIndexFromWordToWords(tokens,_match["begin_index"])
                    end_index = changeIndexFromWordToWords(tokens,_match["end_index"]-1)
                    entity_id = "%s_%d_%d_%d"%(doc_id,sentence_index,begin_index,end_index)
                    add_entity = Entity(p_sentence.doc_id,entity_id,entity_text,entity_type,sentence_index,begin_index,end_index,_match["begin_index"],_match["end_index"],in_attachment=p_sentence.in_attachment)
                    list_entity.append(add_entity)
                    range_entity.append(add_entity)
                    list_calibrate.append({"type":"add","from":"","to":entity_text})
        # de-duplicate the calibration log by (from, to)
        set_calibrate = set()
        list_match_enterprise = []
        for _calibrate in list_calibrate:
            _from = _calibrate.get("from","")
            _to = _calibrate.get("to","")
            _key = _from+_to
            if _key not in set_calibrate:
                list_match_enterprise.append(_calibrate)
                set_calibrate.add(_key)
        match_enterprise_type = 0
        if match_add:
            match_enterprise_type += 1
        if match_replace:
            match_enterprise_type += 2
        _article.match_enterprise = list_match_enterprise
        _article.match_enterprise_type = match_enterprise_type
  475. def isLegalEnterprise(name):
  476. is_legal = True
  477. if re.search("^[省市区县]",name) is not None or re.search("^\**.{,3}(分(公司|行|支)|街道|中心|办事处|经营部|委员会|有限公司)$",name) or re.search("标段|标包|名称|联系人|联系方式|中标单位|中标人|测试单位|采购单位|采购人|代理人|代理机构|盖章|(主)",name) is not None:
  478. is_legal = False
  479. return is_legal
  480. def fix_LEGAL_ENTERPRISE():
  481. unlegal_enterprise = []
  482. _path = getEnterprisePath()
  483. _sum = 0
  484. set_enter = set()
  485. paths = [_path]
  486. for _p in paths:
  487. with open(_p,"r",encoding="utf8") as f:
  488. while True:
  489. line = f.readline()
  490. if not line:
  491. break
  492. line = line.strip()
  493. if isLegalEnterprise(line):
  494. set_enter.add(line)
  495. if line=="有限责任公司" or line=='设计研究院' or line=='限责任公司' or (re.search("^.{,4}(分公司|支行|分行)$",line) is not None and re.search("电信|移动|联通|建行|工行|农行|中行|交行",line) is None):
  496. print(line)
  497. if line in set_enter:
  498. set_enter.remove(line)
  499. with open("enter.txt","w",encoding="utf8") as fwrite:
  500. for line in list(set_enter):
  501. fwrite.write(line.replace("(","(").replace(")",")"))
  502. fwrite.write("\n")
  503. # if re.search("标段|地址|标包|名称",line) is not None:#\(|\)||
  504. # _count += 1
  505. # print("=",line)
  506. # print("%d/%d"%(_count,_sum))
  507. # a_list = []
  508. # with open("电信分公司.txt","r",encoding="utf8") as f:
  509. # while True:
  510. # _line = f.readline()
  511. # if not _line:
  512. # break
  513. # if _line.strip()!="":
  514. # a_list.append(_line.strip())
  515. # with open("enter.txt","a",encoding="utf8") as f:
  516. # for _line in a_list:
  517. # f.write(_line)
  518. # f.write("\n")
if __name__=="__main__":
    # Assorted manual smoke tests; earlier invocations kept commented for reference.
    # edit_distance("GUMBO","GAMBOL")
    # print(jaccard_score("周口经济开发区陈营运粮河两岸拆迁工地土工布覆盖项目竞争性谈判公告","周口经济开发区陈营运粮河两岸拆迁工地土工布覆盖项目-成交公告"))
    # sentences = "广州比地数据科技有限公司比地数据科技有限公司1111111123沈阳南光工贸有限公司"
    # print(match_enterprise_max_first(sentences))
    # print("takes %d s"%(time.time()-_time))
    # fix_LEGAL_ENTERPRISE()
    # print(jaccard_score("吉林省九台","吉林省建苑设计集团有限公司"))
    print(match_enterprise_max_first("中国南方航空股份有限公司黑龙江分公司"))