# proposedBuildingProject.py
  1. from odps.udf import annotate
  2. from odps.distcache import get_cache_archive
  3. from odps.distcache import get_cache_file
  4. from odps.udf import BaseUDTF
  5. from odps.udf import BaseUDAF
  6. import threading
  7. import logging
  8. logging.basicConfig(level = logging.INFO,format = '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
  9. import time
  10. import uuid
  11. import re
  12. import traceback
  13. from multiprocessing import Process,Queue
  14. def log(msg):
  15. logging.info(msg)
  16. # 配置pandas依赖包
  17. def include_package_path(res_name):
  18. import os, sys
  19. archive_files = get_cache_archive(res_name)
  20. dir_names = sorted([os.path.dirname(os.path.normpath(f.name)) for f in archive_files
  21. if '.dist_info' not in f.name], key=lambda v: len(v))
  22. _path = dir_names[0].split(".zip/files")[0]+".zip/files"
  23. log("add path:%s"%(_path))
  24. sys.path.append(_path)
  25. return _path
# NOTE: loading C-extension libraries may raise "RuntimeError: xxx has been blocked by sandbox",
# because native code is blocked by the sandbox; enable it with "set odps.isolation.session.enable = true".
  28. def include_file(file_name):
  29. import os, sys
  30. so_file = get_cache_file(file_name)
  31. sys.path.append(os.path.dirname(os.path.abspath(so_file.name)))
  32. def include_so(file_name):
  33. import os, sys
  34. so_file = get_cache_file(file_name)
  35. with open(so_file.name, 'rb') as fp:
  36. content=fp.read()
  37. so = open(file_name, "wb")
  38. so.write(content)
  39. so.flush()
  40. so.close()
  41. #初始化业务数据包,由于上传限制,python版本以及archive解压包不统一等各种问题,需要手动导入
  42. def init_env(list_files,package_name):
  43. import os,sys
  44. if len(list_files)==1:
  45. so_file = get_cache_file(list_files[0])
  46. cmd_line = os.path.abspath(so_file.name)
  47. os.system("unzip -o %s -d %s"%(cmd_line,package_name))
  48. elif len(list_files)>1:
  49. cmd_line = "cat"
  50. for _file in list_files:
  51. so_file = get_cache_file(_file)
  52. cmd_line += " "+os.path.abspath(so_file.name)
  53. cmd_line += " > temp.zip"
  54. os.system(cmd_line)
  55. os.system("unzip -o temp.zip -d %s"%(package_name))
  56. # os.system("rm -rf %s/*.dist-info"%(package_name))
  57. # return os.listdir(os.path.abspath("local_package"))
  58. # os.system("echo export LD_LIBRARY_PATH=%s >> ~/.bashrc"%(os.path.abspath("local_package")))
  59. # os.system("source ~/.bashrc")
  60. sys.path.insert(0,os.path.abspath(package_name))
  61. # sys.path.append(os.path.join(os.path.abspath("local_package"),"interface_real"))
  62. def multiLoadEnv():
  63. def load_project():
  64. start_time = time.time()
  65. include_package_path("BiddingKG.backup.zip")
  66. logging.info("init biddingkg.zip.env.line cost %d"%(time.time()-start_time))
  67. def load_vector():
  68. start_time = time.time()
  69. init_env(["wiki_128_word_embedding_new.vector.env"],".")
  70. logging.info("init wiki_128_word_embedding_new cost %d"%(time.time()-start_time))
  71. start_time = time.time()
  72. init_env(["enterprise.zip.env"],".")
  73. # init_env(["LEGAL_ENTERPRISE.zip.env"],".")
  74. logging.info("init legal_enterprise.zip.env cost %d"%(time.time()-start_time))
  75. start_time = time.time()
  76. init_env(["so.env"],".")
  77. logging.info("init so.env cost %d"%(time.time()-start_time))
  78. def load_py():
  79. start_time = time.time()
  80. # self.out = init_env(["envs_py37.zip.env"],str(uuid.uuid4()))
  81. include_package_path("envs_py37.env.zip")
  82. logging.info("init envs_py37 cost %d"%(time.time()-start_time))
  83. load_project()
  84. load_vector()
  85. load_py()
  86. def getPattern():
  87. filename = "proposedBuildingKeyword.zip.env"
  88. init_env([filename],".")
  89. df = pd.read_excel("proposedBuildingKeyword.xlsx")
  90. dict_industry_keywords = {}
  91. for _industry,_keyword in zip(df["类别"],df["关键词"]):
  92. if _industry not in dict_industry_keywords:
  93. dict_industry_keywords[_industry] = set()
  94. dict_industry_keywords[_industry].add(_keyword)
  95. list_industry_p = []
  96. for k,v in dict_industry_keywords.items():
  97. if len(v)>0:
  98. list_industry_p.append("(?P<%s>%s)"%(k,"|".join(list(v))))
  99. _pattern = re.compile("|".join(list_industry_p))
  100. return _pattern
  101. dict_stage = {"设计阶段":"设计",
  102. "环评阶段":"环评",
  103. "施工准备":"监理",
  104. "施工在建":"施工"}
  105. list_stage_v = []
  106. for k,v in dict_stage.items():
  107. list_stage_v.append("(?P<%s>%s)"%(k,v))
  108. stage_pattern = "|".join(list_stage_v)
  109. def extract_industry(content,_pattern):
  110. list_stage = []
  111. for stage_search in re.finditer(_pattern,content):
  112. for k,v in stage_search.groupdict().items():
  113. if v is not None:
  114. list_stage.append(k)
  115. if len(list_stage)>0:
  116. return list_stage[0]
  117. return None
  118. def extract_legal_stage(content):
  119. if re.search("拍卖|转让|产权|出让|租赁|招租|采购",content) is not None:
  120. return None
  121. list_stage = []
  122. for stage_search in re.finditer(stage_pattern,content):
  123. for k,v in stage_search.groupdict().items():
  124. if v is not None:
  125. list_stage.append(k)
  126. if len(list_stage)>0:
  127. return list_stage[-1]
  128. return None
  129. def extract_proportion(content):
  130. _pattern = "(?P<proportion>((建筑|建设)面积|全长)[大概约为是::【\[\s]*[\d,]+(\.\d+)?[十百千万亿]*([\]】平方kK千万公㎡mM米里顷亩]+2?))"
  131. _pattern_search = re.search(_pattern,content)
  132. _proportion = ""
  133. if _pattern_search is not None:
  134. _proportion = _pattern_search.groupdict().get("proportion","")
  135. if _proportion=="":
  136. _pattern = "(?P<proportion>((建筑|建设|区域)?面积|全长|项目规模)[大概约为是::【\[\s]*[\d,]+(\.\d+)?[十百千万亿]*([\]】平方kK千万公㎡mM米里顷亩]+2?))"
  137. _pattern_search = re.search(_pattern,content)
  138. if _pattern_search is not None:
  139. _proportion = _pattern_search.groupdict().get("proportion","")
  140. return _proportion
  141. def extract_projectDigest(content):
  142. _pattern = "(?P<projectDigest>(项目|工程|标的|需求|建设|招标|采购|内容)(概况|规模|简介|信息|范围|内容|说明|摘要).{10,300})"
  143. _pattern_search = re.search(_pattern,content)
  144. _projectDigest = ""
  145. _find = ""
  146. if _pattern_search is not None:
  147. _find = _pattern_search.groupdict().get("projectDigest","")
  148. if len(_find)>0:
  149. _projectDigest = "。".join(_find.split("。")[0:3])
  150. return _projectDigest
  151. def extract_projectAddress(list_sentence,list_entity):
  152. for p_entity in list_entity:
  153. if len(p_entity.entity_text)>10 and p_entity.entity_type=="location":
  154. for _sentence in list_sentence:
  155. if _sentence.sentence_index==p_entity.sentence_index:
  156. _span = spanWindow(tokens=_sentence.tokens,begin_index=p_entity.begin_index,end_index=p_entity.end_index,size=20,center_include=True,word_flag=True,text=p_entity.entity_text)
  157. if re.search("(项目|建设)(地址|地点)",_span[0]) is not None:
  158. return p_entity.entity_text
  159. return None
  160. def extract_begin_end_time(list_sentence,list_entity):
  161. _begin_time = None
  162. _end_time = None
  163. for p_entity in list_entity:
  164. if p_entity.entity_type=="time":
  165. for _sentence in list_sentence:
  166. if _sentence.sentence_index==p_entity.sentence_index:
  167. _span = spanWindow(tokens=_sentence.tokens,begin_index=p_entity.begin_index,end_index=p_entity.end_index,size=20,center_include=True,word_flag=True,text=p_entity.entity_text)
  168. if re.search("开工(时间|日期)",_span[0]) is not None:
  169. _time_temp = timeFormat(p_entity.entity_text)
  170. if len(_time_temp)>0:
  171. _begin_time = _time_temp
  172. if re.search("(竣工|完工)(时间|日期)",_span[0]) is not None:
  173. _time_temp = timeFormat(p_entity.entity_text)
  174. if len(_time_temp)>0:
  175. _end_time = _time_temp
  176. return _begin_time,_end_time
@annotate('bigint,string,string,string -> string,string,string,string,string,string,string,string')
class extract_proposedBuilding(BaseUDTF):
    """UDTF extracting proposed-building project fields from one announcement.

    The heavy BiddingKG imports and preprocessing run in a worker subprocess
    that communicates via task/result queues, so a hung extraction can be
    killed and the worker restarted without taking the whole UDTF down.
    """

    def __init__(self):
        # Load code/vector/python-env archives before importing pandas/numpy.
        multiLoadEnv()
        import pandas as pd
        global pd
        self._pattern = getPattern()
        self.task_queue = Queue()
        self.result_queue = Queue()
        self.deal_process = Process(target=self.f_queue_process,args=(self.task_queue,self.result_queue))
        self.deal_process.start()
        import numpy as np
        # Set after a timeout so the next document gets a longer deadline.
        self.last_timeout = False

    def f_queue_process(self,task_queue,result_queue):
        """Worker-process loop: pull items from task_queue, extract fields, push a result.

        Runs inside a child Process; the heavy BiddingKG dependencies are
        imported here so the parent process stays light. One result dict
        (possibly None) is put on result_queue per consumed item.
        """
        log("start import predict function")
        import BiddingKG.dl.interface.Preprocessing as Preprocessing
        from BiddingKG.dl.common.Utils import spanWindow,timeFormat
        # Publish the helpers used by the module-level extract_* functions.
        global spanWindow,timeFormat
        log("import done")
        while True:
            try:
                item = task_queue.get(True,timeout=10)
                doc_id = item.get("docid","")
                dochtmlcon = item.get("dochtmlcon","")
                doctitle = item.get("doctitle","")
                project_name = item.get("project_name","")
                log("start process docid:%s"%(str(doc_id)))
                # Cheap title-based stage check first; skip preprocessing if it fails.
                _stage = extract_legal_stage(doctitle)
                result_json = None
                if _stage is not None:
                    list_articles,list_sentences,list_entitys,list_outlines,_cost_time = Preprocessing.get_preprocessed([[doc_id,dochtmlcon,"","",doctitle,"",""]],useselffool=True)
                    for list_article,list_sentence,list_entity in zip(list_articles,list_sentences,list_entitys):
                        content = list_article.content
                        _stage = extract_legal_stage(doctitle)
                        if _stage is None:
                            continue
                        _industry = extract_industry(content,self._pattern)
                        if _industry is None:
                            continue
                        _proportion = extract_proportion(content)
                        _projectDigest = extract_projectDigest(content)
                        _projectAddress = extract_projectAddress(list_sentence,list_entity)
                        _begin_time,_end_time = extract_begin_end_time(list_sentence,list_entity)
                        project_name_refind = ""
                        if project_name is not None and len(project_name)>0:
                            # Strip stage words so the refined name is stage-neutral.
                            project_name_refind = re.sub("设计|环评|监理|施工","",project_name)
                        if _stage is not None:
                            result_json = {"_stage":_stage,
                                           "_proportion":_proportion,
                                           "_projectAddress":_projectAddress,
                                           "_projectDigest":_projectDigest,
                                           "_begin_time":_begin_time,
                                           "_end_time":_end_time,
                                           "project_name_refind":project_name_refind,
                                           "_industry":_industry}
                # Always answer the parent, even with None, so its blocking get returns.
                result_queue.put(result_json,True)
                log("end process docid:%s"%(str(doc_id)))
            except Exception as e:
                traceback.print_exc()
                log("get data time out")
                pass

    def process(self,doc_id,dochtmlcon,doctitle,project_name):
        """Per-row entry point: hand the document to the worker, forward its result.

        A small blacklist of known-bad docids is skipped. The worker is
        restarted when it has died or when extraction exceeds the timeout.
        """
        if dochtmlcon is not None and doc_id not in [105677700,126694044,126795572,126951461,71708072,137850637]:
            # Drain stale items possibly left in the queues by a previous document.
            try:
                while(self.task_queue.qsize()>0):
                    self.task_queue.get(timeout=5)
            except Exception as e:
                pass
            try:
                while(self.result_queue.qsize()>0):
                    self.result_queue.get(timeout=5)
            except Exception as e:
                pass
            _item = {"docid":doc_id,"dochtmlcon":dochtmlcon,"doctitle":doctitle,"project_name":project_name}
            try:
                _timeout = 60*4
                if self.last_timeout:
                    # Previous document timed out: give this one extra slack.
                    _timeout += 60*5
                    self.last_timeout = False
                if not self.deal_process.is_alive():
                    log("deal process is down")
                    self.task_queue = Queue()
                    self.deal_process = Process(target=self.f_queue_process,args=(self.task_queue,self.result_queue))
                    self.deal_process.start()
                    # A fresh worker must re-import the heavy dependencies first.
                    _timeout += 60*5
                log("putting item to task_queue with docid:%s"%(str(doc_id)))
                self.task_queue.put(_item)
                result_json = self.result_queue.get(timeout=_timeout)
                if result_json is not None:
                    self.forward(result_json.get("_stage"),result_json.get("_proportion"),result_json.get("_projectDigest"),result_json.get("_projectAddress"),result_json.get("_begin_time"),result_json.get("_end_time"),result_json.get("project_name_refind"),result_json.get("_industry"))
            except Exception as e:
                # Timeout (or queue failure): kill and restart the worker for the next row.
                log("dealing docid %s failed by timeout"%(str(doc_id)))
                self.last_timeout = True
                self.deal_process.kill()
                time.sleep(5)
                self.task_queue = Queue()
                self.deal_process = Process(target=self.f_queue_process,args=(self.task_queue,self.result_queue))
                self.deal_process.start()
  280. @annotate('bigint,string,string,string,string,string,string,string,string,string,string,string,string,string,string,string,string,string,string->string')
  281. class f_remege_proposedBuildingProject(BaseUDAF):
  282. '''
  283. 项目编号、中标单位、len(项目编号)>7、中标单位<> ""、合并后非空招标单位数<2、合并后同公告类型非空金额相同
  284. '''
  285. def __init__(self):
  286. import logging
  287. import json,re
  288. global json,logging,re
  289. logging.basicConfig(level = logging.INFO,format = '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
  290. def new_buffer(self):
  291. return [list()]
  292. def iterate(self, buffer,docid,page_time,province,city,district,tenderee,tenderee_contact,tenderee_phone,agency,
  293. project_code,project_name,stage,proportion,projectDigest,projectAddress,begin_time,end_time,
  294. project_name_refind,industry):
  295. buffer[0].append({"docid":docid,"page_time":page_time,"province":province,"city":city,"district":district,
  296. "tenderee":tenderee,"tenderee_contact":tenderee_contact,"tenderee_phone":tenderee_phone,
  297. "agency":agency,"project_code":project_code,"project_name":project_name,"stage":stage,"proportion":proportion,
  298. "projectDigest":projectDigest,"projectAddress":projectAddress,"begin_time":begin_time,"end_time":end_time,
  299. "project_name_refind":project_name_refind,"industry":industry})
  300. def merge(self, buffer, pbuffer):
  301. buffer[0].extend(pbuffer[0])
  302. def terminate(self, buffer):
  303. list_group = buffer[0]
  304. return json.dumps(list_group,ensure_ascii=False)