# outline_extractor.py
  1. #!/usr/bin/env python3
  2. # -*- coding: utf-8 -*-
  3. """
  4. @author: bidikeji
  5. @time: 2024/7/19 10:05
  6. """
  7. import re
  8. from BiddingKG.dl.interface.htmlparser import ParseDocument,get_childs
  9. class Sentence2():
  10. def __init__(self,text,sentence_index,wordOffset_begin,wordOffset_end):
  11. self.name = 'sentence2'
  12. self.text = text
  13. self.sentence_index = sentence_index
  14. self.wordOffset_begin = wordOffset_begin
  15. self.wordOffset_end = wordOffset_end
  16. def get_text(self):
  17. return self.text
  18. def extract_sentence_list(sentence_list):
  19. new_sentence2_list = []
  20. new_sentence2_list_attach = []
  21. for sentence in sentence_list:
  22. sentence_index = sentence.sentence_index
  23. sentence_text = sentence.sentence_text
  24. begin_index = 0
  25. end_index = 0
  26. for it in re.finditer('([^一二三四五六七八九十,。][一二三四五六七八九十]{1,3}|[^\d\.、,。a-zA-Z]\d{1,2}(\.\d{1,2}){,2})、', sentence_text): # 例:289699210 1、招标内容:滑触线及配件2、招标品牌:3、参标供应商经营形式要求:厂家4、参标供应商资质要求:5、
  27. temp = it.group(0)
  28. sentence_text = sentence_text.replace(temp, temp[0] + ',' + temp[1:])
  29. for item in re.finditer('[,。;;!!?]+', sentence_text): # 20240725去掉英文问号,避免网址被分隔
  30. end_index = item.end()
  31. # if end_index!=len(sentence_text):
  32. # # if end_index-begin_index<6 and item.group(0) in [',', ';', ';'] and re.match('[一二三四五六七八九十\d.]+、', sentence_text[begin_index:end_index])==None: # 20240725 注销,避免标题提取错误
  33. # # continue
  34. if end_index != len(sentence_text) and re.match('[一二三四五六七八九十\d.]{1,2}[、,.]+$', sentence_text[begin_index:end_index]): # 避免表格序号和内容在不同表格情况 例:293178161
  35. continue
  36. new_sentence_text = sentence_text[begin_index:end_index]
  37. sentence2 = Sentence2(new_sentence_text,sentence_index,begin_index,end_index)
  38. if sentence.in_attachment:
  39. new_sentence2_list_attach.append(sentence2)
  40. else:
  41. new_sentence2_list.append(sentence2)
  42. begin_index = end_index
  43. if end_index!=len(sentence_text):
  44. end_index = len(sentence_text)
  45. new_sentence_text = sentence_text[begin_index:end_index]
  46. sentence2 = Sentence2(new_sentence_text, sentence_index, begin_index, end_index)
  47. if sentence.in_attachment:
  48. new_sentence2_list_attach.append(sentence2)
  49. else:
  50. new_sentence2_list.append(sentence2)
  51. return new_sentence2_list, new_sentence2_list_attach
  52. requirement_pattern = "(采购需求|需求分析|项目说明|(采购|合同|招标|询比?价|项目|服务|工程|标的|需求|建设)(的?(主要|简要|基本|具体|名称及))?" \
  53. "(内容|概况|概述|范围|信息|规模|简介|介绍|说明|摘要|情况)([及与和]((其它|\w{,2})[要需]求|发包范围|数量))?" \
  54. "|招标项目技术要求|服务要求|服务需求|项目目标|需求内容如下|建设规模)为?([::,]|$)"
  55. winter_pattern = "((乙方|竞得|受让|买受|签约|供货|供应|承做|承包|承建|承销|承保|承接|承制|承担|承修|承租(?:(包))?|入围|入选|竞买|中标|中选|中价|中签|成交|候选)[\u4e00-\u9fa5]{0,5}" \
  56. "(公示)?(信息|概况|情况|名称|联系人|联系方式|负责人)|中标公示单位)为?([::,、]|$)"
  57. aptitude_pattern = "资质(资格)要求|资格(资质)要求|单位要求|资质及业绩要求|((资格|资质|准入)[的及]?(要求|条件|标准|限定|门槛)|竞买资格及要求|供应商报价须知)|按以下要求参与竞买|((报名|应征|竞买|投标|竞投|受让|报价|竞价|竞包|竞租|承租|申请|参与|参选|遴选)的?(人|方|单位|企业|客户|机构)?|供应商|受让方)((必?须|需|应[该当]?)(具备|满足|符合|提供)+以?下?)?的?(一般|基本|主要)?(条件|要求|资格(能力)?|资质)+|乙方应当符合下列要求|参与比选条件|合格的投标人|询价要求"
  58. addr_bidopen_pattern = "([开评]标|开启|评选|比选|磋商|遴选|寻源|采购|招标|竞价|议价|委托|询比?价|比价|谈判|邀标|邀请|洽谈|约谈|选取|抽取|抽选|递交\w{,4}文件)[))]?(时间[与及和、])?(地址|地点)([与及和、]时间)?([::,]|$)|开启([::,]|$)"
  59. addr_bidsend_pattern = "((\w{,4}文件)?(提交|递交)(\w{,4}文件)?|投标)(截止时间[与及和、])?地[点址]([与及和、]截止时间)?([::,]|$)"
  60. pinmu_name_pattern = "采购品目(名称)?([::,]|$)"
  61. policy_pattern = "《.+?(通知|办法|条例|规定|规程|规范|须知|规则|标准|细则|意见|协议|条件|要求|手册|法典|方案|指南|指引|法)》"
  62. not_policy_pattern = "(表|函|书|证|\d页|公告|合同|文件|清单)》$|采购合同|响应方须知|响应文件格式|营业执照|开标一览|采购需求"
  63. correction_pattern = "(更正|更改|修正|修改|变更|延期)(信息|内容|事项|详情)"
  64. def extract_parameters(parse_document):
  65. '''
  66. 通过大纲、预处理后文本正则获取需要字段
  67. :param parse_document: ParseDocument() 方法返回结果
  68. :return:
  69. '''
  70. list_data = parse_document.tree
  71. requirement_text = '' # 采购内容
  72. aptitude_text = '' # 资质要求
  73. addr_bidopen_text = '' # 开标地址
  74. addr_bidsend_text = '' # 投标地址
  75. requirement_scope = [] # 采购内容始末位置
  76. winter_scope = [] # 中标信息始末位置
  77. pinmu_name = '' # 品目名称
  78. list_policy = [] # 政策法规
  79. correction_content = "" # 更正内容
  80. out_lines = []
  81. _find_count = 0
  82. _data_i = -1
  83. while _data_i<len(list_data)-1:
  84. _data_i += 1
  85. _data = list_data[_data_i]
  86. _type = _data["type"]
  87. _text = _data["text"].strip()
  88. # print(_data.keys())
  89. if _type=="sentence":
  90. if _data["sentence_title"] is not None:
  91. if re.search('[((][一二三四五六七八九十}]+[))]|[一二三四五六七八九十]+\s*[..、]|^\d{1,2}[..、][\u4e00-\u9fa5]', _text[:10]):
  92. idx = _text.replace(':', ':').find(':')
  93. outline_text = _text[:idx] if idx >= 4 else _text
  94. out_lines.append((outline_text, _data['sentence_index'], _data['wordOffset_begin']))
  95. if re.search(requirement_pattern,_text[:30]) is not None and re.search('符合采购需求,', _text[:30])==None:
  96. b = (_data['sentence_index'], _data['wordOffset_begin'])
  97. childs = get_childs([_data])
  98. for c in childs:
  99. # requirement_text += c["text"]+"\n"
  100. requirement_text += c["text"]
  101. e = (c['sentence_index'], c["wordOffset_end"]) if len(childs)>0 else (_data['sentence_index'], _data['wordOffset_end'])
  102. requirement_scope.append(b)
  103. requirement_scope.append(e)
  104. _data_i += len(childs)
  105. _data_i -= 1
  106. _data_i = -1
  107. # 中标信息
  108. while _data_i<len(list_data)-1:
  109. _data_i += 1
  110. _data = list_data[_data_i]
  111. _type = _data["type"]
  112. _text = _data["text"].strip()
  113. # print(_data.keys())
  114. if _type=="sentence":
  115. # print('_text',_text)
  116. # print('sentence_title',_data["sentence_title"])
  117. if _data["sentence_title"] is not None:
  118. if re.search(winter_pattern,_text[:30]) is not None:
  119. b = (_data['sentence_index'], _data['wordOffset_begin'])
  120. childs = get_childs([_data])
  121. e = (childs[-1]['sentence_index'], childs[-1]["wordOffset_end"]) if len(childs)>0 else (_data['sentence_index'], _data['wordOffset_end'])
  122. winter_scope.append(b)
  123. winter_scope.append(e)
  124. _data_i += len(childs)
  125. _data_i -= 1
  126. _data_i = -1
  127. # 更正内容
  128. while _data_i < len(list_data) - 1:
  129. _data_i += 1
  130. _data = list_data[_data_i]
  131. _type = _data["type"]
  132. _text = _data["text"].strip()
  133. if _type == "sentence":
  134. if _data["sentence_title"] is not None:
  135. if re.search(correction_pattern, _text[:20]) is not None:
  136. childs = get_childs([_data])
  137. correction_text = ""
  138. for c in childs:
  139. correction_text += c["text"].strip()
  140. # print('correction_text',correction_text)
  141. correction_content += correction_text
  142. _data_i += len(childs)
  143. _data_i -= 1
  144. _data_i = -1
  145. while _data_i<len(list_data)-1:
  146. _data_i += 1
  147. _data = list_data[_data_i]
  148. _type = _data["type"]
  149. _text = _data["text"].strip()
  150. # print(_data.keys())
  151. if _type=="sentence":
  152. # print("aptitude_pattern", _text)
  153. if _data["sentence_title"] is not None:
  154. # print("aptitude_pattern",_text)
  155. # outline = re.sub('(?[一二三四五六七八九十\d.]+)?\s*、?', '',
  156. # re.split('[::,]', _text)[0].replace('(', '(').replace(')', ')'))
  157. if re.search(aptitude_pattern,_text[:15]) is not None:
  158. childs = get_childs([_data])
  159. for c in childs:
  160. aptitude_text += c["text"]
  161. # if c["sentence_title"]:
  162. # aptitude_text += c["text"]+"\n"
  163. # else:
  164. # aptitude_text += c["text"]
  165. _data_i += len(childs)
  166. _data_i -= 1
  167. # elif re.match('[((\s★▲\*]?[一二三四五六七八九十\dⅠⅡⅢⅣⅤⅥⅦⅧⅨⅩⅪⅫ]+', _text) and len(_text)<30 and re.search('资质|资格', _text):
  168. # out_lines.append(outline)
  169. if _type=="table":
  170. list_table = _data["list_table"]
  171. parent_title = _data["parent_title"]
  172. if list_table is not None:
  173. for line in list_table[:2]:
  174. for cell_i in range(len(line)):
  175. cell = line[cell_i]
  176. cell_text = cell[0]
  177. if len(cell_text)>120 and re.search(aptitude_pattern,cell_text) is not None:
  178. aptitude_text += cell_text+"\n"
  179. _data_i = -1
  180. while _data_i < len(list_data) - 1:
  181. _data_i += 1
  182. _data = list_data[_data_i]
  183. _type = _data["type"]
  184. _text = _data["text"].strip()
  185. # print(_data.keys())
  186. if _type == "sentence":
  187. if _data["sentence_title"] is not None:
  188. if re.search(addr_bidopen_pattern, _text[:20]) is not None:
  189. childs = get_childs([_data], max_depth=1)
  190. for c in childs:
  191. addr_bidopen_text += c["text"]
  192. _data_i += len(childs)
  193. _data_i -= 1
  194. elif re.search(addr_bidsend_pattern, _text[:20]):
  195. childs = get_childs([_data], max_depth=1)
  196. for c in childs:
  197. addr_bidsend_text += c["text"]
  198. _data_i += len(childs)
  199. _data_i -= 1
  200. elif re.search(pinmu_name_pattern, _text):
  201. childs = get_childs([_data], max_depth=1)
  202. for c in childs:
  203. pinmu_name += c["text"]
  204. _data_i += len(childs)
  205. _data_i -= 1
  206. _data_i = -1
  207. while _data_i<len(list_data)-1:
  208. _data_i += 1
  209. _data = list_data[_data_i]
  210. _type = _data["type"]
  211. _text = _data["text"].strip()
  212. # print(_data.keys())
  213. if _type=="sentence":
  214. for it in re.finditer(policy_pattern, _text):
  215. if it not in list_policy:
  216. list_policy.append(it.group(0))
  217. ser = re.search('地[址点][:为](?P<addr>([\w()()【】]{2,25}([省市县区州旗]|采购网|平台|公司)[\w()()【】-]{,60}))[,。]', addr_bidopen_text)
  218. addr_bidopen_text = ser.group('addr') if ser else ''
  219. ser = re.search('地[址点][:为](?P<addr>([\w()()【】]{2,25}([省市县区州旗]|采购网|平台|公司)[\w()()【】-]{,60}))[,。]', addr_bidsend_text)
  220. addr_bidsend_text = ser.group('addr') if ser else ''
  221. if re.search('开启', addr_bidopen_text) and re.search('时间:\d{2,4}年\d{1,2}月\d{1,2}日', addr_bidopen_text) and len(addr_bidopen_text)<40: # 优化类似 364991684只有时间没地址情况
  222. addr_bidopen_text = ""
  223. ser = re.search(pinmu_name_pattern, pinmu_name)
  224. if ser:
  225. pinmu_name = pinmu_name[ser.end():]
  226. if re.search('[^\w]$', pinmu_name):
  227. pinmu_name = pinmu_name[:-1]
  228. return requirement_text, aptitude_text, addr_bidopen_text, addr_bidsend_text, out_lines, requirement_scope, pinmu_name, list_policy, winter_scope,correction_content
  229. def extract_addr(content):
  230. '''
  231. 通过正则提取地址
  232. :param content: 公告预处理后文本
  233. :return:
  234. '''
  235. addr_bidopen_text = ''
  236. ser = re.search('([开评]标|开启|评选|比选|磋商|遴选|寻源|采购|招标|竞价|议价|委托|询比?价|比价|谈判|邀标|邀请|洽谈|约谈|选取|抽取|抽选|递交\w{,4}文件))?(会议)?地[点址]([((]网址[))])?[:为][^,;。]{2,100}[,;。]', content)
  237. if ser:
  238. addr_bidopen_text = ser.group(0)
  239. return addr_bidopen_text
  240. if __name__ == "__main__":
  241. # with open('D:\html/2.html', 'r', encoding='UTF-8') as f:
  242. # html = f.read()
  243. #
  244. l = []
  245. import pandas as pd
  246. from collections import Counter
  247. from BiddingKG.dl.interface import Preprocessing
  248. from BiddingKG.dl.interface.get_label_dic import get_all_label
  249. from bs4 import BeautifulSoup
  250. import json
  251. # df = pd.read_excel('E:/公告招标内容提取结果2.xlsx')
  252. # df['len']= df['招标内容'].apply(lambda x: len(x))
  253. # print(len(df), sum(df['len']),sum(df['len'])/len(df), max(df['len']), min(df['len']))
  254. # print(len([it for it in df['len'] if it>1500]))
  255. # df = pd.read_csv(r'E:\channel分类数据\2022年每月两天数据/指定日期_html2022-12-10.csv')
  256. # df1 = pd.read_excel('E:/公告招标内容提取结果.xlsx')
  257. # df = df[df['docid'].isin(df1['docid'])]
  258. #
  259. # df.drop_duplicates(subset=['docchannel', 'web_source_name', 'exist_table'], inplace=True)
  260. # print(df.columns, len(df))
  261. #
  262. #
  263. # # def get_text(html):
  264. # # soup = BeautifulSoup(html, 'lxml')
  265. # # text = soup.get_text()
  266. # # return text
  267. # # df['content'] = df['dochtmlcon'].apply(lambda x: get_text(x))
  268. # # df['标签'] = df.apply(lambda x: get_all_label(x['doctitle'], x['content']), axis=1)
  269. # # df['标签'] = df['标签'].apply(lambda x: json.dumps(x, ensure_ascii=False, indent=2))
  270. # # df1 = df[['docid', '标签']]
  271. #
  272. # n = 0
  273. # datas = []
  274. # for id,title, html in zip(df['docid'],df['doctitle'], df['dochtmlcon']):
  275. # # if id not in [289647738, 289647739]:
  276. # # continue
  277. # # print(id, type(id))
  278. # # parse_document = ParseDocument(html, True)
  279. # # requirement_text, aptitude_text = extract_parameters(parse_document)
  280. # # if re.search('资\s*[格质]', html)==None:
  281. # # continue
  282. #
  283. # list_articles, list_sentences, list_entitys, list_outlines, _cost_time = Preprocessing.get_preprocessed([[id,html,"","",title,'', '']],useselffool=True)
  284. # sentence2_list, sentence2_list_attach = extract_sentence_list(list_sentences[0])
  285. #
  286. # # sentence2_list = []
  287. #
  288. # parse_document = ParseDocument(html, True, list_obj=sentence2_list)
  289. # requirement_text, aptitude_text = extract_parameters(parse_document)
  290. # # if len(aptitude_text)>0:
  291. # # datas.append((id, aptitude_text[:1500]))
  292. # # print(id, aptitude_text[:10], aptitude_text[-20:])
  293. # # else:
  294. # # parse_document = ParseDocument(html, True, list_obj=sentence2_list_attach)
  295. # # requirement_text, aptitude_text = extract_parameters(parse_document)
  296. #
  297. # # if 0<len(aptitude_text)<20:
  298. # # l.append(len(aptitude_text))
  299. # # n += 1
  300. # # print(id, aptitude_text)
  301. # # if n > 5:
  302. # # break
  303. #
  304. # if len(requirement_text)>0:
  305. # label_dic = get_all_label(title, list_articles[0].content)
  306. # # datas.append((id, requirement_text))
  307. # datas.append((id, requirement_text, label_dic))
  308. #
  309. # c = Counter(out_lines)
  310. # print(c.most_common(1000))
  311. # #
  312. # # df = pd.DataFrame(datas, columns=['docid', '资质要求'])
  313. # # df.to_excel('E:/公告资质要求提取结果.xlsx')
  314. #
  315. # df = pd.DataFrame(datas, columns=['docid', '招标内容', '标签'])
  316. # df['标签'] = df['标签'].apply(lambda x: json.dumps(x, ensure_ascii=False, indent=2))
  317. # df.to_excel('E:/公告招标内容提取结果2.xlsx')
  318. # if len(aptitude_text)> 1000:
  319. # print(id, aptitude_text[:10], aptitude_text[-20:])
  320. # print(Counter(l).most_common(50))
  321. # print(len(df), len(l), min(l), max(l), sum(l)/len(l))
  322. # n1 = len([it for it in l if it < 500])
  323. # n2 = len([it for it in l if it < 1000])
  324. # n3 = len([it for it in l if it < 1500])
  325. # n4 = len([it for it in l if it < 2000])
  326. # print(n1, n2, n3, n4, n1/len(l), n2/len(l), n3/len(l), n4/len(l))
  327. # parse_document = ParseDocument(html,True)
  328. # requirement_text, new_list_policy, aptitude_text = extract_parameters(parse_document)
  329. # print(aptitude_text)
  330. # sentence_text = '5、要求:3.1投标其他条件:1、中国宝武集团项目未列入禁入名单的投标人。2、具有有效的营业执照;'
  331. # begin_index = 0
  332. # for item in re.finditer('[,。;;!!??]+', sentence_text):
  333. # end_index = item.end()
  334. # if end_index != len(sentence_text):
  335. # if end_index - begin_index < 6:
  336. # continue
  337. # new_sentence_text = sentence_text[begin_index:end_index]
  338. # print(new_sentence_text)
  339. # df = pd.read_excel('E:/公告资质要求提取结果.xlsx')
  340. # docids = []
  341. # pos = neg = 0
  342. # for docid, text in zip(df['docid'], df['资质要求']):
  343. # if re.match('[((\s★▲\*]?[一二三四五六七八九十\dⅠⅡⅢⅣⅤⅥⅦⅧⅨⅩⅪⅫ]+', text) and re.search(aptitude_pattern, text[:15]):
  344. # pos += 1
  345. # pass
  346. # else:
  347. # neg += 1
  348. # print(docid, text[:50])
  349. # docids.append(docid)
  350. # print('异常:%d, 正常:%d'%(neg, pos))
  351. # print(docids)