outline_extractor.py

#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@author: bidikeji
@time: 2024/7/19 10:05
"""
import re

from BiddingKG.dl.interface.htmlparser import ParseDocument, get_childs


class Sentence2():
    def __init__(self, text, sentence_index, wordOffset_begin, wordOffset_end):
        self.name = 'sentence2'
        self.text = text
        self.sentence_index = sentence_index
        self.wordOffset_begin = wordOffset_begin
        self.wordOffset_end = wordOffset_end

    def get_text(self):
        return self.text


def extract_sentence_list(sentence_list):
    new_sentence2_list = []
    new_sentence2_list_attach = []
    for sentence in sentence_list:
        sentence_index = sentence.sentence_index
        sentence_text = sentence.sentence_text
        begin_index = 0
        end_index = 0
        # insert a comma before inline list numbers so they split into separate clauses,
        # e.g. doc 289699210: "1、招标内容:滑触线及配件2、招标品牌:3、参标供应商经营形式要求:厂家4、参标供应商资质要求:5、"
        for it in re.finditer('([^一二三四五六七八九十,。][一二三四五六七八九十]{1,3}|[^\d\.、,。a-zA-Z]\d{1,2}(\.\d{1,2}){,2})、', sentence_text):
            temp = it.group(0)
            sentence_text = sentence_text.replace(temp, temp[0] + ',' + temp[1:])
        for item in re.finditer('[,。;;!!?]+', sentence_text):  # 20240725: removed the ASCII question mark so URLs are not split
            end_index = item.end()
            # if end_index!=len(sentence_text):
            #     # if end_index-begin_index<6 and item.group(0) in [',', ';', ';'] and re.match('[一二三四五六七八九十\d.]+、', sentence_text[begin_index:end_index])==None:  # 20240725: disabled to avoid breaking title extraction
            #     #     continue
            if end_index != len(sentence_text) and re.match('[一二三四五六七八九十\d.]{1,2}[、,.]+$', sentence_text[begin_index:end_index]):  # skip fragments that are only a list number, e.g. when the number and its content sit in different table cells (doc 293178161)
                continue
            new_sentence_text = sentence_text[begin_index:end_index]
            sentence2 = Sentence2(new_sentence_text, sentence_index, begin_index, end_index)
            if sentence.in_attachment:
                new_sentence2_list_attach.append(sentence2)
            else:
                new_sentence2_list.append(sentence2)
            begin_index = end_index
        if end_index != len(sentence_text):
            end_index = len(sentence_text)
            new_sentence_text = sentence_text[begin_index:end_index]
            sentence2 = Sentence2(new_sentence_text, sentence_index, begin_index, end_index)
            if sentence.in_attachment:
                new_sentence2_list_attach.append(sentence2)
            else:
                new_sentence2_list.append(sentence2)
    return new_sentence2_list, new_sentence2_list_attach
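
# Usage sketch (illustrative only, not called anywhere): extract_sentence_list expects objects
# exposing sentence_index, sentence_text and in_attachment, as produced upstream by
# Preprocessing.get_preprocessed; a minimal stand-in object is enough to see the clause split.
#
#     class _FakeSentence:
#         def __init__(self, text):
#             self.sentence_index = 0
#             self.sentence_text = text
#             self.in_attachment = False
#
#     clauses, _ = extract_sentence_list([_FakeSentence('1、招标内容:滑触线及配件2、招标品牌:国产')])
#     for s in clauses:
#         # each Sentence2 keeps its character offsets within the (re-punctuated) sentence
#         print(s.sentence_index, s.wordOffset_begin, s.wordOffset_end, s.get_text())
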
# heading patterns used to locate sections in the outline tree
requirement_pattern = "(采购需求|需求分析|项目说明|(采购|合同|招标|询比?价|项目|服务|工程|标的|需求|建设)(的?(主要|简要|基本|具体|名称及))?" \
                      "(内容|概况|概述|范围|信息|规模|简介|介绍|说明|摘要|情况)([及与和]((其它|\w{,2})[要需]求|发包范围|数量))?" \
                      "|招标项目技术要求|服务要求|服务需求|项目目标|需求内容如下|建设规模)为?([::,]|$)"  # procurement content headings
aptitude_pattern = "((资格|资质)[的及]?(要求|条件)|竞买资格及要求|供应商报价须知)([::,]|$)|(竞买|竞买人|竞投人)?资格(条件)?:|按以下要求参与竞买"  # qualification requirement headings
addr_bidopen_pattern = "([开评]标|开启|评选|比选|磋商|遴选|寻源|采购|招标|竞价|议价|委托|询比?价|比价|谈判|邀标|邀请|洽谈|约谈|选取|抽取|抽选|递交\w{,4}文件)[))]?(时间[与及和、])?(地址|地点)([与及和、]时间)?([::,]|$)|开启([::,]|$)"  # bid-opening address headings
addr_bidsend_pattern = "((\w{,4}文件)?(提交|递交)(\w{,4}文件)?|投标)(截止时间[与及和、])?地[点址]([与及和、]截止时间)?([::,]|$)"  # bid-submission address headings
pinmu_name_pattern = "采购品目名称([::,]|$)"  # procurement category (品目) name heading
out_lines = []  # collected outline headings (module-level, shared across calls)
policy_pattern = "《.+?(通知|办法|条例|规定|规程|规范|须知|规则|标准|细则|意见|协议|条件|要求|手册|法典|方案|指南|指引|法)》"  # policy / regulation titles in 《...》
not_policy_pattern = "(表|函|书|证|\d页|公告|合同|文件|清单)》$|采购合同|响应方须知|响应文件格式|营业执照|开标一览|采购需求"  # 《...》 titles that are not policies
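
# Illustrative checks (assumption: headings are normally tested only on their first 20-30
# characters, mirroring the calls in extract_parameters below):
#     re.search(requirement_pattern, '二、采购内容:')    # matches ("采购" + "内容" + ":")
#     re.search(aptitude_pattern, '三、投标人资格要求:')  # matches ("资格" + "要求" + ":")
#     re.search(requirement_pattern, '符合采购需求,')     # also matches, which is why
#                                                          # extract_parameters filters this phrase out

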
def extract_parameters(parse_document):
    '''
    Extract the required fields from the outline tree built over the preprocessed text, using regexes.
    :param parse_document: the result returned by ParseDocument()
    :return:
    '''
    list_data = parse_document.tree
    requirement_text = ''    # procurement content
    aptitude_text = ''       # qualification requirements
    addr_bidopen_text = ''   # bid-opening address
    addr_bidsend_text = ''   # bid-submission address
    requirement_scope = []   # start/end positions of the procurement content
    pinmu_name = ''          # procurement category (品目) name
    list_policy = []         # policies and regulations
    _find_count = 0
    # pass 1: procurement content (and outline headings collected into out_lines)
    _data_i = -1
    while _data_i < len(list_data) - 1:
        _data_i += 1
        _data = list_data[_data_i]
        _type = _data["type"]
        _text = _data["text"].strip()
        # print(_data.keys())
        if _type == "sentence":
            if _data["sentence_title"] is not None:
                if re.search('[((][一二三四五六七八九十}]+[))]|[一二三四五六七八九十]+\s*、|^\d{1,2}[.、][\u4e00-\u9fa5]', _text[:10]):
                    out_lines.append((_text, _data['sentence_index'], _data['wordOffset_begin']))
                if re.search(requirement_pattern, _text[:30]) is not None and re.search('符合采购需求,', _text[:30]) == None:
                    b = (_data['sentence_index'], _data['wordOffset_begin'])
                    childs = get_childs([_data])
                    for c in childs:
                        # requirement_text += c["text"]+"\n"
                        requirement_text += c["text"]
                    e = (c['sentence_index'], c["wordOffset_end"]) if len(childs) > 0 else (_data['sentence_index'], _data['wordOffset_end'])
                    requirement_scope.append(b)
                    requirement_scope.append(e)
                    _data_i += len(childs)
                    _data_i -= 1

    # pass 2: qualification requirements (from headings and from large table cells)
    _data_i = -1
    while _data_i < len(list_data) - 1:
        _data_i += 1
        _data = list_data[_data_i]
        _type = _data["type"]
        _text = _data["text"].strip()
        # print(_data.keys())
        if _type == "sentence":
            # print("aptitude_pattern", _text)
            if _data["sentence_title"] is not None:
                # print("aptitude_pattern",_text)
                # outline = re.sub('(?[一二三四五六七八九十\d.]+)?\s*、?', '',
                #                  re.split('[::,]', _text)[0].replace('(', '(').replace(')', ')'))
                if re.search(aptitude_pattern, _text[:30]) is not None:
                    childs = get_childs([_data])
                    for c in childs:
                        aptitude_text += c["text"]
                        # if c["sentence_title"]:
                        #     aptitude_text += c["text"]+"\n"
                        # else:
                        #     aptitude_text += c["text"]
                    _data_i += len(childs)
                    _data_i -= 1
                # elif re.match('[((\s★▲\*]?[一二三四五六七八九十\dⅠⅡⅢⅣⅤⅥⅦⅧⅨⅩⅪⅫ]+', _text) and len(_text)<30 and re.search('资质|资格', _text):
                #     out_lines.append(outline)
        if _type == "table":
            list_table = _data["list_table"]
            parent_title = _data["parent_title"]
            if list_table is not None:
                for line in list_table[:2]:
                    for cell_i in range(len(line)):
                        cell = line[cell_i]
                        cell_text = cell[0]
                        if len(cell_text) > 120 and re.search(aptitude_pattern, cell_text) is not None:
                            aptitude_text += cell_text + "\n"

    # pass 3: bid-opening address, bid-submission address and procurement category name
    _data_i = -1
    while _data_i < len(list_data) - 1:
        _data_i += 1
        _data = list_data[_data_i]
        _type = _data["type"]
        _text = _data["text"].strip()
        # print(_data.keys())
        if _type == "sentence":
            if _data["sentence_title"] is not None:
                if re.search(addr_bidopen_pattern, _text[:20]) is not None:
                    childs = get_childs([_data], max_depth=1)
                    for c in childs:
                        addr_bidopen_text += c["text"]
                    _data_i += len(childs)
                    _data_i -= 1
                elif re.search(addr_bidsend_pattern, _text[:20]):
                    childs = get_childs([_data], max_depth=1)
                    for c in childs:
                        addr_bidsend_text += c["text"]
                    _data_i += len(childs)
                    _data_i -= 1
                elif re.search(pinmu_name_pattern, _text):
                    childs = get_childs([_data], max_depth=1)
                    for c in childs:
                        pinmu_name += c["text"]
                    _data_i += len(childs)
                    _data_i -= 1

    # pass 4: policies / regulations referenced in the text
    _data_i = -1
    while _data_i < len(list_data) - 1:
        _data_i += 1
        _data = list_data[_data_i]
        _type = _data["type"]
        _text = _data["text"].strip()
        # print(_data.keys())
        if _type == "sentence":
            for it in re.finditer(policy_pattern, _text):
                if it.group(0) not in list_policy:  # compare the matched title, not the Match object, so duplicates are really skipped
                    list_policy.append(it.group(0))

    # keep only the address part when the bid-opening text mixes time and address
    if re.search('时间:', addr_bidopen_text) and re.search('([开评]标|开启|评选|比选|递交\w{,4}文件)?地[点址]([((]网址[))])?:[^,;。]{2,100}[,;。]', addr_bidopen_text):
        for ser in re.finditer('([开评]标|开启|评选|比选|递交\w{,4}文件)?地[点址]([((]网址[))])?:[^,;。]{2,100}[,;。]', addr_bidopen_text):
            b, e = ser.span()
            addr_bidopen_text = addr_bidopen_text[b:e]
    elif re.search('开启', addr_bidopen_text) and re.search('时间:\d{2,4}年\d{1,2}月\d{1,2}日', addr_bidopen_text) and len(addr_bidopen_text) < 40:  # handle docs such as 364991684 that give only a time and no address
        addr_bidopen_text = ""
    if re.search('时间:', addr_bidsend_text) and re.search('((\w{,4}文件)?(提交|递交)(\w{,4}文件)?|投标)?地[点址]([((]网址[))])?:[^,;。]{2,100}[,;。]', addr_bidsend_text):
        for ser in re.finditer('((\w{,4}文件)?(提交|递交)(\w{,4}文件)?|投标)?地[点址]([((]网址[))])?:[^,;。]{2,100}[,;。]', addr_bidsend_text):
            b, e = ser.span()
            addr_bidsend_text = addr_bidsend_text[b:e]

    # strip the heading itself and any trailing punctuation from the 品目 name
    ser = re.search(pinmu_name_pattern, pinmu_name)
    if ser:
        pinmu_name = pinmu_name[ser.end():]
    if re.search('[^\w]$', pinmu_name):
        pinmu_name = pinmu_name[:-1]
    return requirement_text, aptitude_text, addr_bidopen_text, addr_bidsend_text, out_lines, requirement_scope, pinmu_name, list_policy
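
# End-to-end sketch (mirrors the commented-out experiment under __main__ below; docid, title
# and html are assumed to come from the caller, and Preprocessing is imported from
# BiddingKG.dl.interface as done under __main__):
#     list_articles, list_sentences, list_entitys, list_outlines, _cost_time = \
#         Preprocessing.get_preprocessed([[docid, html, "", "", title, '', '']], useselffool=True)
#     sentence2_list, sentence2_list_attach = extract_sentence_list(list_sentences[0])
#     parse_document = ParseDocument(html, True, list_obj=sentence2_list)
#     (requirement_text, aptitude_text, addr_bidopen_text, addr_bidsend_text,
#      out_lines, requirement_scope, pinmu_name, list_policy) = extract_parameters(parse_document)

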
def extract_addr(content):
    '''
    Extract an address from the preprocessed announcement text with a regex.
    :param content: preprocessed announcement text
    :return:
    '''
    addr_bidopen_text = ''
    ser = re.search('([开评]标|开启|评选|比选|磋商|遴选|寻源|采购|招标|竞价|议价|委托|询比?价|比价|谈判|邀标|邀请|洽谈|约谈|选取|抽取|抽选|递交\w{,4}文件))?(会议)?地[点址]([((]网址[))])?[:为][^,;。]{2,100}[,;。]', content)
    if ser:
        addr_bidopen_text = ser.group(0)
    return addr_bidopen_text
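
# Usage sketch (illustrative): extract_addr returns the first "…地点/地址:…" span it finds,
# or an empty string when nothing matches.
#     extract_addr('开标地点:北京市海淀区某大厦三层会议室,开标时间另行通知。')
#     # -> '开标地点:北京市海淀区某大厦三层会议室,'

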
if __name__ == "__main__":
    # with open('D:\html/2.html', 'r', encoding='UTF-8') as f:
    #     html = f.read()
    #
    l = []
    import pandas as pd
    from collections import Counter
    from BiddingKG.dl.interface import Preprocessing
    from BiddingKG.dl.interface.get_label_dic import get_all_label
    from bs4 import BeautifulSoup
    import json

    df = pd.read_excel('E:/公告招标内容提取结果2.xlsx')
    df['len'] = df['招标内容'].apply(lambda x: len(x))
    print(len(df), sum(df['len']), sum(df['len'])/len(df), max(df['len']), min(df['len']))
    print(len([it for it in df['len'] if it > 1500]))

    # df = pd.read_csv(r'E:\channel分类数据\2022年每月两天数据/指定日期_html2022-12-10.csv')
    # df1 = pd.read_excel('E:/公告招标内容提取结果.xlsx')
    # df = df[df['docid'].isin(df1['docid'])]
    #
    # df.drop_duplicates(subset=['docchannel', 'web_source_name', 'exist_table'], inplace=True)
    # print(df.columns, len(df))
    #
    #
    # # def get_text(html):
    # #     soup = BeautifulSoup(html, 'lxml')
    # #     text = soup.get_text()
    # #     return text
    # # df['content'] = df['dochtmlcon'].apply(lambda x: get_text(x))
    # # df['标签'] = df.apply(lambda x: get_all_label(x['doctitle'], x['content']), axis=1)
    # # df['标签'] = df['标签'].apply(lambda x: json.dumps(x, ensure_ascii=False, indent=2))
    # # df1 = df[['docid', '标签']]
    #
    # n = 0
    # datas = []
    # for id, title, html in zip(df['docid'], df['doctitle'], df['dochtmlcon']):
    #     # if id not in [289647738, 289647739]:
    #     #     continue
    #     # print(id, type(id))
    #     # parse_document = ParseDocument(html, True)
    #     # requirement_text, aptitude_text = extract_parameters(parse_document)
    #     # if re.search('资\s*[格质]', html)==None:
    #     #     continue
    #
    #     list_articles, list_sentences, list_entitys, list_outlines, _cost_time = Preprocessing.get_preprocessed([[id, html, "", "", title, '', '']], useselffool=True)
    #     sentence2_list, sentence2_list_attach = extract_sentence_list(list_sentences[0])
    #
    #     # sentence2_list = []
    #
    #     parse_document = ParseDocument(html, True, list_obj=sentence2_list)
    #     requirement_text, aptitude_text = extract_parameters(parse_document)
    #     # if len(aptitude_text)>0:
    #     #     datas.append((id, aptitude_text[:1500]))
    #     #     print(id, aptitude_text[:10], aptitude_text[-20:])
    #     # else:
    #     #     parse_document = ParseDocument(html, True, list_obj=sentence2_list_attach)
    #     #     requirement_text, aptitude_text = extract_parameters(parse_document)
    #
    #     # if 0<len(aptitude_text)<20:
    #     #     l.append(len(aptitude_text))
    #     #     n += 1
    #     #     print(id, aptitude_text)
    #     #     if n > 5:
    #     #         break
    #
    #     if len(requirement_text)>0:
    #         label_dic = get_all_label(title, list_articles[0].content)
    #         # datas.append((id, requirement_text))
    #         datas.append((id, requirement_text, label_dic))
    #
    # c = Counter(out_lines)
    # print(c.most_common(1000))
    # #
    # # df = pd.DataFrame(datas, columns=['docid', '资质要求'])
    # # df.to_excel('E:/公告资质要求提取结果.xlsx')
    #
    # df = pd.DataFrame(datas, columns=['docid', '招标内容', '标签'])
    # df['标签'] = df['标签'].apply(lambda x: json.dumps(x, ensure_ascii=False, indent=2))
    # df.to_excel('E:/公告招标内容提取结果2.xlsx')
    # if len(aptitude_text) > 1000:
    #     print(id, aptitude_text[:10], aptitude_text[-20:])
    # print(Counter(l).most_common(50))
    # print(len(df), len(l), min(l), max(l), sum(l)/len(l))
    # n1 = len([it for it in l if it < 500])
    # n2 = len([it for it in l if it < 1000])
    # n3 = len([it for it in l if it < 1500])
    # n4 = len([it for it in l if it < 2000])
    # print(n1, n2, n3, n4, n1/len(l), n2/len(l), n3/len(l), n4/len(l))

    # parse_document = ParseDocument(html, True)
    # requirement_text, new_list_policy, aptitude_text = extract_parameters(parse_document)
    # print(aptitude_text)

    # sentence_text = '5、要求:3.1投标其他条件:1、中国宝武集团项目未列入禁入名单的投标人。2、具有有效的营业执照;'
    # begin_index = 0
    # for item in re.finditer('[,。;;!!??]+', sentence_text):
    #     end_index = item.end()
    #     if end_index != len(sentence_text):
    #         if end_index - begin_index < 6:
    #             continue
    #     new_sentence_text = sentence_text[begin_index:end_index]
    #     print(new_sentence_text)

    # df = pd.read_excel('E:/公告资质要求提取结果.xlsx')
    # docids = []
    # pos = neg = 0
    # for docid, text in zip(df['docid'], df['资质要求']):
    #     if re.match('[((\s★▲\*]?[一二三四五六七八九十\dⅠⅡⅢⅣⅤⅥⅦⅧⅨⅩⅪⅫ]+', text) and re.search(aptitude_pattern, text[:15]):
    #         pos += 1
    #         pass
    #     else:
    #         neg += 1
    #         print(docid, text[:50])
    #         docids.append(docid)
    # print('异常:%d, 正常:%d'%(neg, pos))
    # print(docids)