outline_extractor.py

#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@author: bidikeji
@time: 2024/7/19 10:05
"""
import re
from BiddingKG.dl.interface.htmlparser import ParseDocument, get_childs


class Sentence2():
    def __init__(self, text, sentence_index, wordOffset_begin, wordOffset_end):
        self.name = 'sentence2'
        self.text = text
        self.sentence_index = sentence_index
        self.wordOffset_begin = wordOffset_begin
        self.wordOffset_end = wordOffset_end

    def get_text(self):
        return self.text


def extract_sentence_list(sentence_list):
    new_sentence2_list = []
    new_sentence2_list_attach = []
    for sentence in sentence_list:
        sentence_index = sentence.sentence_index
        sentence_text = sentence.sentence_text
        begin_index = 0
        end_index = 0
        # Insert a comma before clause numbers such as "2、" so that numbered clauses written
        # without punctuation get split. Example (doc 289699210):
        # 1、招标内容:滑触线及配件2、招标品牌:3、参标供应商经营形式要求:厂家4、参标供应商资质要求:5、
        for it in re.finditer('([^一二三四五六七八九十,。][一二三四五六七八九十]{1,3}|[^\d,。]\d{1,2}(\.\d{1,2}){,2})、', sentence_text):
            temp = it.group(0)
            sentence_text = sentence_text.replace(temp, temp[0] + ',' + temp[1:])
        for item in re.finditer('[,。;；!！？]+', sentence_text):  # 20240725: dropped the ASCII question mark so URLs are not split
            end_index = item.end()
            # if end_index!=len(sentence_text):
            #     # if end_index-begin_index<6 and item.group(0) in [',', ';', '；'] and re.match('[一二三四五六七八九十\d.]+、', sentence_text[begin_index:end_index])==None:  # 20240725: disabled to avoid breaking title extraction
            #     #     continue
            new_sentence_text = sentence_text[begin_index:end_index]
            sentence2 = Sentence2(new_sentence_text, sentence_index, begin_index, end_index)
            if sentence.in_attachment:
                new_sentence2_list_attach.append(sentence2)
            else:
                new_sentence2_list.append(sentence2)
            begin_index = end_index
        # Keep whatever follows the last punctuation mark as its own Sentence2.
        if end_index != len(sentence_text):
            end_index = len(sentence_text)
            new_sentence_text = sentence_text[begin_index:end_index]
            sentence2 = Sentence2(new_sentence_text, sentence_index, begin_index, end_index)
            if sentence.in_attachment:
                new_sentence2_list_attach.append(sentence2)
            else:
                new_sentence2_list.append(sentence2)
    return new_sentence2_list, new_sentence2_list_attach
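
# Minimal usage sketch for extract_sentence_list (illustrative only). The real sentence
# objects come from Preprocessing.get_preprocessed(); `_FakeSentence` below is a
# hypothetical stand-in exposing just the three attributes the function reads.
#
#     class _FakeSentence:
#         def __init__(self, text, index, in_attachment=False):
#             self.sentence_text = text
#             self.sentence_index = index
#             self.in_attachment = in_attachment
#
#     demo = _FakeSentence('1、招标内容:滑触线及配件2、招标品牌:3、参标供应商经营形式要求:厂家4、参标供应商资质要求:5、', 0)
#     parts, attach_parts = extract_sentence_list([demo])
#     for p in parts:
#         print(p.sentence_index, p.wordOffset_begin, p.wordOffset_end, p.get_text())
#     # Each numbered clause ("2、", "3、", ...) becomes its own Sentence2, because a comma
#     # is inserted before the clause number prior to splitting on punctuation.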


# Outline/heading patterns used to locate the relevant sections:
# requirement_pattern  -> procurement requirement / project content headings
# aptitude_pattern     -> qualification requirement headings
# addr_bidopen_pattern -> bid-opening address headings
# addr_bidsend_pattern -> bid-submission address headings
requirement_pattern = "(采购需求|需求分析|项目说明|(采购|合同|招标|项目|服务|工程)(的?主要)?(内容|概况|范围|信息)([及与和](其它|\w{,2})要求)?" \
                      "|招标项目技术要求|服务要求|服务需求|项目目标|需求内容如下|建设规模)([:：,]|$)"
aptitude_pattern = "(资格要求|资质要求)([:：,]|$)"
addr_bidopen_pattern = "([开评]标|开启|评选|比选|磋商|遴选|寻源|采购|招标|竞价|议价|委托|询比?价|比价|谈判|邀标|邀请|洽谈|约谈|选取|抽取|抽选|递交\w{,4}文件)[)）]?(时间[与及和、])?(地址|地点)([与及和、]时间)?([:：,]|$)|开启([:：,]|$)"
addr_bidsend_pattern = "((\w{,4}文件)?(提交|递交)(\w{,4}文件)?|投标)(截止时间[与及和、])?地[点址]([与及和、]截止时间)?([:：,]|$)"

out_lines = []
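
# Quick sanity check of the heading patterns (illustrative only; the headings are typical
# announcement section titles, not taken from a specific document). In extract_parameters()
# the patterns are applied to the leading characters of each sentence_title.
#
#     assert re.search(requirement_pattern, '二、采购内容')
#     assert re.search(aptitude_pattern, '三、投标人资格要求')
#     assert re.search(addr_bidopen_pattern, '开标地点')
#     assert re.search(addr_bidsend_pattern, '投标文件递交地点')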


def extract_parameters(parse_document, content):
    '''
    Extract the target fields from the document outline, falling back to regex over the
    preprocessed text.
    :param parse_document: result returned by ParseDocument()
    :param content: preprocessed announcement text
    :return: requirement_text, aptitude_text, addr_bidopen_text, addr_bidsend_text
    '''
    list_data = parse_document.tree
    requirement_text = ''
    aptitude_text = ''
    addr_bidopen_text = ''
    addr_bidsend_text = ''
    _find_count = 0

    # Pass 1: procurement requirement / project content sections.
    _data_i = -1
    while _data_i < len(list_data)-1:
        _data_i += 1
        _data = list_data[_data_i]
        _type = _data["type"]
        _text = _data["text"].strip()
        # print(_data.keys())
        if _type == "sentence":
            if _data["sentence_title"] is not None:
                outline = re.sub('（?[一二三四五六七八九十\d.]+）?\s*、?', '',
                                 re.split('[:：,]', _text)[0].replace('(', '（').replace(')', '）'))
                if re.search(requirement_pattern, _text[:30]) is not None and re.search('符合采购需求,', _text[:30]) == None:
                    out_lines.append(outline)
                    childs = get_childs([_data])
                    for c in childs:
                        # requirement_text += c["text"]+"\n"
                        requirement_text += c["text"]
                    _data_i += len(childs)
                    _data_i -= 1

    # Pass 2: qualification requirement sections (sentences and leading table rows).
    _data_i = -1
    while _data_i < len(list_data)-1:
        _data_i += 1
        _data = list_data[_data_i]
        _type = _data["type"]
        _text = _data["text"].strip()
        # print(_data.keys())
        if _type == "sentence":
            # print("aptitude_pattern", _text)
            if _data["sentence_title"] is not None:
                # print("aptitude_pattern",_text)
                # outline = re.sub('（?[一二三四五六七八九十\d.]+）?\s*、?', '',
                #                  re.split('[:：,]', _text)[0].replace('(', '（').replace(')', '）'))
                if re.search(aptitude_pattern, _text[:30]) is not None:
                    childs = get_childs([_data])
                    for c in childs:
                        aptitude_text += c["text"]
                        # if c["sentence_title"]:
                        #     aptitude_text += c["text"]+"\n"
                        # else:
                        #     aptitude_text += c["text"]
                    _data_i += len(childs)
                    _data_i -= 1
                # elif re.match('[(（\s★▲\*]?[一二三四五六七八九十\dⅠⅡⅢⅣⅤⅥⅦⅧⅨⅩⅪⅫ]+', _text) and len(_text)<30 and re.search('资质|资格', _text):
                #     out_lines.append(outline)
        if _type == "table":
            list_table = _data["list_table"]
            parent_title = _data["parent_title"]
            if list_table is not None:
                for line in list_table[:2]:
                    for cell_i in range(len(line)):
                        cell = line[cell_i]
                        cell_text = cell[0]
                        if len(cell_text) > 120 and re.search(aptitude_pattern, cell_text) is not None:
                            aptitude_text += cell_text+"\n"

    # Pass 3: bid-opening and bid-submission addresses (direct children only).
    _data_i = -1
    while _data_i < len(list_data) - 1:
        _data_i += 1
        _data = list_data[_data_i]
        _type = _data["type"]
        _text = _data["text"].strip()
        # print(_data.keys())
        if _type == "sentence":
            if _data["sentence_title"] is not None:
                if re.search(addr_bidopen_pattern, _text[:20]) is not None:
                    childs = get_childs([_data], max_depth=1)
                    for c in childs:
                        addr_bidopen_text += c["text"]
                    _data_i += len(childs)
                    _data_i -= 1
                elif re.search(addr_bidsend_pattern, _text[:20]):
                    childs = get_childs([_data], max_depth=1)
                    for c in childs:
                        addr_bidsend_text += c["text"]
                    _data_i += len(childs)
                    _data_i -= 1

    # Narrow the bid-opening text down to the address fragment itself.
    if re.search('时间:', addr_bidopen_text) and re.search('([开评]标|开启|评选|比选|递交\w{,4}文件)?地[点址]([(（]网址[)）])?:[^,;。]{2,100}[,;。]', addr_bidopen_text):
        for ser in re.finditer('([开评]标|开启|评选|比选|递交\w{,4}文件)?地[点址]([(（]网址[)）])?:[^,;。]{2,100}[,;。]', addr_bidopen_text):
            b, e = ser.span()
            addr_bidopen_text = addr_bidopen_text[b:e]
    elif re.search('开启', addr_bidopen_text) and re.search('时间:\d{2,4}年\d{1,2}月\d{1,2}日', addr_bidopen_text) and len(addr_bidopen_text) < 40:  # handle cases like doc 364991684 that give a time but no address
        addr_bidopen_text = ""
    # Fall back to a regex over the whole preprocessed text if the outline gave nothing.
    if addr_bidopen_text == "":
        ser = re.search('([开评]标|开启|评选|比选|磋商|遴选|寻源|采购|招标|竞价|议价|委托|询比?价|比价|谈判|邀标|邀请|洽谈|约谈|选取|抽取|抽选|递交\w{,4}文件)）?(会议)?地[点址]([(（]网址[)）])?[:为][^,;。]{2,100}[,;。]', content)
        if ser:
            addr_bidopen_text = ser.group(0)
    if re.search('时间:', addr_bidsend_text) and re.search('((\w{,4}文件)?(提交|递交)(\w{,4}文件)?|投标)?地[点址]([(（]网址[)）])?:[^,;。]{2,100}[,;。]', addr_bidsend_text):
        for ser in re.finditer('((\w{,4}文件)?(提交|递交)(\w{,4}文件)?|投标)?地[点址]([(（]网址[)）])?:[^,;。]{2,100}[,;。]', addr_bidsend_text):
            b, e = ser.span()
            addr_bidsend_text = addr_bidsend_text[b:e]
    return requirement_text, aptitude_text, addr_bidopen_text, addr_bidsend_text
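
# End-to-end usage sketch (illustrative only), mirroring the commented-out experiment in
# __main__ below; docid, doctitle and html are placeholders for one announcement record.
#
#     from BiddingKG.dl.interface import Preprocessing
#
#     list_articles, list_sentences, list_entitys, list_outlines, _cost_time = \
#         Preprocessing.get_preprocessed([[docid, html, "", "", doctitle, '', '']], useselffool=True)
#     sentence2_list, sentence2_list_attach = extract_sentence_list(list_sentences[0])
#     parse_document = ParseDocument(html, True, list_obj=sentence2_list)
#     requirement_text, aptitude_text, addr_bidopen_text, addr_bidsend_text = \
#         extract_parameters(parse_document, list_articles[0].content)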


if __name__ == "__main__":
    # with open('D:\html/2.html', 'r', encoding='UTF-8') as f:
    #     html = f.read()

    l = []
    import pandas as pd
    from collections import Counter
    from BiddingKG.dl.interface import Preprocessing
    from BiddingKG.dl.interface.get_label_dic import get_all_label
    from bs4 import BeautifulSoup
    import json

    df = pd.read_excel('E:/公告招标内容提取结果2.xlsx')
    df['len'] = df['招标内容'].apply(lambda x: len(x))
    print(len(df), sum(df['len']), sum(df['len'])/len(df), max(df['len']), min(df['len']))
    print(len([it for it in df['len'] if it > 1500]))

    # df = pd.read_csv(r'E:\channel分类数据\2022年每月两天数据/指定日期_html2022-12-10.csv')
    # df1 = pd.read_excel('E:/公告招标内容提取结果.xlsx')
    # df = df[df['docid'].isin(df1['docid'])]
    #
    # df.drop_duplicates(subset=['docchannel', 'web_source_name', 'exist_table'], inplace=True)
    # print(df.columns, len(df))
    #
    # # def get_text(html):
    # #     soup = BeautifulSoup(html, 'lxml')
    # #     text = soup.get_text()
    # #     return text
    # # df['content'] = df['dochtmlcon'].apply(lambda x: get_text(x))
    # # df['标签'] = df.apply(lambda x: get_all_label(x['doctitle'], x['content']), axis=1)
    # # df['标签'] = df['标签'].apply(lambda x: json.dumps(x, ensure_ascii=False, indent=2))
    # # df1 = df[['docid', '标签']]
    #
    # n = 0
    # datas = []
    # for id, title, html in zip(df['docid'], df['doctitle'], df['dochtmlcon']):
    #     # if id not in [289647738, 289647739]:
    #     #     continue
    #     # print(id, type(id))
    #     # parse_document = ParseDocument(html, True)
    #     # requirement_text, aptitude_text = extract_parameters(parse_document)
    #     # if re.search('资\s*[格质]', html) == None:
    #     #     continue
    #
    #     list_articles, list_sentences, list_entitys, list_outlines, _cost_time = Preprocessing.get_preprocessed([[id, html, "", "", title, '', '']], useselffool=True)
    #     sentence2_list, sentence2_list_attach = extract_sentence_list(list_sentences[0])
    #
    #     # sentence2_list = []
    #
    #     parse_document = ParseDocument(html, True, list_obj=sentence2_list)
    #     requirement_text, aptitude_text = extract_parameters(parse_document)
    #     # if len(aptitude_text) > 0:
    #     #     datas.append((id, aptitude_text[:1500]))
    #     #     print(id, aptitude_text[:10], aptitude_text[-20:])
    #     # else:
    #     #     parse_document = ParseDocument(html, True, list_obj=sentence2_list_attach)
    #     #     requirement_text, aptitude_text = extract_parameters(parse_document)
    #
    #     # if 0 < len(aptitude_text) < 20:
    #     #     l.append(len(aptitude_text))
    #     #     n += 1
    #     #     print(id, aptitude_text)
    #     # if n > 5:
    #     #     break
    #
    #     if len(requirement_text) > 0:
    #         label_dic = get_all_label(title, list_articles[0].content)
    #         # datas.append((id, requirement_text))
    #         datas.append((id, requirement_text, label_dic))
    #
    # c = Counter(out_lines)
    # print(c.most_common(1000))
    #
    # # df = pd.DataFrame(datas, columns=['docid', '资质要求'])
    # # df.to_excel('E:/公告资质要求提取结果.xlsx')
    #
    # df = pd.DataFrame(datas, columns=['docid', '招标内容', '标签'])
    # df['标签'] = df['标签'].apply(lambda x: json.dumps(x, ensure_ascii=False, indent=2))
    # df.to_excel('E:/公告招标内容提取结果2.xlsx')
    # if len(aptitude_text) > 1000:
    #     print(id, aptitude_text[:10], aptitude_text[-20:])
    # print(Counter(l).most_common(50))
    # print(len(df), len(l), min(l), max(l), sum(l)/len(l))
    # n1 = len([it for it in l if it < 500])
    # n2 = len([it for it in l if it < 1000])
    # n3 = len([it for it in l if it < 1500])
    # n4 = len([it for it in l if it < 2000])
    # print(n1, n2, n3, n4, n1/len(l), n2/len(l), n3/len(l), n4/len(l))

    # parse_document = ParseDocument(html, True)
    # requirement_text, new_list_policy, aptitude_text = extract_parameters(parse_document)
    # print(aptitude_text)

    # sentence_text = '5、要求:3.1投标其他条件:1、中国宝武集团项目未列入禁入名单的投标人。2、具有有效的营业执照;'
    # begin_index = 0
    # for item in re.finditer('[,。;；!！?？]+', sentence_text):
    #     end_index = item.end()
    #     if end_index != len(sentence_text):
    #         if end_index - begin_index < 6:
    #             continue
    #     new_sentence_text = sentence_text[begin_index:end_index]
    #     print(new_sentence_text)

    # df = pd.read_excel('E:/公告资质要求提取结果.xlsx')
    # docids = []
    # pos = neg = 0
    # for docid, text in zip(df['docid'], df['资质要求']):
    #     if re.match('[(（\s★▲\*]?[一二三四五六七八九十\dⅠⅡⅢⅣⅤⅥⅦⅧⅨⅩⅪⅫ]+', text) and re.search(aptitude_pattern, text[:15]):
    #         pos += 1
    #         pass
    #     else:
    #         neg += 1
    #         print(docid, text[:50])
    #         docids.append(docid)
    # print('异常:%d, 正常:%d' % (neg, pos))
    # print(docids)