  1. #!/usr/bin/env python3
  2. # -*- coding: utf-8 -*-
  3. """
  4. @author: bidikeji
  5. @time: 2024/7/19 10:05
  6. """
  7. import re
  8. from BiddingKG.dl.interface.htmlparser import ParseDocument,get_childs
  9. class Sentence2():
  10. def __init__(self,text,sentence_index,wordOffset_begin,wordOffset_end):
  11. self.name = 'sentence2'
  12. self.text = text
  13. self.sentence_index = sentence_index
  14. self.wordOffset_begin = wordOffset_begin
  15. self.wordOffset_end = wordOffset_end
  16. def get_text(self):
  17. return self.text
def extract_sentence_list(sentence_list):
    """Re-split preprocessed sentences into finer-grained Sentence2 fragments.

    Each sentence is first normalised so that run-on numbered items such as
    "1、" / "二、" get a comma inserted before them, then cut at punctuation.
    Fragments are routed into two lists depending on whether the parent
    sentence came from an attachment.

    :param sentence_list: iterable of sentence objects exposing
        ``sentence_index``, ``sentence_text`` and ``in_attachment``
    :return: tuple ``(body_fragments, attachment_fragments)`` of Sentence2
        lists.  NOTE(review): the stored offsets refer to the *normalised*
        text (after comma insertion), not the original sentence text.
    """
    new_sentence2_list = []
    new_sentence2_list_attach = []
    for sentence in sentence_list:
        sentence_index = sentence.sentence_index
        sentence_text = sentence.sentence_text
        begin_index = 0
        end_index = 0
        # Insert a comma before inline item numbers so each item is split below.
        # e.g. doc 289699210: "1、招标内容:滑触线及配件2、招标品牌:3、参标供应商经营形式要求:厂家4、参标供应商资质要求:5、"
        # NOTE(review): str.replace rewrites every occurrence of the matched
        # substring, and sentence_text mutates while iterating matches found
        # on the original string — assumed acceptable here, verify if changed.
        for it in re.finditer('([^一二三四五六七八九十,。][一二三四五六七八九十]{1,3}|[^\d,。]\d{1,2}(\.\d{1,2}){,2})、', sentence_text):
            temp = it.group(0)
            sentence_text = sentence_text.replace(temp, temp[0] + ',' + temp[1:])
        # Split at punctuation. 20240725: ASCII '?' removed from the class so URLs are not cut apart.
        for item in re.finditer('[,。;;!!?]+', sentence_text):
            end_index = item.end()
            # if end_index!=len(sentence_text):
            # # if end_index-begin_index<6 and item.group(0) in [',', ';', ';'] and re.match('[一二三四五六七八九十\d.]+、', sentence_text[begin_index:end_index])==None: # 20240725 disabled to avoid wrong title extraction
            # # continue
            new_sentence_text = sentence_text[begin_index:end_index]
            sentence2 = Sentence2(new_sentence_text,sentence_index,begin_index,end_index)
            if sentence.in_attachment:
                new_sentence2_list_attach.append(sentence2)
            else:
                new_sentence2_list.append(sentence2)
            begin_index = end_index
        # Any trailing text after the last punctuation becomes one final fragment.
        if end_index!=len(sentence_text):
            end_index = len(sentence_text)
            new_sentence_text = sentence_text[begin_index:end_index]
            sentence2 = Sentence2(new_sentence_text, sentence_index, begin_index, end_index)
            if sentence.in_attachment:
                new_sentence2_list_attach.append(sentence2)
            else:
                new_sentence2_list.append(sentence2)
    return new_sentence2_list, new_sentence2_list_attach
# Section-heading patterns, matched against the first characters of a
# sentence title inside extract_parameters.

# Headings that open a procurement-requirement section (e.g. "采购需求",
# "项目概况", "招标内容").
requirement_pattern = "(采购需求|需求分析|项目说明|(采购|合同|招标|项目|服务|工程)(的?主要)?(内容|概况|范围|信息)([及与和](其它|\w{,2})要求)?" \
                      "|招标项目技术要求|服务要求|服务需求|项目目标|需求内容如下|建设规模)([::,]|$)"
# Headings that open a bidder-qualification section.
aptitude_pattern = "(资格要求|资质要求)([::,]|$)"
# Headings that open a bid-opening time/address section.
addr_bidopen_pattern = "([开评]标|开启|评选|比选|磋商|遴选|寻源|采购|招标|竞价|议价|委托|询比?价|比价|谈判|邀标|邀请|洽谈|约谈|选取|抽取|抽选|递交\w{,4}文件)[))]?(时间[与及和、])?(地址|地点)([与及和、]时间)?([::,]|$)|开启([::,]|$)"
# Matched requirement headings, collected for offline statistics.
# NOTE(review): module-level mutable state — grows across calls to
# extract_parameters and across documents.
out_lines = []
  55. def extract_parameters(parse_document, content):
  56. '''
  57. 通过大纲、预处理后文本正则获取需要字段
  58. :param parse_document: ParseDocument() 方法返回结果
  59. :param content: 公告预处理后文本
  60. :return:
  61. '''
  62. list_data = parse_document.tree
  63. requirement_text = ''
  64. aptitude_text = ''
  65. addr_bidopen_text = ''
  66. _find_count = 0
  67. _data_i = -1
  68. while _data_i<len(list_data)-1:
  69. _data_i += 1
  70. _data = list_data[_data_i]
  71. _type = _data["type"]
  72. _text = _data["text"].strip()
  73. # print(_data.keys())
  74. if _type=="sentence":
  75. if _data["sentence_title"] is not None:
  76. outline = re.sub('(?[一二三四五六七八九十\d.]+)?\s*、?', '',
  77. re.split('[::,]', _text)[0].replace('(', '(').replace(')', ')'))
  78. if re.search(requirement_pattern,_text[:30]) is not None and re.search('符合采购需求,', _text[:30])==None:
  79. out_lines.append(outline)
  80. childs = get_childs([_data])
  81. for c in childs:
  82. # requirement_text += c["text"]+"\n"
  83. requirement_text += c["text"]
  84. _data_i += len(childs)
  85. _data_i -= 1
  86. _data_i = -1
  87. while _data_i<len(list_data)-1:
  88. _data_i += 1
  89. _data = list_data[_data_i]
  90. _type = _data["type"]
  91. _text = _data["text"].strip()
  92. # print(_data.keys())
  93. if _type=="sentence":
  94. # print("aptitude_pattern", _text)
  95. if _data["sentence_title"] is not None:
  96. # print("aptitude_pattern",_text)
  97. # outline = re.sub('(?[一二三四五六七八九十\d.]+)?\s*、?', '',
  98. # re.split('[::,]', _text)[0].replace('(', '(').replace(')', ')'))
  99. if re.search(aptitude_pattern,_text[:30]) is not None:
  100. childs = get_childs([_data])
  101. for c in childs:
  102. aptitude_text += c["text"]
  103. # if c["sentence_title"]:
  104. # aptitude_text += c["text"]+"\n"
  105. # else:
  106. # aptitude_text += c["text"]
  107. _data_i += len(childs)
  108. _data_i -= 1
  109. # elif re.match('[((\s★▲\*]?[一二三四五六七八九十\dⅠⅡⅢⅣⅤⅥⅦⅧⅨⅩⅪⅫ]+', _text) and len(_text)<30 and re.search('资质|资格', _text):
  110. # out_lines.append(outline)
  111. if _type=="table":
  112. list_table = _data["list_table"]
  113. parent_title = _data["parent_title"]
  114. if list_table is not None:
  115. for line in list_table[:2]:
  116. for cell_i in range(len(line)):
  117. cell = line[cell_i]
  118. cell_text = cell[0]
  119. if len(cell_text)>120 and re.search(aptitude_pattern,cell_text) is not None:
  120. aptitude_text += cell_text+"\n"
  121. _data_i = -1
  122. while _data_i < len(list_data) - 1:
  123. _data_i += 1
  124. _data = list_data[_data_i]
  125. _type = _data["type"]
  126. _text = _data["text"].strip()
  127. # print(_data.keys())
  128. if _type == "sentence":
  129. if _data["sentence_title"] is not None:
  130. if re.search(addr_bidopen_pattern, _text[:20]) is not None:
  131. childs = get_childs([_data], max_depth=1)
  132. for c in childs:
  133. addr_bidopen_text += c["text"]
  134. _data_i += len(childs)
  135. _data_i -= 1
  136. if re.search('时间:', addr_bidopen_text) and re.search('([开评]标|开启|评选|比选|递交\w{,4}文件)?地[点址]([((]网址[))])?:[^,;。]{2,100}[,;。]', addr_bidopen_text):
  137. for ser in re.finditer('([开评]标|开启|评选|比选|递交\w{,4}文件)?地[点址]([((]网址[))])?:[^,;。]{2,100}[,;。]', addr_bidopen_text):
  138. b, e = ser.span()
  139. addr_bidopen_text = addr_bidopen_text[b:e]
  140. elif re.search('开启', addr_bidopen_text) and re.search('时间:\d{2,4}年\d{1,2}月\d{1,2}日', addr_bidopen_text) and len(addr_bidopen_text)<40: # 优化类似 364991684只有时间没地址情况
  141. addr_bidopen_text = ""
  142. if addr_bidopen_text == "":
  143. ser = re.search('([开评]标|开启|评选|比选|磋商|遴选|寻源|采购|招标|竞价|议价|委托|询比?价|比价|谈判|邀标|邀请|洽谈|约谈|选取|抽取|抽选|递交\w{,4}文件))?(会议)?地[点址]([((]网址[))])?[:为][^,;。]{2,100}[,;。]', content)
  144. if ser:
  145. addr_bidopen_text = ser.group(0)
  146. return requirement_text, aptitude_text, addr_bidopen_text
if __name__ == "__main__":
    # Ad-hoc evaluation script: loads a previously exported extraction result
    # spreadsheet and prints length statistics of the extracted tender-content
    # column.  Requires local files and project/third-party packages; not part
    # of the library API.
    # with open('D:\html/2.html', 'r', encoding='UTF-8') as f:
    #     html = f.read()
    #
    l = []
    import pandas as pd
    from collections import Counter
    from BiddingKG.dl.interface import Preprocessing
    from BiddingKG.dl.interface.get_label_dic import get_all_label
    from bs4 import BeautifulSoup
    import json
    df = pd.read_excel('E:/公告招标内容提取结果2.xlsx')
    # Character length of each extracted "招标内容" (tender content) cell.
    df['len']= df['招标内容'].apply(lambda x: len(x))
    # Count, total/average length, max and min.
    print(len(df), sum(df['len']),sum(df['len'])/len(df), max(df['len']), min(df['len']))
    # How many documents exceed 1500 characters.
    print(len([it for it in df['len'] if it>1500]))
  162. # df = pd.read_csv(r'E:\channel分类数据\2022年每月两天数据/指定日期_html2022-12-10.csv')
  163. # df1 = pd.read_excel('E:/公告招标内容提取结果.xlsx')
  164. # df = df[df['docid'].isin(df1['docid'])]
  165. #
  166. # df.drop_duplicates(subset=['docchannel', 'web_source_name', 'exist_table'], inplace=True)
  167. # print(df.columns, len(df))
  168. #
  169. #
  170. # # def get_text(html):
  171. # # soup = BeautifulSoup(html, 'lxml')
  172. # # text = soup.get_text()
  173. # # return text
  174. # # df['content'] = df['dochtmlcon'].apply(lambda x: get_text(x))
  175. # # df['标签'] = df.apply(lambda x: get_all_label(x['doctitle'], x['content']), axis=1)
  176. # # df['标签'] = df['标签'].apply(lambda x: json.dumps(x, ensure_ascii=False, indent=2))
  177. # # df1 = df[['docid', '标签']]
  178. #
  179. # n = 0
  180. # datas = []
  181. # for id,title, html in zip(df['docid'],df['doctitle'], df['dochtmlcon']):
  182. # # if id not in [289647738, 289647739]:
  183. # # continue
  184. # # print(id, type(id))
  185. # # parse_document = ParseDocument(html, True)
  186. # # requirement_text, aptitude_text = extract_parameters(parse_document)
  187. # # if re.search('资\s*[格质]', html)==None:
  188. # # continue
  189. #
  190. # list_articles, list_sentences, list_entitys, list_outlines, _cost_time = Preprocessing.get_preprocessed([[id,html,"","",title,'', '']],useselffool=True)
  191. # sentence2_list, sentence2_list_attach = extract_sentence_list(list_sentences[0])
  192. #
  193. # # sentence2_list = []
  194. #
  195. # parse_document = ParseDocument(html, True, list_obj=sentence2_list)
  196. # requirement_text, aptitude_text = extract_parameters(parse_document)
  197. # # if len(aptitude_text)>0:
  198. # # datas.append((id, aptitude_text[:1500]))
  199. # # print(id, aptitude_text[:10], aptitude_text[-20:])
  200. # # else:
  201. # # parse_document = ParseDocument(html, True, list_obj=sentence2_list_attach)
  202. # # requirement_text, aptitude_text = extract_parameters(parse_document)
  203. #
  204. # # if 0<len(aptitude_text)<20:
  205. # # l.append(len(aptitude_text))
  206. # # n += 1
  207. # # print(id, aptitude_text)
  208. # # if n > 5:
  209. # # break
  210. #
  211. # if len(requirement_text)>0:
  212. # label_dic = get_all_label(title, list_articles[0].content)
  213. # # datas.append((id, requirement_text))
  214. # datas.append((id, requirement_text, label_dic))
  215. #
  216. # c = Counter(out_lines)
  217. # print(c.most_common(1000))
  218. # #
  219. # # df = pd.DataFrame(datas, columns=['docid', '资质要求'])
  220. # # df.to_excel('E:/公告资质要求提取结果.xlsx')
  221. #
  222. # df = pd.DataFrame(datas, columns=['docid', '招标内容', '标签'])
  223. # df['标签'] = df['标签'].apply(lambda x: json.dumps(x, ensure_ascii=False, indent=2))
  224. # df.to_excel('E:/公告招标内容提取结果2.xlsx')
  225. # if len(aptitude_text)> 1000:
  226. # print(id, aptitude_text[:10], aptitude_text[-20:])
  227. # print(Counter(l).most_common(50))
  228. # print(len(df), len(l), min(l), max(l), sum(l)/len(l))
  229. # n1 = len([it for it in l if it < 500])
  230. # n2 = len([it for it in l if it < 1000])
  231. # n3 = len([it for it in l if it < 1500])
  232. # n4 = len([it for it in l if it < 2000])
  233. # print(n1, n2, n3, n4, n1/len(l), n2/len(l), n3/len(l), n4/len(l))
  234. # parse_document = ParseDocument(html,True)
  235. # requirement_text, new_list_policy, aptitude_text = extract_parameters(parse_document)
  236. # print(aptitude_text)
  237. # sentence_text = '5、要求:3.1投标其他条件:1、中国宝武集团项目未列入禁入名单的投标人。2、具有有效的营业执照;'
  238. # begin_index = 0
  239. # for item in re.finditer('[,。;;!!??]+', sentence_text):
  240. # end_index = item.end()
  241. # if end_index != len(sentence_text):
  242. # if end_index - begin_index < 6:
  243. # continue
  244. # new_sentence_text = sentence_text[begin_index:end_index]
  245. # print(new_sentence_text)
  246. # df = pd.read_excel('E:/公告资质要求提取结果.xlsx')
  247. # docids = []
  248. # pos = neg = 0
  249. # for docid, text in zip(df['docid'], df['资质要求']):
  250. # if re.match('[((\s★▲\*]?[一二三四五六七八九十\dⅠⅡⅢⅣⅤⅥⅦⅧⅨⅩⅪⅫ]+', text) and re.search(aptitude_pattern, text[:15]):
  251. # pos += 1
  252. # pass
  253. # else:
  254. # neg += 1
  255. # print(docid, text[:50])
  256. # docids.append(docid)
  257. # print('异常:%d, 正常:%d'%(neg, pos))
  258. # print(docids)