
Add outline tree parsing and qualification requirement extraction

lsm, 10 months ago
Parent
Current commit
555130871e
3 changed files with 1473 additions and 9 deletions
  1. BiddingKG/dl/interface/extract.py (+23, -9)
  2. BiddingKG/dl/interface/htmlparser.py (+1244, -0)
  3. BiddingKG/dl/interface/outline_extractor.py (+206, -0)
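
With this change, predict() in extract.py runs the new outline extraction and exposes the qualification-requirement text in its result JSON under the "aptitude" key (truncated to 1500 characters). A rough usage sketch, assuming the package and its model dependencies are importable in the current environment; the doc_id, title and HTML values below are placeholders:

    import json
    from BiddingKG.dl.interface.extract import predict

    html_text = "<html><body>...</body></html>"  # placeholder: the announcement HTML to analyse
    result = json.loads(predict("demo_doc_id", html_text, title="demo"))
    print(result.get("aptitude", ""))  # qualification requirements, empty string when none are found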

+ 23 - 9
BiddingKG/dl/interface/extract.py

@@ -27,6 +27,7 @@ import BiddingKG.dl.complaint.punish_predictor as punish_rule
 import json
 from BiddingKG.dl.money.re_money_total_unit import extract_total_money, extract_unit_money
 from BiddingKG.dl.ratio.re_ratio import extract_ratio
+from BiddingKG.dl.interface.outline_extractor import ParseDocument, extract_parameters, extract_sentence_list
 
 
 # 自定义jsonEncoder
@@ -248,6 +249,16 @@ def predict(doc_id,text,title="",page_time="",web_source_no='',web_source_name="
     cost_time["preprocess"] = round(time.time()-start_time,2)
     cost_time.update(_cost_time)
 
+    '''Outline extraction and extraction of outline-related content'''
+    sentence2_list, sentence2_list_attach = extract_sentence_list(list_sentences[0])
+    tt = time.time()
+    parse_document = ParseDocument(text, True,list_obj=sentence2_list)
+    requirement_text, aptitude_text = extract_parameters(parse_document)
+    if aptitude_text == '':
+        parse_document = ParseDocument(text, True, list_obj=sentence2_list_attach)
+        requirement_text, aptitude_text = extract_parameters(parse_document)
+    parse_document.print_tree(parse_document.tree)
+
     # Filter out erroneous entities whose value in Redis is 0
     # list_entitys[0] = entityLink.enterprise_filter(list_entitys[0])
     # # depends on sentence order
@@ -419,7 +430,7 @@ def predict(doc_id,text,title="",page_time="",web_source_no='',web_source_name="
 
     # data_res = Preprocessing.union_result(Preprocessing.union_result(codeName, prem),list_punish_dic)[0]
     # data_res = Preprocessing.union_result(Preprocessing.union_result(Preprocessing.union_result(codeName, prem),list_punish_dic), list_channel_dic)[0]
-    version_date = {'version_date': '2024-07-18'}
+    version_date = {'version_date': '2024-07-22'}
     data_res = dict(codeName[0], **prem[0], **channel_dic, **product_attrs[0], **product_attrs[1], **payment_way_dic, **fail_reason, **industry, **district, **candidate_dic, **version_date, **all_moneys, **pb_json)
 
     if original_docchannel == 302:
@@ -461,14 +472,17 @@ def predict(doc_id,text,title="",page_time="",web_source_no='',web_source_name="
     data_res["proportion"] = pb_json.get('pb').get('proportion', '')
     data_res["pb_project_name"] = pb_json.get('pb').get('project_name_refind', '')
 
-    # for _article in list_articles:
-    #         log(_article.content)
-    #
-    # for list_entity in list_entitys:
-    #     for _entity in list_entity:
-    #         log("type:%s,text:%s,label:%s,values:%s,sentence:%s,begin_index:%s,end_index:%s"%
-    #               (str(_entity.entity_type),str(_entity.entity_text),str(_entity.label),str(_entity.values),str(_entity.sentence_index),
-    #                str(_entity.begin_index),str(_entity.end_index)))
+    # Qualification requirements
+    data_res['aptitude'] = aptitude_text[:1500]
+
+    for _article in list_articles:
+            log(_article.content)
+
+    for list_entity in list_entitys:
+        for _entity in list_entity:
+            log("type:%s,text:%s,label:%s,values:%s,sentence:%s,begin_index:%s,end_index:%s"%
+                  (str(_entity.entity_type),str(_entity.entity_text),str(_entity.label),str(_entity.values),str(_entity.sentence_index),
+                   str(_entity.begin_index),str(_entity.end_index)))
     _extract_json = json.dumps(data_res,cls=MyEncoder,sort_keys=True,indent=4,ensure_ascii=False)
     _extract_json = _extract_json.replace("\x06", "").replace("\x05", "").replace("\x07", "")
     return _extract_json#, list_articles[0].content, get_ent_context(list_sentences, list_entitys)
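
The new htmlparser.py below supplies the ParseDocument outline tree consumed above. In predict() the Sentence2 list comes from extract_sentence_list; the sketch below builds one by hand instead (the sample sentences and word offsets are made up), and it assumes extract_parameters from outline_extractor.py behaves exactly as it is called above, returning a (requirement_text, aptitude_text) pair:

    from BiddingKG.dl.interface.htmlparser import ParseDocument, Sentence2
    from BiddingKG.dl.interface.outline_extractor import extract_parameters

    # one Sentence2 per sentence: (text, sentence_index, wordOffset_begin, wordOffset_end)
    sentences = ["一、项目概况。", "二、供应商资质要求。", "1.具有独立承担民事责任的能力;"]
    list_obj = [Sentence2(text, i, 0, len(text)) for i, text in enumerate(sentences)]

    parse_document = ParseDocument("", True, list_obj=list_obj)  # html is unused when list_obj is given
    parse_document.print_tree(parse_document.tree)               # log the recognized outline tree

    requirement_text, aptitude_text = extract_parameters(parse_document)
    print(aptitude_text)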

+ 1244 - 0
BiddingKG/dl/interface/htmlparser.py

@@ -0,0 +1,1244 @@
+#coding:utf8
+
+import re
+
+# from BaseDataMaintenance.maintenance.product.productUtils import is_similar
+# from BiddingKG.dl.common.Utils import log
+import logging
+logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
+
+logger = logging.getLogger(__name__)
+logger.setLevel(logging.INFO)
+
+def log(msg):
+    '''
+    @summary: print a log message
+    '''
+    logger.info(msg)
+
+from bs4 import BeautifulSoup
+import copy
+
+import Levenshtein
+def jaccard_score(source,target):
+    source_set = set([s for s in source])
+    target_set = set([s for s in target])
+    if len(source_set)==0 or len(target_set)==0:
+        return 0
+    return max(len(source_set&target_set)/len(source_set),len(source_set&target_set)/len(target_set))
+def judge_pur_chinese(keyword):
+    """
+    Chinese characters fall in the range u'\u4e00' -- u'\u9fff'; the string is judged to be pure Chinese only if every character is in this range
+    @param keyword:
+    @return:
+    """
+    # punctuation characters that should be removed before checking
+    remove_chars = '[·’!"\#$%&\'()#!()*+,-./:;<=>?\@,:?¥★、….>【】[]《》?“”‘’\[\\]^_`{|}~]+'
+    # use re.sub to strip punctuation from the string
+    strings = re.sub(remove_chars, "", keyword)  # replace any punctuation from remove_chars found in keyword with an empty string
+    for ch in strings:
+        if u'\u4e00' <= ch <= u'\u9fff':
+            pass
+        else:
+            return False
+    return True
+
+def is_similar(source,target,_radio=None):
+    source = str(source).lower()
+    target = str(target).lower()
+    max_len = max(len(source),len(target))
+    min_len = min(len(source),len(target))
+
+    min_ratio = 90
+    if min_len>=3:
+        min_ratio = 87
+    if min_len>=5:
+        min_ratio = 85
+    if _radio is not None:
+        min_ratio = _radio
+    # dis_len = abs(len(source)-len(target))
+    # min_dis = min(max_len*0.2,4)
+    if min_len==0 and max_len>0:
+        return False
+    if max_len<=2:
+        if source==target:
+            return True
+    if min_len<2:
+        return False
+    # judge similarity
+    similar = Levenshtein.ratio(source,target)*100
+    if similar>=min_ratio:
+        log("%s and %s similar_jaro %d"%(source,target,similar))
+        return True
+    similar_jaro = Levenshtein.jaro(source,target)
+    if similar_jaro*100>=min_ratio:
+        log("%s and %s similar_jaro %d"%(source,target,similar_jaro*100))
+        return True
+    similar_jarow = Levenshtein.jaro_winkler(source,target)
+    if similar_jarow*100>=min_ratio:
+        log("%s and %s similar_jaro %d"%(source,target,similar_jarow*100))
+        return True
+
+    if min_len>=5:
+        if len(source)==max_len and str(source).find(target)>=0:
+                return True
+        elif len(target)==max_len and target.find(source)>=0:
+                return True
+        elif jaccard_score(source, target)==1 and judge_pur_chinese(source) and judge_pur_chinese(target):
+            return True
+    return False
+
+end_pattern = "商务要求|评分标准|商务条件|商务条件"
+_param_pattern = "(产品|技术|清单|配置|参数|具体|明细|项目|招标|货物|服务|规格|工作|具体)[及和与]?(指标|配置|条件|要求|参数|需求|规格|条款|名称及要求)|配置清单|(质量|技术).{,10}要求|验收标准|^(参数|功能)$"
+meter_pattern = "[><≤≥±]\d+|\d+(?:[μucmkK微毫千]?[米升LlgGmMΩ]|摄氏度|英寸|度|天|VA|dB|bpm|rpm|kPa|mol|cmH20|%|°|Mpa|Hz|K?HZ|℃|W|min|[*×xX])|[*×xX]\d+|/min|\ds[^a-zA-Z]|GB.{,20}标准|PVC|PP|角度|容积|色彩|自动|流量|外径|轴位|折射率|帧率|柱镜|振幅|磁场|镜片|防漏|强度|允差|心率|倍数|瞳距|底座|色泽|噪音|间距|材质|材料|表面|频率|阻抗|浓度|兼容|防尘|防水|内径|实时|一次性|误差|性能|距离|精确|温度|超温|范围|跟踪|对比度|亮度|[横纵]向|均压|负压|正压|可调|设定值|功能|检测|高度|厚度|宽度|深度|[单双多]通道|效果|指数|模式|尺寸|重量|峰值|谷值|容量|寿命|稳定性|高温|信号|电源|电流|转换率|效率|释放量|转速|离心力|向心力|弯曲|电压|功率|气量|国标|标准协议|灵敏度|最大值|最小值|耐磨|波形|高压|性强|工艺|光源|低压|压力|压强|速度|湿度|重量|毛重|[MLX大中小]+码|净重|颜色|[红橙黄绿青蓝紫]色|不锈钢|输入|输出|噪声|认证|配置"
+not_meter_pattern = "投标报价|中标金额|商务部分|公章|分值构成|业绩|详见|联系人|联系电话|合同价|金额|采购预算|资金来源|费用|质疑|评审因素|评审标准|商务资信|商务评分|专家论证意见|评标方法|代理服务费|售后服务|评分类型|评分项目|预算金额|得\d+分|项目金额|详见招标文件|乙方"
+
+
+def getTrs(tbody):
+    # get all the tr rows
+    trs = []
+    if tbody.name=="table":
+        body = tbody.find("tbody",recursive=False)
+        if body is not None:
+            tbody = body
+    objs = tbody.find_all(recursive=False)
+    for obj in objs:
+        if obj.name=="tr":
+            trs.append(obj)
+        if obj.name=="tbody" or obj.name=="table":
+            for tr in obj.find_all("tr",recursive=False):
+                trs.append(tr)
+    return trs
+
+def fixSpan(tbody):
+    # handle cell completion for colspan/rowspan
+    #trs = tbody.findChildren('tr', recursive=False)
+
+    trs = getTrs(tbody)
+    ths_len = 0
+    ths = list()
+    trs_set = set()
+    # do column (colspan) completion before row (rowspan) completion, otherwise the table may be parsed incorrectly
+    # iterate over every tr
+
+    for indtr, tr in enumerate(trs):
+        ths_tmp = tr.findChildren('th', recursive=False)
+        # do not complete trs that contain nested tables
+        if len(tr.findChildren('table'))>0:
+            continue
+        if len(ths_tmp) > 0:
+            ths_len = ths_len + len(ths_tmp)
+            for th in ths_tmp:
+                ths.append(th)
+            trs_set.add(tr)
+        # iterate over the elements of each row
+        tds = tr.findChildren(recursive=False)
+        for indtd, td in enumerate(tds):
+            # if colspan is set, fill the following positions of the same row
+            if 'colspan' in td.attrs:
+                if str(re.sub("[^0-9]","",str(td['colspan'])))!="":
+                    col = int(re.sub("[^0-9]","",str(td['colspan'])))
+                    if col<100 and len(td.get_text())<1000:
+                        td['colspan'] = 1
+                        for i in range(1, col, 1):
+                            td.insert_after(copy.copy(td))
+
+    for indtr, tr in enumerate(trs):
+        ths_tmp = tr.findChildren('th', recursive=False)
+        # do not complete trs that contain nested tables
+        if len(tr.findChildren('table'))>0:
+            continue
+        if len(ths_tmp) > 0:
+            ths_len = ths_len + len(ths_tmp)
+            for th in ths_tmp:
+                ths.append(th)
+            trs_set.add(tr)
+        # iterate over the elements of each row
+        tds = tr.findChildren(recursive=False)
+        for indtd, td in enumerate(tds):
+            # if rowspan is set, fill the same position in the following rows
+            if 'rowspan' in td.attrs:
+                if str(re.sub("[^0-9]","",str(td['rowspan'])))!="":
+                    row = int(re.sub("[^0-9]","",str(td['rowspan'])))
+                    td['rowspan'] = 1
+                    for i in range(1, row, 1):
+                        # get all tds of the next row and insert at the corresponding position
+                        if indtr+i<len(trs):
+                            tds1 = trs[indtr + i].findChildren(['td','th'], recursive=False)
+                            if len(tds1) >= (indtd) and len(tds1)>0:
+                                if indtd > 0:
+                                    tds1[indtd - 1].insert_after(copy.copy(td))
+                                else:
+                                    tds1[0].insert_before(copy.copy(td))
+                            elif indtd-2>0 and len(tds1) > 0 and len(tds1) == indtd - 1:  # fix tables whose last column was not completed
+                                tds1[indtd-2].insert_after(copy.copy(td))
+def getTable(tbody):
+    #trs = tbody.findChildren('tr', recursive=False)
+    fixSpan(tbody)
+    trs = getTrs(tbody)
+    inner_table = []
+    for tr in trs:
+        tr_line = []
+        tds = tr.findChildren(['td','th'], recursive=False)
+        if len(tds)==0:
+            tr_line.append([re.sub('\xa0','',tr.get_text()),0]) # 2021/12/21 fix data loss for rows without td
+        for td in tds:
+            tr_line.append([re.sub('\xa0','',td.get_text()),0])
+            #tr_line.append([td.get_text(),0])
+        inner_table.append(tr_line)
+    return inner_table
+
+class Sentence2():
+    def __init__(self,text,sentence_index,wordOffset_begin,wordOffset_end):
+        self.name = 'sentence2'
+        self.text = text
+        self.sentence_index = sentence_index
+        self.wordOffset_begin = wordOffset_begin
+        self.wordOffset_end = wordOffset_end
+
+    def get_text(self):
+        return self.text
+
+class ParseDocument():
+
+    def __init__(self,_html,auto_merge_table=True,list_obj = []):
+        if _html is None:
+            _html = ""
+        self.html = _html
+
+        # self.soup = BeautifulSoup(self.html,"lxml")
+        # self.soup = BeautifulSoup(self.html,"html.parser")
+        self.auto_merge_table = auto_merge_table
+
+        if list_obj:
+            self.list_obj = list_obj
+        else:
+            self.soup = BeautifulSoup(self.html, "lxml")
+            _body = self.soup.find("body")
+            if _body is not None:
+                self.soup = _body
+            self.list_obj = self.get_soup_objs(self.soup)
+
+            # self.list_obj = [it.get_text().strip().replace(' ', '') for it in self.list_obj]
+            # self.list_obj = [Sentence2(text, 1,1,5) for text in self.list_obj]
+
+        # for obj in self.list_obj:
+        #     print("obj",obj.get_text()[:20])
+
+        self.tree = self.buildParsetree(self.list_obj,[],auto_merge_table)
+
+
+        # # recognize the outline tree
+        # if self.parseTree:
+        #     self.parseTree.printParseTree()
+        # self.print_tree(self.tree,"-|")
+
+    def get_soup_objs(self,soup,list_obj=None):
+        if list_obj is None:
+            list_obj = []
+        childs = soup.find_all(recursive=False)
+        for _obj in childs:
+            childs1 = _obj.find_all(recursive=False)
+            if len(childs1)==0 or len(_obj.get_text())<40 or _obj.name=="table":
+                list_obj.append(_obj)
+            elif _obj.name=="p":
+                list_obj.append(_obj)
+            else:
+                self.get_soup_objs(_obj,list_obj)
+        return list_obj
+
+    def fix_tree(self,_product):
+        products = extract_products(self.tree,_product)
+        if len(products)>0:
+            self.tree = self.buildParsetree(self.list_obj,products,self.auto_merge_table)
+
+    def print_tree(self,tree,append=""):
+        self.set_tree_id = set()
+        if append=="":
+            for t in tree:
+                logger.debug("%s text:%s title:%s title_text:%s before:%s after%s product:%s"%("==>",t["text"][:50],t["sentence_title"],t["sentence_title_text"],t["title_before"],t["title_after"],t["has_product"]))
+
+        for t in tree:
+            _id = id(t)
+            if _id in self.set_tree_id:
+                continue
+            self.set_tree_id.add(_id)
+            logger.info("%s text:%s title:%s title_text:%s before:%s after%s product:%s"%(append,t["text"][:50],t["sentence_title"],t["sentence_title_text"],t["title_before"],t["title_after"],t["has_product"]))
+            childs = t["child_title"]
+            self.print_tree(childs,append=append+"-|")
+
+    def is_title_first(self,title):
+        if title in ("一","1","Ⅰ","a","A"):
+            return True
+        return False
+
+    def find_title_by_pattern(self,_text,_pattern="(^|★|▲|:|:|\s+)(?P<title_1>(?P<title_1_index_0_0>第?)(?P<title_1_index_1_1>[一二三四五六七八九十ⅠⅡⅢⅣⅤⅥⅦⅧⅨⅩⅪⅫ]+)(?P<title_1_index_2_0>[、章册包标部.::、、]+))|" \
+                                             "([\s★▲\*]*)(?P<title_3>(?P<title_3_index_0_0>[^一二三四五六七八九十\dⅠⅡⅢⅣⅤⅥⅦⅧⅨⅩⅪⅫ]{,3}?)(?P<title_3_index_0_1>[ⅠⅡⅢⅣⅤⅥⅦⅧⅨⅩⅪⅫ]+)(?P<title_3_index_0_2>[、章册包标部.::、、]+))|" \
+                                             "([\s★▲\*]*)(?P<title_4>(?P<title_4_index_0_0>[^一二三四五六七八九十\dⅠⅡⅢⅣⅤⅥⅦⅧⅨⅩⅪⅫ]{,3}?第?)(?P<title_4_index_1_1>[一二三四五六七八九十]+)(?P<title_4_index_2_0>[节章册部\.::、、]+))|" \
+                                             "([\s★▲\*]*)(?P<title_5>(?P<title_5_index_0_0>^)(?P<title_5_index_1_1>[一二三四五六七八九十]+)(?P<title_5_index_2_0>)[^一二三四五六七八九十节章册部\.::、、])|" \
+                                             "([\s★▲\*]*)(?P<title_12>(?P<title_12_index_0_0>[^一二三四五六七八九十\dⅠⅡⅢⅣⅤⅥⅦⅧⅨⅩⅪⅫ]{,3}?\d{1,2}[\..、\s\-]\d{1,2}[\..、\s\-]\d{1,2}[\..、\s\-]\d{1,2}[\..、\s\-])(?P<title_12_index_1_1>\d{1,2})(?P<title_12_index_2_0>[\..、\s\-]?))|"\
+                                             "([\s★▲\*]*)(?P<title_11>(?P<title_11_index_0_0>[^一二三四五六七八九十\dⅠⅡⅢⅣⅤⅥⅦⅧⅨⅩⅪⅫ]{,3}?\d{1,2}[\..、\s\-]\d{1,2}[\..、\s\-]\d{1,2}[\..、\s\-])(?P<title_11_index_1_1>\d{1,2})(?P<title_11_index_2_0>[\..、\s\-]?))|" \
+                                             "([\s★▲\*]*)(?P<title_10>(?P<title_10_index_0_0>[^一二三四五六七八九十\dⅠⅡⅢⅣⅤⅥⅦⅧⅨⅩⅪⅫ]{,3}?\d{1,2}[\..、\s\-]\d{1,2}[\..、\s\-])(?P<title_10_index_1_1>\d{1,2})(?P<title_10_index_2_0>[\..、\s\-]?))|" \
+                                             "([\s★▲\*]*)(?P<title_7>(?P<title_7_index_0_0>[^一二三四五六七八九十\dⅠⅡⅢⅣⅤⅥⅦⅧⅨⅩⅪⅫ]{,3}?\d{1,2}[\..\s\-])(?P<title_7_index_1_1>\d{1,2})(?P<title_7_index_2_0>[\..包标::、\s\-]*))|" \
+                                             "(^[\s★▲\*]*)(?P<title_6>(?P<title_6_index_0_0>[^一二三四五六七八九十\dⅠⅡⅢⅣⅤⅥⅦⅧⅨⅩⅪⅫ]{,3}?包?)(?P<title_6_index_0_1>\d{1,2})(?P<title_6_index_2_0>[\..、\s\-包标]*))|" \
+                                             "([\s★▲\*]*)(?P<title_15>(?P<title_15_index_0_0>[^一二三四五六七八九十\dⅠⅡⅢⅣⅤⅥⅦⅧⅨⅩⅪⅫ]{,3}?[((]?)(?P<title_15_index_1_1>\d{1,2})(?P<title_15_index_2_0>[))包标\..::、]+))|" \
+                                             "([\s★▲\*]+)(?P<title_17>(?P<title_17_index_0_0>[^一二三四五六七八九十\dⅠⅡⅢⅣⅤⅥⅦⅧⅨⅩⅪⅫ]{,3}?[((]?)(?P<title_17_index_1_1>[a-zA-Z]+)(?P<title_17_index_2_0>[))包标\..::、]+))|" \
+                                             "([\s★▲\*]*)(?P<title_19>(?P<title_19_index_0_0>[^一二三四五六七八九十\dⅠⅡⅢⅣⅤⅥⅦⅧⅨⅩⅪⅫ]{,3}?[((]?)(?P<title_19_index_1_1>[一二三四五六七八九十ⅠⅡⅢⅣⅤⅥⅦⅧⅨⅩⅪⅫ]+)(?P<title_19_index_2_0>[))]))"
+                              ):
+        _se = re.search(_pattern,_text)
+        groups = []
+        if _se is not None:
+            _gd = _se.groupdict()
+            for k,v in _gd.items():
+                if v is not None:
+                    groups.append((k,v))
+        if len(groups):
+            # groups.sort(key=lambda x:x[0])
+            return groups
+        return None
+
+    def make_increase(self,_sort,_title,_add=1):
+        if len(_title)==0 and _add==0:
+            return ""
+        if len(_title)==0 and _add==1:
+            return _sort[0]
+        _index = _sort.index(_title[-1])
+        next_index = (_index+_add)%len(_sort)
+        next_chr = _sort[next_index]
+        if _index==len(_sort)-1:
+            _add = 1
+        else:
+            _add = 0
+        return next_chr+self.make_increase(_sort,_title[:-1],_add)
+
+
+    def get_next_title(self,_title):
+        if re.search("^\d+$",_title) is not None:
+            return str(int(_title)+1)
+        if re.search("^[一二三四五六七八九十百]+$",_title) is not None:
+            if _title[-1]=="十":
+                return _title+"一"
+            if _title[-1]=="百":
+                return _title+"零一"
+
+            if _title[-1]=="九":
+                if len(_title)==1:
+                    return "十"
+                if len(_title)==2:
+                    if _title[0]=="十":
+                        return "二十"
+                if len(_title)==3:
+                    if _title[0]=="九":
+                        return "一百"
+                    else:
+                        _next_title = self.make_increase(['一','二','三','四','五','六','七','八','九','十'],re.sub("[十百]",'',_title[0]))
+                        return _next_title+"十"
+
+            _next_title = self.make_increase(['一','二','三','四','五','六','七','八','九','十'],re.sub("[十百]",'',_title))
+            _next_title = list(_next_title)
+            _next_title.reverse()
+            if _next_title[-1]!="十":
+                if len(_next_title)>=2:
+                    _next_title.insert(-1,'十')
+            if len(_next_title)>=4:
+                _next_title.insert(-3,'百')
+            if _title[0]=="十":
+                if _next_title=="十":
+                    _next_title = ["二","十"]
+                _next_title.insert(0,"十")
+            _next_title = "".join(_next_title)
+            return _next_title
+        if re.search("^[a-z]+$",_title) is not None:
+            _next_title = self.make_increase([chr(i+ord('a')) for i in range(26)],_title)
+            _next_title = list(_next_title)
+            _next_title.reverse()
+            return "".join(_next_title)
+        if re.search("^[A-Z]+$",_title) is not None:
+            _next_title = self.make_increase([chr(i+ord('A')) for i in range(26)],_title)
+            _next_title = list(_next_title)
+            _next_title.reverse()
+            return "".join(_next_title)
+        if re.search("^[ⅠⅡⅢⅣⅤⅥⅦⅧⅨⅩⅪⅫ]$",_title) is not None:
+            _sort = ["Ⅰ","Ⅱ","Ⅲ","Ⅳ","Ⅴ","Ⅵ","Ⅶ","Ⅷ","Ⅸ","Ⅹ","Ⅺ","Ⅻ"]
+            _index = _sort.index(_title)
+            if _index<len(_sort)-1:
+                return _sort[_index+1]
+            return None
+
+    def count_title_before(self,list_obj):
+        dict_before = {}
+        dict_sentence_count = {}
+        illegal_sentence = set()
+        for obj_i in range(len(list_obj)):
+            obj = list_obj[obj_i]
+            _type = "sentence"
+            _text = obj.text.strip()
+            if obj.name=="table":
+                _type = "table"
+                _text = str(obj)
+            _append = False
+
+
+            if _type=="sentence":
+                if len(_text)>10 and len(_text)<100:
+                    if _text not in dict_sentence_count:
+                        dict_sentence_count[_text] = 0
+                    dict_sentence_count[_text] += 1
+                    if re.search("\d+页",_text) is not None:
+                        illegal_sentence.add(_text)
+                elif len(_text)<10:
+                    if re.search("第\d+页",_text) is not None:
+                        illegal_sentence.add(_text)
+
+                sentence_groups = self.find_title_by_pattern(_text[:10])
+                if sentence_groups:
+                    # print("sentence_groups",sentence_groups)
+                    sentence_title = sentence_groups[0][0]
+                    sentence_title_text = sentence_groups[0][1]
+                    title_index = sentence_groups[-2][1]
+                    title_before = sentence_groups[1][1].replace("(","(").replace(":",":").replace(":",";").replace(",",".").replace(",",".").replace("、",".")
+                    title_after = sentence_groups[-1][1].replace(")",")").replace(":",":").replace(":",";").replace(",",".").replace(",",".").replace("、",".")
+                    next_index = self.get_next_title(title_index)
+                    if title_before not in dict_before:
+                        dict_before[title_before] = 0
+                    dict_before[title_before] += 1
+
+        for k,v in dict_sentence_count.items():
+            if v>10:
+                illegal_sentence.add(k)
+        return dict_before,illegal_sentence
+
+    def is_page_no(self,sentence):
+        if len(sentence)<10:
+            if re.search("\d+页|^\-\d+\-$",sentence) is not None:
+                return True
+
+    def block_tree(self,childs):
+        for child in childs:
+
+            if not child["block"]:
+                child["block"] = True
+                childs2 = child["child_title"]
+                self.block_tree(childs2)
+
+
+    def buildParsetree(self,list_obj,products=[],auto_merge_table=True):
+
+        self.parseTree = None
+        trees = []
+        list_length = []
+        for obj in list_obj[:200]:
+            if obj.name!="table":
+                list_length.append(len(obj.get_text()))
+        if len(list_length)>0:
+            max_length = max(list_length)
+        else:
+            max_length = 40
+        max_length = min(max_length,40)
+
+        logger.debug("%s:%d"%("max_length",max_length))
+
+
+        list_data = []
+        last_table_index = None
+        last_table_columns = None
+        last_table = None
+        dict_before,illegal_sentence = self.count_title_before(list_obj)
+        for obj_i in range(len(list_obj)):
+            obj = list_obj[obj_i]
+
+            # logger.debug("==obj %s"%obj.text[:20])
+
+            _type = "sentence"
+            _text = standard_product(obj.text)
+            if obj.name=="table":
+                _type = "table"
+                _text = standard_product(str(obj))
+            _append = False
+            sentence_title = None
+            sentence_title_text = None
+            sentence_groups = None
+            title_index = None
+            next_index = None
+            parent_title = None
+            title_before = None
+            title_after = None
+            title_next = None
+            childs = []
+            # new
+            sentence_index = obj.sentence_index
+            wordOffset_begin = obj.wordOffset_begin
+            wordOffset_end = obj.wordOffset_end
+
+            list_table = None
+            block = False
+
+            has_product = False
+
+            if _type=="sentence":
+                if _text in illegal_sentence:
+                    continue
+
+
+                sentence_groups = self.find_title_by_pattern(_text[:10])
+                if sentence_groups:
+                    title_before = standard_title_context(sentence_groups[1][1])
+                    title_after = sentence_groups[-1][1]
+                    sentence_title_text = sentence_groups[0][1]
+                    other_text = _text.replace(sentence_title_text,"")
+                    if (title_before in dict_before and dict_before[title_before]>1) or title_after!="":
+                        sentence_title = sentence_groups[0][0]
+
+                        title_index = sentence_groups[-2][1]
+                        next_index = self.get_next_title(title_index)
+
+                        other_text = _text.replace(sentence_title_text,"")
+
+                        for p in products:
+                            if other_text.strip()==p.strip():
+                                has_product = True
+
+                    else:
+                        _fix = False
+
+                        for p in products:
+                            if other_text.strip()==p.strip():
+                                title_before = "=产品"
+                                sentence_title = "title_0"
+                                sentence_title_text = p
+                                title_index = "0"
+                                title_after = "产品="
+                                next_index = "0"
+                                _fix = True
+                                has_product = True
+                                break
+                        if not _fix:
+                            title_before = None
+                            title_after = None
+                            sentence_title_text = None
+                else:
+                    if len(_text)<40 and re.search(_param_pattern,_text) is not None:
+                        for p in products:
+                            if _text.find(p)>=0:
+                                title_before = "=产品"
+                                sentence_title = "title_0"
+                                sentence_title_text = p
+                                title_index = "0"
+                                title_after = "产品="
+                                next_index = "0"
+                                _fix = True
+                                has_product = True
+                                break
+
+            if _type=="sentence":
+                if sentence_title is None and len(list_data)>0 and list_data[-1]["sentence_title"] is not None and list_data[-1]["line_width"]>=max_length*0.6:
+                    list_data[-1]["text"] += _text
+                    list_data[-1]["line_width"] = len(_text)
+                    _append = True
+                elif sentence_title is None and len(list_data)>0 and _type==list_data[-1]["type"]:
+                    if list_data[-1]["line_width"]>=max_length*0.7:
+                        list_data[-1]["text"] += _text
+                        list_data[-1]["line_width"] = len(_text)
+                        _append = True
+
+            if _type=="table":
+                _soup = BeautifulSoup(_text,"lxml")
+                _table = _soup.find("table")
+                if _table is not None:
+                    list_table = getTable(_table)
+                    if len(list_table)==0:
+                        continue
+                    table_columns = len(list_table[0])
+
+                    if auto_merge_table:
+                        if last_table_index is not None and abs(obj_i-last_table_index)<=2 and last_table_columns is not None and last_table_columns==table_columns:
+                            if last_table is not None:
+                                trs = getTrs(_table)
+                                last_tbody = BeautifulSoup(last_table["text"],"lxml")
+                                _table = last_tbody.find("table")
+                                last_trs = getTrs(_table)
+                                _append = True
+
+                                for _line in list_table:
+                                    last_table["list_table"].append(_line)
+                                if len(last_trs)>0:
+                                    for _tr in trs:
+                                        last_trs[-1].insert_after(copy.copy(_tr))
+                                    last_table["text"] = re.sub("</?html>|</?body>","",str(last_tbody))
+
+                                last_table_index = obj_i
+                                last_table_columns = len(list_table[-1])
+
+
+            if not _append:
+                _data = {"type":_type, "text":_text,"list_table":list_table,"line_width":len(_text),"sentence_title":sentence_title,"title_index":title_index,
+                         "sentence_title_text":sentence_title_text,"sentence_groups":sentence_groups,"parent_title":parent_title,
+                         "child_title":childs,"title_before":title_before,"title_after":title_after,"title_next":title_next,"next_index":next_index,
+                         "block":block,"has_product":has_product,
+                         "sentence_index":sentence_index,"wordOffset_begin":wordOffset_begin,"wordOffset_end":wordOffset_end
+                        }
+
+                if _type=="table":
+                    last_table = _data
+                    last_table_index = obj_i
+                    if list_table:
+                        last_table_columns = len(list_table[-1])
+
+                if sentence_title is not None:
+                    if len(list_data)>0:
+                        if self.is_title_first(title_index):
+                            for i in range(1,len(list_data)+1):
+                                _d = list_data[-i]
+                                if _d["sentence_title"] is not None:
+                                    _data["parent_title"] = _d
+                                    _d["child_title"].append(_data)
+                                    break
+                        else:
+                            _find = False
+                            for i in range(1,len(list_data)+1):
+                                if _find:
+                                    break
+                                _d = list_data[-i]
+                                if _d.get("sentence_title")==sentence_title and title_before==_d["title_before"] and title_after==_d["title_after"]:
+                                    if _d["next_index"]==title_index and _d["title_next"] is None and not _d["block"]:
+                                        _data["parent_title"] = _d["parent_title"]
+                                        _d["title_next"] = _data
+                                        if len(_d["child_title"])>0:
+                                            _d["child_title"][-1]["title_next"] = ""
+                                            self.block_tree(_d["child_title"])
+                                        if _d["parent_title"] is not None:
+                                            _d["parent_title"]["child_title"].append(_data)
+                                        _find = True
+                                        break
+                            for i in range(1,len(list_data)+1):
+                                if _find:
+                                    break
+                                _d = list_data[-i]
+                                if i==1 and not _d["block"] and _d.get("sentence_title")==sentence_title and title_before==_d["title_before"] and title_after==_d["title_after"]:
+                                    _data["parent_title"] = _d["parent_title"]
+                                    _d["title_next"] = _data
+                                    if len(_d["child_title"])>0:
+                                        _d["child_title"][-1]["title_next"] = ""
+                                        self.block_tree(_d["child_title"])
+                                    if _d["parent_title"] is not None:
+                                        _d["parent_title"]["child_title"].append(_data)
+                                    _find = True
+                                    break
+                            title_before = standard_title_context(title_before)
+                            title_after = standard_title_context(title_after)
+                            for i in range(1,len(list_data)+1):
+                                if _find:
+                                    break
+                                _d = list_data[-i]
+                                if _d.get("sentence_title")==sentence_title and title_before==standard_title_context(_d["title_before"]) and title_after==standard_title_context(_d["title_after"]):
+                                    if _d["next_index"]==title_index and _d["title_next"] is None and not _d["block"]:
+                                        _data["parent_title"] = _d["parent_title"]
+                                        _d["title_next"] = _data
+                                        if len(_d["child_title"])>0:
+                                            _d["child_title"][-1]["title_next"] = ""
+                                            self.block_tree(_d["child_title"])
+                                        if _d["parent_title"] is not None:
+                                            _d["parent_title"]["child_title"].append(_data)
+                                        _find = True
+                                        break
+                            for i in range(1,len(list_data)+1):
+                                if _find:
+                                    break
+                                _d = list_data[-i]
+                                if not _d["block"] and _d.get("sentence_title")==sentence_title and title_before==standard_title_context(_d["title_before"]) and title_after==standard_title_context(_d["title_after"]):
+                                    _data["parent_title"] = _d["parent_title"]
+                                    _d["title_next"] = _data
+                                    if len(_d["child_title"])>0:
+                                        _d["child_title"][-1]["title_next"] = ""
+                                        # self.block_tree(_d["child_title"])
+                                    if _d["parent_title"] is not None:
+                                        _d["parent_title"]["child_title"].append(_data)
+                                    _find = True
+                                    break
+                            for i in range(1,min(len(list_data)+1,20)):
+                                if _find:
+                                    break
+                                _d = list_data[-i]
+                                if not _d["block"] and _d.get("sentence_title")==sentence_title and title_before==standard_title_context(_d["title_before"]):
+                                    _data["parent_title"] = _d["parent_title"]
+                                    _d["title_next"] = _data
+                                    if len(_d["child_title"])>0:
+                                        _d["child_title"][-1]["title_next"] = ""
+                                        # self.block_tree(_d["child_title"])
+                                    if _d["parent_title"] is not None:
+                                        _d["parent_title"]["child_title"].append(_data)
+                                    _find = True
+                                    break
+
+                            if not _find:
+                                if len(list_data)>0:
+                                    for i in range(1,len(list_data)+1):
+                                        _d = list_data[-i]
+                                        if _d.get("sentence_title") is not None:
+                                            _data["parent_title"] = _d
+                                            _d["child_title"].append(_data)
+                                            break
+
+
+                else:
+                    if len(list_data)>0:
+                        for i in range(1,len(list_data)+1):
+                            _d = list_data[-i]
+                            if _d.get("sentence_title") is not None:
+                                _data["parent_title"] = _d
+                                _d["child_title"].append(_data)
+                                break
+
+                list_data.append(_data)
+
+        for _data in list_data:
+
+            childs = _data["child_title"]
+
+            for c_i in range(len(childs)):
+                cdata = childs[c_i]
+                if cdata["has_product"]:
+                    continue
+                else:
+                    if c_i>0:
+                        last_cdata = childs[c_i-1]
+                        if cdata["sentence_title"] is not None and last_cdata["sentence_title"] is not None and last_cdata["title_before"]==cdata["title_before"] and last_cdata["title_after"]==cdata["title_after"] and last_cdata["has_product"]:
+                            cdata["has_product"] = True
+                    if c_i<len(childs)-1:
+                        last_cdata = childs[c_i+1]
+                        if cdata["sentence_title"] is not None and last_cdata["sentence_title"] is not None and last_cdata["title_before"]==cdata["title_before"] and last_cdata["title_after"]==cdata["title_after"] and last_cdata["has_product"]:
+                            cdata["has_product"] = True
+            for c_i in range(len(childs)):
+                cdata = childs[len(childs)-1-c_i]
+                if cdata["has_product"]:
+                    continue
+                else:
+                    if c_i>0:
+                        last_cdata = childs[c_i-1]
+                        if cdata["sentence_title"] is not None and last_cdata["sentence_title"] is not None and last_cdata["title_before"]==cdata["title_before"] and last_cdata["title_after"]==cdata["title_after"] and last_cdata["has_product"]:
+                            cdata["has_product"] = True
+                    if c_i<len(childs)-1:
+                        last_cdata = childs[c_i+1]
+                        if cdata["sentence_title"] is not None and last_cdata["sentence_title"] is not None and last_cdata["title_before"]==cdata["title_before"] and last_cdata["title_after"]==cdata["title_after"] and last_cdata["has_product"]:
+                            cdata["has_product"] = True
+
+
+        return list_data
+
+
+def standard_title_context(_title_context):
+    return _title_context.replace("(","(").replace(")",")").replace(":",":").replace(":",";").replace(",",".").replace(",",".").replace("、",".").replace(".",".")
+
+def standard_product(sentence):
+    return sentence.replace("(","(").replace(")",")")
+
+def extract_products(list_data,_product,_param_pattern = "产品名称|设备材料|采购内存|标的名称|采购内容|(标的|维修|系统|报价构成|商品|产品|物料|物资|货物|设备|采购品|采购条目|物品|材料|印刷品?|采购|物装|配件|资产|耗材|清单|器材|仪器|器械|备件|拍卖物|标的物|物件|药品|药材|药械|货品|食品|食材|品目|^品名|气体|标项|分项|项目|计划|包组|标段|[分子]?包|子目|服务|招标|中标|成交|工程|招标内容)[\))的]?([、\w]{,4}名称|内容|描述)|标的|标项|项目$|商品|产品|物料|物资|货物|设备|采购品|采购条目|物品|材料|印刷品|物装|配件|资产|招标内容|耗材|清单|器材|仪器|器械|备件|拍卖物|标的物|物件|药品|药材|药械|货品|食品|食材|菜名|^品目$|^品名$|^名称|^内容$"):
+    _product = standard_product(_product)
+    list_result = []
+    list_table_products = []
+    for _data_i in range(len(list_data)):
+        _data = list_data[_data_i]
+        _type = _data["type"]
+        _text = _data["text"]
+
+        if _type=="table":
+            list_table = _data["list_table"]
+            if list_table is None:
+                continue
+            _check = True
+            max_length = max([len(a) for a in list_table])
+            min_length = min([len(a) for a in list_table])
+            if min_length<max_length/2:
+                continue
+            list_head_index = []
+            _begin_index = 0
+            head_cell_text = ""
+            for line_i in range(len(list_table[:2])):
+                line = list_table[line_i]
+                line_text = ",".join([cell[0] for cell in line])
+                for cell_i in range(len(line)):
+                    cell = line[cell_i]
+                    cell_text = cell[0]
+                    if len(cell_text)<10 and re.search(_param_pattern,cell_text) is not None and re.search("单价|数量|预算|限价|总价|品牌|规格|型号|用途|要求|采购量",line_text) is not None:
+                        _begin_index = line_i+1
+                        list_head_index.append(cell_i)
+
+            for line_i in range(len(list_table)):
+                line = list_table[line_i]
+                for cell_i in list_head_index:
+                    if cell_i>=len(line):
+                        continue
+                    cell = line[cell_i]
+                    cell_text = cell[0]
+                    head_cell_text += cell_text
+
+            # print("===head_cell_text",head_cell_text)
+            if re.search("招标人|采购人|项目编号|项目名称|金额|^\d+$",head_cell_text) is not None:
+                list_head_index = []
+
+            for line in list_table:
+                line_text = ",".join([cell[0] for cell in line])
+                for cell_i in range(len(line)):
+                    cell = line[cell_i]
+                    cell_text = cell[0]
+                    if cell_text is not None and _product is not None and len(cell_text)<len(_product)*10 and cell_text.find(_product)>=0 and re.search("单价|数量|总价|规格|品牌|型号|用途|要求|采购量",line_text) is not None:
+                        list_head_index.append(cell_i)
+
+            list_head_index = list(set(list_head_index))
+            if len(list_head_index)>0:
+                has_number = False
+                for cell_i in list_head_index:
+                    table_products = []
+
+                    for line_i in range(_begin_index,len(list_table)):
+                        line = list_table[line_i]
+
+                        for _i in range(len(line)):
+                            cell = line[_i]
+                            cell_text = cell[0]
+                            if re.search("^\d+$",cell_text) is not None:
+                                has_number = True
+
+                        if cell_i>=len(line):
+                            continue
+                        cell = line[cell_i]
+                        cell_text = cell[0]
+                        if re.search(_param_pattern,cell_text) is None or has_number:
+                            if re.search("^[\da-zA-Z]+$",cell_text) is None:
+                                table_products.append(cell_text)
+
+                    if len(table_products)>0:
+                        logger.debug("table products %s"%(str(table_products)))
+                        if min([len(x) for x in table_products])>0 and max([len(x) for x in table_products])<=30:
+                            if re.search("招标人|代理人|预算|数量|交货期|品牌|产地","".join(table_products)) is None:
+                                list_table_products.append(table_products)
+    _find = False
+    for table_products in list_table_products:
+        for _p in table_products:
+            if is_similar(_product,_p,90):
+                _find = True
+                logger.debug("similar table_products %s"%(str(table_products)))
+                list_result = list(set([a for a in table_products if len(a)>1 and len(a)<20 and re.search("费用|预算|合计|金额|万元|运费|^其他$",a) is None]))
+                break
+    if not _find:
+        for table_products in list_table_products:
+            list_result.extend(table_products)
+        list_result = list(set([a for a in list_result if len(a)>1 and len(a)<30 and re.search("费用|预算|合计|金额|万元|运费",a) is None]))
+    return list_result
+
+
+def get_childs(childs):
+    list_data = []
+    for _child in childs:
+        list_data.append(_child)
+        childs2 = _child.get("child_title",[])
+
+        if len(childs2)>0:
+            for _child2 in childs2:
+                list_data.extend(get_childs([_child2]))
+    return list_data
+
+def get_range_data_by_childs(list_data,childs):
+    range_data = []
+    list_child = get_childs(childs)
+    list_index = []
+    set_child = set([id(x) for x in list_child])
+    for _data_i in range(len(list_data)):
+        _data = list_data[_data_i]
+        _id = id(_data)
+        if _id in set_child:
+            list_index.append(_data_i)
+    if len(list_index)>0:
+        range_data = list_data[min(list_index):max(list_index)+1]
+    return range_data
+
+def get_correct_product(product,products):
+    list_data = []
+    for p in products:
+        is_sim = is_similar(product,p)
+        _d = {"product":p,"distance":abs(len(product)-len(p)),"is_sim":is_sim}
+        list_data.append(_d)
+    list_data.sort(key=lambda x:x["distance"])
+    for _d in list_data:
+        is_sim = _d["is_sim"]
+        if is_sim:
+            if len(_d["product"])>len(product) and _d["product"].find(product)>=0:
+                return product
+            return _d["product"]
+    return product
+
+def get_childs_text(childs,_product,products,is_begin=False,is_end=False):
+    _text = ""
+
+    end_next = False
+    for _child in childs:
+
+        child_text = _child.get("text")
+
+
+        if child_text.find(_product)>=0:
+            if not is_begin:
+                is_begin = True
+                if not end_next:
+                    if _child["sentence_title"] is not None and isinstance(_child["title_next"],dict) and _child["title_next"]["sentence_title"] is not None:
+                        end_next = True
+                        end_title = _child["title_next"]
+                        logger.debug("end_title %s "%end_title["text"])
+
+        logger.debug("%s-%s-%s"%("get_childs_text",child_text[:10],str(is_begin)))
+
+        for p in products:
+            if child_text.find(p)>=0 and is_similar(_product,p,90):
+                is_begin = True
+
+            if child_text.find(_product)<0  and not is_similar(_product,p,80) and  (child_text.find(p)>=0 or _child["has_product"]):
+                if is_begin:
+                    is_end = True
+                    logger.debug("%s-%s-%s"%("get_childs_text end",child_text[:10],p))
+                break
+        if re.search(end_pattern,child_text) is not None:
+            if is_begin:
+                is_end = True
+                logger.debug("%s-%s-%s"%("get_childs_text end",child_text[:10],str(is_end)))
+
+        if is_begin and is_end:
+            break
+
+        if is_begin:
+            _text += _child.get("text")+"\r\n"
+        childs2 = _child.get("child_title",[])
+
+
+        if len(childs2)>0:
+            for _child2 in childs2:
+                child_text,is_begin,is_end = get_childs_text([_child2],_product,products,is_begin)
+                if is_begin:
+                    _text += child_text
+                    if is_end:
+                        break
+
+        if end_next:
+            is_end = True
+
+    #     logger.debug("%s-%s-%s"%("get_childs_text1",_text,str(is_begin)))
+    # logger.debug("%s-%s-%s"%("get_childs_text2",_text,str(is_begin)))
+    return _text,is_begin,is_end
+
+def extract_parameters_by_tree(_product,products,list_data,_data_i,parent_title,list_result,):
+    _data = list_data[_data_i]
+    childs = _data.get("child_title",[])
+    if len(childs)>0:
+        child_text,_,_ = get_childs_text([_data],_product,products)
+        if len(child_text)>0:
+            logger.info("extract_type by_tree child_text:%s"%child_text)
+            list_result.append(child_text)
+    if parent_title is not None:
+        child_text,_,_ = get_childs_text([parent_title],_product,products)
+        if len(child_text)>0:
+            logger.info("extract_type by_tree child_text:%s"%child_text)
+            list_result.append(child_text)
+
+        childs = parent_title.get("child_title",[])
+        if len(childs)>0:
+
+            range_data = get_range_data_by_childs(list_data[_data_i:],childs)
+            p_text = ""
+            _find = False
+            end_id = id(_data["title_next"]) if _data["sentence_title"] is not None and isinstance(_data["title_next"],dict) and _data["title_next"]["sentence_title"] is not None else None
+            for pdata in range_data:
+                ptext = pdata["text"]
+                for p in products:
+                    if ptext.find(_product)<0 and  (ptext.find(p)>=0 or pdata["has_product"]):
+                        _find = True
+                        break
+                if re.search(end_pattern,ptext) is not None:
+                    _find = True
+                if _find:
+                    break
+                if id(pdata)==end_id:
+                    break
+                p_text += ptext+"\r\n"
+            if len(p_text)>0:
+                logger.debug("extract_type by parent range_text:%s"%p_text)
+                list_result.append(p_text)
+                return True
+    return False
+
+
+def get_table_pieces(_text,_product,products,list_result,_find):
+    _soup = BeautifulSoup(_text,"lxml")
+    _table = _soup.find("table")
+    if _table is not None:
+        trs = getTrs(_table)
+        list_trs = []
+        for tr in trs:
+            tr_text = tr.get_text()
+            if tr_text.find(_product)>=0:
+                _find = True
+
+            logger.debug("%s-%s"%("table_html_tr",tr_text))
+            for p in products:
+                if _find and p!=_product and tr_text.find(p)>=0:
+                    _find = False
+                    break
+            if re.search(end_pattern,tr_text) is not None:
+                _find = False
+                break
+            if _find:
+                list_trs.append(tr)
+        if len(list_trs)>0:
+            table_html = "<table>%s</table>"%("\r\n".join([str(a) for a in list_trs]))
+            logger.debug("extract_type table slices %s"%(table_html))
+            list_result.append(table_html)
+
+def extract_parameters_by_table(_product,products,_param_pattern,list_data,_data_i,list_result):
+    _data = list_data[_data_i]
+    _text = _data["text"]
+    list_table = _data["list_table"]
+    parent_title = _data["parent_title"]
+    if list_table is not None:
+        _check = True
+        max_length = max([len(a) for a in list_table])
+        min_length = min([len(a) for a in list_table])
+        text_line_first = ",".join(a[0] for a in list_table[0])
+        if max_length>10:
+            if min_length<max_length/2:
+                return
+        last_data = list_data[_data_i-1]
+        _flag = False
+        if last_data["type"]=="sentence" and last_data["text"].find(_product)>=0:
+            logger.debug("last sentence find product %s-%s"%(_product,last_data["text"]))
+            _flag = True
+        # print(text_line_first,"text_line_first",re.search(_param_pattern,text_line_first) is not None and text_line_first.find(_product)>=0)
+        if re.search(_param_pattern,text_line_first) is not None and text_line_first.find(_product)>=0:
+            _flag = True
+        if _flag:
+            if len(products)==0:
+                logger.debug("extract_type whole table by param and product %s"%(_text))
+                list_result.append(_text)
+            else:
+                for p in products:
+                    if p!=_product and _text.find(p)>=0:
+                        logger.debug("extract_type add all table failed %s-%s"%(_product,p))
+                        _flag = False
+                        break
+                if _flag:
+                    logger.debug("extract_type add all table succeed")
+                    get_table_pieces(_text,_product,products,list_result,True)
+        else:
+            list_head_index = []
+            for line in list_table[:2]:
+                for cell_i in range(len(line)):
+                    cell = line[cell_i]
+                    cell_text = cell[0]
+                    if len(cell_text)<20 and re.search(_param_pattern,cell_text) is not None:
+                        list_head_index.append(cell_i)
+            list_head_index = list(set(list_head_index))
+            for line in list_table:
+                for cell in line:
+                    cell_text = cell[0]
+                    if len(cell_text)>50 and len(re.findall(meter_pattern,cell_text))>5 and cell_text.find(_product)>=0:
+                        _f = True
+                        for cell in line:
+                            if not _f:
+                                break
+                            cell_text = cell[0]
+                            for p in products:
+                                if cell_text.find(p)>=0 and p!=_product:
+                                    _f = False
+                                    break
+                        if _f:
+                            logger.debug("extract_type param column %s"%(cell_text))
+                            list_result.append(cell_text)
+                    if len(cell_text)<len(_product)*10 and str(cell_text).find(_product)>=0:
+                        for _index in list_head_index:
+                            if _index>=len(line):
+                                continue
+                            _cell = line[_index]
+                            if len(_cell[0])>0:
+                                logger.info("%s-%s"%("extract_type add on table text:",_cell[0]))
+                                list_result.append(_cell[0])
+        if not _flag and (re.search(_param_pattern,_text) is not None or (parent_title is not None and re.search(_param_pattern,parent_title["text"]) is not None)) and _text.find(_product)>=0:
+            get_table_pieces(_text,_product,products,list_result,False)
+
+
+def extract_parameters_by_sentence(list_data,_data,_data_i,_product,products,list_result,is_project):
+    _text = _data["text"]
+    if _text.find(_product)>=0:
+        parent_title = _data.get("parent_title")
+        parent_text = ""
+        parent_parent_title = None
+        parent_parent_text = ""
+        parent_title_index = None
+        parent_parent_title_index = None
+        childs = get_childs([_data])
+
+        child_find = False
+        for c in childs:
+            if re.search(_param_pattern,c["text"]) is not None and len(c["text"])<30:
+                logger.debug("child text %s"%(c["text"]))
+                child_find = True
+                break
+
+        extract_text,_,_ = get_childs_text([_data],_product,products)
+        logger.debug("childs found extract_text %s %s"%(str(child_find),extract_text))
+        if child_find:
+            if len(extract_text)>0:
+                list_result.append(extract_text)
+        else:
+            limit_nums = len(_product)*2+5
+            if len(_product)<=3:
+                limit_nums += 6
+            if _text.find("数量")>=0:
+                limit_nums += 6
+            if len(_text)<=limit_nums and _data["sentence_title"] is not None:
+                if re.search(meter_pattern,extract_text) is not None:
+                    list_result.append(extract_text)
+            elif len(re.findall(meter_pattern,extract_text))>2:
+                list_result.append(extract_text)
+
+        if parent_title is not None:
+            parent_text = parent_title.get("text","")
+            parent_parent_title = parent_title.get("parent_title")
+            parent_title_index = parent_title["title_index"]
+            if parent_parent_title is not None:
+                parent_parent_text = parent_parent_title.get("text","")
+                parent_parent_title_index = parent_parent_title["title_index"]
+
+        _suit = False
+        if re.search(_param_pattern,_text) is not None and len(_text)<50:
+            _suit = True
+        if re.search(_param_pattern,parent_text) is not None and len(parent_text)<50:
+            _suit = True
+        if re.search(_param_pattern,parent_parent_text) is not None and len(parent_parent_text)<50:
+            _suit = True
+        if _suit:
+            logger.debug("extract_type sentence %s"%("extract_parameters_by_tree"))
+            if not extract_parameters_by_tree(_product,products,list_data,_data_i,parent_title,list_result):
+                logger.debug("extract_type sentence %s"%("extract_parameters_by_tree"))
+                extract_parameters_by_tree(_product,products,list_data,_data_i,parent_parent_title,list_result)
+
+    if re.search(_param_pattern,_text) is not None and len(_text)<50:
+        childs = _data["child_title"]
+        if len(childs)>0:
+            extract_text,_,_ = get_childs_text([_data],_product,products)
+            if len(extract_text)>0:
+                logger.debug("extract_type param-product %s"%(extract_text))
+                list_result.append(extract_text)
+            elif is_project:
+                extract_text,_,_ = get_childs_text([_data],_product,products,is_begin=True)
+                if len(extract_text)>0 and re.search(meter_pattern,extract_text) is not None:
+                    logger.debug("extract_type sentence is_project param-product is product %s"%(extract_text))
+                    list_result.append(extract_text)
+
+def getBestProductText(list_result,_product,products):
+    list_result.sort(key=lambda x:len(re.findall(meter_pattern+"|"+'[::;;]|\d+[%A-Za-z]+',BeautifulSoup(x,"lxml").get_text())), reverse=True)
+
+    logger.debug("+++++++++++++++++++++")
+    for i in range(len(list_result)):
+        logger.debug("result%d %s"%(i,list_result[i]))
+    logger.debug("+++++++++++++++++++++")
+
+    for i in range(len(list_result)):
+        _result = list_result[i]
+        _check = True
+        _result_text = BeautifulSoup(_result,"lxml").get_text()
+        _search = re.search("项目编号[::]|项目名称[::]|联合体投标|开户银行",_result)
+        if _search is not None:
+            logger.debug("result%d error illegal text %s"%(i,str(_search)))
+            _check = False
+        if not (len(_result_text)<1000 and _result[:6]!="<table"):
+            for p in products:
+                if _result_text.find(p)>0 and not (is_similar(_product,p,80) or p.find(_product)>=0 or _product.find(p)>=0):
+                    logger.debug("result%d error product scoss %s"%(i,p))
+                    _check = False
+        if len(_result_text)<100:
+            if re.search(meter_pattern,_result_text) is None:
+                logger.debug("result%d error text min count"%(i))
+                _check = False
+        if len(_result_text)>5000:
+            if len(_result_text)>10000:
+                logger.debug("result%d error text max count"%(i))
+                _check = False
+            elif len(re.findall(meter_pattern,_result_text))<10:
+                logger.debug("result%d error text max count less meter"%(i))
+                _check = False
+
+        list_find = list(set(re.findall(meter_pattern,_result_text)))
+
+        not_list_find = list(set(re.findall(not_meter_pattern,_result_text)))
+        _count = len(list_find)-len(not_list_find)
+        has_num = False
+        for _find in list_find:
+            if re.search('[0-9a-zA-Z]',_find) is not None:
+                has_num = True
+                break
+        if not(_count>=2 and has_num or _count>=5):
+            logger.debug("result%d error match not enough"%(i))
+            _check = False
+
+        if _check:
+            return _result
+
+def format_text(_result):
+    list_result = re.split("\r|\n",_result)
+    _result = ""
+    for _r in list_result:
+        if len(_r)>0:
+            _result+="%s\n"%(_r)
+    _result = '<div style="white-space:pre">%s</div>'%(_result)
+    return _result
+
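For clarity, a minimal sketch of what format_text returns (the parameter string below is made up for illustration): empty fragments from the \r/\n split are dropped and the remaining lines are wrapped in a pre-formatted div.

html_block = format_text("口径:DN50\r\n\r\n材质:304不锈钢")
# returns '<div style="white-space:pre">口径:DN50\n材质:304不锈钢\n</div>'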
+def extract_product_parameters(list_data,_product):
+
+    list_result = []
+    _product = standard_product(_product.strip())
+    products = extract_products(list_data,_product)
+
+    _product = get_correct_product(_product,products)
+    logger.debug("all products %s-%s"%(_product,str(products)))
+    is_project = False
+    if re.search("项目名称|采购项目",_product) is not None:
+        is_project = True
+
+    if len(products)==1 and is_similar(products[0],_product,90):
+        is_project = True
+    _find_count = 0
+    for _data_i in range(len(list_data)):
+        _data = list_data[_data_i]
+        _type = _data["type"]
+        _text = _data["text"]
+        if _type=="sentence":
+            if _text.find(_product)>=0:
+                _find_count += 1
+                if re.search("项目名称|采购项目",_text) is not None and re.search("等",_text) is not None:
+                    is_project = True
+            extract_parameters_by_sentence(list_data,_data,_data_i,_product,products,list_result,is_project)
+
+        elif _type=="table":
+            if _text.find(_product)>=0:
+                _find_count += 1
+            extract_parameters_by_table(_product,products,_param_pattern,list_data,_data_i,list_result)
+
+    _text = getBestProductText(list_result,_product,products)
+    return _text,_find_count
+
+
+if __name__ == '__main__':
+
+    filepath = "download/4597dcc128bfabc7584d10590ae50656.html"
+    _product = "彩色多普勒超声诊断仪"
+
+    _html = open(filepath, "r", encoding="utf8").read()
+
+    pd = ParseDocument(_html,False)
+
+    pd.fix_tree(_product)
+    list_data = pd.tree
+    pd.print_tree(list_data)
+
+    _text,_count = extract_product_parameters(list_data,_product)
+    logger.info("find count:%d"%(_count))
+    logger.info("extract_parameter_text::%s"%(_text))
+
+

+ 206 - 0
BiddingKG/dl/interface/outline_extractor.py

@@ -0,0 +1,206 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+
+"""
+@author: bidikeji
+@time: 2024/7/19 10:05
+"""
+import re
+from BiddingKG.dl.interface.htmlparser import ParseDocument,get_childs
+
+class Sentence2():
+    def __init__(self,text,sentence_index,wordOffset_begin,wordOffset_end):
+        self.name = 'sentence2'
+        self.text = text
+        self.sentence_index = sentence_index
+        self.wordOffset_begin = wordOffset_begin
+        self.wordOffset_end = wordOffset_end
+
+    def get_text(self):
+        return self.text
+
+def extract_sentence_list(sentence_list):
+    new_sentence2_list = []
+    new_sentence2_list_attach = []
+    for sentence in sentence_list:
+        sentence_index = sentence.sentence_index
+        sentence_text = sentence.sentence_text
+        begin_index = 0
+        end_index = 0
+        for it in re.finditer('([\w:][一二三四五六七八九十]{1,3}|[^\d,。]\d{1,2}(\.\d{1,2}){,2})、', sentence_text): # e.g. docid 289699210: "1、招标内容:滑触线及配件2、招标品牌:3、参标供应商经营形式要求:厂家4、参标供应商资质要求:5、"; inserts a comma before run-on item numbers like "配件2、"
+            temp = it.group(0)
+            sentence_text = sentence_text.replace(temp, temp[0] + ',' + temp[1:])
+        for item in re.finditer('[,。;;!!??]+', sentence_text):
+            end_index = item.end()
+            if end_index!=len(sentence_text):
+                if end_index-begin_index<6 and item.group()[-1] in [',', ';', ';'] and re.match('[一二三四五六七八九十\d.]+、', item.group())==None:
+                    continue
+            new_sentence_text = sentence_text[begin_index:end_index]
+            sentence2 = Sentence2(new_sentence_text,sentence_index,begin_index,end_index)
+            if sentence.in_attachment:
+                new_sentence2_list_attach.append(sentence2)
+            else:
+                new_sentence2_list.append(sentence2)
+            begin_index = end_index
+        if end_index!=len(sentence_text):
+            end_index = len(sentence_text)
+            new_sentence_text = sentence_text[begin_index:end_index]
+            sentence2 = Sentence2(new_sentence_text, sentence_index, begin_index, end_index)
+            if sentence.in_attachment:
+                new_sentence2_list_attach.append(sentence2)
+            else:
+                new_sentence2_list.append(sentence2)
+
+    return new_sentence2_list, new_sentence2_list_attach
+
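A minimal sketch of the clause splitting above, using a throwaway stand-in object that only carries the three attributes extract_sentence_list reads (sentence_text, sentence_index, in_attachment); in the pipeline these come from the preprocessed Sentence objects.

class _StubSentence:  # hypothetical stand-in, only for this sketch
    def __init__(self, text, index=0, in_attachment=False):
        self.sentence_text = text
        self.sentence_index = index
        self.in_attachment = in_attachment

stub = _StubSentence('1、招标内容:滑触线及配件2、招标品牌:3、参标供应商资质要求:具有有效的营业执照;')
main_clauses, attach_clauses = extract_sentence_list([stub])
for c in main_clauses:
    # run-on numbering such as "配件2、招标品牌" gets a comma inserted first,
    # then the text is cut at ,。;;!!?? boundaries (very short fragments are merged)
    print(c.sentence_index, c.wordOffset_begin, c.wordOffset_end, c.text)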
+requirement_pattern = "(采购需求|需求分析|项目说明|(采购|合同|招标|项目|服务|工程)(的?主要)?(内容|概况|范围)([及与和](其它|\w{,2})要求)?" \
+                      "|招标项目技术要求|服务要求|服务需求|项目目标|需求内容如下|建设规模)([::]|$)"
+aptitude_pattern = "(资格要求|资质要求)([::,]|$)"
+
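A few hand-picked headings the two patterns are intended to catch, as a quick sanity check (illustrative examples only, not an exhaustive test):

for heading in ['三、采购需求:', '项目概况', '招标项目技术要求:']:
    assert re.search(requirement_pattern, heading) is not None

for heading in ['二、投标人资格要求:', '供应商资质要求,']:
    assert re.search(aptitude_pattern, heading) is not None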
+# out_lines = []
+
+def extract_parameters(parse_document):
+    list_data = parse_document.tree
+    requirement_text = ''
+    aptitude_text = ''
+
+    _find_count = 0
+    _data_i = -1
+    while _data_i<len(list_data)-1:
+        _data_i += 1
+        _data = list_data[_data_i]
+        _type = _data["type"]
+        _text = _data["text"].strip()
+        # print(_data.keys())
+        if _type=="sentence":
+            if _data["sentence_title"] is not None:
+                if re.search(requirement_pattern,_text) is not None:
+                    childs = get_childs([_data])
+                    for c in childs:
+                        requirement_text += c["text"]+"\n"
+                    _data_i += len(childs)
+                    _data_i -= 1
+    _data_i = -1
+    while _data_i<len(list_data)-1:
+        _data_i += 1
+        _data = list_data[_data_i]
+        _type = _data["type"]
+        _text = _data["text"].strip()
+        # print(_data.keys())
+        if _type=="sentence":
+            # print("aptitude_pattern", _text)
+            if _data["sentence_title"] is not None:
+                # print("aptitude_pattern",_text)
+
+                # outline = re.sub('(?[一二三四五六七八九十\d.]+)?\s*、?', '',
+                #                  re.split('[::,]', _text)[0].replace('(', '(').replace(')', ')'))
+
+                if re.search(aptitude_pattern,_text[:30]) is not None:
+                    childs = get_childs([_data])
+                    for c in childs:
+                        aptitude_text += c["text"]
+                        # if c["sentence_title"]:
+                        #     aptitude_text += c["text"]+"\n"
+                        # else:
+                        #     aptitude_text += c["text"]
+                    _data_i += len(childs)
+                    _data_i -= 1
+
+                # elif re.match('[((\s★▲\*]?[一二三四五六七八九十\dⅠⅡⅢⅣⅤⅥⅦⅧⅨⅩⅪⅫ]+', _text) and len(_text)<30 and re.search('资质|资格', _text):
+                #     out_lines.append(outline)
+
+        if _type=="table":
+            list_table = _data["list_table"]
+            parent_title = _data["parent_title"]
+            if list_table is not None:
+                for line in list_table[:2]:
+                    for cell_i in range(len(line)):
+                        cell = line[cell_i]
+                        cell_text = cell[0]
+                        if len(cell_text)>120 and re.search(aptitude_pattern,cell_text) is not None:
+                            aptitude_text += cell_text+"\n"
+
+    return requirement_text,aptitude_text
+
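A minimal end-to-end sketch of how these pieces fit together, following the commented-out test in the __main__ block below; the wrapper name is made up, the main-text clauses are tried first, and the attachment clauses are only used as a fallback when no 资质要求 text is found (the 1500-character cap mirrors that test).

from BiddingKG.dl.interface import Preprocessing

def extract_requirement_and_aptitude(docid, html, title=''):
    # preprocess once, then cut sentences into short clauses
    list_articles, list_sentences, list_entitys, list_outlines, _cost_time = \
        Preprocessing.get_preprocessed([[docid, html, "", "", title, '', '']], useselffool=True)
    sentence2_list, sentence2_list_attach = extract_sentence_list(list_sentences[0])

    # main text first
    parse_document = ParseDocument(html, True, list_obj=sentence2_list)
    requirement_text, aptitude_text = extract_parameters(parse_document)

    # fall back to attachment clauses when nothing was found
    if len(aptitude_text) == 0:
        parse_document = ParseDocument(html, True, list_obj=sentence2_list_attach)
        requirement_text, aptitude_text = extract_parameters(parse_document)
    return requirement_text, aptitude_text[:1500]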
+if __name__ == "__main__":
+    # with open('D:\html/2.html', 'r', encoding='UTF-8') as f:
+    #     html = f.read()
+    #
+    # l = []
+    import pandas as pd
+    # from collections import Counter
+    # from BiddingKG.dl.interface import Preprocessing
+    # df = pd.read_csv(r'E:\channel分类数据\2022年每月两天数据/指定日期_html2022-12-10.csv')
+    # n = 0
+    # datas = []
+    # for id,title, html in zip(df['docid'],df['doctitle'], df['dochtmlcon']):
+    #     # if id not in [289647738, 289647739]:
+    #     #     continue
+    #     # print(id, type(id))
+    #     # parse_document = ParseDocument(html, True)
+    #     # requirement_text, aptitude_text = extract_parameters(parse_document)
+    #     if re.search('资\s*[格质]', html)==None:
+    #         continue
+    #
+    #     list_articles, list_sentences, list_entitys, list_outlines, _cost_time = Preprocessing.get_preprocessed([[id,html,"","",title,'', '']],useselffool=True)
+    #     sentence2_list, sentence2_list_attach = extract_sentence_list(list_sentences[0])
+    #
+    #     # sentence2_list = []
+    #
+    #     parse_document = ParseDocument(html, True, list_obj=sentence2_list)
+    #     requirement_text, aptitude_text = extract_parameters(parse_document)
+    #     if len(aptitude_text)>0:
+    #         datas.append((id, aptitude_text[:1500]))
+    #         print(id, aptitude_text[:10], aptitude_text[-20:])
+    #     else:
+    #         parse_document = ParseDocument(html, True, list_obj=sentence2_list_attach)
+    #         requirement_text, aptitude_text = extract_parameters(parse_document)
+    #
+    #     # if 0<len(aptitude_text)<20:
+    #     #     l.append(len(aptitude_text))
+    #     #     n += 1
+    #     #     print(id, aptitude_text)
+    #     #     if n > 5:
+    #     #         break
+    #
+    # c = Counter(out_lines)
+    # print(c.most_common(1000))
+    #
+    # df = pd.DataFrame(datas, columns=['docid', '资质要求'])
+    # df.to_excel('E:/公告资质要求提取结果.xlsx')
+
+    #     if len(aptitude_text)> 1000:
+    #         print(id, aptitude_text[:10], aptitude_text[-20:])
+    # print(Counter(l).most_common(50))
+    # print(len(df), len(l), min(l), max(l), sum(l)/len(l))
+    # n1 = len([it for it in l if it < 500])
+    # n2 = len([it for it in l if it < 1000])
+    # n3 = len([it for it in l if it < 1500])
+    # n4 = len([it for it in l if it < 2000])
+    # print(n1, n2, n3, n4, n1/len(l), n2/len(l), n3/len(l), n4/len(l))
+
+    # parse_document = ParseDocument(html,True)
+    # requirement_text, new_list_policy, aptitude_text = extract_parameters(parse_document)
+    # print(aptitude_text)
+
+    # sentence_text = '5、要求:3.1投标其他条件:1、中国宝武集团项目未列入禁入名单的投标人。2、具有有效的营业执照;'
+    # begin_index = 0
+    # for item in re.finditer('[,。;;!!??]+', sentence_text):
+    #     end_index = item.end()
+    #     if end_index != len(sentence_text):
+    #         if end_index - begin_index < 6:
+    #             continue
+    #     new_sentence_text = sentence_text[begin_index:end_index]
+    #     print(new_sentence_text)
+    #     begin_index = end_index
+
+    df = pd.read_excel('E:/公告资质要求提取结果.xlsx')
+    pos = neg = 0
+    for docid, text in zip(df['docid'], df['资质要求']):
+        if re.match('[((\s★▲\*]?[一二三四五六七八九十\dⅠⅡⅢⅣⅤⅥⅦⅧⅨⅩⅪⅫ]+', text) and re.search(aptitude_pattern, text[:15]):
+            pos += 1
+        else:
+            neg += 1
+            print(docid, text[:50])
+    print('异常:%d, 正常:%d'%(neg, pos))
+