Merge branch 'master' of http://192.168.2.103:3000/luojiehua/BIDI_ML_INFO_EXTRACTION

 Conflicts:
	.idea/misc.xml
	BiddingKG/dl/interface/getAttributes.py
znj 10 months ago
parent
commit
994a067a2a

+ 1 - 1
.idea/misc.xml

@@ -1,6 +1,6 @@
 <?xml version="1.0" encoding="UTF-8"?>
 <project version="4">
-  <component name="ProjectRootManager" version="2" project-jdk-name="Python 3.7 (py37_bid)" project-jdk-type="Python SDK" />
+  <component name="ProjectRootManager" version="2" languageLevel="JDK_15" project-jdk-name="Python 3.7 (py37)" project-jdk-type="Python SDK" />
   <component name="PythonCompatibilityInspectionAdvertiser">
     <option name="version" value="3" />
   </component>

+ 18 - 2
BiddingKG/dl/common/Utils.py

@@ -636,9 +636,23 @@ def isValidDate(year, month, day):
     else:
         return True
 
-time_format_pattern = re.compile("((?P<year>20\d{2}|\d{2}|二[零〇0][零〇一二三四五六七八九0]{2})\s*[-/年.]\s*(?P<month>\d{1,2}|[一二三四五六七八九十]{1,3})\s*[-/月.]\s*(?P<day>\d{1,2}|[一二三四五六七八九十]{1,3}))")
+time_format_pattern = re.compile("((?P<year>20\d{2}|\d{2}|二[零〇0][零〇一二三四五六七八九0]{2})\s*[-/年.]\s*(?P<month>\d{1,2}|[一二三四五六七八九十]{1,3})\s*[-/月.]?\s*(?P<day>\d{1,2}|[一二三四五六七八九十]{1,3})?)")
 from BiddingKG.dl.ratio.re_ratio import getUnifyNum
-def timeFormat(_time):
+import calendar
+
+def get_maxday(year, month):
+    # calendar.monthrange(year, month) 返回一个元组,其中第一个元素是那个月第一天的星期几(0-6代表周一到周日),
+    # 第二个元素是那个月的天数。
+    _, last_day = calendar.monthrange(year, month)
+    return last_day
+
+def timeFormat(_time, default_first_day=True):
+    '''
+    日期格式化:年-月-日
+    :param _time:
+    :param default_first_day: True取当月第一天,否则取最后一天
+    :return:
+    '''
     current_year = time.strftime("%Y",time.localtime())
     all_match = re.finditer(time_format_pattern,_time)
     for _match in all_match:
@@ -682,6 +696,8 @@ def timeFormat(_time):
                         legal = False
             else:
                 legal = False
+            if day == None:
+                day = "01" if (default_first_day or legal == False) else str(get_maxday(int(year), int(month)))
             if day!="":
                 if re.search("^\d+$", day):
                     if int(day)>31:
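
This hunk relaxes time_format_pattern (the month separator and the day group become optional) and adds get_maxday() on top of calendar.monthrange so timeFormat() can fill a missing day with "01" by default, or with the month's last day when default_first_day=False. A minimal standalone sketch of that fallback, assuming the same calendar-based helper (fill_missing_day is illustrative, not the repo's exact code path):

import calendar

def get_maxday(year, month):
    # calendar.monthrange returns (weekday_of_the_1st, days_in_month)
    _, last_day = calendar.monthrange(year, month)
    return last_day

def fill_missing_day(year, month, default_first_day=True):
    # Hypothetical helper mirroring the new timeFormat() behaviour:
    # a missing day becomes the 1st or the last day of the month.
    day = 1 if default_first_day else get_maxday(year, month)
    return "%04d-%02d-%02d" % (year, month, day)

print(get_maxday(2024, 2))               # 29 (leap year)
print(fill_missing_day(2024, 2, False))  # 2024-02-29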

+ 3 - 10
BiddingKG/dl/complaint/punish_predictor.py

@@ -393,6 +393,8 @@ class Punish_Extract():
             title = article.title
             text=article.content
             keyword, punishType = self.get_punishType(title, text)
+            if punishType == "未知类别":
+                punishType = ""
 
             # print('处罚类型:',punishType)
             punish_code = self.predict_punishCode(list_sentences)
@@ -413,16 +415,7 @@ class Punish_Extract():
                          'punishWhether':punishWhether,
                          'institutions':institutions,
                          'punishTimes':punishTimes}
-            _count = 0
-            for k,v in punish_dic.items():
-                if v!="":
-                    _count += 1
-            if _count>=2 and punish_dic["punishType"]!="未知类别":
-                list_result.append({"punish":punish_dic})
-            else:
-                list_result.append({"punish":{}})
-        return list_result
-
+            return {k: v for k, v in punish_dic.items() if v not in ['', ' ']}
 
 
 if __name__ == "__main__":
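
The punish extractor now maps the "未知类别" (unknown category) punishType to an empty string and, rather than counting non-empty fields into list_result, returns the per-article dict with empty values stripped. A tiny sketch of that final filtering step on a made-up dict:

punish_dic = {'punish_code': 'A001', 'punishType': '', 'complainants': ' ', 'institutions': '某某监督局'}
cleaned = {k: v for k, v in punish_dic.items() if v not in ['', ' ']}
print(cleaned)  # {'punish_code': 'A001', 'institutions': '某某监督局'}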

+ 32 - 5
BiddingKG/dl/entityLink/entityLink.py

@@ -14,6 +14,8 @@ from BiddingKG.dl.interface.Entitys import *
 import json
 from BiddingKG.dl.common.constDict import ConstDict
 
+business_dic = {}
+
 def edit_distance(source,target):
     dp = [["" for i in range(len(source)+1)] for j in range(len(target)+1)]
     for i in range(len(dp)):
@@ -167,7 +169,11 @@ def link_entitys(list_entitys,on_value=1):#on_value=0.81
                 if _entity.entity_text in bus_dic:
                     have_bus = True
                 else:
-                    have_bus, dic = get_business_data(_entity.entity_text)
+                    if _entity.entity_text not in business_dic:
+                        have_bus, dic = get_business_data(_entity.entity_text)
+                        business_dic[_entity.entity_text] = (have_bus, dic)
+                    else:
+                        have_bus, dic = business_dic.get(_entity.entity_text)  # 20240708 字典保存查询过的工商数据,避免重复查询redis
                     if re.search('^\w{,5}[分支](行|公司)$|^\w{1,3}公司$|^\w{2,5}段$', _entity.entity_text):
                         have_bus = False
                     if have_bus:
@@ -288,18 +294,35 @@ def doctitle_refine(doctitle):
 def get_nlp_enterprise(list_entity):
     nlp_enterprise = []
     nlp_enterprise_attachment = []
+    dict_enterprise = {}
     max_num = 100
     list_entity = sorted(list_entity,key=lambda x:(x.sentence_index,x.begin_index))
     for entity in list_entity:
         if entity.entity_type in ['org','company']:
+            if entity.entity_text not in dict_enterprise:
+                if entity.entity_text not in business_dic:
+                    have_bus, dic = get_business_data(entity.entity_text)
+                    business_dic[entity.entity_text] = (have_bus, dic)
+                else:
+                    have_bus, dic = business_dic.get(entity.entity_text)  # 20240708 字典保存查询过的工商数据,避免重复查询redis
+                credit_code = dic.get('credit_code', '')
+                in_text = 0 if entity.in_attachment else 1
+                if entity.label in [0,1,2,3,4] or len(dict_enterprise)<=max_num:
+                    dict_enterprise[entity.entity_text] = {'in_text': in_text}
+                    if credit_code != "":
+                        dict_enterprise[entity.entity_text]['credit_code'] = credit_code
+            else:
+                in_text = 0 if entity.in_attachment else 1
+                if in_text != dict_enterprise[entity.entity_text]['in_text']:
+                    dict_enterprise[entity.entity_text]['in_text'] = 2
+
             if not entity.in_attachment:
                 if entity.entity_text not in nlp_enterprise:
                     nlp_enterprise.append(entity.entity_text)
             else:
                 if entity.entity_text not in nlp_enterprise_attachment:
                     nlp_enterprise_attachment.append(entity.entity_text)
-
-    return nlp_enterprise[:max_num],nlp_enterprise_attachment[:max_num]
+    return nlp_enterprise[:max_num],nlp_enterprise_attachment[:max_num], dict_enterprise
 
 ENTERPRISE_HUGE = None
 
@@ -528,7 +551,11 @@ def match_enterprise_max_first(sentence):
                         if len(enter_name)<4: # 20240521 短于4个字的不要
                             break
                         if enter_tail in SET_TAIL_ENTERPRISE or re.search('(中心|中学|小学|医院|学院|大学|学校|监狱|大队|支队|林场|海关|分局|商行)$', enter_tail):
-                            have_bus, dic = get_business_data(enter_name) # 20210124 改为有工商数据的实体才添加
+                            if enter_name not in business_dic:
+                                have_bus, dic = get_business_data(enter_name) # 20210124 改为有工商数据的实体才添加
+                                business_dic[enter_name] = (have_bus, dic)
+                            else:
+                                have_bus, dic = business_dic.get(enter_name) # 20240708 字典保存查询过的工商数据,避免重复查询redis
                             if have_bus:
                             # if is_enterprise_exist(enter_name):
                                 match_item = {"entity_text":"%s"%(enter_name),"begin_index":begin_index,"end_index":begin_index+len(enter_name)}
@@ -590,7 +617,7 @@ def calibrateEnterprise(list_articles,list_sentences,list_entitys):
                         find_flag = True
                         # 判断是否是多个公司
                         if re.search('[分支](公司|中心|监狱|部|行)|^\w{4,15}公司\w{2,3}公司$'
-                                     '|(大学|学院)\w{,2}附属\w{,6}医院$|(\w{2,5}办事处\w{2,6}$'
+                                     '|(大学|学院)\w{,2}附属\w{,6}医院$|(\w{2,5}办事处\w{2,6}$|^\w{2,6}银行\w{2,10}[分支]行$'
                                      '|\w{2,4}[省市县]\w{2,14}村)(股份)?经济(合作|联合)社$|国家税务总局\w{2,10}税务局$',
                                      p_entity.entity_text):
                             continue
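
Three call sites in this file now consult a module-level business_dic before calling get_business_data(), so the business-registry lookup (backed by redis, per the 20240708 comments) runs at most once per entity text within a process; get_nlp_enterprise() additionally returns a dict_enterprise map carrying an in_text flag (1 = body text, 0 = attachment, 2 = both) and the credit_code when available. A minimal sketch of the same memoization pattern, with the redis-backed lookup stubbed out since it is not shown here:

business_dic = {}  # entity_text -> (have_bus, dic), shared across call sites

def get_business_data(name):
    # Stand-in for the real redis-backed query in the repo.
    return True, {'credit_code': '91xxxxxxxxxxxxxxxx'}

def cached_business_data(name):
    # Query once, then serve repeated requests from the in-process cache.
    if name not in business_dic:
        business_dic[name] = get_business_data(name)
    return business_dic[name]

have_bus, dic = cached_business_data('某某建设工程有限公司')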

+ 34 - 14
BiddingKG/dl/interface/Preprocessing.py

@@ -1087,7 +1087,7 @@ def tableToText(soup, docid=None):
                                     continue
                                 if re.search(packPattern,head) is not None:
                                     pack_text += head+cell["text"]+","
-                                elif re.search(rankPattern,head) is not None:   # 2020/11/23 大网站规则发现问题,if 改elif
+                                elif re.search(rankPattern,head) is not None and re.search('(排名|排序|名次|顺序):?第?[\d一二三]', rank_text)==None:   # 2020/11/23 大网站规则发现问题,if 改elif 20240620修复同时有排名及评标情况造成错误
                                     #排名替换为同一种表达
                                     rank_text += head+cell["text"]+","
                                     #print(rank_text)
@@ -1101,10 +1101,12 @@ def tableToText(soup, docid=None):
                                         text_line += head+cell["text"]+","
                                 text_set.add(str(head+cell["text"]))
                                 last_text = cell['text']
-
+                        tr_text = pack_text+rank_text+entity_text+money_text+text_line
                         text += pack_text+rank_text+entity_text+money_text+text_line
                         # text = text[:-1] + "。" if len(text) > 0 else text
-                        if len(text_set)==1 and head == '' and len(last_text)< 20 and (re.search('[::]$', last_text) or re.search('[一二三四五六七八九十\d]+[、.]\w{2,}', last_text)):
+                        if len(text_set-set([' ']))==1 and head == '' and len(last_text)< 25: # 修复367694716分两行表达
+                            text = text if re.search('\w$', text[:-1]) else text[:-1]
+                        elif (width == 2 or len(text_set)==1) and head != '' and len(tr_text)<50: # 修复494731937只有两行的,分句不合理
                             text = text if re.search('\w$', text[:-1]) else text[:-1]
                         else:
                             text = text[:-1] + "。"
@@ -2067,7 +2069,7 @@ def segment(soup,final=True):
             # text = re.sub("\s+","##space##",text)
             return text
     segList = ["title"]
-    commaList = ["div","br","td","p","li"]
+    commaList = ["div","br","td","p","li","h1","h2","h3","h4","h5","h6"]
     #commaList = []
     spaceList = ["span"]
     tbodies = soup.find_all('tbody')
@@ -2115,11 +2117,13 @@ def segment(soup,final=True):
         for _sent in re.split("。+",text):
             for _sent2 in re.split(',+',_sent):
                 for _sent3 in re.split(":+",_sent2):
+                    pre_t = ''
                     for _t in re.split("\s{4,}",_sent3):
-                        if len(_t)<3:
+                        if len(_t)<3 or len(pre_t)<3 or re.search('[^\w\s]$', pre_t):  # 20240726 前文小于3字或以符合结尾的不加 避免乱加逗号 例:2)    申请人的资格要求
                             _text += _t
                         else:
                             _text += ","+_t
+                        pre_t = _t
                     _text += ":"
                 _text = _text[:-1]
                 _text += ","
@@ -2153,9 +2157,10 @@ def segment(soup,final=True):
                 text = re.sub(punc_del," ",text) # 多个空字符替换为一个空格(防止时间类连接),后面还有对空格处理
 
     #将连续的中文句号替换为一个
-    text_split = text.split("。")
-    text_split = [x for x in text_split if len(x)>0]
-    text = "。".join(text_split)
+    # text_split = text.split("。")
+    # text_split = [x for x in text_split if len(x)>0]
+    # text = "。".join(text_split)
+    text = re.sub('。+', '。', text).lstrip('。') # 20240703 修复上面的方法造成文末句号丢失问题。
 
     # #删除标签中的所有空格
     # for subs in subspaceList:
@@ -2626,6 +2631,8 @@ def special_treatment(sourceContent, web_source_no):
             sourceContent = re.sub('卖方[::\s]+宝山钢铁股份有限公司', '招标单位:宝山钢铁股份有限公司', sourceContent)
         elif web_source_no=='DX008791-1':
             sourceContent = re.sub('收货单位:', '最终用户:', sourceContent)
+        elif web_source_no=='DX011971':
+            sourceContent = re.sub('公司主体:', '业主单位:', sourceContent)
         return sourceContent
     except Exception as e:
         log('特殊数据源: %s 预处理特别修改抛出异常: %s'%(web_source_no, e))
@@ -3014,6 +3021,13 @@ def get_preprocessed_article(articles,cost_time = dict(),useselffool=True):
         if re.search('中标单位名称:[\w()]{5,25},中标候选人名次:\d,', article_processed) and re.search('中标候选人名次:\d,中标单位名称:[\w()]{5,25},', article_processed)==None:  # 处理类似 304706608 此篇的数据源正文特殊表达
             for it in re.finditer('(?P<tenderer>(中标单位名称:[\w()]{5,25},))(?P<rank>(中标候选人名次:\d,))', article_processed):
                 article_processed = article_processed.replace(it.group(0), it.group('rank')+it.group('tenderer'))
+        ser = re.search('竞得人:\d{8,15}-', article_processed)
+        if ser:
+            article_processed = article_processed.replace(ser.group(0), '竞得人:') # 修复类似 368120777 关键词角色被编号隔开情况
+        article_processed = re.sub("流出方信息:。", "流出方信息:", article_processed) # 修复 367520674 产权批量表格问题
+        idx = article_processed.find('供应商报名、缴纳保证金、下载采购文件流程.docx。##attachment##。') # 修复404230599 E交易站源批量附件中标人错误
+        if idx > 1000:
+            article_processed = article_processed[:idx]
 
         '''去除业绩内容'''
         article_processed = del_achievement(article_processed)
@@ -3031,7 +3045,7 @@ def get_preprocessed_article(articles,cost_time = dict(),useselffool=True):
             article_processed_list[1] = attachment_text
             article_processed = "##attachment##".join(article_processed_list)
         '''特别数据源对 预处理后文本 做特别修改'''
-        if web_source_no in ['03786-10', '00076-4', 'DX000105-2', '04080-3', '04080-4', '03761-3', '00695-7',"13740-2", '00811-8', '03795-1', '03795-2', 'DX000726-6','DX008791-1']:
+        if web_source_no in ['03786-10', '00076-4', 'DX000105-2', '04080-3', '04080-4', '03761-3', '00695-7',"13740-2", '00811-8', '03795-1', '03795-2', 'DX000726-6','DX008791-1','DX011971']:
             article_processed = special_treatment(article_processed, web_source_no)
 
         # 提取bidway
@@ -3113,7 +3127,8 @@ def get_preprocessed_sentences(list_articles,useselffool=True,cost_time=dict()):
             sentences_set = set()
             for _iter in re.finditer(split_patten,article_processed):
                 _sen = article_processed[_begin:_iter.span()[1]]
-                if len(_sen)>0 and _sen not in sentences_set:
+                # if len(_sen)>0 and _sen not in sentences_set: # 去重导致内容丢失
+                if len(_sen)>0 and (len(sentences)>0 and _sen != sentences[-1] or len(sentences)==0): # 2024/07/25 改为顺序去重
                     # 标识在附件里的句子
                     if re.search("##attachment##",_sen):
                         attachment_begin_index = len(sentences)
@@ -3125,7 +3140,8 @@ def get_preprocessed_sentences(list_articles,useselffool=True,cost_time=dict()):
             if re.search("##attachment##", _sen):
                 # _sen = re.sub("##attachment##", "", _sen)
                 attachment_begin_index = len(sentences)
-            if len(_sen)>0 and _sen not in sentences_set:
+            # if len(_sen)>0 and _sen not in sentences_set:
+            if len(_sen)>0 and (len(sentences)>0 and _sen != sentences[-1] or len(sentences)==0):  # 2024/07/25 改为顺序去重
                 sentences.append(_sen)
                 sentences_set.add(_sen)
             # 解析outline大纲分段
@@ -3230,7 +3246,7 @@ def get_money_entity(sentence_text, found_yeji, in_attachment=False):
     # 使用正则识别金额
     entity_type = "money"
     list_money_pattern = {"cn": "(()(?P<filter_kw>百分之)?(?P<money_cn>[零壹贰叁肆伍陆柒捌玖拾佰仟萬億圆十百千万亿元角分]{3,})())",
-                          "key_word": "((?P<text_key_word>(?:[¥¥]+,?|(中标|成交|合同|承租|投资|服务))?(金?额|价格?)|价格|预算(金额)?|(监理|设计|勘察)(服务)?费|[单报标限总造]价款?|金额|租金|标的基本情况|CNY|成交结果|资金|(控制|拦标)价)(?:[,,\[(\(]*\s*(人民币|单位:)?/?(?P<unit_key_word_before>[万亿]?(?:[美日欧]元|元(/(M2|[\u4e00-\u9fa5]{1,3}))?)?(?P<filter_unit2>[台个只吨]*))\s*(/?费率)?(人民币)?[\])\)]?)\s*[,,::]*(RMB|USD|EUR|JPY|CNY)?[::]?(\s*[^壹贰叁肆伍陆柒捌玖拾佰仟萬億分万元编号时间日期计采a-zA-Z]{,8}?))(第[123一二三]名[::])?(\d+(\*\d+%)+=)?(?P<money_key_word>\d{1,3}([,,]\d{3})+(\.\d+)?|\d+(\.\d+)?[百千]{,1})(?P<science_key_word>(E-?\d+))?(?:[(\(]?(?P<filter_>[%%‰折])*\s*,?((金额)?单位[::])?(?P<unit_key_word_behind>[万亿]?(?:[美日欧]元|元)?(?P<filter_unit1>[台只吨斤棵株页亩方条天]*))\s*[)\)]?))",
+                          "key_word": "((?P<text_key_word>(?:[¥¥]+,?|(中标|成交|合同|承租|投资|服务))?(金?额|价格?)|价格|预算(金额)?|(监理|设计|勘察)(服务)?费|[单报标限总造]价款?|金额|租金|标的基本情况|CNY|成交结果|资金|(控制|拦标)价|投资)(?:[,,\[(\(]*\s*(人民币|单位:)?/?(?P<unit_key_word_before>[万亿]?(?:[美日欧]元|元(/(M2|[\u4e00-\u9fa5]{1,3}))?)?(?P<filter_unit2>[台个只吨]*))\s*(/?费率)?(人民币)?[\])\)]?)\s*[,,::]*(RMB|USD|EUR|JPY|CNY)?[::]?(\s*[^壹贰叁肆伍陆柒捌玖拾佰仟萬億分万元编号时间日期计采a-zA-Z]{,8}?))(第[123一二三]名[::])?(\d+(\*\d+%)+=)?(?P<money_key_word>\d{1,3}([,,]\d{3})+(\.\d+)?|\d+(\.\d+)?[百千]{,1})(?P<science_key_word>(E-?\d+))?(?:[(\(]?(?P<filter_>[%%‰折])*\s*,?((金额)?单位[::])?(?P<unit_key_word_behind>[万亿]?(?:[美日欧]元|元)?(?P<filter_unit1>[台只吨斤棵株页亩方条天]*))\s*[)\)]?))",
                           "front_m": "((?P<text_front_m>(?:[(\(]?\s*(?P<unit_front_m_before>[万亿]?(?:[美日欧]元|元))\s*[)\)]?)\s*[,,::]*(\s*[^壹贰叁肆伍陆柒捌玖拾佰仟萬億分万元编号时间日期计采a-zA-Z金额价格]{,2}?))(?P<money_front_m>\d{1,3}([,,]\d{3})+(\.\d+)?|\d+(\.\d+)?(?:,?)[百千]*)(?P<science_front_m>(E-?\d+))?())",
                           "behind_m": "(()()(?P<money_behind_m>\d{1,3}([,,]\d{3})+(\.\d+)?|\d+(\.\d+)?(?:,?)[百千]*)(?P<science_behind_m>(E-?\d+))?(人民币)?[\((]?(?P<unit_behind_m>[万亿]?(?:[美日欧]元|元)(?P<filter_unit3>[台个只吨斤棵株页亩方条米]*))[\))]?)"}
     # 2021/7/19 调整金额,单位提取正则,修复部分金额因为单位提取失败被过滤问题。  20240415 调整front_m 修复 详见合同元,合同金额:378.8万元 提取
@@ -3251,6 +3267,8 @@ def get_money_entity(sentence_text, found_yeji, in_attachment=False):
     # print('all_match:', all_match)
     for _match in all_match:
         # print('_match: ', _match.group())
+        if re.search('^元/1\d{10},$', _match.group(0)): # 修复 495042766 现场负责人 姚元 / 13488160460 预测为金额
+            continue
         if len(_match.group()) > 0:
             # print("===",_match.group())
             # # print(_match.groupdict())
@@ -3332,6 +3350,8 @@ def get_money_entity(sentence_text, found_yeji, in_attachment=False):
                 elif re.search('^[-—]+[\d,.]+万元', sentence_text[end_index:]):
                     # print('两个金额连接后面的有单位,用后面单位')
                     unit = '万元'
+                elif re.search('^,?(价格币种:\w{2,3},)?价格单位:万元', sentence_text[end_index:]): # 修复494731937金额单位缺漏 中标价格:39501.094425,价格币种:人民币,价格单位:万元,
+                    unit = '万元'
                 elif re.search('([单报标限总造]价款?|金额|租金|(中标|成交|合同|承租|投资|控制|拦标))?[价额]|价格|预算(金额)?|(监理|设计|勘察)(服务)?费)(小写)?[::为]*-?$', text_beforeMoney.strip()) and re.search('^0|1[3|4|5|6|7|8|9]\d{9}', entity_text) == None:  # 修复
                     if re.search('^[\d,,.]+$', entity_text) and float(re.sub('[,,]', '', entity_text))<500 and re.search('万元', sentence_text):
                         unit = '万元'
@@ -3456,7 +3476,7 @@ def get_preprocessed_entitys(list_sentences,useselffool=True,cost_time=dict()):
     '''
 
     list_entitys = []
-    not_extract_roles = ['黄埔军校', '国有资产管理处', '五金建材', '铝合金门窗', '华电XX发电有限公司', '华电XXX发电有限公司'] # 需要过滤掉的企业单位
+    not_extract_roles = ['黄埔军校', '国有资产管理处', '五金建材', '铝合金门窗', '华电XX发电有限公司', '华电XXX发电有限公司', '中标(成交)公司'] # 需要过滤掉的企业单位
     for list_sentence in list_sentences:
         sentences = []
         list_entitys_temp = []
@@ -3549,7 +3569,7 @@ def get_preprocessed_entitys(list_sentences,useselffool=True,cost_time=dict()):
                 entity_type = ner_entity[2]
                 entity_text = ner_entity[3]
 
-                if entity_type == 'location' and re.search('^\w{2,4}[市县]\w{3,15}(中心|监狱|殡仪馆)$', entity_text) and \
+                if entity_type == 'location' and re.search('^\w{2,4}[市县]\w{2,15}(中心|监狱|殡仪馆|水利站)$', entity_text) and \
                     re.search('\d[楼层号]', entity_text)==None: # 2024/06/07 修改错误地址实体为角色
                     entity_type = 'org'
 

+ 107 - 11
BiddingKG/dl/interface/extract.py

@@ -27,6 +27,8 @@ import BiddingKG.dl.complaint.punish_predictor as punish_rule
 import json
 from BiddingKG.dl.money.re_money_total_unit import extract_total_money, extract_unit_money
 from BiddingKG.dl.ratio.re_ratio import extract_ratio
+from BiddingKG.dl.interface.outline_extractor import ParseDocument, extract_parameters, extract_sentence_list
+from BiddingKG.dl.interface.get_label_dic import get_all_label
 
 
 # 自定义jsonEncoder
@@ -43,7 +45,31 @@ class MyEncoder(json.JSONEncoder):
             return obj
         return json.JSONEncoder.default(self, obj)
 
-def extractCount(extract_dict):
+def get_login_web_set():
+
+    file = os.path.join(os.path.dirname(__file__),"login_weblist.txt")
+    list_web = []
+    try:
+        if os.path.exists(file):
+            with open(file,"r",encoding="utf8") as f:
+                while 1:
+                    line = f.readline()
+                    if not line:
+                        break
+                    line = line.strip()
+                    if line:
+                        list_web.append(line)
+
+    except Exception as e:
+        traceback.print_exc()
+    _set = set(list_web)
+    log("get_login_web_set length %d"%(len(_set)))
+    return _set
+
+set_login_web = get_login_web_set()
+
+
+def extractCount(extract_dict,page_attachments,web_source_name):
     # time_pattern = "\d{4}\-\d{2}\-\d{2}.*"
 
     if len(extract_dict):
@@ -113,6 +139,42 @@ def extractCount(extract_dict):
         extract_count += 1
     if project_name!="":
         extract_count += 1
+
+    if page_attachments is not None and page_attachments!='':
+        try:
+            _attachments = json.loads(page_attachments)
+            has_zhaobiao = False
+            has_qingdan = False
+            if len(_attachments)>0:
+                for _atta in _attachments:
+                    classification = _atta.get("classification","")
+                    if str(classification)=='招标文件':
+                        has_zhaobiao = True
+                    if str(classification)=='采购清单':
+                        has_qingdan = True
+            if has_zhaobiao:
+                extract_count += 3
+            if has_qingdan:
+                extract_count += 2
+        except Exception as e:
+            traceback.print_exc()
+            pass
+
+    list_approval_dict = _extract.get("approval",[])
+    for _dict in list_approval_dict:
+        for k,v in _dict.items():
+            if v is not None and v!='' and v!="未知":
+                extract_count += 1
+
+
+    punish_dict = _extract.get("punish",{})
+    for k,v in punish_dict.items():
+        if v is not None and v!='' and v!="未知":
+            extract_count += 1
+
+    if web_source_name in set_login_web:
+        extract_count -= 1
+
     return extract_count
 
 # 字符编码标准化
@@ -176,7 +238,7 @@ def repair_entity(prem,district_dict,list_articles):
                             role['role_text'] = city + role_text
 
 
-def predict(doc_id,text,title="",page_time="",web_source_no='',web_source_name="",original_docchannel='',**kwargs):
+def predict(doc_id,text,title="",page_time="",web_source_no='',web_source_name="",original_docchannel='',page_attachments='[]',**kwargs):
     cost_time = dict()
 
     start_time = time.time()
@@ -188,6 +250,14 @@ def predict(doc_id,text,title="",page_time="",web_source_no='',web_source_name="
     cost_time["preprocess"] = round(time.time()-start_time,2)
     cost_time.update(_cost_time)
 
+    '''大纲提取及大纲内容相关提取'''
+    sentence2_list, sentence2_list_attach = extract_sentence_list(list_sentences[0])
+    parse_document = ParseDocument(text, True,list_obj=sentence2_list)
+    requirement_text, aptitude_text, addr_bidopen_text = extract_parameters(parse_document, list_articles[0].content)
+    if sentence2_list_attach!=[] and requirement_text == '' and aptitude_text == '' and addr_bidopen_text=="":
+        parse_document = ParseDocument(text, True, list_obj=sentence2_list_attach)
+        requirement_text, aptitude_text, addr_bidopen_text = extract_parameters(parse_document, list_articles[0].content)
+
     # 过滤掉Redis里值为0的错误实体
     # list_entitys[0] = entityLink.enterprise_filter(list_entitys[0])
     # #依赖句子顺序
@@ -268,7 +338,7 @@ def predict(doc_id,text,title="",page_time="",web_source_no='',web_source_name="
     start_time = time.time()  # 实体链接
     entityLink.link_entitys(list_entitys)
     doctitle_refine = entityLink.doctitle_refine(title)
-    nlp_enterprise,nlp_enterprise_attachment = entityLink.get_nlp_enterprise(list_entitys[0])
+    nlp_enterprise,nlp_enterprise_attachment, dict_enterprise = entityLink.get_nlp_enterprise(list_entitys[0])
     prem = getAttributes.getPREMs(list_sentences,list_entitys,list_articles,list_outlines,page_time)
     log("get attributes done of doc_id%s"%(doc_id))
     cost_time["attrs"] = round(time.time()-start_time,2)
@@ -289,12 +359,6 @@ def predict(doc_id,text,title="",page_time="",web_source_no='',web_source_name="
     '''获取联合体信息'''
     getAttributes.get_win_joint(prem, list_entitys, list_sentences, list_articles)
 
-    #暂时不执行
-    # start_time = time.time() #失信数据要素提取
-    # list_punish_dic = predictor.getPredictor("punish").get_punish_extracts(list_articles,list_sentences, list_entitys)
-    # cost_time["punish"] = round(time.time()-start_time,2)
-
-
     '''修正采购公告表格形式多种采购产品中标价格;中标金额小于所有产品总金额则改为总金额'''
     getAttributes.correct_rolemoney(prem, total_product_money, list_articles)
 
@@ -363,15 +427,32 @@ def predict(doc_id,text,title="",page_time="",web_source_no='',web_source_name="
     log("pb_extract done of doc_id%s"%(doc_id))
     cost_time["pb_extract"] = round(time.time() - start_time, 2)
 
+    '''打标签'''
+    label_dic = get_all_label(title, list_articles[0].content)
+
     # data_res = Preprocessing.union_result(Preprocessing.union_result(codeName, prem),list_punish_dic)[0]
     # data_res = Preprocessing.union_result(Preprocessing.union_result(Preprocessing.union_result(codeName, prem),list_punish_dic), list_channel_dic)[0]
-    version_date = {'version_date': '2024-06-18'}
+    version_date = {'version_date': '2024-07-26'}
     data_res = dict(codeName[0], **prem[0], **channel_dic, **product_attrs[0], **product_attrs[1], **payment_way_dic, **fail_reason, **industry, **district, **candidate_dic, **version_date, **all_moneys, **pb_json)
 
     if original_docchannel == 302:
         approval = predictor.getPredictor("approval").predict(list_sentences, list_entitys)
         data_res['approval'] = approval
 
+    if channel_dic['docchannel']['doctype'] == '处罚公告': # 20240627 处罚公告进行失信要素提取
+        start_time = time.time() #失信数据要素提取
+        punish_dic = predictor.getPredictor("punish").get_punish_extracts(list_articles,list_sentences, list_entitys)
+        cost_time["punish"] = round(time.time()-start_time,2)
+        data_res['punish'] = punish_dic
+        if "Project" in data_res['prem']:
+            for d in data_res['prem']['Project']['roleList']:
+                if d['role_name'] == 'tenderee' and d.get('role_prob', 0.6) < 0.6:  # 处罚公告 去掉低概率招标人
+                    data_res['prem']['Project']['roleList'] = [d for d in data_res['prem']['Project']['roleList'] if d['role_name'] != 'tenderee']
+                    break
+            if len(data_res['prem']['Project']['roleList']) == 0 and data_res['prem']['Project'].get('tendereeMoney', 0) in [0, '0']: # 删除空包
+                data_res['prem'].pop('Project')
+
+
     '''最终检查修正招标、中标金额'''
     getAttributes.limit_maximum_amount(data_res, list_entitys[0])
 
@@ -380,13 +461,28 @@ def predict(doc_id,text,title="",page_time="",web_source_no='',web_source_name="
     data_res["doctitle_refine"] = doctitle_refine
     data_res["nlp_enterprise"] = nlp_enterprise
     data_res["nlp_enterprise_attachment"] = nlp_enterprise_attachment
+    data_res["dict_enterprise"] = dict_enterprise
+
     # 要素的个数
-    data_res['extract_count'] = extractCount(data_res)
+    data_res['extract_count'] = extractCount(data_res,page_attachments,web_source_name)
     # 是否有表格
     data_res['exist_table'] = 1 if re.search("<td",text) else 0
     data_res["cost_time"] = cost_time
     data_res["success"] = True
 
+    # 拟在建需建索引字段
+    data_res["proportion"] = pb_json.get('pb').get('proportion', '')
+    data_res["pb_project_name"] = pb_json.get('pb').get('project_name_refind', '')
+
+    # 资质要求
+    data_res['aptitude'] = aptitude_text[:1500]
+    # 采购内容
+    data_res['requirement'] = requirement_text[:1500]
+    # 打标签
+    data_res['label_dic'] = label_dic
+    # 开标地点
+    data_res['addr_dic'] = {'addr_bidopen': addr_bidopen_text}
+
     # for _article in list_articles:
     #         log(_article.content)
     #
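
extractCount() now also takes page_attachments (a JSON list of attachment metadata) and web_source_name: attachments classified as 招标文件 add 3 to the count, 采购清单 adds 2, non-empty approval and punish fields each add 1, and sources listed in login_weblist.txt (sites requiring login) subtract 1; predict() accordingly gains a page_attachments parameter plus new output fields (aptitude, requirement, label_dic, addr_dic, dict_enterprise). A hedged sketch of just the attachment bonus on a made-up payload; the real function folds this into the overall extract_count:

import json

def attachment_bonus(page_attachments):
    # +3 if any attachment is classified as a tender document (招标文件),
    # +2 if any is a procurement list (采购清单), mirroring the new extractCount logic.
    bonus = 0
    try:
        attachments = json.loads(page_attachments) if page_attachments else []
        classes = {str(a.get("classification", "")) for a in attachments}
        if "招标文件" in classes:
            bonus += 3
        if "采购清单" in classes:
            bonus += 2
    except Exception:
        pass
    return bonus

print(attachment_bonus('[{"classification": "招标文件"}, {"classification": "采购清单"}]'))  # 5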

+ 14 - 6
BiddingKG/dl/interface/getAttributes.py

@@ -12,6 +12,7 @@ import os
 from scipy.optimize import linear_sum_assignment
 from BiddingKG.dl.interface.Entitys import Match
 import numpy as np
+import uuid
 import time,calendar
 from datetime import datetime
 
@@ -3951,7 +3952,7 @@ def getOtherAttributes(list_entity,page_time,prem):
             #     continue
             if re.search("[^之]日|天|年|月|周|星期", entity.entity_text) or re.search("\d{4}[-./]\d{1,2}", entity.entity_text):
                 list_serviceTime.append(entity)
-        elif entity.entity_type=="person" and entity.label ==4:
+        elif entity.entity_type=="person" and entity.label ==4 and entity.entity_text not in dict_other["person_review"]: # 20240624评审专家去重
             dict_other["person_review"].append(entity.entity_text)
         elif entity.entity_type=='product' and entity.entity_text not in dict_other["product"]: #顺序去重保留
             dict_other["product"].append(entity.entity_text)
@@ -4349,7 +4350,8 @@ def get_multi_winner_and_money(channel_dic, prem, list_entitys,list_sentences):
                 else:
                     moneys.append(money)
             if ent.entity_type in ['org', 'company'] and ent.label == 2 and ent.values[ent.label]>0.8:
-                multi_winner_l.append(ent.entity_text)
+                if ent.entity_text not in multi_winner_l:
+                    multi_winner_l.append(ent.entity_text)
                 sentence_text = sentences[ent.sentence_index].sentence_text
                 pre_text = sentence_text[max(0, b_idx_fr-10):b_idx_fr]
                 if re.search('入围', pre_text) and re.search('未入围', pre_text)==None and ent.entity_text not in finalists:
@@ -4361,13 +4363,15 @@ def get_multi_winner_and_money(channel_dic, prem, list_entitys,list_sentences):
                     if ent_bh.entity_type in ['org', 'company'] and ent_bh.label == 5 and ent_bh.sentence_index == ent.sentence_index and b_idx_bh-e_idx_fr==1:
                         sentence_text = sentences[ent_bh.sentence_index].sentence_text
                         if sentence_text[e_idx_fr:b_idx_bh] in [';','、','&',','] and (len(sentence_text)==e_idx_bh or sentence_text[e_idx_bh] in [';','、','&', ',', '。']): # 修复多中标人刚好在文末index超出报错,例子 407126558
-                            multi_winner_l.append(ent_bh.entity_text)
+                            if ent_bh.entity_text not in multi_winner_l:
+                                multi_winner_l.append(ent_bh.entity_text)
                             e_idx_fr = e_idx_bh
                             i = j + 1
                         else:
                             break
                     elif ent_bh.entity_type in ['org', 'company'] and ent_bh.label == 5 and ent_bh.sentence_index == ent.sentence_index and b_idx_bh==e_idx_fr:
-                        multi_winner_l.append(ent_bh.entity_text)
+                        if ent_bh.entity_text not in multi_winner_l:
+                            multi_winner_l.append(ent_bh.entity_text)
                         e_idx_fr = e_idx_bh
                         i = j + 1
                     else:
@@ -4379,7 +4383,8 @@ def get_multi_winner_and_money(channel_dic, prem, list_entitys,list_sentences):
                     for v in project.values():
                         for d in v['roleList']:
                             if d.get('role_name', '') == 'win_tenderer' and d.get('role_text', '') == multi_winner_l[0]:
-                                d['multi_winner'] = ','.join(set(multi_winner_l))
+                                # d['multi_winner'] = ','.join(set(multi_winner_l))
+                                d['multi_winner'] = ','.join(multi_winner_l)
                                 break
         if len(finalists)>=2:
             for project in prem[0].values():
@@ -4507,10 +4512,11 @@ def  confirm_prem(prem, channel_dic):
         other_winner = set()
         empty_roleList = []
         for k in prem:
+            prem[k]['uuid'] = str(uuid.uuid4()) # 20240627 每个包都添加uuid
             if prem[k]['roleList'] == []:
                 empty_roleList.append(k)
             for d in prem[k]['roleList']:
-                if d['role_name'] in ['win_tenderer', 'pre_win_tenderer']:
+                if d['role_name'] in ['win_tenderer', 'pre_win_tenderer', 'second_tenderer','third_tenderer']:
                     if k == 'Project':
                         pro_winner.add(d['role_text'])
                     else:
@@ -4522,6 +4528,8 @@ def  confirm_prem(prem, channel_dic):
         if other_winner and channel_dic['docchannel']['docchannel'] in ['中标信息', '候选人公示', '合同公告']:
             for k in empty_roleList:
                 prem.pop(k)
+    elif "Project" in prem:
+        prem['Project']['uuid'] = str(uuid.uuid4())
 
 
 def fix_single_source(prem, channel_dic, original_docchannel):
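
confirm_prem() now stamps every package in prem (including a bare "Project" package) with a fresh uuid4 string, also counts second_tenderer/third_tenderer when collecting winners used to decide which empty packages to drop, and multi_winner is joined in list order so the ranking order survives (the previous set() join discarded it). A one-line illustration of the uuid stamping on a toy prem dict:

import uuid

prem = {'Project': {'roleList': []}, '一标段': {'roleList': [{'role_name': 'win_tenderer'}]}}
for k in prem:
    prem[k]['uuid'] = str(uuid.uuid4())  # one id per package, as in confirm_prem()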

+ 272 - 0
BiddingKG/dl/interface/get_label_dic.py

@@ -0,0 +1,272 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+
+"""
+@author: bidikeji
+@time: 2024/7/23 14:45
+"""
+
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+
+"""
+@author: bidikeji
+@time: 2024/7/11 17:56
+"""
+from BiddingKG.dl.common.Utils import getUnifyMoney
+import re
+
+def chinese_to_arabic(s):
+    # 中文数字到阿拉伯数字的映射
+    num_map = {'零': 0, '一': 1, '二': 2, '两': 2, '三': 3, '四': 4,
+               '五': 5, '六': 6, '七': 7, '八': 8, '九': 9}
+    # 单位到倍数的映射
+    unit_map = {'十': 10, '百': 100}
+
+    # 初始化结果和当前数值
+    result = 0
+    current_num = 0
+    has_unit = False
+
+    if s.startswith('十'):
+        result = 10
+    # 遍历字符串
+    for char in s:
+        if char in num_map:
+            # 如果是数字,则进行处理
+            if has_unit:
+                # 如果之前已经有单位了,则需要将当前数字乘以前面的单位
+                result += current_num * unit_map[last_unit]
+                current_num = num_map[char]
+                has_unit = False
+            else:
+                # 如果之前没有单位,则直接累加
+                current_num = current_num * 10 + num_map[char]
+        elif char in unit_map:
+            # 如果是单位,则标记为已有单位,并保存最后一个单位
+            last_unit = char
+            has_unit = True
+
+            # 处理字符串末尾的数字(如果没有单位,则直接加上)
+    if current_num != 0:
+        if has_unit:
+            result += current_num * unit_map[last_unit]
+        else:
+            result += current_num
+
+    return result
+
+def get_all_label(title, content):
+    def is_direct_procurement():
+        # 企业直采
+        if re.search('询比价|询比|竞价|竞价|议价|报价', title) or re.search('我要报价|竞价起止时间|报价起止时间', content) or \
+                (re.search('公司|集团|企业', content) and re.search('招标|中标|投标', content) == None):
+            return 1
+        return 0
+
+    def is_target_small():
+        # 专门面向中小企业
+        if re.search('专门面向中小微?企业', content) and re.search('(非|不属于|不|是/否))?专门面向中小微?企业|部分面向中小微?企业', content) == None:
+            return 1
+        elif re.search('仅面向小微企业|专门面向.{,30}中小企业采购|是否专门面向中小微?企业(采购)?:是|本项目为中小型企业预留项目|专门面向中小微?企业', content):
+            return 1
+        elif re.search('落实政府采购政策需满足的资格要求.{,30}供应商为中小企业', content) and re.search('(非|不属于|不|是/否))?专门面向中小微?企业|部分面向中小微?企业',
+                                                                                content) == None:
+            return 1
+        return 0
+
+    def registered_years():
+        # 注册年限
+        ser = None
+        if re.search('禁止\w{,5}注册未满(?P<num>([一二三四五六七八九十]+|\d+))(?P<unit>(年|个?月))', content):
+            ser = re.search('禁止\w{,5}注册未满(?P<num>([一二三四五六七八九十]+|\d+))(?P<unit>(年|个?月))', content)
+        elif re.search('(成立|注册)时间:?\w{,10}(不[低少]于|大于(等于)?|需满)(?P<num>([一二三四五六七八九十]+|\d+))(?P<unit>(年|个?月))', content):
+            ser = re.search('(成立|注册)时间:?\w{,10}(不[低少]于|大于(等于)?|需满)(?P<num>([一二三四五六七八九十]+|\d+))(?P<unit>(年|个?月))',
+                            content)
+        elif re.search('(成立|注册)时间:?\w{,10}(?P<num>([一二三四五六七八九十]+|\d+))(?P<unit>(年|个?月)[或及]?以上)', content):
+            ser = re.search('(成立|注册)时间:?\w{,10}(?P<num>([一二三四五六七八九十]+|\d+))(?P<unit>(年|个?月)[或及]?以上)', content)
+        elif re.search('(成立|注册)时间:?\w{,10}不满(?P<num>([一二三四五六七八九十]+|\d+))(?P<unit>(年|个?月)\w{,5}请勿报价)', content):
+            ser = re.search('(成立|注册)时间:?\w{,10}不满(?P<num>([一二三四五六七八九十]+|\d+))(?P<unit>(年|个?月)\w{,5}请勿报价)', content)
+        if ser:
+            num = ser.group('num')
+            unit = ser.group('unit')
+            if num.isdigit():
+                num = int(num)
+            else:
+                num = chinese_to_arabic(num)
+            if unit == '年':
+                num *= 12
+            return num
+        return 0
+
+    def registered_capital():
+        # 注册资本
+        ser = None
+        if re.search('注册(资本|资金):?\w{,5}(不[低少]于|大于(等于)?|≥)(?P<num>(\d+[\d.]*))(?P<unit>([万亿]?元))', content):
+            ser = re.search('注册(资本|资金):?\w{,5}(不[低少]于|大于(等于)?|≥)(?P<num>(\d+[\d.]*))(?P<unit>([万亿]?元))', content)
+        elif re.search('注册(资本|资金):?\w{,5}(?P<num>(\d+[\d.]*))(?P<unit>([万亿]?元)[或及]?以上)', content):
+            ser = re.search('注册(资本|资金):?\w{,5}(?P<num>(\d+[\d.]*))(?P<unit>([万亿]?元)[或及]?以上)', content)
+        if ser:
+            num = ser.group('num')
+            unit = ser.group('unit')
+            return float(getUnifyMoney(num + unit))
+        return 0
+
+    def need_qualification():
+        # 有资质证书要求
+        if re.search('资质要求.{,150}(行业资质|证书|许可证|认证|经营范围|一级|二级|三级|甲级|乙级|丙级|特级|壹级|贰级|叁级)', content):
+            return 1
+        elif re.search('(提供|有|具备)\w{,50}(资质|认证|证书|许可证)', content):
+            return 1
+        elif re.search('资格)?要求:?\w{,30}(甲级|丙级|乙级|一级|二级|三级|特级|壹级|贰级|叁级)', content):
+            return 1
+        elif re.search('认证体系要求', content):
+            return 1
+        return 0
+
+    def need_ca():
+        # 7 是否需要办CA
+        if re.search('需要\w{,20}数字证书|使用\w{,20}签章', content):
+            return 1
+        elif re.search('办理\w{,20}(数字证书|CA|ca)', content) and re.search('无需\w{,15}办理', content) == None:
+            return 1
+        elif re.search('(数字证书|CA|ca)\w{,5}办理|是否要求供应商使用(CA|ca)数字证书参与:是', content):
+            return 1
+        if re.search('(不使用|无需)\w{,20}(数字证书|CA|ca)|是否要求供应商使用(CA|ca)数字证书参与:不要求', content):
+            return 0
+        return 0
+
+    def need_performance():
+        # 有业绩要求
+        if re.search('业绩证明|业绩要求|行业业绩|相关业绩', content):
+            return 1
+        elif re.search('类似\w{,10}业绩', content) or re.search('业绩.{,5}如有', content) == None:
+            return 1
+        elif re.search('完成[^,。]{,100}项目', content):
+            return 1
+        elif re.search('(提供|有|完成).{,100}业绩', content):
+            return 1
+        return 0
+
+    def mode_of_partipation():
+        # 参与方式 1线上 2线下 0其他
+        if re.search('(平台|网站|http|www|官网|网址|网页|网上中介|邮件|邮箱|客户端|采购网|系统|邮寄).{,20}(注册|报名)', content):
+            return 1
+        elif re.search('现场报名', content):
+            if re.search(
+                    '非现场报名|(在线报名|邮件|邮寄|邮箱|线上报名|网络报名).{,10}现场报名|现场报名.{,10}(在线报名|邮件|邮寄|邮箱|线上报名|网络报名)', content) == None:
+                return 1
+            return 2
+        elif re.search('(获取采购文件|文件的获取|文件获取|获取竞价文件|获取招标文件|文件的领取|文件领取|获取投标文件).{,200}'
+                       '(平台|线上|客户端|邮寄|网上获取|网站|网址|http|www|邮箱|寄送|网络获取|采购网|网络领购|系统|邮件|在线报名|网络报名|非现场报名|线上报名)', content):
+            return 1
+        elif re.search('(获取采购文件|文件的获取|文件获取|获取竞价文件|获取招标文件|文件的领取|文件领取|获取投标文件).{,200}'
+                       '([\d一二三四五六七八九十]号|接待室|开标室|现场领取|会议室|线下购买|现场获取|X[\d一二三四五六七八九十]|办公楼|现场报名)', content):
+            return 2
+        elif re.search('(报价地址|报价信息|报价请点击|报价方法|报价式|报价方式|报价提交|报价地点).{,30}'
+                       '(平台|网站|http|www|官网|网址|网页|网上中介|邮件|邮箱|客户端|采购网|系统|在线报价|线上报价)', content):
+            return 1
+        elif re.search('线下报价', content) and re.search('不接受线下报价|线下报价无效', content) == None:
+            return 2
+        elif re.search('(文件提交|文件递交|递交方式|文件的提交|文件送达地点|递交响应文件|证明材料的递交)', content):
+            b = re.search('(文件提交|文件递交|递交方式|文件的提交|文件送达地点|递交响应文件|证明材料的递交)', content).end()
+            ser = re.search('联系方式|发布公告的媒介', content[b:b + 200])
+            text = content[b:b + ser.start()] if ser else content[b:b + 200]
+            if re.search('平台|线上|客户端|邮寄|网站|网址|http|www|邮箱|寄送|采购网|系统|邮件|网页', text):
+                return 1
+            elif re.search('[\d一二三四五六七八九十]号|接待室|现场递交|开标室|会议室|[\d一二三四五六七八九十]楼|线下递交|办公楼', text):
+                return 2
+        if re.search('(平台|线上|客户端|网站|网址|http|www|采购网|系统|网页).{,10}递交.{,10}文件', content):
+            return 1
+        if re.search('(开标地点|投标地点|开标时间和地点|开标时间及地点|开标方式)', content):
+            b = re.search('(开标地点|投标地点|开标时间和地点|开标时间及地点|开标方式)', content).end()
+            ser = re.search('联系方式|发布公告的媒介', content[b:b + 70])
+            text = content[b:b + ser.start()] if ser else content[b:b + 70]
+            if re.search(
+                    '平台|线上|客户端|网站|网址|http|www|线上开标|采购网|非现场开标|不见面开标|远程异地开启|系统|线上观看开标|网上开标|在线直播的方式开标|远程开标|现场开启|电子卖场|电子开标|开标现场电话联系',
+                    text):
+                return 1
+            elif re.search('[\d一二三四五六七八九十]号|线下开标|接待室|现场递交|开标室|现场开标|会议室|[\d一二三四五六七八九十]楼|办公楼|街道', text):
+                return 2
+        if re.search('(平台|线上|客户端|网站|网址|http|www|采购网|系统|网页).{,10}开标', content):
+            return 1
+        elif re.search('不见面开标|非现场开标|远程异地开启|线上观看开标|网上开标|在线直播的方式开标|远程开标|现场开启|非公开开启|电子开标|开标现场电话联系', content):
+            return 1
+        elif re.search('开启.{,20}地点', content):
+            b = re.search('开启.{,20}地点', content).end()
+            ser = re.search('联系方式|发布公告的媒介', content[b:b + 70])
+            text = content[b:b + ser.start()] if ser else content[b:b + 70]
+            if re.search(
+                    '平台|线上|客户端|网站|网址|http|www|线上开标|采购网|非现场开标|不见面开标|远程异地开启|系统|线上观看开标|网上开标|在线直播的方式开标|远程开标|电子卖场|电子开标|开标现场电话联系',
+                    text):
+                return 1
+            elif re.search('[\d一二三四五六七八九十]号|线下开标|接待室|现场递交|开标室|现场开标|会议室|[\d一二三四五六七八九十]楼|办公楼|街道', text):
+                return 2
+        return 0
+
+    def suitable_small():
+        # 适合小微企业投标
+        if re.search('属于专门面向中小企业|有招标单位联系方式|无注册年限要求|无注册资本要求|无资质证书要求|无业绩要求', content):
+            return 1
+        elif re.search('属于企业直采|有招标单位联系方式|无注册年限要求|无注册资本要求|无资质证书要求|无业绩要求', content):
+            return 2
+        elif re.search('有招标单位联系方式|无注册年限要求|无注册资本要求|无资质证书要求|无业绩要求', content):
+            return 3
+        return 0
+
+    label_dic = {}
+    is_direct_procurement = is_direct_procurement() # 是否直接采购
+    is_target_small = is_target_small() # 是否面向中小企业
+    mode_of_partipation = mode_of_partipation() # 参与方式
+    need_ca = need_ca() # 是否需要CA
+    need_performance = need_performance() # 有业绩要求
+    need_qualification = need_qualification() # 资质要求
+    registered_capital = registered_capital() # 注册资本
+    registered_years = registered_years() # 注册年限
+    suitable_small = suitable_small() # 适合小微企业
+
+    label_dic['is_direct_procurement'] = is_direct_procurement
+    label_dic['is_target_small'] = is_target_small
+    label_dic['mode_of_partipation'] = mode_of_partipation
+    label_dic['need_ca'] = need_ca
+    label_dic['need_performance'] = need_performance
+    label_dic['need_qualification'] = need_qualification
+    label_dic['registered_capital'] = registered_capital
+    label_dic['registered_years'] = registered_years
+    label_dic['suitable_small'] = suitable_small
+
+    label_dic = {k: v for k, v in label_dic.items() if v!=0}
+
+    return label_dic
+
+if __name__ == "__main__":
+    # with open('D:\html/2.html', 'r', encoding='UTF-8') as f:
+    #     html = f.read()
+    # rs = get_all_label('', html)
+    # print('rs: ', rs)
+
+    import pandas as pd
+    from bs4 import BeautifulSoup
+    import json
+
+    df = pd.read_csv(r'E:\channel分类数据\2022年每月两天数据/指定日期_html2022-12-10.csv')[:]
+    print(df.columns, len(df))
+    df.drop_duplicates(subset=['docchannel', 'web_source_name', 'exist_table'], inplace=True)
+    print(len(df))
+    def get_text(html):
+        soup = BeautifulSoup(html, 'lxml')
+        text = soup.get_text()
+        return text
+    df['content'] = df['dochtmlcon'].apply(lambda x: get_text(x))
+    df['标签'] = df.apply(lambda x: get_all_label(x['doctitle'], x['content']), axis=1)
+    df['标签'] = df['标签'].apply(lambda x: json.dumps(x, ensure_ascii=False, indent=2))
+    df = df[['docid', 'docchannel', 'web_source_name', 'exist_table', '标签']]
+    df.to_excel('E:/公告标签提取结果.xlsx', index=False)
+
+
+
+
+
+
+
+
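
get_label_dic.py is a new module: get_all_label(title, content) derives a sparse label dict (direct procurement, SME-targeted, participation mode, CA requirement, performance and qualification requirements, registered capital, registered years in months, SME suitability) from regex rules over the plain text, dropping keys whose value is 0, and chinese_to_arabic() converts small Chinese numerals so year/month requirements can be normalised. A quick usage sketch of the numeral helper, assuming the BiddingKG package is importable:

from BiddingKG.dl.interface.get_label_dic import chinese_to_arabic

print(chinese_to_arabic('三'))      # 3
print(chinese_to_arabic('十'))      # 10
print(chinese_to_arabic('十五'))    # 15
print(chinese_to_arabic('二十三'))  # 23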

+ 1247 - 0
BiddingKG/dl/interface/htmlparser.py

@@ -0,0 +1,1247 @@
+#coding:utf8
+
+import re
+
+# from BaseDataMaintenance.maintenance.product.productUtils import is_similar
+# from BiddingKG.dl.common.Utils import log
+import logging
+logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
+
+logger = logging.getLogger(__name__)
+logger.setLevel(logging.INFO)
+
+def log(msg):
+    '''
+    @summary:打印信息
+    '''
+    logger.info(msg)
+
+from bs4 import BeautifulSoup
+import copy
+
+import Levenshtein
+def jaccard_score(source,target):
+    source_set = set([s for s in source])
+    target_set = set([s for s in target])
+    if len(source_set)==0 or len(target_set)==0:
+        return 0
+    return max(len(source_set&target_set)/len(source_set),len(source_set&target_set)/len(target_set))
+def judge_pur_chinese(keyword):
+    """
+    中文字符的编码范围为: u'\u4e00' -- u'\u9fff:只要在此范围内就可以判断为中文字符串
+    @param keyword:
+    @return:
+    """
+    # 定义一个需要删除的标点符号字符串列表
+    remove_chars = '[·’!"\#$%&\'()#!()*+,-./:;<=>?\@,:?¥★、….>【】[]《》?“”‘’\[\\]^_`{|}~]+'
+    # 利用re.sub来删除中文字符串中的标点符号
+    strings = re.sub(remove_chars, "", keyword)  # 将keyword中文字符串中remove_chars中包含的标点符号替换为空字符串
+    for ch in strings:
+        if u'\u4e00' <= ch <= u'\u9fff':
+            pass
+        else:
+            return False
+    return True
+
+def is_similar(source,target,_radio=None):
+    source = str(source).lower()
+    target = str(target).lower()
+    max_len = max(len(source),len(target))
+    min_len = min(len(source),len(target))
+
+    min_ratio = 90
+    if min_len>=3:
+        min_ratio = 87
+    if min_len>=5:
+        min_ratio = 85
+    if _radio is not None:
+        min_ratio = _radio
+    # dis_len = abs(len(source)-len(target))
+    # min_dis = min(max_len*0.2,4)
+    if min_len==0 and max_len>0:
+        return False
+    if max_len<=2:
+        if source==target:
+            return True
+    if min_len<2:
+        return False
+    #判断相似度
+    similar = Levenshtein.ratio(source,target)*100
+    if similar>=min_ratio:
+        log("%s and %s similar_jaro %d"%(source,target,similar))
+        return True
+    similar_jaro = Levenshtein.jaro(source,target)
+    if similar_jaro*100>=min_ratio:
+        log("%s and %s similar_jaro %d"%(source,target,similar_jaro*100))
+        return True
+    similar_jarow = Levenshtein.jaro_winkler(source,target)
+    if similar_jarow*100>=min_ratio:
+        log("%s and %s similar_jaro %d"%(source,target,similar_jarow*100))
+        return True
+
+    if min_len>=5:
+        if len(source)==max_len and str(source).find(target)>=0:
+                return True
+        elif len(target)==max_len and target.find(source)>=0:
+                return True
+        elif jaccard_score(source, target)==1 and judge_pur_chinese(source) and judge_pur_chinese(target):
+            return True
+    return False
+
+end_pattern = "商务要求|评分标准|商务条件|商务条件"
+_param_pattern = "(产品|技术|清单|配置|参数|具体|明细|项目|招标|货物|服务|规格|工作|具体)[及和与]?(指标|配置|条件|要求|参数|需求|规格|条款|名称及要求)|配置清单|(质量|技术).{,10}要求|验收标准|^(参数|功能)$"
+meter_pattern = "[><≤≥±]\d+|\d+(?:[μucmkK微毫千]?[米升LlgGmMΩ]|摄氏度|英寸|度|天|VA|dB|bpm|rpm|kPa|mol|cmH20|%|°|Mpa|Hz|K?HZ|℃|W|min|[*×xX])|[*×xX]\d+|/min|\ds[^a-zA-Z]|GB.{,20}标准|PVC|PP|角度|容积|色彩|自动|流量|外径|轴位|折射率|帧率|柱镜|振幅|磁场|镜片|防漏|强度|允差|心率|倍数|瞳距|底座|色泽|噪音|间距|材质|材料|表面|频率|阻抗|浓度|兼容|防尘|防水|内径|实时|一次性|误差|性能|距离|精确|温度|超温|范围|跟踪|对比度|亮度|[横纵]向|均压|负压|正压|可调|设定值|功能|检测|高度|厚度|宽度|深度|[单双多]通道|效果|指数|模式|尺寸|重量|峰值|谷值|容量|寿命|稳定性|高温|信号|电源|电流|转换率|效率|释放量|转速|离心力|向心力|弯曲|电压|功率|气量|国标|标准协议|灵敏度|最大值|最小值|耐磨|波形|高压|性强|工艺|光源|低压|压力|压强|速度|湿度|重量|毛重|[MLX大中小]+码|净重|颜色|[红橙黄绿青蓝紫]色|不锈钢|输入|输出|噪声|认证|配置"
+not_meter_pattern = "投标报价|中标金额|商务部分|公章|分值构成|业绩|详见|联系人|联系电话|合同价|金额|采购预算|资金来源|费用|质疑|评审因素|评审标准|商务资信|商务评分|专家论证意见|评标方法|代理服务费|售后服务|评分类型|评分项目|预算金额|得\d+分|项目金额|详见招标文件|乙方"
+
+
+def getTrs(tbody):
+    #获取所有的tr
+    trs = []
+    if tbody.name=="table":
+        body = tbody.find("tbody",recursive=False)
+        if body is not None:
+            tbody = body
+    objs = tbody.find_all(recursive=False)
+    for obj in objs:
+        if obj.name=="tr":
+            trs.append(obj)
+        if obj.name=="tbody" or obj.name=="table":
+            for tr in obj.find_all("tr",recursive=False):
+                trs.append(tr)
+    return trs
+
+def fixSpan(tbody):
+    # 处理colspan, rowspan信息补全问题
+    #trs = tbody.findChildren('tr', recursive=False)
+
+    trs = getTrs(tbody)
+    ths_len = 0
+    ths = list()
+    trs_set = set()
+    #修改为先进行列补全再进行行补全,否则可能会出现表格解析混乱
+    # 遍历每一个tr
+
+    for indtr, tr in enumerate(trs):
+        ths_tmp = tr.findChildren('th', recursive=False)
+        #不补全含有表格的tr
+        if len(tr.findChildren('table'))>0:
+            continue
+        if len(ths_tmp) > 0:
+            ths_len = ths_len + len(ths_tmp)
+            for th in ths_tmp:
+                ths.append(th)
+            trs_set.add(tr)
+        # 遍历每行中的element
+        tds = tr.findChildren(recursive=False)
+        for indtd, td in enumerate(tds):
+            # 若有colspan 则补全同一行下一个位置
+            if 'colspan' in td.attrs:
+                if str(re.sub("[^0-9]","",str(td['colspan'])))!="":
+                    col = int(re.sub("[^0-9]","",str(td['colspan'])))
+                    if col<100 and len(td.get_text())<1000:
+                        td['colspan'] = 1
+                        for i in range(1, col, 1):
+                            td.insert_after(copy.copy(td))
+
+    for indtr, tr in enumerate(trs):
+        ths_tmp = tr.findChildren('th', recursive=False)
+        #不补全含有表格的tr
+        if len(tr.findChildren('table'))>0:
+            continue
+        if len(ths_tmp) > 0:
+            ths_len = ths_len + len(ths_tmp)
+            for th in ths_tmp:
+                ths.append(th)
+            trs_set.add(tr)
+        # 遍历每行中的element
+        tds = tr.findChildren(recursive=False)
+        for indtd, td in enumerate(tds):
+            # 若有rowspan 则补全下一行同样位置
+            if 'rowspan' in td.attrs:
+                if str(re.sub("[^0-9]","",str(td['rowspan'])))!="":
+                    row = int(re.sub("[^0-9]","",str(td['rowspan'])))
+                    td['rowspan'] = 1
+                    for i in range(1, row, 1):
+                        # 获取下一行的所有td, 在对应的位置插入
+                        if indtr+i<len(trs):
+                            tds1 = trs[indtr + i].findChildren(['td','th'], recursive=False)
+                            if len(tds1) >= (indtd) and len(tds1)>0:
+                                if indtd > 0:
+                                    tds1[indtd - 1].insert_after(copy.copy(td))
+                                else:
+                                    tds1[0].insert_before(copy.copy(td))
+                            elif indtd-2>0 and len(tds1) > 0 and len(tds1) == indtd - 1:  # 修正某些表格最后一列没补全
+                                tds1[indtd-2].insert_after(copy.copy(td))
+def getTable(tbody):
+    #trs = tbody.findChildren('tr', recursive=False)
+    fixSpan(tbody)
+    trs = getTrs(tbody)
+    inner_table = []
+    for tr in trs:
+        tr_line = []
+        tds = tr.findChildren(['td','th'], recursive=False)
+        if len(tds)==0:
+            tr_line.append([re.sub('\xa0','',tr.get_text()),0]) # 2021/12/21 修复部分表格没有td 造成数据丢失
+        for td in tds:
+            tr_line.append([re.sub('\xa0','',td.get_text()),0])
+            #tr_line.append([td.get_text(),0])
+        inner_table.append(tr_line)
+    return inner_table
+
+class Sentence2():
+    def __init__(self,text,sentence_index,wordOffset_begin,wordOffset_end):
+        self.name = 'sentence2'
+        self.text = text
+        self.sentence_index = sentence_index
+        self.wordOffset_begin = wordOffset_begin
+        self.wordOffset_end = wordOffset_end
+
+    def get_text(self):
+        return self.text
+
+class ParseDocument():
+
+    def __init__(self,_html,auto_merge_table=True,list_obj = []):
+        if _html is None:
+            _html = ""
+        self.html = _html
+        self.auto_merge_table = auto_merge_table
+
+        if list_obj:
+            self.list_obj = list_obj
+        else:
+            self.soup = BeautifulSoup(self.html, "lxml")
+            _body = self.soup.find("body")
+            if _body is not None:
+                self.soup = _body
+            self.list_obj = self.get_soup_objs(self.soup)
+
+            # self.list_obj = [it.get_text().strip().replace(' ', '') for it in self.list_obj]
+            # self.list_obj = [Sentence2(text, 1,1,5) for text in self.list_obj]
+
+        # for obj in self.list_obj:
+        #     print("obj",obj.get_text()[:20])
+
+        self.tree = self.buildParsetree(self.list_obj,[],auto_merge_table)
+
+
+        # #识别目录树
+        # if self.parseTree:
+        #     self.parseTree.printParseTree()
+        # self.print_tree(self.tree,"-|")
+
+    def get_soup_objs(self,soup,list_obj=None):
+        if list_obj is None:
+            list_obj = []
+        childs = soup.find_all(recursive=False)
+        for _obj in childs:
+            childs1 = _obj.find_all(recursive=False)
+            if len(childs1)==0 or len(_obj.get_text())<40 or _obj.name=="table":
+                list_obj.append(_obj)
+            elif _obj.name=="p":
+                list_obj.append(_obj)
+            else:
+                self.get_soup_objs(_obj,list_obj)
+        return list_obj
+
+    def fix_tree(self,_product):
+        products = extract_products(self.tree,_product)
+        if len(products)>0:
+            self.tree = self.buildParsetree(self.list_obj,products,self.auto_merge_table)
+
+    def print_tree(self,tree,append=""):
+        self.set_tree_id = set()
+        if append=="":
+            for t in tree:
+                logger.debug("%s text:%s title:%s title_text:%s before:%s after%s product:%s"%("==>",t["text"][:50],t["sentence_title"],t["sentence_title_text"],t["title_before"],t["title_after"],t["has_product"]))
+
+        for t in tree:
+            _id = id(t)
+            if _id in self.set_tree_id:
+                continue
+            self.set_tree_id.add(_id)
+            logger.info("%s text:%s title:%s title_text:%s before:%s after%s product:%s"%(append,t["text"][:50],t["sentence_title"],t["sentence_title_text"],t["title_before"],t["title_after"],t["has_product"]))
+            childs = t["child_title"]
+            self.print_tree(childs,append=append+"-|")
+
+    def is_title_first(self,title):
+        if title in ("一","1","Ⅰ","a","A"):
+            return True
+        return False
+
+    def find_title_by_pattern(self,_text,_pattern="(^|★|▲|:|:|\s+)(?P<title_1>(?P<title_1_index_0_0>第?)(?P<title_1_index_1_1>[一二三四五六七八九十ⅠⅡⅢⅣⅤⅥⅦⅧⅨⅩⅪⅫ]+)(?P<title_1_index_2_0>[、章册包标部.::、、]+))|" \
+                                             "([\s★▲\*]*)(?P<title_3>(?P<title_3_index_0_0>[^一二三四五六七八九十\dⅠⅡⅢⅣⅤⅥⅦⅧⅨⅩⅪⅫ]{,3}?)(?P<title_3_index_0_1>[ⅠⅡⅢⅣⅤⅥⅦⅧⅨⅩⅪⅫ]+)(?P<title_3_index_0_2>[、章册包标部.::、、]+))|" \
+                                             "([\s★▲\*]*)(?P<title_4>(?P<title_4_index_0_0>[^一二三四五六七八九十\dⅠⅡⅢⅣⅤⅥⅦⅧⅨⅩⅪⅫ]{,3}?第?)(?P<title_4_index_1_1>[一二三四五六七八九十]+)(?P<title_4_index_2_0>[节章册部\.::、、]+))|" \
+                                             "([\s★▲\*]*)(?P<title_5>(?P<title_5_index_0_0>^)(?P<title_5_index_1_1>[一二三四五六七八九十]+)(?P<title_5_index_2_0>)[^一二三四五六七八九十节章册部\.::、、])|" \
+                                             "([\s★▲\*]*)(?P<title_12>(?P<title_12_index_0_0>[^一二三四五六七八九十\dⅠⅡⅢⅣⅤⅥⅦⅧⅨⅩⅪⅫ]{,3}?\d{1,2}[\..、\s\-]\d{1,2}[\..、\s\-]\d{1,2}[\..、\s\-]\d{1,2}[\..、\s\-])(?P<title_12_index_1_1>\d{1,2})(?P<title_12_index_2_0>[\..、\s\-]?))|"\
+                                             "([\s★▲\*]*)(?P<title_11>(?P<title_11_index_0_0>[^一二三四五六七八九十\dⅠⅡⅢⅣⅤⅥⅦⅧⅨⅩⅪⅫ]{,3}?\d{1,2}[\..、\s\-]\d{1,2}[\..、\s\-]\d{1,2}[\..、\s\-])(?P<title_11_index_1_1>\d{1,2})(?P<title_11_index_2_0>[\..、\s\-]?))|" \
+                                             "([\s★▲\*]*)(?P<title_10>(?P<title_10_index_0_0>[^一二三四五六七八九十\dⅠⅡⅢⅣⅤⅥⅦⅧⅨⅩⅪⅫ]{,3}?\d{1,2}[\..、\s\-]\d{1,2}[\..、\s\-])(?P<title_10_index_1_1>\d{1,2})(?P<title_10_index_2_0>[\..、\s\-]?))|" \
+                                             "([\s★▲\*]*)(?P<title_7>(?P<title_7_index_0_0>[^一二三四五六七八九十\dⅠⅡⅢⅣⅤⅥⅦⅧⅨⅩⅪⅫ]{,3}?\d{1,2}[\..\s\-])(?P<title_7_index_1_1>\d{1,2})(?P<title_7_index_2_0>[\..包标::、\s\-]*))|" \
+                                             "(^[\s★▲\*]*)(?P<title_6>(?P<title_6_index_0_0>[^一二三四五六七八九十\dⅠⅡⅢⅣⅤⅥⅦⅧⅨⅩⅪⅫ]{,3}?包?)(?P<title_6_index_0_1>\d{1,2})(?P<title_6_index_2_0>[\..、\s\-包标]*))|" \
+                                             "([\s★▲\*]*)(?P<title_15>(?P<title_15_index_0_0>[^一二三四五六七八九十\dⅠⅡⅢⅣⅤⅥⅦⅧⅨⅩⅪⅫ]{,3}?[((]?)(?P<title_15_index_1_1>\d{1,2})(?P<title_15_index_2_0>[))包标\..::、]+))|" \
+                                             "([\s★▲\*]+)(?P<title_17>(?P<title_17_index_0_0>[^一二三四五六七八九十\dⅠⅡⅢⅣⅤⅥⅦⅧⅨⅩⅪⅫ]{,3}?[((]?)(?P<title_17_index_1_1>[a-zA-Z]+)(?P<title_17_index_2_0>[))包标\..::、]+))|" \
+                                             "([\s★▲\*]*)(?P<title_19>(?P<title_19_index_0_0>[^一二三四五六七八九十\dⅠⅡⅢⅣⅤⅥⅦⅧⅨⅩⅪⅫ]{,3}?[((]?)(?P<title_19_index_1_1>[一二三四五六七八九十ⅠⅡⅢⅣⅤⅥⅦⅧⅨⅩⅪⅫ]+)(?P<title_19_index_2_0>[))]))"
+                              ):
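+        # Illustrative reading of the default pattern (hand-checked, not asserted anywhere
+        # in this diff): for the text "一、项目概况" the returned groups are roughly
+        # [('title_1','一、'), ('title_1_index_0_0',''), ('title_1_index_1_1','一'),
+        #  ('title_1_index_2_0','、')], i.e. prefix / ordinal / separator of the heading.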
+        _se = re.search(_pattern,_text)
+        groups = []
+        if _se is not None:
+            e = _se.end()
+            if re.search('(时间|日期|编号|账号|号码|手机|价格|\w价|人民币|金额|得分|分值|总分|满分|最高得|扣|减)[::]?\d', _se.group(0)) or (re.search('\d[.::]?$', _se.group(0)) and re.search('^[\d年月日万元天]', _text[e:])):
+                return None
+            _gd = _se.groupdict()
+            for k,v in _gd.items():
+                if v is not None:
+                    groups.append((k,v))
+        if len(groups):
+            # groups.sort(key=lambda x:x[0])
+            return groups
+        return None
+
+    def make_increase(self,_sort,_title,_add=1):
+        if len(_title)==0 and _add==0:
+            return ""
+        if len(_title)==0 and _add==1:
+            return _sort[0]
+        _index = _sort.index(_title[-1])
+        next_index = (_index+_add)%len(_sort)
+        next_chr = _sort[next_index]
+        # carry to the next position only when this increment actually wrapped past the last symbol
+        if _add==1 and _index==len(_sort)-1:
+            _add = 1
+        else:
+            _add = 0
+        return next_chr+self.make_increase(_sort,_title[:-1],_add)
+
+
+    def get_next_title(self,_title):
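+        # Sketch of the intended behaviour (hand-checked examples, not covered by tests in
+        # this diff): "3"->"4", "九"->"十", "十"->"十一", "b"->"c", "Ⅱ"->"Ⅲ".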
+        if re.search("^\d+$",_title) is not None:
+            return str(int(_title)+1)
+        if re.search("^[一二三四五六七八九十百]+$",_title) is not None:
+            if _title[-1]=="十":
+                return _title+"一"
+            if _title[-1]=="百":
+                return _title+"零一"
+
+            if _title[-1]=="九":
+                if len(_title)==1:
+                    return "十"
+                if len(_title)==2:
+                    if _title[0]=="十":
+                        return "二十"
+                if len(_title)==3:
+                    if _title[0]=="九":
+                        return "一百"
+                    else:
+                        _next_title = self.make_increase(['一','二','三','四','五','六','七','八','九','十'],re.sub("[十百]",'',_title[0]))
+                        return _next_title+"十"
+
+            _next_title = self.make_increase(['一','二','三','四','五','六','七','八','九','十'],re.sub("[十百]",'',_title))
+            _next_title = list(_next_title)
+            _next_title.reverse()
+            if _next_title[-1]!="十":
+                if len(_next_title)>=2:
+                    _next_title.insert(-1,'十')
+            if len(_next_title)>=4:
+                _next_title.insert(-3,'百')
+            if _title[0]=="十":
+                if _next_title=="十":
+                    _next_title = ["二","十"]
+                _next_title.insert(0,"十")
+            _next_title = "".join(_next_title)
+            return _next_title
+        if re.search("^[a-z]+$",_title) is not None:
+            _next_title = self.make_increase([chr(i+ord('a')) for i in range(26)],_title)
+            _next_title = list(_next_title)
+            _next_title.reverse()
+            return "".join(_next_title)
+        if re.search("^[A-Z]+$",_title) is not None:
+            _next_title = self.make_increase([chr(i+ord('A')) for i in range(26)],_title)
+            _next_title = list(_next_title)
+            _next_title.reverse()
+            return "".join(_next_title)
+        if re.search("^[ⅠⅡⅢⅣⅤⅥⅦⅧⅨⅩⅪⅫ]$",_title) is not None:
+            _sort = ["Ⅰ","Ⅱ","Ⅲ","Ⅳ","Ⅴ","Ⅵ","Ⅶ","Ⅷ","Ⅸ","Ⅹ","Ⅺ","Ⅻ"]
+            _index = _sort.index(_title)
+            if _index<len(_sort)-1:
+                return _sort[_index+1]
+            return None
+
+    def count_title_before(self,list_obj):
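+        # Descriptive note: dict_before maps the normalized context found before a title
+        # index to its frequency across the document; illegal_sentence collects page-number
+        # style lines and short sentences repeated more than 10 times, which buildParsetree
+        # later skips.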
+        dict_before = {}
+        dict_sentence_count = {}
+        illegal_sentence = set()
+        for obj_i in range(len(list_obj)):
+            obj = list_obj[obj_i]
+            _type = "sentence"
+            _text = obj.text.strip()
+            if obj.name=="table":
+                _type = "table"
+                _text = str(obj)
+            _append = False
+
+
+            if _type=="sentence":
+                if len(_text)>10 and len(_text)<100:
+                    if _text not in dict_sentence_count:
+                        dict_sentence_count[_text] = 0
+                    dict_sentence_count[_text] += 1
+                    if re.search("\d+页",_text) is not None:
+                        illegal_sentence.add(_text)
+                elif len(_text)<10:
+                    if re.search("第\d+页",_text) is not None:
+                        illegal_sentence.add(_text)
+
+                sentence_groups = self.find_title_by_pattern(_text[:10])
+                if sentence_groups:
+                    # print("sentence_groups",sentence_groups)
+                    sentence_title = sentence_groups[0][0]
+                    sentence_title_text = sentence_groups[0][1]
+                    title_index = sentence_groups[-2][1]
+                    title_before = sentence_groups[1][1].replace("(","(").replace(":",":").replace(":",";").replace(",",".").replace(",",".").replace("、",".")
+                    title_after = sentence_groups[-1][1].replace(")",")").replace(":",":").replace(":",";").replace(",",".").replace(",",".").replace("、",".")
+                    next_index = self.get_next_title(title_index)
+                    if title_before not in dict_before:
+                        dict_before[title_before] = 0
+                    dict_before[title_before] += 1
+
+        for k,v in dict_sentence_count.items():
+            if v>10:
+                illegal_sentence.add(k)
+        return dict_before,illegal_sentence
+
+    def is_page_no(self,sentence):
+        if len(sentence)<10:
+            if re.search("\d+页|^\-\d+\-$",sentence) is not None:
+                return True
+
+    def block_tree(self,childs):
+        for child in childs:
+
+            if not child["block"]:
+                child["block"] = True
+                childs2 = child["child_title"]
+                self.block_tree(childs2)
+
+
+    def buildParsetree(self,list_obj,products=[],auto_merge_table=True):
+
+        self.parseTree = None
+        trees = []
+        list_length = []
+        for obj in list_obj[:200]:
+            if obj.name!="table":
+                list_length.append(len(obj.get_text()))
+        if len(list_length)>0:
+            max_length = max(list_length)
+        else:
+            max_length = 40
+        max_length = min(max_length,40)
+
+        logger.debug("%s:%d"%("max_length",max_length))
+
+
+        list_data = []
+        last_table_index = None
+        last_table_columns = None
+        last_table = None
+        dict_before,illegal_sentence = self.count_title_before(list_obj)
+        for obj_i in range(len(list_obj)):
+            obj = list_obj[obj_i]
+
+            # logger.debug("==obj %s"%obj.text[:20])
+
+            _type = "sentence"
+            _text = standard_product(obj.text)
+            if obj.name=="table":
+                _type = "table"
+                _text = standard_product(str(obj))
+            _append = False
+            sentence_title = None
+            sentence_title_text = None
+            sentence_groups = None
+            title_index = None
+            next_index = None
+            parent_title = None
+            title_before = None
+            title_after = None
+            title_next = None
+            childs = []
+            # new
+            sentence_index = obj.sentence_index
+            wordOffset_begin = obj.wordOffset_begin
+            wordOffset_end = obj.wordOffset_end
+
+            list_table = None
+            block = False
+
+            has_product = False
+
+            if _type=="sentence":
+                if _text in illegal_sentence:
+                    continue
+
+
+                sentence_groups = self.find_title_by_pattern(_text[:10])
+                if sentence_groups:
+                    title_before = standard_title_context(sentence_groups[1][1])
+                    title_after = sentence_groups[-1][1]
+                    sentence_title_text = sentence_groups[0][1]
+                    other_text = _text.replace(sentence_title_text,"")
+                    if (title_before in dict_before and dict_before[title_before]>1) or title_after!="":
+                        sentence_title = sentence_groups[0][0]
+
+                        title_index = sentence_groups[-2][1]
+                        next_index = self.get_next_title(title_index)
+
+                        other_text = _text.replace(sentence_title_text,"")
+
+                        for p in products:
+                            if other_text.strip()==p.strip():
+                                has_product = True
+
+                    else:
+                        _fix = False
+
+                        for p in products:
+                            if other_text.strip()==p.strip():
+                                title_before = "=产品"
+                                sentence_title = "title_0"
+                                sentence_title_text = p
+                                title_index = "0"
+                                title_after = "产品="
+                                next_index = "0"
+                                _fix = True
+                                has_product = True
+                                break
+                        if not _fix:
+                            title_before = None
+                            title_after = None
+                            sentence_title_text = None
+                else:
+                    if len(_text)<40 and re.search(_param_pattern,_text) is not None:
+                        for p in products:
+                            if _text.find(p)>=0:
+                                title_before = "=产品"
+                                sentence_title = "title_0"
+                                sentence_title_text = p
+                                title_index = "0"
+                                title_after = "产品="
+                                next_index = "0"
+                                _fix = True
+                                has_product = True
+                                break
+
+            if _type=="sentence":
+                if sentence_title is None and len(list_data)>0 and list_data[-1]["sentence_title"] is not None and list_data[-1]["line_width"]>=max_length*0.6:
+                    list_data[-1]["text"] += _text
+                    list_data[-1]["line_width"] = len(_text)
+                    _append = True
+                elif sentence_title is None and len(list_data)>0 and _type==list_data[-1]["type"]:
+                    if list_data[-1]["line_width"]>=max_length*0.7:
+                        list_data[-1]["text"] += _text
+                        list_data[-1]["line_width"] = len(_text)
+                        _append = True
+
+            if _type=="table":
+                _soup = BeautifulSoup(_text,"lxml")
+                _table = _soup.find("table")
+                if _table is not None:
+                    list_table = getTable(_table)
+                    if len(list_table)==0:
+                        continue
+                    table_columns = len(list_table[0])
+
+                    if auto_merge_table:
+                        if last_table_index is not None and abs(obj_i-last_table_index)<=2 and last_table_columns is not None and last_table_columns==table_columns:
+                            if last_table is not None:
+                                trs = getTrs(_table)
+                                last_tbody = BeautifulSoup(last_table["text"],"lxml")
+                                _table = last_tbody.find("table")
+                                last_trs = getTrs(_table)
+                                _append = True
+
+                                for _line in list_table:
+                                    last_table["list_table"].append(_line)
+                                if len(last_trs)>0:
+                                    for _tr in trs:
+                                        last_trs[-1].insert_after(copy.copy(_tr))
+                                    last_table["text"] = re.sub("</?html>|</?body>","",str(last_tbody))
+
+                                last_table_index = obj_i
+                                last_table_columns = len(list_table[-1])
+
+
+            if not _append:
+                _data = {"type":_type, "text":_text,"list_table":list_table,"line_width":len(_text),"sentence_title":sentence_title,"title_index":title_index,
+                         "sentence_title_text":sentence_title_text,"sentence_groups":sentence_groups,"parent_title":parent_title,
+                         "child_title":childs,"title_before":title_before,"title_after":title_after,"title_next":title_next,"next_index":next_index,
+                         "block":block,"has_product":has_product,
+                         "sentence_index":sentence_index,"wordOffset_begin":wordOffset_begin,"wordOffset_end":wordOffset_end
+                        }
+
+                if _type=="table":
+                    last_table = _data
+                    last_table_index = obj_i
+                    if list_table:
+                        last_table_columns = len(list_table[-1])
+
+                if sentence_title is not None:
+                    if len(list_data)>0:
+                        if self.is_title_first(title_index):
+                            for i in range(1,len(list_data)+1):
+                                _d = list_data[-i]
+                                if _d["sentence_title"] is not None:
+                                    _data["parent_title"] = _d
+                                    _d["child_title"].append(_data)
+                                    break
+                        else:
+                            _find = False
+                            for i in range(1,len(list_data)+1):
+                                if _find:
+                                    break
+                                _d = list_data[-i]
+                                if _d.get("sentence_title")==sentence_title and title_before==_d["title_before"] and title_after==_d["title_after"]:
+                                    if _d["next_index"]==title_index and _d["title_next"] is None and not _d["block"]:
+                                        _data["parent_title"] = _d["parent_title"]
+                                        _d["title_next"] = _data
+                                        if len(_d["child_title"])>0:
+                                            _d["child_title"][-1]["title_next"] = ""
+                                            self.block_tree(_d["child_title"])
+                                        if _d["parent_title"] is not None:
+                                            _d["parent_title"]["child_title"].append(_data)
+                                        _find = True
+                                        break
+                            for i in range(1,len(list_data)+1):
+                                if _find:
+                                    break
+                                _d = list_data[-i]
+                                if i==1 and not _d["block"] and _d.get("sentence_title")==sentence_title and title_before==_d["title_before"] and title_after==_d["title_after"]:
+                                    _data["parent_title"] = _d["parent_title"]
+                                    _d["title_next"] = _data
+                                    if len(_d["child_title"])>0:
+                                        _d["child_title"][-1]["title_next"] = ""
+                                        self.block_tree(_d["child_title"])
+                                    if _d["parent_title"] is not None:
+                                        _d["parent_title"]["child_title"].append(_data)
+                                    _find = True
+                                    break
+                            title_before = standard_title_context(title_before)
+                            title_after = standard_title_context(title_after)
+                            for i in range(1,len(list_data)+1):
+                                if _find:
+                                    break
+                                _d = list_data[-i]
+                                if _d.get("sentence_title")==sentence_title and title_before==standard_title_context(_d["title_before"]) and title_after==standard_title_context(_d["title_after"]):
+                                    if _d["next_index"]==title_index and _d["title_next"] is None and not _d["block"]:
+                                        _data["parent_title"] = _d["parent_title"]
+                                        _d["title_next"] = _data
+                                        if len(_d["child_title"])>0:
+                                            _d["child_title"][-1]["title_next"] = ""
+                                            self.block_tree(_d["child_title"])
+                                        if _d["parent_title"] is not None:
+                                            _d["parent_title"]["child_title"].append(_data)
+                                        _find = True
+                                        break
+                            for i in range(1,len(list_data)+1):
+                                if _find:
+                                    break
+                                _d = list_data[-i]
+                                if not _d["block"] and _d.get("sentence_title")==sentence_title and title_before==standard_title_context(_d["title_before"]) and title_after==standard_title_context(_d["title_after"]):
+                                    _data["parent_title"] = _d["parent_title"]
+                                    _d["title_next"] = _data
+                                    if len(_d["child_title"])>0:
+                                        _d["child_title"][-1]["title_next"] = ""
+                                        # self.block_tree(_d["child_title"])
+                                    if _d["parent_title"] is not None:
+                                        _d["parent_title"]["child_title"].append(_data)
+                                    _find = True
+                                    break
+                            for i in range(1,min(len(list_data)+1,20)):
+                                if _find:
+                                    break
+                                _d = list_data[-i]
+                                if not _d["block"] and _d.get("sentence_title")==sentence_title and title_before==standard_title_context(_d["title_before"]):
+                                    _data["parent_title"] = _d["parent_title"]
+                                    _d["title_next"] = _data
+                                    if len(_d["child_title"])>0:
+                                        _d["child_title"][-1]["title_next"] = ""
+                                        # self.block_tree(_d["child_title"])
+                                    if _d["parent_title"] is not None:
+                                        _d["parent_title"]["child_title"].append(_data)
+                                    _find = True
+                                    break
+
+                            if not _find:
+                                if len(list_data)>0:
+                                    for i in range(1,len(list_data)+1):
+                                        _d = list_data[-i]
+                                        if _d.get("sentence_title") is not None:
+                                            _data["parent_title"] = _d
+                                            _d["child_title"].append(_data)
+                                            break
+
+
+                else:
+                    if len(list_data)>0:
+                        for i in range(1,len(list_data)+1):
+                            _d = list_data[-i]
+                            if _d.get("sentence_title") is not None:
+                                _data["parent_title"] = _d
+                                _d["child_title"].append(_data)
+                                break
+
+                list_data.append(_data)
+
+        for _data in list_data:
+
+            childs = _data["child_title"]
+
+            for c_i in range(len(childs)):
+                cdata = childs[c_i]
+                if cdata["has_product"]:
+                    continue
+                else:
+                    if c_i>0:
+                        last_cdata = childs[c_i-1]
+                        if cdata["sentence_title"] is not None and last_cdata["sentence_title"] is not None and last_cdata["title_before"]==cdata["title_before"] and last_cdata["title_after"]==cdata["title_after"] and last_cdata["has_product"]:
+                            cdata["has_product"] = True
+                    if c_i<len(childs)-1:
+                        last_cdata = childs[c_i+1]
+                        if cdata["sentence_title"] is not None and last_cdata["sentence_title"] is not None and last_cdata["title_before"]==cdata["title_before"] and last_cdata["title_after"]==cdata["title_after"] and last_cdata["has_product"]:
+                            cdata["has_product"] = True
+            for c_i in range(len(childs)):
+                cdata = childs[len(childs)-1-c_i]
+                if cdata["has_product"]:
+                    continue
+                else:
+                    if c_i>0:
+                        last_cdata = childs[c_i-1]
+                        if cdata["sentence_title"] is not None and last_cdata["sentence_title"] is not None and last_cdata["title_before"]==cdata["title_before"] and last_cdata["title_after"]==cdata["title_after"] and last_cdata["has_product"]:
+                            cdata["has_product"] = True
+                    if c_i<len(childs)-1:
+                        last_cdata = childs[c_i+1]
+                        if cdata["sentence_title"] is not None and last_cdata["sentence_title"] is not None and last_cdata["title_before"]==cdata["title_before"] and last_cdata["title_after"]==cdata["title_after"] and last_cdata["has_product"]:
+                            cdata["has_product"] = True
+
+
+        return list_data
+
+
+def standard_title_context(_title_context):
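+    # Normalizes punctuation variants so that title contexts compare equal: full-width and
+    # half-width parentheses are collapsed to one form, the various comma/enumeration marks
+    # all become the same dot, and colons end up as ";" after this replacement chain, e.g.
+    # "一、" and "一." normalize to the same value (hand-checked, not asserted anywhere).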
+    return _title_context.replace("(","(").replace(")",")").replace(":",":").replace(":",";").replace(",",".").replace(",",".").replace("、",".").replace(".",".")
+
+def standard_product(sentence):
+    return sentence.replace("(","(").replace(")",")")
+
+def extract_products(list_data,_product,_param_pattern = "产品名称|设备材料|采购内存|标的名称|采购内容|(标的|维修|系统|报价构成|商品|产品|物料|物资|货物|设备|采购品|采购条目|物品|材料|印刷品?|采购|物装|配件|资产|耗材|清单|器材|仪器|器械|备件|拍卖物|标的物|物件|药品|药材|药械|货品|食品|食材|品目|^品名|气体|标项|分项|项目|计划|包组|标段|[分子]?包|子目|服务|招标|中标|成交|工程|招标内容)[\))的]?([、\w]{,4}名称|内容|描述)|标的|标项|项目$|商品|产品|物料|物资|货物|设备|采购品|采购条目|物品|材料|印刷品|物装|配件|资产|招标内容|耗材|清单|器材|仪器|器械|备件|拍卖物|标的物|物件|药品|药材|药械|货品|食品|食材|菜名|^品目$|^品名$|^名称|^内容$"):
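+    # Rough behaviour (descriptive only): harvest candidate product names from table columns
+    # whose header matches _param_pattern or whose cells contain _product; if one table's
+    # products closely match _product (similarity >= 90) only that table is kept, otherwise
+    # all harvested names are returned de-duplicated.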
+    _product = standard_product(_product)
+    list_result = []
+    list_table_products = []
+    for _data_i in range(len(list_data)):
+        _data = list_data[_data_i]
+        _type = _data["type"]
+        _text = _data["text"]
+
+        if _type=="table":
+            list_table = _data["list_table"]
+            if list_table is None:
+                continue
+            _check = True
+            max_length = max([len(a) for a in list_table])
+            min_length = min([len(a) for a in list_table])
+            if min_length<max_length/2:
+                continue
+            list_head_index = []
+            _begin_index = 0
+            head_cell_text = ""
+            for line_i in range(len(list_table[:2])):
+                line = list_table[line_i]
+                line_text = ",".join([cell[0] for cell in line])
+                for cell_i in range(len(line)):
+                    cell = line[cell_i]
+                    cell_text = cell[0]
+                    if len(cell_text)<10 and re.search(_param_pattern,cell_text) is not None and re.search("单价|数量|预算|限价|总价|品牌|规格|型号|用途|要求|采购量",line_text) is not None:
+                        _begin_index = line_i+1
+                        list_head_index.append(cell_i)
+
+            for line_i in range(len(list_table)):
+                line = list_table[line_i]
+                for cell_i in list_head_index:
+                    if cell_i>=len(line):
+                        continue
+                    cell = line[cell_i]
+                    cell_text = cell[0]
+                    head_cell_text += cell_text
+
+            # print("===head_cell_text",head_cell_text)
+            if re.search("招标人|采购人|项目编号|项目名称|金额|^\d+$",head_cell_text) is not None:
+                list_head_index = []
+
+            for line in list_table:
+                line_text = ",".join([cell[0] for cell in line])
+                for cell_i in range(len(line)):
+                    cell = line[cell_i]
+                    cell_text = cell[0]
+                    if cell_text is not None and _product is not None and len(cell_text)<len(_product)*10 and cell_text.find(_product)>=0 and re.search("单价|数量|总价|规格|品牌|型号|用途|要求|采购量",line_text) is not None:
+                        list_head_index.append(cell_i)
+
+            list_head_index = list(set(list_head_index))
+            if len(list_head_index)>0:
+                has_number = False
+                for cell_i in list_head_index:
+                    table_products = []
+
+                    for line_i in range(_begin_index,len(list_table)):
+                        line = list_table[line_i]
+
+                        for _i in range(len(line)):
+                            cell = line[_i]
+                            cell_text = cell[0]
+                            if re.search("^\d+$",cell_text) is not None:
+                                has_number = True
+
+                        if cell_i>=len(line):
+                            continue
+                        cell = line[cell_i]
+                        cell_text = cell[0]
+                        if re.search(_param_pattern,cell_text) is None or has_number:
+                            if re.search("^[\da-zA-Z]+$",cell_text) is None:
+                                table_products.append(cell_text)
+
+                    if len(table_products)>0:
+                        logger.debug("table products %s"%(str(table_products)))
+                        if min([len(x) for x in table_products])>0 and max([len(x) for x in table_products])<=30:
+                            if re.search("招标人|代理人|预算|数量|交货期|品牌|产地","".join(table_products)) is None:
+                                list_table_products.append(table_products)
+    _find = False
+    for table_products in list_table_products:
+        for _p in table_products:
+            if is_similar(_product,_p,90):
+                _find = True
+                logger.debug("similar table_products %s"%(str(table_products)))
+                list_result = list(set([a for a in table_products if len(a)>1 and len(a)<20 and re.search("费用|预算|合计|金额|万元|运费|^其他$",a) is None]))
+                break
+    if not _find:
+        for table_products in list_table_products:
+            list_result.extend(table_products)
+        list_result = list(set([a for a in list_result if len(a)>1 and len(a)<30 and re.search("费用|预算|合计|金额|万元|运费",a) is None]))
+    return list_result
+
+
+def get_childs(childs, max_depth=None):
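+    # Flattens the "child_title" hierarchy depth-first: get_childs([node]) returns the node
+    # plus all of its descendants; with max_depth=1 it stops after the node's direct
+    # child_title entries (hand-checked against the recursion below).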
+    list_data = []
+    for _child in childs:
+        list_data.append(_child)
+        childs2 = _child.get("child_title",[])
+
+        if len(childs2)>0 and (max_depth==None or max_depth>0):
+            for _child2 in childs2:
+                if max_depth != None:
+                    list_data.extend(get_childs([_child2], max_depth-1))
+                else:
+                    list_data.extend(get_childs([_child2], None))
+    return list_data
+
+def get_range_data_by_childs(list_data,childs):
+    range_data = []
+    list_child = get_childs(childs)
+    list_index = []
+    set_child = set([id(x) for x in list_child])
+    for _data_i in range(len(list_data)):
+        _data = list_data[_data_i]
+        _id = id(_data)
+        if _id in set_child:
+            list_index.append(_data_i)
+    if len(list_index)>0:
+        range_data = list_data[min(list_index):max(list_index)+1]
+    return range_data
+
+def get_correct_product(product,products):
+    list_data = []
+    for p in products:
+        is_sim = is_similar(product,p)
+        _d = {"product":p,"distance":abs(len(product)-len(p)),"is_sim":is_sim}
+        list_data.append(_d)
+    list_data.sort(key=lambda x:x["distance"])
+    for _d in list_data:
+        is_sim = _d["is_sim"]
+        if is_sim:
+            if len(_d["product"])>len(product) and _d["product"].find(product)>=0:
+                return product
+            return _d["product"]
+    return product
+
+def get_childs_text(childs,_product,products,is_begin=False,is_end=False):
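+    # Rough behaviour (descriptive only): walk the subtree and accumulate text starting from
+    # the first node whose text mentions _product, and stop once another extracted product,
+    # a has_product node, or end_pattern is reached; returns (text, is_begin, is_end).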
+    _text = ""
+
+    end_next = False
+    for _child in childs:
+
+        child_text = _child.get("text")
+
+
+        if child_text.find(_product)>=0:
+            if not is_begin:
+                is_begin = True
+                if not end_next:
+                    if _child["sentence_title"] is not None and isinstance(_child["title_next"],dict) and _child["title_next"]["sentence_title"] is not None:
+                        end_next = True
+                        end_title = _child["title_next"]
+                        logger.debug("end_title %s "%end_title["text"])
+
+        logger.debug("%s-%s-%s"%("get_childs_text",child_text[:10],str(is_begin)))
+
+        for p in products:
+            if child_text.find(p)>=0 and is_similar(_product,p,90):
+                is_begin = True
+
+            if child_text.find(_product)<0  and not is_similar(_product,p,80) and  (child_text.find(p)>=0 or _child["has_product"]):
+                if is_begin:
+                    is_end = True
+                    logger.debug("%s-%s-%s"%("get_childs_text end",child_text[:10],p))
+                break
+        if re.search(end_pattern,child_text) is not None:
+            if is_begin:
+                is_end = True
+                logger.debug("%s-%s-%s"%("get_childs_text end",child_text[:10],str(is_end)))
+
+        if is_begin and is_end:
+            break
+
+        if is_begin:
+            _text += _child.get("text")+"\r\n"
+        childs2 = _child.get("child_title",[])
+
+
+        if len(childs2)>0:
+            for _child2 in childs2:
+                child_text,is_begin,is_end = get_childs_text([_child2],_product,products,is_begin)
+                if is_begin:
+                    _text += child_text
+                    if is_end:
+                        break
+
+        if end_next:
+            is_end = True
+
+    #     logger.debug("%s-%s-%s"%("get_childs_text1",_text,str(is_begin)))
+    # logger.debug("%s-%s-%s"%("get_childs_text2",_text,str(is_begin)))
+    return _text,is_begin,is_end
+
+def extract_parameters_by_tree(_product,products,list_data,_data_i,parent_title,list_result,):
+    _data = list_data[_data_i]
+    childs = _data.get("child_title",[])
+    if len(childs)>0:
+        child_text,_,_ = get_childs_text([_data],_product,products)
+        if len(child_text)>0:
+            logger.info("extract_type by_tree child_text:%s"%child_text)
+            list_result.append(child_text)
+    if parent_title is not None:
+        child_text,_,_ = get_childs_text([parent_title],_product,products)
+        if len(child_text)>0:
+            logger.info("extract_type by_tree child_text:%s"%child_text)
+            list_result.append(child_text)
+
+        childs = parent_title.get("child_title",[])
+        if len(childs)>0:
+
+            range_data = get_range_data_by_childs(list_data[_data_i:],childs)
+            p_text = ""
+            _find = False
+            end_id = id(_data["title_next"]) if _data["sentence_title"] is not None and isinstance(_data["title_next"],dict) and _data["title_next"]["sentence_title"] is not None else None
+            for pdata in range_data:
+                ptext = pdata["text"]
+                for p in products:
+                    if ptext.find(_product)<0 and  (ptext.find(p)>=0 or pdata["has_product"]):
+                        _find = True
+                        break
+                if re.search(end_pattern,ptext) is not None:
+                    _find = True
+                if _find:
+                    break
+                if id(pdata)==end_id:
+                    break
+                p_text += ptext+"\r\n"
+            if len(p_text)>0:
+                logger.debug("extract_type by parent range_text:%s"%p_text)
+                list_result.append(p_text)
+                return True
+    return False
+
+
+def get_table_pieces(_text,_product,products,list_result,_find):
+    _soup = BeautifulSoup(_text,"lxml")
+    _table = _soup.find("table")
+    if _table is not None:
+        trs = getTrs(_table)
+        list_trs = []
+        for tr in trs:
+            tr_text = tr.get_text()
+            if tr_text.find(_product)>=0:
+                _find = True
+
+            logger.debug("%s-%s"%("table_html_tr",tr_text))
+            for p in products:
+                if _find and p!=_product and tr_text.find(p)>=0:
+                    _find = False
+                    break
+            if re.search(end_pattern,tr_text) is not None:
+                _find = False
+                break
+            if _find:
+                list_trs.append(tr)
+        if len(list_trs)>0:
+            table_html = "<table>%s</table>"%("\r\n".join([str(a) for a in list_trs]))
+            logger.debug("extract_type table slices %s"%(table_html))
+            list_result.append(table_html)
+
+def extract_parameters_by_table(_product,products,_param_pattern,list_data,_data_i,list_result):
+    _data = list_data[_data_i]
+    _text = _data["text"]
+    list_table = _data["list_table"]
+    parent_title = _data["parent_title"]
+    if list_table is not None:
+        _check = True
+        max_length = max([len(a) for a in list_table])
+        min_length = min([len(a) for a in list_table])
+        text_line_first = ",".join(a[0] for a in list_table[0])
+        if max_length>10:
+            if min_length<max_length/2:
+                return
+        last_data = list_data[_data_i-1]
+        _flag = False
+        if last_data["type"]=="sentence" and last_data["text"].find(_product)>=0:
+            logger.debug("last sentence find product %s-%s"%(_product,last_data["text"]))
+            _flag = True
+        # print(text_line_first,"text_line_first",re.search(_param_pattern,text_line_first) is not None and text_line_first.find(_product)>=0)
+        if re.search(_param_pattern,text_line_first) is not None and text_line_first.find(_product)>=0:
+            _flag = True
+        if _flag:
+            if len(products)==0:
+                logger.debug("extract_type whole table by param and product %s"%(_text))
+                list_result.append(_text)
+            else:
+                for p in products:
+                    if p!=_product and _text.find(p)>=0:
+                        logger.debug("extract_type add all table failed %s-%s"%(_product,p))
+                        _flag = False
+                        break
+                if _flag:
+                    logger.debug("extract_type add all table succeed")
+                    get_table_pieces(_text,_product,products,list_result,True)
+        else:
+            list_head_index = []
+            for line in list_table[:2]:
+                for cell_i in range(len(line)):
+                    cell = line[cell_i]
+                    cell_text = cell[0]
+                    if len(cell_text)<20 and re.search(_param_pattern,cell_text) is not None:
+                        list_head_index.append(cell_i)
+            list_head_index = list(set(list_head_index))
+            for line in list_table:
+                for cell in line:
+                    cell_text = cell[0]
+                    if len(cell_text)>50 and len(re.findall(meter_pattern,cell_text))>5 and cell_text.find(_product)>=0:
+                        _f = True
+                        for cell in line:
+                            if not _f:
+                                break
+                            cell_text = cell[0]
+                            for p in products:
+                                if cell_text.find(p)>=0 and p!=_product:
+                                    _f = False
+                                    break
+                        if _f:
+                            logger.debug("extract_type param column %s"%(cell_text))
+                            list_result.append(cell_text)
+                    if len(cell_text)<len(_product)*10 and str(cell_text).find(_product)>=0:
+                        for _index in list_head_index:
+                            if _index>=len(line):
+                                continue
+                            _cell = line[_index]
+                            if len(_cell[0])>0:
+                                logger.info("%s-%s"%("extract_type add on table text:",_cell[0]))
+                                list_result.append(_cell[0])
+        if not _flag and (re.search(_param_pattern,_text) is not None or (parent_title is not None and re.search(_param_pattern,parent_title["text"]) is not None)) and _text.find(_product)>=0:
+            get_table_pieces(_text,_product,products,list_result,False)
+
+
+def extract_parameters_by_sentence(list_data,_data,_data_i,_product,products,list_result,is_project):
+    _text = _data["text"]
+    if _text.find(_product)>=0:
+        parent_title = _data.get("parent_title")
+        parent_text = ""
+        parent_parent_title = None
+        parent_parent_text = ""
+        parent_title_index = None
+        parent_parent_title_index = None
+        childs = get_childs([_data])
+
+        child_find = False
+        for c in childs:
+            if re.search(_param_pattern,c["text"]) is not None and len(c["text"])<30:
+                logger.debug("child text %s"%(c["text"]))
+                child_find = True
+                break
+
+        extract_text,_,_ = get_childs_text([_data],_product,products)
+        logger.debug("childs found extract_text %s %s"%(str(child_find),extract_text))
+        if child_find:
+            if len(extract_text)>0:
+                list_result.append(extract_text)
+        else:
+            limit_nums = len(_product)*2+5
+            if len(_product)<=3:
+                limit_nums += 6
+            if _text.find("数量")>=0:
+                limit_nums += 6
+            if len(_text)<=limit_nums and _data["sentence_title"] is not None:
+                if re.search(meter_pattern,extract_text) is not None:
+                    list_result.append(extract_text)
+            elif len(re.findall(meter_pattern,extract_text))>2:
+                list_result.append(extract_text)
+
+        if parent_title is not None:
+            parent_text = parent_title.get("text","")
+            parent_parent_title = parent_title.get("parent_title")
+            parent_title_index = parent_title["title_index"]
+            if parent_parent_title is not None:
+                parent_parent_text = parent_parent_title.get("text","")
+                parent_parent_title_index = parent_parent_title["title_index"]
+
+        _suit = False
+        if re.search(_param_pattern,_text) is not None and len(_text)<50:
+            _suit = True
+        if re.search(_param_pattern,parent_text) is not None and len(parent_text)<50:
+            _suit = True
+        if re.search(_param_pattern,parent_parent_text) is not None and len(parent_parent_text)<50:
+            _suit = True
+        if _suit:
+            logger.debug("extract_type sentence %s"%("extract_parameters_by_tree"))
+            if not extract_parameters_by_tree(_product,products,list_data,_data_i,parent_title,list_result):
+                logger.debug("extract_type sentence %s"%("extract_parameters_by_tree"))
+                extract_parameters_by_tree(_product,products,list_data,_data_i,parent_parent_title,list_result)
+
+    if re.search(_param_pattern,_text) is not None and len(_text)<50:
+        childs = _data["child_title"]
+        if len(childs)>0:
+            extract_text,_,_ = get_childs_text([_data],_product,products)
+            if len(extract_text)>0:
+                logger.debug("extract_type param-product %s"%(extract_text))
+                list_result.append(extract_text)
+            elif is_project:
+                extract_text,_,_ = get_childs_text([_data],_product,products,is_begin=True)
+                if len(extract_text)>0 and re.search(meter_pattern,extract_text) is not None:
+                    logger.debug("extract_type sentence is_project param-product is product %s"%(extract_text))
+                    list_result.append(extract_text)
+
+def getBestProductText(list_result,_product,products):
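+    # Rough behaviour (descriptive only): rank the candidate snippets by the number of
+    # parameter-like matches, then return the first one that passes the sanity checks below
+    # (no project-number/bank boilerplate, no other product mixed in, sensible length, and
+    # enough meter_pattern hits, at least one of them containing a digit or letter).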
+    list_result.sort(key=lambda x:len(re.findall(meter_pattern+"|"+'[::;;]|\d+[%A-Za-z]+',BeautifulSoup(x,"lxml").get_text())), reverse=True)
+
+    logger.debug("+++++++++++++++++++++")
+    for i in range(len(list_result)):
+        logger.debug("result%d %s"%(i,list_result[i]))
+    logger.debug("+++++++++++++++++++++")
+
+    for i in range(len(list_result)):
+        _result = list_result[i]
+        _check = True
+        _result_text = BeautifulSoup(_result,"lxml").get_text()
+        _search = re.search("项目编号[::]|项目名称[::]|联合体投标|开户银行",_result)
+        if _search is not None:
+            logger.debug("result%d error illegal text %s"%(i,str(_search)))
+            _check = False
+        if not (len(_result_text)<1000 and _result[:6]!="<table"):
+            for p in products:
+                if _result_text.find(p)>0 and not (is_similar(_product,p,80) or p.find(_product)>=0 or _product.find(p)>=0):
+                    logger.debug("result%d error product scoss %s"%(i,p))
+                    _check = False
+        if len(_result_text)<100:
+            if re.search(meter_pattern,_result_text) is None:
+                logger.debug("result%d error text min count"%(i))
+                _check = False
+        if len(_result_text)>5000:
+            if len(_result_text)>10000:
+                logger.debug("result%d error text max count"%(i))
+                _check = False
+            elif len(re.findall(meter_pattern,_result_text))<10:
+                logger.debug("result%d error text max count less meter"%(i))
+                _check = False
+
+        list_find = list(set(re.findall(meter_pattern,_result_text)))
+
+        not_list_find = list(set(re.findall(not_meter_pattern,_result_text)))
+        _count = len(list_find)-len(not_list_find)
+        has_num = False
+        for _find in list_find:
+            if re.search('[0-9a-zA-Z]',_find) is not None:
+                has_num = True
+                break
+        if not(_count>=2 and has_num or _count>=5):
+            logger.debug("result%d error match not enough"%(i))
+            _check = False
+
+        if _check:
+            return _result
+
+def format_text(_result):
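+    # e.g. (hand-checked) format_text("a\r\nb") returns
+    # '<div style="white-space:pre">a\nb\n</div>'.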
+    list_result = re.split("\r|\n",_result)
+    _result = ""
+    for _r in list_result:
+        if len(_r)>0:
+            _result+="%s\n"%(_r)
+    _result = '<div style="white-space:pre">%s</div>'%(_result)
+    return _result
+
+def extract_product_parameters(list_data,_product):
+
+    list_result = []
+    _product = standard_product(_product.strip())
+    products = extract_products(list_data,_product)
+
+    _product = get_correct_product(_product,products)
+    logger.debug("all products %s-%s"%(_product,str(products)))
+    is_project = False
+    if re.search("项目名称|采购项目",_product) is not None:
+        is_project = True
+        
+    if len(products)==1 and is_similar(products[0],_product,90):
+        is_project = True
+    _find_count = 0
+    for _data_i in range(len(list_data)):
+        _data = list_data[_data_i]
+        _type = _data["type"]
+        _text = _data["text"]
+        if _type=="sentence":
+            if _text.find(_product)>=0:
+                _find_count += 1
+                if re.search("项目名称|采购项目",_text) is not None and re.search("等",_text) is not None:
+                    is_project = True
+            extract_parameters_by_sentence(list_data,_data,_data_i,_product,products,list_result,is_project)
+
+        elif _type=="table":
+            if _text.find(_product)>=0:
+                _find_count += 1
+            extract_parameters_by_table(_product,products,_param_pattern,list_data,_data_i,list_result)
+
+    _text = getBestProductText(list_result,_product,products)
+    return _text,_find_count
+
+
+if __name__ == '__main__':
+
+    filepath = "download/4597dcc128bfabc7584d10590ae50656.html"
+    _product = "彩色多普勒超声诊断仪"
+
+    _html = open(filepath, "r", encoding="utf8").read()
+
+    pd = ParseDocument(_html,False)
+
+    pd.fix_tree(_product)
+    list_data = pd.tree
+    pd.print_tree(list_data)
+
+    _text,_count = extract_product_parameters(list_data,_product)
+    logger.info("find count:%d"%(_count))
+    logger.info("extract_parameter_text::%s"%(_text))
+
+

+ 115 - 0
BiddingKG/dl/interface/login_weblist.txt

@@ -0,0 +1,115 @@
+金采网
+隆道-大企业采购平台
+海尔招标网
+中国船舶采购管理电子商务平台
+睿采网
+江阴热电有限公司采购电子平台
+深圳市国深房地产开发有限公司招采平台
+优采云
+益和电气电子采购平台
+亚泰电子招标采购平台
+浙江大有集团招投标采购平台
+山西晋龙集团
+天能重工
+北京市政路桥集团(股份)有限公司招标采购平台
+物集港商城
+中国中化控股有限责任公司电子商务平台
+华招医药网
+河源市万信招标代理有限公司
+欧贝 - 工业品供应链生态平台
+中金岭南阳光采购平台
+东方希望数字化采购平台
+宁波市北仑区大碶博平小学
+西电集团电子采购平台
+聚拍网
+智采招标代理(天津)有限公司
+智采医用耗材信息网
+中国兵工物资集团有限公司电子商务平台
+四川省工程建设项目审批管理系统
+首采云数字化采购平台
+鞍钢现货交易平台
+山钢股份莱芜分公司电子采购平台
+中国电建设备物资集中采购平台
+优质采云采购平台
+特乐意建材电商交易平台
+织巢鸟
+河北普阳钢铁集团网上招标管理平台
+河钢供应链管理平台
+南方水泥招采平台
+航天电子采购平台
+供应链数字化管理平台
+四川省投资项目在线审批监管平台
+盈峰环境
+军队自采平台
+陕西鼓风机(集团)有限公司电子采购系统
+易采平台
+得力集团
+浑源县政府采购电子卖场
+铁建商城
+中国工程物理研究院招投标信息网
+山西省招标投标协会
+浙江保利置业阳光招采平台
+旺采网
+广东省教育系统采购竞价平台
+数字云采
+龙成集团电子招标平台
+友云采
+邯郸市邯钢附属企业公司
+中国硫酸网
+云南江东房地产集团有限公司
+珍药采购招标信息化管理平台
+大地阳光采购平台
+江苏省中医院投标平台
+福建省船舶工业集团公司采购平台
+渤化易采平台
+八戒公采
+云端采购网
+中国航发网上商城
+晋能控股电力集团
+四川玄同工程项目管理有限责任公司
+物联宝
+畅采通招标采购网
+广西保利置业阳光招采平台
+智慧工厂在线
+中复神鹰碳纤维有限责任公司
+云采网
+中国兵器废旧物资处置平台
+中电环保科技公司电子采购平台
+中南锦时招采平台
+浙江中医药大学
+丝路汇采
+中铁鲁班商务网
+津水云采
+中国电子科技集团有限公司电子采购平台
+中国巨石股份有限公司
+龙蟒集团
+中集集装箱电子采购协同平台
+城轨采购网
+中国铁路招标网
+中电环保电子采购平台电子采购平台
+工程众创云平台
+中国石油电子招标投标网
+浪潮爱购云
+福建建工分包与劳务管理平台
+金正大集团电子采购平台
+冠洲集团电子采购系统
+浙江云采购中心平台
+华新阳光采购平台
+苏州市宇杰工程技术服务咨询有限公司
+中储粮服务网
+中国华电集团电子商务平台
+招商局集团电子招标采购交易网
+中国制造网采购平台
+钜商网
+中车购
+中建鸿腾招标与采购平台
+U材U建平台
+山西华鑫电子采购平台
+深圳保利阳光招采平台
+渤商网
+厦门航空采购平台
+山东农商行集中采购管理系统
+湖北保利投资阳光招采平台
+山东省采购与招标网
+政采云
+中国招标投标公共服务平台

+ 1 - 0
BiddingKG/dl/interface/modelFactory.py

@@ -103,6 +103,7 @@ class Model_role_classify_word():
         if re.search('(最终)?排名:', text) and re.search('(最终)?排名:第?[123一二三]', text)==None:
             text = re.sub('(最终)?排名:', '    ', text)
         text = re.sub('交易单位', '发布单位', text)
+        text = re.sub('[,:]各种数据:', ':', text) # 20240620 fix for 478331984: the 山东省交通运输厅 source yielded no extraction because of text like "各种数据:中标单位,各种数据:济南金曰公路工程有限公司,"
         return text.replace('(', '(').replace(')', ')').replace('單', '单').replace('稱','承').replace('標', '标').replace('採購', '采购').replace('機構', '机构')
 
     def encode_word(self, sentence_text, begin_index, end_index, size=20, **kwargs):

+ 279 - 0
BiddingKG/dl/interface/outline_extractor.py

@@ -0,0 +1,279 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+
+"""
+@author: bidikeji
+@time: 2024/7/19 10:05
+"""
+import re
+from BiddingKG.dl.interface.htmlparser import ParseDocument,get_childs
+
+class Sentence2():
+    def __init__(self,text,sentence_index,wordOffset_begin,wordOffset_end):
+        self.name = 'sentence2'
+        self.text = text
+        self.sentence_index = sentence_index
+        self.wordOffset_begin = wordOffset_begin
+        self.wordOffset_end = wordOffset_end
+
+    def get_text(self):
+        return self.text
+
+def extract_sentence_list(sentence_list):
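+    # Hypothetical illustration (not taken from a real document): a sentence such as
+    # "1、招标内容:滑触线及配件2、招标品牌:东芝。" first gets a comma inserted before the
+    # inline "2、" numbering and is then split on [,。;;!!?] into two Sentence2 pieces,
+    # "1、招标内容:滑触线及配件," and "2、招标品牌:东芝。".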
+    new_sentence2_list = []
+    new_sentence2_list_attach = []
+    for sentence in sentence_list:
+        sentence_index = sentence.sentence_index
+        sentence_text = sentence.sentence_text
+        begin_index = 0
+        end_index = 0
+        for it in re.finditer('([^一二三四五六七八九十,。][一二三四五六七八九十]{1,3}|[^\d,。]\d{1,2}(\.\d{1,2}){,2})、', sentence_text): # 例:289699210 1、招标内容:滑触线及配件2、招标品牌:3、参标供应商经营形式要求:厂家4、参标供应商资质要求:5、
+            temp = it.group(0)
+            sentence_text = sentence_text.replace(temp, temp[0] + ',' + temp[1:])
+        for item in re.finditer('[,。;;!!?]+', sentence_text): # 20240725: dropped the ASCII question mark so URLs are not split apart
+            end_index = item.end()
+            # if end_index!=len(sentence_text):
+            #     # if end_index-begin_index<6 and item.group(0) in [',', ';', ';'] and re.match('[一二三四五六七八九十\d.]+、', sentence_text[begin_index:end_index])==None: # 20240725 注销,避免标题提取错误
+            #     #     continue
+            new_sentence_text = sentence_text[begin_index:end_index]
+            sentence2 = Sentence2(new_sentence_text,sentence_index,begin_index,end_index)
+            if sentence.in_attachment:
+                new_sentence2_list_attach.append(sentence2)
+            else:
+                new_sentence2_list.append(sentence2)
+            begin_index = end_index
+        if end_index!=len(sentence_text):
+            end_index = len(sentence_text)
+            new_sentence_text = sentence_text[begin_index:end_index]
+            sentence2 = Sentence2(new_sentence_text, sentence_index, begin_index, end_index)
+            if sentence.in_attachment:
+                new_sentence2_list_attach.append(sentence2)
+            else:
+                new_sentence2_list.append(sentence2)
+
+    return new_sentence2_list, new_sentence2_list_attach
+
+requirement_pattern = "(采购需求|需求分析|项目说明|(采购|合同|招标|项目|服务|工程)(的?主要)?(内容|概况|范围|信息)([及与和](其它|\w{,2})要求)?" \
+                      "|招标项目技术要求|服务要求|服务需求|项目目标|需求内容如下|建设规模)([::,]|$)"
+aptitude_pattern = "(资格要求|资质要求)([::,]|$)"
+addr_bidopen_pattern = "([开评]标|开启|评选|比选|磋商|遴选|寻源|采购|招标|竞价|议价|委托|询比?价|比价|谈判|邀标|邀请|洽谈|约谈|选取|抽取|抽选|递交\w{,4}文件)[))]?(时间[与及和、])?(地址|地点)([与及和、]时间)?([::,]|$)|开启([::,]|$)"
+out_lines = []
+
+def extract_parameters(parse_document, content):
+    '''
+    Extract the required fields from the outline tree plus regexes over the preprocessed text.
+    :param parse_document: result returned by ParseDocument()
+    :param content: preprocessed text of the announcement
+    :return: (requirement_text, aptitude_text, addr_bidopen_text)
+    '''
+    list_data = parse_document.tree
+    requirement_text = ''
+    aptitude_text = ''
+    addr_bidopen_text = ''
+
+    _find_count = 0
+    _data_i = -1
+    while _data_i<len(list_data)-1:
+        _data_i += 1
+        _data = list_data[_data_i]
+        _type = _data["type"]
+        _text = _data["text"].strip()
+        # print(_data.keys())
+        if _type=="sentence":
+            if _data["sentence_title"] is not None:
+
+                outline = re.sub('(?[一二三四五六七八九十\d.]+)?\s*、?', '',
+                                 re.split('[::,]', _text)[0].replace('(', '(').replace(')', ')'))
+
+                if re.search(requirement_pattern,_text[:30]) is not None and re.search('符合采购需求,', _text[:30])==None:
+                    out_lines.append(outline)
+                    childs = get_childs([_data])
+                    for c in childs:
+                        # requirement_text += c["text"]+"\n"
+                        requirement_text += c["text"]
+                    _data_i += len(childs)
+                    _data_i -= 1
+    _data_i = -1
+    while _data_i<len(list_data)-1:
+        _data_i += 1
+        _data = list_data[_data_i]
+        _type = _data["type"]
+        _text = _data["text"].strip()
+        # print(_data.keys())
+        if _type=="sentence":
+            # print("aptitude_pattern", _text)
+            if _data["sentence_title"] is not None:
+                # print("aptitude_pattern",_text)
+
+                # outline = re.sub('(?[一二三四五六七八九十\d.]+)?\s*、?', '',
+                #                  re.split('[::,]', _text)[0].replace('(', '(').replace(')', ')'))
+
+                if re.search(aptitude_pattern,_text[:30]) is not None:
+                    childs = get_childs([_data])
+                    for c in childs:
+                        aptitude_text += c["text"]
+                        # if c["sentence_title"]:
+                        #     aptitude_text += c["text"]+"\n"
+                        # else:
+                        #     aptitude_text += c["text"]
+                    _data_i += len(childs)
+                    _data_i -= 1
+
+                # elif re.match('[((\s★▲\*]?[一二三四五六七八九十\dⅠⅡⅢⅣⅤⅥⅦⅧⅨⅩⅪⅫ]+', _text) and len(_text)<30 and re.search('资质|资格', _text):
+                #     out_lines.append(outline)
+        if _type=="table":
+            list_table = _data["list_table"]
+            parent_title = _data["parent_title"]
+            if list_table is not None:
+                for line in list_table[:2]:
+                    for cell_i in range(len(line)):
+                        cell = line[cell_i]
+                        cell_text = cell[0]
+                        if len(cell_text)>120 and re.search(aptitude_pattern,cell_text) is not None:
+                            aptitude_text += cell_text+"\n"
+    _data_i = -1
+    while _data_i < len(list_data) - 1:
+        _data_i += 1
+        _data = list_data[_data_i]
+        _type = _data["type"]
+        _text = _data["text"].strip()
+        # print(_data.keys())
+        if _type == "sentence":
+            if _data["sentence_title"] is not None:
+                if re.search(addr_bidopen_pattern, _text[:20]) is not None:
+                    childs = get_childs([_data], max_depth=1)
+                    for c in childs:
+                        addr_bidopen_text += c["text"]
+                    _data_i += len(childs)
+                    _data_i -= 1
+    if re.search('时间:', addr_bidopen_text) and re.search('([开评]标|开启|评选|比选|递交\w{,4}文件)?地[点址]([((]网址[))])?:[^,;。]{2,100}[,;。]', addr_bidopen_text):
+        for ser in re.finditer('([开评]标|开启|评选|比选|递交\w{,4}文件)?地[点址]([((]网址[))])?:[^,;。]{2,100}[,;。]', addr_bidopen_text):
+            b, e = ser.span()
+        addr_bidopen_text = addr_bidopen_text[b:e]
+    elif re.search('开启', addr_bidopen_text) and re.search('时间:\d{2,4}年\d{1,2}月\d{1,2}日', addr_bidopen_text) and len(addr_bidopen_text)<40: # handle cases like 364991684 that contain only a time and no address
+        addr_bidopen_text = ""
+    if addr_bidopen_text == "":
+        ser = re.search('([开评]标|开启|评选|比选|磋商|遴选|寻源|采购|招标|竞价|议价|委托|询比?价|比价|谈判|邀标|邀请|洽谈|约谈|选取|抽取|抽选|递交\w{,4}文件))?(会议)?地[点址]([((]网址[))])?[:为][^,;。]{2,100}[,;。]', content)
+        if ser:
+            addr_bidopen_text = ser.group(0)
+    return requirement_text, aptitude_text, addr_bidopen_text
+
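+# Minimal usage sketch (mirrors the commented-out experiments below; the argument layout of
+# Preprocessing.get_preprocessed is taken from there and is an assumption, not re-verified;
+# docid, html and title are placeholders):
+#
+#   from BiddingKG.dl.interface import Preprocessing
+#   list_articles, list_sentences, list_entitys, list_outlines, _cost_time = \
+#       Preprocessing.get_preprocessed([[docid, html, "", "", title, "", ""]], useselffool=True)
+#   sentence2_list, sentence2_list_attach = extract_sentence_list(list_sentences[0])
+#   parse_document = ParseDocument(html, True, list_obj=sentence2_list)
+#   requirement_text, aptitude_text, addr_bidopen_text = \
+#       extract_parameters(parse_document, list_articles[0].content)
+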
+if __name__ == "__main__":
+    # with open('D:\html/2.html', 'r', encoding='UTF-8') as f:
+    #     html = f.read()
+    #
+    l = []
+    import pandas as pd
+    from collections import Counter
+    from BiddingKG.dl.interface import Preprocessing
+    from BiddingKG.dl.interface.get_label_dic import get_all_label
+    from bs4 import BeautifulSoup
+    import json
+
+    df = pd.read_excel('E:/公告招标内容提取结果2.xlsx')
+    df['len']= df['招标内容'].apply(lambda x: len(x))
+    print(len(df), sum(df['len']),sum(df['len'])/len(df), max(df['len']), min(df['len']))
+    print(len([it for it in df['len'] if it>1500]))
+
+    # df = pd.read_csv(r'E:\channel分类数据\2022年每月两天数据/指定日期_html2022-12-10.csv')
+    # df1 = pd.read_excel('E:/公告招标内容提取结果.xlsx')
+    # df = df[df['docid'].isin(df1['docid'])]
+    #
+    # df.drop_duplicates(subset=['docchannel', 'web_source_name', 'exist_table'], inplace=True)
+    # print(df.columns, len(df))
+    #
+    #
+    # # def get_text(html):
+    # #     soup = BeautifulSoup(html, 'lxml')
+    # #     text = soup.get_text()
+    # #     return text
+    # # df['content'] = df['dochtmlcon'].apply(lambda x: get_text(x))
+    # # df['标签'] = df.apply(lambda x: get_all_label(x['doctitle'], x['content']), axis=1)
+    # # df['标签'] = df['标签'].apply(lambda x: json.dumps(x, ensure_ascii=False, indent=2))
+    # # df1 = df[['docid', '标签']]
+    #
+    # n = 0
+    # datas = []
+    # for id,title, html in zip(df['docid'],df['doctitle'], df['dochtmlcon']):
+    #     # if id not in [289647738, 289647739]:
+    #     #     continue
+    #     # print(id, type(id))
+    #     # parse_document = ParseDocument(html, True)
+    #     # requirement_text, aptitude_text = extract_parameters(parse_document)
+    #     # if re.search('资\s*[格质]', html)==None:
+    #     #     continue
+    #
+    #     list_articles, list_sentences, list_entitys, list_outlines, _cost_time = Preprocessing.get_preprocessed([[id,html,"","",title,'', '']],useselffool=True)
+    #     sentence2_list, sentence2_list_attach = extract_sentence_list(list_sentences[0])
+    #
+    #     # sentence2_list = []
+    #
+    #     parse_document = ParseDocument(html, True, list_obj=sentence2_list)
+    #     requirement_text, aptitude_text = extract_parameters(parse_document)
+    #     # if len(aptitude_text)>0:
+    #     #     datas.append((id, aptitude_text[:1500]))
+    #     #     print(id, aptitude_text[:10], aptitude_text[-20:])
+    #     # else:
+    #     #     parse_document = ParseDocument(html, True, list_obj=sentence2_list_attach)
+    #     #     requirement_text, aptitude_text = extract_parameters(parse_document)
+    #
+    #     # if 0<len(aptitude_text)<20:
+    #     #     l.append(len(aptitude_text))
+    #     #     n += 1
+    #     #     print(id, aptitude_text)
+    #     #     if n > 5:
+    #     #         break
+    #
+    #     if len(requirement_text)>0:
+    #         label_dic = get_all_label(title, list_articles[0].content)
+    #         # datas.append((id, requirement_text))
+    #         datas.append((id, requirement_text, label_dic))
+    #
+    # c = Counter(out_lines)
+    # print(c.most_common(1000))
+    # #
+    # # df = pd.DataFrame(datas, columns=['docid', '资质要求'])
+    # # df.to_excel('E:/公告资质要求提取结果.xlsx')
+    #
+    # df = pd.DataFrame(datas, columns=['docid', '招标内容', '标签'])
+    # df['标签'] = df['标签'].apply(lambda x: json.dumps(x, ensure_ascii=False, indent=2))
+    # df.to_excel('E:/公告招标内容提取结果2.xlsx')
+
+    #     if len(aptitude_text)> 1000:
+    #         print(id, aptitude_text[:10], aptitude_text[-20:])
+    # print(Counter(l).most_common(50))
+    # print(len(df), len(l), min(l), max(l), sum(l)/len(l))
+    # n1 = len([it for it in l if it < 500])
+    # n2 = len([it for it in l if it < 1000])
+    # n3 = len([it for it in l if it < 1500])
+    # n4 = len([it for it in l if it < 2000])
+    # print(n1, n2, n3, n4, n1/len(l), n2/len(l), n3/len(l), n4/len(l))
+
+    # parse_document = ParseDocument(html,True)
+    # requirement_text, new_list_policy, aptitude_text = extract_parameters(parse_document)
+    # print(aptitude_text)
+
+    # sentence_text = '5、要求:3.1投标其他条件:1、中国宝武集团项目未列入禁入名单的投标人。2、具有有效的营业执照;'
+    # begin_index = 0
+    # for item in re.finditer('[,。;;!!??]+', sentence_text):
+    #     end_index = item.end()
+    #     if end_index != len(sentence_text):
+    #         if end_index - begin_index < 6:
+    #             continue
+    #     new_sentence_text = sentence_text[begin_index:end_index]
+    #     print(new_sentence_text)
+
+    # df = pd.read_excel('E:/公告资质要求提取结果.xlsx')
+    # docids = []
+    # pos = neg = 0
+    # for docid, text in zip(df['docid'], df['资质要求']):
+    #     if re.match('[((\s★▲\*]?[一二三四五六七八九十\dⅠⅡⅢⅣⅤⅥⅦⅧⅨⅩⅪⅫ]+', text) and re.search(aptitude_pattern, text[:15]):
+    #         pos += 1
+    #         pass
+    #     else:
+    #         neg += 1
+    #         print(docid, text[:50])
+    #         docids.append(docid)
+    # print('异常:%d, 正常:%d'%(neg, pos))
+    # print(docids)
+

+ 188 - 97
BiddingKG/dl/interface/predictor.py

@@ -430,50 +430,50 @@ class CodeNamePredict():
                                                 code_set.add(it)
                                                 # item['code'].append(it)
                                                 if re.search("(项目编号|招标编号):?$", pre_text[h]):
-                                                    item['code'].append((it, 0))
+                                                    item['code'].append((it, 0, sentence.sentence_index))
                                                 elif re.search('采购(计划)?编号:?$', pre_text[h]):
-                                                    item['code'].append((it, 1))
+                                                    item['code'].append((it, 1, sentence.sentence_index))
                                                 elif re.search('(询价|合同)编号:?$', pre_text[h]):
-                                                    item['code'].append((it, 2))
+                                                    item['code'].append((it, 2, sentence.sentence_index))
                                                 else:
-                                                    item['code'].append((it, 3))
+                                                    item['code'].append((it, 3, sentence.sentence_index))
                                         elif len(item['code']) > 0:
                                             new_it = item['code'][-1][0] + re.search(',|/|;|、|,', the_code).group(0) + it
                                             if new_it not in code_set:
                                                 code_set.add(new_it)
                                                 # item['code'][-1] = new_it
                                                 if re.search("(项目编号|招标编号):?$", pre_text[h]):
-                                                    item['code'][-1] = (new_it, 0)
+                                                    item['code'][-1] = (new_it, 0, sentence.sentence_index)
                                                 elif re.search('采购(计划)?编号:?$', pre_text[h]):
-                                                    item['code'][-1] = (new_it, 1)
+                                                    item['code'][-1] = (new_it, 1, sentence.sentence_index)
                                                 elif re.search('(询价|合同)编号:?$', pre_text[h]):
-                                                    item['code'][-1] = (new_it, 2)
+                                                    item['code'][-1] = (new_it, 2, sentence.sentence_index)
                                                 else:
-                                                    item['code'][-1] = (new_it, 3)
+                                                    item['code'][-1] = (new_it, 3, sentence.sentence_index)
                                         else:
                                             if the_code not in code_set:
                                                 code_set.add(the_code)
                                                 # item['code'].append(the_code)
                                                 if re.search("(项目编号|招标编号):?$", pre_text[h]):
-                                                    item['code'].append((the_code, 0))
+                                                    item['code'].append((the_code, 0, sentence.sentence_index))
                                                 elif re.search('采购(计划)?编号:?$', pre_text[h]):
-                                                    item['code'].append((the_code, 1))
+                                                    item['code'].append((the_code, 1, sentence.sentence_index))
                                                 elif re.search('(询价|合同)编号:?$', pre_text[h]):
-                                                    item['code'].append((the_code, 2))
+                                                    item['code'].append((the_code, 2, sentence.sentence_index))
                                                 else:
-                                                    item['code'].append((the_code, 3))
+                                                    item['code'].append((the_code, 3, sentence.sentence_index))
                                             break
                                 elif the_code not in code_set:
                                     code_set.add(the_code)
                                     # item['code'].append(the_code)
                                     if re.search("(项目编号|招标编号):?$", pre_text[h]):
-                                        item['code'].append((the_code, 0))
+                                        item['code'].append((the_code, 0, sentence.sentence_index))
                                     elif re.search('采购(计划)?编号:?$', pre_text[h]):
-                                        item['code'].append((the_code, 1))
+                                        item['code'].append((the_code, 1, sentence.sentence_index))
                                     elif re.search('(询价|合同)编号:?$', pre_text[h]):
-                                        item['code'].append((the_code, 2))
+                                        item['code'].append((the_code, 2, sentence.sentence_index))
                                     else:
-                                        item['code'].append((the_code, 3))
+                                        item['code'].append((the_code, 3, sentence.sentence_index))
 
                                 # if the_code not in code_set:
                                 #     code_set.add(the_code)
@@ -573,18 +573,18 @@ class CodeNamePredict():
                     if othercode != None:
                         # item['code'].append(othercode.group('code'))
                         if re.search("(项目编号|招标编号):?$", othercode.group(0)):
-                            item['code'].append((othercode.group('code'), 0))
+                            item['code'].append((othercode.group('code'), 0, sentence.sentence_index))
                         elif re.search('采购(计划)?编号:?$', othercode.group(0)):
-                            item['code'].append((othercode.group('code'), 1))
+                            item['code'].append((othercode.group('code'), 1, sentence.sentence_index))
                         elif re.search('(询价|合同)编号:?$', othercode.group(0)):
-                            item['code'].append((othercode.group('code'), 2))
+                            item['code'].append((othercode.group('code'), 2, sentence.sentence_index))
                         else:
-                            item['code'].append((othercode.group('code'), 3))
+                            item['code'].append((othercode.group('code'), 3, sentence.sentence_index))
                         # print('规则召回项目编号:', othercode.group('code'))
             # item['code'] = [code for code in item['code'] if len(code)<500]
             # item['code'].sort(key=lambda x:len(x),reverse=True)
             item['code'] = [code for code in item['code'] if len(code[0]) < 500]
-            item['code'].sort(key=lambda x: x[1])
+            item['code'].sort(key=lambda x: [x[1],x[2]])
             item['code'] = [it[0] for it in item['code']]
             result.append(item)
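
A worked example of the new sort key (code strings invented): the second tuple element is the heading priority assigned above — 0 for 项目编号/招标编号, 1 for 采购编号, 2 for 询价/合同编号, 3 otherwise — and the third is the sentence index, so codes are ordered by type first and by position in the document second.

    codes = [('HT-2024-012', 2, 5), ('QT-2024-007', 3, 0),
             ('ZB-2024-001', 0, 3), ('CG-2024-009', 1, 1)]
    codes.sort(key=lambda x: [x[1], x[2]])       # by code type, then by sentence order
    print([c[0] for c in codes])
    # -> ['ZB-2024-001', 'CG-2024-009', 'HT-2024-012', 'QT-2024-007']
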
 
@@ -703,7 +703,7 @@ class PREMPredict():
                             text_sen = sentence.sentence_text
                             b = entity.wordOffset_begin
                             e = entity.wordOffset_end
-                            text_list.append((text_sen[max(0, b-13):b], text_sen[b:e], text_sen[e:e+10]))
+                            text_list.append((text_sen[max(0, b-13):b], text_sen[b:e], text_sen[e:e+15]))
                             # item_x = embedding(spanWindow(tokens=sentence.tokens,begin_index=entity.begin_index,end_index=entity.end_index,size=settings.MODEL_ROLE_INPUT_SHAPE[1]),shape=settings.MODEL_ROLE_INPUT_SHAPE)
                             # item_x = self.model_role.encode(tokens=sentence.tokens,begin_index=entity.begin_index,end_index=entity.end_index,entity_text=entity.entity_text)
                             item_x = self.model_role.encode_word(sentence_text=text_sen, begin_index=entity.wordOffset_begin, end_index=entity.wordOffset_end, size=30)
@@ -805,6 +805,9 @@ class PREMPredict():
             # print('模型预测角色:', front, entity.entity_text, behind,label, values)
             # if label in [0, 1, 2, 3, 4]:
             #     self.role_file.write("{0}#split#{1}#split#{2}#split#{3}#split#{4}\n".format(front, entity.entity_text, behind,label, entity.doc_id))
+            if re.search('^以\d+[\d,.]+万?元中标', behind) and label != 2: # 优化244261884预测错误 大连长之琳科技发展有限公司以7.63277万元中标
+                label = 2
+                values[label] = 0.8
             if label in [0, 1, 2, 3, 4] and values[label] < 0.5: # 小于阈值的设为其他,让后面的规则召回重新判断
                 # print(' # 小于阈值的设为其他,让后面的规则召回重新判断', values[label])
                 label = 5
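
The new right-context rule above can be checked in isolation; the sample string is taken from the comment in the hunk.

    import re

    behind = '以7.63277万元中标'                  # text to the right of the entity
    if re.search(r'^以\d+[\d,.]+万?元中标', behind):
        label, prob = 2, 0.8                     # force the role to win_tenderer
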
@@ -835,7 +838,7 @@ class PREMPredict():
                 elif re.search('尊敬的供应商:$', front):
                     label = 0
                     values[label] = 0.501
-                elif re.search('第[4-9四五六]中标候选人', front):  #修复第4以上的预测错为中标人
+                elif re.search('第[4-9四五六]中标候选人|(提交单位|竞投单位):$', front):  #修复第4以上的预测错为中标人
                     label = 5
                     values[label] = 0.5
                 elif re.search('(排名|排序|名次):([4-9]|\d{2,}),', front) or re.search('序号:\d+,(供应商|投标|候选)', front): # 293225236 附件中 排名预测错误
@@ -846,7 +849,10 @@ class PREMPredict():
                 elif re.search('第一候补|第一后备|备选', front):
                     label = 3
                     values[label] = 0.6
-                elif re.search('放弃中标资格$|是否中标:否|^(中标|成交)(公示|公告)', behind):
+                elif re.search('^放弃中标资格|是否中标:否|^(中标|成交)(公示|公告)', behind):
+                    values[2] = 0.5
+                    label = 5
+                elif re.search('^,?(投标报价|(资格性审查:|符合性审查:)?(不通过|不符合))', behind) and re.search('中标|成交|中选|排名|排序|名次|第[一1]名', front)==None:
                     values[2] = 0.5
                     label = 5
                 elif re.search('(承包权人|帐户名称|债务人|推荐预审合格投标人名单):$|确定为标的的受让方,$|[主次出]入口?,?$|确定(项目|\w{,2})成交供应商,$', front):  # 234501112 民币元,序号:1,债务人: 东营市海宁工贸有限责任公司 ,债权本金: 262414286 八、中标后签约单位,合同签约单位:
@@ -881,6 +887,9 @@ class PREMPredict():
                     label = 5
                 elif re.search('委托$', front) and re.search('^(抽样|送检|看样)', behind):
                     label = 5
+                elif re.search('推荐入围的招标代理单位:$', front): # 20240709 修复302505502预测错为代理
+                    label = 2
+                    values[label] = 0.501
             elif label in [3,4]:
                 if re.search('第[二三]分(公司|店),中标(人|供应商|单位|公司):$', front):
                     label = 2
@@ -891,7 +900,7 @@ class PREMPredict():
                 elif re.search('\d+\.\d+,供应商名称:', front): #  341385226 30.2,供应商名称: 预测为第二名
                     label = 2
                     values[label] = 0.501
-                elif re.search('\d+\.\d+[,、]?(中标|成交)候选人', front):
+                elif re.search('\d+\.\d+[,、]?(中标|成交)候选人|[;,][23]、(中标|中选|成交)候选人:', front):
                     label = 5
                     values[label] = 0.501
                 elif re.search('第一名:$', front):
@@ -952,16 +961,16 @@ class PREMPredict():
                     values[label] = 0.5
                 elif re.search('[\+=]((中标|成交)(金?额|价格?)|[若如]果?(中标|成交)(金?额|价格?)为?', front): # 处理例如 241561780 如中标金额为 500-1000万元,则代理服务费=100 万元×0.5%+400万元×0.35%+(中标金额-500)万元
                     values[label] = 0.49
-                elif re.search('^(以[上下])?按[\d.%]+收取|^以[上下]|^[()]?[+×*-][\d.%]+', behind):
+                elif re.search('^(以[上下])?按[\d.%]+收取|^及?以[上下]|^[()]?[+×*-][\d.%]+', behind):
                     values[label] = 0.49
-                elif re.search('(含|在|包括|[大小等高低]于)$|[\d.%]+[+×*-]$', front):
+                elif re.search('(含|在|包括|[大小等高低]于|达到)$|[\d.%]+[+×*-]$', front):
                     values[label] = 0.49
                 elif entity.notes == '单价' and float(entity.entity_text)<5000:
                     label = 2
             elif label ==0: # 错误招标金额处理
-                if entity.notes in ["投资", "总投资","工程造价"] or re.search('最低限价:?$', front) or re.search('服务内容:([\d,.]+万?亿?元?-?)$', front):
+                if entity.notes in ["投资", "总投资","工程造价"] or re.search('最低限价:?$|注册资本', front) or re.search('服务内容:([\d,.]+万?亿?元?-?)$', front):
                     values[label] = 0.49
-                elif re.search('^(以[上下])?按[\d.%]+收取|^以[上下]|^[()]?[+×*-][\d.%]+|(含)', behind):
+                elif re.search('^(以[上下])?按[\d.%]+收取|^及?以[上下]|^[()]?[+×*-][\d.%]+|(含)', behind):
                     values[label] = 0.49
                 elif re.search('(含|在|包括|[大小等高低]于|如预算金额为)$|[\d.%]+((含))?[+×*-]$', front):
                     values[label] = 0.49
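
Likewise, the widened left-context guard ('达到' added) can be sanity-checked on its own with an invented prefix; a hit pushes the amount's confidence below the 0.5 threshold so the later rules re-examine it.

    import re

    front = '项目总投资应达到'                     # text to the left of a money entity
    demote = re.search('(含|在|包括|[大小等高低]于|达到)$', front) is not None
    print(demote)                                 # True -> confidence lowered to 0.49
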
@@ -1396,27 +1405,27 @@ class RoleRulePredictor():
         self.pattern_agency_left = "(?P<agency_left>((代理|拍卖)(?:人|机构|公司|企业|单位|组织)|专业采购机构|集中采购机构|招标组织机构|交易机构|集采机构|[招议))]+标机构|(采购|招标)代理)(名称|.{,4}名,?称|全称)?(是|为|:|:|[,,]?\s*)$|(受.{5,20}委托,?$))"
         self.pattern_agency_right = "(?P<agency_right>^([((](以下简称)?[,\"“]*(代理)(人|单位|机构)[,\"”]*[))])|^受.{5,20}委托|^受委?托,)"  # |^受托  会与 受托生产等冲突,代理表达一般会在后面有逗号
         # 2020//11/24 大网站规则 中标关键词添加 选定单位|指定的中介服务机构
-        self.pattern_winTenderer_left_50 = "(?P<winTenderer_left_50>" \
+        self.pattern_winTenderer_left_50 = "(?P<winTenderer_left_51>" \
                "(乙|竞得|受让|买受|签约|施工|供货|供应?|合作|承做|承包|承建|承销|承保|承接|承制|承担|承修|承租((包))?|入围|入选|竞买)(候选|投标)?(人|单位|机构|供应商|方|公司|企业|厂商|商|社会资本方?)(:?单位名称|:?名称|盖章)?[::是为]+$" \
                "|(选定单位|指定的中介服务机构|实施主体|中标银行|中标通知书,致|征集结果|选择中介|选择结果|成交对象|勘察人|(,|审计|处置|勘察|设计)服务单位|受托[人方])[::是为]+$" \
                "|((评审结果|名次|排名|中标结果)[::]*第?[一1]名?)[::是为]+$|成交供应商信息[,:]?(序号1)?:?|供应商名称$" \
-               "|单一来源(采购)?(供应商|供货商|服务商|方式向)$|((中标|成交)(结果|信息))[::是为]+$" \
+               "|单一来源(采购)?(供应商|供货商|服务商|方式向)$|((中标|成交)(结果|信息))[::是为]+$|(中标|成交)供应商、(中标|成交)(金额|价格),$" \
                "|现(公布|宣布|公示)中标单位如下:$|现将中标单位(公布|公示)如下:$|现宣布以下(企业|单位|公司)中标:$|经讨论,决定采用$)"  # 承办单位:不作为中标 83914772
         self.pattern_winTenderer_left_60 = "(?P<winTenderer_left_60>" \
                                            "(,|。|:|^)((中标(投标)?|[拟预]中标|中选|中价|中签|成交)(人|单位|机构|中介(服务)?机构|供应商|客户|方|公司|企业|厂商|商家?|社会资本方?)|(中标候选人)?第?[一1]名|第[一1](中标|中选|成交)?候选人|服务机构)" \
-                                           "(:?单位名称|:?名称|盖章)?[,,]?([((]按综合排名排序[))]|:择优选取)?[::,,]$)"  # 解决表头识别不到加逗号情况,需前面为,。空
+                                           "(:?单位名称|:?名称|盖章)?[,,]?([((]按综合排名排序[))]|:择优选取)?[::,,]$|选取(情况|说明):中选,中介机构名称:$|排名如下:1、$)"  # 解决表头识别不到加逗号情况,需前面为,。空 20240621补充 中选 云南省投资审批中介超市 补充排名如下 南阳师范学院
         self.pattern_winTenderer_left_55 = "(?P<winTenderer_left_55>(中标(投标)?|[拟预]中标|中选|中价|中签|成交|入选)(人|单位|机构|中介(服务)?机构|供应商|客户|方|公司|企业|厂商|商家?|社会资本方?)" \
                                            "(:?单位名称|:?名称|盖章)?([((]按综合排名排序[))]|:择优选取)?[::是为]+$" \
                                            "|结果公示如下:摇出球号:\d+号,中介机构:$)"  # 取消逗号 并拒绝执行改进计划的供应商,华新水泥将可能终止与其合作关系  # 中标候选人不能作为中标   # |直购企业:$不能作为中标人,看到有些公告会又多个公司,然后还会发布中选结果的公告,其中一个公司中标
 
         self.pattern_winTenderer_right = "(?P<winTenderer_right>(^[是为](首选)?((采购|中标|成交)(供应商|供货商|服务商)|(第[一1]|预)?(拟?(中标|中选|中价|成交)(候选|排序)?(人|单位|机构|供应商|公司|企业|厂商)))|" \
                                          "^((报价|价格)最低,|以\w{5,10})?(确定|成|作)?为[\w“”()]{3,25}((成交|中选|中标|服务)(人|单位|供应商|企业|公司)|供货单位|供应商|第一中标候选人)[,。]" \
-                                         "|^:贵公司参与|^:?你方于|^(胜出)?中标。|^取得中标(单位)?资格" \
+                                         "|^:贵公司参与|^:?你方于|^(胜出)?中标。|^取得中标(单位)?资格|^以\d+[\d,.]+万?元(中标|成交|中选)" \
                                          "|^通过(挂牌|拍卖)方式(以[\d.,]+万?元)?竞得|^[((](中标|成交|承包)人名?称?[))]))" # 去掉 |\w{,20} 修复 460216955 网上公布的与本次采购项目有关的信息视为已送达各响应供应商。 作为中标
         self.pattern_winTenderer_whole = "(?P<winTenderer_center>(贵公司|由).{,15}以\w{,15}中标|确定[\w()]{5,20}为[^,。;]{5,50}的?中标单位" \
                                          "|选定报价最低的[“”\w()]{5,25}为[^,。;]{5,50}的?(服务|中标|成交)单位" \
-                                         "|拟邀请[\w()]{5,20}(进行)?单一来源谈判|(承办单位|报价人|投标人|中介机构)(名称)?:[\w()]{5,20},(中标|承办|中选)价格" \
-                                         "|(谈判结果:|结果|最终|确定|决定)[以由为][^,。;]{5,25}(向我单位)?(供货|承担|承接|中标|竞买成功)|中标通知书.{,15}你方|单一来源方?式?[从向][()\w]{5,20}采购)"  # 2020//11/24 大网站规则 中标关键词添加 谈判结果:由.{5,20}供货
+                                         "|拟邀请[\w()]{5,20}(进行)?单一来源谈判|(承办单位|报价人|投标人|中介机构)(名称)?:[\w()]{5,20},(中标|承办|中选)(价格|金额)" \
+                                         "|(谈判结果:|结果|最终|确定|决定)[以由为][^,。;]{5,25}(向我单位)?(供货|承担|承接|中标|竞买成功)|中标通知书.{,15}你方|单一来源方?式?[从向][()\w]{5,20}采购|供应商名称:[()\w]{5,20},独家采购原因)"  # 2020//11/24 大网站规则 中标关键词添加 谈判结果:由.{5,20}供货
 
         self.pattern_secondTenderer_left = "(?P<secondTenderer_left>((第[二2]名?(名|((中标|中选|中价|成交|候选)(候选)?(人|单位|机构|供应商|公司))))(名称)?[::是为]+$)|((评审结果|名次|排名|排序)[::]第?[二2]名?,?(投标(供应)?商|供应商)(名称)?[::]+$))"
         self.pattern_secondTenderer_right = "(?P<secondTenderer_right>^[是为\(]第[二2](名|(中标|中选|中价|成交)(候选)?(人|单位|机构|供应商|公司)))"
@@ -1453,7 +1462,7 @@ class RoleRulePredictor():
 
         self.SET_NOT_TENDERER = set(["人民政府","人民法院","中华人民共和国","人民检察院","评标委员会","中国政府","中国海关","中华人民共和国政府"])
         
-        self.pattern_money_tenderee = re.compile("投?标?最高限价|采购计划金额|项目预算|招标金额|采购金额|项目金额|投资估算|采购(单位|人)委托价|招标限价|拦标价|预算金额|标底|总计|限额|资金来源,?为\w{2,4}资金|采购成本价")  # |建安费用 不作为招标金额
+        self.pattern_money_tenderee = re.compile("投?标?最高限价|采购计划金额|项目预算|招标金额|采购金额|项目金额|投资估算|采购(单位|人)委托价|招标限价|拦标价|预算金额|标底|总计|限额|资金来源,?为\w{2,4}资金|采购成本价|总费用约?为")  # |建安费用 不作为招标金额
         self.pattern_money_tenderer = re.compile("((合同|成交|中标|应付款|交易|投标|验收|订单)[)\)]?(综合)?(总?金额|结果|[单报总]?价))|标的基本情况|承包价|报酬(含税):|经评审的价格")  # 单写 总价 不能作为中标金额,很多表格有单价、总价
         self.pattern_money_tenderer_whole = re.compile("(以金额.*中标)|中标供应商.*单价|以.*元中标")
         self.pattern_money_other = re.compile("代理费|服务费")
@@ -1515,6 +1524,8 @@ class RoleRulePredictor():
             _label = 5
         elif _label == 2 and re.search('评委|未中标', after[:5]): # 397194341 过滤掉错误召回中标人
             _label = 5
+        elif _label == 2 and re.search('^,?(投标报价|(资格性审查:|符合性审查:)?(不通过|不符合))', after) and re.search('中标|成交|中选|排名|排序|名次|第[一1]名', before[-10:])==None: #20240705 处理类似 493939047 错误
+            _label = 5
         if _label == 5:
             _label, _prob, keyword = self.ser_role(self.pattern_whole, before + center + after, entity_text)  # 前后文匹配
             keyword = 'whole_'+ keyword[:keyword.find(entity_text)] if keyword!="" else keyword
@@ -1614,7 +1625,7 @@ class RoleRulePredictor():
                                         if _span[2].startswith(":"): # 实体后面为冒号的不作为招标人,避免项目名称出错中标变招标  368122675 陇西兴恒建建筑有限责任公司:线路安全保护区内环境治理专项整改(第二标段)项目
                                             break
                                         if str(_span[0][-len(str(_name)):]+_span[1] + _span[2][:len(str(_name))]).find(
-                                                _name) >= 0:
+                                                _name) >= 0 or str(_name).startswith(p_entity.entity_text): # 20240621 补充公司开头的项目名称召回,避免name太长召回失败 例 367033697
                                             # if p_entity.entity_text in agency_set or re.search('(代理|管理|咨询|招投?标|采购)\w{,6}公司', p_entity.entity_text): # 在代理人集合的作为代理人
                                             if is_agency(p_entity.entity_text): # 2024/3/29 统一方法判断是否为代理
                                                 find_flag = True
@@ -1783,7 +1794,7 @@ class RoleRulePredictor():
                                     p_entity.label = 0
                                     # print('规则召回预算金额2:', p_entity.entity_text, _sentence.sentence_text[:p_entity.wordOffset_begin])
             if notfound_tenderer and len(set([ent.entity_text for ent in candidates])) == 1 and re.search(
-                    '(中标|中选|中价|中租|成交|入选|确认)(候选人|人|供应商|记录|结果|变更)?(公告|公示|结果)|(遴选|采购|招标|竞价|议价|比选|询比?价|评选|谈判|邀标|邀请|洽谈|约谈|评标|发包|磋商|交易)\w{,2}结果|单一来源(采购|招标)?的?(中标|成交|结果)|中标通知书',
+                    '(中标|中选|中价|中租|成交|入选|确认)(候选人|人|供应商|记录|结果|变更)?(公告|公示|结果)|(遴选|采购|招标|竞价|议价|比选|询比?价|评选|谈判|邀标|邀请|洽谈|约谈|评标|发包|磋商|交易|评审)\w{,2}结果|单一来源(采购|招标)?的?(中标|成交|结果)|中标通知书',
                     article.title+article.content[:100]):
                 for p_entity in candidates:
                     # print('只有一个候选人的作为中标人', p_entity.entity_text)
@@ -2229,10 +2240,11 @@ class RoleGrade():
         self.winTenderer_left_9 = "(?P<winTenderer_left_9>(中标|中选|中价|成交|竞得)|第[1一]名|排[名序]:1|名次:1)"
         self.winTenderer_left_8 = "(?P<winTenderer_left_8>(入选供应商|供货商|乙方|最[终后]选[择取]))"  # 229435497 最后选择西平,县中原彩印有限公司,作为此项目中标供应商,
         self.winTenderer_left_6 = "(?P<winTenderer_left_6>(入围|承[接建包修做制担租销]))"
+        self.winTenderer_right_9 = "(?P<winTenderer_right_9>^(为(中标|成交|中选)(人|单位|供应商|公司)|以\d+[\d.,]+万?元中标))"
         self.secondTenderer_left_9 = "(?P<secondTenderer_left_9>(第[二2](中标|中选|中价|成交)?候选(人|单位|供应商|公司)|第[二2]名|排[名序]:2|名次:2))"
         self.thirdTenderer_left_9 = "(?P<thirdTenderer_left_9>(第[三3](中标|中选|中价|成交)?候选(人|单位|供应商|公司)|第[三3]名|排[名序]:3|名次:3))"
         self.pattern_list = [self.tenderee_left_9,self.tenderee_center_8, self.tenderee_left_8,self.tenderee_left_6,self.tenderee_left_5,self.agency_left_9,
-                             self.winTenderer_left_6, self.winTenderer_left_9,self.winTenderer_left_8, self.secondTenderer_left_9, self.thirdTenderer_left_9]
+                             self.winTenderer_left_6, self.winTenderer_left_9,self.winTenderer_left_8, self.winTenderer_right_9, self.secondTenderer_left_9, self.thirdTenderer_left_9]
     def predict(self, list_sentences, list_entitys, original_docchannel, span=15, min_prob=0.7):
         '''
         根据规则给角色分配不同等级概率;分三级:0.9-1,0.8-0.9,0.7-0.8;附件0.7-0.8,0.6-0.7,0.5-0.6
@@ -2328,7 +2340,7 @@ class RoleGrade():
                     low_prob_tenderee.append(entity)
             elif entity.entity_type in ['org', 'company'] and entity.label == 2 and 0.5<=entity.values[entity.label]<0.6:
                 low_prob_winner.append(entity)
-            if entity.entity_type in ['org', 'company'] and entity.label in [1, 0] and 0.5<entity.values[entity.label]:
+            if entity.entity_type in ['org', 'company'] and entity.label in [1, 0] and 0.6<entity.values[entity.label]: # 由0.5调为0.6,避免367217504 同时为低概率招标、中标被改
                 all_tenderee_agency.append(entity.entity_text)
 
 
@@ -3918,7 +3930,7 @@ class DocChannel():
       self.type_dic = {
           '土地矿产': '供地结果|(土地|用地|宗地|地块|海域|矿)的?(基本信息|基本情况|概况|信息|详情|来源|用途|性质|编号|位置|坐落|使用年限|出让年限)|(土地|山地|农田)(经营权)?(出让|出租|招租|租赁|承包|流转)|流转土地',
           '拍卖出让': '(拍卖|变卖|流拍|竞拍)的?(公告|活动|信息|结果|成交|主体|标的|资产|财产|方式|类型|流程|程序|规则|价格|保证金|时间)|(公开|进行|密封)(拍卖|变卖|竞拍)|第[一二三]次拍卖|(资产|司法|网络)拍卖|交易方式.{,2}拍卖|拍卖会',
-          '产权交易': '(产权|资产|权证)的?(类型|信息|名称|编号|(基本)?情况)|(经营权|承包权|使用权|租赁权|股权|债权|排污权|化学需氧量|储备量)(挂牌|转让|出让)|竞价销售|销售结果|房屋所有权房产|免租期限|交易期限|(受让|转让|承租|出租)(人|方)|(店面|店铺|商铺|铺位?|门面|门市|食堂|饭堂|校舍|车位|停车场|厂?房|仓?库|馆|资产|物业|房产|房屋|场地|农田|鱼?塘)\w{,4}(处置|招租|出租|续租|租赁|转让)|(出租|转让|产权|资产)(项目|中标|成交|流标|废标)|出租(用途|类型)|转让底价|租赁(标的物|情况)|看样(时间|地[点址]|方式)|最小加价|加价幅度',
+          '产权交易': '(产权|资产|权证)的?(类型|类别|用途|性质|状态|信息|名称|编号|(基本)?情况)|(经营权|承包权|使用权|租赁权|股权|债权|排污权|化学需氧量|储备量)(挂牌|转让|出让)|竞价销售|销售结果|房屋所有权房产|免租期限|交易期限|(受让|转让|承租|出租)(人|方)|(店面|店铺|商铺|铺位?|门面|门市|食堂|饭堂|校舍|车位|停车场|厂?房|仓?库|馆|资产|物业|房产|房屋|场地|农田|鱼?塘)\w{,4}(处置|招租|出租|续租|租赁|转让)|(出租|转让|产权|资产)(项目|中标|成交|流标|废标)|出租(用途|类型)|转让底价|租赁(标的物|情况)|看[货](时间|地[点址]|方式|仓库|验货)|最小加价|加价[梯]|交易模式[::\s]*延时竞价销售|挂牌(开始|结束)时间',
           '采招数据': '(采购|招标)(条件|范围|文件|内容)|(申请人|投标人|供应商|报价人|参选人)的?资格要求;|采购需求清单|最低价排序|竞争性采购方式|采购进行公开竞价|竞价模式[::\s]*一次报价|预算金额'  # |变更|答疑|澄清|中标|成交|合同|废标|流标 |(采购|招标|代理)(人|机构|单位)|
       }
 
@@ -3926,7 +3938,7 @@ class DocChannel():
           '土地矿产': '(土地|用地|宗地|荒地|山地|海域|矿)(出让|出租|招租|租赁|承包|流转|使用权|经营权|征收|划拨|中标|成交)|供地结果|矿业权|探矿权|采矿权|(土地|用地|宗地|地块)(使用权)?(终止|中止|网上)?(挂牌|出让|拍卖|招拍|划拨)|征收土地',
           '拍卖出让': '(拍卖|变卖|流拍|竞拍)的?(公告|公示)|拍卖|变卖|流拍|竞拍',
           '产权交易': '经营权|承包权|使用权|租赁权|股权|债权|排污权|化学需氧量|储备量|竞价销售|销售结果|出租|招租|拍租|竞租|续租|挂牌|出让',
-          '采招数据': '(采购|招标|询价|议价|比价|比选|遴选|邀请|邀标|磋商|洽谈|约谈|谈判)的?(公告|公示|中标|成交|结果|$)|工程招标|定点服务',
+          '采招数据': '(采购|招标|询价|议价|比价|比选|遴选|邀请|邀标|磋商|洽谈|约谈|谈判|征询|调研)的?(公告|公示|中标|成交|结果|$)|工程招标|定点服务|竞价采购|(设备|服务)采购|网上超市采购|定点采购',
           # |竞价 采招/产权都有竞价方式 # 意向|需求|预公?告|报建|总承包|工程|施工|设计|勘察|代理|监理 |变更|答疑|澄清|中标|成交|合同|废标|流标
           '新闻资讯': '(考试|面试|笔试)成绩|成绩的?(公告|公示|公布)|公开招聘|招聘(公告|简章|启事|合同制)|疫情防控\s{,5}(通知|情况|提示)|行政审批结果'
       }
@@ -3934,22 +3946,22 @@ class DocChannel():
           '采购意向': '采购意向|招标意向|选取意向|意向公告|意向公示',
           '采购意向neg': '发布政府采购意向|采购意向公告已于',
           '招标预告': '(预计|计划)(采购|招标)(时间|日期)|采购(计划编号|需求方案|预告|预案)|(预|需求)公示|需求(方案|信息|论证|公告|公示)',
-          '招标公告': '(采购|招标|竞选|报名)条件|报名(时间|流程|方法|要求|\w{,5}材料)[:\s]|[^\w]成交规则|参加竞价采购交易资格|(申请人|投标人|供应商|报价人|参选人)的?资格要求|获取(采购|招标|询价|议价|竞价|比价|比选|遴选|邀请|邀标|磋商|洽谈|约谈|谈判|竞谈|应答)文件|(采购|招标|询价|议价|竞价|比价|比选|遴选|邀请|邀标|磋商|洽谈|约谈|谈判|竞谈|应答)文件的?(获取|领取)',
+          '招标公告': '(采购|招标|竞选|报名)条件|报名(时间|流程|方法|要求|\w{,5}材料)[:\s]|[^\w]成交规则|参加竞价采购交易资格|(申请人|投标人|供应商|报价人|参选人)的?资格要求|获取(采购|招标|询价|议价|竞价|比价|比选|遴选|邀请|邀标|磋商|洽谈|约谈|谈判|竞谈|应答)文件|(采购|招标|询价|议价|竞价|比价|比选|遴选|邀请|邀标|磋商|洽谈|约谈|谈判|竞谈|应答)文件的?(获取|领取)|评选方式:?\s*价格最低',
           '资审结果': '资审及业绩公示|资审结果及业绩|资格后审情况报告|资格(后审|预审|审查)结果(公告|公示)|(预审|审查)工作已经?结束|未通过原因', #|资格
           '招标答疑': '现澄清(为|如下)|答疑补遗|澄清内容如下|第[0-9一二三四五]次澄清|答疑澄清|(最高(投标)?限价|控制价|拦标价)公示',  # |异议的回复
           '公告变更': '第[\d一二]次变更|(更正|变更)(公告|公示|信息|内容|事项|原因|理由|日期|时间|如下)|原公告((主要)?(信息|内容)|发布时间)|(变更|更正)[前后]内容|现?在?(变更|更正|修改|更改)(内容)?为|(公告|如下|信息|内容|事项|结果|文件|发布|时间|日期)(更正|变更)',
           '公告变更neg': '履约变更内容',
           '候选人公示': '候选人公示|评标结果公示|中标候选人名单公示|现将中标候选人(进行公示|公[示布]如下)|(中标|中选)候选人(信息|情况)[::\s]',
           '候选人公示neg': '中标候选人公示期|中标候选人公示前',
-          '中标信息': '供地结果信息|采用单源直接采购的?情况说明|[特现]?将\w{,4}(成交|中标|中选|选定结果|选取结果|入围结果|竞价结果)\w{,4}(进行公示|公[示布]如下)|(询价|竞价|遴选)(成交|中标|中选)(公告|公示)|(成交|中标|中选|选定|选取|入围|询价)结果(如下|公告|公示)|(中标|中选)(供应商|承包商|候选人|入围单位)如下|拟定供应商的情况|((中标|中选)(人|成交)|成交)\w{,3}(信息|情况)[::\s]',
-          '中标信息2': '\s(成交|中标|中选)(信息|日期|时间|总?金额|价格)[::\s]|(采购|招标|成交|中标|中选|评标)结果|单一来源(采购|招标)?的?(中标|成交|结果)|项目已结束|中标公示 ', # |单一来源采购原因|拟采取单一来源方式采购|单一来源采购公示
+          '中标信息': '供地结果信息|采用单源直接采购的?情况说明|[特现]?将\w{,4}(成交|中标|中选|选定结果|选取结果|入围结果|竞价结果)\w{,4}(进行公示|公[示布]如下)|(询价|竞价|遴选)?(成交|中标|中选)(公告|公示)|(成交|中标|中选|选定|选取|入围|询价)结果(如下|公告|公示)|(中标|中选)(供应商|承包商|候选人|入围单位)如下|拟定供应商的情况|((中标|中选)(人|成交)|成交)\w{,3}(信息|情况)[::\s]',
+          '中标信息2': '\s(成交|中标|中选)(信息|日期|时间|总?金额|价格)[::\s]|(成交|中标|中选)价格\s*[\d.,]+(?万?元|(采购|招标|成交|中标|中选|评标)结果|单一来源(采购|招标)?的?(中标|成交|结果)|项目已结束|中标公示 ', # |单一来源采购原因|拟采取单一来源方式采购|单一来源采购公示
           '中标信息3': '(中标|中选|成交|拟定|拟选用|最终选定的?|受让)(供应商|供货商|服务商|机构|企业|公司|单位|候选人|人)(信息[,:]?)?(名称)?[::\s]|[、\s](第一名|(拟定|推荐|入围)?(供应商|供货商)|(中选|中标|供货)单位|中选人)[::\s]|确定[\w()]{6,25}为中标人', # |唯一
-          '中标信息neg': '按项目控制价下浮\d%即为成交价|成交原则|不得确定为(中标|成交)|招标人按下列原则选择中标人|评选成交供应商:|拟邀请供应商|除单一来源采购项目外|单一来源除外|(各.{,5}|尊敬的)(供应商|供货商)[:\s]|竞拍起止时间:|询价结果[\s\n::]*不公开|本项目已具备招标条件|现对该项目进行招标公告|发布\w{2}结果后\d天内送达|本次\w{2}结果不对外公示|供应商\s*资格要求|成交情况:\s*[流废]标|中标单位:本次招标拟?中标单位\d家|通知中标单位|影响(成交|中标)结果',
+          '中标信息neg': '按项目控制价下浮\d%即为成交价|成交原则|不得确定为(中标|成交)|招标人按下列原则选择中标人|评选成交供应商:|拟邀请供应商|除单一来源采购项目外|单一来源除外|(各.{,5}|尊敬的)(供应商|供货商)[:\s]|竞拍起止时间:|询价结果[\s\n::]*不公开|本项目已具备招标条件|现对该项目进行招标公告|发布\w{2}结果后\d天内送达|本次\w{2}结果不对外公示|供应商\s*资格要求|成交情况:\s*[流废]标|中标单位:本次招标拟?中标单位\d家|通知中标单位|影响(成交|中标)结果|确定为成交供应商|(成交|中标|中选)公[告示](发布|\w{,2})后|竞价成交后', # 503076535 按照服务方案的优劣 确定为成交供应商
       # |确定成交供应商[:,\s]
           '合同公告': '合同(公告|公示|信息|内容)|合同(编号|名称|主体|基本情况|完成(日期|时间))|(供应商乙方|乙方供应商):|合同总?金额|履约信息',
           '废标公告': '(终止|中止|废标|流标|流采|失败|作废|异常|撤销)(结果)?(公告|公示|招标|采购|竞价)|(谈判结果为|结果类型):?废标|((本|该)(项目|标段|合同|合同包|采购包|次)\w{,5})((失败|终止|流标|废标)|予以废标|(按|做|作)?(流标|废标|废置)处理)|(采购|招标|询价|议价|竞价|比价|比选|遴选|邀请|邀标|磋商|洽谈|约谈|谈判|竞谈|应答|项目)(终止|中止|废标|流标|失败|作废|异常|撤销)',
           '废标公告2': '(无效|中止|终止|废标|流标|失败|作废|异常|撤销)的?(原因|理由)|本项目因故取消|本(项目|次)(公开)?\w{2}失败|已终止\s*原因:|(人|人数|供应商|单位)(不足|未达\w{,3}数量)|已终止|不足[3三]家|无(废标)|成交情况:\s*[流废]标|现予以废置',
-          '废标公告neg': '超过此报价将作为[废流]标处理|否则按[废流]标处理|终止规则:|成交规则:|视为流标|竞价失败的一切其他情形'
+          '废标公告neg': '超过此报价将作为[废流]标处理|否则按[废流]标处理|终止规则:|成交规则:|视为流标|竞价失败的一切其他情形|是否废标:否|若不足三家公司参与|供应商数量:?\s*报名供应商不足三家|有效报价不足三家,\s*系统自动废标' # 503076535 供应商数量: 报名供应商不足三家。
       }
       self.title_life_dic = {
           '采购意向': '采购意向|招标意向|选取意向|意向公告|意向公示|意向公开',
@@ -3958,8 +3970,8 @@ class DocChannel():
           '招标答疑': '质疑|澄清|答疑(文件)?|补遗书?|(最高(投标)?限价|控制价|拦标价)(公示|公告|$)',
           '废标公告': '(终止|中止|废标|废除|废置|流标|失败|作废|异常|撤销|撤回|取消成?交?|流拍)(结果|竞价|项目)?的?(公告|公示|$)|(终止|中止)(采购|招标|询价|议价|竞价|比价|比选|遴选|邀请|邀标|磋商|洽谈|约谈|谈判|拍卖|招租|交易|出让)|关于废置',
           '合同公告': '(合同(成交|变更)?)(公告|公示|信息|公式|公开|签订)|合同备案|合同书|合同$', # |(履约|验收)(结果)?
-          '候选人公示': '候选人(变更)?公示|评标(结果)?公示|中标前?公示|中标预公示',
-          '中标信息': '(中标|中选|中价|中租|成交|入选|确认)(候选人|人|供应商|记录|结果|变更)?(公告|公示|结果)|未?入围(公示|公告)|(遴选|采购|招标|竞价|议价|比选|询比?价|评选|谈判|邀标|邀请|洽谈|约谈|评标|发包|遴选|交易)\w{,2}结果|单一来源(采购|招标)?的?(中标|成交|结果)|中标通知书|中标$', # |开标(记录|信息|情况)
+          '候选人公示': '候选人(变更)?公示|评标(结果)?公示|中标前?公示|中标预公示|评审结果',
+          '中标信息': '(中标|中选|中价|中租|成交|入选|确认)(候选人|人|供应商|记录|结果|变更)?(公告|公示|结果)|未?入围(公示|公告)|(遴选|采购|招标|竞价|议价|比选|询比?价|评选|谈判|邀标|邀请|洽谈|约谈|评标|发包|遴选|交易)\w{,2}结果|单一来源(采购|招标)?的?(中标|成交|结果)|中标通知书|中标$|项目中标', # |开标(记录|信息|情况)
           '资审结果': '((资格|资质)(审查|预审|后审|审核)|资审)结果(公告|公示)?|(资质|资格)(预审|后审)公示|资审及业绩公示',
           '招标公告': '(采购|招标|询价|议价|竞价|比价|比选|遴选|邀请|邀标|磋商|洽谈|约谈|谈判|拍卖|招租|交易|出让)的?(公告|公示|$)|公开(采购|招标|招租|拍卖|挂牌|出让)|(资审|预审|后审)公告',
           '开标记录': '开标记录|截标信息|评委名单公示|开标安排|开标数据表|开标信息|开标情况|开标一览表|开标结果|开标会',
@@ -4254,16 +4266,16 @@ class DocChannel():
       def get_type(title, text):
           if re.search(self.title_type_dic['土地矿产'], title) or re.search(self.type_dic['土地矿产'],
                                                                    text):  # and re.search('(土地|用地|宗地|地块)(经营权)?(流转|承包|出租|招租|租赁|确权)', text)==None
-              if re.search(self.title_type_dic['采招数据'], title + text.strip().split(' ')[0]):
-                  return '采招数据', re.search(self.title_type_dic['采招数据'], title + text.strip().split(' ')[0]).group(0)
+              if re.search(self.title_type_dic['采招数据'], text.strip().split(' ')[0] + title):
+                  return '采招数据', re.search(self.title_type_dic['采招数据'], text.strip().split(' ')[0] + title).group(0)
               return '土地矿产', (re.search(self.title_type_dic['土地矿产'], title) or re.search(self.type_dic['土地矿产'], text)).group(0)
           elif (re.search(self.title_type_dic['拍卖出让'], title) or re.search(self.type_dic['拍卖出让'], text)):
-              if re.search(self.title_type_dic['采招数据'], title + text.strip().split(' ')[0]):
-                  return '采招数据', re.search(self.title_type_dic['采招数据'], title + text.strip().split(' ')[0]).group(0)
+              if re.search(self.title_type_dic['采招数据'], text.strip().split(' ')[0] + title):
+                  return '采招数据', re.search(self.title_type_dic['采招数据'], text.strip().split(' ')[0] + title).group(0)
               return '拍卖出让', (re.search(self.title_type_dic['拍卖出让'], title) or re.search(self.type_dic['拍卖出让'], text)).group(0)
           elif re.search(self.title_type_dic['产权交易'], title) or re.search(self.type_dic['产权交易'], text):
-              if re.search(self.title_type_dic['采招数据'], title + text.strip().split(' ')[0]):
-                  return '采招数据', re.search(self.title_type_dic['采招数据'], title + text.strip().split(' ')[0]).group(0)
+              if re.search(self.title_type_dic['采招数据'], text.strip().split(' ')[0] + title):
+                  return '采招数据', re.search(self.title_type_dic['采招数据'], text.strip().split(' ')[0] + title).group(0)
               return '产权交易', (re.search(self.title_type_dic['产权交易'], title) or re.search(self.type_dic['产权交易'], text)).group(0)
           elif re.search(self.title_type_dic['采招数据'], title) or re.search(self.type_dic['采招数据'], title + text):
               return '采招数据', (
@@ -4363,7 +4375,7 @@ class DocChannel():
           elif '验收合同' in life_kw_title:
               return '验收合同', msc
           elif '候选人公示' in life_kw_title or '候选人公示' in life_list:
-              if '招标公告' in life_kw_title and life_score.get('招标公告', 0) > 3:
+              if '招标公告' in life_kw_title and '候选人公示' not in life_kw_title: # and life_score.get('招标公告', 0) > 3
                   return '招标公告', msc
               elif '废标公告' in life_kw_title or life_score.get('废标公告', 0) > 5:
                   return '废标公告', msc
@@ -4376,8 +4388,7 @@ class DocChannel():
               return '合同公告', msc
 
           elif '中标信息' in life_kw_title or '中标信息' in life_list:
-              if '招标公告' in life_kw_title and life_score.get('招标公告',
-                                                            0) > 2:  # (life_score.get('招标公告', 0)>2 or life_score.get('中标信息', 0)<4) 0.7886409793924245
+              if '招标公告' in life_kw_title and '中标信息' not in life_kw_title and life_score.get('招标公告',0) >= life_score.get('中标信息',0):  # (life_score.get('招标公告', 0)>2 or life_score.get('中标信息', 0)<4) 0.7886409793924245
                   return '招标公告', msc
               elif '废标公告' in life_kw_title or life_score.get('废标公告', 0) > 5:
                   return '废标公告', msc
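
Worked through with made-up scores, the revised arbitration now compares the keyword scores of the two lifecycles instead of using a fixed cutoff:

    life_kw_title = ['招标公告']                   # lifecycle keywords hit in the title
    life_score = {'招标公告': 4, '中标信息': 3}     # keyword scores from the body
    if ('招标公告' in life_kw_title and '中标信息' not in life_kw_title
            and life_score.get('招标公告', 0) >= life_score.get('中标信息', 0)):
        channel = '招标公告'
    else:
        channel = '中标信息'
    print(channel)                                 # -> 招标公告
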
@@ -4453,7 +4464,8 @@ class DocChannel():
           11、预测预告,原始为意向、招标且标题无预告关键词,返回原始类别
           '''
           if result['docchannel']['docchannel'] in ['中标信息', '合同公告'] and origin_dic.get(
-                  original_docchannel, '') in ['招标公告', '采购意向', '招标预告', '公告变更'] and is_contain_winner(prem_json)==False:
+                  original_docchannel, '') in ['招标公告', '采购意向', '招标预告', '公告变更'] and is_contain_winner(
+              prem_json)==False and re.search(self.title_life_dic['中标信息'], title)==None:
               result['docchannel']['docchannel'] = origin_dic.get(original_docchannel, '')
               msc += '最终规则修改:中标公告、合同公告无中标人且原始为非中标,返回原类型'
           elif result['docchannel']['docchannel'] == '废标公告' and is_contain_winner(prem_json) and re.search(
@@ -4498,10 +4510,16 @@ class DocChannel():
 
           if result['docchannel']['doctype'] in ['产权交易', '土地矿产', '拍卖出让'] and origin_dic.get(
                   original_docchannel, '') not in ['产权交易', '土地矿产', '拍卖出让'] \
-                and re.search('产权|转让|受让|招租|招商|出租|承租|竞价|资产|挂牌|出让|拍卖|招拍|划拨|销售', title) == None\
-                and re.search('(采购|招投?标|投标)(信息|内容|项目|公告|数量|人|单位|方式)|(建设|工程|服务|施工|监理|勘察|设计)项目', text):
+                and (re.search(self.title_type_dic['采招数据'], title) or re.search('工程|服务|采购|询价|磋商', title) or re.search('(采购|招投?标|投标)(信息|内容|项目|公告|数量|人|单位|方式)|(建设|工程|服务|施工|监理|勘察|设计)项目|(%s)'%self.type_dic['采招数据'], text)):
               result['docchannel']['doctype'] = '采招数据'
               msc += ' 最终规则修改:预测为非采招数据,原始为采招数据且有招标关键词,返回采招数据'
+          elif result['docchannel']['doctype'] in ['土地矿产'] and origin_dic.get(original_docchannel, '') in ['拍卖出让', '产权交易']:
+              if origin_dic.get(original_docchannel, '') in ['拍卖出让'] and (re.search(self.title_type_dic['拍卖出让'], title) or re.search(self.type_dic['拍卖出让'], text)):
+                  result['docchannel']['doctype'] = '拍卖出让'
+                  msc += "最终规则修改:预测为土地矿产原始为拍卖且有拍卖关键词,返回拍卖"
+              elif (re.search(self.title_type_dic['产权交易'], title) or re.search(self.type_dic['产权交易'], text)):
+                  result['docchannel']['doctype'] = '产权交易'
+                  msc += "最终规则修改:预测为土地矿产原始为产权交易且有产权交易关键词,返回产权交易"
 
           '''下面是新格式增加返回字段'''
           if result['docchannel']['docchannel'] != '':  # 预测到生命周期的复制到life_docchannel,否则用数据源结果
@@ -4548,6 +4566,8 @@ class DocChannel():
           return {'docchannel': {'docchannel': '', 'doctype': not_extract_dic[original_docchannel], 'life_docchannel': origin_dic.get(original_docchannel, '原始类别')}}, '公告类别不在提取范围'
       if web_source_no in ['02104-7', '04733', 'DX007628-6']: # 这些数据源无法识别
           return {'docchannel': {'docchannel': '', 'doctype': '采招数据', 'life_docchannel': origin_dic.get(original_docchannel, '原始类别')}}, '此数据源公告分类不明确,返回数据源类别'
+      if original_docchannel == 303:
+          return {'docchannel': {'docchannel': '处罚公告', 'doctype': '处罚公告', 'life_docchannel': '处罚公告'}}, "源类别为处罚公告"
 
       title = re.sub('[^\u4e00-\u9fa5]+|出租车', '', title)
       if len(title) > 50:
@@ -4567,9 +4587,9 @@ class DocChannel():
       # print('channel正则预测结果:', result)
       msc = '正则结果:类型:%s, 关键词:%s, 周期:%s, 关键词:%s'%(doc_type, type_kw,doc_life, life_kw)+'\n'+'模型结果:'
       # print('类型:%s, 关键词:%s, 周期:%s, 关键词:%s'%(doc_type, type_kw,doc_life, life_kw))
-      if doc_type == "" or doc_life == "":
+      if doc_type == "" or doc_life == "" or (doc_type != '采招数据' and origin_dic.get(original_docchannel, '原始类别') in ['招标公告', '中标信息', '招标预告', '采购意向']):
           array_content, array_title, text_len, title_len, content = get_model_inputs(list_sentence)
-          if doc_type =="":
+          if  doc_type =="" or (doc_type != '采招数据' and origin_dic.get(original_docchannel, '原始类别') in ['招标公告', '中标信息', '招标预告', '采购意向']):
               type_id, type_prob = type_model_predict()
               type_model = self.id2type[type_id]
               if type_model == '新闻资讯' and doc_life!='': # 修复bug 78584245 "docchannel": "合同公告", "doctype": "新闻资讯",
@@ -4583,9 +4603,10 @@ class DocChannel():
           if doc_life=="" and result['docchannel']['doctype'] not in ['', '新闻资讯']:
               if len(text)>150 and re.search(self.kws, content):
                   life_id, life_prob = life_model_predict()
-                  life_model = self.id2life[life_id]
-                  result['docchannel']['docchannel'] = life_model
-                  msc += life_model + ' 概率:%.4f;\n'%life_prob
+                  if life_prob>=0.8:
+                      life_model = self.id2life[life_id]
+                      result['docchannel']['docchannel'] = life_model
+                      msc += life_model + ' 概率:%.4f;\n'%life_prob
 
       msc = final_change(msc)
       # print('channel ', msc)
@@ -5289,8 +5310,8 @@ class IndustryPredictor():
         text = text.replace('(', '(').replace(')', ')')
         text = re.sub(
             '(废标|终止|综?合?评审|评标|开标|资审|履约|验收|成交|中标人?|中选人?|单一来源|合同|候选人|结果|变更|更正|答疑|澄清|意向|需求|采购|招标|询比?价|磋商|谈判|比选|比价|竞价|议价)的?(公告|预告|公示)?|关于为?|选取|定点|直接|邀请函?|通知书?|备案|公开|公示|公告|记录|竞争性',
-            '', text)
-        text = text.replace(tenderee, '')
+            ' ', text)
+        text = text.replace(tenderee, ' ')
         text = ' ' if text=="" else text
         words_docs_list = selffool.cut(text)
         words_docs_list = [[it for it in l if re.search('^[\u4e00-\u9fa5]+$', it)][-maxSententLen:] for l in words_docs_list]
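
Replacing the stripped keywords with a space rather than an empty string presumably keeps a word boundary for the segmenter; a small before/after comparison with an invented title:

    import re

    text = '办公设备采购空调维修服务'
    print(re.sub('采购', '', text))                # -> 办公设备空调维修服务   (two phrases glued together)
    print(re.sub('采购', ' ', text))               # -> 办公设备 空调维修服务  (boundary preserved)
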
@@ -5323,6 +5344,7 @@ class IndustryPredictor():
         product = product if product else ''
 
         text_ind = (doctitle + project_name + product).replace(tenderee, '')
+        text_ind = text_ind.replace('墙面粉刷', '墙面 粉刷')
         text_com = win_tenderer
 
         length_ind_text = len(text_ind) + 1
@@ -5878,15 +5900,54 @@ class DistrictPredictor():
                     name, b, e = it
                     area_list.append((name, (e - b + e) / max_len / 2))
             return area_list
+
+        def find_whole_areas(text):
+            '''
+            通过省、市、区县整体正则匹配文本,返回三级地址及其所在位置
+            :param text: 待匹配文本
+            :return: province_l, city_l, district_l,元素为 (名称, 起始下标, 结束下标)
+            '''
+            pettern = "((?P<prov>%s)(?P<city>%s)?(?P<dist>%s)?)|((?P<city1>%s)(?P<dist1>%s)?)|(?P<dist2>%s)" % (
+            p_pro, p_city, p_dis, p_city, p_dis, p_dis)
+            province_l, city_l, district_l = [], [], []
+            for it in re.finditer(pettern, text):
+                if re.search('[省市区县旗盟]$', it.group(0)) == None and re.search(
+                        '^([东南西北中一二三四五六七八九十大小]?(村|镇|街|路|道|社区)|酒店|宾馆)', text[it.end():]):
+                    continue
+                if it.group(0) == '站前':  # 20240314 修复类似 中铁二局新建沪苏湖铁路工程站前VI标项目 错识别为 省份:辽宁, 城市:营口,区县:站前
+                    continue
+                for k, v in it.groupdict().items():
+                    if v != None:
+                        if k in ['prov']:
+                            province_l.append((it.group(k), it.start(k), it.end(k)))
+                        elif k in ['city', 'city1']:
+                            if re.search('^(经济开发区|开发区|新区)', text[it.end(k):]):  # 城市不匹配为区的地址 修复 滨州北海经济开发区 北海新区 等提取为北海
+                                continue
+                            city_l.append((it.group(k), it.start(k), it.end(k)))
+                            if re.search('^([分支](公司|局|行|校|院|干?线)|\w{,3}段|地铁|(火车|高铁)?站|\w{,3}项目)', text[it.end(k):]):
+                                city_l.append((it.group(k), it.start(k), it.end(k)))
+                        elif k in ['dist', 'dist1', 'dist2']:
+                            if it.group(k)=='昌江' and '景德镇' not in it.group(0):
+                                district_l.append(('昌江黎族', it.start(k), it.end(k)))
+                            else:
+                                district_l.append((it.group(k), it.start(k), it.end(k)))
+            return province_l, city_l, district_l
+
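
A minimal sketch of the whole-match idea, using a toy vocabulary (the real p_pro/p_city/p_dis are the full province/city/district alternations); it shows why matching province+district as one unit avoids the 海南昌江 → 南昌 misread mentioned below:

    import re

    p_pro, p_city, p_dis = '海南', '南昌|海口', '昌江'          # toy word lists, for illustration only
    pattern = ("((?P<prov>%s)(?P<city>%s)?(?P<dist>%s)?)"
               "|((?P<city1>%s)(?P<dist1>%s)?)|(?P<dist2>%s)" % (
                   p_pro, p_city, p_dis, p_city, p_dis, p_dis))
    text = '海南昌江黎族自治县某项目'
    print(re.findall('南昌|海口', text))                        # -> ['南昌']  per-level matching misfires
    print([(m.group('prov'), m.group('dist'))
           for m in re.finditer(pattern, text)])                # -> [('海南', '昌江')]
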
         def get_pro_city_dis_score(text, text_weight=1):
             text = re.sub('复合肥|海南岛|兴业银行|双河口|阳光|杭州湾', ' ', text)
             text = re.sub('珠海城市', '珠海', text)  # 修复 426624023 珠海城市 预测为海城市
             text = re.sub('怒江州', '怒江傈僳族自治州', text)  # 修复 423589589  所属地域:怒江州 识别为广西 - 崇左 - 江州
             text = re.sub('茂名滨海新区', '茂名市', text)
             text = re.sub('中山([东南西][部区环]|黄圃|南头|东凤|小榄|石岐|翠亨|南朗)', '中山市', text)
-            province_l = find_areas(p_pro, text)
-            city_l = find_areas(p_city, text)
-            district_l = find_areas(p_dis, text)
+            ser = re.search('海南(昌江|白沙|乐东|陵水|保亭|琼中)(黎族)?', text)
+            if ser and '黎族' not in ser.group(0):
+                text = text.replace(ser.group(0), ser.group(0)+'黎族')
+            # province_l = find_areas(p_pro, text)
+            # city_l = find_areas(p_city, text)
+            # district_l = find_areas(p_dis, text)
+
+            province_l, city_l, district_l = find_whole_areas(text) # 20240703 优化地址提取,解决类似 海南昌江 得到 海南 南昌 结果
 
             if len(province_l) == len(city_l) == 0:
                 district_l = [it for it in district_l if
@@ -6066,8 +6127,11 @@ class DistrictPredictor():
 
         def get_project_addr(text):
             p1 = '(项目|施工|实施|建设|工程|服务|交货|送货|收货|展示|看样|拍卖)(地址|地点|位置|所在地区?)(位于)?:(?P<addr>(\w{1,13}(自治[区州县旗]|地区|[省市区县旗盟])[^\w]*)+|\w{2,15}[,。])'
+            p2 = '项目位于(?P<addr>\w{2}市\w{2,4}区)'
             if re.search(p1, text):
                 return re.search(p1, text).group('addr')
+            elif re.search(p2, text):
+                return re.search(p2, text).group('addr')
             else:
                 return ''
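
The added fallback p2 can be exercised directly (sample sentence invented):

    import re

    text = '本项目位于深圳市南山区科技园片区'
    print(re.search(r'项目位于(?P<addr>\w{2}市\w{2,4}区)', text).group('addr'))
    # -> 深圳市南山区
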
 
@@ -6722,15 +6786,16 @@ class CandidateExtractor(object):
             'project_code': "(项目|招标|采购|计划|公告|包[段组件]|标[段包的]|标段(包)|分[包标])(编号|编码)",
             "project_name": "(包[段组件]|标[段包的项]|标段(包)|分[包标]|采购|项目|工程|货物|商品|产品|设备|通用|主要标的|^包)(名称?|内容)",
             "win_sort": "排名|排序|名次|推荐顺序",
-            'win_or_not': '是否中标|是否入围|是否入库|入围结论',
-            "candidate": "((候选|入围|入选|投标)(供应商库)?的?(人|人?单位|机构|供应商|供货商|服务商|投标人|(中标)?公司|(中标)?企业)|(通过)?名单|中标候选人)(名称|名单|全称|\d)?$|^供应商(名称|信息)?$",
+            'win_or_not': '是否(建议|推荐)?(中标|成交)|是否入围|是否入库|入围结论',
+            "candidate": "((候选|入围|入选|投标)(供应商库)?的?(人|人?单位|机构|供应商|供货商|服务商|投标人|(中标)?公司|(中标)?企业)|(通过)?名单|中标候选人)(名称|名单|全称|\d)?$|^供应商(名称|信息)?$|投标个人/单位", #补充 368295593 投标个人/单位 提取
             "bid_amount": "投标[报总]?价|报价(总?金额|总价|总额)|总报价|^\w{,5}报价(([\w、/]{1,15}))?$|(中标|成交|合同))?([金总]额|[报均总]价|价[格款]?)|承包价|含税价|经评审的价格",
             "win_tenderer": "第一名|第一(中标|成交)?候选人",
             "second_tenderer": "第二名|第二(中标|成交)?候选人",
             "third_tenderer": "第三名|第三(中标|成交)?候选人",
         }
         '''非表格候选人正则'''
-        self.p = '((候选|入围|入选|投标)(供应商库)?的?(人|人?单位|机构|供应商|供货商|服务商|投标人|(中标)?公司|(中标)?企业)|(通过)?名单)(名称|名单|全称|\d)?:$'
+        # self.p = '((候选|入围|入选|投标)(供应商库)?的?(人|人?单位|机构|供应商|供货商|服务商|投标人|(中标)?公司|(中标)?企业|应答人)|(通过)?名单)(名称|名单|全称|\d)?:$'
+        self.p = '((候选|入围|入选|投标|报价|成交|中标|中选|供[货应]|应答)(人|方|人?单位|机构|厂?商|商家|服务商|公司|企业)|(通过|入围)名单)(名称|名单|全称|\d)?:?$'
         self.tb = TableTag2List()
         with open(os.path.dirname(__file__)+'/header_set.pkl', 'rb') as f:
             self.headerset = pickle.load(f)
@@ -6869,7 +6934,10 @@ class CandidateExtractor(object):
             package = uniform_package_name(package) if package !="" else "Project"
             if candidate:
                 if win_or_not and re.search('否|未入围', win_or_not):
-                    pass
+                    candidate_set.add(candidate)
+                elif re.search('^((建议|推荐)(中标|成交)|是)$', win_or_not) and win_sort in ['', '参与投标单位及排名'] and win_tenderer=='':
+                    win_sort = '第一名'
+                    candidate_set.add(candidate)
                 else:
                     candidate_set.add(candidate)
 
@@ -6884,8 +6952,7 @@ class CandidateExtractor(object):
                             if type not in role_dic:
                                 role_dic[type] = dict()
                             role_dic[type]['role_text'] = text
-                            if type in ['second_tenderer', 'third_tenderer']:
-                                candidate_set.add(text)
+                            candidate_set.add(text)
 
                 elif re.search('投标报价|报价$', df.loc[i, 0]) or re.search('投标报价|报价$', df.loc[i, 1]):
                     findmoney = True
@@ -7042,7 +7109,7 @@ class CandidateExtractor(object):
                 e = ent.wordOffset_end
                 if ent.label in [2,3,4]: # 直接加实体预测的候选人, 否则规则检查是否为候选人
                     candidates.add(ent.entity_text)
-                elif isinstance(b, int) and isinstance(e, int):
+                elif isinstance(b, int) and isinstance(e, int) and ent.label in [5]:
                     foreword = text[max(0, b - 10):b]
                     if re.search(self.p, foreword):
                         candidates.add(ent.entity_text)
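
For entities the role model left as "other" (label 5), the ten characters in front are now tested against the broadened self.p; a quick check with invented forewords:

    import re

    p = ('((候选|入围|入选|投标|报价|成交|中标|中选|供[货应]|应答)'
         r'(人|方|人?单位|机构|厂?商|商家|服务商|公司|企业)|(通过|入围)名单)(名称|名单|全称|\d)?:?$')
    print(bool(re.search(p, '应答单位:')))         # True  -> added to candidate_set
    print(bool(re.search(p, '开户银行:')))         # False -> ignored
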
@@ -7065,8 +7132,8 @@ class CandidateExtractor(object):
         if prem == {} and richText:
             prem, candidate_set = self.get_prem(richText)
             in_attachment = True
-        if prem == {} and candidate_set == set():
-            candidate_set = self.get_candidates_from_text(list_sentences, list_entitys)
+        candidate_set2 = self.get_candidates_from_text(list_sentences, list_entitys)
+        candidate_set.update(candidate_set2)
         return prem, {'candidate': ','.join(candidate_set)}, in_attachment
 
 def role_special_predictor(web_source_name, content, nlp_enterprise):
@@ -7150,15 +7217,15 @@ class ApprovalPredictor():
         项目(法人)单位
         '''
         self.other_part = {
-            "project_name": "(项目|工程|采购|招标|计划)名称?:(?P<main>[^:。]{5,50})[,。](\w{2,10}:|$)?", # 项目名称
-            "project_code": "(立案号|项目(统一)?代码|(项目|工程|采购|招标|计划|任务|备案)(编[号码]|号)):(?P<main>(\w{2,8})?[()〔〕【】\[\]a-zA-Z0-9-]{5,30}号?)(\w{2,10}:|$)?", # 项目编号
-            "doc_num": "((审[批查核]|批[复准]|立项|[定知]书|[公发批]文|用地|决定|备案|核准|许可|确认)[文编]?号|综合受理号|文书号):(?P<main>(\w{2,8})?[()〔〕【】\[\]a-zA-Z0-9-.]{5,30}号?)[,。]?(\w{2,10}:|$)?", # 文号
-            "pro_type": "(申[报请](类型|种类)|项目所属行业|行业(分类|归属)|产业领域|项目行业|项目类型|立项类型):(?P<main>[^:。]{2,30})[,。](\w{2,10}:|$)?", # 项目类型
-            "year_limit": "((建设|工程|服务|项目)(年限|期限|时长)):(?P<main>[\d个年月日.-]{2,20})[,。](\w{2,10}:|$)?", # 建设年限
-            "construction_scale": "(建设内容[及和](建设)?规模|建设规模[及和](主要)?(建设)?内容|(建设|工程|项目)规模(如下)?):(?P<main>[^:。]{2,50})[,。](\w{2,10}:|$)?", # 建设规模
-            "approval_items": "((审[批查核]|批[复准]申请)(事项|内容)|事项名称|事项审批):(?P<main>[^:。]{2,50})[,。](\w{2,10}:|$)?", # 审批事项
+            "project_name": "((项目|工程|采购|招标|计划|建设|规划)名称?|生产建设项目|申请项目):(?P<main>[^:。]{5,50})[,。](\w{2,10}:|$)?", # 项目名称
+            "project_code": "(立案号|项目(统一)?代码|(项目|工程|采购|招标|计划|任务|备案|索引)(编[号码]|号)):?(?P<main>(\w{2,8})?[()〔〕【】\[\]a-zA-Z0-9-]{5,30}号?)(\w{2,10}:|$)?", # 项目编号
+            "doc_num": "((审[批查核]|批[复准]|立项|[定知]书|[公发批]文|用地|决定|备案|核准|许可|确认|受理|申请报告|文件|意见书|办件)[文编]?号|综合受理号|文书?号|合格书号):?(?P<main>(\w{2,8})?[()〔〕【】\[\]a-zA-Z0-9-.]{5,30}号?)[,。]?(\w{2,10}:|$)?", # 文号
+            "pro_type": "((申[报请]|审核备|项目|立项)(类型|种类)|项目所属行业|行业(分类|归属)|产业领域|项目行业):(?P<main>[^:。]{2,30})[,。](\w{2,10}:|$)?", # 项目类型
+            "year_limit": "((建设|工程|服务|项目)(起止|\w{,2})?(年限|期限|时长|工期)):(约|超过|大概|建设工期|共计|合计)?(?P<main>[\d一二三四五六七八九十]+个月|\d{1,3}(日?历?天|小时)|20\d{2}[年/-](\d{1,2}[月/-]?)?(\d{1,2}日?)?([至—-]+20\d{2}[年/-](\d{1,2}[月/-]?)?(\d{1,2}日?)?)?)[(,。](\w{2,10}:|$)?", # 建设年限
+            "construction_scale": "(建设内容[及和](建设)?规模|建设规模[及和](主要)?(建设)?内容|(建设|招标|采购))?内容|(建设|工程|项目)(主要)?(规模|内容|概况|面积)([及和](主要)?(规模|内容|概况|面积))?(如下)?):(?P<main>[^:。]{2,250})[,。](\w{2,10}:|$)?", # 建设规模
+            "approval_items": "((审[批查核]|批[复准]|申请|监管)(事项|内容|名称)|事项名称|事项审批):(?P<main>[^:。]{2,70})[,。](\w{2,10}:|$)?", # 审批事项
             "properties": "((建设|工程|项目)性质):(?P<main>[^:。]{2,50})[,。](\w{2,10}:|$)?", # 建设性质
-            "approval_result": "((审[批查核]|批[复准])(结果|决定|结论|状态|回复)|(办理|,)(状态|意见|结果)):(?P<main>[^:。]{2,50})[,。](\w{2,10}:|$)?", # 审批结果
+            "approval_result": "((审[批查核]|批[复准]|核[发准]|许可|抽查|备案)(结果|决定|结论|状态|回复|意见)|(办[理件]|,)(状态|意见|结果)|项目(当前|目前)?状态):(?P<main>[^:。]{2,20})[,。](\w{2,10}:|$)?", # 审批结果
             "phone": "(联系)?电话:(?P<main>1[3-9][0-9][-—-―]?\d{4}[-—-―]?\d{4}|" # 联系电话
                      '\+86.?1[3-9]\d{9}|'
                      '0[1-9]\d{1,2}[-—-―][2-9]\d{6}\d?[-—-―]\d{1,4}|'
@@ -7172,22 +7239,26 @@ class ApprovalPredictor():
         }
 
         self.role_type = {
-            "declare_company": "(申[请报]|填报|呈报)(部门|机关|单位|企业|公司|机构|组织)",  # 申报单位
-            "construct_company": "(业主|建设|用地|委托|发包|产权|项目))?(部门|机关|单位|企业|公司|方)|主送机关|法人单位|甲方",  # 建设单位
-            "approver": "(审[批查核]|许可|批准|发证|批复|管理)(部门|机关|单位|企业|公司|机构)",  # 审批部门
-            "evaluation_agency": "(环境|环保)?(影响)?(环评|评价|评估)(机构|单位|公司)"  # 环评机构
+            "declare_company": "(申[请报]|填报|呈报)(人|部门|机关|单位|企业|公司|机构|组织)",  # 申报单位
+            "construct_company": "(业主|建设|用地|委托|发包|产权|项目))?(部门|机关|单位|企业|公司|方|业主)|主送机关|法人单位|甲方",  # 建设单位
+            "approver": "(审[批查核议图]|许可|批[复准](用地)?|发证|管理|办理|受理|核[发准]|备案|承办)(部门|机关|单位|企业|公司|机构)|实施主体",  # 审批部门
+            "evaluation_agency": "(环境|环保)?(影响)?(环评|评价|评估)(机构|单位|公司)" , # 环评机构
+            "compilation_unit": "编制单位", # 编制单位 20240701加
+            "publisher": "(发布|发文|公示|公告)(人|部门|机关|单位|企业|公司|机构|组织)" # 发布机构 20240703加
         }
         self.person_type = {
             "legal_person": "项目法人|法定代表人|企业法人"  # 项目法人
         }
         self.date_type = {
             "time_declare": "(申[请报]|填报|呈报)(时间|日期)", # 申报时间
-            "time_commencement": "(开工|动工|施工开始)(时间|日期)", # 开工时间
-            "time_completion": "(竣工|完工|验收|(项目|建设|工程)(完成|结束))(备案)?(时间|日期)" # 竣工时间
+            "time_commencement": "(开工|动工|(项目|建设|工程|施工)开始)(时间|日期)", # 开工时间
+            "time_completion": "(竣工|完工|验收|(项目|建设|工程|施工)(完成|结束))(备案)?(时间|日期)", # 竣工时间
+            "time_approval": "(审[批查核查议]|许可|批[复准](用地)?|发证|管理|办理|受理|核[发准]|备案|决定)(时间|日期)", # 审批时间 20240701加
+            "time_release": "(发布|发文|公告|生成|成文)(时间|日期)" # 发布时间
         }
 
         self.addr_type = {
-            "project_addr": "(建设|工程|项目|施工)(地址|地点|位置|所在地)|[宗土]地坐落|用地位置" # 建设地址
+            "project_addr": "(建设|工程|项目|施工|地块|用地)\w{,2}(地址|地点|位置|所在地)|[宗土]地坐落" # 建设地址
         }
 
         self.money_type = {
@@ -7203,6 +7274,7 @@ class ApprovalPredictor():
         rs_l = []
         found_key = 0
         code_name_set = set() # 项目编号、名称集合
+        org_set = set() # 保存可能为审批部门的角色
         for entity in list_entitys[0]:
             entities[entity.sentence_index].append(entity)
 
@@ -7214,12 +7286,16 @@ class ApprovalPredictor():
             for entity in entities[i]:
                 b, e = entity.wordOffset_begin, entity.wordOffset_end
                 if entity.entity_type in ['org', 'company']:
+                    flag = 1
                     for k, v in self.role_type.items():
                         if re.search(v, sentences[entity.sentence_index][max(0, b - span):b]):
                             if rs_dic[k] == '':
                                 rs_dic[k] = entity.entity_text
                             multi_project[k] = entity.entity_text
                             found_key = 1
+                            flag = 0
+                    if flag and entity.entity_type == "org" and re.search('(局|委员会|委|厅)$', entity.entity_text):
+                        org_set.add(entity.entity_text)
                 elif entity.entity_type in ['person']:
                     for k, v in self.person_type.items():
                         if re.search(v, sentences[entity.sentence_index][max(0, b - span):b]):
@@ -7231,9 +7307,12 @@ class ApprovalPredictor():
                 elif entity.entity_type in ['time']:
                     for k, v in self.date_type.items():
                         if re.search(v, sentences[entity.sentence_index][max(0, b - span):b]):
+                            time = timeFormat(entity.entity_text, default_first_day=False) if k in ['time_completion'] else timeFormat(entity.entity_text)
+                            if time == "":
+                                continue
                             if rs_dic[k] == '':
-                                rs_dic[k] = entity.entity_text
-                            multi_project[k] = entity.entity_text
+                                rs_dic[k] = time
+                            multi_project[k] = time
                             found_key = 1
                 elif entity.entity_type in ['location']:
                     for k, v in self.addr_type.items():
@@ -7275,6 +7354,16 @@ class ApprovalPredictor():
                     multi_project[k] = iter.group('main')
                     found_key = 1
                     break
+            for k, v in self.date_type.items():
+                for iter in re.finditer(v+':?(?P<main>20\d{2}-\d{1,2}(-\d{1,2})?|20\d{2}/\d{1,2}(/\d{1,2})?|20\d{2}\.\d{1,2}(\.\d{1,2})?|20\d{2}(0[1-9]|1[0-2])(0[1-9]|[1-2][0-9]|3[0-1])?)', text): # 规则补充实体识别不到的日期时间
+                    time = timeFormat(iter.group('main'), default_first_day=False) if k in ['time_completion'] else timeFormat(iter.group('main'))
+                    if time == "":
+                        continue
+                    if rs_dic[k] == '':
+                        rs_dic[k] = time
+                    multi_project[k] = time
+                    found_key = 1
+                    break
             if (multi_project['project_code'] != "" or multi_project['project_name'] != "") and multi_project['project_code']+multi_project['project_name'] not in code_name_set:
                 code_name_set.add(multi_project['project_code']+multi_project['project_name'])
                 district = getPredictor('district').get_area(
@@ -7296,6 +7385,8 @@ class ApprovalPredictor():
                 rs_dic['province'] = district['district']['province']
                 rs_dic['city'] = district['district']['city']
                 rs_dic['district'] = district['district']['district']
+            if len(org_set) == 1 and rs_dic['approver'] == "":
+                rs_dic['approver'] = org_set.pop()
             rs_dic = {k: v for k, v in rs_dic.items() if v != ''}
             return [rs_dic]
         return []

+ 10 - 6
BiddingKG/dl/proposed_building/pb_extract.py

@@ -133,11 +133,6 @@ class PBPredictor:
                     has_stage = 0
 
                 pb_json = {
-                    'tenderee': tenderee,
-                    'agency': agency,
-                    'project_code': project_code,
-                    'project_name': project_name,
-                    'doctitle': doctitle,
                     'stage': stage,
                     'industry': industry,
                     'proportion': proportion,
@@ -160,12 +155,21 @@ class PBPredictor:
                     'has_stage': has_stage,
                 }
 
+                # 值为None的key删掉
+                delete_keys = []
+                for key in pb_json.keys():
+                    if pb_json.get(key) in [None, "", 0.0, 0]:
+                        delete_keys.append(key)
+                for key in delete_keys:
+                    if key in pb_json.keys():
+                        pb_json.pop(key)
+
                 pb_json = {'pb': pb_json}
                 return pb_json
 
         except:
             traceback.print_exc()
-            return {'pb': 'error'}
+            return {'pb': {}}
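
An equivalent, more compact way to drop the empty keys, assuming the same falsy values (None, '', 0, 0.0) are meant to be filtered; sample values invented:

    pb_json = {'stage': '施工在建', 'industry': '', 'proportion': None, 'has_stage': 0}
    pb_json = {k: v for k, v in pb_json.items() if v not in (None, '', 0, 0.0)}
    print(pb_json)                                 # -> {'stage': '施工在建'}
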
 
 
 def extract_legal_stage(content, _pattern, priority_dict, product='', tenderee='', agency=''):

+ 2 - 1
BiddingKG/run_extract_server.py

@@ -81,11 +81,12 @@ def run_thread(data,list_result):
     web_source_no = data.get("web_source_no","")
     web_source_name = data.get("web_source_name","")
     original_docchannel = data.get("original_docchannel","")
+    page_attachments = data.get("page_attachments","")
     # print("web_source_name:",web_source_name)
     is_fail = False
     try:
         if _content!="":
-            data_res  = predict(_doc_id,_content,_title,_page_time,web_source_no=web_source_no,web_source_name=web_source_name,original_docchannel=original_docchannel)
+            data_res  = predict(_doc_id,_content,_title,_page_time,web_source_no=web_source_no,web_source_name=web_source_name,original_docchannel=original_docchannel,page_attachments=page_attachments)
         else:
             data_res = json.dumps({"success":False,"msg":"content not passed"})
             # is_fail = True
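
Callers can now pass attachments through the request body; a sketch of the relevant part of the payload — only the page_attachments key itself appears in the hunk, every value below (and its JSON-string format) is an assumption:

    data = {
        'web_source_no': '00001-1',
        'web_source_name': '某采购网',
        'original_docchannel': 52,
        'page_attachments': '[]',                  # assumed JSON string, empty list when absent
    }
    page_attachments = data.get('page_attachments', '')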