Pārlūkot izejas kodu

优化产品配置提取,完善缺失和冗余问题,预估提取率和准确率能达到80%以上

luojiehua 1 gadu atpakaļ
vecāks
revīzija
dd72c9a29a

+ 30 - 0
BaseDataMaintenance/embedding_services.py

@@ -0,0 +1,30 @@


import os
import traceback

# Pin this service to GPU 1 so it does not contend with other processes.
os.environ["CUDA_VISIBLE_DEVICES"] = "1"

from flask import Flask,request,jsonify
app = Flask(__name__)

from BaseDataMaintenance.common.sentencesUtil import *

@app.route("/embedding",methods=["POST"])
def embedding():
    """Return the normalized embedding vector of a posted sentence.

    Expects a JSON body like ``{"sentence": "..."}`` and responds with
    ``{"success": bool, "vector": [...]}``; ``vector`` is only present on
    success, and ``error`` carries the failure reason otherwise.
    """
    _r = {"success": True}
    try:
        sentence = request.json.get("sentence","")
        vector = get_normalized_vector(sentence)
        _r["vector"] = vector
    except Exception as e:
        # Log the failure and surface the reason to the caller instead of
        # silently returning an empty failure.
        traceback.print_exc()
        _r["success"] = False
        _r["error"] = str(e)


    return jsonify(_r)

## Deployment:
## Place this file at the same level as the BaseDataMaintenance package, then run:
## nohup /data/anaconda3/envs/py37/bin/gunicorn -w 1 --limit-request-fields 0 --limit-request-line 0 -t 1000 --keep-alive 600 -b 0.0.0.0:17130 embedding_services:app >> embedding.log &

if __name__ == "__main__":
    # NOTE(review): the gunicorn command above binds port 17130 while this dev
    # server uses 15010 -- confirm which port clients expect.
    # Port must be an int, and debug mode must stay off on a 0.0.0.0 bind
    # (the Werkzeug debugger allows remote code execution).
    app.run(host="0.0.0.0",port=15010,debug=False)

+ 1 - 16
BaseDataMaintenance/fixDoc_to_queue_extract.py

@@ -8,19 +8,4 @@ from BaseDataMaintenance.maintenance.dataflow_mq import fixDoc_to_queue_extract,
 
 if __name__ == '__main__':
     # fixDoc_to_queue_extract()
-    fixDoc_to_queue_init(filename="/data/python/flow_init_check/flow_init_2023-08-13.xlsx")
-    fixDoc_to_queue_init(filename="/data/python/flow_init_check/flow_init_2023-08-12.xlsx")
-    fixDoc_to_queue_init(filename="/data/python/flow_init_check/flow_init_2023-08-11.xlsx")
-    fixDoc_to_queue_init(filename="/data/python/flow_init_check/flow_init_2023-08-10.xlsx")
-    fixDoc_to_queue_init(filename="/data/python/flow_init_check/flow_init_2023-08-09.xlsx")
-    fixDoc_to_queue_init(filename="/data/python/flow_init_check/flow_init_2023-08-08.xlsx")
-    fixDoc_to_queue_init(filename="/data/python/flow_init_check/flow_init_2023-08-07.xlsx")
-    fixDoc_to_queue_init(filename="/data/python/flow_init_check/flow_init_2023-08-06.xlsx")
-    fixDoc_to_queue_init(filename="/data/python/flow_init_check/flow_init_2023-08-05.xlsx")
-    fixDoc_to_queue_init(filename="/data/python/flow_init_check/flow_init_2023-08-04.xlsx")
-    fixDoc_to_queue_init(filename="/data/python/flow_init_check/flow_init_2023-08-03.xlsx")
-    fixDoc_to_queue_init(filename="/data/python/flow_init_check/flow_init_2023-08-02.xlsx")
-    fixDoc_to_queue_init(filename="/data/python/flow_init_check/flow_init_2023-08-01.xlsx")
-    fixDoc_to_queue_init(filename="/data/python/flow_init_check/flow_init_2023-07-31.xlsx")
-    fixDoc_to_queue_init(filename="/data/python/flow_init_check/flow_init_2023-07-30.xlsx")
-    fixDoc_to_queue_init(filename="/data/python/flow_init_check/flow_init_2023-07-29.xlsx")
+    fixDoc_to_queue_init(filename="/data/python/flow_init_check/flow_init_2023-08-30.xlsx")

+ 163 - 102
BaseDataMaintenance/maintenance/product/htmlparser.py

@@ -2,15 +2,22 @@
 
 import re
 
+from BaseDataMaintenance.maintenance.product.productUtils import *
 import logging
 logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
 
+logger = logging.getLogger(__name__)
+logger.setLevel(logging.DEBUG)
+
+
 
 
 from bs4 import BeautifulSoup
 import copy
 
-from BaseDataMaintenance.maintenance.product.productUtils import *
+end_pattern = "商务要求|评分标准|商务条件|商务条件"
+_param_pattern = "(产品|技术|清单[及和]?|配置|参数|具体|明细[及和]?|项目|货物|服务)(指标|配置|要求|参数|需求|规格)|配置清单|(质量|技术).{,10}要求|验收标准|^参数$"
+meter_pattern = "角度|容积|色彩|帧率|磁场|强度|允差|噪音|材质|频率|阻抗|浓度|范围|误差|精确|温度|可调|设定值|功能|检测|高度|宽度|模式|尺寸|重量|峰值|容量|寿命|稳定性|高温|电源|电压|功率|压力|压强"
 
 def getTrs(tbody):
     #获取所有的tr
@@ -130,12 +137,6 @@ class ParseDocument():
 
 
         # #识别目录树
-        # for _page in self.childs:
-        #     print("%d============"%_page.page_no)
-        #     for _sentence in _page.childs:
-        #         print(_sentence)
-        #     print("%d================"%_page.page_no)
-        #
         # if self.parseTree:
         #     self.parseTree.printParseTree()
 
@@ -152,26 +153,26 @@ class ParseDocument():
             if _id in self.set_tree_id:
                 continue
             self.set_tree_id.add(_id)
-            print(append,t["text"][:50],t["sentence_title"],t["title_before"],t["title_after"])
+            logger.debug("%s %s %s %s %s"%(append,t["text"][:50],t["sentence_title"],t["title_before"],t["title_after"]))
             childs = t["child_title"]
-            self.print_tree(childs,append=append+"  ")
+            self.print_tree(childs,append=append+"-|")
 
     def is_title_first(self,title):
         if title in ("一","1","Ⅰ","a","A"):
             return True
         return False
 
-    def find_title_by_pattern(self,_text,_pattern="(^|★|▲|:|:|\s+)(?P<title_1>(?P<title_1_index_0_0>第?)(?P<title_1_index_1_1>[一二三四五六七八九十ⅠⅡⅢⅣⅤⅥⅦⅧⅨⅩⅪⅫ]+)(?P<title_1_index_2_0>[、章册包部\.::]))|" \
+    def find_title_by_pattern(self,_text,_pattern="(^|★|▲|:|:|\s+)(?P<title_1>(?P<title_1_index_0_0>第?)(?P<title_1_index_1_1>[一二三四五六七八九十ⅠⅡⅢⅣⅤⅥⅦⅧⅨⅩⅪⅫ]+)(?P<title_1_index_2_0>[、章册包部\.::]))|" \
                                              "([\s★▲\*]*)(?P<title_3>(?P<title_3_index_0_0>[^一二三四五六七八九十\dⅠⅡⅢⅣⅤⅥⅦⅧⅨⅩⅪⅫ]{,3}?)(?P<title_3_index_0_1>[ⅠⅡⅢⅣⅤⅥⅦⅧⅨⅩⅪⅫ]+)(?P<title_3_index_0_2>))|" \
                                              "([\s★▲\*]*)(?P<title_4>(?P<title_4_index_0_0>[^一二三四五六七八九十\dⅠⅡⅢⅣⅤⅥⅦⅧⅨⅩⅪⅫ]{,3}?第?)(?P<title_4_index_1_1>[一二三四五六七八九十]+)(?P<title_4_index_2_0>[节章册部\.::、、]))|" \
                                              "([\s★▲\*]*)(?P<title_5>(?P<title_5_index_0_0>^)(?P<title_5_index_1_1>[一二三四五六七八九十]+)(?P<title_5_index_2_0>)[^一二三四五六七八九十节章册部\.::、、])|" \
                                              "([\s★▲\*]*)(?P<title_12>(?P<title_12_index_0_0>[^一二三四五六七八九十\dⅠⅡⅢⅣⅤⅥⅦⅧⅨⅩⅪⅫ]{,3}?\d{1,2}[\..、\s\-]\d{1,2}[\..、\s\-]\d{1,2}[\..、\s\-]\d{1,2}[\..、\s\-])(?P<title_12_index_1_1>\d{1,2})(?P<title_12_index_2_0>[\..、\s\-]?))|"\
                                              "([\s★▲\*]*)(?P<title_11>(?P<title_11_index_0_0>[^一二三四五六七八九十\dⅠⅡⅢⅣⅤⅥⅦⅧⅨⅩⅪⅫ]{,3}?\d{1,2}[\..、\s\-]\d{1,2}[\..、\s\-]\d{1,2}[\..、\s\-])(?P<title_11_index_1_1>\d{1,2})(?P<title_11_index_2_0>[\..、\s\-]?))|" \
                                              "([\s★▲\*]*)(?P<title_10>(?P<title_10_index_0_0>[^一二三四五六七八九十\dⅠⅡⅢⅣⅤⅥⅦⅧⅨⅩⅪⅫ]{,3}?\d{1,2}[\..、\s\-]\d{1,2}[\..、\s\-])(?P<title_10_index_1_1>\d{1,2})(?P<title_10_index_2_0>[\..、\s\-]?))|" \
-                                             "([\s★▲\*]*)(?P<title_7>(?P<title_7_index_0_0>[^一二三四五六七八九十\dⅠⅡⅢⅣⅤⅥⅦⅧⅨⅩⅪⅫ]{,3}?\d{1,2}[\..\s\-])(?P<title_7_index_1_1>\d{1,2})(?P<title_7_index_2_0>[\..包::、\s\-]?))|" \
-                                             "([\s★▲\*]*)(?P<title_6>(?P<title_6_index_0_0>[^一二三四五六七八九十\dⅠⅡⅢⅣⅤⅥⅦⅧⅨⅩⅪⅫ]{,3}?包?)(?P<title_6_index_0_1>\d{1,2})(?P<title_6_index_2_0>[\..、\s\-包]?))|" \
-                                             "([\s★▲\*]*)(?P<title_15>(?P<title_15_index_0_0>[^一二三四五六七八九十\dⅠⅡⅢⅣⅤⅥⅦⅧⅨⅩⅪⅫ]{,3}?[((]?)(?P<title_15_index_1_1>\d{1,2})(?P<title_15_index_2_0>[))]))|" \
-                                             "([\s★▲\*]*)(?P<title_17>(?P<title_17_index_0_0>[^一二三四五六七八九十\dⅠⅡⅢⅣⅤⅥⅦⅧⅨⅩⅪⅫ]{,3}?[((]?)(?P<title_17_index_1_1>[a-wA-W]+)(?P<title_17_index_2_0>[))]))|" \
+                                             "([\s★▲\*]*)(?P<title_7>(?P<title_7_index_0_0>[^一二三四五六七八九十\dⅠⅡⅢⅣⅤⅥⅦⅧⅨⅩⅪⅫ]{,3}?\d{1,2}[\..\s\-])(?P<title_7_index_1_1>\d{1,2})(?P<title_7_index_2_0>[\..包::、\s\-]?))|" \
+                                             "([\s★▲\*]*)(?P<title_6>(?P<title_6_index_0_0>[^一二三四五六七八九十\dⅠⅡⅢⅣⅤⅥⅦⅧⅨⅩⅪⅫ]{,3}?包?)(?P<title_6_index_0_1>\d{1,2})(?P<title_6_index_2_0>[\..、\s\-包]?))|" \
+                                             "([\s★▲\*]*)(?P<title_15>(?P<title_15_index_0_0>[^一二三四五六七八九十\dⅠⅡⅢⅣⅤⅥⅦⅧⅨⅩⅪⅫ]{,3}?[((]?)(?P<title_15_index_1_1>\d{1,2})(?P<title_15_index_2_0>[))包标]))|" \
+                                             "([\s★▲\*]*)(?P<title_17>(?P<title_17_index_0_0>[^一二三四五六七八九十\dⅠⅡⅢⅣⅤⅥⅦⅧⅨⅩⅪⅫ]{,3}?[((]?)(?P<title_17_index_1_1>[a-wA-W]+)(?P<title_17_index_2_0>[))包标]))|" \
                                              "([\s★▲\*]*)(?P<title_19>(?P<title_19_index_0_0>[^一二三四五六七八九十\dⅠⅡⅢⅣⅤⅥⅦⅧⅨⅩⅪⅫ]{,3}?[((]?)(?P<title_19_index_1_1>[一二三四五六七八九十ⅠⅡⅢⅣⅤⅥⅦⅧⅨⅩⅪⅫ]+)(?P<title_19_index_2_0>[))]))" \
                               ):
         _se = re.search(_pattern,_text)
@@ -221,7 +222,6 @@ class ParseDocument():
                         return "一百"
                     else:
                         _next_title = self.make_increase(['一','二','三','四','五','六','七','八','九','十'],re.sub("[十百]",'',_title[0]))
-                        print("=_next_title",_next_title)
                         return _next_title+"十"
 
             _next_title = self.make_increase(['一','二','三','四','五','六','七','八','九','十'],re.sub("[十百]",'',_title))
@@ -257,6 +257,8 @@ class ParseDocument():
 
     def count_title_before(self,list_obj):
         dict_before = {}
+        dict_sentence_count = {}
+        illegal_sentence = set()
         for obj_i in range(len(list_obj)):
             obj = list_obj[obj_i]
             _type = "sentence"
@@ -268,6 +270,16 @@ class ParseDocument():
 
 
             if _type=="sentence":
+                if len(_text)>10 and len(_text)<100:
+                    if _text not in dict_sentence_count:
+                        dict_sentence_count[_text] = 0
+                    dict_sentence_count[_text] += 1
+                    if re.search("\d+页",_text) is not None:
+                        illegal_sentence.add(_text)
+                elif len(_text)<10:
+                    if re.search("第\d+页",_text) is not None:
+                        illegal_sentence.add(_text)
+
                 sentence_groups = self.find_title_by_pattern(_text[:10])
                 if sentence_groups:
                     # c062f53cf83401e671822003d63c1828print("sentence_groups",sentence_groups)
@@ -280,7 +292,11 @@ class ParseDocument():
                     if title_before not in dict_before:
                         dict_before[title_before] = 0
                     dict_before[title_before] += 1
-        return dict_before
+
+        for k,v in dict_sentence_count.items():
+            if v>10:
+                illegal_sentence.add(k)
+        return dict_before,illegal_sentence
 
     def is_page_no(self,sentence):
         if len(sentence)<10:
@@ -309,21 +325,21 @@ class ParseDocument():
         else:
             max_length = 40
 
-        print("max_length",max_length)
+        logger.debug("%s:%d"%("max_length",max_length))
 
 
         list_data = []
         last_table_index = None
         last_table_columns = None
         last_table = None
-        dict_before = self.count_title_before(list_obj)
+        dict_before,illegal_sentence = self.count_title_before(list_obj)
         for obj_i in range(len(list_obj)):
             obj = list_obj[obj_i]
             _type = "sentence"
             _text = standard_product(obj.text)
             if obj.name=="table":
                 _type = "table"
-                _text = str(obj)
+                _text = standard_product(str(obj))
             _append = False
             sentence_title = None
             sentence_title_text = None
@@ -340,10 +356,9 @@ class ParseDocument():
             block = False
 
             if _type=="sentence":
-                if self.is_page_no(_text):
+                if _text in illegal_sentence:
                     continue
 
-
                 _fix = False
                 for p in products:
                     if re.sub("^(\d[.、]?)+","",_text.strip())==p:
@@ -359,7 +374,6 @@ class ParseDocument():
                 if not _fix:
                     sentence_groups = self.find_title_by_pattern(_text[:10])
                     if sentence_groups:
-                        # c062f53cf83401e671822003d63c1828print("sentence_groups",sentence_groups)
                         title_before = standard_title_context(sentence_groups[1][1])
                         if title_before in dict_before and dict_before[title_before]>1:
                             sentence_title = sentence_groups[0][0]
@@ -488,7 +502,7 @@ class ParseDocument():
                                     _d["title_next"] = _data
                                     if len(_d["child_title"])>0:
                                         _d["child_title"][-1]["title_next"] = ""
-                                        self.block_tree(_d["child_title"])
+                                        # self.block_tree(_d["child_title"])
                                     if _d["parent_title"] is not None:
                                         _d["parent_title"]["child_title"].append(_data)
                                     _find = True
@@ -502,14 +516,13 @@ class ParseDocument():
                                     _d["title_next"] = _data
                                     if len(_d["child_title"])>0:
                                         _d["child_title"][-1]["title_next"] = ""
-                                        self.block_tree(_d["child_title"])
+                                        # self.block_tree(_d["child_title"])
                                     if _d["parent_title"] is not None:
                                         _d["parent_title"]["child_title"].append(_data)
                                     _find = True
                                     break
 
                             if not _find:
-                                print("not found",_text)
                                 if len(list_data)>0:
                                     for i in range(1,len(list_data)+1):
                                         _d = list_data[-i]
@@ -605,10 +618,10 @@ def extract_products(list_data,_product,_param_pattern = "产品名称|采购内
                         cell = line[cell_i]
                         cell_text = cell[0]
                         if re.search(_param_pattern,cell_text) is None or has_number:
-                            table_products.append(cell_text)
+                            if re.search("^[\da-zA-Z]+$",cell_text) is None:
+                                table_products.append(cell_text)
 
         if len(table_products)>0:
-            # print("table_products",table_products,_text)
             if min([len(x) for x in table_products])>0 and max([len(x) for x in table_products])<=20:
                 list_result.extend(table_products)
     list_result = list(set([a for a in list_result if len(a)>1 and len(a)<20 and re.search("预算|合计|金额|万元|运费",a) is None]))
@@ -651,7 +664,7 @@ def get_correct_product(product,products):
         is_sim = _d["is_sim"]
         if is_sim:
             if len(_d["product"])>len(product) and _d["product"].find(product)>=0:
-                return _product
+                return product
             return _d["product"]
     return product
 
@@ -665,11 +678,13 @@ def get_childs_text(childs,_product,products,is_begin=False,is_end=False):
         if child_text.find(_product)>=0:
             is_begin = True
 
-        print("get_childs_text",child_text[:10],is_begin)
+        logger.debug("%s-%s-%s"%("get_childs_text",child_text[:10],str(is_begin)))
 
         for p in products:
+            if child_text.find(p)>=0 and is_similar(_product,p,90):
+                is_begin = True
 
-            if child_text.find(_product)<0 and  child_text.find(p)>=0:
+            if child_text.find(_product)<0 and  child_text.find(p)>=0 and not is_similar(_product,p,80):
                 if is_begin:
                     is_end = True
                 break
@@ -699,6 +714,7 @@ def extract_parameters_by_tree(_product,products,list_data,_data_i,parent_title,
     childs = _data.get("child_title",[])
     if len(childs)>0:
         child_text,_,_ = get_childs_text([parent_title],_product,products)
+        logger.info("extract_parameters_by_tree child_text:%s"%child_text)
         if len(child_text)>0:
             list_result.append(child_text)
             return True
@@ -706,13 +722,7 @@ def extract_parameters_by_tree(_product,products,list_data,_data_i,parent_title,
         childs = parent_title.get("child_title",[])
         if len(childs)>0:
 
-            print(parent_title["text"])
-            for c in childs:
-                print("11",c["text"])
-                # print([a["text"] for a in c["child_title"]])
             range_data = get_range_data_by_childs(list_data[_data_i:],childs)
-            for c in range_data:
-                print("22",c["text"])
             p_text = ""
             _find = False
             for pdata in range_data:
@@ -732,7 +742,31 @@ def extract_parameters_by_tree(_product,products,list_data,_data_i,parent_title,
                 return True
     return False
 
-end_pattern = "商务要求|评分标准|商务条件|商务条件"
+
def get_table_pieces(_text,_product,products,list_result,_find):
    """Collect the <tr> rows of an HTML table that belong to one product.

    Iterates the rows of the first <table> in ``_text``. Collection is active
    while ``_find`` is True: it turns on at a row mentioning ``_product`` (or
    immediately when the caller passes ``_find=True``), turns off at a row
    mentioning a different product from ``products``, and stops entirely at a
    row matching ``end_pattern``. Collected rows are re-wrapped in a <table>
    and appended to ``list_result`` (mutated in place); nothing is returned.
    """
    _soup = BeautifulSoup(_text,"html5lib")
    _table = _soup.find("table")
    if _table is not None:
        trs = getTrs(_table)
        list_trs = []
        for tr in trs:
            tr_text = tr.get_text()
            if tr_text.find(_product)>=0:
                _find = True

            logger.debug("%s-%s"%("table_html_tr",tr_text))
            for p in products:
                # A row mentioning a *different* product ends this product's span.
                if _find and p!=_product and tr_text.find(p)>=0:
                    _find = False
                    break
            if re.search(end_pattern,tr_text) is not None:
                # Reached a business-terms/scoring section: stop collecting.
                _find = False
                break
            if _find:
                list_trs.append(tr)
        if len(list_trs)>0:
            table_html = "<table>%s</table>"%("\r\n".join([str(a) for a in list_trs]))
            list_result.append(table_html)
 
 def extract_parameters_by_table(_product,products,_param_pattern,list_data,_data_i,list_result):
     _data = list_data[_data_i]
@@ -749,12 +783,24 @@ def extract_parameters_by_table(_product,products,_param_pattern,list_data,_data
         last_data = list_data[_data_i-1]
         _flag = False
         if last_data["type"]=="sentence" and last_data["text"].find(_product)>=0:
+            logger.debug("last sentence find product %s-%s"%(_product,last_data["text"]))
             _flag = True
         # print(text_line_first,"text_line_first",re.search(_param_pattern,text_line_first) is not None and text_line_first.find(_product)>=0)
         if re.search(_param_pattern,text_line_first) is not None and text_line_first.find(_product)>=0:
             _flag = True
         if _flag:
-            list_result.append(_text)
+            logger.debug("extract_type add all table %s"%_text)
+            if len(products)==0:
+                list_result.append(_text)
+            else:
+                for p in products:
+                    if p!=_product and _text.find(p)>=0:
+                        logger.debug("extract_type add all table failed %s-%s"%(_product,p))
+                        _flag = False
+                        break
+                if _flag:
+                    logger.debug("extract_type add all table succeed")
+                    get_table_pieces(_text,_product,products,list_result,True)
         else:
             list_head_index = []
             for line in list_table[:2]:
@@ -775,60 +821,90 @@ def extract_parameters_by_table(_product,products,_param_pattern,list_data,_data
                                 continue
                             _cell = line[_index]
                             if len(cell[0])>0:
-                                print("add on table",_cell[0])
+                                logger.info("%s-%s"%("add on table",_cell[0]))
                                 list_result.append(_cell[0])
         if not _flag and (re.search(_param_pattern,_text) is not None or (parent_title is not None and re.search(_param_pattern,parent_title["text"]) is not None)) and _text.find(_product)>=0:
-            _soup = BeautifulSoup(_text,"html5lib")
-            _table = _soup.find("table")
-            if _table is not None:
-                trs = getTrs(_table)
-                list_trs = []
-                _find = False
-                for tr in trs:
-                    tr_text = tr.get_text()
-                    if tr_text.find(_product)>=0:
-                        _find = True
+            get_table_pieces(_text,_product,products,list_result,False)
+
 
-                    for p in products:
-                        if _find and p!=_product and tr_text.find(p)>=0:
-                            _find = False
-                            break
-                    if re.search(end_pattern,tr_text) is not None:
-                        _find = True
-                        break
-                    if _find:
-                        list_trs.append(tr)
-                if len(list_trs)>0:
-                    _html = "<table>%s</table>"%("\r\n".join([str(a) for a in list_trs]))
-                    print("_html",_html)
-                    list_result.append(_html)
 
def getBestProductText(list_result,_product,products):
    """Pick the most parameter-like candidate text for ``_product``.

    Sorts ``list_result`` in place so that candidates with the most
    measurement vocabulary / separators / numeric-unit tokens come first,
    then returns the first candidate that passes all sanity filters.
    Implicitly returns None when every candidate is rejected.
    """
    # Rank by how many measurement terms, ::;; separators, and number+unit
    # tokens appear in the rendered text -- a proxy for "parameter density".
    list_result.sort(key=lambda x:len(re.findall(meter_pattern+"|"+'[::;;]|\d+[%A-Za-z]+',BeautifulSoup(x,"html5lib").get_text())), reverse=True)

    logger.debug("+++++++++++++++++++++")
    for i in range(len(list_result)):
        logger.debug("result%d %s"%(i,list_result[i]))
    logger.debug("+++++++++++++++++++++")

    for i in range(len(list_result)):
        _result = list_result[i]
        _check = True
        _result_text = BeautifulSoup(_result,"html5lib").get_text()
        # Reject boilerplate announcement text (project number/name, joint bids).
        _search = re.search("项目编号[::]|项目名称[::]|联合体投标",_result)
        if _search is not None:
            logger.debug("result%d error illegal text %s"%(i,str(_search)))
            _check = False
        # For long or non-table candidates, reject text that also covers an
        # unrelated product (i.e. the extract crossed product boundaries).
        if not (len(_result_text)<1000 and _result[:6]!="<table"):
            for p in products:
                if _result_text.find(p)>0 and not (is_similar(_product,p,80) or p.find(_product)>=0 or _product.find(p)>=0):
                    logger.debug("result%d error product scoss %s"%(i,p))
                    _check = False
        # Very short text must at least contain measurement vocabulary.
        if len(_result_text)<50:
            if re.search(meter_pattern,_result_text) is None:
                logger.debug("result%d error text min count"%(i))
                _check = False
        # Overly long text is almost certainly more than one product's spec.
        if len(_result_text)>5000:
            logger.debug("result%d error text max count"%(i))
            _check = False
        if _check:
            return _result
 
 def extract_product_parameters(list_data,_product):
 
-    _param_pattern = "(产品|技术|清单[及和]?|配置|参数|具体|明细[及和]?|项目|货物)(指标|配置|要求|参数|需求|规格)|配置清单|(质量|技术).{,10}要求|验收标准|^参数$"
     list_result = []
     _product = standard_product(_product.strip())
     products = extract_products(list_data,_product)
 
     _product = get_correct_product(_product,products)
-    print("===",_product,products)
+    logger.debug("all products %s-%s"%(_product,str(products)))
     is_project = False
+    _find_count = 0
     for _data_i in range(len(list_data)):
         _data = list_data[_data_i]
         _type = _data["type"]
         _text = _data["text"]
         if _type=="sentence":
             if _text.find(_product)>=0:
+                _find_count += 1
                 if re.search("项目名称|采购项目",_text) is not None:
                    is_project = True
-                print("_text find",_text,_data["sentence_title"])
+                if re.search("项目名称|采购项目",_product) is not None:
+                    is_project = True
                 parent_title = _data.get("parent_title")
                 parent_text = ""
                 parent_parent_title = None
                 parent_parent_text = ""
                 parent_title_index = None
                 parent_parent_title_index = None
+                childs = get_childs([_data])
+
+
+                child_find = False
+                for c in childs:
+                    if re.search(_param_pattern,c["text"]) is not None and len(c["text"])<30:
+                        child_find = True
+                        break
+
+                extract_text,_,_ = get_childs_text([_data],_product,products)
+                logger.debug("childs found extract_text %s"%extract_text)
+                if child_find:
+                    if len(extract_text)>0:
+                        list_result.append(extract_text)
+                else:
+                    if len(_text)<len(_product)+10 and _data["sentence_title"] is not None:
+                        if re.search(meter_pattern,extract_text) is not None:
+                            list_result.append(extract_text)
+
                 if parent_title is not None:
                     parent_text = parent_title.get("text","")
                     parent_parent_title = parent_title.get("parent_title")
@@ -838,71 +914,56 @@ def extract_product_parameters(list_data,_product):
                         parent_parent_title_index = parent_parent_title["title_index"]
 
                 _suit = False
-                if re.search(_param_pattern,_text) is not None and (len(_text)<15 or _data["title_index"] is not None):
+                if re.search(_param_pattern,_text) is not None and len(_text)<50:
                     _suit = True
-                if re.search(_param_pattern,parent_text) is not None and (len(parent_text)<15 or parent_title_index is not None):
+                if re.search(_param_pattern,parent_text) is not None and len(parent_text)<50:
                     _suit = True
-                if re.search(_param_pattern,parent_parent_text) is not None and (len(parent_parent_text)<15 or parent_parent_title_index is not None):
+                if re.search(_param_pattern,parent_parent_text) is not None and len(parent_parent_text)<50:
                     _suit = True
                 if _suit:
+                    logger.debug("extract_type sentence %s"%("extract_parameters_by_tree"))
                     if not extract_parameters_by_tree(_product,products,list_data,_data_i,parent_title,list_result):
+                        logger.debug("extract_type sentence %s"%("extract_parameters_by_tree"))
                         extract_parameters_by_tree(_product,products,list_data,_data_i,parent_parent_title,list_result)
 
 
-            if re.search(_param_pattern,_text) is not None and (len(_text)<15 or _data["title_index"] is not None):
-                print("re.search(_param_pattern,_text)",_text,is_project)
+            if re.search(_param_pattern,_text) is not None and len(_text)<50:
                 childs = _data["child_title"]
-                if len(childs)>0 and (len(products)>0 or is_project):
-                    print("re.search(_param_pattern,_text)",_text,is_project)
+                if len(childs)>0:
+                    logger.debug("extract_type sentence %s"%("re.search(_param_pattern,_text) is not None and len(_text)<50:"))
                     extract_text,_,_ = get_childs_text([_data],_product,products)
-                    print("re.search(_param_pattern,_text)",extract_text)
                     if len(extract_text)>0:
                         list_result.append(extract_text)
+                    elif is_project:
+                        logger.debug("extract_type sentence is_project")
+                        extract_text,_,_ = get_childs_text([_data],_product,products,is_begin=True)
+                        if len(extract_text)>0 and re.search(meter_pattern,extract_text) is not None:
+                            list_result.append(extract_text)
 
 
         elif _type=="table":
+            if _text.find(_product)>=0:
+                _find_count += 1
             extract_parameters_by_table(_product,products,_param_pattern,list_data,_data_i,list_result)
 
 
-    # for i in range(len(list_result)):
-    #     print("result%d"%i,list_result[i])
-    list_result.sort(key=lambda x:len(re.findall('[^.、][0-9a-zA-Z:::;]+[^.、]?',BeautifulSoup(x,"html5lib").get_text())), reverse=True)
-
-    print("+++++++++++++++++++++")
-    for i in range(len(list_result)):
-        print("result%d"%i,list_result[i])
-    print("+++++++++++++++++++++")
-
-    for _result in list_result:
-        _check = True
-        _result_text = BeautifulSoup(_result,"html5lib").get_text()
-        if re.search("项目编号|项目名称|联合体投标",_result) is not None:
-            _check = False
-        for p in products:
-            if _result_text.find(p)>0 and not (is_similar(_product,p,80) or p.find(_product)>=0 or _product.find(p)>=0):
-                _check = False
-        if len(_result)<10:
-            _check = False
-        if len(_result_text)>5000:
-            _check = False
-        if _check:
-            return _result
-    return None
+    return getBestProductText(list_result,_product,products),_find_count
 
 
 if __name__ == '__main__':
 
-    filepath = "download/d5ceaa54bbdc6a28c7b8796ca71930a9.html"
-    _product = "肺功能测试系统"
+    filepath = "download/8679fef3a6fff56abcbdaccb1a190c80.html"
+    _product = "移液器"
 
     _html = open(filepath, "r", encoding="utf8").read()
 
-    pd = ParseDocument(_html,True)
+    pd = ParseDocument(_html,False)
 
     pd.fix_tree(_product)
     list_data = pd.tree
     pd.print_tree(list_data)
 
-    _text = extract_product_parameters(list_data,_product)
-    print("extract_text",_text)
+    _text,_count = extract_product_parameters(list_data,_product)
+    logger.info("find count:%d"%(_count))
+    logger.info("extract_text %s"%_text)
 

+ 2 - 0
BaseDataMaintenance/maintenance/product/productUtils.py

@@ -101,6 +101,7 @@ def get_embedding_search(coll,index_name,name,grade,vector,search_params,output_
                 db.set(_md5,json.dumps(final_list))
                 db.expire(_md5,PRODUCT_REDIS_CACHE_TIME)
             except Exception as e:
+                traceback.print_exc()
                 log("set redis data error")
             return final_list
 
@@ -143,6 +144,7 @@ def get_embedding_request(sentence,retry_times=3):
                     db.set(_md5,json.dumps(_embedding))
                     db.expire(_md5,60*60)
                 except Exception as e:
+                    traceback.print_exc()
                     log("set redis data error")
             return _embedding
     except Exception as e:

+ 17 - 8
BaseDataMaintenance/maintenance/product/product_attachment.py

@@ -19,6 +19,7 @@ parameter_status_to_process = 0
 parameter_status_process_succeed = 1
 parameter_status_process_failed = 2
 parameter_status_process_jump = 3
+parameter_status_not_found = 4
 
 class Product_Attachment_Processor():
 
@@ -65,7 +66,7 @@ class Product_Attachment_Processor():
                 break
             rows,next_token,total_count,is_all_succeed = self.ots_client.search(Document_product_table_name,Document_product_table_name+"_index",
                                                                                 SearchQuery(bool_query,next_token=next_token,limit=100,get_total_count=True),
-                                                                                ColumnsToGet([DOCUMENT_PRODUCT_BID_FILEMD5S],return_type=ColumnReturnType.SPECIFIED))
+                                                                                ColumnsToGet([DOCUMENT_PRODUCT_BID_FILEMD5S,DOCUMENT_PRODUCT_NAME,DOCUMENT_PRODUCT_ORIGINAL_NAME],return_type=ColumnReturnType.SPECIFIED))
             list_data = getRow_ots(rows)
             for data in list_data:
                 _id = data.get(DOCUMENT_PRODUCT_ID)
@@ -84,6 +85,7 @@ class Product_Attachment_Processor():
             list_product.append(product_name)
         if product_original_name is not None:
             list_product.extend(product_original_name.split("_"))
+        list_product = list(set(list_product))
         dp = Document_product(item)
         if bid_filemd5s is None or bid_filemd5s=="" or len(list_product)==0:
             dp.setValue(DOCUMENT_PRODUCT_PARAMETER_STATUS,parameter_status_no_bidfile)
@@ -91,6 +93,7 @@ class Product_Attachment_Processor():
             return
         list_filemd5 = bid_filemd5s.split(",")
         _find = False
+        _success = False
         for _filemd5 in list_filemd5:
             if _find:
                 break
@@ -138,7 +141,9 @@ class Product_Attachment_Processor():
                                 for _product in list_product:
                                     pd.fix_tree(_product)
                                     list_data = pd.tree
-                                    _text = extract_product_parameters(list_data,_product)
+                                    _text,_count = extract_product_parameters(list_data,_product)
+                                    if _count>0:
+                                        _find = True
                                     if _text is not None:
                                         list_text.append(_text)
                                 pd = ParseDocument(_html,False)
@@ -147,13 +152,15 @@ class Product_Attachment_Processor():
                                 for _product in list_product:
                                     pd.fix_tree(_product)
                                     list_data = pd.tree
-                                    _text = extract_product_parameters(list_data,_product)
+                                    _text,_count = extract_product_parameters(list_data,_product)
+                                    if _count>0:
+                                        _find = True
                                     if _text is not None:
                                         list_text.append(_text)
                                 if len(list_text)>0:
-                                    list_text.sort(key=lambda x:len(x),reverse=True)
+                                    list_text.sort(key=lambda x:len(re.findall('[::;;]',BeautifulSoup(x,"html5lib").get_text())), reverse=True)
                                     _text = list_text[0]
-                                    _find = True
+                                    _success = True
                                     dp.setValue(DOCUMENT_PRODUCT_PARAMETER,_text,True)
                                     dp.setValue(DOCUMENT_PRODUCT_PARAMETER_STATUS,parameter_status_process_succeed,True)
                                     dp.update_row(self.ots_client)
@@ -171,10 +178,12 @@ class Product_Attachment_Processor():
                             pass
 
         if not _find:
+            dp.setValue(DOCUMENT_PRODUCT_PARAMETER_STATUS,parameter_status_not_found,True)
+            dp.update_row(self.ots_client)
+        else:
             dp.setValue(DOCUMENT_PRODUCT_PARAMETER_STATUS,parameter_status_process_failed,True)
             dp.update_row(self.ots_client)
 
-
     def start_process(self):
         mt = MultiThreadHandler(self.product_attachment_queue,self.process_parameters_handler,None,3,need_stop=False,restart=True)
         mt.run()
@@ -210,9 +219,9 @@ def change_parameters_status():
     ],
                            must_not_queries=[
         TermQuery("parameter_status",parameter_status_to_process),
-        # TermQuery("parameter_status",parameter_status_process_succeed),
+        TermQuery("parameter_status",parameter_status_process_succeed),
         TermQuery("parameter_status",parameter_status_process_jump),
-        TermQuery("parameter_status",parameter_status_no_bidfile),
+        # TermQuery("parameter_status",parameter_status_no_bidfile),
 
     ])
     list_data = []

+ 39 - 27
BaseDataMaintenance/model/ots/document.py

@@ -320,26 +320,38 @@ def turn_document_status():
         #     ],
         #     # must_not_queries=[WildcardQuery("DX004354*")]
         # )
-        #
-        # rows,next_token,total_count,is_all_succeed = ots_client.search("document","document_index",
-        #                                                                SearchQuery(bool_query,sort=Sort(sorters=[FieldSort("docid",SortOrder.DESC)]),limit=100,get_total_count=True),
-        #                                                                columns_to_get=ColumnsToGet([document_area],return_type=ColumnReturnType.SPECIFIED))
-        # list_data = getRow_ots(rows)
-        # print(total_count)
-        # _count = len(list_data)
-        # for _data in list_data:
-        #     _document = Document(_data)
-        #     task_queue.put(_document)
-        # while next_token:
-        #     rows,next_token,total_count,is_all_succeed = ots_client.search("document","document_index",
-        #                                                                    SearchQuery(bool_query,next_token=next_token,limit=100,get_total_count=True),
-        #                                                                    columns_to_get=ColumnsToGet([document_area],return_type=ColumnReturnType.SPECIFIED))
-        #     list_data = getRow_ots(rows)
-        #     _count += len(list_data)
-        #     print("%d/%d"%(_count,total_count))
-        #     for _data in list_data:
-        #         _document = Document(_data)
-        #         task_queue.put(_document)
+        bool_query = BoolQuery(
+            must_queries=[
+                RangeQuery("crtime","2023-08-30 15:00:00","2023-08-30 23:59:59"),
+                NestedQuery("page_attachments",ExistsQuery("page_attachments.fileMd5"))
+            ],
+            must_not_queries=[WildcardQuery("attachmenttextcon","*")]
+
+        )
+
+        rows,next_token,total_count,is_all_succeed = ots_client.search("document","document_index",
+                                                                       SearchQuery(bool_query,sort=Sort(sorters=[FieldSort("docid",SortOrder.DESC)]),limit=100,get_total_count=True),
+                                                                       columns_to_get=ColumnsToGet(["docid"],return_type=ColumnReturnType.SPECIFIED))
+        list_data = getRow_ots(rows)
+        print(total_count)
+        _count = len(list_data)
+        for _data in list_data:
+            _document = Document(_data)
+            _attachment = _data.get(document_attachmenttextcon,"")
+            if _attachment=="":
+                task_queue.put(_document)
+        while next_token:
+            rows,next_token,total_count,is_all_succeed = ots_client.search("document","document_index",
+                                                                           SearchQuery(bool_query,next_token=next_token,limit=100,get_total_count=True),
+                                                                           columns_to_get=ColumnsToGet(["docid"],return_type=ColumnReturnType.SPECIFIED))
+            list_data = getRow_ots(rows)
+            _count += len(list_data)
+            print("%d/%d"%(_count,total_count))
+            for _data in list_data:
+                _document = Document(_data)
+                _attachment = _data.get(document_attachmenttextcon,"")
+                if _attachment=="":
+                    task_queue.put(_document)
 
         # docids = [223820830,224445409]
         # for docid in docids:
@@ -347,13 +359,13 @@ def turn_document_status():
         #              document_partitionkey:int(docid)%500+1,
         #              }
         #     task_queue.put(Document(_dict))
-        import pandas as pd
-        df = pd.read_excel("G:\\20221212error.xlsx")
-        for docid in df["docid"]:
-            _dict = {document_docid:int(docid),
-                     document_partitionkey:int(docid)%500+1,
-                     }
-            task_queue.put(Document(_dict))
+        # import pandas as pd
+        # df = pd.read_excel("G:\\20221212error.xlsx")
+        # for docid in df["docid"]:
+        #     _dict = {document_docid:int(docid),
+        #              document_partitionkey:int(docid)%500+1,
+        #              }
+        #     task_queue.put(Document(_dict))
         log("task_queue size:%d"%(task_queue.qsize()))
 
     def _handle(item,result_queue,ots_client):