Merge branch 'master' of http://192.168.2.103:3000/luojiehua/BaseDataMaintenance

znj, 1 year ago
commit 4e73d052ee

+ 1 - 0
.gitignore

@@ -5,3 +5,4 @@
 /attachmentProcessTime2.xlsx
 /BaseDataMaintenance/maintenance/attachment/2022-01-18_183521_export11.xlsx
 /BaseDataMaintenance/test/
+/BaseDataMaintenance/maintenance/product/download/

+ 53 - 0
BaseDataMaintenance/common/ERNIE_utils.py

@@ -0,0 +1,53 @@
+
+import requests
+import json
+
+def get_access_token():
+    """
+    Use the API Key and Secret Key to obtain an access_token; replace the application API Key and Secret Key in this example with your own.
+    """
+
+    url = "https://aip.baidubce.com/oauth/2.0/token?grant_type=client_credentials&client_id=gnwVXv96An9qMYqq9eWbeNqk&client_secret=mDsRQbCPsV4N7x28LbwkhTAaLmrrDnXk"
+
+    payload = json.dumps("")
+    headers = {
+        'Content-Type': 'application/json',
+        'Accept': 'application/json'
+    }
+
+    response = requests.request("POST", url, headers=headers, data=payload)
+    return response.json().get("access_token")
+
+def main():
+    url = "https://aip.baidubce.com/rpc/2.0/ai_custom/v1/wenxinworkshop/chat/completions?access_token=" + get_access_token()
+
+    payload = json.dumps({
+        "messages": [
+            {
+                "role": "user",
+                "content": '''
+                名称: 亚低温治疗仪
+
+品牌:GSZ
+
+规格型号:233
+
+数量:1台
+
+单价: 170000.00元
+以上的GSZ是什么牌子
+                '''
+            }
+        ]
+    })
+    headers = {
+        'Content-Type': 'application/json'
+    }
+
+    response = requests.request("POST", url, headers=headers, data=payload)
+
+    print(response.text)
+
+if __name__ == '__main__':
+    print(get_access_token())
+    main()
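
For reference, a minimal sketch of reading the generated text back out of the chat response (hedged: in Baidu's published wenxinworkshop examples the completion text comes back in a "result" field):

    response = requests.post(url, headers=headers, data=payload)
    answer = response.json().get("result", "")  # generated text; empty if the call failed
    print(answer)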

+ 42 - 28
BaseDataMaintenance/common/Utils.py

@@ -704,9 +704,10 @@ def getMultipleFactor(unit):
     '''
     @summary: get the numeric factor for a Chinese monetary unit
     '''
-    MultipleFactor = {"兆":Decimal(1000000000000),"亿":Decimal(100000000),"万":Decimal(10000),"仟":Decimal(1000),"千":Decimal(1000),"佰":Decimal(100),"百":Decimal(100),"拾":Decimal(10),"十":Decimal(10),"元":Decimal(1),"角":round(Decimal(0.1),1),"分":round(Decimal(0.01),2)}
+    MultipleFactor = {"兆":Decimal(1000000000000),"亿":Decimal(100000000),"万":Decimal(10000),"仟":Decimal(1000),"千":Decimal(1000),"佰":Decimal(100),"百":Decimal(100),"拾":Decimal(10),"十":Decimal(10),"元":Decimal(1),"圆":Decimal(1),"角":round(Decimal(0.1),1),"分":round(Decimal(0.01),2)}
     return MultipleFactor.get(unit)
 
+
 def getUnifyMoney(money):
     '''
     @summary: convert a Chinese amount string into a numeric Decimal
@@ -715,41 +716,54 @@ def getUnifyMoney(money):
     @return: Decimal, the numeric amount
     '''
 
-
+    MAX_MONEY = 1000000000000
     MAX_NUM = 12
     # strip comma separators
     money = re.sub("[,,]","",money)
-    money = re.sub("[^0-9.零壹贰叁肆伍陆柒捌玖拾佰仟萬億〇一二三四五六七八九十百千万亿元角分]","",money)
+    money = re.sub("[^0-9.零壹贰叁肆伍陆柒捌玖拾佰仟萬億十百千万亿元角分]","",money)
     result = Decimal(0)
-    chnDigits = ["零", "壹", "贰", "叁", "肆", "伍", "陆", "柒", "捌", "玖"]
-    chnFactorUnits = ["兆", "亿", "万", "仟", "佰", "拾","元","角","分"]
+    chnDigits = ["零", "壹", "贰", "叁", "肆", "伍", "陆", "柒", "捌", "玖","一","二","三","四","五","六","七","八","九"]
+    # chnFactorUnits = ["兆", "亿", "万", "仟", "佰", "拾","圆","元","角","分"]
+    chnFactorUnits = ["圆", "元","兆", "亿", "万", "仟", "佰", "拾", "角", "分", '十', '百', '千']
 
     LowMoneypattern = re.compile("^[\d,]+(\.\d+)?$")
     BigMoneypattern = re.compile("^零?(?P<BigMoney>[%s])$"%("".join(chnDigits)))
-    if re.search(LowMoneypattern,money) is not None:
-        return Decimal(money)
-    elif re.search(BigMoneypattern,money) is not None:
-        return getDigitsDic(re.search(BigMoneypattern,money).group("BigMoney"))
-    for factorUnit in chnFactorUnits:
-        if re.search(re.compile(".*%s.*"%(factorUnit)),money) is not None:
-            subMoneys = re.split(re.compile("%s(?!.*%s.*)"%(factorUnit,factorUnit)),money)
-            if re.search(re.compile("^(\d+(,)?)+(\.\d+)?$"),subMoneys[0]) is not None:
-                result += Decimal(subMoneys[0])*(getMultipleFactor(factorUnit))
-            elif len(subMoneys[0])==1:
-                if re.search(re.compile("^[%s]$"%("".join(chnDigits))),subMoneys[0]) is not None:
-                    result += Decimal(getDigitsDic(subMoneys[0]))*(getMultipleFactor(factorUnit))
-            else:
-                result += Decimal(getUnifyMoney(subMoneys[0]))*(getMultipleFactor(factorUnit))
-
-            if len(subMoneys)>1:
-                if re.search(re.compile("^(\d+(,)?)+(\.\d+)?[百千万亿]?\s?(元)?$"),subMoneys[1]) is not None:
-                    result += Decimal(subMoneys[1])
-                elif len(subMoneys[1])==1:
-                    if re.search(re.compile("^[%s]$"%("".join(chnDigits))),subMoneys[1]) is not None:
-                        result += Decimal(getDigitsDic(subMoneys[1]))
+    try:
+        if re.search(LowMoneypattern,money) is not None:
+            return Decimal(money)
+        elif re.search(BigMoneypattern,money) is not None:
+            return getDigitsDic(re.search(BigMoneypattern,money).group("BigMoney"))
+        for factorUnit in chnFactorUnits:
+            if re.search(re.compile(".*%s.*"%(factorUnit)),money) is not None:
+                subMoneys = re.split(re.compile("%s(?!.*%s.*)"%(factorUnit,factorUnit)),money)
+                if re.search(re.compile("^(\d+)(\.\d+)?$"),subMoneys[0]) is not None:
+                    if MAX_MONEY/getMultipleFactor(factorUnit)<Decimal(subMoneys[0]):
+                        return Decimal(0)
+                    result += Decimal(subMoneys[0])*(getMultipleFactor(factorUnit))
+                elif len(subMoneys[0])==1:
+                    if re.search(re.compile("^[%s]$"%("".join(chnDigits))),subMoneys[0]) is not None:
+                        result += Decimal(getDigitsDic(subMoneys[0]))*(getMultipleFactor(factorUnit))
+                # subMoneys[0] contains no monetary unit and cannot be split further
+                elif subMoneys[0]=="":
+                    result += 0
+                elif re.search(re.compile("[%s]"%("".join(chnFactorUnits))),subMoneys[0]) is None:
+                    # print(subMoneys)
+                    # subMoneys[0] = subMoneys[0][0]
+                    result += Decimal(getUnifyMoney(subMoneys[0])) * (getMultipleFactor(factorUnit))
                 else:
-                    result += Decimal(getUnifyMoney(subMoneys[1]))
-            break
+                    result += Decimal(getUnifyMoney(subMoneys[0]))*(getMultipleFactor(factorUnit))
+                if len(subMoneys)>1:
+                    if re.search(re.compile("^(\d+(,)?)+(\.\d+)?[百千万亿]?\s?(元)?$"),subMoneys[1]) is not None:
+                        result += Decimal(subMoneys[1])
+                    elif len(subMoneys[1])==1:
+                        if re.search(re.compile("^[%s]$"%("".join(chnDigits))),subMoneys[1]) is not None:
+                            result += Decimal(getDigitsDic(subMoneys[1]))
+                    else:
+                        result += Decimal(getUnifyMoney(subMoneys[1]))
+                break
+    except Exception as e:
+        # traceback.print_exc()
+        return Decimal(0)
     return result
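
A quick sanity check of the conversion above (expected values traced by hand from the unit table; import path assumed):

    from decimal import Decimal
    from BaseDataMaintenance.common.Utils import getUnifyMoney

    assert getUnifyMoney("80000.00元") == Decimal("80000.00")
    assert getUnifyMoney("3.5万元") == Decimal(35000)
    assert getUnifyMoney("壹拾万元") == Decimal(100000)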
 
 

+ 4 - 2
BaseDataMaintenance/dataSource/interface.py

@@ -20,7 +20,7 @@ DEFAULT_TIMEOUT = 3000
 import traceback
 import base64
 
-def getAttachDealInterface(_data,_type,path="",restry=1):
+def getAttachDealInterface(_data,_type,path="",restry=1,kwargs={},url=interface_url,timeout=DEFAULT_TIMEOUT):
     _succeed = False
     _html = ""
     swf_images = []
@@ -32,8 +32,10 @@ def getAttachDealInterface(_data,_type,path="",restry=1):
             else:
                 _json = {"file":_data,
                         "type":_type}
+            if len(kwargs.keys())>0:
+                _json.update(kwargs)
             headers = {"Content-Type":"application/json"}
-            _resp = requests.post(interface_url,data=_json,timeout=DEFAULT_TIMEOUT)
+            _resp = requests.post(url,data=_json,timeout=timeout)
 
             if _resp.status_code==200:
                 _result = json.loads(_resp.content.decode())
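
A hypothetical call through the widened signature (_base64_data stands in for the base64-encoded file; the session_id field and the longer timeout are illustrative, not confirmed options of the attachment service):

    # extra kwargs entries are merged into the request JSON before posting
    result = getAttachDealInterface(_base64_data, "pdf",
                                    kwargs={"session_id": "demo"},
                                    url=interface_url, timeout=6000)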

+ 30 - 0
BaseDataMaintenance/embedding_services.py

@@ -0,0 +1,30 @@
+
+
+import os
+
+os.environ["CUDA_VISIBLE_DEVICES"] = "1"
+
+from flask import Flask,request,jsonify
+app = Flask(__name__)
+
+from BaseDataMaintenance.common.sentencesUtil import *
+
+@app.route("/embedding",methods=["POST"])
+def embedding():
+    _r = {"success": True}
+    try:
+        sentence = request.json.get("sentence","")
+        vector = get_normalized_vector(sentence)
+        _r["vector"] = vector
+    except Exception as e:
+        _r["success"] = False
+
+
+    return jsonify(_r)
+
+## How to launch
+## Place this file in the same directory that contains the BaseDataMaintenance package
+## nohup /data/anaconda3/envs/py37/bin/gunicorn -w 1 --limit-request-fields 0 --limit-request-line 0 -t 1000 --keep-alive 600 -b 0.0.0.0:17130 embedding_services:app >> embedding.log &
+
+if __name__ == "__main__":
+    app.run(host="0.0.0.0",port="15010",debug=True)
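
A minimal client sketch against the endpoint above (hedged: port 15010 applies when started via app.run as shown, 17130 when launched with the gunicorn command):

    import requests

    resp = requests.post("http://127.0.0.1:15010/embedding",
                         json={"sentence": "血压监护仪"})
    _r = resp.json()
    if _r.get("success"):
        print(len(_r.get("vector", [])))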

+ 2 - 2
BaseDataMaintenance/fixDoc_to_queue_extract.py

@@ -7,5 +7,5 @@ from BaseDataMaintenance.maintenance.dataflow_mq import fixDoc_to_queue_extract,
 
 
 if __name__ == '__main__':
-    fixDoc_to_queue_extract()
-    # fixDoc_to_queue_init()
+    # fixDoc_to_queue_extract()
+    fixDoc_to_queue_init(filename="/data/python/flow_init_check/flow_init_2023-08-30.xlsx")

+ 14 - 9
BaseDataMaintenance/maintenance/dataflow.py

@@ -2276,6 +2276,9 @@ class Dataflow_dumplicate(Dataflow):
         fingerprint_less = document_less["fingerprint"]
         extract_count_less = document_less["extract_count"]
         web_source_no_less = document_less.get("web_source_no")
+        province_less = document_less.get("province")
+        city_less = document_less.get("city")
+        district_less = document_less.get("district")
 
         document_greater = _dict2
         docid_greater = _dict2["docid"]
@@ -2296,12 +2299,15 @@ class Dataflow_dumplicate(Dataflow):
         fingerprint_greater = document_greater["fingerprint"]
         extract_count_greater = document_greater["extract_count"]
         web_source_no_greater = document_greater.get("web_source_no")
+        province_greater = document_greater.get("province")
+        city_greater = document_greater.get("city")
+        district_greater = document_greater.get("district")
 
         hard_level=1
         if web_source_no_less==web_source_no_greater=="17397-3":
             hard_level=2
 
-        return check_dumplicate_rule(docid_less,docid_greater,fingerprint_less,fingerprint_greater,project_codes_less,project_codes_greater,tenderee_less,tenderee_greater,agency_less,agency_greater,win_tenderer_less,win_tenderer_greater,bidding_budget_less,bidding_budget_greater,win_bid_price_less,win_bid_price_greater,project_name_less,project_name_greater,doctitle_refine_less,doctitle_refine_greater,extract_count_less,extract_count_greater,docchannel_less,docchannel_greater,page_time_less,page_time_greater,product_less,product_greater,nlp_enterprise_less,nlp_enterprise_greater,package_less,package_greater,json_time_less,json_time_greater,min_counts,b_log=b_log,hard_level=hard_level)
+        return check_dumplicate_rule(docid_less,docid_greater,fingerprint_less,fingerprint_greater,project_codes_less,project_codes_greater,tenderee_less,tenderee_greater,agency_less,agency_greater,win_tenderer_less,win_tenderer_greater,bidding_budget_less,bidding_budget_greater,win_bid_price_less,win_bid_price_greater,project_name_less,project_name_greater,doctitle_refine_less,doctitle_refine_greater,extract_count_less,extract_count_greater,docchannel_less,docchannel_greater,page_time_less,page_time_greater,product_less,product_greater,nlp_enterprise_less,nlp_enterprise_greater,package_less,package_greater,json_time_less,json_time_greater,province_less,province_greater,city_less,city_greater,district_less,district_greater,min_counts,b_log=b_log,hard_level=hard_level)
 
 
     def dumplicate_check_bak(self,_dict1,_dict2,min_counts,b_log=False):
@@ -2535,8 +2541,6 @@ class Dataflow_dumplicate(Dataflow):
                             _dict["confidence"] = confidence
                             _dict["min_counts"] = total_count
 
-                            print("check====",item.get("docid"),_dict.get("docid"),confidence)
-
                             if not confidence<0.1:
                                 list_data.append(_dict)
                 all_time = time.time()-_time
@@ -2794,7 +2798,7 @@ class Dataflow_dumplicate(Dataflow):
 
         return list_rules,table_name,table_index
 
-    def producer_flow_dumplicate(self,process_count,status_from,columns=[document_tmp_status,document_tmp_save,document_tmp_page_time,document_tmp_docchannel,document_tmp_tenderee,document_tmp_agency,document_tmp_doctitle,document_tmp_sub_docs_json,document_tmp_extract_json,document_attachment_extract_status,document_update_document]):
+    def producer_flow_dumplicate(self,process_count,status_from,columns=[document_tmp_status,document_tmp_save,document_tmp_page_time,document_tmp_docchannel,document_tmp_tenderee,document_tmp_agency,document_tmp_doctitle,document_tmp_sub_docs_json,document_tmp_extract_json,document_attachment_extract_status,document_update_document,document_province,document_city,document_district]):
         q_size = self.queue_dumplicate.qsize()
         log("dumplicate queue size %d"%(q_size))
         if q_size>process_count//3:
@@ -2843,8 +2847,8 @@ class Dataflow_dumplicate(Dataflow):
 
     def flow_dumpcate_comsumer(self):
         from multiprocessing import Process
-        process_count = 3
-        thread_count = 15
+        process_count = 2
+        thread_count = 20
         list_process = []
         def start_thread():
             mt = MultiThreadHandler(self.queue_dumplicate,self.dumplicate_comsumer_handle,None,thread_count,1,need_stop=False,restart=True,timeout=600,ots_client=self.ots_client)
@@ -3890,7 +3894,7 @@ class Dataflow_dumplicate(Dataflow):
                 singleNum_keys = _rule["singleNum_keys"]
                 contain_keys = _rule["contain_keys"]
                 multiNum_keys = _rule["multiNum_keys"]
-                self.add_data_by_query(item,base_list,set_docid,_query,confidence,table_name=table_name,table_index=table_index,singleNum_keys=singleNum_keys,contain_keys=contain_keys,multiNum_keys=multiNum_keys,columns=[document_tmp_status,document_tmp_save,document_tmp_page_time,document_tmp_docchannel,document_tmp_tenderee,document_tmp_agency,document_tmp_doctitle_refine,document_tmp_sub_docs_json,document_tmp_extract_json,document_tmp_web_source_no,document_tmp_fingerprint,document_attachment_extract_status])
+                self.add_data_by_query(item,base_list,set_docid,_query,confidence,table_name=table_name,table_index=table_index,singleNum_keys=singleNum_keys,contain_keys=contain_keys,multiNum_keys=multiNum_keys,columns=[document_tmp_status,document_tmp_save,document_tmp_page_time,document_tmp_docchannel,document_tmp_tenderee,document_tmp_agency,document_tmp_doctitle_refine,document_tmp_sub_docs_json,document_tmp_extract_json,document_tmp_web_source_no,document_tmp_fingerprint,document_attachment_extract_status,document_province,document_city,document_district])
                 _i += step
 
 
@@ -4066,7 +4070,8 @@ class Dataflow_dumplicate(Dataflow):
 
 
     def test_dumplicate(self,docid):
-        columns=[document_tmp_status,document_tmp_page_time,document_tmp_docchannel,document_tmp_tenderee,document_tmp_agency,document_tmp_doctitle,document_tmp_sub_docs_json,document_tmp_extract_json,document_tmp_web_source_no,document_tmp_fingerprint,document_attachment_extract_status]
+        # columns=[document_tmp_status,document_tmp_page_time,document_tmp_docchannel,document_tmp_tenderee,document_tmp_agency,document_tmp_doctitle,document_tmp_sub_docs_json,document_tmp_extract_json,document_tmp_web_source_no,document_tmp_fingerprint,document_attachment_extract_status]
+        columns=[document_tmp_status,document_tmp_save,document_tmp_page_time,document_tmp_docchannel,document_tmp_tenderee,document_tmp_agency,document_tmp_doctitle,document_tmp_sub_docs_json,document_tmp_extract_json,document_attachment_extract_status,document_update_document,document_province,document_city,document_district]
         bool_query = BoolQuery(must_queries=[
             TermQuery("docid",docid)
         ])
@@ -4168,7 +4173,7 @@ if __name__ == '__main__':
     df_dump = Dataflow_dumplicate(start_delete_listener=False)
     # df_dump.start_flow_dumplicate()
     a = time.time()
-    df_dump.test_dumplicate(350493205)
+    df_dump.test_dumplicate(349638765)
     # df_dump.test_merge([292315564],[287890754])
     # df_dump.flow_remove_project_tmp()
     print("takes",time.time()-a)

+ 5 - 2
BaseDataMaintenance/maintenance/dataflow_mq.py

@@ -765,7 +765,8 @@ class Dataflow_ActivteMQ_extract(Dataflow_extract):
                     _soup = BeautifulSoup(_dochtmlcon,"lxml")
                     if len(_dochtmlcon)>200000:
                         _find = _soup.find("div",attrs={"class":"richTextFetch"})
-                        _find.decompose()
+                        if _find is not None:
+                            _find.decompose()
                     else:
                         _soup = article_limit(_soup,50000)
                     _dochtmlcon = str(_soup)
@@ -1509,7 +1510,8 @@ def fixDoc_to_queue_init(filename=""):
     if filename=="":
         filename = os.path.join(current_path,"check.xlsx")
     df = pd.read_excel(filename)
-    dict_oracle2ots.pop("docchannel")
+    if "docchannel" in dict_oracle2ots:
+        dict_oracle2ots.pop("docchannel")
     row_name = ",".join(list(dict_oracle2ots.keys()))
     conn = getConnection_oracle()
     cursor = conn.cursor()
@@ -1522,6 +1524,7 @@ def fixDoc_to_queue_init(filename=""):
             cursor.execute(sql)
             log("%d:%s"%(_count,sql))
     conn.commit()
+    conn.close()
 
 if __name__ == '__main__':
     # di = Dataflow_init()

+ 22 - 21
BaseDataMaintenance/maintenance/preproject/fillColumns.py

@@ -16,27 +16,6 @@ class PreprojectFill():
         self.task_queue = Queue(3000)
         self.fill_concat_queue = Queue(10000)
 
-    def fill_producer(self):
-        q1 = BoolQuery(should_queries=[WildcardQuery("uuid","*"),
-                                          RangeQuery("has_bidfile",0)])
-        columns = ["uuid","has_bidfile","json_docids"]
-        query = BoolQuery(must_not_queries=[q1])
-        rows,next_token,total_count,is_all_succeed = self.ots_client.search("preproject","preproject_index",
-                                                                            SearchQuery(query,sort=Sort(sorters=[FieldSort("uuid")]),get_total_count=True,limit=100),
-                                                                            ColumnsToGet(columns,ColumnReturnType.SPECIFIED))
-        while True:
-            if len(rows)>0:
-                dict_rows = getRow_ots(rows)
-                for _row in dict_rows:
-                    self.task_queue.put(_row)
-            else:
-                break
-            rows,next_token,total_count,is_all_succeed = self.ots_client.search("preproject","preproject_index",
-                                                                                SearchQuery(query,next_token=next_token,get_total_count=True,limit=100),
-                                                                                ColumnsToGet(columns,ColumnReturnType.SPECIFIED))
-
-
-
     def fill_comsumer(self):
 
         def comsumer_handle(_row,result_queue):
@@ -65,6 +44,27 @@ class PreprojectFill():
         _mul = MultiThreadHandler(self.task_queue,comsumer_handle,None,10)
         _mul.run()
 
+
+
+    def fill_producer(self):
+        q1 = BoolQuery(should_queries=[WildcardQuery("uuid","*"),
+                                          RangeQuery("has_bidfile",0)])
+        columns = ["uuid","has_bidfile","json_docids"]
+        query = BoolQuery(must_not_queries=[q1])
+        rows,next_token,total_count,is_all_succeed = self.ots_client.search("preproject","preproject_index",
+                                                                            SearchQuery(query,sort=Sort(sorters=[FieldSort("uuid")]),get_total_count=True,limit=100),
+                                                                            ColumnsToGet(columns,ColumnReturnType.SPECIFIED))
+        while True:
+            if len(rows)>0:
+                dict_rows = getRow_ots(rows)
+                for _row in dict_rows:
+                    self.task_queue.put(_row)
+            else:
+                break
+            rows,next_token,total_count,is_all_succeed = self.ots_client.search("preproject","preproject_index",
+                                                                                SearchQuery(query,next_token=next_token,get_total_count=True,limit=100),
+                                                                                ColumnsToGet(columns,ColumnReturnType.SPECIFIED))
+
     def fill_contact_producer(self):
         q1 = BoolQuery(must_queries=[TermQuery("status",1),
                                        ])
@@ -193,6 +193,7 @@ class PreprojectFill():
 
             _row["status"] = 0
             _preproject = Preproject(_row)
+            _preproject.setValue("status",2,True)
             _preproject.update_row(self.ots_client)
 
         _mul = MultiThreadHandler(self.fill_concat_queue,comsumer_handle,None,20)

+ 37 - 4
BaseDataMaintenance/maintenance/preproject/remove_dump.py

@@ -28,8 +28,7 @@ def drop_dump_data():
     log("to drop preproject dump data:%d"%total_count)
     for _data in list_data:
         task_queue.put(_data)
-    mt = MultiThreadHandler(task_queue,drop_data,None,30)
-    mt.run()
+
     while next_token:
         rows,next_token,total_count,is_all_succeed = ots_client.search("preproject_dump","preproject_dump_index",
                                                                        SearchQuery(bool_query,next_token=next_token,get_total_count=True,limit=100),
@@ -37,7 +36,40 @@ def drop_dump_data():
         list_data = getRow_ots(rows)
         for _data in list_data:
             task_queue.put(_data)
-        mt.run()
+    mt = MultiThreadHandler(task_queue,drop_data,None,30)
+    mt.run()
+
+def drop_data():
+
+    def drop_data(item,result_queue):
+        preproject = Preproject(item)
+        preproject.delete_row(ots_client)
+
+
+    task_queue = Queue()
+    ots_client = getConnect_ots()
+    bool_query = BoolQuery(must_queries=[
+        TermQuery("status",5)
+    ]
+    )
+    rows,next_token,total_count,is_all_succeed = ots_client.search("preproject","preproject_index",
+                                                                   SearchQuery(bool_query,sort=Sort(sorters=[FieldSort(preproject_tenderee)]),get_total_count=True,limit=100),
+                                                                   columns_to_get=ColumnsToGet(return_type=ColumnReturnType.NONE))
+    list_data = getRow_ots(rows)
+    log("to drop preproject dump data:%d"%total_count)
+    for _data in list_data:
+        task_queue.put(_data)
+
+    while next_token:
+        rows,next_token,total_count,is_all_succeed = ots_client.search("preproject","preproject_index",
+                                                                       SearchQuery(bool_query,next_token=next_token,get_total_count=True,limit=100),
+                                                                       columns_to_get=ColumnsToGet(return_type=ColumnReturnType.NONE))
+        list_data = getRow_ots(rows)
+        for _data in list_data:
+            task_queue.put(_data)
+        print(task_queue.qsize(),total_count)
+    mt = MultiThreadHandler(task_queue,drop_data,None,30)
+    mt.run()
 
 def start_drop_preproject_dump():
 
@@ -46,5 +78,6 @@ def start_drop_preproject_dump():
     scheduler.start()
 
 if __name__ == '__main__':
-    drop_dump_data()
+    # drop_dump_data()
+    drop_data()
     # start_drop_preproject_dump()

+ 13 - 79
BaseDataMaintenance/maintenance/product/1.py

@@ -1,81 +1,15 @@
 
 
-from fuzzywuzzy import fuzz
-import Levenshtein
-
-
-s1 = "abcd"
-s2 = "abcdefgh"
-
-print(fuzz.ratio(s1,s2))
-print(Levenshtein.ratio(s1,s2))
-
-
-print(Levenshtein.jaro("1abdd","1abbd"))
-
-print((4/5+4/5+4/4)/3)
-print((5/5+5/5+3/5)/3)
-
-from sklearn.cluster import KMeans
-
-
-km = KMeans(n_clusters=2)
-x = [[1,1,22,2,2,2,2],
-     [3,1,22,2,2,2,2],
-     [1.5,1,22,2,2,2,2]]
-km.fit(x)
-
-a = '''
-bidding_budget double,
-    brand_specs string,
-    province string,
-    city STRING,
-    district string,
-    create_time string,
-    dict_name_id string,
-    docchannel bigint,
-    docid bigint,
-    doctitle string,
-    full_name string,
-    industry string,
-    info_type string,
-    page_time string,
-    page_time_year string,
-    procurement_system STRING,
-    project_code string,
-    project_name string,
-    quantity bigint,
-    quantity_unit string,
-    supplier string,
-    tenderee string,
-    tenderee_contact string,
-    tenderee_phone string,
-    update_time string,
-    win_bid_price double,
-    win_tenderer string,
-    win_tenderer_manager string,
-    win_tenderer_phone string,
-    dict_brand_id string,
-    dict_specs_id string,
-    dump_id string,
-    total_price double,
-    unit_price double,
-    bid_filemd5s string
-'''
-
-list_c = []
-for b in a.split("\n"):
-     c = b.strip()
-     if c=="":
-          continue
-     d = c.split(" ")[0]
-     list_c.append(d)
-print(",".join(list_c))
-
-print("BENEHEARTD6".lower()=="BeneHeartD6".lower())
-
-
-
-
-
-
+import re
+pattern="(^|★|:|:|\s+)(?P<title_1>(?P<title_1_index_0_0>第?)(?P<title_1_index_1_1>[一二三四五六七八九十ⅠⅡⅢⅣⅤⅥⅦⅧⅨⅩⅪⅫ]+)(?P<title_1_index_2_0>[、章册部\.::]))|" \
+        "([\s★\*]*)(?P<title_3>(?P<title_3_index_0_0>[^一二三四五六七八九十\dⅠⅡⅢⅣⅤⅥⅦⅧⅨⅩⅪⅫ]{,3}?)(?P<title_3_index_0_1>[ⅠⅡⅢⅣⅤⅥⅦⅧⅨⅩⅪⅫ]+)(?P<title_3_index_0_2>))|" \
+        "([\s★\*]*)(?P<title_4>(?P<title_4_index_0_0>[^一二三四五六七八九十\dⅠⅡⅢⅣⅤⅥⅦⅧⅨⅩⅪⅫ]{,3}?第?)(?P<title_4_index_1_1>[一二三四五六七八九十]+)(?P<title_4_index_2_0>[节章册部\.::、、]))|" \
+        "([\s★\*]*)(?P<title_11>(?P<title_11_index_0_0>[^一二三四五六七八九十\dⅠⅡⅢⅣⅤⅥⅦⅧⅨⅩⅪⅫ]{,3}?\d{1,2}[\..、\s\-]\d{1,2}[\..、\s\-]\d{1,2}[\..、\s\-])(?P<title_11_index_1_1>\d{1,2})(?P<title_11_index_2_0>[\..、\s\-]?))|" \
+        "([\s★\*]*)(?P<title_10>(?P<title_10_index_0_0>[^一二三四五六七八九十\dⅠⅡⅢⅣⅤⅥⅦⅧⅨⅩⅪⅫ]{,3}?\d{1,2}[\..、\s\-]\d{1,2}[\..、\s\-])(?P<title_10_index_1_1>\d{1,2})(?P<title_10_index_2_0>[\..、\s\-]?))|" \
+        "([\s★\*]*)(?P<title_7>(?P<title_7_index_0_0>[^一二三四五六七八九十\dⅠⅡⅢⅣⅤⅥⅦⅧⅨⅩⅪⅫ]{,3}?\d{1,2}[\..、\s\-])(?P<title_7_index_1_1>\d{1,2})(?P<title_7_index_2_0>[\..、\s\-]?))|" \
+        "([\s★\*]*)(?P<title_6>(?P<title_6_index_0_0>[^一二三四五六七八九十\dⅠⅡⅢⅣⅤⅥⅦⅧⅨⅩⅪⅫ]{,3}?包?)(?P<title_6_index_0_1>\d{1,2})(?P<title_6_index_2_0>[\..、\s\-]?))|" \
+        "([\s★\*]*)(?P<title_15>(?P<title_15_index_0_0>[^一二三四五六七八九十\dⅠⅡⅢⅣⅤⅥⅦⅧⅨⅩⅪⅫ]{,3}?(?)(?P<title_15_index_1_1>\d{1,2})(?P<title_15_index_2_0>)))|" \
+        "([\s★\*]*)(?P<title_17>(?P<title_17_index_0_0>[^一二三四五六七八九十\dⅠⅡⅢⅣⅤⅥⅦⅧⅨⅩⅪⅫ]{,3}?(?)(?P<title_17_index_1_1>[a-wA-W]+)(?P<title_17_index_2_0>)))|" \
+        "([\s★\*]*)(?P<title_19>(?P<title_19_index_0_0>[^一二三四五六七八九十\dⅠⅡⅢⅣⅤⅥⅦⅧⅨⅩⅪⅫ]{,3}?[((]?)(?P<title_19_index_1_1>[一二三四五六七八九十ⅠⅡⅢⅣⅤⅥⅦⅧⅨⅩⅪⅫ]+)(?P<title_19_index_2_0>[))]))" \
+        ""
+print(re.search(pattern,"(一)4K内窥镜荧光摄像系统主机").groupdict())
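
On this sample string the title_19 alternative is the one that can fire, so the printed groupdict should show, roughly (all other keys None):

    # 'title_19': '(一)', 'title_19_index_0_0': '(',
    # 'title_19_index_1_1': '一', 'title_19_index_2_0': ')'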

+ 969 - 0
BaseDataMaintenance/maintenance/product/htmlparser.py

@@ -0,0 +1,969 @@
+#coding:utf8
+
+import re
+
+from BaseDataMaintenance.maintenance.product.productUtils import *
+import logging
+logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
+
+logger = logging.getLogger(__name__)
+logger.setLevel(logging.DEBUG)
+
+
+
+
+from bs4 import BeautifulSoup
+import copy
+
+end_pattern = "商务要求|评分标准|商务条件"
+_param_pattern = "(产品|技术|清单[及和]?|配置|参数|具体|明细[及和]?|项目|货物|服务)(指标|配置|要求|参数|需求|规格)|配置清单|(质量|技术).{,10}要求|验收标准|^参数$"
+meter_pattern = "角度|容积|色彩|帧率|磁场|强度|允差|噪音|材质|频率|阻抗|浓度|范围|误差|精确|温度|可调|设定值|功能|检测|高度|宽度|模式|尺寸|重量|峰值|容量|寿命|稳定性|高温|电源|电压|功率|压力|压强"
+
+def getTrs(tbody):
+    # collect all tr rows, descending into tbody/table wrappers
+    trs = []
+    if tbody.name=="table":
+        body = tbody.find("tbody",recursive=False)
+        if body is not None:
+            tbody = body
+    objs = tbody.find_all(recursive=False)
+    for obj in objs:
+        if obj.name=="tr":
+            trs.append(obj)
+        if obj.name=="tbody" or obj.name=="table":
+            for tr in obj.find_all("tr",recursive=False):
+                trs.append(tr)
+    return trs
+
+def fixSpan(tbody):
+    # fill in the cells implied by colspan/rowspan attributes
+    #trs = tbody.findChildren('tr', recursive=False)
+
+    trs = getTrs(tbody)
+    ths_len = 0
+    ths = list()
+    trs_set = set()
+    # fill columns first and rows second; the reverse order can scramble the parsed table
+    # iterate over every tr
+
+    for indtr, tr in enumerate(trs):
+        ths_tmp = tr.findChildren('th', recursive=False)
+        # skip trs that contain nested tables
+        if len(tr.findChildren('table'))>0:
+            continue
+        if len(ths_tmp) > 0:
+            ths_len = ths_len + len(ths_tmp)
+            for th in ths_tmp:
+                ths.append(th)
+            trs_set.add(tr)
+        # iterate over the elements of this row
+        tds = tr.findChildren(recursive=False)
+        for indtd, td in enumerate(tds):
+            # if colspan is set, duplicate the cell into the following positions of the same row
+            if 'colspan' in td.attrs:
+                if str(re.sub("[^0-9]","",str(td['colspan'])))!="":
+                    col = int(re.sub("[^0-9]","",str(td['colspan'])))
+                    if col<100 and len(td.get_text())<1000:
+                        td['colspan'] = 1
+                        for i in range(1, col, 1):
+                            td.insert_after(copy.copy(td))
+
+    for indtr, tr in enumerate(trs):
+        ths_tmp = tr.findChildren('th', recursive=False)
+        # skip trs that contain nested tables
+        if len(tr.findChildren('table'))>0:
+            continue
+        if len(ths_tmp) > 0:
+            ths_len = ths_len + len(ths_tmp)
+            for th in ths_tmp:
+                ths.append(th)
+            trs_set.add(tr)
+        # iterate over the elements of this row
+        tds = tr.findChildren(recursive=False)
+        for indtd, td in enumerate(tds):
+            # if rowspan is set, duplicate the cell into the same position of the following rows
+            if 'rowspan' in td.attrs:
+                if str(re.sub("[^0-9]","",str(td['rowspan'])))!="":
+                    row = int(re.sub("[^0-9]","",str(td['rowspan'])))
+                    td['rowspan'] = 1
+                    for i in range(1, row, 1):
+                        # fetch the tds of the next row and insert at the matching position
+                        if indtr+i<len(trs):
+                            tds1 = trs[indtr + i].findChildren(['td','th'], recursive=False)
+                            if len(tds1) >= (indtd) and len(tds1)>0:
+                                if indtd > 0:
+                                    tds1[indtd - 1].insert_after(copy.copy(td))
+                                else:
+                                    tds1[0].insert_before(copy.copy(td))
+                            elif indtd-2>0 and len(tds1) > 0 and len(tds1) == indtd - 1:  # fix tables whose last column was left unfilled
+                                tds1[indtd-2].insert_after(copy.copy(td))
+def getTable(tbody):
+    #trs = tbody.findChildren('tr', recursive=False)
+    fixSpan(tbody)
+    trs = getTrs(tbody)
+    inner_table = []
+    for tr in trs:
+        tr_line = []
+        tds = tr.findChildren(['td','th'], recursive=False)
+        if len(tds)==0:
+            tr_line.append([re.sub('\xa0','',tr.get_text()),0]) # 2021/12/21 keep rows that have no td cells to avoid data loss
+        for td in tds:
+            tr_line.append([re.sub('\xa0','',td.get_text()),0])
+            #tr_line.append([td.get_text(),0])
+        inner_table.append(tr_line)
+    return inner_table
+
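+# a minimal usage sketch (hedged): after fixSpan duplicates colspan/rowspan
+# cells, getTable flattens a <table> into rows of [text, 0] cells, e.g.
+#   html = "<table><tr><td colspan='2'>a</td></tr><tr><td>b</td><td>c</td></tr></table>"
+#   getTable(BeautifulSoup(html, "lxml").find("table"))
+#   # -> [[['a', 0], ['a', 0]], [['b', 0], ['c', 0]]]
+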
+class ParseDocument():
+
+    def __init__(self,_html,auto_merge_table=True):
+        if _html is None:
+            _html = ""
+        self.html = _html
+
+        # self.soup = BeautifulSoup(self.html,"lxml")
+        # self.soup = BeautifulSoup(self.html,"html.parser")
+        self.auto_merge_table = auto_merge_table
+
+        self.soup = BeautifulSoup(self.html,"html5lib")
+        _body = self.soup.find("body")
+        if _body is not None:
+            self.soup = _body
+        self.list_obj = self.soup.find_all(recursive=False)
+
+        # for obj in self.list_obj:
+        #     print("obj",obj.get_text()[:20])
+
+        self.tree = self.buildParsetree(self.list_obj,[],auto_merge_table)
+
+
+        # # print the recognized outline tree
+        # if self.parseTree:
+        #     self.parseTree.printParseTree()
+
+    def fix_tree(self,_product):
+        products = extract_products(self.tree,_product)
+        if len(products)>0:
+            self.tree = self.buildParsetree(self.list_obj,products,self.auto_merge_table)
+
+    def print_tree(self,tree,append=""):
+        if append=="":
+            self.set_tree_id = set()
+        for t in tree:
+            _id = id(t)
+            if _id in self.set_tree_id:
+                continue
+            self.set_tree_id.add(_id)
+            logger.debug("%s %s %s %s %s"%(append,t["text"][:50],t["sentence_title"],t["title_before"],t["title_after"]))
+            childs = t["child_title"]
+            self.print_tree(childs,append=append+"-|")
+
+    def is_title_first(self,title):
+        if title in ("一","1","Ⅰ","a","A"):
+            return True
+        return False
+
+    def find_title_by_pattern(self,_text,_pattern="(^|★|▲|:|:|\s+)(?P<title_1>(?P<title_1_index_0_0>第?)(?P<title_1_index_1_1>[一二三四五六七八九十ⅠⅡⅢⅣⅤⅥⅦⅧⅨⅩⅪⅫ]+)(?P<title_1_index_2_0>[、章册包标部\.::]))|" \
+                                             "([\s★▲\*]*)(?P<title_3>(?P<title_3_index_0_0>[^一二三四五六七八九十\dⅠⅡⅢⅣⅤⅥⅦⅧⅨⅩⅪⅫ]{,3}?)(?P<title_3_index_0_1>[ⅠⅡⅢⅣⅤⅥⅦⅧⅨⅩⅪⅫ]+)(?P<title_3_index_0_2>))|" \
+                                             "([\s★▲\*]*)(?P<title_4>(?P<title_4_index_0_0>[^一二三四五六七八九十\dⅠⅡⅢⅣⅤⅥⅦⅧⅨⅩⅪⅫ]{,3}?第?)(?P<title_4_index_1_1>[一二三四五六七八九十]+)(?P<title_4_index_2_0>[节章册部\.::、、]))|" \
+                                             "([\s★▲\*]*)(?P<title_5>(?P<title_5_index_0_0>^)(?P<title_5_index_1_1>[一二三四五六七八九十]+)(?P<title_5_index_2_0>)[^一二三四五六七八九十节章册部\.::、、])|" \
+                                             "([\s★▲\*]*)(?P<title_12>(?P<title_12_index_0_0>[^一二三四五六七八九十\dⅠⅡⅢⅣⅤⅥⅦⅧⅨⅩⅪⅫ]{,3}?\d{1,2}[\..、\s\-]\d{1,2}[\..、\s\-]\d{1,2}[\..、\s\-]\d{1,2}[\..、\s\-])(?P<title_12_index_1_1>\d{1,2})(?P<title_12_index_2_0>[\..、\s\-]?))|"\
+                                             "([\s★▲\*]*)(?P<title_11>(?P<title_11_index_0_0>[^一二三四五六七八九十\dⅠⅡⅢⅣⅤⅥⅦⅧⅨⅩⅪⅫ]{,3}?\d{1,2}[\..、\s\-]\d{1,2}[\..、\s\-]\d{1,2}[\..、\s\-])(?P<title_11_index_1_1>\d{1,2})(?P<title_11_index_2_0>[\..、\s\-]?))|" \
+                                             "([\s★▲\*]*)(?P<title_10>(?P<title_10_index_0_0>[^一二三四五六七八九十\dⅠⅡⅢⅣⅤⅥⅦⅧⅨⅩⅪⅫ]{,3}?\d{1,2}[\..、\s\-]\d{1,2}[\..、\s\-])(?P<title_10_index_1_1>\d{1,2})(?P<title_10_index_2_0>[\..、\s\-]?))|" \
+                                             "([\s★▲\*]*)(?P<title_7>(?P<title_7_index_0_0>[^一二三四五六七八九十\dⅠⅡⅢⅣⅤⅥⅦⅧⅨⅩⅪⅫ]{,3}?\d{1,2}[\..\s\-])(?P<title_7_index_1_1>\d{1,2})(?P<title_7_index_2_0>[\..包标::、\s\-]?))|" \
+                                             "([\s★▲\*]*)(?P<title_6>(?P<title_6_index_0_0>[^一二三四五六七八九十\dⅠⅡⅢⅣⅤⅥⅦⅧⅨⅩⅪⅫ]{,3}?包?)(?P<title_6_index_0_1>\d{1,2})(?P<title_6_index_2_0>[\..、\s\-包标]?))|" \
+                                             "([\s★▲\*]*)(?P<title_15>(?P<title_15_index_0_0>[^一二三四五六七八九十\dⅠⅡⅢⅣⅤⅥⅦⅧⅨⅩⅪⅫ]{,3}?[((]?)(?P<title_15_index_1_1>\d{1,2})(?P<title_15_index_2_0>[))包标]))|" \
+                                             "([\s★▲\*]*)(?P<title_17>(?P<title_17_index_0_0>[^一二三四五六七八九十\dⅠⅡⅢⅣⅤⅥⅦⅧⅨⅩⅪⅫ]{,3}?[((]?)(?P<title_17_index_1_1>[a-wA-W]+)(?P<title_17_index_2_0>[))包标]))|" \
+                                             "([\s★▲\*]*)(?P<title_19>(?P<title_19_index_0_0>[^一二三四五六七八九十\dⅠⅡⅢⅣⅤⅥⅦⅧⅨⅩⅪⅫ]{,3}?[((]?)(?P<title_19_index_1_1>[一二三四五六七八九十ⅠⅡⅢⅣⅤⅥⅦⅧⅨⅩⅪⅫ]+)(?P<title_19_index_2_0>[))]))" \
+                              ):
+        _se = re.search(_pattern,_text)
+        groups = []
+        if _se is not None:
+            _gd = _se.groupdict()
+            for k,v in _gd.items():
+                if v is not None:
+                    groups.append((k,v))
+        if len(groups):
+            # groups.sort(key=lambda x:x[0])
+            return groups
+        return None
+
+    def make_increase(self,_sort,_title,_add=1):
+        if len(_title)==0 and _add==0:
+            return ""
+        if len(_title)==0 and _add==1:
+            return _sort[0]
+        _index = _sort.index(_title[-1])
+        next_index = (_index+_add)%len(_sort)
+        next_chr = _sort[next_index]
+        if _index==len(_sort)-1:
+            _add = 1
+        else:
+            _add = 0
+        return next_chr+self.make_increase(_sort,_title[:-1],_add)
+
+
+    def get_next_title(self,_title):
+        if re.search("^\d+$",_title) is not None:
+            return str(int(_title)+1)
+        if re.search("^[一二三四五六七八九十百]+$",_title) is not None:
+            if _title[-1]=="十":
+                return _title+"一"
+            if _title[-1]=="百":
+                return _title+"零一"
+
+            if _title[-1]=="九":
+                if len(_title)==1:
+                    return "十"
+                if len(_title)==2:
+                    if _title[0]=="十":
+                        return "二十"
+                if len(_title)==3:
+                    if _title[0]=="九":
+                        return "一百"
+                    else:
+                        _next_title = self.make_increase(['一','二','三','四','五','六','七','八','九','十'],re.sub("[十百]",'',_title[0]))
+                        return _next_title+"十"
+
+            _next_title = self.make_increase(['一','二','三','四','五','六','七','八','九','十'],re.sub("[十百]",'',_title))
+            _next_title = list(_next_title)
+            _next_title.reverse()
+            if _next_title[-1]!="十":
+                if len(_next_title)>=2:
+                    _next_title.insert(-1,'十')
+            if len(_next_title)>=4:
+                _next_title.insert(-3,'百')
+            if _title[0]=="十":
+                if _next_title=="十":
+                    _next_title = ["二","十"]
+                _next_title.insert(0,"十")
+            _next_title = "".join(_next_title)
+            return _next_title
+        if re.search("^[a-z]+$",_title) is not None:
+            _next_title = self.make_increase([chr(i+ord('a')) for i in range(26)],_title)
+            _next_title = list(_next_title)
+            _next_title.reverse()
+            return "".join(_next_title)
+        if re.search("^[A-Z]+$",_title) is not None:
+            _next_title = self.make_increase([chr(i+ord('A')) for i in range(26)],_title)
+            _next_title = list(_next_title)
+            _next_title.reverse()
+            return "".join(_next_title)
+        if re.search("^[ⅠⅡⅢⅣⅤⅥⅦⅧⅨⅩⅪⅫ]$",_title) is not None:
+            _sort = ["Ⅰ","Ⅱ","Ⅲ","Ⅳ","Ⅴ","Ⅵ","Ⅶ","Ⅷ","Ⅸ","Ⅹ","Ⅺ","Ⅻ"]
+            _index = _sort.index(_title)
+            if _index<len(_sort)-1:
+                return _sort[_index+1]
+            return None
+
+    def count_title_before(self,list_obj):
+        dict_before = {}
+        dict_sentence_count = {}
+        illegal_sentence = set()
+        for obj_i in range(len(list_obj)):
+            obj = list_obj[obj_i]
+            _type = "sentence"
+            _text = obj.text.strip()
+            if obj.name=="table":
+                _type = "table"
+                _text = str(obj)
+            _append = False
+
+
+            if _type=="sentence":
+                if len(_text)>10 and len(_text)<100:
+                    if _text not in dict_sentence_count:
+                        dict_sentence_count[_text] = 0
+                    dict_sentence_count[_text] += 1
+                    if re.search("\d+页",_text) is not None:
+                        illegal_sentence.add(_text)
+                elif len(_text)<10:
+                    if re.search("第\d+页",_text) is not None:
+                        illegal_sentence.add(_text)
+
+                sentence_groups = self.find_title_by_pattern(_text[:10])
+                if sentence_groups:
+                    # print("sentence_groups",sentence_groups)
+                    sentence_title = sentence_groups[0][0]
+                    sentence_title_text = sentence_groups[0][1]
+                    title_index = sentence_groups[-2][1]
+                    title_before = sentence_groups[1][1].replace("(","(").replace(":",":").replace(":",";").replace(",",".").replace(",",".").replace("、",".")
+                    title_after = sentence_groups[-1][1].replace(")",")").replace(":",":").replace(":",";").replace(",",".").replace(",",".").replace("、",".")
+                    next_index = self.get_next_title(title_index)
+                    if title_before not in dict_before:
+                        dict_before[title_before] = 0
+                    dict_before[title_before] += 1
+
+        for k,v in dict_sentence_count.items():
+            if v>10:
+                illegal_sentence.add(k)
+        return dict_before,illegal_sentence
+
+    def is_page_no(self,sentence):
+        if len(sentence)<10:
+            if re.search("\d+页|^\-\d+\-$",sentence) is not None:
+                return True
+
+    def block_tree(self,childs):
+        for child in childs:
+
+            if not child["block"]:
+                child["block"] = True
+                childs2 = child["child_title"]
+                self.block_tree(childs2)
+
+
+    def buildParsetree(self,list_obj,products=[],auto_merge_table=True):
+
+        self.parseTree = None
+        trees = []
+        list_length = []
+        for obj in list_obj[:200]:
+            if obj.name!="table":
+                list_length.append(len(obj.get_text()))
+        if len(list_length)>0:
+            max_length = max(list_length)
+        else:
+            max_length = 40
+
+        logger.debug("%s:%d"%("max_length",max_length))
+
+
+        list_data = []
+        last_table_index = None
+        last_table_columns = None
+        last_table = None
+        dict_before,illegal_sentence = self.count_title_before(list_obj)
+        for obj_i in range(len(list_obj)):
+            obj = list_obj[obj_i]
+            _type = "sentence"
+            _text = standard_product(obj.text)
+            if obj.name=="table":
+                _type = "table"
+                _text = standard_product(str(obj))
+            _append = False
+            sentence_title = None
+            sentence_title_text = None
+            sentence_groups = None
+            title_index = None
+            next_index = None
+            parent_title = None
+            title_before = None
+            title_after = None
+            title_next = None
+            childs = []
+
+            list_table = None
+            block = False
+
+            if _type=="sentence":
+                if _text in illegal_sentence:
+                    continue
+
+                _fix = False
+                for p in products:
+                    if re.sub("^(\d[.、]?)+","",_text.strip())==p:
+                        title_before = "=产品"
+                        sentence_title = "title_0"
+                        sentence_title_text = p
+                        title_index = "0"
+                        title_after = "产品="
+                        next_index = "0"
+                        _fix = True
+                        break
+
+                if not _fix:
+                    sentence_groups = self.find_title_by_pattern(_text[:10])
+                    if sentence_groups:
+                        title_before = standard_title_context(sentence_groups[1][1])
+                        if title_before in dict_before and dict_before[title_before]>1:
+                            sentence_title = sentence_groups[0][0]
+                            sentence_title_text = sentence_groups[0][1]
+                            title_index = sentence_groups[-2][1]
+
+                            title_after = sentence_groups[-1][1]
+                            next_index = self.get_next_title(title_index)
+                        else:
+                            title_before = None
+
+
+            if _type=="sentence":
+                if sentence_title is None and len(list_data)>0 and list_data[-1]["sentence_title"] is not None and list_data[-1]["line_width"]>=max_length*0.6:
+                    list_data[-1]["text"] += _text
+                    list_data[-1]["line_width"] = len(_text)
+                    _append = True
+                elif sentence_title is None and len(list_data)>0 and _type==list_data[-1]["type"]:
+                    if list_data[-1]["line_width"]>=max_length*0.7:
+                        list_data[-1]["text"] += _text
+                        list_data[-1]["line_width"] = len(_text)
+                        _append = True
+
+            if _type=="table":
+                _soup = BeautifulSoup(_text,"lxml")
+                _table = _soup.find("table")
+                if _table is not None:
+                    list_table = getTable(_table)
+                    table_columns = len(list_table[0])
+
+                    if auto_merge_table:
+                        if last_table_index is not None and abs(obj_i-last_table_index)<=2 and last_table_columns is not None and last_table_columns==table_columns:
+                            if last_table is not None:
+                                trs = getTrs(_table)
+                                last_tbody = BeautifulSoup(last_table["text"],"lxml")
+                                _table = last_tbody.find("table")
+                                last_trs = getTrs(_table)
+                                _append = True
+
+                                for _line in list_table:
+                                    last_table["list_table"].append(_line)
+                                if len(last_trs)>0:
+                                    for _tr in trs:
+                                        last_trs[-1].insert_after(copy.copy(_tr))
+                                    last_table["text"] = re.sub("</?html>|</?body>","",str(last_tbody))
+
+                                last_table_index = obj_i
+                                last_table_columns = len(list_table[-1])
+
+
+            if not _append:
+                _data = {"type":_type, "text":_text,"list_table":list_table,"line_width":len(_text),"sentence_title":sentence_title,"title_index":title_index,
+                         "sentence_title_text":sentence_title_text,"sentence_groups":sentence_groups,"parent_title":parent_title,
+                         "child_title":childs,"title_before":title_before,"title_after":title_after,"title_next":title_next,"next_index":next_index,
+                         "block":block}
+
+                if _type=="table":
+                    last_table = _data
+                    last_table_index = obj_i
+                    if list_table:
+                        last_table_columns = len(list_table[-1])
+
+                if sentence_title is not None:
+                    if len(list_data)>0:
+                        if self.is_title_first(title_index):
+                            for i in range(1,len(list_data)+1):
+                                _d = list_data[-i]
+                                if _d["sentence_title"] is not None:
+                                    _data["parent_title"] = _d
+                                    _d["child_title"].append(_data)
+                                    break
+                        else:
+                            _find = False
+                            for i in range(1,len(list_data)+1):
+                                if _find:
+                                    break
+                                _d = list_data[-i]
+                                if _d.get("sentence_title")==sentence_title and title_before==_d["title_before"] and title_after==_d["title_after"]:
+                                    if _d["next_index"]==title_index and _d["title_next"] is None and not _d["block"]:
+                                        _data["parent_title"] = _d["parent_title"]
+                                        _d["title_next"] = _data
+                                        if len(_d["child_title"])>0:
+                                            _d["child_title"][-1]["title_next"] = ""
+                                            self.block_tree(_d["child_title"])
+                                        if _d["parent_title"] is not None:
+                                            _d["parent_title"]["child_title"].append(_data)
+                                        _find = True
+                                        break
+                            for i in range(1,len(list_data)+1):
+                                if _find:
+                                    break
+                                _d = list_data[-i]
+                                if i==1 and not _d["block"] and _d.get("sentence_title")==sentence_title and title_before==_d["title_before"] and title_after==_d["title_after"]:
+                                    _data["parent_title"] = _d["parent_title"]
+                                    _d["title_next"] = _data
+                                    if len(_d["child_title"])>0:
+                                        _d["child_title"][-1]["title_next"] = ""
+                                        self.block_tree(_d["child_title"])
+                                    if _d["parent_title"] is not None:
+                                        _d["parent_title"]["child_title"].append(_data)
+                                    _find = True
+                                    break
+                            title_before = standard_title_context(title_before)
+                            title_after = standard_title_context(title_after)
+                            for i in range(1,len(list_data)+1):
+                                if _find:
+                                    break
+                                _d = list_data[-i]
+                                if _d.get("sentence_title")==sentence_title and title_before==standard_title_context(_d["title_before"]) and title_after==standard_title_context(_d["title_after"]):
+                                    if _d["next_index"]==title_index and _d["title_next"] is None and not _d["block"]:
+                                        _data["parent_title"] = _d["parent_title"]
+                                        _d["title_next"] = _data
+                                        if len(_d["child_title"])>0:
+                                            _d["child_title"][-1]["title_next"] = ""
+                                            self.block_tree(_d["child_title"])
+                                        if _d["parent_title"] is not None:
+                                            _d["parent_title"]["child_title"].append(_data)
+                                        _find = True
+                                        break
+                            for i in range(1,len(list_data)+1):
+                                if _find:
+                                    break
+                                _d = list_data[-i]
+                                if not _d["block"] and _d.get("sentence_title")==sentence_title and title_before==standard_title_context(_d["title_before"]) and title_after==standard_title_context(_d["title_after"]):
+                                    _data["parent_title"] = _d["parent_title"]
+                                    _d["title_next"] = _data
+                                    if len(_d["child_title"])>0:
+                                        _d["child_title"][-1]["title_next"] = ""
+                                        # self.block_tree(_d["child_title"])
+                                    if _d["parent_title"] is not None:
+                                        _d["parent_title"]["child_title"].append(_data)
+                                    _find = True
+                                    break
+                            for i in range(1,min(len(list_data)+1,20)):
+                                if _find:
+                                    break
+                                _d = list_data[-i]
+                                if not _d["block"] and _d.get("sentence_title")==sentence_title and title_before==standard_title_context(_d["title_before"]):
+                                    _data["parent_title"] = _d["parent_title"]
+                                    _d["title_next"] = _data
+                                    if len(_d["child_title"])>0:
+                                        _d["child_title"][-1]["title_next"] = ""
+                                        # self.block_tree(_d["child_title"])
+                                    if _d["parent_title"] is not None:
+                                        _d["parent_title"]["child_title"].append(_data)
+                                    _find = True
+                                    break
+
+                            if not _find:
+                                if len(list_data)>0:
+                                    for i in range(1,len(list_data)+1):
+                                        _d = list_data[-i]
+                                        if _d.get("sentence_title") is not None:
+                                            _data["parent_title"] = _d
+                                            _d["child_title"].append(_data)
+                                            break
+
+
+                else:
+                    if len(list_data)>0:
+                        for i in range(1,len(list_data)+1):
+                            _d = list_data[-i]
+                            if _d.get("sentence_title") is not None:
+                                _data["parent_title"] = _d
+                                _d["child_title"].append(_data)
+                                break
+
+                list_data.append(_data)
+
+        return list_data
+
+def standard_title_context(_title_context):
+    return _title_context.replace("(","(").replace(")",")").replace(":",":").replace(":",";").replace(",",".").replace(",",".").replace("、",".").replace(".",".")
+
+def standard_product(sentence):
+    return sentence.replace("(","(").replace(")",")")
+
+def extract_products(list_data,_product,_param_pattern = "产品名称|采购内存|标的名称|采购内容|(标的|维修|系统|报价构成|商品|产品|物料|物资|货物|设备|采购品|采购条目|物品|材料|印刷品?|采购|物装|配件|资产|耗材|清单|器材|仪器|器械|备件|拍卖物|标的物|物件|药品|药材|药械|货品|食品|食材|品目|^品名|气体|标项|分项|项目|计划|包组|标段|[分子]?包|子目|服务|招标|中标|成交|工程|招标内容)[\))的]?([、\w]{,4}名称|内容|描述)|标的|标项|项目$|商品|产品|物料|物资|货物|设备|采购品|采购条目|物品|材料|印刷品|物装|配件|资产|招标内容|耗材|清单|器材|仪器|器械|备件|拍卖物|标的物|物件|药品|药材|药械|货品|食品|食材|菜名|^品目$|^品名$|^名称|^内容$"):
+    _product = standard_product(_product)
+    list_result = []
+    for _data_i in range(len(list_data)):
+        _data = list_data[_data_i]
+        _type = _data["type"]
+        _text = _data["text"]
+        table_products = []
+        if _type=="table":
+            list_table = _data["list_table"]
+            if list_table is None:
+                continue
+            _check = True
+            max_length = max([len(a) for a in list_table])
+            min_length = min([len(a) for a in list_table])
+            if min_length<max_length/2:
+                continue
+            list_head_index = []
+            _begin_index = 0
+            head_cell_text = ""
+            for line_i in range(len(list_table[:2])):
+                line = list_table[line_i]
+                line_text = ",".join([cell[0] for cell in line])
+                for cell_i in range(len(line)):
+                    cell = line[cell_i]
+                    cell_text = cell[0]
+                    if len(cell_text)<10 and re.search(_param_pattern,cell_text) is not None and re.search("单价|数量|预算|限价|总价|品牌|规格|型号|用途|要求|采购量",line_text) is not None:
+                        _begin_index = line_i+1
+                        list_head_index.append(cell_i)
+
+            for line_i in range(len(list_table)):
+                line = list_table[line_i]
+                for cell_i in list_head_index:
+                    cell = line[cell_i]
+                    cell_text = cell[0]
+                    head_cell_text += cell_text
+
+            # print("===head_cell_text",head_cell_text)
+            if re.search("招标人|采购人|项目编号|项目名称|金额|^\d+$",head_cell_text) is not None:
+                list_head_index = []
+
+
+            for line in list_table:
+                line_text = ",".join([cell[0] for cell in line])
+                for cell_i in range(len(line)):
+                    cell = line[cell_i]
+                    cell_text = cell[0]
+                    if cell_text is not None and _product is not None and len(cell_text)<len(_product)*10 and re.search(_product,cell_text) is not None and re.search("单价|数量|总价|规格|品牌|型号|用途|要求|采购量",line_text) is not None:
+                        list_head_index.append(cell_i)
+
+            list_head_index = list(set(list_head_index))
+            if len(list_head_index)>0:
+                for line_i in range(_begin_index,len(list_table)):
+                    line = list_table[line_i]
+                    has_number = False
+                    for cell_i in range(len(line)):
+                        cell = line[cell_i]
+                        cell_text = cell[0]
+                        if re.search("^\d+$",cell_text) is not None:
+                            has_number = True
+
+                    for cell_i in list_head_index:
+                        if cell_i>=len(line):
+                            continue
+                        cell = line[cell_i]
+                        cell_text = cell[0]
+                        if re.search(_param_pattern,cell_text) is None or has_number:
+                            if re.search("^[\da-zA-Z]+$",cell_text) is None:
+                                table_products.append(cell_text)
+
+        if len(table_products)>0:
+            if min([len(x) for x in table_products])>0 and max([len(x) for x in table_products])<=20:
+                list_result.extend(table_products)
+    list_result = list(set([a for a in list_result if len(a)>1 and len(a)<20 and re.search("预算|合计|金额|万元|运费",a) is None]))
+    return list_result
+
+
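+# descriptive note (added): flatten a list of title nodes and all of their descendants, depth-first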
+def get_childs(childs):
+    list_data = []
+    for _child in childs:
+        list_data.append(_child)
+        childs2 = _child.get("child_title",[])
+
+        if len(childs2)>0:
+            for _child2 in childs2:
+                list_data.extend(get_childs([_child2]))
+    return list_data
+
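+# descriptive note (added): return the contiguous slice of list_data covering the
+# given child nodes, matching nodes by object identity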
+def get_range_data_by_childs(list_data,childs):
+    range_data = []
+    list_child = get_childs(childs)
+    list_index = []
+    set_child = set([id(x) for x in list_child])
+    for _data_i in range(len(list_data)):
+        _data = list_data[_data_i]
+        _id = id(_data)
+        if _id in set_child:
+            list_index.append(_data_i)
+    if len(list_index)>0:
+        range_data = list_data[min(list_index):max(list_index)+1]
+    return range_data
+
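+# descriptive note (added): choose the extracted product whose length is closest
+# to the given name; when a similar candidate merely contains the name, keep the
+# shorter original name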
+def get_correct_product(product,products):
+    list_data = []
+    for p in products:
+        is_sim = is_similar(product,p)
+        _d = {"product":p,"distance":abs(len(product)-len(p)),"is_sim":is_sim}
+        list_data.append(_d)
+    list_data.sort(key=lambda x:x["distance"])
+    for _d in list_data:
+        is_sim = _d["is_sim"]
+        if is_sim:
+            if len(_d["product"])>len(product) and _d["product"].find(product)>=0:
+                return product
+            return _d["product"]
+    return product
+
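+# descriptive note (added): walk the title subtree and concatenate text from the
+# first mention of _product until another product or a match of end_pattern ends
+# the section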
+def get_childs_text(childs,_product,products,is_begin=False,is_end=False):
+    _text = ""
+
+    for _child in childs:
+
+        child_text = _child.get("text")
+
+        if child_text.find(_product)>=0:
+            is_begin = True
+
+        logger.debug("%s-%s-%s"%("get_childs_text",child_text[:10],str(is_begin)))
+
+        for p in products:
+            if child_text.find(p)>=0 and is_similar(_product,p,90):
+                is_begin = True
+
+            if child_text.find(_product)<0 and child_text.find(p)>=0 and not is_similar(_product,p,80):
+                if is_begin:
+                    is_end = True
+                break
+        if re.search(end_pattern,child_text) is not None:
+            if is_begin:
+                is_end = True
+
+        if is_begin and is_end:
+            break
+
+        if is_begin:
+            _text += _child.get("text")+"\r\n"
+        childs2 = _child.get("child_title",[])
+
+        if len(childs2)>0:
+            for _child2 in childs2:
+                child_text,is_begin,is_end = get_childs_text([_child2],_product,products,is_begin)
+                if is_begin and is_end:
+                    break
+                else:
+                    if is_begin:
+                        _text += child_text
+    return _text,is_begin,is_end
+
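+# descriptive note (added): extract parameter text via the title tree: prefer the
+# parent title's subtree, then fall back to the sibling range under the parent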
+def extract_parameters_by_tree(_product,products,list_data,_data_i,parent_title,list_result):
+    _data = list_data[_data_i]
+    childs = _data.get("child_title",[])
+    if len(childs)>0 and parent_title is not None:
+        child_text,_,_ = get_childs_text([parent_title],_product,products)
+        logger.info("extract_parameters_by_tree child_text:%s"%child_text)
+        if len(child_text)>0:
+            list_result.append(child_text)
+            return True
+    if parent_title is not None:
+        childs = parent_title.get("child_title",[])
+        if len(childs)>0:
+
+            range_data = get_range_data_by_childs(list_data[_data_i:],childs)
+            p_text = ""
+            _find = False
+            for pdata in range_data:
+                ptype = pdata["type"]
+                ptext = pdata["text"]
+                for p in products:
+                    if ptext.find(_product)<0 and ptext.find(p)>=0:
+                        _find = True
+                        break
+                if re.search(end_pattern,ptext) is not None:
+                    _find = True
+                if _find:
+                    break
+                p_text += ptext+"\r\n"
+            if len(p_text)>0:
+                list_result.append(p_text)
+                return True
+    return False
+
+
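+# descriptive note (added): keep only the table rows between the row that mentions
+# _product and the first row that names another product or matches end_pattern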
+def get_table_pieces(_text,_product,products,list_result,_find):
+    _soup = BeautifulSoup(_text,"html5lib")
+    _table = _soup.find("table")
+    if _table is not None:
+        trs = getTrs(_table)
+        list_trs = []
+        for tr in trs:
+            tr_text = tr.get_text()
+            if tr_text.find(_product)>=0:
+                _find = True
+
+            logger.debug("%s-%s"%("table_html_tr",tr_text))
+            for p in products:
+                if _find and p!=_product and tr_text.find(p)>=0:
+                    _find = False
+                    break
+            if re.search(end_pattern,tr_text) is not None:
+                _find = False
+                break
+            if _find:
+                list_trs.append(tr)
+        if len(list_trs)>0:
+            table_html = "<table>%s</table>"%("\r\n".join([str(a) for a in list_trs]))
+            list_result.append(table_html)
+
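+# descriptive note (added): extract parameters from a table: take the whole table
+# when the product is named in its first line or the preceding sentence, otherwise
+# pick the cells of columns whose header matches _param_pattern on rows naming the product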
+def extract_parameters_by_table(_product,products,_param_pattern,list_data,_data_i,list_result):
+    _data = list_data[_data_i]
+    _text = _data["text"]
+    list_table = _data["list_table"]
+    parent_title = _data["parent_title"]
+    if list_table is not None:
+        _check = True
+        max_length = max([len(a) for a in list_table])
+        min_length = min([len(a) for a in list_table])
+        text_line_first = ",".join(a[0] for a in list_table[0])
+        if min_length<max_length/2:
+            return
+        last_data = list_data[_data_i-1]
+        _flag = False
+        if last_data["type"]=="sentence" and last_data["text"].find(_product)>=0:
+            logger.debug("last sentence find product %s-%s"%(_product,last_data["text"]))
+            _flag = True
+        # print(text_line_first,"text_line_first",re.search(_param_pattern,text_line_first) is not None and text_line_first.find(_product)>=0)
+        if re.search(_param_pattern,text_line_first) is not None and text_line_first.find(_product)>=0:
+            _flag = True
+        if _flag:
+            logger.debug("extract_type add all table %s"%_text)
+            if len(products)==0:
+                list_result.append(_text)
+            else:
+                for p in products:
+                    if p!=_product and _text.find(p)>=0:
+                        logger.debug("extract_type add all table failed %s-%s"%(_product,p))
+                        _flag = False
+                        break
+                if _flag:
+                    logger.debug("extract_type add all table succeed")
+                    get_table_pieces(_text,_product,products,list_result,True)
+        else:
+            list_head_index = []
+            for line in list_table[:2]:
+                for cell_i in range(len(line)):
+                    cell = line[cell_i]
+                    cell_text = cell[0]
+                    if len(cell_text)<20 and re.search(_param_pattern,cell_text) is not None:
+                        list_head_index.append(cell_i)
+            list_head_index = list(set(list_head_index))
+            for line in list_table:
+                for cell in line:
+                    cell_text = cell[0]
+                    if len(cell_text)>50 and len(re.findall("\d+",cell_text))>10 and cell_text.find(_product)>=0:
+                        list_result.append(cell_text)
+                    if len(cell_text)<len(_product)*10 and str(cell_text).find(_product)>=0:
+                        for _index in list_head_index:
+                            if _index>=len(line):
+                                continue
+                            _cell = line[_index]
+                            if len(_cell[0])>0:
+                                logger.info("%s-%s"%("add on table",_cell[0]))
+                                list_result.append(_cell[0])
+        if not _flag and (re.search(_param_pattern,_text) is not None or (parent_title is not None and re.search(_param_pattern,parent_title["text"]) is not None)) and _text.find(_product)>=0:
+            get_table_pieces(_text,_product,products,list_result,False)
+
+
+
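+# descriptive note (added): rank candidate texts by their density of parameter-like
+# tokens and return the first one passing the length and cross-product checks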
+def getBestProductText(list_result,_product,products):
+    list_result.sort(key=lambda x:len(re.findall(meter_pattern+"|"+'[：:；;]|\d+[%A-Za-z]+',BeautifulSoup(x,"html5lib").get_text())), reverse=True)
+
+    logger.debug("+++++++++++++++++++++")
+    for i in range(len(list_result)):
+        logger.debug("result%d %s"%(i,list_result[i]))
+    logger.debug("+++++++++++++++++++++")
+
+    for i in range(len(list_result)):
+        _result = list_result[i]
+        _check = True
+        _result_text = BeautifulSoup(_result,"html5lib").get_text()
+        _search = re.search("项目编号[：:]|项目名称[：:]|联合体投标",_result)
+        if _search is not None:
+            logger.debug("result%d error illegal text %s"%(i,str(_search)))
+            _check = False
+        if not (len(_result_text)<1000 and _result[:6]!="<table"):
+            for p in products:
+                if _result_text.find(p)>0 and not (is_similar(_product,p,80) or p.find(_product)>=0 or _product.find(p)>=0):
+                    logger.debug("result%d error product cross %s"%(i,p))
+                    _check = False
+        if len(_result_text)<50:
+            if re.search(meter_pattern,_result_text) is None:
+                logger.debug("result%d error text min count"%(i))
+                _check = False
+        if len(_result_text)>5000:
+            logger.debug("result%d error text max count"%(i))
+            _check = False
+        if _check:
+            return _result
+
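+# descriptive note (added): entry point: walk the parsed document and return the
+# best parameter text for _product plus the number of places the product was mentioned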
+def extract_product_parameters(list_data,_product):
+
+    list_result = []
+    _product = standard_product(_product.strip())
+    products = extract_products(list_data,_product)
+
+    _product = get_correct_product(_product,products)
+    logger.debug("all products %s-%s"%(_product,str(products)))
+    is_project = False
+    _find_count = 0
+    for _data_i in range(len(list_data)):
+        _data = list_data[_data_i]
+        _type = _data["type"]
+        _text = _data["text"]
+        if _type=="sentence":
+            if _text.find(_product)>=0:
+                _find_count += 1
+                if re.search("项目名称|采购项目",_text) is not None:
+                    is_project = True
+                if re.search("项目名称|采购项目",_product) is not None:
+                    is_project = True
+                parent_title = _data.get("parent_title")
+                parent_text = ""
+                parent_parent_title = None
+                parent_parent_text = ""
+                parent_title_index = None
+                parent_parent_title_index = None
+                childs = get_childs([_data])
+
+
+                child_find = False
+                for c in childs:
+                    if re.search(_param_pattern,c["text"]) is not None and len(c["text"])<30:
+                        child_find = True
+                        break
+
+                extract_text,_,_ = get_childs_text([_data],_product,products)
+                logger.debug("childs found extract_text %s"%extract_text)
+                if child_find:
+                    if len(extract_text)>0:
+                        list_result.append(extract_text)
+                else:
+                    if len(_text)<len(_product)+10 and _data["sentence_title"] is not None:
+                        if re.search(meter_pattern,extract_text) is not None:
+                            list_result.append(extract_text)
+
+                if parent_title is not None:
+                    parent_text = parent_title.get("text","")
+                    parent_parent_title = parent_title.get("parent_title")
+                    parent_title_index = parent_title["title_index"]
+                    if parent_parent_title is not None:
+                        parent_parent_text = parent_parent_title.get("text","")
+                        parent_parent_title_index = parent_parent_title["title_index"]
+
+                _suit = False
+                if re.search(_param_pattern,_text) is not None and len(_text)<50:
+                    _suit = True
+                if re.search(_param_pattern,parent_text) is not None and len(parent_text)<50:
+                    _suit = True
+                if re.search(_param_pattern,parent_parent_text) is not None and len(parent_parent_text)<50:
+                    _suit = True
+                if _suit:
+                    logger.debug("extract_type sentence %s"%("extract_parameters_by_tree"))
+                    if not extract_parameters_by_tree(_product,products,list_data,_data_i,parent_title,list_result):
+                        logger.debug("extract_type sentence %s"%("extract_parameters_by_tree with parent_parent_title"))
+                        extract_parameters_by_tree(_product,products,list_data,_data_i,parent_parent_title,list_result)
+
+
+            if re.search(_param_pattern,_text) is not None and len(_text)<50:
+                childs = _data["child_title"]
+                if len(childs)>0:
+                    logger.debug("extract_type sentence %s"%("re.search(_param_pattern,_text) is not None and len(_text)<50:"))
+                    extract_text,_,_ = get_childs_text([_data],_product,products)
+                    if len(extract_text)>0:
+                        list_result.append(extract_text)
+                    elif is_project:
+                        logger.debug("extract_type sentence is_project")
+                        extract_text,_,_ = get_childs_text([_data],_product,products,is_begin=True)
+                        if len(extract_text)>0 and re.search(meter_pattern,extract_text) is not None:
+                            list_result.append(extract_text)
+
+
+        elif _type=="table":
+            if _text.find(_product)>=0:
+                _find_count += 1
+            extract_parameters_by_table(_product,products,_param_pattern,list_data,_data_i,list_result)
+
+
+    return getBestProductText(list_result,_product,products),_find_count
+
+
+if __name__ == '__main__':
+
+    filepath = "download/8679fef3a6fff56abcbdaccb1a190c80.html"
+    _product = "移液器"
+
+    _html = open(filepath, "r", encoding="utf8").read()
+
+    pd = ParseDocument(_html,False)
+
+    pd.fix_tree(_product)
+    list_data = pd.tree
+    pd.print_tree(list_data)
+
+    _text,_count = extract_product_parameters(list_data,_product)
+    logger.info("find count:%d"%(_count))
+    logger.info("extract_text %s"%_text)
+

+ 4 - 1
BaseDataMaintenance/maintenance/product/productUtils.py

@@ -101,6 +101,7 @@ def get_embedding_search(coll,index_name,name,grade,vector,search_params,output_
                 db.set(_md5,json.dumps(final_list))
                 db.expire(_md5,PRODUCT_REDIS_CACHE_TIME)
             except Exception as e:
+                traceback.print_exc()
                 log("set redis data error")
             return final_list
 
@@ -143,6 +144,7 @@ def get_embedding_request(sentence,retry_times=3):
                     db.set(_md5,json.dumps(_embedding))
                     db.expire(_md5,60*60)
                 except Exception as e:
+                    traceback.print_exc()
                     log("set redis data error")
             return _embedding
     except Exception as e:
@@ -590,4 +592,5 @@ if __name__ == '__main__':
     # print(clean_product_specs("//4008SverssionV10"))
     print(is_legal_brand(getConnect_ots(),"产地:中国品牌:天津迈达型号:ODM-2100S"))
     print(clean_product_brand("产地:中国品牌:天津迈达型号:ODM-2100S"))
-    # print(check_specs("500ml","3500ml"))
+    # print(check_specs("500ml","3500ml"))
+    # print(is_similar("手术显微镜配套无线工作站(含助手镜)","显微镜",80))

+ 247 - 0
BaseDataMaintenance/maintenance/product/product_attachment.py

@@ -0,0 +1,247 @@
+
+
+
+from apscheduler.schedulers.blocking import BlockingScheduler
+from tablestore import *
+from BaseDataMaintenance.dataSource.source import getConnect_ots,getAuth,is_internal
+from BaseDataMaintenance.dataSource.interface import *
+from multiprocessing import Queue as PQueue,Process
+from BaseDataMaintenance.model.ots.document_product import *
+from BaseDataMaintenance.model.ots.attachment import *
+from BaseDataMaintenance.common.Utils import *
+from BaseDataMaintenance.common.ossUtils import *
+from BaseDataMaintenance.maintenance.product.htmlparser import *
+import oss2
+from BaseDataMaintenance.common.multiThread import MultiThreadHandler
+
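+# descriptive note (added): lifecycle values of parameter_status on each product row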
+parameter_status_no_bidfile = -1
+parameter_status_to_process = 0
+parameter_status_process_succeed = 1
+parameter_status_process_failed = 2
+parameter_status_process_jump = 3
+parameter_status_not_found = 4
+
+class Product_Attachment_Processor():
+
+    def __init__(self,):
+        self.ots_client = getConnect_ots()
+        self.product_attachment_queue = PQueue()
+        self.product_attachment_queue_size = 100
+        self.set_product_attachment = set()
+        self.attachment_hub_url = "https://attachment-hub.oss-cn-hangzhou.aliyuncs.com/"
+        self.auth = getAuth()
+        oss2.defaults.connection_pool_size = 100
+        oss2.defaults.multiget_num_threads = 20
+        if is_internal:
+            self.bucket_url = "http://oss-cn-hangzhou-internal.aliyuncs.com"
+        else:
+            self.bucket_url = "http://oss-cn-hangzhou.aliyuncs.com"
+        log("bucket_url:%s"%(self.bucket_url))
+        self.attachment_bucket_name = "attachment-hub"
+        self.bucket = oss2.Bucket(self.auth,self.bucket_url,self.attachment_bucket_name)
+        self.current_path = os.path.dirname(__file__)
+        self.download_path = "%s/%s"%(self.current_path,"download")
+
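+    # descriptive note (added): producer: page through OTS for rows still marked
+    # to-process and refill the queue, skipping ids queued in the previous round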
+    def process_parameters_producer(self,):
+
+        if self.product_attachment_queue.qsize()>self.product_attachment_queue_size/3:
+            return
+        bool_query = BoolQuery(must_queries=[
+            TermQuery("parameter_status",parameter_status_to_process)
+        ])
+        list_id = []
+        rows,next_token,total_count,is_all_succeed = self.ots_client.search(Document_product_table_name,Document_product_table_name+"_index",
+                                                                            SearchQuery(bool_query,sort=Sort(sorters=[FieldSort("parameter_status")]),limit=100,get_total_count=True),
+                                                                            ColumnsToGet([DOCUMENT_PRODUCT_BID_FILEMD5S,DOCUMENT_PRODUCT_NAME,DOCUMENT_PRODUCT_ORIGINAL_NAME],return_type=ColumnReturnType.SPECIFIED))
+
+        list_data = getRow_ots(rows)
+        for data in list_data:
+            _id = data.get(DOCUMENT_PRODUCT_ID)
+            if _id in self.set_product_attachment:
+                continue
+            self.product_attachment_queue.put(data)
+            list_id.append(_id)
+        while next_token:
+            if self.product_attachment_queue.qsize()>=self.product_attachment_queue_size:
+                break
+            rows,next_token,total_count,is_all_succeed = self.ots_client.search(Document_product_table_name,Document_product_table_name+"_index",
+                                                                                SearchQuery(bool_query,next_token=next_token,limit=100,get_total_count=True),
+                                                                                ColumnsToGet([DOCUMENT_PRODUCT_BID_FILEMD5S,DOCUMENT_PRODUCT_NAME,DOCUMENT_PRODUCT_ORIGINAL_NAME],return_type=ColumnReturnType.SPECIFIED))
+            list_data = getRow_ots(rows)
+            for data in list_data:
+                _id = data.get(DOCUMENT_PRODUCT_ID)
+                if _id in self.set_product_attachment:
+                    continue
+                self.product_attachment_queue.put(data)
+                list_id.append(_id)
+        self.set_product_attachment = set(list_id)
+
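+    # descriptive note (added): consumer: download each bid attachment, convert it
+    # to html (cached on disk), run the parameter extraction and write the
+    # resulting status back to OTS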
+    def process_parameters_handler(self,item,result_queue):
+        bid_filemd5s = item.get(DOCUMENT_PRODUCT_BID_FILEMD5S)
+        product_name = item.get(DOCUMENT_PRODUCT_NAME)
+        product_original_name = item.get(DOCUMENT_PRODUCT_ORIGINAL_NAME)
+        list_product = []
+        if product_name is not None:
+            list_product.append(product_name)
+        if product_original_name is not None:
+            list_product.extend(product_original_name.split("_"))
+        list_product = list(set(list_product))
+        dp = Document_product(item)
+        if bid_filemd5s is None or bid_filemd5s=="" or len(list_product)==0:
+            dp.setValue(DOCUMENT_PRODUCT_PARAMETER_STATUS,parameter_status_no_bidfile)
+            dp.update_row(self.ots_client)
+            return
+        list_filemd5 = bid_filemd5s.split(",")
+        _find = False
+        _success = False
+        for _filemd5 in list_filemd5:
+            if _find:
+                break
+            atta = attachment({attachment_filemd5:_filemd5})
+            if atta.fix_columns(self.ots_client,[attachment_path,attachment_filetype],True):
+                objectPath = atta.getProperties().get(attachment_path)
+                _filetype = atta.getProperties().get(attachment_filetype)
+                if _filetype in ("doc","xls"):
+                    if len(list_filemd5)==1:
+                        dp.setValue(DOCUMENT_PRODUCT_PARAMETER_STATUS,parameter_status_process_jump,True)
+                        dp.update_row(self.ots_client)
+                        return
+                    else:
+                        continue
+                localpath = "%s/%s.%s"%(self.download_path,_filemd5,_filetype)
+                localhtml = "%s/%s.%s"%(self.download_path,_filemd5,"html")
+                download_succeed = False
+                try:
+                    if not os.path.exists(localpath):
+                        download_succeed = downloadFile(self.bucket,objectPath,localpath)
+                    else:
+                        download_succeed = True
+                except Exception as e:
+                    download_succeed = False
+                if download_succeed:
+                    try:
+                        _html = ""
+                        if os.path.exists(localhtml):
+                            _html = open(localhtml,"r",encoding="utf8").read()
+                            _success = True
+                        if len(_html)>10:
+                            _success = True
+                        else:
+                            _data_base64 = base64.b64encode(open(localpath,"rb").read())
+                            _success,_html,swf_images,classification = getAttachDealInterface(_data_base64,_filetype,url="http://192.168.2.102:15011/convert",kwargs={'page_no': '1,-1',"max_bytes":"-1"},timeout=6000)
+                            if _success:
+                                with open(localhtml,"w",encoding="utf8") as f:
+                                    f.write(_html)
+                        if _success:
+                            if len(_html)>5:
+                                pd = ParseDocument(_html,True)
+
+                                list_text = []
+                                for _product in list_product:
+                                    pd.fix_tree(_product)
+                                    list_data = pd.tree
+                                    _text,_count = extract_product_parameters(list_data,_product)
+                                    if _count>0:
+                                        _find = True
+                                    if _text is not None:
+                                        list_text.append(_text)
+                                # re-parse with the alternate mode so both passes contribute candidate texts
+                                pd = ParseDocument(_html,False)
+
+                                for _product in list_product:
+                                    pd.fix_tree(_product)
+                                    list_data = pd.tree
+                                    _text,_count = extract_product_parameters(list_data,_product)
+                                    if _count>0:
+                                        _find = True
+                                    if _text is not None:
+                                        list_text.append(_text)
+                                if len(list_text)>0:
+                                    list_text.sort(key=lambda x:len(re.findall('[::;;]',BeautifulSoup(x,"html5lib").get_text())), reverse=True)
+                                    _text = list_text[0]
+                                    _success = True
+                                    dp.setValue(DOCUMENT_PRODUCT_PARAMETER,_text,True)
+                                    dp.setValue(DOCUMENT_PRODUCT_PARAMETER_STATUS,parameter_status_process_succeed,True)
+                                    dp.update_row(self.ots_client)
+                                    return
+                            else:
+                                log("product attachment process filemd5 %s has no content"%(_filemd5))
+                    except Exception as e:
+                        traceback.print_exc()
+                    finally:
+                        try:
+                            # if os.path.exists(localpath):
+                            #     os.remove(localpath)
+                            pass
+                        except Exception as e:
+                            pass
+
+        if not _find:
+            dp.setValue(DOCUMENT_PRODUCT_PARAMETER_STATUS,parameter_status_not_found,True)
+            dp.update_row(self.ots_client)
+        else:
+            dp.setValue(DOCUMENT_PRODUCT_PARAMETER_STATUS,parameter_status_process_failed,True)
+            dp.update_row(self.ots_client)
+
+    def start_process(self):
+        mt = MultiThreadHandler(self.product_attachment_queue,self.process_parameters_handler,None,3,need_stop=False,restart=True)
+        mt.run()
+
+    def process_parameters_consumer(self,):
+
+        # process_count = 2
+        # list_process = []
+        # for i in range(process_count):
+        #     p = Process(target=self.start_process)
+        #     list_process.append(p)
+        # for p in list_process:
+        #     p.start()
+        # for p in list_process:
+        #     p.join()
+
+        self.start_process()
+
+    def start_process_parameters(self):
+        scheduler = BlockingScheduler()
+        scheduler.add_job(self.process_parameters_producer,"cron",second="*/10")
+        scheduler.add_job(self.process_parameters_consumer,"cron",second="*/30")
+        scheduler.start()
+
+def start_process_parameters():
+    pap = Product_Attachment_Processor()
+    pap.start_process_parameters()
+
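+# descriptive note (added): maintenance helper: reset rows whose extraction failed
+# or found nothing back to the to-process status and clear the stored parameter text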
+def change_parameters_status():
+    ots_client = getConnect_ots()
+    bool_query = BoolQuery(must_queries=[
+        RangeQuery("parameter_status",-1)
+    ],
+                           must_not_queries=[
+        TermQuery("parameter_status",parameter_status_to_process),
+        TermQuery("parameter_status",parameter_status_process_succeed),
+        TermQuery("parameter_status",parameter_status_process_jump),
+        # TermQuery("parameter_status",parameter_status_no_bidfile),
+
+    ])
+    list_data = []
+    rows,next_token,total_count,is_all_succeed = ots_client.search(Document_product_table_name,Document_product_table_name+"_index",
+                                                                        SearchQuery(bool_query,sort=Sort(sorters=[FieldSort("parameter_status")]),limit=100,get_total_count=True),
+                                                                        ColumnsToGet([DOCUMENT_PRODUCT_BID_FILEMD5S,DOCUMENT_PRODUCT_NAME,DOCUMENT_PRODUCT_ORIGINAL_NAME],return_type=ColumnReturnType.SPECIFIED))
+
+    list_data.extend(getRow_ots(rows))
+    print("total_count",total_count)
+    while next_token:
+        rows,next_token,total_count,is_all_succeed = ots_client.search(Document_product_table_name,Document_product_table_name+"_index",
+                                                                            SearchQuery(bool_query,next_token=next_token,limit=100,get_total_count=True),
+                                                                            ColumnsToGet([DOCUMENT_PRODUCT_BID_FILEMD5S],return_type=ColumnReturnType.SPECIFIED))
+        list_data.extend(getRow_ots(rows))
+    for data in list_data:
+        dp = Document_product(data)
+        dp.setValue(DOCUMENT_PRODUCT_PARAMETER_STATUS,parameter_status_to_process,True)
+        dp.setValue(DOCUMENT_PRODUCT_PARAMETER,"",True)
+        dp.update_row(ots_client)
+
+if __name__ == '__main__':
+    start_process_parameters()
+    # change_parameters_status()

+ 61 - 11
BaseDataMaintenance/maintenance/product/products.py

@@ -680,9 +680,15 @@ class Product_Manager(Product_Dict_Manager):
             _product.setValue(DOCUMENT_PRODUCT_ORIGINAL_BRAND,original_brand,True)
             _product.setValue(DOCUMENT_PRODUCT_ORIGINAL_SPECS,original_specs,True)
 
-            bid_filemd5s = self.get_bid_filemd5s(docid,self.ots_client)
-            if bid_filemd5s is not None:
+            list_attachments,bid_filemd5s = self.get_bid_filemd5s(docid,self.ots_client)
+            if len(list_attachments)>0:
+                _product.setValue(DOCUMENT_PRODUCT_ATTACHMENTS,json.dumps(list_attachments,ensure_ascii=False),True)
+                _product.setValue(DOCUMENT_PRODUCT_HAS_ATTACHMENTS,1,True)
+
+            if bid_filemd5s!="":
                 _product.setValue(DOCUMENT_PRODUCT_BID_FILEMD5S,bid_filemd5s,True)
+                _product.setValue(DOCUMENT_PRODUCT_HAS_BIDFILE,1,True)
+                _product.setValue(DOCUMENT_PRODUCT_PARAMETER_STATUS,0,True)
 
             if not is_legal_data:
                 _status = randint(501,550)
@@ -721,6 +727,7 @@ class Product_Manager(Product_Dict_Manager):
         list_data = getRow_ots(rows)
 
         list_bid_filemd5s = []
+        list_attachments = []
         set_docids = set([docid])
         set_md5s = set()
 
@@ -747,12 +754,13 @@ class Product_Manager(Product_Dict_Manager):
                     set_md5s.add(_filemd5)
                     _da = {attachment_filemd5:_filemd5}
                     _attach = attachment(_da)
-                    _attach.fix_columns(ots_client,[attachment_classification],True)
-                    if _attach.getProperties().get(attachment_classification,"")=="招标文件":
-                        list_bid_filemd5s.append(_filemd5)
-        if len(list_bid_filemd5s)==0:
-            return None
-        return ",".join(list(set(list_bid_filemd5s)))
+                    if _attach.fix_columns(ots_client,[attachment_classification,attachment_filetype],True):
+                        _da[attachment_classification] = _attach.getProperties().get(attachment_classification)
+                        _da[attachment_filetype] = _attach.getProperties().get(attachment_filetype)
+                        list_attachments.append(_da)
+                        if _attach.getProperties().get(attachment_classification,"")=="招标文件":
+                            list_bid_filemd5s.append(_filemd5)
+        return list_attachments,",".join(list(set(list_bid_filemd5s)))
 
 
 
@@ -1044,8 +1052,8 @@ def fix_product_data():
         #                      "docid":docid})
         #     _doc.fix_columns(ots_client,["doctitle"],True)
         #     dp.setValue(DOCUMENT_PRODUCT_DOCTITLE,_doc.getProperties().get("doctitle"),True)
-        bid_filemd5s = Product_Manager.get_bid_filemd5s(docid,ots_client)
-        if bid_filemd5s is not None:
+        list_attachments,bid_filemd5s = Product_Manager.get_bid_filemd5s(docid,ots_client)
+        if bid_filemd5s!="":
             dp.setValue(DOCUMENT_PRODUCT_BID_FILEMD5S,bid_filemd5s,True)
 
         dp.setValue(DOCUMENT_PRODUCT_ORIGINAL_NAME,dpt.getProperties().get(DOCUMENT_PRODUCT_TMP_NAME,""),True)
@@ -1486,9 +1494,51 @@ def clean_product_dict_interface():
     mt = MultiThreadHandler(task_queue,_handle,None,30)
     mt.run()
 
+def fix_attachment():
+    ots_client = getConnect_ots()
+    bool_query = BoolQuery(must_queries=[
+        RangeQuery("docid",1)
+    ])
+    task_queue = Queue()
+    rows,next_token,total_count,is_all_succeed = ots_client.search(Document_product_table_name,Document_product_table_name+"_index",
+                                                                   SearchQuery(bool_query,sort=Sort(sorters=[FieldSort("status")]),get_total_count=True,limit=100),
+                                                                   columns_to_get=ColumnsToGet(["docid"],return_type=ColumnReturnType.SPECIFIED))
+    list_data = getRow_ots(rows)
+    for _data in list_data:
+        task_queue.put(_data)
+    print("%d/%d"%(task_queue.qsize(),total_count))
+    while next_token:
+        rows,next_token,total_count,is_all_succeed = ots_client.search(Document_product_table_name,Document_product_table_name+"_index",
+                                                                       SearchQuery(bool_query,next_token=next_token,get_total_count=True,limit=100),
+                                                                       columns_to_get=ColumnsToGet(["docid"],return_type=ColumnReturnType.SPECIFIED))
+        list_data = getRow_ots(rows)
+        for _data in list_data:
+            task_queue.put(_data)
+        print("%d/%d"%(task_queue.qsize(),total_count))
+
+    def _handle(item,result_queue):
+        _product = Document_product(item)
+        docid = _product.getProperties().get("docid")
+
+
+        list_attachments,bid_filemd5s = Product_Manager.get_bid_filemd5s(docid,ots_client)
+        if len(list_attachments)>0:
+            _product.setValue(DOCUMENT_PRODUCT_ATTACHMENTS,json.dumps(list_attachments,ensure_ascii=False),True)
+            _product.setValue(DOCUMENT_PRODUCT_HAS_ATTACHMENTS,1,True)
+
+        if bid_filemd5s!="":
+            _product.setValue(DOCUMENT_PRODUCT_BID_FILEMD5S,bid_filemd5s,True)
+            _product.setValue(DOCUMENT_PRODUCT_HAS_BIDFILE,1,True)
+
+        _product.update_row(ots_client)
+
+    mt = MultiThreadHandler(task_queue,_handle,None,30)
+    mt.run()
+
 if __name__ == '__main__':
 
-    test()
+    # test()
+    fix_attachment()
     # start_process_product()
     # print(getMD5('11936c56f2dd1426764e317ca2e8e1a7'+'&&鱼跃'))
     # print(Product_Manager.get_bid_filemd5s(155415770,getConnect_ots()))

+ 6 - 3
BaseDataMaintenance/maxcompute/documentDumplicate.py

@@ -1028,7 +1028,7 @@ def check_time(json_time_less,json_time_greater):
                         return False
     return True
 
-def check_dumplicate_rule(docid_less,docid_greater,fingerprint_less,fingerprint_greater,project_codes_less,project_codes_greater,tenderee_less,tenderee_greater,agency_less,agency_greater,win_tenderer_less,win_tenderer_greater,bidding_budget_less,bidding_budget_greater,win_bid_price_less,win_bid_price_greater,project_name_less,project_name_greater,doctitle_refine_less,doctitle_refine_greater,extract_count_less,extract_count_greater,docchannel_less,docchannel_greater,page_time_less,page_time_greater,product_less,product_greater,nlp_enterprise_less,nlp_enterprise_greater,package_less,package_greater,json_time_less,json_time_greater,min_counts,b_log=False,hard_level=1):
+def check_dumplicate_rule(docid_less,docid_greater,fingerprint_less,fingerprint_greater,project_codes_less,project_codes_greater,tenderee_less,tenderee_greater,agency_less,agency_greater,win_tenderer_less,win_tenderer_greater,bidding_budget_less,bidding_budget_greater,win_bid_price_less,win_bid_price_greater,project_name_less,project_name_greater,doctitle_refine_less,doctitle_refine_greater,extract_count_less,extract_count_greater,docchannel_less,docchannel_greater,page_time_less,page_time_greater,product_less,product_greater,nlp_enterprise_less,nlp_enterprise_greater,package_less,package_greater,json_time_less,json_time_greater,province_less,province_greater,city_less,city_greater,district_less,district_greater,min_counts,b_log=False,hard_level=1):
     if fingerprint_less==fingerprint_greater and getLength(fingerprint_less)>0:
         return 1
     if isinstance(project_codes_less,str):
@@ -1070,8 +1070,11 @@ def check_dumplicate_rule(docid_less,docid_greater,fingerprint_less,fingerprint_
     else:
         base_prob = 0.6
     _prob = base_prob*same_count/all_count
-    if _prob<0.1 and min(extract_count_less,extract_count_greater)<=3:
-        _prob = 0.15
+    if min(extract_count_less,extract_count_greater)<=3:
+        if _prob<0.1:
+            _prob = 0.15
+        if province_less!=province_greater:
+            return 0
     if _prob<0.1:
         return _prob
 

+ 1 - 1
BaseDataMaintenance/model/oracle/GongGaoTemp.py

@@ -25,7 +25,7 @@ dict_oracle2ots = {"WEB_SOURCE_NO":"web_source_no",
 dict_channel_table = {"114":"bxkc.T_CAI_GOU_YI_XIANG_TEMP",
                       "117":"bxkc.T_CHAN_QUAN_JIAO_YI_TEMP",
                       "51":"bxkc.T_GONG_GAO_BIAN_GENG_TEMP",
-                      "103":"bxkc.T_KONG_ZHI_JIA_TEMP",
+                      "106":"bxkc.T_KONG_ZHI_JIA_TEMP",
                       "115":"bxkc.T_PAI_MAI_CHU_RANG_TEMP",
                       "116":"bxkc.T_TU_DI_KUANG_CHAN_TEMP",
                       "103":"bxkc.T_ZHAO_BIAO_DA_YI_TEMP",

+ 1 - 1
BaseDataMaintenance/model/oracle/KongZhiJiaTemp.py

@@ -9,7 +9,7 @@ class KongZhiJiaTemp(GongGaoTemp):
     def __init__(self,_dict):
         GongGaoTemp.__init__(self,_dict)
         self.table_name = "bxkc.T_KONG_ZHI_JIA_TEMP"
-        self.setValue("docchannel",103,True)
+        self.setValue("docchannel",106,True)
 
     def getPrimary_keys(self):
         return ["ID"]

+ 1 - 1
BaseDataMaintenance/model/ots/BaseModel.py

@@ -42,7 +42,7 @@ class BaseModel():
             if _key=="all_columns":
                 continue
             _v = self.getProperties().get(_key)
-            if _v is not None and _v!="":
+            if _v is not None:
                 if isinstance(_v,list):
                     _v = json.dumps(_v)
                 _list.append((_key,_v))

+ 51 - 27
BaseDataMaintenance/model/ots/document.py

@@ -84,6 +84,18 @@ class Document(BaseModel):
     def getPrimary_keys(self):
         return ["partitionkey","docid"]
 
+    def getAttribute_turple(self):
+        _list = []
+        for _key in self.getAttribute_keys():
+            if _key=="all_columns":
+                continue
+            _v = self.getProperties().get(_key)
+            if _v is not None and _v!="":
+                if isinstance(_v,list):
+                    _v = json.dumps(_v)
+                _list.append((_key,_v))
+        return _list
+
     # def delete_row(self,ots_client):
     #     raise NotImplementedError()
 
@@ -308,26 +320,38 @@ def turn_document_status():
         #     ],
         #     # must_not_queries=[WildcardQuery("DX004354*")]
         # )
-        #
-        # rows,next_token,total_count,is_all_succeed = ots_client.search("document","document_index",
-        #                                                                SearchQuery(bool_query,sort=Sort(sorters=[FieldSort("docid",SortOrder.DESC)]),limit=100,get_total_count=True),
-        #                                                                columns_to_get=ColumnsToGet([document_area],return_type=ColumnReturnType.SPECIFIED))
-        # list_data = getRow_ots(rows)
-        # print(total_count)
-        # _count = len(list_data)
-        # for _data in list_data:
-        #     _document = Document(_data)
-        #     task_queue.put(_document)
-        # while next_token:
-        #     rows,next_token,total_count,is_all_succeed = ots_client.search("document","document_index",
-        #                                                                    SearchQuery(bool_query,next_token=next_token,limit=100,get_total_count=True),
-        #                                                                    columns_to_get=ColumnsToGet([document_area],return_type=ColumnReturnType.SPECIFIED))
-        #     list_data = getRow_ots(rows)
-        #     _count += len(list_data)
-        #     print("%d/%d"%(_count,total_count))
-        #     for _data in list_data:
-        #         _document = Document(_data)
-        #         task_queue.put(_document)
+        bool_query = BoolQuery(
+            must_queries=[
+                RangeQuery("crtime","2023-08-30 15:00:00","2023-08-30 23:59:59"),
+                NestedQuery("page_attachments",ExistsQuery("page_attachments.fileMd5"))
+            ],
+            must_not_queries=[WildcardQuery("attachmenttextcon","*")]
+
+        )
+
+        rows,next_token,total_count,is_all_succeed = ots_client.search("document","document_index",
+                                                                       SearchQuery(bool_query,sort=Sort(sorters=[FieldSort("docid",SortOrder.DESC)]),limit=100,get_total_count=True),
+                                                                       columns_to_get=ColumnsToGet(["docid"],return_type=ColumnReturnType.SPECIFIED))
+        list_data = getRow_ots(rows)
+        print(total_count)
+        _count = len(list_data)
+        for _data in list_data:
+            _document = Document(_data)
+            _attachment = _data.get(document_attachmenttextcon,"")
+            if _attachment=="":
+                task_queue.put(_document)
+        while next_token:
+            rows,next_token,total_count,is_all_succeed = ots_client.search("document","document_index",
+                                                                           SearchQuery(bool_query,next_token=next_token,limit=100,get_total_count=True),
+                                                                           columns_to_get=ColumnsToGet(["docid"],return_type=ColumnReturnType.SPECIFIED))
+            list_data = getRow_ots(rows)
+            _count += len(list_data)
+            print("%d/%d"%(_count,total_count))
+            for _data in list_data:
+                _document = Document(_data)
+                _attachment = _data.get(document_attachmenttextcon,"")
+                if _attachment=="":
+                    task_queue.put(_document)
 
         # docids = [223820830,224445409]
         # for docid in docids:
@@ -335,13 +359,13 @@ def turn_document_status():
         #              document_partitionkey:int(docid)%500+1,
         #              }
         #     task_queue.put(Document(_dict))
-        import pandas as pd
-        df = pd.read_excel("G:\\20221212error.xlsx")
-        for docid in df["docid"]:
-            _dict = {document_docid:int(docid),
-                     document_partitionkey:int(docid)%500+1,
-                     }
-            task_queue.put(Document(_dict))
+        # import pandas as pd
+        # df = pd.read_excel("G:\\20221212error.xlsx")
+        # for docid in df["docid"]:
+        #     _dict = {document_docid:int(docid),
+        #              document_partitionkey:int(docid)%500+1,
+        #              }
+        #     task_queue.put(Document(_dict))
         log("task_queue size:%d"%(task_queue.qsize()))
 
     def _handle(item,result_queue,ots_client):

+ 7 - 0
BaseDataMaintenance/model/ots/document_product.py

@@ -47,8 +47,15 @@ DOCUMENT_PRODUCT_ORIGINAL_NAME = "original_name"
 DOCUMENT_PRODUCT_ORIGINAL_BRAND = "original_brand"
 DOCUMENT_PRODUCT_ORIGINAL_SPECS = "original_specs"
 
+
+DOCUMENT_PRODUCT_ATTACHMENTS = "attachments"
 DOCUMENT_PRODUCT_BID_FILEMD5S = "bid_filemd5s"
 
+DOCUMENT_PRODUCT_HAS_BIDFILE = "has_bidfile"
+DOCUMENT_PRODUCT_HAS_ATTACHMENTS = "has_attachments"
+
+DOCUMENT_PRODUCT_PARAMETER_STATUS = "parameter_status"
+
 Document_product_table_name = "document_product2"
 
 class Document_product(BaseModel):