Browse source code

Code for product configuration parameter extraction and related changes

luojiehua 1 year ago
Parent
Commit
5b1f7468c2

+ 53 - 0
BaseDataMaintenance/common/ERNIE_utils.py

@@ -0,0 +1,53 @@
+
+import requests
+import json
+
+def get_access_token():
+    """
+    Use the API Key and Secret Key to obtain an access_token; replace the application API Key and application Secret Key in the example below
+    """
+
+    url = "https://aip.baidubce.com/oauth/2.0/token?grant_type=client_credentials&client_id=gnwVXv96An9qMYqq9eWbeNqk&client_secret=mDsRQbCPsV4N7x28LbwkhTAaLmrrDnXk"
+
+    payload = json.dumps("")
+    headers = {
+        'Content-Type': 'application/json',
+        'Accept': 'application/json'
+    }
+
+    response = requests.request("POST", url, headers=headers, data=payload)
+    return response.json().get("access_token")
+
+def main():
+    url = "https://aip.baidubce.com/rpc/2.0/ai_custom/v1/wenxinworkshop/chat/completions?access_token=" + get_access_token()
+
+    payload = json.dumps({
+        "messages": [
+            {
+                "role": "user",
+                "content": '''
+                名称: 亚低温治疗仪
+
+品牌:GSZ
+
+规格型号:233
+
+数量:1台
+
+单价: 170000.00元
+以上的GSZ是什么牌子
+                '''
+            }
+        ]
+    })
+    headers = {
+        'Content-Type': 'application/json'
+    }
+
+    response = requests.request("POST", url, headers=headers, data=payload)
+
+    print(response.text)
+
+if __name__ == '__main__':
+    print(get_access_token())
+    main()
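Note on this new helper: the application API Key and Secret Key are hardcoded into the token URL above. A minimal sketch of the same token request with the credentials read from environment variables instead (the variable names ERNIE_API_KEY and ERNIE_SECRET_KEY are assumptions for illustration, not part of this commit):

import os
import requests

def get_access_token_from_env():
    # same Baidu OAuth endpoint as above, credentials supplied via the environment
    resp = requests.post(
        "https://aip.baidubce.com/oauth/2.0/token",
        params={
            "grant_type": "client_credentials",
            "client_id": os.environ["ERNIE_API_KEY"],        # assumed variable name
            "client_secret": os.environ["ERNIE_SECRET_KEY"], # assumed variable name
        },
        headers={"Accept": "application/json"},
        timeout=30,
    )
    resp.raise_for_status()
    return resp.json().get("access_token")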

+ 4 - 2
BaseDataMaintenance/dataSource/interface.py

@@ -20,7 +20,7 @@ DEFAULT_TIMEOUT = 3000
 import traceback
 import base64
 
-def getAttachDealInterface(_data,_type,path="",restry=1):
+def getAttachDealInterface(_data,_type,path="",restry=1,kwargs={},url=interface_url,timeout=DEFAULT_TIMEOUT):
     _succeed = False
     _html = ""
     swf_images = []
@@ -32,8 +32,10 @@ def getAttachDealInterface(_data,_type,path="",restry=1):
             else:
                 _json = {"file":_data,
                         "type":_type}
+            if len(kwargs.keys())>0:
+                _json.update(kwargs)
             headers = {"Content-Type":"application/json"}
-            _resp = requests.post(interface_url,data=_json,timeout=DEFAULT_TIMEOUT)
+            _resp = requests.post(url,data=_json,timeout=timeout)
 
             if _resp.status_code==200:
                 _result = json.loads(_resp.content.decode())
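The new kwargs, url and timeout parameters let a caller target a different conversion endpoint and forward extra fields into the request body without touching the defaults. product_attachment.py below calls it exactly this way, roughly:

# usage as in process_parameters_handler further down in this commit;
# localpath and _filetype come from the attachment row in that handler
_data_base64 = base64.b64encode(open(localpath, "rb").read())
_success, _html, swf_images, classification = getAttachDealInterface(
    _data_base64, _filetype,
    url="http://192.168.2.102:15011/convert",       # non-default conversion service
    kwargs={"page_no": "1,-1", "max_bytes": "-1"},  # merged into the request json
    timeout=6000)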

+ 14 - 9
BaseDataMaintenance/maintenance/dataflow.py

@@ -2276,6 +2276,9 @@ class Dataflow_dumplicate(Dataflow):
         fingerprint_less = document_less["fingerprint"]
         extract_count_less = document_less["extract_count"]
         web_source_no_less = document_less.get("web_source_no")
+        province_less = document_less.get("province")
+        city_less = document_less.get("city")
+        district_less = document_less.get("district")
 
         document_greater = _dict2
         docid_greater = _dict2["docid"]
@@ -2296,12 +2299,15 @@ class Dataflow_dumplicate(Dataflow):
         fingerprint_greater = document_greater["fingerprint"]
         extract_count_greater = document_greater["extract_count"]
         web_source_no_greater = document_greater.get("web_source_no")
+        province_greater = document_greater.get("province")
+        city_greater = document_greater.get("city")
+        district_greater = document_greater.get("district")
 
         hard_level=1
         if web_source_no_less==web_source_no_greater=="17397-3":
             hard_level=2
 
-        return check_dumplicate_rule(docid_less,docid_greater,fingerprint_less,fingerprint_greater,project_codes_less,project_codes_greater,tenderee_less,tenderee_greater,agency_less,agency_greater,win_tenderer_less,win_tenderer_greater,bidding_budget_less,bidding_budget_greater,win_bid_price_less,win_bid_price_greater,project_name_less,project_name_greater,doctitle_refine_less,doctitle_refine_greater,extract_count_less,extract_count_greater,docchannel_less,docchannel_greater,page_time_less,page_time_greater,product_less,product_greater,nlp_enterprise_less,nlp_enterprise_greater,package_less,package_greater,json_time_less,json_time_greater,min_counts,b_log=b_log,hard_level=hard_level)
+        return check_dumplicate_rule(docid_less,docid_greater,fingerprint_less,fingerprint_greater,project_codes_less,project_codes_greater,tenderee_less,tenderee_greater,agency_less,agency_greater,win_tenderer_less,win_tenderer_greater,bidding_budget_less,bidding_budget_greater,win_bid_price_less,win_bid_price_greater,project_name_less,project_name_greater,doctitle_refine_less,doctitle_refine_greater,extract_count_less,extract_count_greater,docchannel_less,docchannel_greater,page_time_less,page_time_greater,product_less,product_greater,nlp_enterprise_less,nlp_enterprise_greater,package_less,package_greater,json_time_less,json_time_greater,province_less,province_greater,city_less,city_greater,district_less,district_greater,min_counts,b_log=b_log,hard_level=hard_level)
 
 
     def dumplicate_check_bak(self,_dict1,_dict2,min_counts,b_log=False):
@@ -2535,8 +2541,6 @@ class Dataflow_dumplicate(Dataflow):
                             _dict["confidence"] = confidence
                             _dict["min_counts"] = total_count
 
-                            print("check====",item.get("docid"),_dict.get("docid"),confidence)
-
                             if not confidence<0.1:
                                 list_data.append(_dict)
                 all_time = time.time()-_time
@@ -2794,7 +2798,7 @@ class Dataflow_dumplicate(Dataflow):
 
         return list_rules,table_name,table_index
 
-    def producer_flow_dumplicate(self,process_count,status_from,columns=[document_tmp_status,document_tmp_save,document_tmp_page_time,document_tmp_docchannel,document_tmp_tenderee,document_tmp_agency,document_tmp_doctitle,document_tmp_sub_docs_json,document_tmp_extract_json,document_attachment_extract_status,document_update_document]):
+    def producer_flow_dumplicate(self,process_count,status_from,columns=[document_tmp_status,document_tmp_save,document_tmp_page_time,document_tmp_docchannel,document_tmp_tenderee,document_tmp_agency,document_tmp_doctitle,document_tmp_sub_docs_json,document_tmp_extract_json,document_attachment_extract_status,document_update_document,document_province,document_city,document_district]):
         q_size = self.queue_dumplicate.qsize()
         log("dumplicate queue size %d"%(q_size))
         if q_size>process_count//3:
@@ -2843,8 +2847,8 @@ class Dataflow_dumplicate(Dataflow):
 
     def flow_dumpcate_comsumer(self):
         from multiprocessing import Process
-        process_count = 3
-        thread_count = 15
+        process_count = 2
+        thread_count = 20
         list_process = []
         def start_thread():
             mt = MultiThreadHandler(self.queue_dumplicate,self.dumplicate_comsumer_handle,None,thread_count,1,need_stop=False,restart=True,timeout=600,ots_client=self.ots_client)
@@ -3890,7 +3894,7 @@ class Dataflow_dumplicate(Dataflow):
                 singleNum_keys = _rule["singleNum_keys"]
                 contain_keys = _rule["contain_keys"]
                 multiNum_keys = _rule["multiNum_keys"]
-                self.add_data_by_query(item,base_list,set_docid,_query,confidence,table_name=table_name,table_index=table_index,singleNum_keys=singleNum_keys,contain_keys=contain_keys,multiNum_keys=multiNum_keys,columns=[document_tmp_status,document_tmp_save,document_tmp_page_time,document_tmp_docchannel,document_tmp_tenderee,document_tmp_agency,document_tmp_doctitle_refine,document_tmp_sub_docs_json,document_tmp_extract_json,document_tmp_web_source_no,document_tmp_fingerprint,document_attachment_extract_status])
+                self.add_data_by_query(item,base_list,set_docid,_query,confidence,table_name=table_name,table_index=table_index,singleNum_keys=singleNum_keys,contain_keys=contain_keys,multiNum_keys=multiNum_keys,columns=[document_tmp_status,document_tmp_save,document_tmp_page_time,document_tmp_docchannel,document_tmp_tenderee,document_tmp_agency,document_tmp_doctitle_refine,document_tmp_sub_docs_json,document_tmp_extract_json,document_tmp_web_source_no,document_tmp_fingerprint,document_attachment_extract_status,document_province,document_city,document_district])
                 _i += step
 
 
@@ -4066,7 +4070,8 @@ class Dataflow_dumplicate(Dataflow):
 
 
     def test_dumplicate(self,docid):
-        columns=[document_tmp_status,document_tmp_page_time,document_tmp_docchannel,document_tmp_tenderee,document_tmp_agency,document_tmp_doctitle,document_tmp_sub_docs_json,document_tmp_extract_json,document_tmp_web_source_no,document_tmp_fingerprint,document_attachment_extract_status]
+        # columns=[document_tmp_status,document_tmp_page_time,document_tmp_docchannel,document_tmp_tenderee,document_tmp_agency,document_tmp_doctitle,document_tmp_sub_docs_json,document_tmp_extract_json,document_tmp_web_source_no,document_tmp_fingerprint,document_attachment_extract_status]
+        columns=[document_tmp_status,document_tmp_save,document_tmp_page_time,document_tmp_docchannel,document_tmp_tenderee,document_tmp_agency,document_tmp_doctitle,document_tmp_sub_docs_json,document_tmp_extract_json,document_attachment_extract_status,document_update_document,document_province,document_city,document_district]
         bool_query = BoolQuery(must_queries=[
             TermQuery("docid",docid)
         ])
@@ -4168,7 +4173,7 @@ if __name__ == '__main__':
     df_dump = Dataflow_dumplicate(start_delete_listener=False)
     # df_dump.start_flow_dumplicate()
     a = time.time()
-    df_dump.test_dumplicate(350493205)
+    df_dump.test_dumplicate(349638765)
     # df_dump.test_merge([292315564],[287890754])
     # df_dump.flow_remove_project_tmp()
     print("takes",time.time()-a)

+ 37 - 4
BaseDataMaintenance/maintenance/preproject/remove_dump.py

@@ -28,8 +28,7 @@ def drop_dump_data():
     log("to drop preproject dump data:%d"%total_count)
     for _data in list_data:
         task_queue.put(_data)
-    mt = MultiThreadHandler(task_queue,drop_data,None,30)
-    mt.run()
+
     while next_token:
         rows,next_token,total_count,is_all_succeed = ots_client.search("preproject_dump","preproject_dump_index",
                                                                        SearchQuery(bool_query,next_token=next_token,get_total_count=True,limit=100),
@@ -37,7 +36,40 @@ def drop_dump_data():
         list_data = getRow_ots(rows)
         for _data in list_data:
             task_queue.put(_data)
-        mt.run()
+    mt = MultiThreadHandler(task_queue,drop_data,None,30)
+    mt.run()
+
+def drop_preproject_data():  # distinct name: a module-level drop_data handler is already referenced in drop_dump_data above
+
+    def drop_data(item,result_queue):
+        preproject = Preproject(item)
+        preproject.delete_row(ots_client)
+
+
+    task_queue = Queue()
+    ots_client = getConnect_ots()
+    bool_query = BoolQuery(must_queries=[
+        TermQuery("status",5)
+    ]
+    )
+    rows,next_token,total_count,is_all_succeed = ots_client.search("preproject","preproject_index",
+                                                                   SearchQuery(bool_query,sort=Sort(sorters=[FieldSort(preproject_tenderee)]),get_total_count=True,limit=100),
+                                                                   columns_to_get=ColumnsToGet(return_type=ColumnReturnType.NONE))
+    list_data = getRow_ots(rows)
+    log("to drop preproject dump data:%d"%total_count)
+    for _data in list_data:
+        task_queue.put(_data)
+
+    while next_token:
+        rows,next_token,total_count,is_all_succeed = ots_client.search("preproject","preproject_index",
+                                                                       SearchQuery(bool_query,next_token=next_token,get_total_count=True,limit=100),
+                                                                       columns_to_get=ColumnsToGet(return_type=ColumnReturnType.NONE))
+        list_data = getRow_ots(rows)
+        for _data in list_data:
+            task_queue.put(_data)
+        print(task_queue.qsize(),total_count)
+    mt = MultiThreadHandler(task_queue,drop_data,None,30)
+    mt.run()
 
 def start_drop_preproject_dump():
 
@@ -46,5 +78,6 @@ def start_drop_preproject_dump():
     scheduler.start()
 
 if __name__ == '__main__':
-    drop_dump_data()
+    # drop_dump_data()
+    drop_preproject_data()
     # start_drop_preproject_dump()

+ 13 - 79
BaseDataMaintenance/maintenance/product/1.py

@@ -1,81 +1,15 @@
 
 
-from fuzzywuzzy import fuzz
-import Levenshtein
-
-
-s1 = "abcd"
-s2 = "abcdefgh"
-
-print(fuzz.ratio(s1,s2))
-print(Levenshtein.ratio(s1,s2))
-
-
-print(Levenshtein.jaro("1abdd","1abbd"))
-
-print((4/5+4/5+4/4)/3)
-print((5/5+5/5+3/5)/3)
-
-from sklearn.cluster import KMeans
-
-
-km = KMeans(n_clusters=2)
-x = [[1,1,22,2,2,2,2],
-     [3,1,22,2,2,2,2],
-     [1.5,1,22,2,2,2,2]]
-km.fit(x)
-
-a = '''
-bidding_budget double,
-    brand_specs string,
-    province string,
-    city STRING,
-    district string,
-    create_time string,
-    dict_name_id string,
-    docchannel bigint,
-    docid bigint,
-    doctitle string,
-    full_name string,
-    industry string,
-    info_type string,
-    page_time string,
-    page_time_year string,
-    procurement_system STRING,
-    project_code string,
-    project_name string,
-    quantity bigint,
-    quantity_unit string,
-    supplier string,
-    tenderee string,
-    tenderee_contact string,
-    tenderee_phone string,
-    update_time string,
-    win_bid_price double,
-    win_tenderer string,
-    win_tenderer_manager string,
-    win_tenderer_phone string,
-    dict_brand_id string,
-    dict_specs_id string,
-    dump_id string,
-    total_price double,
-    unit_price double,
-    bid_filemd5s string
-'''
-
-list_c = []
-for b in a.split("\n"):
-     c = b.strip()
-     if c=="":
-          continue
-     d = c.split(" ")[0]
-     list_c.append(d)
-print(",".join(list_c))
-
-print("BENEHEARTD6".lower()=="BeneHeartD6".lower())
-
-
-
-
-
-
+import re
+pattern="(^|★|:|:|\s+)(?P<title_1>(?P<title_1_index_0_0>第?)(?P<title_1_index_1_1>[一二三四五六七八九十ⅠⅡⅢⅣⅤⅥⅦⅧⅨⅩⅪⅫ]+)(?P<title_1_index_2_0>[、章册部\.::]))|" \
+        "([\s★\*]*)(?P<title_3>(?P<title_3_index_0_0>.{,3}?)(?P<title_3_index_0_1>[ⅠⅡⅢⅣⅤⅥⅦⅧⅨⅩⅪⅫ]+)(?P<title_3_index_0_2>))|" \
+        "([\s★\*]*)(?P<title_4>(?P<title_4_index_0_0>.{,3}?第?)(?P<title_4_index_1_1>[一二三四五六七八九十]+)(?P<title_4_index_2_0>[节章册部\.::、、]))|" \
+        "([\s★\*]*)(?P<title_11>(?P<title_11_index_0_0>.{,3}?\d{1,2}[\..、\s\-]\d{1,2}[\..、\s\-]\d{1,2}[\..、\s\-])(?P<title_11_index_1_1>\d{1,2})(?P<title_11_index_2_0>[\..、\s\-]?))|" \
+        "([\s★\*]*)(?P<title_10>(?P<title_10_index_0_0>.{,3}?\d{1,2}[\..、\s\-]\d{1,2}[\..、\s\-])(?P<title_10_index_1_1>\d{1,2})(?P<title_10_index_2_0>[\..、\s\-]?))|" \
+        "([\s★\*]*)(?P<title_7>(?P<title_7_index_0_0>.{,3}?\d{1,2}[\..、\s\-])(?P<title_7_index_1_1>\d{1,2})(?P<title_7_index_2_0>[\..、\s\-]?))|" \
+        "([\s★\*]*)(?P<title_6>(?P<title_6_index_0_0>.{,3}?包?)(?P<title_6_index_0_1>\d{1,2})(?P<title_6_index_2_0>[\..、\s\-]?))|" \
+        "([\s★\*]*)(?P<title_15>(?P<title_15_index_0_0>.{,3}?(?)(?P<title_15_index_1_1>\d{1,2})(?P<title_15_index_2_0>)))|" \
+        "([\s★\*]*)(?P<title_17>(?P<title_17_index_0_0>.{,3}?(?)(?P<title_17_index_1_1>[a-wA-W]+)(?P<title_17_index_2_0>)))|" \
+        "([\s★\*]*)(?P<title_19>(?P<title_19_index_0_0>.{,3}?(?)(?P<title_19_index_1_1>[一二三四五六七八九十ⅠⅡⅢⅣⅤⅥⅦⅧⅨⅩⅪⅫ]+)(?P<title_19_index_2_0>)))" \
+""
+print(re.search(pattern,"9.球囊按压"))

+ 651 - 0
BaseDataMaintenance/maintenance/product/htmlparser.py

@@ -0,0 +1,651 @@
+#coding:utf8
+
+import re
+
+import logging
+logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
+
+
+from bs4 import BeautifulSoup
+import copy
+
+from BaseDataMaintenance.maintenance.product.productUtils import *
+
+def getTrs(tbody):
+    # collect all the tr elements
+    trs = []
+    if tbody.name=="table":
+        body = tbody.find("tbody",recursive=False)
+        if body is not None:
+            tbody = body
+    objs = tbody.find_all(recursive=False)
+    for obj in objs:
+        if obj.name=="tr":
+            trs.append(obj)
+        if obj.name=="tbody" or obj.name=="table":
+            for tr in obj.find_all("tr",recursive=False):
+                trs.append(tr)
+    return trs
+
+def fixSpan(tbody):
+    # expand colspan/rowspan so that merged cells are duplicated into every position they span
+    #trs = tbody.findChildren('tr', recursive=False)
+
+    trs = getTrs(tbody)
+    ths_len = 0
+    ths = list()
+    trs_set = set()
+    # fill in columns first and rows second, otherwise the table may be parsed in a scrambled way
+    # iterate over every tr
+
+    for indtr, tr in enumerate(trs):
+        ths_tmp = tr.findChildren('th', recursive=False)
+        # do not expand rows that contain nested tables
+        if len(tr.findChildren('table'))>0:
+            continue
+        if len(ths_tmp) > 0:
+            ths_len = ths_len + len(ths_tmp)
+            for th in ths_tmp:
+                ths.append(th)
+            trs_set.add(tr)
+        # iterate over the elements of the row
+        tds = tr.findChildren(recursive=False)
+        for indtd, td in enumerate(tds):
+            # if colspan is set, duplicate the cell into the following positions of the same row
+            if 'colspan' in td.attrs:
+                if str(re.sub("[^0-9]","",str(td['colspan'])))!="":
+                    col = int(re.sub("[^0-9]","",str(td['colspan'])))
+                    if col<100 and len(td.get_text())<1000:
+                        td['colspan'] = 1
+                        for i in range(1, col, 1):
+                            td.insert_after(copy.copy(td))
+
+    for indtr, tr in enumerate(trs):
+        ths_tmp = tr.findChildren('th', recursive=False)
+        # do not expand rows that contain nested tables
+        if len(tr.findChildren('table'))>0:
+            continue
+        if len(ths_tmp) > 0:
+            ths_len = ths_len + len(ths_tmp)
+            for th in ths_tmp:
+                ths.append(th)
+            trs_set.add(tr)
+        # iterate over the elements of the row
+        tds = tr.findChildren(recursive=False)
+        for indtd, td in enumerate(tds):
+            # if rowspan is set, duplicate the cell into the same position of the following rows
+            if 'rowspan' in td.attrs:
+                if str(re.sub("[^0-9]","",str(td['rowspan'])))!="":
+                    row = int(re.sub("[^0-9]","",str(td['rowspan'])))
+                    td['rowspan'] = 1
+                    for i in range(1, row, 1):
+                        # fetch all tds of the next row and insert the cell at the matching position
+                        if indtr+i<len(trs):
+                            tds1 = trs[indtr + i].findChildren(['td','th'], recursive=False)
+                            if len(tds1) >= (indtd) and len(tds1)>0:
+                                if indtd > 0:
+                                    tds1[indtd - 1].insert_after(copy.copy(td))
+                                else:
+                                    tds1[0].insert_before(copy.copy(td))
+                            elif indtd-2>0 and len(tds1) > 0 and len(tds1) == indtd - 1:  # fix tables whose last column was not filled in
+                                tds1[indtd-2].insert_after(copy.copy(td))
+def getTable(tbody):
+    #trs = tbody.findChildren('tr', recursive=False)
+    fixSpan(tbody)
+    trs = getTrs(tbody)
+    inner_table = []
+    for tr in trs:
+        tr_line = []
+        tds = tr.findChildren(['td','th'], recursive=False)
+        if len(tds)==0:
+            tr_line.append([re.sub('\xa0','',tr.get_text()),0]) # 2021/12/21 fix data loss for rows that have no td
+        for td in tds:
+            tr_line.append([re.sub('\xa0','',td.get_text()),0])
+            #tr_line.append([td.get_text(),0])
+        inner_table.append(tr_line)
+    return inner_table
+
+class ParseDocument():
+
+    def __init__(self,_html,auto_merge_table=True):
+        if _html is None:
+            _html = ""
+        self.html = _html
+
+        # self.soup = BeautifulSoup(self.html,"lxml")
+        # self.soup = BeautifulSoup(self.html,"html.parser")
+
+        self.soup = BeautifulSoup(self.html,"html5lib")
+        _body = self.soup.find("body")
+        if _body is not None:
+            self.soup = _body
+        list_obj = self.soup.find_all(recursive=False)
+
+        for obj in list_obj:
+            print("obj",obj.get_text()[:20])
+
+        self.tree = self.buildParsetree(list_obj,auto_merge_table)
+
+        # # inspect the recognized outline tree
+        # for _page in self.childs:
+        #     print("%d============"%_page.page_no)
+        #     for _sentence in _page.childs:
+        #         print(_sentence)
+        #     print("%d================"%_page.page_no)
+        #
+        # if self.parseTree:
+        #     self.parseTree.printParseTree()
+
+    def print_tree(self,tree,append=""):
+        if append=="":
+            self.set_tree_id = set()
+        for t in tree:
+            _id = id(t)
+            if _id in self.set_tree_id:
+                continue
+            self.set_tree_id.add(_id)
+            print(append,t["text"][:20])
+            childs = t["child_title"]
+            self.print_tree(childs,append=append+"  ")
+
+    def is_title_first(self,title):
+        if title in ("一","1","Ⅰ","a","A"):
+            return True
+        return False
+
+    def find_title_by_pattern(self,_text,_pattern="(^|★|:|:|\s+)(?P<title_1>(?P<title_1_index_0_0>第?)(?P<title_1_index_1_1>[一二三四五六七八九十ⅠⅡⅢⅣⅤⅥⅦⅧⅨⅩⅪⅫ]+)(?P<title_1_index_2_0>[、章册部\.::]))|" \
+                                             "([\s★\*]*)(?P<title_3>(?P<title_3_index_0_0>.{,3}?)(?P<title_3_index_0_1>[ⅠⅡⅢⅣⅤⅥⅦⅧⅨⅩⅪⅫ]+)(?P<title_3_index_0_2>))|" \
+                                             "([\s★\*]*)(?P<title_4>(?P<title_4_index_0_0>.{,3}?第?)(?P<title_4_index_1_1>[一二三四五六七八九十]+)(?P<title_4_index_2_0>[节章册部\.::、、]))|" \
+                                             "([\s★\*]*)(?P<title_11>(?P<title_11_index_0_0>.{,3}?\d{1,2}[\..、\s\-]\d{1,2}[\..、\s\-]\d{1,2}[\..、\s\-])(?P<title_11_index_1_1>\d{1,2})(?P<title_11_index_2_0>[\..、\s\-]?))|" \
+                                             "([\s★\*]*)(?P<title_10>(?P<title_10_index_0_0>.{,3}?\d{1,2}[\..、\s\-]\d{1,2}[\..、\s\-])(?P<title_10_index_1_1>\d{1,2})(?P<title_10_index_2_0>[\..、\s\-]?))|" \
+                                             "([\s★\*]*)(?P<title_7>(?P<title_7_index_0_0>.{,3}?\d{1,2}[\..、\s\-])(?P<title_7_index_1_1>\d{1,2})(?P<title_7_index_2_0>[\..、\s\-]?))|" \
+                                             "([\s★\*]*)(?P<title_6>(?P<title_6_index_0_0>.{,3}?包?)(?P<title_6_index_0_1>\d{1,2})(?P<title_6_index_2_0>[\..、\s\-]?))|" \
+                                             "([\s★\*]*)(?P<title_15>(?P<title_15_index_0_0>.{,3}?(?)(?P<title_15_index_1_1>\d{1,2})(?P<title_15_index_2_0>)))|" \
+                                             "([\s★\*]*)(?P<title_17>(?P<title_17_index_0_0>.{,3}?(?)(?P<title_17_index_1_1>[a-wA-W]+)(?P<title_17_index_2_0>)))|" \
+                                             "([\s★\*]*)(?P<title_19>(?P<title_19_index_0_0>.{,3}?(?)(?P<title_19_index_1_1>[一二三四五六七八九十ⅠⅡⅢⅣⅤⅥⅦⅧⅨⅩⅪⅫ]+)(?P<title_19_index_2_0>)))" \
+                              ):
+        _se = re.search(_pattern,_text)
+        groups = []
+        if _se is not None:
+            _gd = _se.groupdict()
+            for k,v in _gd.items():
+                if v is not None:
+                    groups.append((k,v))
+        if len(groups):
+            groups.sort(key=lambda x:x[0])
+            return groups
+        return None
+
+    def make_increase(self,_sort,_title,_add=1):
+        if len(_title)==0 and _add==0:
+            return ""
+        if len(_title)==0 and _add==1:
+            return _sort[0]
+        _index = _sort.index(_title[-1])
+        next_index = (_index+_add)%len(_sort)
+        next_chr = _sort[next_index]
+        if _index==len(_sort)-1:
+            _add = 1
+        else:
+            _add = 0
+        return next_chr+self.make_increase(_sort,_title[:-1],_add)
+
+    def get_next_title(self,_title):
+        if re.search("^\d+$",_title) is not None:
+            return str(int(_title)+1)
+        if re.search("^[一二三四五六七八九十百]+$",_title) is not None:
+            _next_title = self.make_increase(['一','二','三','四','五','六','七','八','九','十'],re.sub("[十百]",'',_title))
+            _next_title = list(_next_title)
+            _next_title.reverse()
+            if _next_title[-1]!="十":
+                if len(_next_title)>=2:
+                    _next_title.insert(-1,'十')
+            if len(_next_title)>=4:
+                _next_title.insert(-3,'百')
+            if _title[0]=="十":
+                if _next_title=="十":
+                    _next_title = ["二","十"]
+                _next_title.insert(0,"十")
+            _next_title = "".join(_next_title)
+            return _next_title
+        if re.search("^[a-z]+$",_title) is not None:
+            _next_title = self.make_increase([chr(i+ord('a')) for i in range(26)],_title)
+            _next_title = list(_next_title)
+            _next_title.reverse()
+            return "".join(_next_title)
+        if re.search("^[A-Z]+$",_title) is not None:
+            _next_title = self.make_increase([chr(i+ord('A')) for i in range(26)],_title)
+            _next_title = list(_next_title)
+            _next_title.reverse()
+            return "".join(_next_title)
+        if re.search("^[ⅠⅡⅢⅣⅤⅥⅦⅧⅨⅩⅪⅫ]$",_title) is not None:
+            _sort = ["Ⅰ","Ⅱ","Ⅲ","Ⅳ","Ⅴ","Ⅵ","Ⅶ","Ⅷ","Ⅸ","Ⅹ","Ⅺ","Ⅻ"]
+            _index = _sort.index(_title)
+            if _index<len(_sort)-1:
+                return _sort[_index+1]
+            return None
+
+    def buildParsetree(self,list_obj,auto_merge_table=True):
+
+        self.parseTree = None
+        trees = []
+        list_length = []
+        for obj in list_obj[:200]:
+            if obj.name!="table":
+                list_length.append(len(obj.get_text()))
+        if len(list_length)>0:
+            max_length = max(list_length)
+        else:
+            max_length = 40
+
+        print("max_length",max_length)
+
+
+        list_data = []
+        last_table_index = None
+        last_table_columns = None
+        last_table = None
+        for obj_i in range(len(list_obj)):
+            obj = list_obj[obj_i]
+            _type = "sentence"
+            _text = obj.text
+            if obj.name=="table":
+                _type = "table"
+                _text = str(obj)
+            _append = False
+            sentence_title = None
+            sentence_title_text = None
+            sentence_groups = None
+            title_index = None
+            next_index = None
+            parent_title = None
+            title_before = None
+            title_after = None
+            title_next = None
+            childs = []
+
+
+            list_table = None
+
+
+            if _type=="sentence":
+                sentence_groups = self.find_title_by_pattern(_text[:10])
+                if sentence_groups:
+                    # print("sentence_groups",sentence_groups)
+                    sentence_title = sentence_groups[0][0]
+                    sentence_title_text = sentence_groups[0][1]
+                    title_index = sentence_groups[-2][1]
+                    title_before = sentence_groups[1][1]
+                    title_after = sentence_groups[-1][1]
+                    next_index = self.get_next_title(title_index)
+
+            if _type=="sentence":
+                if sentence_title is None and len(list_data)>0 and list_data[-1]["sentence_title"] is not None and list_data[-1]["line_width"]>=max_length*0.6:
+                    list_data[-1]["text"] += _text
+                    list_data[-1]["line_width"] = len(_text)
+                    _append = True
+                elif sentence_title is None and len(list_data)>0 and _type==list_data[-1]["type"]:
+                    if list_data[-1]["line_width"]>=max_length*0.7:
+                        list_data[-1]["text"] += _text
+                        list_data[-1]["line_width"] = len(_text)
+                        _append = True
+
+            if _type=="table":
+                _soup = BeautifulSoup(_text,"lxml")
+                _table = _soup.find("table")
+                if _table is not None:
+                    list_table = getTable(_table)
+                    table_columns = len(list_table[0])
+
+                    if auto_merge_table:
+                        if last_table_index is not None and abs(obj_i-last_table_index)<=1 and last_table_columns is not None and last_table_columns==table_columns:
+                            if last_table is not None:
+                                trs = getTrs(_table)
+                                last_tbody = BeautifulSoup(last_table["text"],"lxml")
+                                _table = last_tbody.find("table")
+                                last_trs = getTrs(_table)
+                                _append = True
+
+                                for _line in list_table:
+                                    last_table["list_table"].append(_line)
+                                if len(last_trs)>0:
+                                    for _tr in trs:
+                                        last_trs[-1].insert_after(copy.copy(_tr))
+                                    last_table["text"] = re.sub("</?html>|</?body>","",str(last_tbody))
+
+                                last_table_index = obj_i
+                                last_table_columns = len(list_table[-1])
+
+
+            if not _append:
+                _data = {"type":_type, "text":_text,"list_table":list_table,"line_width":len(_text),"sentence_title":sentence_title,"title_index":title_index,
+                         "sentence_title_text":sentence_title_text,"sentence_groups":sentence_groups,"parent_title":parent_title,
+                         "child_title":childs,"title_before":title_before,"title_after":title_after,"title_next":title_next,"next_index":next_index}
+
+                if _type=="table":
+                    last_table = _data
+                    last_table_index = obj_i
+                    if list_table:
+                        last_table_columns = len(list_table[-1])
+
+                if sentence_title is not None:
+                    if len(list_data)>0:
+                        if self.is_title_first(title_index):
+                            for i in range(1,len(list_data)+1):
+                                _d = list_data[-i]
+                                if _d["sentence_title"] is not None:
+                                    _data["parent_title"] = _d
+                                    _d["child_title"].append(_data)
+                                    break
+                        else:
+                            _find = False
+                            for i in range(1,len(list_data)+1):
+                                _d = list_data[-i]
+                                if i==1 and _d.get("sentence_title")==sentence_title and title_before==_d["title_before"] and title_after==_d["title_after"]:
+                                    _data["parent_title"] = _d["parent_title"]
+                                    _d["title_next"] = _data
+                                    if _d["parent_title"] is not None:
+                                        _d["parent_title"]["child_title"].append(_data)
+                                    _find = True
+                                    break
+                                if _d.get("sentence_title")==sentence_title and title_before==_d["title_before"] and title_after==_d["title_after"]:
+                                    if _d["next_index"]==title_index and _d["title_next"] is None:
+                                        _data["parent_title"] = _d["parent_title"]
+                                        _d["title_next"] = _data
+                                        if _d["parent_title"] is not None:
+                                            _d["parent_title"]["child_title"].append(_data)
+                                        _find = True
+                                        break
+                            if not _find:
+                                if len(list_data)>0:
+                                    for i in range(1,len(list_data)+1):
+                                        _d = list_data[-i]
+                                        if _d.get("sentence_title") is not None:
+                                            _data["parent_title"] = _d
+                                            _d["child_title"].append(_data)
+                                            break
+
+
+                else:
+                    if len(list_data)>0:
+                        for i in range(1,len(list_data)+1):
+                            _d = list_data[-i]
+                            if _d.get("sentence_title") is not None:
+                                _data["parent_title"] = _d
+                                _d["child_title"].append(_data)
+                                break
+
+                list_data.append(_data)
+
+        return list_data
+
+
+def extract_products(list_data,_product,_param_pattern = "产品名称|采购内存|标的名称|采购内容|(标的|维修|系统|报价构成|商品|产品|物料|物资|货物|设备|采购品|采购条目|物品|材料|印刷品?|采购|物装|配件|资产|耗材|清单|器材|仪器|器械|备件|拍卖物|标的物|物件|药品|药材|药械|货品|食品|食材|品目|^品名|气体|标项|分项|项目|计划|包组|标段|[分子]?包|子目|服务|招标|中标|成交|工程|招标内容)[\))的]?([、\w]{,4}名称|内容|描述)|标的|标项|项目$|商品|产品|物料|物资|货物|设备|采购品|采购条目|物品|材料|印刷品|物装|配件|资产|招标内容|耗材|清单|器材|仪器|器械|备件|拍卖物|标的物|物件|药品|药材|药械|货品|食品|食材|菜名|^品目$|^品名$|^名称|^内容$"):
+    list_result = []
+    for _data_i in range(len(list_data)):
+        _data = list_data[_data_i]
+        _type = _data["type"]
+        _text = _data["text"]
+        table_products = []
+        if _type=="table":
+            list_table = _data["list_table"]
+            if list_table is None:
+                continue
+            _check = True
+            max_length = max([len(a) for a in list_table])
+            min_length = min([len(a) for a in list_table])
+            if min_length<max_length/2:
+                continue
+            list_head_index = []
+            _begin_index = 0
+            head_cell_text = ""
+            for line_i in range(len(list_table[:2])):
+                line = list_table[line_i]
+                line_text = ",".join([cell[0] for cell in line])
+                for cell_i in range(len(line)):
+                    cell = line[cell_i]
+                    cell_text = cell[0]
+                    if len(cell_text)<10 and re.search(_param_pattern,cell_text) is not None and re.search("单价|数量|总价|规格|品牌|型号|用途|要求|采购量",line_text) is not None:
+                        _begin_index = line_i+1
+
+                        list_head_index.append(cell_i)
+            for line_i in range(len(list_table)):
+                line = list_table[line_i]
+                for cell_i in list_head_index:
+                    cell = line[cell_i]
+                    cell_text = cell[0]
+                    head_cell_text += cell_text
+
+            # print("===head_cell_text",head_cell_text)
+            if re.search("招标人|采购人|项目编号|项目名称|金额|^\d+$",head_cell_text) is not None:
+                list_head_index = []
+
+
+            for line in list_table:
+                line_text = ",".join([cell[0] for cell in line])
+                for cell_i in range(len(line)):
+                    cell = line[cell_i]
+                    cell_text = cell[0]
+                    if cell_text is not None and _product is not None and len(cell_text)<len(_product)*10 and re.search(_product,cell_text) is not None and re.search("单价|数量|总价|规格|品牌|型号|用途|要求|采购量",line_text) is not None:
+                        list_head_index.append(cell_i)
+
+            list_head_index = list(set(list_head_index))
+            if len(list_head_index)>0:
+                for line_i in range(_begin_index,len(list_table)):
+                    line = list_table[line_i]
+                    for cell_i in list_head_index:
+                        if cell_i>=len(line):
+                            continue
+                        cell = line[cell_i]
+                        _text = cell[0]
+                        if re.search(_param_pattern,_text) is None:
+                            table_products.append(_text)
+        if len(table_products)>0:
+            if min([len(x) for x in table_products])>0:
+                list_result.extend(table_products)
+    list_result = [a for a in list_result if len(a)>1 and len(a)<20]
+    return list_result
+
+
+def get_childs(childs):
+    list_data = []
+    for _child in childs:
+        list_data.append(_child)
+        childs2 = _child.get("child_title",[])
+
+        if len(childs2)>0:
+            for _child2 in childs2:
+                list_data.extend(get_childs([_child2]))
+    return list_data
+
+def get_range_data_by_childs(list_data,childs):
+    range_data = []
+    list_child = get_childs(childs)
+    list_index = []
+    set_child = set([id(x) for x in list_child])
+    for _data_i in range(len(list_data)):
+        _data = list_data[_data_i]
+        _id = id(_data)
+        if _id in set_child:
+            list_index.append(_data_i)
+    if len(list_index)>0:
+        range_data = list_data[min(list_index):max(list_index)+1]
+    return range_data
+
+def get_correct_product(product,products):
+    list_data = []
+    for p in products:
+        is_sim = is_similar(product,p)
+        _d = {"product":p,"distance":abs(len(product)-len(p)),"is_sim":is_sim}
+        list_data.append(_d)
+    list_data.sort(key=lambda x:x["distance"])
+    for _d in list_data:
+        is_sim = _d["is_sim"]
+        if is_sim:
+            return _d["product"]
+    return product
+
+def get_childs_text(childs,_product,products,is_end=False):
+    _text = ""
+
+    for _child in childs:
+        child_text = _child.get("text")
+
+        for p in products:
+
+            if child_text.find(_product)<0 and  child_text.find(p)>=0:
+                is_end = True
+                break
+        if is_end:
+            break
+
+
+        _text += _child.get("text")+"\n"
+        childs2 = _child.get("child_title",[])
+
+
+        if len(childs2)>0:
+            for _child2 in childs2:
+                child_text,is_end = get_childs_text([_child2],_product,products)
+                if is_end:
+                    break
+                else:
+                    _text += child_text
+    return _text,is_end
+
+def extract_parameters_by_tree(_product,products,list_data,_data_i,parent_title,list_result,):
+    _data = list_data[_data_i]
+    childs = _data.get("child_title",[])
+    if len(childs)>0:
+        child_text,_ = get_childs_text([_data],_product,products)
+        list_result.append(child_text)
+        return True
+    if parent_title is not None:
+        childs = parent_title.get("child_title",[])
+        if len(childs)>0:
+
+
+            print(parent_title["text"])
+            for c in childs:
+                print("11",c["text"])
+                # print([a["text"] for a in c["child_title"]])
+            range_data = get_range_data_by_childs(list_data[_data_i:],childs)
+            for c in range_data:
+                print("22",c["text"])
+            p_text = ""
+            _find = False
+            for pdata in range_data:
+                ptype = _data["type"]
+                ptext = pdata["text"]
+                for p in products:
+                    if ptext.find(_product)<0 and  ptext.find(p)>=0:
+                        _find = True
+                        print("p find",p)
+                        break
+                if _find:
+                    print("======break")
+                    print(ptext)
+                    break
+                p_text += ptext+"\n"
+            if len(p_text)>0:
+                list_result.append(p_text)
+                return True
+    return False
+
+def extract_parameters_by_table(_product,_param_pattern,list_data,_data_i,list_result):
+    _data = list_data[_data_i]
+    _text = _data["text"]
+    list_table = _data["list_table"]
+    if list_table is not None:
+        _check = True
+        max_length = max([len(a) for a in list_table])
+        min_length = min([len(a) for a in list_table])
+        text_line_first = ",".join(a[0] for a in list_table[0])
+        if min_length<max_length/2:
+            return
+        last_data = list_data[_data_i-1]
+        _flag = False
+        if last_data["type"]=="sentence" and last_data["text"].find(_product)>=0:
+            _flag = True
+        # print(text_line_first,"text_line_first",re.search(_param_pattern,text_line_first) is not None and text_line_first.find(_product)>=0)
+        if re.search(_param_pattern,text_line_first) is not None and text_line_first.find(_product)>=0:
+            _flag = True
+        if _flag:
+            list_result.append(_text)
+        else:
+            list_head_index = []
+            for line in list_table[:2]:
+                for cell_i in range(len(line)):
+                    cell = line[cell_i]
+                    cell_text = cell[0]
+                    if len(cell_text)<20 and re.search(_param_pattern,cell_text) is not None:
+                        list_head_index.append(cell_i)
+            list_head_index = list(set(list_head_index))
+            for line in list_table:
+                for cell in line:
+                    cell_text = cell[0]
+                    if len(cell_text)>50 and len(re.findall("\d+",cell_text))>10 and cell_text.find(_product)>=0:
+                        list_result.append(cell_text)
+                    if len(cell_text)<len(_product)*10 and str(cell_text).find(_product)>=0:
+                        for _index in list_head_index:
+                            _cell = line[_index]
+                            if len(cell[0])>0:
+                                print("add on table",_cell[0])
+                                list_result.append(_cell[0])
+
+def extract_product_parameters(list_data,_product):
+    _param_pattern = "配置要求|技术要求|技术参数|具体参数|规格参数|参数要求|技术需求|配置清单|(质量|技术).{,10}要求|明细及参数|验收标准|^参数$"
+    list_result = []
+    products = extract_products(list_data,_product)
+
+    _product = get_correct_product(_product,products)
+    print("===",_product,products)
+    for _data_i in range(len(list_data)):
+        _data = list_data[_data_i]
+        _type = _data["type"]
+        _text = _data["text"]
+        if _type=="sentence":
+            if _text.find(_product)>=0:
+                print("_text",_text,_data["sentence_title"])
+                parent_title = _data.get("parent_title")
+                if re.search(_param_pattern,_text) is not None:
+                    extract_parameters_by_tree(_product,products,list_data,_data_i,parent_title,list_result)
+
+                elif parent_title is not None:
+                    parent_text = parent_title.get("text","")
+                    print("parent_text",parent_text)
+                    if re.search(_param_pattern,parent_text) is not None:
+                        extract_parameters_by_tree(_product,products,list_data,_data_i,parent_title,list_result)
+
+                    else:
+                        parent_title = parent_title.get("parent_title")
+                        if parent_title is not None:
+                            parent_text = parent_title.get("text","")
+                            # print("parent_text",parent_text)
+                            if re.search(_param_pattern,parent_text) is not None:
+                                extract_parameters_by_tree(_product,products,list_data,_data_i,parent_title,list_result)
+
+        elif _type=="table":
+            extract_parameters_by_table(_product,_param_pattern,list_data,_data_i,list_result)
+
+
+    # for i in range(len(list_result)):
+    #     print("result%d"%i,list_result[i])
+    list_result.sort(key=lambda x:len(re.findall('[^.][0-9a-zA-Z]+[^.]',x)), reverse=True)
+
+    return list_result[0] if len(list_result)>0 else None
+
+
+if __name__ == '__main__':
+
+    _html = open("download/7421e0c9d12dc6290ead4040df0e3cd0.html", "r", encoding="utf8").read()
+
+    pd = ParseDocument(_html)
+
+    list_data = pd.tree
+    pd.print_tree(list_data)
+
+
+    _text = extract_product_parameters(list_data,"4K高清摄像系统")
+    print("extract_text",_text)
+
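Each node that buildParsetree returns is a plain dict; the parent_title, child_title and title_next entries link the dicts into the heading tree that print_tree walks. A minimal sketch, using only the keys shown above, of collecting every table that sits beneath one heading node:

def tables_under(node):
    # hypothetical helper over the node dicts built by buildParsetree
    tables = []
    if node["type"] == "table" and node["list_table"]:
        tables.append(node["list_table"])
    for child in node["child_title"]:
        tables.extend(tables_under(child))
    return tables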

+ 217 - 0
BaseDataMaintenance/maintenance/product/product_attachment.py

@@ -0,0 +1,217 @@
+
+
+
+from apscheduler.schedulers.blocking import BlockingScheduler
+from tablestore import *
+from BaseDataMaintenance.dataSource.source import getConnect_ots,getAuth,is_internal
+from BaseDataMaintenance.dataSource.interface import *
+from multiprocessing import Queue as PQueue,Process
+from BaseDataMaintenance.model.ots.document_product import *
+from BaseDataMaintenance.model.ots.attachment import *
+from BaseDataMaintenance.common.Utils import *
+from BaseDataMaintenance.common.ossUtils import *
+from BaseDataMaintenance.maintenance.product.htmlparser import *
+import oss2
+from BaseDataMaintenance.common.multiThread import MultiThreadHandler
+
+parameter_status_no_bidfile = -1
+parameter_status_to_process = 0
+parameter_status_process_succeed = 1
+parameter_status_process_failed = 2
+
+class Product_Attachment_Processor():
+
+    def __init__(self,):
+        self.ots_client = getConnect_ots()
+        self.product_attachment_queue = PQueue()
+        self.product_attachment_queue_size = 100
+        self.set_product_attachment = set()
+        self.attachment_hub_url = "https://attachment-hub.oss-cn-hangzhou.aliyuncs.com/"
+        self.auth = getAuth()
+        oss2.defaults.connection_pool_size = 100
+        oss2.defaults.multiget_num_threads = 20
+        if is_internal:
+            self.bucket_url = "http://oss-cn-hangzhou-internal.aliyuncs.com"
+        else:
+            self.bucket_url = "http://oss-cn-hangzhou.aliyuncs.com"
+        log("bucket_url:%s"%(self.bucket_url))
+        self.attachment_bucket_name = "attachment-hub"
+        self.bucket = oss2.Bucket(self.auth,self.bucket_url,self.attachment_bucket_name)
+        self.current_path = os.path.dirname(__file__)
+        self.download_path = "%s/%s"%(self.current_path,"download")
+
+    def process_parameters_producer(self,):
+
+        if self.product_attachment_queue.qsize()>self.product_attachment_queue_size/3:
+            return
+        bool_query = BoolQuery(must_queries=[
+            TermQuery("parameter_status",parameter_status_to_process)
+        ])
+        list_id = []
+        rows,next_token,total_count,is_all_succeed = self.ots_client.search(Document_product_table_name,Document_product_table_name+"_index",
+                                                                            SearchQuery(bool_query,sort=Sort(sorters=[FieldSort("parameter_status")]),limit=100,get_total_count=True),
+                                                                            ColumnsToGet([DOCUMENT_PRODUCT_BID_FILEMD5S,DOCUMENT_PRODUCT_NAME,DOCUMENT_PRODUCT_ORIGINAL_NAME],return_type=ColumnReturnType.SPECIFIED))
+
+        list_data = getRow_ots(rows)
+        for data in list_data:
+            _id = data.get(DOCUMENT_PRODUCT_ID)
+            if _id in self.set_product_attachment:
+                continue
+            self.product_attachment_queue.put(data)
+            list_id.append(_id)
+        while next_token:
+            if self.product_attachment_queue.qsize()>=self.product_attachment_queue_size:
+                break
+            rows,next_token,total_count,is_all_succeed = self.ots_client.search(Document_product_table_name,Document_product_table_name+"_index",
+                                                                                SearchQuery(bool_query,next_token=next_token,limit=100,get_total_count=True),
+                                                                                ColumnsToGet([DOCUMENT_PRODUCT_BID_FILEMD5S],return_type=ColumnReturnType.SPECIFIED))
+            list_data = getRow_ots(rows)
+            for data in list_data:
+                _id = data.get(DOCUMENT_PRODUCT_ID)
+                if _id in self.set_product_attachment:
+                    continue
+                self.product_attachment_queue.put(data)
+                list_id.append(_id)
+        self.set_product_attachment =  set(list_id)
+
+    def process_parameters_handler(self,item,result_queue):
+        bid_filemd5s = item.get(DOCUMENT_PRODUCT_BID_FILEMD5S)
+        product_name = item.get(DOCUMENT_PRODUCT_NAME)
+        product_original_name = item.get(DOCUMENT_PRODUCT_ORIGINAL_NAME)
+        list_product = []
+        if product_name is not None:
+            list_product.append(product_name)
+        if product_original_name is not None:
+            list_product.extend(product_original_name.split("_"))
+        dp = Document_product(item)
+        if bid_filemd5s is None or bid_filemd5s=="" or len(list_product)==0:
+            dp.setValue(DOCUMENT_PRODUCT_PARAMETER_STATUS,parameter_status_no_bidfile)
+            dp.update_row(self.ots_client)
+            return
+        list_filemd5 = bid_filemd5s.split(",")
+        _find = False
+        for _filemd5 in list_filemd5:
+            if _find:
+                break
+            atta = attachment({attachment_filemd5:_filemd5})
+            if atta.fix_columns(self.ots_client,[attachment_path,attachment_filetype],True):
+                objectPath = atta.getProperties().get(attachment_path)
+                _filetype = atta.getProperties().get(attachment_filetype)
+                if _filetype in ("doc","xls"):
+                    continue
+                localpath = "%s/%s.%s"%(self.download_path,_filemd5,_filetype)
+                localhtml = "%s/%s.%s"%(self.download_path,_filemd5,"html")
+                download_succeed = False
+                try:
+                    if not os.path.exists(localpath):
+                        download_succeed = downloadFile(self.bucket,objectPath,localpath)
+                    else:
+                        download_succeed = True
+                except Exception as e:
+                    download_succeed = False
+                if download_succeed:
+                    try:
+                        _html = ""
+                        if os.path.exists(localhtml):
+                            _html = open(localhtml,"r",encoding="utf8").read()
+                            _success = True
+                        if len(_html)>10:
+                            _success = True
+                        else:
+                            _data_base64 = base64.b64encode(open(localpath,"rb").read())
+                            _success,_html,swf_images,classification = getAttachDealInterface(_data_base64,_filetype,url="http://192.168.2.102:15011/convert",kwargs={'page_no': '1,-1',"max_bytes":"-1"},timeout=6000)
+                            if _success:
+                                localhtml = "%s/%s.%s"%(self.download_path,_filemd5,"html")
+                                with open(localhtml,"w",encoding="utf8") as f:
+                                    f.write(_html)
+                        if _success:
+                            if len(_html)>5:
+                                list_data = ParseDocument(_html).tree
+                                list_text = []
+                                for _product in list_product:
+                                    _text = extract_product_parameters(list_data,_product)
+                                    if _text is not None:
+                                        list_text.append(_text)
+                                if len(list_text)>0:
+                                    list_text.sort(key=lambda x:len(x),reverse=True)
+                                    _text = list_text[0]
+                                    _find = True
+                                    dp.setValue(DOCUMENT_PRODUCT_PARAMETER,_text,True)
+                                    dp.setValue(DOCUMENT_PRODUCT_PARAMETER_STATUS,parameter_status_process_succeed,True)
+                                    dp.update_row(self.ots_client)
+                                    return
+                            else:
+                                log("product attachment process filemd5 %s has no content"%(_filemd5))
+                    except Exception as e:
+                        traceback.print_exc()
+                    finally:
+                        try:
+                            # if os.path.exists(localpath):
+                            #     os.remove(localpath)
+                            pass
+                        except Exception as e:
+                            pass
+
+        if not _find:
+            dp.setValue(DOCUMENT_PRODUCT_PARAMETER_STATUS,parameter_status_process_failed,True)
+            dp.update_row(self.ots_client)
+
+
+    def start_process(self):
+        mt = MultiThreadHandler(self.product_attachment_queue,self.process_parameters_handler,None,3,need_stop=False,restart=True)
+        mt.run()
+
+    def process_parameters_comsumer(self,):
+
+        # process_count = 2
+        # list_process = []
+        # for i in range(process_count):
+        #     p = Process(target=self.start_process)
+        #     list_process.append(p)
+        # for p in list_process:
+        #     p.start()
+        # for p in list_process:
+        #     p.join()
+
+        self.start_process()
+
+    def start_process_parameters(self):
+        scheduler = BlockingScheduler()
+        scheduler.add_job(self.process_parameters_producer,"cron",second="*/10")
+        scheduler.add_job(self.process_parameters_comsumer,"cron",second="*/30")
+        scheduler.start()
+
+def start_process_parameters():
+    pap = Product_Attachment_Processor()
+    pap.start_process_parameters()
+
+def change_parameters_status():
+    ots_client =getConnect_ots()
+    bool_query = BoolQuery(must_queries=[
+        RangeQuery("parameter_status",-1)
+    ],
+                           must_not_queries=[
+        TermQuery("parameter_status",parameter_status_to_process),
+        TermQuery("parameter_status",parameter_status_process_succeed)
+    ])
+    list_data = []
+    rows,next_token,total_count,is_all_succeed = ots_client.search(Document_product_table_name,Document_product_table_name+"_index",
+                                                                        SearchQuery(bool_query,sort=Sort(sorters=[FieldSort("parameter_status")]),limit=100,get_total_count=True),
+                                                                        ColumnsToGet([DOCUMENT_PRODUCT_BID_FILEMD5S,DOCUMENT_PRODUCT_NAME,DOCUMENT_PRODUCT_ORIGINAL_NAME],return_type=ColumnReturnType.SPECIFIED))
+
+    list_data.extend(getRow_ots(rows))
+    print("total_count",total_count)
+    while next_token:
+        rows,next_token,total_count,is_all_succeed = ots_client.search(Document_product_table_name,Document_product_table_name+"_index",
+                                                                            SearchQuery(bool_query,next_token=next_token,limit=100,get_total_count=True),
+                                                                            ColumnsToGet([DOCUMENT_PRODUCT_BID_FILEMD5S],return_type=ColumnReturnType.SPECIFIED))
+        list_data.extend(getRow_ots(rows))
+    for data in list_data:
+        dp = Document_product(data)
+        dp.setValue(DOCUMENT_PRODUCT_PARAMETER_STATUS,parameter_status_to_process,True)
+        dp.setValue(DOCUMENT_PRODUCT_PARAMETER,"",True)
+        dp.update_row(ots_client)
+
+if __name__ == '__main__':
+    start_process_parameters()
+    # change_parameters_status()
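For reference, the lifecycle that the parameter_status constants at the top of this file define and that the processor drives:

# summary of document_product.parameter_status as used in this file
PARAMETER_STATUS = {
    parameter_status_no_bidfile:      "no bid file or no product name; skipped",
    parameter_status_to_process:      "queued, waiting for parameter extraction",
    parameter_status_process_succeed: "parameters extracted and written back",
    parameter_status_process_failed:  "processed, but no parameters recovered",
}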

+ 61 - 11
BaseDataMaintenance/maintenance/product/products.py

@@ -680,9 +680,15 @@ class Product_Manager(Product_Dict_Manager):
             _product.setValue(DOCUMENT_PRODUCT_ORIGINAL_BRAND,original_brand,True)
             _product.setValue(DOCUMENT_PRODUCT_ORIGINAL_SPECS,original_specs,True)
 
-            bid_filemd5s = self.get_bid_filemd5s(docid,self.ots_client)
-            if bid_filemd5s is not None:
+            list_attachments,bid_filemd5s = self.get_bid_filemd5s(docid,self.ots_client)
+            if len(list_attachments)>0:
+                _product.setValue(DOCUMENT_PRODUCT_ATTACHMENTS,json.dumps(list_attachments,ensure_ascii=False),True)
+                _product.setValue(DOCUMENT_PRODUCT_HAS_ATTACHMENTS,1,True)
+
+            if bid_filemd5s!="":
                 _product.setValue(DOCUMENT_PRODUCT_BID_FILEMD5S,bid_filemd5s,True)
+                _product.setValue(DOCUMENT_PRODUCT_HAS_BIDFILE,1,True)
+                _product.setValue(DOCUMENT_PRODUCT_PARAMETER_STATUS,0,True)
 
             if not is_legal_data:
                 _status = randint(501,550)
@@ -721,6 +727,7 @@ class Product_Manager(Product_Dict_Manager):
         list_data = getRow_ots(rows)
 
         list_bid_filemd5s = []
+        list_attachments = []
         set_docids = set([docid])
         set_md5s = set()
 
@@ -747,12 +754,13 @@ class Product_Manager(Product_Dict_Manager):
                     set_md5s.add(_filemd5)
                     _da = {attachment_filemd5:_filemd5}
                     _attach = attachment(_da)
-                    _attach.fix_columns(ots_client,[attachment_classification],True)
-                    if _attach.getProperties().get(attachment_classification,"")=="招标文件":
-                        list_bid_filemd5s.append(_filemd5)
-        if len(list_bid_filemd5s)==0:
-            return None
-        return ",".join(list(set(list_bid_filemd5s)))
+                    if _attach.fix_columns(ots_client,[attachment_classification,attachment_filetype],True):
+                        _da[attachment_classification] = _attach.getProperties().get(attachment_classification)
+                        _da[attachment_filetype] = _attach.getProperties().get(attachment_filetype)
+                        list_attachments.append(_da)
+                        if _attach.getProperties().get(attachment_classification,"")=="招标文件":
+                            list_bid_filemd5s.append(_filemd5)
+        return list_attachments,",".join(list(set(list_bid_filemd5s)))
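
This hunk changes the helper's contract: it used to return either a comma-joined md5 string or None, and now always returns a pair, so the callers updated in this commit swap their None checks for emptiness checks. A hedged restatement of the inferred signature (the repo itself is untyped):

    from typing import List, Tuple

    def get_bid_filemd5s(docid: int, ots_client) -> Tuple[List[dict], str]:
        """Return (list_attachments, bid_filemd5s).

        list_attachments: one dict per fixable attachment, keyed by the
        attachment_* column constants (filemd5, classification, filetype).
        bid_filemd5s: comma-joined, de-duplicated md5s of attachments
        classified as 招标文件, or "" when there are none.
        """
        ...

Because the empty cases are now ([], "") rather than None, both call sites can use plain emptiness checks and the two fields stay independently settable.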
 
 
 
@@ -1044,8 +1052,8 @@ def fix_product_data():
         #                      "docid":docid})
         #     _doc.fix_columns(ots_client,["doctitle"],True)
         #     dp.setValue(DOCUMENT_PRODUCT_DOCTITLE,_doc.getProperties().get("doctitle"),True)
-        bid_filemd5s = Product_Manager.get_bid_filemd5s(docid,ots_client)
-        if bid_filemd5s is not None:
+        list_attachments,bid_filemd5s = Product_Manager.get_bid_filemd5s(docid,ots_client)
+        if bid_filemd5s!="":
             dp.setValue(DOCUMENT_PRODUCT_BID_FILEMD5S,bid_filemd5s,True)
 
         dp.setValue(DOCUMENT_PRODUCT_ORIGINAL_NAME,dpt.getProperties().get(DOCUMENT_PRODUCT_TMP_NAME,""),True)
@@ -1486,9 +1494,51 @@ def clean_product_dict_interface():
     mt = MultiThreadHandler(task_queue,_handle,None,30)
     mt.run()
 
+def fix_attachment():
+    ots_client = getConnect_ots()
+    bool_query = BoolQuery(must_queries=[
+        RangeQuery("docid",1)
+    ])
+    task_queue = Queue()
+    rows,next_token,total_count,is_all_succeed = ots_client.search(Document_product_table_name,Document_product_table_name+"_index",
+                                                                   SearchQuery(bool_query,sort=Sort(sorters=[FieldSort("status")]),get_total_count=True,limit=100),
+                                                                   columns_to_get=ColumnsToGet(["docid"],return_type=ColumnReturnType.SPECIFIED))
+    list_data = getRow_ots(rows)
+    for _data in list_data:
+        task_queue.put(_data)
+    print("%d/%d"%(task_queue.qsize(),total_count))
+    while next_token:
+        rows,next_token,total_count,is_all_succeed = ots_client.search(Document_product_table_name,Document_product_table_name+"_index",
+                                                                       SearchQuery(bool_query,next_token=next_token,get_total_count=True,limit=100),
+                                                                       columns_to_get=ColumnsToGet(["docid"],return_type=ColumnReturnType.SPECIFIED))
+        list_data = getRow_ots(rows)
+        for _data in list_data:
+            task_queue.put(_data)
+        print("%d/%d"%(task_queue.qsize(),total_count))
+
+    def _handle(item,result_queue):
+        _product = Document_product(item)
+        docid = _product.getProperties().get("docid")
+
+        list_attachments,bid_filemd5s = Product_Manager.get_bid_filemd5s(docid,ots_client)
+        if len(list_attachments)>0:
+            _product.setValue(DOCUMENT_PRODUCT_ATTACHMENTS,json.dumps(list_attachments,ensure_ascii=False),True)
+            _product.setValue(DOCUMENT_PRODUCT_HAS_ATTACHMENTS,1,True)
+
+        if bid_filemd5s!="":
+            _product.setValue(DOCUMENT_PRODUCT_BID_FILEMD5S,bid_filemd5s,True)
+            _product.setValue(DOCUMENT_PRODUCT_HAS_BIDFILE,1,True)
+
+        _product.update_row(ots_client)
+
+    mt = MultiThreadHandler(task_queue,_handle,None,30)
+    mt.run()
+
 if __name__ == '__main__':
 
-    test()
+    # test()
+    fix_attachment()
     # start_process_product()
     # print(getMD5('11936c56f2dd1426764e317ca2e8e1a7'+'&&鱼跃'))
     # print(Product_Manager.get_bid_filemd5s(155415770,getConnect_ots()))
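
fix_attachment() is a one-off backfill: it pages through the whole product index by docid via next_token, queues every row, then re-derives the attachment columns with 30 worker threads. MultiThreadHandler is this repo's own helper; its (queue, handler, result_queue, thread_count) call shape is read off the call sites above, not from its definition. A standard-library sketch of the same fill-then-drain pattern:

    from queue import Queue, Empty
    from threading import Thread

    def run_pool(items, handle, thread_count=30):
        # Fill the queue first, then drain it with a fixed-size pool --
        # the shape of MultiThreadHandler(task_queue, _handle, None, 30).run().
        q = Queue()
        for item in items:
            q.put(item)

        def worker():
            while True:
                try:
                    item = q.get_nowait()
                except Empty:
                    return
                handle(item, None)  # None mirrors the unused result_queue

        threads = [Thread(target=worker) for _ in range(thread_count)]
        for t in threads:
            t.start()
        for t in threads:
            t.join()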

+ 6 - 3
BaseDataMaintenance/maxcompute/documentDumplicate.py

@@ -1028,7 +1028,7 @@ def check_time(json_time_less,json_time_greater):
                         return False
     return True
 
-def check_dumplicate_rule(docid_less,docid_greater,fingerprint_less,fingerprint_greater,project_codes_less,project_codes_greater,tenderee_less,tenderee_greater,agency_less,agency_greater,win_tenderer_less,win_tenderer_greater,bidding_budget_less,bidding_budget_greater,win_bid_price_less,win_bid_price_greater,project_name_less,project_name_greater,doctitle_refine_less,doctitle_refine_greater,extract_count_less,extract_count_greater,docchannel_less,docchannel_greater,page_time_less,page_time_greater,product_less,product_greater,nlp_enterprise_less,nlp_enterprise_greater,package_less,package_greater,json_time_less,json_time_greater,min_counts,b_log=False,hard_level=1):
+def check_dumplicate_rule(docid_less,docid_greater,fingerprint_less,fingerprint_greater,project_codes_less,project_codes_greater,tenderee_less,tenderee_greater,agency_less,agency_greater,win_tenderer_less,win_tenderer_greater,bidding_budget_less,bidding_budget_greater,win_bid_price_less,win_bid_price_greater,project_name_less,project_name_greater,doctitle_refine_less,doctitle_refine_greater,extract_count_less,extract_count_greater,docchannel_less,docchannel_greater,page_time_less,page_time_greater,product_less,product_greater,nlp_enterprise_less,nlp_enterprise_greater,package_less,package_greater,json_time_less,json_time_greater,province_less,province_greater,city_less,city_greater,district_less,district_greater,min_counts,b_log=False,hard_level=1):
     if fingerprint_less==fingerprint_greater and getLength(fingerprint_less)>0:
         return 1
     if isinstance(project_codes_less,str):
@@ -1070,8 +1070,11 @@ def check_dumplicate_rule(docid_less,docid_greater,fingerprint_less,fingerprint_
     else:
         base_prob = 0.6
     _prob = base_prob*same_count/all_count
-    if _prob<0.1 and min(extract_count_less,extract_count_greater)<=3:
-        _prob = 0.15
+    if min(extract_count_less,extract_count_greater)<=3:
+        if _prob<0.1:
+            _prob = 0.15
+        if province_less!=province_greater:
+            return 0
     if _prob<0.1:
         return _prob
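
The behavioral change in this hunk: a low-probability pair of thin extractions (min extract_count <= 3) used to be floored to 0.15 and kept in play; now a province mismatch on such a pair is a hard reject. The added branch, restated standalone with a worked example:

    def low_info_gate(extract_count_less, extract_count_greater,
                      province_less, province_greater, _prob):
        # Restates only the branch added above; the rest of
        # check_dumplicate_rule is unchanged.
        if min(extract_count_less, extract_count_greater) <= 3:
            if _prob < 0.1:
                _prob = 0.15
            if province_less != province_greater:
                return 0   # thin extractions from different provinces never merge
        return _prob

    assert low_info_gate(2, 5, "广东", "湖南", 0.05) == 0     # rejected outright
    assert low_info_gate(2, 5, "广东", "广东", 0.05) == 0.15  # old floor still applies
    assert low_info_gate(4, 5, "广东", "湖南", 0.05) == 0.05  # branch not taken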
 

+ 1 - 1
BaseDataMaintenance/model/ots/BaseModel.py

@@ -42,7 +42,7 @@ class BaseModel():
             if _key=="all_columns":
                 continue
             _v = self.getProperties().get(_key)
-            if _v is not None and _v!="":
+            if _v is not None:
                 if isinstance(_v,list):
                     _v = json.dumps(_v)
                 _list.append((_key,_v))
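
Dropping the _v!="" filter means empty strings now make it into the attribute tuples, so an update_row() can blank a column; change_parameters_status() in this same commit relies on exactly that when it resets DOCUMENT_PRODUCT_PARAMETER to "". A toy demonstration of the relaxed filter (stand-in dict, not the real model):

    import json

    props = {"parameter": "", "name": None, "tags": ["a", "b"]}
    _list = []
    for _key, _v in props.items():
        if _v is not None:                 # "" is no longer filtered out
            if isinstance(_v, list):
                _v = json.dumps(_v)
            _list.append((_key, _v))
    assert ("parameter", "") in _list          # the column can now be cleared
    assert all(k != "name" for k, _ in _list)  # None is still skipped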

+ 12 - 0
BaseDataMaintenance/model/ots/document.py

@@ -84,6 +84,18 @@ class Document(BaseModel):
     def getPrimary_keys(self):
         return ["partitionkey","docid"]
 
+    def getAttribute_turple(self):
+        _list = []
+        for _key in self.getAttribute_keys():
+            if _key=="all_columns":
+                continue
+            _v = self.getProperties().get(_key)
+            if _v is not None and _v!="":
+                if isinstance(_v,list):
+                    _v = json.dumps(_v)
+                _list.append((_key,_v))
+        return _list
+
     # def delete_row(self,ots_client):
     #     raise NotImplementedError()
 

+ 7 - 0
BaseDataMaintenance/model/ots/document_product.py

@@ -47,8 +47,15 @@ DOCUMENT_PRODUCT_ORIGINAL_NAME = "original_name"
 DOCUMENT_PRODUCT_ORIGINAL_BRAND = "original_brand"
 DOCUMENT_PRODUCT_ORIGINAL_SPECS = "original_specs"
 
+
+DOCUMENT_PRODUCT_ATTACHMENTS = "attachments"
 DOCUMENT_PRODUCT_BID_FILEMD5S = "bid_filemd5s"
 
+DOCUMENT_PRODUCT_HAS_BIDFILE = "has_bidfile"
+DOCUMENT_PRODUCT_HAS_ATTACHMENTS = "has_attachments"
+
+DOCUMENT_PRODUCT_PARAMETER_STATUS = "parameter_status"
+
 Document_product_table_name = "document_product2"
 
 class Document_product(BaseModel):
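
Pieced together from this commit, a product row's parameter_status appears to move through three stages; only the 0 assignment is literal in the diff (Product_Manager sets it when a bid file is attached), so the other values below are placeholders:

    PARAMETER_STATUS_NEW = 0               # bid file present, parameters not yet extracted
    parameter_status_to_process = 1        # placeholder: re-queued by change_parameters_status()
    parameter_status_process_succeed = 2   # placeholder: set once extraction succeeds (assumed)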