|
@@ -0,0 +1,1716 @@
|
|
|
+#coding:utf8
|
|
|
+from bs4 import BeautifulSoup
|
|
|
+import json
|
|
|
+import re
|
|
|
+import traceback
|
|
|
+
|
|
|
+import logging
|
|
|
+logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
|
|
|
+
|
|
|
+logger = logging.getLogger(__name__)
|
|
|
+logger.setLevel(logging.INFO)
|
|
|
+from BiddingKG.dl.interface.Preprocessing import tableToText
|
|
|
+from uuid import uuid4
|
|
|
+
|
|
|
def log(msg):
    """Emit *msg* through the module-level logger at INFO level."""
    logger.info(msg)
|
|
|
+
|
|
|
class DotDict(dict):
    """A dict whose keys can also be read and written as attributes."""

    def __getattr__(self, key):
        # Missing keys must surface as AttributeError so hasattr()/getattr()
        # semantics hold for attribute-style access.
        if key in self:
            return self[key]
        raise AttributeError("No attribute '%s'" % key)

    def __setattr__(self, key, value):
        # Attribute assignment is stored as a plain dict entry.
        self[key] = value
|
|
|
+
|
|
|
+
|
|
|
+
|
|
|
def get_tables(soup,dict_table = None):
    """Collect nested <table>/<tbody> elements of *soup* into a dict tree.

    Each node is {"children": [...]} plus, when the element itself holds
    direct <tr> rows, a "table" entry referencing the soup element. Two
    repair passes normalize odd markup by moving rows into the following
    tbody. On the outermost call the tree is squeezed via squeeze_tables.

    :param soup: BeautifulSoup element to scan (may be None)
    :param dict_table: accumulator node used by the recursion
    :return: the (squeezed, on the outermost call) dict tree
    """
    is_first = False
    if dict_table is None:
        dict_table = {"children":[]}
        is_first = True
    if soup and soup.name:
        childs = soup.contents

    else:
        childs = []

    # Repair pattern tr+tbody: move a stray leading row into the tbody.
    _flag = False
    if len(childs)>=2:
        if childs[0].name=="tr" and childs[1].name=="tbody":
            childs[1].insert(0,copy.copy(childs[0]))
            childs[0].decompose()
            _flag = True

    childs_bak = childs
    # Repair pattern tbody+tbody: a first tbody holding exactly one data row
    # is treated as a header row and merged into the second tbody.
    # NOTE(review): _flag is reset here, so the tr+tbody repair above can
    # never influence the is_first check below — confirm intended.
    _flag = False
    if soup and soup.name:
        childs = soup.find_all("tbody",recursive=False)
        if len(childs)>=2:
            if childs[0].name=="tbody" and childs[1].name=="tbody":
                child0_tr = childs[0].find_all("tr",recursive=False)
                has_td_count = 0
                tr_line = None
                for tr in child0_tr:
                    if len(tr.find_all("td",recursive=False))>0:
                        has_td_count += 1
                        tr_line = tr
                if has_td_count==1:
                    childs[1].insert(0,copy.copy(tr_line))
                    childs[0].decompose()
                    _flag = True

    childs = childs_bak
    for child in childs:
        _d = {"children":[]}
        if child.name in ("table","tbody"):
            if len(child.find_all("tr",recursive=False))>0:
                # _d["table"] = str(child)
                _d["table"] = child
                dict_table["children"].append(_d)
        # NOTE(review): when child is NOT a table/tbody, _d is never attached
        # to dict_table, so tables nested below such a child are dropped from
        # the resulting tree — confirm this is intended.
        child_dict_table = get_tables(child,_d)

    if is_first:
        if soup.name in ("table","tbody"):
            if not _flag:
                if len(soup.find_all("tr",recursive=False))>0:
                    # dict_table["table"] = str(soup)
                    dict_table["table"] = soup

        dict_table = squeeze_tables(dict_table)

    return dict_table
|
|
|
def squeeze_tables(dict_table):
    """Prune a nested table-dict tree produced by get_tables.

    Subtrees without any "table" entry are dropped (None is returned); a
    table-less wrapper with exactly one surviving child collapses into that
    child. Nodes carrying a "table" keep their pruned children, or lose the
    "children" key entirely when none survive.

    :param dict_table: node with a "children" list and optional "table"
    :return: the squeezed node, or None when the subtree holds no table
    """
    kept = []
    for child in dict_table["children"]:
        squeezed = squeeze_tables(child)
        if squeezed is not None:
            kept.append(squeezed)

    if dict_table.get("table") is not None:
        # Real table node: keep pruned children, or drop the empty key.
        if kept:
            dict_table["children"] = kept
        else:
            del dict_table["children"]
        return dict_table

    if len(kept) == 1:
        # Collapse a pass-through wrapper onto its only surviving child.
        return kept[0]
    if len(kept) > 1:
        dict_table["children"] = kept
        return dict_table
    return None
|
|
|
+
|
|
|
+
|
|
|
+
|
|
|
+
|
|
|
+
|
|
|
def table_to_tree(soup,json_obj=None):
    """Convert a <table>/<tbody> element into a JSON-style DotDict tree.

    Nested tables (found via get_tables) become child "table" nodes. For the
    node's own table, tableToText flattens it into *text* plus kv pairs; a
    uuid table_id is then inserted in front of the table's DOM content, so
    the flattened text of any ENCLOSING table will contain that id as a
    marker of this table's start (used later by update_table_position).

    :param soup: BeautifulSoup table element
    :param json_obj: accumulator node used by the recursion
    :return: DotDict tree with "tag"/"children" and optionally
        "table_id"/"text"/"kv"
    """


    if json_obj is None:
        json_obj = DotDict({"tag": "table","children":[]})


    dict_table = get_tables(soup)

    children = dict_table.get("children",[])
    for child in children:
        _d = DotDict({"tag": "table","children":[]})
        json_obj["children"].append(_d)
        table = child.get("table")
        if table is not None:
            # NOTE(review): this table_id is generated but never used here;
            # the recursive call generates its own — looks like dead code.
            table_id = str(uuid4())
            table_to_tree(table,_d)

    table = dict_table.get("table")
    if table is not None:
        table_id = str(uuid4())
        json_obj["table_id"] = table_id
        # tableToText rebinds `soup` to the flattened element and also
        # returns kv pairs carrying key_sen_index/value_sen_index offsets.
        soup, kv_list, text = tableToText(table,return_kv=True)
        _flag = False
        if soup and soup.name:
            if soup.contents:
                _flag = True
                soup.contents[0].insert_before(table_id)
        if not _flag:
            soup.insert_before(table_id)
        json_obj["text"] = text
        json_obj["kv"] = kv_list
        # Rewrite the raw *_sen_index offsets into a "position" dict; the
        # sentence numbers stay 0 until tree_reposition fills them in.
        for _d in kv_list:
            _d["position"] = {"key_begin_sentence":0,
                              "key_begin_sentence_start":_d.get("key_sen_index",0),
                              "key_end_sentence":0,
                              "key_end_sentence_end":_d.get("key_sen_index",0)+len(_d.get("key","")),
                              "value_begin_sentence":0,
                              "value_begin_sentence_start":_d.get("value_sen_index",0),
                              "value_end_sentence":0,
                              "value_end_sentence_end":_d.get("value_sen_index",0)+len(_d.get("value",""))
                              }
            if "key_sen_index" in _d:
                _d.pop("key_sen_index")
            if "value_sen_index" in _d:
                _d.pop("value_sen_index")
    return json_obj
|
|
|
+
|
|
|
+
|
|
|
def update_table_position(table, sentence_index):
    """Convert table-local kv offsets into document-level positions.

    Each nested table node carries a synthetic table_id that table_to_tree
    embedded in the DOM, so the root table's flattened text contains those
    ids. This function (1) shifts every nested table's kv offsets by its
    id's position inside the root text, (2) subtracts the lengths of all ids
    occurring before each offset, (3) strips the ids from every table's
    text, and (4) splits the cleaned root text into sentences on "。",
    assigning sentence numbers starting at *sentence_index*.

    :param table: table tree node (dict with optional "table_id"/"kv"/"text"
        and nested "children")
    :param sentence_index: document sentence index of this table's first sentence
    :return: sentence index following the table's last sentence (unchanged
        when the subtree holds no table_id-bearing node)
    """

    def get_table_idx_lengths(list_table_id, index):
        # Total length of all table_ids placed at or before *index*;
        # subtracting it maps an offset in the id-bearing text onto the
        # cleaned (id-free) text.
        _length = 0
        for _d in list_table_id:
            table_id = _d.get("table_id")
            idx = _d.get("idx", -1)
            # BUGFIX: previously compared the closure variable `_idx`
            # (left over from the caller's loop) instead of this entry's
            # own `idx`, so the wrong ids were counted.
            if idx >= 0 and idx <= index:
                _length += len(table_id)
        return _length

    def get_sentence_index(list_sent_span, idx):
        # Sentence number whose [begin, end] span contains idx (0 if none).
        list_sent_span.sort(key=lambda x: x[0])
        for _i in range(len(list_sent_span)):
            if list_sent_span[_i][0] <= idx and idx <= list_sent_span[_i][1]:
                return _i
        return 0

    def get_list_tables(table, list_table=None):
        # Flatten the subtree into a list of nodes that carry a table_id.
        # BUGFIX: the accumulator used to be a mutable default argument,
        # leaking entries between successive calls.
        if list_table is None:
            list_table = []
        if table.get("table_id"):
            list_table.append(table)
        for child in table.get("children", []):
            get_list_tables(child, list_table)
        return list_table

    tables = get_list_tables(table)
    if tables:
        list_table_id = []
        text = tables[0].get("text", "")

        # (1) Shift each table's kv offsets by where its id sits in the root text.
        for table in tables:
            table_id = table.get("table_id")
            if table_id:
                _idx = text.find(table_id)
                list_table_id.append({"table_id": table_id, "idx": _idx})
                if _idx >= 0:
                    for _d in table.get("kv", []):
                        _d["position"]["key_begin_sentence_start"] += _idx
                        _d["position"]["key_end_sentence_end"] += _idx
                        _d["position"]["value_begin_sentence_start"] += _idx
                        _d["position"]["value_end_sentence_end"] += _idx

        # (2) Remove the table_id lengths from every offset.
        for table in tables:
            if table.get("table_id"):
                for _d in table.get("kv", []):
                    for key in ("key_begin_sentence_start", "key_end_sentence_end",
                                "value_begin_sentence_start", "value_end_sentence_end"):
                        _d["position"][key] -= get_table_idx_lengths(list_table_id, _d["position"][key])

        # (3) Strip the ids out of every table's text.
        for table in tables:
            if table.get("table_id"):
                text = table.get("text", "")
                for _d in list_table_id:
                    text = text.replace(_d.get("table_id"), "")
                table["text"] = text

        # (4) Split the cleaned root text into sentences and record spans.
        text = tables[0].get("text", "")
        list_sentence = str(text).split("。")
        list_sent_span = []
        _begin = 0
        for _i in range(len(list_sentence)):
            list_sentence[_i] += "。"
            _end = _begin + len(list_sentence[_i])
            list_sent_span.append([_begin, _end])
            _begin = _end
        tables[0]["sentences"] = list_sentence

        # Map every kv offset onto its sentence number, shifted by sentence_index.
        for table in tables:
            for _d in table.get("kv", []):
                pos = _d["position"]
                pos["key_begin_sentence"] = get_sentence_index(list_sent_span, pos["key_begin_sentence_start"]) + sentence_index
                pos["key_end_sentence"] = get_sentence_index(list_sent_span, pos["key_end_sentence_end"]) + sentence_index
                pos["value_begin_sentence"] = get_sentence_index(list_sent_span, pos["value_begin_sentence_start"]) + sentence_index
                pos["value_end_sentence"] = get_sentence_index(list_sent_span, pos["value_end_sentence_end"]) + sentence_index

        return sentence_index + len(list_sentence)
    return sentence_index
|
|
|
+
|
|
|
+
|
|
|
+
|
|
|
+
|
|
|
+
|
|
|
+
|
|
|
+
|
|
|
+
|
|
|
+
|
|
|
+
|
|
|
+
|
|
|
+
|
|
|
def tree_reposition(tree, sentence_index=None):
    """Assign sentence indices and word offsets to a flat list of output objects.

    Non-table objects occupy exactly one sentence; table objects may span
    several (update_table_position computes their range and may replace the
    object's "sentences"/"text" with cleaned, split versions). Word offsets
    accumulate over the objects' sentence texts.

    :param tree: flat list of dict nodes (from get_outobjs_from_tree)
    :param sentence_index: starting sentence index (defaults to 0)
    """
    if sentence_index is None:
        sentence_index = 0

    offset_begin = 0
    offset_end = 0
    for obj in tree:
        sentence_index += 1
        obj["sentence_index"] = sentence_index
        obj["sentences"] = [obj.get("text", "")]

        if obj.get("tag", "") == "table":
            obj["sentence_index_start"] = sentence_index
            # May rewrite obj["sentences"] / obj["text"] in place.
            end_index = update_table_position(obj, sentence_index)
            obj["sentence_index_end"] = end_index
            sentence_index = end_index
        else:
            # Every kv in a plain text object lives in this single sentence.
            for _d in obj.get("kv", []):
                pos = _d["position"]
                pos["key_begin_sentence"] = sentence_index
                pos["key_end_sentence"] = sentence_index
                pos["value_begin_sentence"] = sentence_index
                pos["value_end_sentence"] = sentence_index

        for part in obj["sentences"]:
            offset_end += len(part)
        obj["wordOffset_begin"] = offset_begin
        obj["wordOffset_end"] = offset_end
        offset_begin = offset_end
|
|
|
+
|
|
|
+
|
|
|
+
|
|
|
+
|
|
|
# Recursively convert a DOM node into a JSON-style dict tree
def dom_to_tree(node):
    """Convert a BeautifulSoup node into a DotDict tree.

    Tag nodes become {"tag", "name", optional "attributes", "children"};
    <table>/<tbody> nodes are delegated to table_to_tree. Text nodes are
    whitespace-normalized and returned as "text" nodes; whitespace-only
    nodes yield None.
    """
    if node.name: # tag node
        json_obj = DotDict({"tag": node.name})
        if node.attrs:
            json_obj["attributes"] = node.attrs

        is_table = False
        if node.name in ("table","tbody"):
            json_obj = table_to_tree(node)
            is_table = True

        if not is_table:
            children = []
            for child in node.contents:
                _child = dom_to_tree(child)
                if _child is not None:
                    children.append(_child)
            if children:
                json_obj["children"] = children
        json_obj["name"] = json_obj.get("tag")
        return json_obj
    elif node.string and node.string.strip(): # pure text node
        _text = node.string.strip()
        _text = re.sub('\xa0','',_text)
        # Re-join whitespace-split fragments: pieces shorter than 3 chars are
        # glued directly, longer ones keep a separating space.
        list_text = re.split("\s",_text)
        _text = ""
        for _t in list_text:
            if len(_t)<3:
                if len(_t)>0:
                    _text += _t
            else:
                _text += _t+" "
        _text = _text.strip()
        return DotDict({"tag":"text","name":"text","text": _text})
    return None # ignore whitespace-only nodes
|
|
|
+
|
|
|
def tree_pop_parent(tree):
    """Recursively strip the "parent" key from a node tree (list or dict).

    Used before JSON serialization since parent links make the tree cyclic.
    """
    if isinstance(tree, list):
        for item in tree:
            tree_pop_parent(item)
    elif isinstance(tree, dict):
        tree.pop("parent", None)
        for item in tree.get("children", []):
            tree_pop_parent(item)
|
|
|
+
|
|
|
+
|
|
|
def html_to_tree(html_content):
    """Parse HTML into an annotated dict tree with key/value extraction.

    Pipeline: BeautifulSoup parse -> dom_to_tree -> extract_kv_from_tree
    (attaches "kv" lists) -> get_outobjs_from_tree (flat output objects) ->
    tree_reposition (sentence indices / word offsets). The flat list is
    discarded here, but the nodes it references are mutated in place.

    :param html_content: raw HTML string
    :return: the annotated dom tree
    """
    # Parse the HTML with BeautifulSoup (lxml backend).
    soup = BeautifulSoup(html_content, "lxml")
    dom_tree = dom_to_tree(soup)
    extract_kv_from_tree(dom_tree)
    list_objs = get_outobjs_from_tree(dom_tree)
    tree_reposition(list_objs)
    return dom_tree
|
|
|
+
|
|
|
def print_tree(dom_tree):
    """Serialize *dom_tree* to pretty-printed JSON, print it, and return it.

    "parent" back-references are stripped first (they would make the tree
    cyclic and non-serializable).

    :param dom_tree: dict/list tree as produced by html_to_tree
    :return: the JSON string
    """
    tree_pop_parent(dom_tree)
    json_output = json.dumps(dom_tree, ensure_ascii=False, indent=2)
    # BUGFIX: the JSON string was previously computed and then discarded
    # (neither printed nor returned).
    print(json_output)
    return json_output
|
|
|
+
|
|
|
# Earlier, looser pattern kept for reference:
# kv_pattern = "\s*(?P<key>.{,10})[::]\s*(?P<value>[^::。,()]+?)(\s+|$|;|;)(?![\u4e00-\u9fa5]+:)"
# Strict key/value pattern: CJK key, ASCII colon, value up to whitespace/punctuation.
kv_pattern = r"(?P<key>[\u4e00-\u9fa5]+):\s*(?P<value>[^\s,。();;]+)"
|
|
|
+
|
|
|
def get_kv_pattern():
    """Demo of key/value extraction with a latin-key colon pattern.

    Runs a hard-coded regex over a sample text, prints each key/value pair,
    and returns the list of (key, value) tuples.

    Improvement: the matches used to be printed only; returning them makes
    the helper usable programmatically (backward compatible — callers that
    ignore the return value see identical behavior).

    :return: list of (key, value) tuples found in the sample text
    """
    text = """
    name: John age: 30 note: invalid;
    """

    # key = latin letters; colon may be ASCII or full-width; value = the
    # rest of the line, provided no further colon follows on that line.
    kv_pattern = r"(?P<key>[a-zA-Z]+)[::](?P<value>.+(?!.*[::]))"

    matches = re.findall(kv_pattern, text)

    for key, value in matches:
        print("{%s}: {%s}" % (key, value))
    return matches
|
|
|
+
|
|
|
def extract_kv_from_sentence(sentence):
    """Extract key/value pairs around colon characters in a sentence.

    The strategy depends on how many colons (":" or ":") the sentence has:
    exactly one near the start -> split the sentence at it; otherwise search
    the stricter ``kv_pattern`` regex inside the slices between consecutive
    colons. Each result is a DotDict with "key", "value" and a "position"
    dict of character offsets (sentence numbers are initialized to 0).

    NOTE(review): in the regex branches the recorded offsets are relative to
    the searched slice, not to the whole sentence — confirm that downstream
    consumers expect this.

    :param sentence: plain-text sentence to scan
    :return: list of DotDict kv entries (possibly empty)
    """
    list_kv = []
    _iter = re.finditer("[::]", sentence)
    if _iter:
        # Collect the span of every colon occurrence.
        list_span = []
        for iter in _iter:
            list_span.append(iter.span())
        if len(list_span)==1:
            _begin,_end = list_span[0]
            if _begin<20 and _end<len(sentence)-1:
                # Single colon near the start: key before it, value after it.
                _d = DotDict({"key":sentence[0:_begin],"value":sentence[_end:]})
                _d["position"] = {"key_begin_sentence":0,
                                  "key_begin_sentence_start":0,
                                  "key_end_sentence":0,
                                  "key_end_sentence_end":_begin,
                                  "value_begin_sentence":0,
                                  "value_begin_sentence_start":_end,
                                  "value_end_sentence":0,
                                  "value_end_sentence_end":len(sentence)
                                  }
                list_kv.append(_d)
            else:
                # Colon too deep or at the very end: fall back to the strict
                # kv_pattern regex over the sentence body.
                _begin = 0
                _end = len(sentence)-1
                iter = re.search(kv_pattern,sentence[_begin:_end])
                if iter is not None:
                    _d = DotDict({})

                    _d["key"] = iter.group("key")
                    _d["value"] = iter.group("value")

                    _d["position"] = {"key_begin_sentence":0,
                                      "key_begin_sentence_start":iter.span("key")[0],
                                      "key_end_sentence":0,
                                      "key_end_sentence_end":iter.span("key")[0]+len(_d.get("key","")),
                                      "value_begin_sentence":0,
                                      "value_begin_sentence_start":iter.span("value")[0],
                                      "value_end_sentence":0,
                                      "value_end_sentence_end":iter.span("value")[0]+len(_d.get("value",""))
                                      }
                    list_kv.append(_d)

        elif len(list_span)>1:

            _begin,_end = list_span[0]
            if _begin<20 and len(sentence)>100:
                # Long sentence with an early first colon: treat everything
                # after that colon as a single value.
                _d = DotDict({"key":sentence[0:_begin],"value":sentence[_end:]})
                _d["position"] = {"key_begin_sentence":0,
                                  "key_begin_sentence_start":0,
                                  "key_end_sentence":0,
                                  "key_end_sentence_end":_begin,
                                  "value_begin_sentence":0,
                                  "value_begin_sentence_start":_end,
                                  "value_end_sentence":0,
                                  "value_end_sentence_end":len(sentence)
                                  }
                list_kv.append(_d)

            else:
                # Search the slice before each subsequent colon, then the
                # tail starting after the second-to-last colon.
                _begin = 0

                for _i in range(len(list_span)-1):
                    _end = list_span[_i+1][0]
                    iter = re.search(kv_pattern,sentence[_begin:_end])
                    # Advance the window start past the current colon BEFORE
                    # evaluating the match (order matters here).
                    _begin = list_span[_i][1]
                    if iter is not None:
                        _d = DotDict({})
                        _d["key"] = iter.group("key")
                        _d["value"] = iter.group("value")

                        _d["position"] = {"key_begin_sentence":0,
                                          "key_begin_sentence_start":iter.span("key")[0],
                                          "key_end_sentence":0,
                                          "key_end_sentence_end":iter.span("key")[0]+len(_d.get("key","")),
                                          "value_begin_sentence":0,
                                          "value_begin_sentence_start":iter.span("value")[0],
                                          "value_end_sentence":0,
                                          "value_end_sentence_end":iter.span("value")[0]+len(_d.get("value",""))
                                          }
                        list_kv.append(_d)

                _begin = list_span[-2][1]
                _end = len(sentence)
                iter = re.search(kv_pattern,sentence[_begin:_end])
                if iter is not None:
                    _d = DotDict({})
                    _d["key"] = iter.group("key")
                    _d["value"] = iter.group("value")

                    _d["position"] = {"key_begin_sentence":0,
                                      "key_begin_sentence_start":iter.span("key")[0],
                                      "key_end_sentence":0,
                                      "key_end_sentence_end":iter.span("key")[0]+len(_d.get("key","")),
                                      "value_begin_sentence":0,
                                      "value_begin_sentence_start":iter.span("value")[0],
                                      "value_end_sentence":0,
                                      "value_end_sentence_end":iter.span("value")[0]+len(_d.get("value",""))
                                      }
                    list_kv.append(_d)




    # for iter in _iter:
    #     _d = DotDict({})
    #     _d["key"] = iter.group("key")
    #     _d["value"] = iter.group("value")
    #     _d["key_span"] = iter.span("key")
    #     _d["value_span"] = iter.span("value")
    #     list_kv.append(_d)
    return list_kv
|
|
|
+
|
|
|
+
|
|
|
def extract_kv_from_node(node):
    """Extract kv pairs from a node, honoring <br> line breaks.

    With children: consecutive text-bearing children are grouped into runs,
    split at <br> tags. If any <br> is present, each run becomes a synthetic
    child node carrying its own kv list; otherwise all kv pairs found in the
    runs are collected onto node["kv"]. Without children, node["text"] is
    scanned directly.

    NOTE(review): in the has_br branch the synthetic node's "tag"/"name" are
    set to the first child *node object* (texts[0]), not to its tag string —
    looks unintended; confirm against consumers.

    :param node: dict node from dom_to_tree (mutated in place)
    :return: the kv list of the last processed text run (aggregate result
        lives on node["kv"])
    """
    list_kv = []
    list_text = []
    childs = node.get("children",[])
    _text = ""

    has_br = False
    if childs:
        # Group text children into runs; a <br> starts a new run.
        for child in childs:
            node_name = child.get("tag","")
            child_text = child.get("text")
            if node_name=="br":
                list_text.append([])
                has_br = True
            if child_text:
                if len(list_text)==0:
                    list_text.append([])
                list_text[-1].append(child)

        node["kv"] = []
        if has_br:
            # Each run becomes its own synthetic child with its own kv list.
            new_children = []
            for texts in list_text:
                if texts:
                    _text = "".join([a.get("text") for a in texts])
                    tag = texts[0]
                    list_kv = extract_kv_from_sentence(_text)
                    _n = DotDict({"tag":tag,"name":tag,"text":_text,"children":[],"kv":list_kv})
                    new_children.append(_n)
            node["children"] = new_children
        else:
            # No explicit breaks: aggregate all kv pairs onto the node.
            for texts in list_text:
                _text = "".join([a.get("text") for a in texts])
                if _text:
                    list_kv = extract_kv_from_sentence(_text)
                    node["kv"].extend(list_kv)
    else:
        # Leaf node: scan its own text.
        _text = node.get("text")
        if _text:
            list_kv = extract_kv_from_sentence(_text)
            node["kv"] = list_kv
    return list_kv
|
|
|
+
|
|
|
+
|
|
|
def get_child_text(node):
    """Concatenate this node's text with that of all descendants, depth-first."""
    parts = [node.get("text", "")]
    for child in node.get("children", []):
        parts.append(get_child_text(child))
    return "".join(parts)
|
|
|
def extract_kv_from_tree(tree):
    """Walk the dom tree, attaching kv lists to suitable nodes.

    Returns a (kv_count, has_table) tuple for the subtree. Table nodes are
    left untouched (their kv was already produced by table_to_tree). A node
    whose subtree yielded no kv and contains no table and no p/div/li/br
    structure is collapsed: its descendant text is merged onto the node (the
    "children" key is dropped) and scanned via extract_kv_from_node; a
    p/div/li node without nested p/div/li gets the same treatment.
    """
    if isinstance(tree,list):
        _count = 0
        has_table = False
        for child in tree:
            _c,_t = extract_kv_from_tree(child)
            _count += _c
            if _t:
                has_table = _t
        return _count,has_table
    if isinstance(tree,dict):
        if tree.get("tag","")!="table":
            childs = tree.get("children",[])

            if len(childs)>0:
                # Recurse first, recording what kinds of children exist.
                _count = 0
                has_table = False
                child_has_p_div = False
                child_has_br = False
                for child in childs:
                    _c,_t = extract_kv_from_tree(child)
                    _count += _c
                    if _t:
                        has_table = _t
                    if child.get("tag","") in ("p","div","li"):
                        child_has_p_div = True
                    if child.get("tag","")=="br":
                        child_has_br = True
                if _count==0:
                    # Nothing found below: try collapsing this node's text.
                    if not has_table and not child_has_p_div and not child_has_br:
                        _text = get_child_text(tree)
                        if "children" in tree:
                            del tree["children"]
                        tree["text"] = _text
                        list_kv = extract_kv_from_node(tree)
                        _count = len(list_kv)
                        return _count,has_table
                    if tree.get("tag","") in ("p","div","li") and not has_table and not child_has_p_div:
                        if not child_has_br:
                            _text = get_child_text(tree)
                            tree["text"] = _text
                            if "children" in tree:
                                del tree["children"]
                            p_list_kv = extract_kv_from_node(tree)
                            return len(p_list_kv),has_table

                return _count,has_table
            else:
                # Childless node: scan its own text.
                list_kv = extract_kv_from_node(tree)
                return len(list_kv),False
        else:
            # Table node: kv already attached by table_to_tree.
            return len(tree.get("kv",[])),True
    return 0,False
|
|
|
+
|
|
|
def update_kv_span(list_kv, append_length):
    """Build absolute "position" dicts for kv entries.

    Each entry's sentence-relative offsets ("key_sen_index" /
    "value_sen_index") are shifted by *append_length* and stored under
    "position"; sentence numbers are initialized to 0. Entries are mutated
    in place.
    """
    for entry in list_kv:
        key_start = entry.get("key_sen_index", 0) + append_length
        value_start = entry.get("value_sen_index", 0) + append_length
        entry["position"] = {
            "key_begin_sentence": 0,
            "key_begin_sentence_start": key_start,
            "key_end_sentence": 0,
            "key_end_sentence_end": key_start + len(entry.get("key", "")),
            "value_begin_sentence": 0,
            "value_begin_sentence_start": value_start,
            "value_end_sentence": 0,
            "value_end_sentence_end": value_start + len(entry.get("value", "")),
        }
|
|
|
+
|
|
|
def get_outobjs_from_tree(tree, list_outobjs=None):
    """Collect "output objects" (tables and text-bearing nodes) in document order.

    Table nodes are collected whole and not descended into; any other dict
    node with non-empty "text" is collected and its children are walked.

    :param tree: dict node or list of nodes
    :param list_outobjs: accumulator used by the recursion
    :return: the accumulator list
    """
    if list_outobjs is None:
        list_outobjs = []
    if isinstance(tree, list):
        for item in tree:
            get_outobjs_from_tree(item, list_outobjs)
    elif isinstance(tree, dict):
        if tree.get("tag", "") == "table":
            # Tables are emitted whole; their inner structure is handled elsewhere.
            list_outobjs.append(tree)
        else:
            if tree.get("text", "") != "":
                # Mirror "tag" under "name" (attribute access on DotDict nodes).
                tree.name = tree.tag
                list_outobjs.append(tree)
            for item in tree.get("children", []):
                get_outobjs_from_tree(item, list_outobjs)
    return list_outobjs
|
|
|
+
|
|
|
+
|
|
|
def standard_title_context(_title_context):
    """Normalize punctuation in a title-context string.

    Full-width parentheses become ASCII, both colon variants become ";",
    and comma / enumeration-comma / full-width full-stop become ".".
    """
    mapping = str.maketrans({
        "(": "(", ")": ")",
        ":": ";", ":": ";",
        ",": ".", ",": ".",
        "、": ".", ".": ".",
    })
    return _title_context.translate(mapping)
|
|
|
+
|
|
|
def standard_product(sentence):
    """Replace full-width parentheses with their ASCII counterparts."""
    return sentence.translate(str.maketrans("()", "()"))
|
|
|
+
|
|
|
+
|
|
|
+import Levenshtein
|
|
|
+import copy
|
|
|
def jaccard_score(source, target):
    """Character-level overlap ratio of two strings.

    Returns max(|S∩T|/|S|, |S∩T|/|T|) over the character sets, or 0 when
    either string is empty. (Despite the name this is an overlap
    coefficient rather than true Jaccard similarity.)
    """
    s_chars = set(source)
    t_chars = set(target)
    if not s_chars or not t_chars:
        return 0
    common = len(s_chars & t_chars)
    return max(common / len(s_chars), common / len(t_chars))
|
|
|
+
|
|
|
+
|
|
|
+def judge_pur_chinese(keyword):
|
|
|
+ """
|
|
|
+ 中文字符的编码范围为: u'\u4e00' -- u'\u9fff:只要在此范围内就可以判断为中文字符串
|
|
|
+ @param keyword:
|
|
|
+ @return:
|
|
|
+ """
|
|
|
+ # 定义一个需要删除的标点符号字符串列表
|
|
|
+ remove_chars = '[·’!"\#$%&\'()#!()*+,-./:;<=>?\@,:?¥★、….>【】[]《》?“”‘’\[\\]^_`{|}~]+'
|
|
|
+ # 利用re.sub来删除中文字符串中的标点符号
|
|
|
+ strings = re.sub(remove_chars, "", keyword) # 将keyword中文字符串中remove_chars中包含的标点符号替换为空字符串
|
|
|
+ for ch in strings:
|
|
|
+ if u'\u4e00' <= ch <= u'\u9fff':
|
|
|
+ pass
|
|
|
+ else:
|
|
|
+ return False
|
|
|
+ return True
|
|
|
def is_similar(source, target, _radio=None):
    """Fuzzy string similarity via several Levenshtein-family metrics.

    The acceptance threshold loosens as the shorter string grows (90, then
    87 at length>=3, then 85 at length>=5) and may be overridden via
    *_radio*. Strings with min length >= 5 also match on substring
    containment, or on full character overlap when both are pure Chinese.

    :return: True when any metric clears the threshold, else False
    """
    source = str(source).lower()
    target = str(target).lower()
    max_len = max(len(source), len(target))
    min_len = min(len(source), len(target))

    # Threshold loosens for longer strings; an explicit _radio wins.
    min_ratio = 90
    if min_len >= 3:
        min_ratio = 87
    if min_len >= 5:
        min_ratio = 85
    if _radio is not None:
        min_ratio = _radio

    # dis_len = abs(len(source)-len(target))
    # min_dis = min(max_len*0.2,4)
    if min_len == 0 and max_len > 0:
        return False
    if max_len <= 2 and source == target:
        return True
    if min_len < 2:
        return False

    # Try ratio, Jaro and Jaro-Winkler in turn.
    ratio_score = Levenshtein.ratio(source, target) * 100
    if ratio_score >= min_ratio:
        log("%s and %s similar_jaro %d" % (source, target, ratio_score))
        return True
    jaro_score = Levenshtein.jaro(source, target)
    if jaro_score * 100 >= min_ratio:
        log("%s and %s similar_jaro %d" % (source, target, jaro_score * 100))
        return True
    jarow_score = Levenshtein.jaro_winkler(source, target)
    if jarow_score * 100 >= min_ratio:
        log("%s and %s similar_jaro %d" % (source, target, jarow_score * 100))
        return True

    if min_len >= 5:
        # Containment, or identical character sets for pure-Chinese strings.
        if len(source) == max_len and str(source).find(target) >= 0:
            return True
        elif len(target) == max_len and target.find(source) >= 0:
            return True
        elif jaccard_score(source, target) == 1 and judge_pur_chinese(source) and judge_pur_chinese(target):
            return True
    return False
|
|
|
+
|
|
|
+
|
|
|
# Headings that mark the end of a technical-parameter section.
end_pattern = "商务要求|评分标准|商务条件|商务条件"
# Headings that introduce product / technical parameter content.
_param_pattern = "(产品|技术|清单|配置|参数|具体|明细|项目|招标|货物|服务|规格|工作|具体)[及和与]?(指标|配置|条件|要求|参数|需求|规格|条款|名称及要求)|配置清单|(质量|技术).{,10}要求|验收标准|^(参数|功能)$"
# Tokens typical of measurable technical specifications (units, tolerances, materials, ...).
meter_pattern = "[><≤≥±]\d+|\d+(?:[μucmkK微毫千]?[米升LlgGmMΩ]|摄氏度|英寸|度|天|VA|dB|bpm|rpm|kPa|mol|cmH20|%|°|Mpa|Hz|K?HZ|℃|W|min|[*×xX])|[*×xX]\d+|/min|\ds[^a-zA-Z]|GB.{,20}标准|PVC|PP|角度|容积|色彩|自动|流量|外径|轴位|折射率|帧率|柱镜|振幅|磁场|镜片|防漏|强度|允差|心率|倍数|瞳距|底座|色泽|噪音|间距|材质|材料|表面|频率|阻抗|浓度|兼容|防尘|防水|内径|实时|一次性|误差|性能|距离|精确|温度|超温|范围|跟踪|对比度|亮度|[横纵]向|均压|负压|正压|可调|设定值|功能|检测|高度|厚度|宽度|深度|[单双多]通道|效果|指数|模式|尺寸|重量|峰值|谷值|容量|寿命|稳定性|高温|信号|电源|电流|转换率|效率|释放量|转速|离心力|向心力|弯曲|电压|功率|气量|国标|标准协议|灵敏度|最大值|最小值|耐磨|波形|高压|性强|工艺|光源|低压|压力|压强|速度|湿度|重量|毛重|[MLX大中小]+码|净重|颜色|[红橙黄绿青蓝紫]色|不锈钢|输入|输出|噪声|认证|配置"
# Tokens indicating commercial / administrative (non-technical) content.
not_meter_pattern = "投标报价|中标金额|商务部分|公章|分值构成|业绩|详见|联系人|联系电话|合同价|金额|采购预算|资金来源|费用|质疑|评审因素|评审标准|商务资信|商务评分|专家论证意见|评标方法|代理服务费|售后服务|评分类型|评分项目|预算金额|得\d+分|项目金额|详见招标文件|乙方"
|
|
|
+
|
|
|
+
|
|
|
def getTrs(tbody):
    """Return the direct <tr> rows of a table/tbody element, including rows
    of immediately nested tbody/table children.

    When *tbody* is a <table> with a direct <tbody>, rows are taken from
    that tbody instead.
    """
    if tbody.name == "table":
        inner_body = tbody.find("tbody", recursive=False)
        if inner_body is not None:
            tbody = inner_body
    rows = []
    for node in tbody.find_all(recursive=False):
        if node.name == "tr":
            rows.append(node)
        if node.name == "tbody" or node.name == "table":
            rows.extend(node.find_all("tr", recursive=False))
    return rows
|
|
|
+
|
|
|
def fixSpan(tbody):
    """Expand colspan/rowspan attributes of a table by duplicating cells.

    Column expansion runs first, then row expansion (doing it the other way
    round could scramble the parsed table). Rows that themselves contain a
    nested <table> are skipped. The DOM is mutated in place.
    """
    # Handle colspan/rowspan completion.
    #trs = tbody.findChildren('tr', recursive=False)

    trs = getTrs(tbody)
    ths_len = 0
    ths = list()
    trs_set = set()
    # Column completion first, then row completion; otherwise the table
    # could end up parsed in a scrambled order.
    # Iterate every tr.

    for indtr, tr in enumerate(trs):
        ths_tmp = tr.findChildren('th', recursive=False)
        # Skip rows that contain nested tables.
        if len(tr.findChildren('table'))>0:
            continue
        if len(ths_tmp) > 0:
            ths_len = ths_len + len(ths_tmp)
            for th in ths_tmp:
                ths.append(th)
            trs_set.add(tr)
        # Iterate every element of the row.
        tds = tr.findChildren(recursive=False)
        for indtd, td in enumerate(tds):
            # colspan: duplicate the cell into the following positions
            # of the same row.
            if 'colspan' in td.attrs:
                if str(re.sub("[^0-9]","",str(td['colspan'])))!="":
                    col = int(re.sub("[^0-9]","",str(td['colspan'])))
                    if col<100 and len(td.get_text())<1000:
                        td['colspan'] = 1
                        for i in range(1, col, 1):
                            td.insert_after(copy.copy(td))

    for indtr, tr in enumerate(trs):
        ths_tmp = tr.findChildren('th', recursive=False)
        # Skip rows that contain nested tables.
        if len(tr.findChildren('table'))>0:
            continue
        if len(ths_tmp) > 0:
            ths_len = ths_len + len(ths_tmp)
            for th in ths_tmp:
                ths.append(th)
            trs_set.add(tr)
        # Iterate every element of the row.
        tds = tr.findChildren(recursive=False)
        for indtd, td in enumerate(tds):
            # rowspan: duplicate the cell into the same position of the
            # following rows.
            if 'rowspan' in td.attrs:
                if str(re.sub("[^0-9]","",str(td['rowspan'])))!="":
                    row = int(re.sub("[^0-9]","",str(td['rowspan'])))
                    td['rowspan'] = 1
                    for i in range(1, row, 1):
                        # Fetch all tds of the next row and insert at the
                        # corresponding position.
                        if indtr+i<len(trs):
                            tds1 = trs[indtr + i].findChildren(['td','th'], recursive=False)
                            if len(tds1) >= (indtd) and len(tds1)>0:
                                if indtd > 0:
                                    tds1[indtd - 1].insert_after(copy.copy(td))
                                else:
                                    tds1[0].insert_before(copy.copy(td))
                            elif indtd-2>0 and len(tds1) > 0 and len(tds1) == indtd - 1: # fix tables whose last column was not completed
                                tds1[indtd-2].insert_after(copy.copy(td))
|
|
|
def getTable(tbody):
    """Flatten a table element into a list of rows of [text, 0] cells.

    Spans are expanded first via fixSpan. Rows without any td/th children
    still contribute one cell built from the row's own text (keeps data
    from malformed tables). Non-breaking spaces are stripped.
    """
    fixSpan(tbody)
    inner_table = []
    for row in getTrs(tbody):
        cells = row.findChildren(['td', 'th'], recursive=False)
        if not cells:
            # Row without cells: fall back to the whole row's text.
            line = [[re.sub('\xa0', '', row.get_text()), 0]]
        else:
            line = [[re.sub('\xa0', '', c.get_text()), 0] for c in cells]
        inner_table.append(line)
    return inner_table
|
|
|
+
|
|
|
def extract_products(list_data,_product,_param_pattern = "产品名称|设备材料|采购内存|标的名称|采购内容|(标的|维修|系统|报价构成|商品|产品|物料|物资|货物|设备|采购品|采购条目|物品|材料|印刷品?|采购|物装|配件|资产|耗材|清单|器材|仪器|器械|备件|拍卖物|标的物|物件|药品|药材|药械|货品|食品|食材|品目|^品名|气体|标项|分项|项目|计划|包组|标段|[分子]?包|子目|服务|招标|中标|成交|工程|招标内容)[\))的]?([、\w]{,4}名称|内容|描述)|标的|标项|项目$|商品|产品|物料|物资|货物|设备|采购品|采购条目|物品|材料|印刷品|物装|配件|资产|招标内容|耗材|清单|器材|仪器|器械|备件|拍卖物|标的物|物件|药品|药材|药械|货品|食品|食材|菜名|^品目$|^品名$|^名称|^内容$"):
    """Heuristically pull product-name lists out of parsed table data.

    Scans table-type entries of *list_data* for header cells that match
    *_param_pattern* next to price/quantity headers, collects the candidate
    columns' cell texts, filters obviously non-product content, and finally
    prefers the column that contains a value similar to *_product*.

    :param list_data: list of dicts with "type", "text" and, for tables,
        "list_table" (rows of [text, flag] cells as built by getTable)
    :param _product: known product name used to pick the best column
    :param _param_pattern: regex identifying product-name header cells
    :return: deduplicated list of product-name strings
    """
    _product = standard_product(_product)
    list_result = []
    list_table_products = []
    for _data_i in range(len(list_data)):
        _data = list_data[_data_i]
        _type = _data["type"]
        # NOTE(review): _text is read but never used below — confirm dead.
        _text = _data["text"]

        if _type=="table":
            list_table = _data["list_table"]
            if list_table is None:
                continue
            # NOTE(review): _check is assigned but never used.
            _check = True
            max_length = max([len(a) for a in list_table])
            min_length = min([len(a) for a in list_table])
            # Skip very ragged tables (shortest row under half the longest).
            if min_length<max_length/2:
                continue
            list_head_index = []
            _begin_index = 0
            head_cell_text = ""
            # Look for product-name headers in the first two rows, but only
            # when the row also carries price/quantity-style headers.
            for line_i in range(len(list_table[:2])):
                line = list_table[line_i]
                line_text = ",".join([cell[0] for cell in line])
                for cell_i in range(len(line)):
                    cell = line[cell_i]
                    cell_text = cell[0]
                    if len(cell_text)<10 and re.search(_param_pattern,cell_text) is not None and re.search("单价|数量|预算|限价|总价|品牌|规格|型号|用途|要求|采购量",line_text) is not None:
                        _begin_index = line_i+1
                        list_head_index.append(cell_i)

            # Concatenate all text of the candidate columns for a sanity check.
            for line_i in range(len(list_table)):
                line = list_table[line_i]
                for cell_i in list_head_index:
                    if cell_i>=len(line):
                        continue
                    cell = line[cell_i]
                    cell_text = cell[0]
                    head_cell_text += cell_text

            # print("===head_cell_text",head_cell_text)
            # Discard columns that look administrative rather than product-like.
            if re.search("招标人|采购人|项目编号|项目名称|金额|^\d+$",head_cell_text) is not None:
                list_head_index = []

            # Also accept any column whose cell contains the known product
            # name, when the row carries price/quantity-style headers.
            for line in list_table:
                line_text = ",".join([cell[0] for cell in line])
                for cell_i in range(len(line)):
                    cell = line[cell_i]
                    cell_text = cell[0]
                    if cell_text is not None and _product is not None and len(cell_text)<len(_product)*10 and cell_text.find(_product)>=0 and re.search("单价|数量|总价|规格|品牌|型号|用途|要求|采购量",line_text) is not None:
                        list_head_index.append(cell_i)

            list_head_index = list(set(list_head_index))
            if len(list_head_index)>0:
                # NOTE(review): has_number persists across candidate columns
                # (it is reset per table, not per column) — confirm intended.
                has_number = False
                for cell_i in list_head_index:
                    table_products = []

                    for line_i in range(_begin_index,len(list_table)):
                        line = list_table[line_i]

                        for _i in range(len(line)):
                            cell = line[_i]
                            cell_text = cell[0]
                            if re.search("^\d+$",cell_text) is not None:
                                has_number = True

                        if cell_i>=len(line):
                            continue
                        cell = line[cell_i]
                        cell_text = cell[0]
                        if re.search(_param_pattern,cell_text) is None or has_number:
                            if re.search("^[\da-zA-Z]+$",cell_text) is None:
                                table_products.append(cell_text)

                    if len(table_products)>0:
                        logger.debug("table products %s"%(str(table_products)))
                        # Keep only plausible product columns: short cells and
                        # no administrative keywords.
                        if min([len(x) for x in table_products])>0 and max([len(x) for x in table_products])<=30:
                            if re.search("招标人|代理人|预算|数量|交货期|品牌|产地","".join(table_products)) is None:
                                list_table_products.append(table_products)
    # Prefer the column that contains something similar to the known product.
    _find = False
    for table_products in list_table_products:
        for _p in table_products:
            if is_similar(_product,_p,90):
                _find = True
                logger.debug("similar table_products %s"%(str(table_products)))
                list_result = list(set([a for a in table_products if len(a)>1 and len(a)<20 and re.search("费用|预算|合计|金额|万元|运费|^其他$",a) is None]))
                break
    if not _find:
        # Fallback: merge every candidate column, filtered a bit more loosely.
        for table_products in list_table_products:
            list_result.extend(table_products)
        list_result = list(set([a for a in list_result if len(a)>1 and len(a)<30 and re.search("费用|预算|合计|金额|万元|运费",a) is None]))
    return list_result
|
|
|
+
|
|
|
def get_childs(childs, max_depth=None):
    """Flatten outline-tree nodes into a pre-order list.

    Each node is a dict whose "child_title" entry holds its child nodes.

    :param childs: list of node dicts to start from
    :param max_depth: None for unlimited descent; an int k descends at most
        k additional levels below the given nodes
    :return: list of node dicts in pre-order (each node before its children)
    """
    list_data = []
    for _child in childs:
        list_data.append(_child)
        childs2 = _child.get("child_title", [])
        # descend only while there are children and depth budget remains
        if childs2 and (max_depth is None or max_depth > 0):
            next_depth = None if max_depth is None else max_depth - 1
            for _child2 in childs2:
                list_data.extend(get_childs([_child2], next_depth))
    return list_data
|
|
|
+class Html2KVTree():
|
|
|
+
|
|
|
+ def __init__(self,_html,auto_merge_table=True,list_obj = []):
|
|
|
+ if _html is None:
|
|
|
+ _html = ""
|
|
|
+ self.html = _html
|
|
|
+ self.auto_merge_table = auto_merge_table
|
|
|
+
|
|
|
+ if list_obj:
|
|
|
+ self.list_obj = list_obj
|
|
|
+ else:
|
|
|
+
|
|
|
+ _tree = html_to_tree(html_content)
|
|
|
+ self.list_obj = get_outobjs_from_tree(_tree)
|
|
|
+
|
|
|
+
|
|
|
+
|
|
|
+ # for obj in self.list_obj:
|
|
|
+ # print("obj",obj.get_text()[:20])
|
|
|
+
|
|
|
+ self.tree = self.buildParsetree(self.list_obj,[],auto_merge_table)
|
|
|
+
|
|
|
+
|
|
|
+ # #识别目录树
|
|
|
+ # self.print_tree(self.tree,"-|")
|
|
|
+
|
|
|
+ def get_soup_objs(self,soup,list_obj=None):
|
|
|
+ if list_obj is None:
|
|
|
+ list_obj = []
|
|
|
+ childs = soup.find_all(recursive=False)
|
|
|
+ for _obj in childs:
|
|
|
+ childs1 = _obj.find_all(recursive=False)
|
|
|
+ if len(childs1)==0 or len(_obj.get_text())<40 or _obj.name=="table":
|
|
|
+ list_obj.append(_obj)
|
|
|
+ elif _obj.name=="p":
|
|
|
+ list_obj.append(_obj)
|
|
|
+ else:
|
|
|
+ self.get_soup_objs(_obj,list_obj)
|
|
|
+ return list_obj
|
|
|
+
|
|
|
+ def fix_tree(self,_product):
|
|
|
+ products = extract_products(self.tree,_product)
|
|
|
+ if len(products)>0:
|
|
|
+ self.tree = self.buildParsetree(self.list_obj,products,self.auto_merge_table)
|
|
|
+
|
|
|
+ def print_tree(self,tree,append="",set_tree_id=None):
|
|
|
+ if set_tree_id is None:
|
|
|
+ set_tree_id = set()
|
|
|
+ if append=="":
|
|
|
+ for t in tree:
|
|
|
+ logger.debug("%s text:%s title:%s title_text:%s before:%s after%s product:%s"%("==>",t["text"][:50],t["sentence_title"],t["sentence_title_text"],t["title_before"],t["title_after"],t["has_product"]))
|
|
|
+
|
|
|
+ for t in tree:
|
|
|
+ _id = id(t)
|
|
|
+ if _id in set_tree_id:
|
|
|
+ continue
|
|
|
+ set_tree_id.add(_id)
|
|
|
+ logger.info("%s text:%s title:%s title_text:%s before:%s after%s product:%s kv:%s"%(append,t["text"][:50],t["sentence_title"],t["sentence_title_text"],t["title_before"],t["title_after"],t["has_product"],str(t["kv"])))
|
|
|
+ childs = t["child_title"]
|
|
|
+ self.print_tree(childs,append=append+"-|",set_tree_id=set_tree_id)
|
|
|
+
|
|
|
+ def is_title_first(self,title):
|
|
|
+ if title in ("一","1","Ⅰ","a","A"):
|
|
|
+ return True
|
|
|
+ return False
|
|
|
+
|
|
|
    def find_title_by_pattern(self,_text,_pattern="(^|★|▲|:|:|\s+)(?P<title_1>(?P<title_1_index_0_0>第?)(?P<title_1_index_1_1>[一二三四五六七八九十ⅠⅡⅢⅣⅤⅥⅦⅧⅨⅩⅪⅫ]+)(?P<title_1_index_2_0>[、章册包标部.::]+))|" \
                                            "([\s★▲\*]*)(?P<title_3>(?P<title_3_index_0_0>[^一二三四五六七八九十\dⅠⅡⅢⅣⅤⅥⅦⅧⅨⅩⅪⅫ]{,3}?)(?P<title_3_index_0_1>[ⅠⅡⅢⅣⅤⅥⅦⅧⅨⅩⅪⅫ]+)(?P<title_3_index_0_2>[、章册包标部.::]+))|" \
                                            "([\s★▲\*]*)(?P<title_4>(?P<title_4_index_0_0>[^一二三四五六七八九十\dⅠⅡⅢⅣⅤⅥⅦⅧⅨⅩⅪⅫ]{,3}?第?)(?P<title_4_index_1_1>[一二三四五六七八九十]+)(?P<title_4_index_2_0>[节章册部\.::、、]+))|" \
                                            "([\s★▲\*]*)(?P<title_5>(?P<title_5_index_0_0>^)(?P<title_5_index_1_1>[一二三四五六七八九十]+)(?P<title_5_index_2_0>)[^一二三四五六七八九十节章册部\.::、])|" \
                                            "([\s★▲\*]*)(?P<title_12>(?P<title_12_index_0_0>[^一二三四五六七八九十\dⅠⅡⅢⅣⅤⅥⅦⅧⅨⅩⅪⅫ]{,3}?\d{1,2}[\..、\s\-]\d{1,2}[\..、\s\-]\d{1,2}[\..、\s\-]\d{1,2}[\..、\s\-])(?P<title_12_index_1_1>\d{1,2})(?P<title_12_index_2_0>[\..、\s\-]?))|"\
                                            "([\s★▲\*]*)(?P<title_11>(?P<title_11_index_0_0>[^一二三四五六七八九十\dⅠⅡⅢⅣⅤⅥⅦⅧⅨⅩⅪⅫ]{,3}?\d{1,2}[\..、\s\-]\d{1,2}[\..、\s\-]\d{1,2}[\..、\s\-])(?P<title_11_index_1_1>\d{1,2})(?P<title_11_index_2_0>[\..、\s\-]?))|" \
                                            "([\s★▲\*]*)(?P<title_10>(?P<title_10_index_0_0>[^一二三四五六七八九十\dⅠⅡⅢⅣⅤⅥⅦⅧⅨⅩⅪⅫ]{,3}?\d{1,2}[\..、\s\-]\d{1,2}[\..、\s\-])(?P<title_10_index_1_1>\d{1,2})(?P<title_10_index_2_0>[\..、\s\-]?))|" \
                                            "([\s★▲\*]*)(?P<title_7>(?P<title_7_index_0_0>[^一二三四五六七八九十\dⅠⅡⅢⅣⅤⅥⅦⅧⅨⅩⅪⅫ]{,3}?\d{1,2}[\..\s\-])(?P<title_7_index_1_1>\d{1,2})(?P<title_7_index_2_0>[\..包标::、\s\-]*))|" \
                                            "(^[\s★▲\*]*)(?P<title_6>(?P<title_6_index_0_0>[^一二三四五六七八九十\dⅠⅡⅢⅣⅤⅥⅦⅧⅨⅩⅪⅫ]{,3}?包?)(?P<title_6_index_0_1>\d{1,2})(?P<title_6_index_2_0>[\..、\s\-包标]*))|" \
                                            "([\s★▲\*]*)(?P<title_15>(?P<title_15_index_0_0>[^一二三四五六七八九十\dⅠⅡⅢⅣⅤⅥⅦⅧⅨⅩⅪⅫ]{,3}?[((]?)(?P<title_15_index_1_1>\d{1,2})(?P<title_15_index_2_0>[))包标\..::、]+))|" \
                                            "([\s★▲\*]+)(?P<title_17>(?P<title_17_index_0_0>[^一二三四五六七八九十\dⅠⅡⅢⅣⅤⅥⅦⅧⅨⅩⅪⅫ]{,3}?[((]?)(?P<title_17_index_1_1>[a-zA-Z]+)(?P<title_17_index_2_0>[))包标\..::、]+))|" \
                                            "([\s★▲\*]*)(?P<title_19>(?P<title_19_index_0_0>[^一二三四五六七八九十\dⅠⅡⅢⅣⅤⅥⅦⅧⅨⅩⅪⅫ]{,3}?[((]?)(?P<title_19_index_1_1>[一二三四五六七八九十ⅠⅡⅢⅣⅤⅥⅦⅧⅨⅩⅪⅫ]+)(?P<title_19_index_2_0>[))]))"
                              ):
        """Match an outline-title prefix (e.g. "第一章", "1.2.3", "(a)") at the
        start of *_text*.

        The default pattern is a cascade of alternatives; each alternative
        defines a named group ``title_<k>`` for the full title plus
        ``title_<k>_index_*`` sub-groups for its parts.  After sorting by
        group name the callers rely on this layout:
        groups[0] = (pattern name, full title text),
        groups[1] = context before the index, groups[-2] = the index itself,
        groups[-1] = context after the index.

        :return: sorted list of (group_name, matched_text) tuples, or None
            when no alternative matches or the match is rejected below.
        """
        _se = re.search(_pattern,_text)
        groups = []
        if _se is not None:
            e = _se.end()
            # reject matches that are really dates/ids/prices/scores, or a bare
            # number immediately followed by a unit (年/月/元/…) in the remainder
            if re.search('(时间|日期|编号|账号|号码|手机|价格|\w价|人民币|金额|得分|分值|总分|满分|最高得|扣|减|数量|评委)[::]?\d', _se.group(0)) or (re.search('\d[.::]?$', _se.group(0)) and re.search('^[\d年月日万元天个分秒台条A-Za-z]|^(小时)', _text[e:])):
                return None
            elif re.match('[二三四五六七八九十]\w{1,2}[市区县]|五金|四川|八疆|九龙|[一二三四五六七八九十][层天标包]', _text) and re.match('[一二三四五六七八九十]', _se.group(0)):  # 289765335: exclude place names like 三明市 at line start from being treated as outline titles
                return None
            elif re.search('^[\u4e00-\u9fa5]+[::]', _text[:e]):
                # a "汉字:" key/value prefix is not an outline title
                return None
            _gd = _se.groupdict()
            for k,v in _gd.items():
                if v is not None:
                    groups.append((k,v))
            if len(groups):
                # alphabetical sort puts title_<k> first, then its index parts
                groups.sort(key=lambda x:x[0])
                return groups
        return None
|
|
|
+ def make_increase(self,_sort,_title,_add=1):
|
|
|
+ if len(_title)==0 and _add==0:
|
|
|
+ return ""
|
|
|
+ if len(_title)==0 and _add==1:
|
|
|
+ return _sort[0]
|
|
|
+ _index = _sort.index(_title[-1])
|
|
|
+ next_index = (_index+_add)%len(_sort)
|
|
|
+ next_chr = _sort[next_index]
|
|
|
+ if _index==len(_sort)-1:
|
|
|
+ _add = 1
|
|
|
+ else:
|
|
|
+ _add = 0
|
|
|
+ return next_chr+self.make_increase(_sort,_title[:-1],_add)
|
|
|
+
|
|
|
+
|
|
|
    def get_next_title(self,_title):
        """Return the successor of an outline index in its own numbering
        scheme: Arabic digits, Chinese numerals (一..百), lowercase letters,
        uppercase letters, or single Roman numerals.

        :return: the next index as a string, or None when the scheme is
            unrecognized (or the Roman sequence is exhausted).
        """
        # Arabic numbers: plain integer increment
        if re.search("^\d+$",_title) is not None:
            return str(int(_title)+1)
        # Chinese numerals
        if re.search("^[一二三四五六七八九十百]+$",_title) is not None:
            # trailing 十/百 just gain a units digit
            if _title[-1]=="十":
                return _title+"一"
            if _title[-1]=="百":
                return _title+"零一"

            # trailing 九 needs an explicit carry into 十/百
            if _title[-1]=="九":
                if len(_title)==1:
                    return "十"
                if len(_title)==2:
                    if _title[0]=="十":
                        return "二十"
                if len(_title)==3:
                    if _title[0]=="九":
                        return "一百"
                    else:
                        # e.g. 二十九 -> increment the tens digit -> 三十
                        _next_title = self.make_increase(['一','二','三','四','五','六','七','八','九','十'],re.sub("[十百]",'',_title[0]))
                        return _next_title+"十"

            # general case: strip positional markers, increment digit-wise
            # (make_increase returns least-significant-first, hence reverse)
            _next_title = self.make_increase(['一','二','三','四','五','六','七','八','九','十'],re.sub("[十百]",'',_title))
            _next_title = list(_next_title)
            _next_title.reverse()
            # re-insert the positional markers 十/百
            if _next_title[-1]!="十":
                if len(_next_title)>=2:
                    _next_title.insert(-1,'十')
                if len(_next_title)>=4:
                    _next_title.insert(-3,'百')
            if _title[0]=="十":
                # NOTE(review): the list-vs-str comparison below can never be
                # true (dead branch kept as-is); confirm intended behavior
                if _next_title=="十":
                    _next_title = ["二","十"]
                _next_title.insert(0,"十")
            _next_title = "".join(_next_title)
            return _next_title
        # lowercase letters, base-26 with alphabet a..z
        if re.search("^[a-z]+$",_title) is not None:
            _next_title = self.make_increase([chr(i+ord('a')) for i in range(26)],_title)
            _next_title = list(_next_title)
            _next_title.reverse()
            return "".join(_next_title)
        # uppercase letters, base-26 with alphabet A..Z
        if re.search("^[A-Z]+$",_title) is not None:
            _next_title = self.make_increase([chr(i+ord('A')) for i in range(26)],_title)
            _next_title = list(_next_title)
            _next_title.reverse()
            return "".join(_next_title)
        # single Roman numeral characters Ⅰ..Ⅻ; no successor after Ⅻ
        if re.search("^[ⅠⅡⅢⅣⅤⅥⅦⅧⅨⅩⅪⅫ]$",_title) is not None:
            _sort = ["Ⅰ","Ⅱ","Ⅲ","Ⅳ","Ⅴ","Ⅵ","Ⅶ","Ⅷ","Ⅸ","Ⅹ","Ⅺ","Ⅻ"]
            _index = _sort.index(_title)
            if _index<len(_sort)-1:
                return _sort[_index+1]
        return None
|
|
|
    def count_title_before(self,list_obj):
        """Pre-scan the document to gather title statistics.

        :param list_obj: preprocessed sentence/table objects (attribute
            access .name/.text; table objects stringify to their HTML)
        :return: (dict_before, illegal_sentence) where dict_before counts how
            often each normalized title prefix occurs (used later to decide
            whether a prefix is a real outline context) and illegal_sentence
            is a set of page-number / boilerplate sentences to drop.
        """
        dict_before = {}
        dict_sentence_count = {}
        illegal_sentence = set()
        for obj_i in range(len(list_obj)):
            obj = list_obj[obj_i]
            _type = "sentence"
            _text = obj.text.strip()
            if obj.name=="table":
                _type = "table"
                _text = str(obj)
            _append = False  # NOTE(review): unused here, kept for symmetry with buildParsetree

            if _type=="sentence":
                if len(_text)>10 and len(_text)<100:
                    # count repeats of medium-length sentences; frequent ones
                    # (headers/footers) are declared illegal below
                    if _text not in dict_sentence_count:
                        dict_sentence_count[_text] = 0
                    dict_sentence_count[_text] += 1
                    if re.search("\d+页",_text) is not None:
                        illegal_sentence.add(_text)
                elif len(_text)<10:
                    # very short "第N页" fragments are page numbers
                    if re.search("第\d+页",_text) is not None:
                        illegal_sentence.add(_text)

                sentence_groups = self.find_title_by_pattern(_text[:10])
                if sentence_groups:
                    # groups[0]=(name, full title), groups[1]=prefix context,
                    # groups[-2]=index, groups[-1]=suffix context
                    sentence_title = sentence_groups[0][0]
                    sentence_title_text = sentence_groups[0][1]
                    title_index = sentence_groups[-2][1]
                    # normalize punctuation of the prefix/suffix contexts
                    # NOTE(review): the second replace maps ":" to ";" (not a
                    # full-width normalization like its neighbors) — confirm
                    # this asymmetry is intentional
                    title_before = sentence_groups[1][1].replace("(","(").replace(":",":").replace(":",";").replace(",",".").replace(",",".").replace("、",".")
                    title_after = sentence_groups[-1][1].replace(")",")").replace(":",":").replace(":",";").replace(",",".").replace(",",".").replace("、",".")
                    next_index = self.get_next_title(title_index)  # NOTE(review): unused result
                    if title_before not in dict_before:
                        dict_before[title_before] = 0
                    dict_before[title_before] += 1

        # sentences repeated more than 10 times are boilerplate
        for k,v in dict_sentence_count.items():
            if v>10:
                illegal_sentence.add(k)
        return dict_before,illegal_sentence
|
|
|
+ def is_page_no(self,sentence):
|
|
|
+ if len(sentence)<10:
|
|
|
+ if re.search("\d+页|^\-\d+\-$",sentence) is not None:
|
|
|
+ return True
|
|
|
+
|
|
|
+ def block_tree(self,childs):
|
|
|
+ for child in childs:
|
|
|
+
|
|
|
+ if not child["block"]:
|
|
|
+ child["block"] = True
|
|
|
+ childs2 = child["child_title"]
|
|
|
+ self.block_tree(childs2)
|
|
|
+
|
|
|
+
|
|
|
    def buildParsetree(self,list_obj,products=[],auto_merge_table=True,auto_append=False):
        """Build the outline tree over the preprocessed objects.

        Every object becomes a node dict; hierarchy is expressed through the
        "parent_title"/"child_title"/"title_next" links of the nodes, and the
        flat node list is returned (roots and children alike).

        :param list_obj: preprocessed sentence/table objects in document order
            (attribute access .name/.text/.sentence_index/…, dict-style .get)
        :param products: product names used to promote plain sentences to
            pseudo titles ("title_0").  NOTE(review): mutable default — safe
            here because it is only iterated, never mutated.
        :param auto_merge_table: accepted for interface compatibility; not
            referenced inside this method
        :param auto_append: when True, merge a non-title sentence into the
            previous node (disabled by default, see dated note below)
        :return: flat list of node dicts
        """

        self.parseTree = None
        trees = []  # NOTE(review): unused leftover
        list_length = []
        for obj in list_obj[:200]:
            if obj.name!="table":
                list_length.append(len(obj.text))
        # estimate of a "full" text line width, capped at 40 characters
        if len(list_length)>0:
            max_length = max(list_length)
        else:
            max_length = 40
        max_length = min(max_length,40)

        logger.debug("%s:%d"%("max_length",max_length))

        list_data = []
        last_table_index = None    # NOTE(review): unused leftover
        last_table_columns = None  # NOTE(review): unused leftover
        last_table = None          # NOTE(review): unused leftover
        dict_before,illegal_sentence = self.count_title_before(list_obj)
        for obj_i in range(len(list_obj)):
            obj = list_obj[obj_i]

            # logger.debug("==obj %s"%obj.text[:20])

            _type = "sentence"
            _text = standard_product(obj.text)
            if obj.name=="table":
                _type = "table"
                _text = standard_product(str(obj))
            _append = False
            sentence_title = None
            sentence_title_text = None
            sentence_groups = None
            title_index = None
            next_index = None
            parent_title = None
            title_before = None
            title_after = None
            title_next = None
            childs = []
            # positions of this object inside the preprocessed document
            sentence_index = obj.sentence_index
            wordOffset_begin = obj.wordOffset_begin
            wordOffset_end = obj.wordOffset_end
            sentences = obj.sentences

            list_kv = obj.get("kv",[])

            table_id = obj.get("table_id")

            list_table = None
            block = False

            has_product = False
            position = obj.get("position",{})

            if _type=="sentence":
                # drop page numbers / repeated boilerplate found in the pre-scan
                if _text in illegal_sentence:
                    continue

                sentence_groups = self.find_title_by_pattern(_text[:10])
                if sentence_groups:
                    # groups[0]=(pattern name, full title), groups[1]=prefix
                    # context, groups[-2]=index, groups[-1]=suffix context
                    title_before = standard_title_context(sentence_groups[1][1])
                    title_after = sentence_groups[-1][1]
                    sentence_title_text = sentence_groups[0][1]
                    other_text = _text.replace(sentence_title_text,"")
                    # accept as a real title only when the same prefix context
                    # occurs more than once, or the index has a suffix delimiter
                    if (title_before in dict_before and dict_before[title_before]>1) or title_after!="":
                        sentence_title = sentence_groups[0][0]

                        title_index = sentence_groups[-2][1]
                        next_index = self.get_next_title(title_index)

                        other_text = _text.replace(sentence_title_text,"")

                        for p in products:
                            if other_text.strip()==p.strip():
                                has_product = True

                    else:
                        # not a reliable title — still promote it when the
                        # remaining text is exactly a known product name
                        _fix = False

                        for p in products:
                            if other_text.strip()==p.strip():
                                title_before = "=产品"
                                sentence_title = "title_0"
                                sentence_title_text = p
                                title_index = "0"
                                title_after = "产品="
                                next_index = "0"
                                _fix = True
                                has_product = True
                                break
                        if not _fix:
                            title_before = None
                            title_after = None
                            sentence_title_text = None
                else:
                    # short parameter-looking sentence containing a product
                    # name is promoted to a pseudo title as well
                    if len(_text)<40 and re.search(_param_pattern,_text) is not None:
                        for p in products:
                            if _text.find(p)>=0:
                                title_before = "=产品"
                                sentence_title = "title_0"
                                sentence_title_text = p
                                title_index = "0"
                                title_after = "产品="
                                next_index = "0"
                                _fix = True  # NOTE(review): assigned but unused here
                                has_product = True
                                break

            # Merge two consecutive non-title sentences.
            # 2024-11-06: disabled by default (auto_append=False) because
            # document 485441521 got a wrong end position for its tender content.
            if auto_append:
                if _type=="sentence":
                    if sentence_title is None and len(list_data)>0 and list_data[-1]["sentence_title"] is not None and list_data[-1]["line_width"]>=max_length*0.6:
                        list_data[-1]["text"] += _text
                        list_data[-1]["line_width"] = len(_text)
                        update_kv_span(list_kv,len(_text))
                        list_data[-1]["kv"].extend(list_kv)
                        list_data[-1]["sentences"].extend(sentences)
                        _append = True
                    elif sentence_title is None and len(list_data)>0 and _type==list_data[-1]["type"]:
                        if list_data[-1]["line_width"]>=max_length*0.7:
                            list_data[-1]["text"] += _text
                            list_data[-1]["line_width"] = len(_text)
                            update_kv_span(list_kv,len(_text))
                            list_data[-1]["kv"].extend(list_kv)
                            list_data[-1]["sentences"].extend(sentences)
                            _append = True

            if not _append:
                _data = {"type":_type,"tag":obj.get("tag"),"table_id":table_id, "text":_text,"sentences":sentences,"list_table":list_table,
                         "line_width":len(_text),"sentence_title":sentence_title,"title_index":title_index,
                         "sentence_title_text":sentence_title_text,"sentence_groups":sentence_groups,"parent_title":parent_title,
                         "child_title":childs,"title_before":title_before,"title_after":title_after,"title_next":title_next,"next_index":next_index,
                         "block":block,"has_product":has_product,
                         "sentence_index":sentence_index,"wordOffset_begin":wordOffset_begin,"wordOffset_end":wordOffset_end,
                         "kv":list_kv,"position":position
                         }

                if sentence_title is not None:
                    if len(list_data)>0:
                        if self.is_title_first(title_index):
                            # a fresh "first" index (一/1/a/…) starts a sub-list
                            # under the nearest preceding title
                            for i in range(1,len(list_data)+1):
                                _d = list_data[-i]
                                if _d["sentence_title"] is not None:
                                    _data["parent_title"] = _d
                                    _d["child_title"].append(_data)
                                    break
                        else:
                            # otherwise try to chain this title after an earlier
                            # sibling, scanning backwards with progressively
                            # looser matching criteria (5 passes below)
                            _find = False
                            # pass 1: exact contexts, and the earlier sibling's
                            # expected next index equals ours
                            for i in range(1,len(list_data)+1):
                                if _find:
                                    break
                                _d = list_data[-i]
                                if _d.get("sentence_title")==sentence_title and title_before==_d["title_before"] and title_after==_d["title_after"]:
                                    if _d["next_index"]==title_index and _d["title_next"] is None and not _d["block"]:
                                        _data["parent_title"] = _d["parent_title"]
                                        _d["title_next"] = _data
                                        if len(_d["child_title"])>0:
                                            # closing the sibling: seal and block its subtree
                                            _d["child_title"][-1]["title_next"] = ""
                                            self.block_tree(_d["child_title"])
                                        if _d["parent_title"] is not None:
                                            _d["parent_title"]["child_title"].append(_data)
                                        _find = True
                                        break
                            # pass 2: exact contexts but only against the
                            # immediately previous node, index continuity not required
                            for i in range(1,len(list_data)+1):
                                if _find:
                                    break
                                _d = list_data[-i]
                                if i==1 and not _d["block"] and _d.get("sentence_title")==sentence_title and title_before==_d["title_before"] and title_after==_d["title_after"]:
                                    _data["parent_title"] = _d["parent_title"]
                                    _d["title_next"] = _data
                                    if len(_d["child_title"])>0:
                                        _d["child_title"][-1]["title_next"] = ""
                                        self.block_tree(_d["child_title"])
                                    if _d["parent_title"] is not None:
                                        _d["parent_title"]["child_title"].append(_data)
                                    _find = True
                                    break
                            # normalize contexts for the looser passes
                            title_before = standard_title_context(title_before)
                            title_after = standard_title_context(title_after)
                            # pass 3: normalized contexts with index continuity
                            for i in range(1,len(list_data)+1):
                                if _find:
                                    break
                                _d = list_data[-i]
                                if _d.get("sentence_title")==sentence_title and title_before==standard_title_context(_d["title_before"]) and title_after==standard_title_context(_d["title_after"]):
                                    if _d["next_index"]==title_index and _d["title_next"] is None and not _d["block"]:
                                        _data["parent_title"] = _d["parent_title"]
                                        _d["title_next"] = _data
                                        if len(_d["child_title"])>0:
                                            _d["child_title"][-1]["title_next"] = ""
                                            self.block_tree(_d["child_title"])
                                        if _d["parent_title"] is not None:
                                            _d["parent_title"]["child_title"].append(_data)
                                        _find = True
                                        break
                            # pass 4: normalized contexts, no index continuity
                            for i in range(1,len(list_data)+1):
                                if _find:
                                    break
                                _d = list_data[-i]
                                if not _d["block"] and _d.get("sentence_title")==sentence_title and title_before==standard_title_context(_d["title_before"]) and title_after==standard_title_context(_d["title_after"]):
                                    _data["parent_title"] = _d["parent_title"]
                                    _d["title_next"] = _data
                                    if len(_d["child_title"])>0:
                                        _d["child_title"][-1]["title_next"] = ""
                                        # self.block_tree(_d["child_title"])
                                    if _d["parent_title"] is not None:
                                        _d["parent_title"]["child_title"].append(_data)
                                    _find = True
                                    break
                            # pass 5: prefix context only, limited lookback of 20 nodes
                            for i in range(1,min(len(list_data)+1,20)):
                                if _find:
                                    break
                                _d = list_data[-i]
                                if not _d["block"] and _d.get("sentence_title")==sentence_title and title_before==standard_title_context(_d["title_before"]):
                                    _data["parent_title"] = _d["parent_title"]
                                    _d["title_next"] = _data
                                    if len(_d["child_title"])>0:
                                        _d["child_title"][-1]["title_next"] = ""
                                        # self.block_tree(_d["child_title"])
                                    if _d["parent_title"] is not None:
                                        _d["parent_title"]["child_title"].append(_data)
                                    _find = True
                                    break

                            if not _find:
                                # no sibling found: attach under the nearest
                                # preceding title instead
                                if len(list_data)>0:
                                    for i in range(1,len(list_data)+1):
                                        _d = list_data[-i]
                                        if _d.get("sentence_title") is not None:
                                            _data["parent_title"] = _d
                                            _d["child_title"].append(_data)
                                            break

                else:
                    # non-title node: attach under the nearest preceding title
                    if len(list_data)>0:
                        for i in range(1,len(list_data)+1):
                            _d = list_data[-i]
                            if _d.get("sentence_title") is not None:
                                _data["parent_title"] = _d
                                _d["child_title"].append(_data)
                                break

                list_data.append(_data)

        # Propagate has_product to siblings with the same title contexts,
        # first scanning forward then (approximately) backward.
        for _data in list_data:

            childs = _data["child_title"]

            for c_i in range(len(childs)):
                cdata = childs[c_i]
                if cdata["has_product"]:
                    continue
                else:
                    if c_i>0:
                        last_cdata = childs[c_i-1]
                        if cdata["sentence_title"] is not None and last_cdata["sentence_title"] is not None and last_cdata["title_before"]==cdata["title_before"] and last_cdata["title_after"]==cdata["title_after"] and last_cdata["has_product"]:
                            cdata["has_product"] = True
                    if c_i<len(childs)-1:
                        last_cdata = childs[c_i+1]
                        if cdata["sentence_title"] is not None and last_cdata["sentence_title"] is not None and last_cdata["title_before"]==cdata["title_before"] and last_cdata["title_after"]==cdata["title_after"] and last_cdata["has_product"]:
                            cdata["has_product"] = True
            # NOTE(review): this second pass indexes cdata in reverse but the
            # neighbors (childs[c_i-1]/childs[c_i+1]) by the forward counter —
            # likely intended to mirror the reversed index; confirm.
            for c_i in range(len(childs)):
                cdata = childs[len(childs)-1-c_i]
                if cdata["has_product"]:
                    continue
                else:
                    if c_i>0:
                        last_cdata = childs[c_i-1]
                        if cdata["sentence_title"] is not None and last_cdata["sentence_title"] is not None and last_cdata["title_before"]==cdata["title_before"] and last_cdata["title_after"]==cdata["title_after"] and last_cdata["has_product"]:
                            cdata["has_product"] = True
                    if c_i<len(childs)-1:
                        last_cdata = childs[c_i+1]
                        if cdata["sentence_title"] is not None and last_cdata["sentence_title"] is not None and last_cdata["title_before"]==cdata["title_before"] and last_cdata["title_after"]==cdata["title_after"] and last_cdata["has_product"]:
                            cdata["has_product"] = True

        return list_data
|
|
|
+ def get_tree_sentence(self):
|
|
|
+ list_sentence = []
|
|
|
+ for obj in self.tree:
|
|
|
+ list_sentence.extend(obj.get("sentences",[]))
|
|
|
+
|
|
|
+ return list_sentence
|
|
|
+
|
|
|
+
|
|
|
    def extract_kvs_from_table(self,list_pattern,tree=None,result_kv=None):
        """Collect key/value pairs from table nodes whose key matches each
        pattern.

        :param list_pattern: list of regex strings; one result bucket per pattern
        :param tree: node list to scan (defaults to self.tree)
        :param result_kv: accumulator shared across the recursion
        :return: list parallel to list_pattern; each bucket holds one
            {"table_id":..., "kv":[{"key","value","position"},...]} per
            matching table
        """
        if result_kv is None:
            result_kv = [[] for i in list_pattern]
        # validate all patterns up front; return empty buckets on bad input
        try:
            for pattern in list_pattern:
                re.compile(pattern)
        except Exception as e:
            log("list_pattern error: "+str(e))
            return result_kv
        if tree is None:
            tree = self.tree
        for obj in tree:
            is_table = True if obj.get("tag","")=="table" else False
            if is_table:

                table_id = obj.get("table_id")
                # NOTE(review): no default — assumes every table node carries "kv"; confirm
                list_kv = obj.get("kv")
                for _pi in range(len(list_pattern)):
                    table_kvs = []
                    for _d0 in list_kv:
                        _k = _d0.get("key","")
                        _v = _d0.get("value","")
                        _d = {"key":_k,"value":_v,"position":_d0.get("position",{})}
                        if re.search(list_pattern[_pi],_k) is not None:
                            table_kvs.append(_d)
                    if table_kvs:
                        result_kv[_pi].append({"table_id":table_id,"kv":table_kvs})
            childs = obj.get("children",[])
            for child in childs:
                # NOTE(review): *child* is passed directly as the next *tree*
                # and is then iterated; if children are dicts this walks their
                # keys — possibly should be [child]. Confirm the shape of the
                # "children" entries before changing.
                self.extract_kvs_from_table(list_pattern,child,result_kv)
        return result_kv
|
|
|
+ def extract_kvs_from_sentence(self,list_pattern,tree=None,result_kv=None):
|
|
|
+ if result_kv is None:
|
|
|
+ result_kv = [[] for i in list_pattern]
|
|
|
+ try:
|
|
|
+ for pattern in list_pattern:
|
|
|
+ re.compile(pattern)
|
|
|
+ except Exception as e:
|
|
|
+ log("list_pattern error: "+str(e))
|
|
|
+ return result_kv
|
|
|
+ if tree is None:
|
|
|
+ tree = self.tree
|
|
|
+ for obj in tree:
|
|
|
+ is_table = True if obj.get("tag","")=="table" else False
|
|
|
+ if not is_table:
|
|
|
+ list_kv = obj.get("kv",[])
|
|
|
+ for _pi in range(len(list_pattern)):
|
|
|
+ for _d in list_kv:
|
|
|
+ _k = _d.get("key","")
|
|
|
+ _v = _d.get("value","")
|
|
|
+ if re.search(list_pattern[_pi],_k) is not None:
|
|
|
+ result_kv[_pi].append(_d)
|
|
|
+ return result_kv
|
|
|
+
|
|
|
+ def extract_kvs_from_outline(self,list_pattern,tree=None,result_kv=None):
|
|
|
+ if result_kv is None:
|
|
|
+ result_kv = [[] for i in list_pattern]
|
|
|
+ try:
|
|
|
+ for pattern in list_pattern:
|
|
|
+ re.compile(pattern)
|
|
|
+ except Exception as e:
|
|
|
+ log("list_pattern error: "+str(e))
|
|
|
+ return result_kv
|
|
|
+ if tree is None:
|
|
|
+ tree = self.tree
|
|
|
+ for obj in tree:
|
|
|
+ is_table = True if obj.get("tag","")=="table" else False
|
|
|
+ if not is_table:
|
|
|
+
|
|
|
+ _text = obj["text"]
|
|
|
+
|
|
|
+ for _pi in range(len(list_pattern)):
|
|
|
+
|
|
|
+ sentence_index_from = obj["sentence_index"]
|
|
|
+ sentence_index_to = sentence_index_from
|
|
|
+
|
|
|
+ if re.search(list_pattern[_pi],_text) is not None and obj.get("sentence_title") is not None:
|
|
|
+
|
|
|
+ childs = get_childs([obj])
|
|
|
+ _child_text = ""
|
|
|
+ for _child in childs:
|
|
|
+ sentence_index_to = _child["sentence_index"]
|
|
|
+ _child_text+=_child["text"]+"\n"
|
|
|
+ result_kv[_pi].append({"key":_text,"value":_child_text,"from_outline":True,"key_sentence_index_from":sentence_index_from,
|
|
|
+ "key_sentence_index_to":sentence_index_from,"value_sentence_index_from":sentence_index_from,
|
|
|
+ "value_sentence_index_to":sentence_index_to,})
|
|
|
+ return result_kv
|
|
|
+
|
|
|
+
|
|
|
+ def extract_kv(self,k_pattern,from_sentence=True,from_outline=True,from_table=True):
|
|
|
+ result_kv = []
|
|
|
+ try:
|
|
|
+ re.compile(k_pattern)
|
|
|
+ except Exception as e:
|
|
|
+ log("k_pattern error: "+str(e))
|
|
|
+ traceback.print_exc()
|
|
|
+ return result_kv
|
|
|
+ result_kv = []
|
|
|
+ if from_table:
|
|
|
+ result_kv_table = self.extract_kvs_from_table([k_pattern])
|
|
|
+ for table_d in result_kv_table[0]:
|
|
|
+ table_id = table_d.get("table_id")
|
|
|
+ table_kvs = table_d.get("kv",[])
|
|
|
+ for _d in table_kvs:
|
|
|
+ _d["from_table"] = True
|
|
|
+ result_kv.extend(table_kvs)
|
|
|
+ if from_sentence:
|
|
|
+ result_kv_sentence = self.extract_kvs_from_sentence([k_pattern])
|
|
|
+ for _d in result_kv_sentence[0]:
|
|
|
+ _d["from_sentence"] = True
|
|
|
+ result_kv.extend(result_kv_sentence[0])
|
|
|
+ if from_outline:
|
|
|
+ result_kv_outline = self.extract_kvs_from_outline([k_pattern])
|
|
|
+ for _d in result_kv_outline[0]:
|
|
|
+ _d["from_outline"] = True
|
|
|
+ result_kv.extend(result_kv_outline[0])
|
|
|
+
|
|
|
+ return result_kv
|
|
|
+
|
|
|
+ # def extract_kvs_from_table(self,list_pattern):
|
|
|
+
|
|
|
+
|
|
|
+
|
|
|
+
|
|
|
if __name__ == '__main__':
    # Sample HTML document (a Chinese public-procurement notice) used as a
    # smoke-test fixture for the tree builder and the kv extractors.
    html_content = """
<div>
<div>
 工程造价咨询
 </div>
<div>
<div>
 关于为【意溪镇四宁村农村人居环境及村道整治提升项目-前期费用结算审核】公开选取【工程造价咨询】机构的公告
 </div>
</div>
<div>
<p> 2024-12-24 17:30 ,在广东省网上中介服务超市为<a target="_blank" class="markBlue" href="/bdqyhx/216515788655505408.html" style="color: #3083EB !important;text-decoration: underline;">潮州市湘桥区意溪镇四宁村民委员会</a> 公开选取工程造价咨询中介服务机构,现将相关事项公告如下: </p>
<p>此项目采用多选一的直接选取方式,项目业主将在报名的若干家中介机构中,自主选定一家作为中选机构,未被选中的机构不应有任何异议。</p>
<ul>
<li> <b>项目业主</b>
<div data-purorgcode="G19187326" data-tyshxydm="54445102G191873262">
 <a target="_blank" class="markBlue" href="/bdqyhx/216515788655505408.html" style="color: #3083EB !important;text-decoration: underline;">潮州市湘桥区意溪镇四宁村民委员会</a>
 </div> </li>
<li> <b>采购项目名称</b>
<div data-divisioncode="445100">
 意溪镇四宁村农村人居环境及村道整治提升项目-前期费用结算审核
 </div> </li>
<li> <b>中介服务事项</b>
<div data-servicesubjectcode="">
 无(属于非行政管理的中介服务项目采购)
 </div> </li>
<li> <b>投资审批项目</b>
<div>
 否
 </div> </li>
<li> <b>采购项目编码</b>
<div>
 445100G191873262412200638
 </div> </li>
<li> <b>项目规模</b>
<div data-restrictionsforehead="1500000.0" data-restrictionsforeheadtype="amountInvested">
<p>投资额(¥1,500,000.00元)</p>
</div> </li>
<li> <b>所需服务</b>
<div>
 工程造价咨询
 </div> </li>
<li> <b>服务内容</b>
<div>
 本次工程为意溪镇四宁村农村人居环境及村道整治提升项目,位于潮州市湘桥区意溪镇四宁村,投资额为1500000.00元,建设内容包括路面便底化约800平方米,黑底化约2150平方米,新安装太阳能路灯14盏等,现已竣工验收完成,根据业主要求,对本项目进行前期费用结算审核(概算编制费2571.86元,施工图审查费5024.5元,设计费54700元,预算编制费5315.66元,工程监理费35275.46元,工程测绘费22634.89元,建设方案编制费10500元),并出具前期费用结算审核报告及定案表。
 </div> </li>
<li> <b>中介机构要求</b>
<div>
 仅承诺服务即可
 </div> </li>
<li> <b>其他要求说明:</b>
<div>
 无
 </div> </li>
<li> <b>服务时限说明</b>
<div>
 无要求,按合同约定。
 </div> </li>
<li> <b>服务金额</b>
<div data-biddingmode="" data-highprice="" data-lowprice="1600.0">
 ¥1,600.00元
 </div> </li>
<li> <b>金额说明</b>
<div>
 按广东省物价局关于调整我省建设工程造价咨询服务收费的复函(粤价函[2011]742号)规定的造价咨询行业收费标准,最终价格以财政审核或第三方工程造价公司审核价为准。
 </div> </li>
<li> <b>选取中介服务机构方式</b>
<div data-selectmodetype="DXYZJXQ">
 直接选取
 </div> </li>
<li> <b>是否选取中介</b>
<div>
 否
 </div> </li>
<li> <b>有无回避情况</b>
<div>
 否
 </div> </li>
<li> <b> 截止报名时间 </b>
<div>
 2024-12-24 17:30
 </div> </li>
<li> <b>业主单位咨询电话</b>
<div>
 <a target="_blank" class="markBlue" href="/bdqyhx/216515788655505408.html" style="color: #3083EB !important;text-decoration: underline;">潮州市湘桥区意溪镇四宁村民委员会</a> (登录后查看)
 </div> </li>
<li> <b>采购需求书下载</b>
<div>
<a target="_blank" class="markBlue" filelink="277a2f13e4a4d41149766c82adfc8762" href="https://attachment-hub.oss-cn-hangzhou.aliyuncs.com/277a/20230710/2023-07-10/04733/1688982497468.jpg?Expires=1734688561&OSSAccessKeyId=LTAI5tHoEUDSy6FnZjMKsNiZ&Signature=G3SAJQuJlYZ5lOpHNc%2BWHspfDpE%3D" original="https://ygp.gdzwfw.gov.cn/zjfwcs/gd-zjcs-pub/file/downloadfile/PjAttachment/7618f614-ae9c-48cc-97f7-17ffd05d4200" rel="noreferrer">资金说明1.jpg</a>
<br>
<a target="_blank" class="markBlue" filelink="3e3795ea0244f1b4cc77123512edd30a" href="https://attachment-hub.oss-cn-hangzhou.aliyuncs.com/3e37/20230710/2023-07-10/04733/1688982505603.jpg?Expires=1734688561&OSSAccessKeyId=LTAI5tHoEUDSy6FnZjMKsNiZ&Signature=ZP8xdy%2F1a%2Blbb%2FOAhFyzjpadprg%3D" original="https://ygp.gdzwfw.gov.cn/zjfwcs/gd-zjcs-pub/file/downloadfile/PjAttachment/beb5d765-4ce3-471f-a290-14a12d9ad64e" rel="noreferrer">资金说明2.jpg</a>
<br>
</div> </li>
</ul>
</div>
<p> 广东省网上中介服务超市已经向符合资质条件的在库中介服务机构的业务授权人手机号码和中介专属网页发送通知,诚邀符合资质条件的在库中介服务机构登录中介专属网页进行报名。 </p>
<p><span>潮州市公共资源交易中心</span><br> <span>2024-12-20</span></p>
</div>
 """
    # NOTE(review): _tree is unused here — Html2KVTree parses the HTML itself
    _tree = html_to_tree(html_content)

    # build the tree and dump it for visual inspection
    _pd = Html2KVTree(html_content)
    _pd.print_tree(_pd.tree,"-|")

    # extract key/value pairs whose key matches the given pattern
    list_kv = _pd.extract_kv("资质要求")
    print(list_kv)

    # all preprocessed sentences; their indices correspond to the kv spans
    print(_pd.get_tree_sentence())

    # soup = BeautifulSoup(html_content,"lxml")
    # table_tree = table_to_tree(soup)
    # print(json.dumps(table_tree,ensure_ascii=False))
|
|
|
+
|