#coding:utf8 from bs4 import BeautifulSoup import json import re import traceback import logging logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s') logger = logging.getLogger(__name__) logger.setLevel(logging.INFO) from BiddingKG.dl.interface.Preprocessing import tableToText from uuid import uuid4 def log(msg): ''' @summary:打印信息 ''' logger.info(msg) class DotDict(dict): def __getattr__(self,name): try: return self[name] except KeyError: raise AttributeError("No attribute '%s'" % name) def __setattr__(self,name,value): self[name] = value def get_tables(soup,dict_table = None): is_first = False if dict_table is None: dict_table = {"children":[]} is_first = True if soup and soup.name: childs = soup.contents else: childs = [] # tr+tbody _flag = False if len(childs)>=2: if childs[0].name=="tr" and childs[1].name=="tbody": childs[1].insert(0,copy.copy(childs[0])) childs[0].decompose() _flag = True childs_bak = childs # tbody+tbody _flag = False if soup and soup.name: childs = soup.find_all("tbody",recursive=False) if len(childs)>=2: if childs[0].name=="tbody" and childs[1].name=="tbody": child0_tr = childs[0].find_all("tr",recursive=False) has_td_count = 0 tr_line = None for tr in child0_tr: if len(tr.find_all("td",recursive=False))>0: has_td_count += 1 tr_line = tr if has_td_count==1: childs[1].insert(0,copy.copy(tr_line)) childs[0].decompose() _flag = True childs = childs_bak for child in childs: _d = {"children":[]} if child.name in ("table","tbody"): if len(child.find_all("tr",recursive=False))>0: # _d["table"] = str(child) _d["table"] = child dict_table["children"].append(_d) child_dict_table = get_tables(child,_d) if is_first: if soup.name in ("table","tbody"): if not _flag: if len(soup.find_all("tr",recursive=False))>0: # dict_table["table"] = str(soup) dict_table["table"] = soup dict_table = squeeze_tables(dict_table) return dict_table def squeeze_tables(dict_table): _i = -1 new_children = [] for child in dict_table["children"]: _i += 1 child_table = squeeze_tables(child) if child_table is not None: new_children.append(child_table) if dict_table.get("table") is not None: if len(new_children)>0: dict_table["children"] = new_children else: del dict_table["children"] return dict_table if len(new_children)==1: return new_children[0] if len(new_children)>1: dict_table["children"] = new_children return dict_table return None def table_to_tree(soup,json_obj=None): if json_obj is None: json_obj = DotDict({"tag": "table","children":[]}) dict_table = get_tables(soup) children = dict_table.get("children",[]) for child in children: _d = DotDict({"tag": "table","children":[]}) json_obj["children"].append(_d) table = child.get("table") if table is not None: table_id = str(uuid4()) table_to_tree(table,_d) table = dict_table.get("table") if table is not None: table_id = str(uuid4()) json_obj["table_id"] = table_id soup, kv_list, text = tableToText(table,return_kv=True) _flag = False if soup and soup.name: if soup.contents: _flag = True soup.contents[0].insert_before(table_id) if not _flag: soup.insert_before(table_id) json_obj["text"] = text json_obj["kv"] = kv_list for _d in kv_list: _d["position"] = {"key_begin_sentence":0, "key_begin_sentence_start":_d.get("key_sen_index",0), "key_end_sentence":0, "key_end_sentence_end":_d.get("key_sen_index",0)+len(_d.get("key","")), "value_begin_sentence":0, "value_begin_sentence_start":_d.get("value_sen_index",0), "value_end_sentence":0, "value_end_sentence_end":_d.get("value_sen_index",0)+len(_d.get("value","")) } if "key_sen_index" in _d: _d.pop("key_sen_index") if "value_sen_index" in _d: _d.pop("value_sen_index") return json_obj def update_table_position(table,sentence_index): def get_table_idx_lengths(list_table_id,index): _length = 0 for _d in list_table_id: table_id = _d.get("table_id") idx = _d.get("idx",-1) if idx>=0 and _idx<=index: _length += len(table_id) return _length def get_sentence_index(list_sent_span,idx): list_sent_span.sort(key=lambda x:x[0]) for _i in range(len(list_sent_span)): if list_sent_span[_i][0]<=idx and idx<=list_sent_span[_i][1]: return _i return 0 def get_list_tables(table,list_table=[]): table_id = table.get("table_id") if table_id: list_table.append(table) childs = table.get("children",[]) for child in childs: get_list_tables(child,list_table) return list_table tables = get_list_tables(table) if tables: list_table_id = [] text = tables[0].get("text","") for table in tables: table_id = table.get("table_id") if table_id: _idx = text.find(table_id) list_table_id.append({"table_id":table_id,"idx":_idx}) if _idx>=0: kv_list = table.get("kv",[]) for _d in kv_list: _d["position"]["key_begin_sentence_start"] += _idx _d["position"]["key_end_sentence_end"] += _idx _d["position"]["value_begin_sentence_start"] += _idx _d["position"]["value_end_sentence_end"] += _idx # remove table_id for table in tables: table_id = table.get("table_id") if table_id: kv_list = table.get("kv",[]) for _d in kv_list: _length = get_table_idx_lengths(list_table_id,_d["position"]["key_begin_sentence_start"]) _d["position"]["key_begin_sentence_start"] -= _length _length = get_table_idx_lengths(list_table_id,_d["position"]["key_end_sentence_end"]) _d["position"]["key_end_sentence_end"] -= _length _length = get_table_idx_lengths(list_table_id,_d["position"]["value_begin_sentence_start"]) _d["position"]["value_begin_sentence_start"] -= _length _length = get_table_idx_lengths(list_table_id,_d["position"]["value_end_sentence_end"]) _d["position"]["value_end_sentence_end"] -= _length for table in tables: if table.get("table_id"): text = table.get("text","") for _d in list_table_id: table_id = _d.get("table_id") text = text.replace(table_id,"") table["text"] = text # split sentence text = tables[0].get("text","") list_sentence = str(text).split("。") list_sent_span = [] _begin = 0 for _i in range(len(list_sentence)): list_sentence[_i] += "。" _end = _begin+len(list_sentence[_i]) list_sent_span.append([_begin,_end]) _begin = _end tables[0]["sentences"] = list_sentence for table in tables: kv_list = table.get("kv",[]) for _d in kv_list: key_begin_sentence = get_sentence_index(list_sent_span,_d["position"]["key_begin_sentence_start"]) _d["position"]["key_begin_sentence"] = key_begin_sentence+sentence_index key_end_sentence = get_sentence_index(list_sent_span,_d["position"]["key_end_sentence_end"]) _d["position"]["key_end_sentence"] = key_end_sentence+sentence_index value_begin_sentence = get_sentence_index(list_sent_span,_d["position"]["value_begin_sentence_start"]) _d["position"]["value_begin_sentence"] = value_begin_sentence+sentence_index value_end_sentence = get_sentence_index(list_sent_span,_d["position"]["value_end_sentence_end"]) _d["position"]["value_end_sentence"] = value_end_sentence+sentence_index return sentence_index + len(list_sentence) return sentence_index def tree_reposition(tree,sentence_index=None): if sentence_index is None: sentence_index = 0 wordOffset_begin = 0 wordOffset_end = 0 for obj in tree: is_table = True if obj.get("tag","")=="table" else False if not is_table: sentence_index += 1 obj["sentence_index"] = sentence_index obj["sentences"] = [obj.get("text","")] for _t in obj["sentences"]: wordOffset_end += len(_t) obj["wordOffset_begin"] = wordOffset_begin obj["wordOffset_end"] = wordOffset_end wordOffset_begin = wordOffset_end else: sentence_index += 1 obj["sentence_index"] = sentence_index obj["sentence_index_start"] = sentence_index obj["sentences"] = [obj.get("text","")] sentence_index_end = update_table_position(obj,sentence_index) obj["sentence_index_end"] = sentence_index_end sentence_index = sentence_index_end for _t in obj["sentences"]: wordOffset_end += len(_t) obj["wordOffset_begin"] = wordOffset_begin obj["wordOffset_end"] = wordOffset_end wordOffset_begin = wordOffset_end # 递归地将 DOM 转换为 JSON # 递归地将 DOM 转换为 JSON def dom_to_tree(node): if node.name: # 如果是标签节点 json_obj = DotDict({"tag": node.name}) if node.attrs: json_obj["attributes"] = node.attrs is_table = False if node.name in ("table","tbody"): json_obj = table_to_tree(node) is_table = True if not is_table: children = [] for child in node.contents: _child = dom_to_tree(child) if _child is not None: children.append(_child) if children: json_obj["children"] = children json_obj["name"] = json_obj.get("tag") return json_obj elif node.string and node.string.strip(): # 如果是纯文本节点 return DotDict({"tag":"text","name":"text","text": node.string.strip()}) return None # 忽略空白字符 def tree_pop_parent(tree): if isinstance(tree,list): for child in tree: tree_pop_parent(child) if isinstance(tree,dict): if "parent" in tree: del tree["parent"] for child in tree.get("children",[]): tree_pop_parent(child) def html_to_tree(html_content): # 使用 BeautifulSoup 解析 HTML soup = BeautifulSoup(html_content, "lxml") dom_tree = dom_to_tree(soup) extract_kv_from_tree(dom_tree) list_objs = get_outobjs_from_tree(dom_tree) tree_reposition(list_objs) return dom_tree def print_tree(dom_tree): # 转换为 JSON 格式 tree_pop_parent(dom_tree) json_output = json.dumps(dom_tree,ensure_ascii=False, indent=2) # kv_pattern = "\s*(?P.{,10})[::]\s*(?P[^::。,()]+?)(\s+|$|;|;)(?![\u4e00-\u9fa5]+:)" kv_pattern = r"(?P[\u4e00-\u9fa5]+):\s*(?P[^\s,。();;]+)" def get_kv_pattern(): import re text = """ name: John age: 30 note: invalid; """ # 正则模式 kv_pattern = r"(?P[a-zA-Z]+)[::](?P.+(?!.*[::]))" # 提取匹配 matches = re.findall(kv_pattern, text) # 打印结果 for match in matches: key, value = match print("{%s}: {%s}"%(key,value)) def extract_kv_from_sentence(sentence): list_kv = [] _iter = re.finditer("[::]", sentence) if _iter: list_span = [] for iter in _iter: list_span.append(iter.span()) if len(list_span)==1: _begin,_end = list_span[0] if _begin<20 and _end1: _begin = 0 for _i in range(len(list_span)-1): _end = list_span[_i+1][0] iter = re.search(kv_pattern,sentence[_begin:_end]) _begin = list_span[_i][1] if iter is not None: _d = DotDict({}) _d["key"] = iter.group("key") _d["value"] = iter.group("value") _d["position"] = {"key_begin_sentence":0, "key_begin_sentence_start":iter.span("key")[0], "key_end_sentence":0, "key_end_sentence_end":iter.span("key")[0]+len(_d.get("key","")), "value_begin_sentence":0, "value_begin_sentence_start":iter.span("value")[0], "value_end_sentence":0, "value_end_sentence_end":iter.span("value")[0]+len(_d.get("value","")) } list_kv.append(_d) _begin = list_span[-2][1] _end = len(sentence) iter = re.search(kv_pattern,sentence[_begin:_end]) if iter is not None: _d = DotDict({}) _d["key"] = iter.group("key") _d["value"] = iter.group("value") _d["position"] = {"key_begin_sentence":0, "key_begin_sentence_start":iter.span("key")[0], "key_end_sentence":0, "key_end_sentence_end":iter.span("key")[0]+len(_d.get("key","")), "value_begin_sentence":0, "value_begin_sentence_start":iter.span("value")[0], "value_end_sentence":0, "value_end_sentence_end":iter.span("value")[0]+len(_d.get("value","")) } list_kv.append(_d) # for iter in _iter: # _d = DotDict({}) # _d["key"] = iter.group("key") # _d["value"] = iter.group("value") # _d["key_span"] = iter.span("key") # _d["value_span"] = iter.span("value") # list_kv.append(_d) return list_kv def extract_kv_from_node(node): list_kv = [] _text = node.get("text") if _text: list_kv = extract_kv_from_sentence(_text) node["kv"] = list_kv return list_kv def get_child_text(node): _text = node.get("text","") for child in node.get("children",[]): _text += get_child_text(child) return _text def extract_kv_from_tree(tree): if isinstance(tree,list): _count = 0 has_table = False for child in tree: _c,_t = extract_kv_from_tree(child) _count += _c if _t: has_table = _t return _count,has_table if isinstance(tree,dict): if tree.get("tag","")!="table": childs = tree.get("children",[]) if len(childs)>0: _count = 0 has_table = False for child in childs: _c,_t = extract_kv_from_tree(child) _count += _c if _t: has_table = _t if _count==0: _text = get_child_text(tree) if "children" in tree: del tree["children"] tree["text"] = _text list_kv = extract_kv_from_node(tree) _count = len(list_kv) return _count,has_table if tree.get("tag","")=="p" and not has_table: _text = get_child_text(tree) tree["text"] = _text p_list_kv = extract_kv_from_node(tree) if len(p_list_kv)>=_count: if "children" in tree: del tree["children"] else: tree["text"] = "" return len(p_list_kv),has_table return _count,has_table else: list_kv = extract_kv_from_node(tree) return len(list_kv),False else: return len(tree.get("kv",[])),True return 0,False def update_kv_span(list_kv,append_length): for _d in list_kv: _d["position"] = {"key_begin_sentence":0, "key_begin_sentence_start":_d.get("key_sen_index",0), "key_end_sentence":0, "key_end_sentence_end":_d.get("key_sen_index",0)+len(_d.get("key","")), "value_begin_sentence":0, "value_begin_sentence_start":_d.get("value_sen_index",0), "value_end_sentence":0, "value_end_sentence_end":_d.get("value_sen_index",0)+len(_d.get("value","")) } _d["position"]["key_begin_sentence_start"] += append_length _d["position"]["key_end_sentence_end"] += append_length _d["position"]["value_begin_sentence_start"] += append_length _d["position"]["value_end_sentence_end"] += append_length def get_outobjs_from_tree(tree,list_outobjs=None): is_first = False if list_outobjs is None: list_outobjs = [] is_first = True if isinstance(tree,list): for child in tree: get_outobjs_from_tree(child,list_outobjs) if isinstance(tree,dict): childs = tree.get("children",[]) _text = tree.get("text","") is_table = True if tree.get("tag","")=="table" else False if is_table: list_outobjs.append(tree) else: if _text!="": tree.name = tree.tag list_outobjs.append(tree) for child in childs: get_outobjs_from_tree(child,list_outobjs) return list_outobjs def standard_title_context(_title_context): return _title_context.replace("(","(").replace(")",")").replace(":",":").replace(":",";").replace(",",".").replace(",",".").replace("、",".").replace(".",".") def standard_product(sentence): return sentence.replace("(","(").replace(")",")") import Levenshtein import copy def jaccard_score(source,target): source_set = set([s for s in source]) target_set = set([s for s in target]) if len(source_set)==0 or len(target_set)==0: return 0 return max(len(source_set&target_set)/len(source_set),len(source_set&target_set)/len(target_set)) def judge_pur_chinese(keyword): """ 中文字符的编码范围为: u'\u4e00' -- u'\u9fff:只要在此范围内就可以判断为中文字符串 @param keyword: @return: """ # 定义一个需要删除的标点符号字符串列表 remove_chars = '[·’!"\#$%&\'()#!()*+,-./:;<=>?\@,:?¥★、….>【】[]《》?“”‘’\[\\]^_`{|}~]+' # 利用re.sub来删除中文字符串中的标点符号 strings = re.sub(remove_chars, "", keyword) # 将keyword中文字符串中remove_chars中包含的标点符号替换为空字符串 for ch in strings: if u'\u4e00' <= ch <= u'\u9fff': pass else: return False return True def is_similar(source,target,_radio=None): source = str(source).lower() target = str(target).lower() max_len = max(len(source),len(target)) min_len = min(len(source),len(target)) min_ratio = 90 if min_len>=3: min_ratio = 87 if min_len>=5: min_ratio = 85 if _radio is not None: min_ratio = _radio # dis_len = abs(len(source)-len(target)) # min_dis = min(max_len*0.2,4) if min_len==0 and max_len>0: return False if max_len<=2: if source==target: return True if min_len<2: return False #判断相似度 similar = Levenshtein.ratio(source,target)*100 if similar>=min_ratio: log("%s and %s similar_jaro %d"%(source,target,similar)) return True similar_jaro = Levenshtein.jaro(source,target) if similar_jaro*100>=min_ratio: log("%s and %s similar_jaro %d"%(source,target,similar_jaro*100)) return True similar_jarow = Levenshtein.jaro_winkler(source,target) if similar_jarow*100>=min_ratio: log("%s and %s similar_jaro %d"%(source,target,similar_jarow*100)) return True if min_len>=5: if len(source)==max_len and str(source).find(target)>=0: return True elif len(target)==max_len and target.find(source)>=0: return True elif jaccard_score(source, target)==1 and judge_pur_chinese(source) and judge_pur_chinese(target): return True return False end_pattern = "商务要求|评分标准|商务条件|商务条件" _param_pattern = "(产品|技术|清单|配置|参数|具体|明细|项目|招标|货物|服务|规格|工作|具体)[及和与]?(指标|配置|条件|要求|参数|需求|规格|条款|名称及要求)|配置清单|(质量|技术).{,10}要求|验收标准|^(参数|功能)$" meter_pattern = "[><≤≥±]\d+|\d+(?:[μucmkK微毫千]?[米升LlgGmMΩ]|摄氏度|英寸|度|天|VA|dB|bpm|rpm|kPa|mol|cmH20|%|°|Mpa|Hz|K?HZ|℃|W|min|[*×xX])|[*×xX]\d+|/min|\ds[^a-zA-Z]|GB.{,20}标准|PVC|PP|角度|容积|色彩|自动|流量|外径|轴位|折射率|帧率|柱镜|振幅|磁场|镜片|防漏|强度|允差|心率|倍数|瞳距|底座|色泽|噪音|间距|材质|材料|表面|频率|阻抗|浓度|兼容|防尘|防水|内径|实时|一次性|误差|性能|距离|精确|温度|超温|范围|跟踪|对比度|亮度|[横纵]向|均压|负压|正压|可调|设定值|功能|检测|高度|厚度|宽度|深度|[单双多]通道|效果|指数|模式|尺寸|重量|峰值|谷值|容量|寿命|稳定性|高温|信号|电源|电流|转换率|效率|释放量|转速|离心力|向心力|弯曲|电压|功率|气量|国标|标准协议|灵敏度|最大值|最小值|耐磨|波形|高压|性强|工艺|光源|低压|压力|压强|速度|湿度|重量|毛重|[MLX大中小]+码|净重|颜色|[红橙黄绿青蓝紫]色|不锈钢|输入|输出|噪声|认证|配置" not_meter_pattern = "投标报价|中标金额|商务部分|公章|分值构成|业绩|详见|联系人|联系电话|合同价|金额|采购预算|资金来源|费用|质疑|评审因素|评审标准|商务资信|商务评分|专家论证意见|评标方法|代理服务费|售后服务|评分类型|评分项目|预算金额|得\d+分|项目金额|详见招标文件|乙方" def getTrs(tbody): #获取所有的tr trs = [] if tbody.name=="table": body = tbody.find("tbody",recursive=False) if body is not None: tbody = body objs = tbody.find_all(recursive=False) for obj in objs: if obj.name=="tr": trs.append(obj) if obj.name=="tbody" or obj.name=="table": for tr in obj.find_all("tr",recursive=False): trs.append(tr) return trs def fixSpan(tbody): # 处理colspan, rowspan信息补全问题 #trs = tbody.findChildren('tr', recursive=False) trs = getTrs(tbody) ths_len = 0 ths = list() trs_set = set() #修改为先进行列补全再进行行补全,否则可能会出现表格解析混乱 # 遍历每一个tr for indtr, tr in enumerate(trs): ths_tmp = tr.findChildren('th', recursive=False) #不补全含有表格的tr if len(tr.findChildren('table'))>0: continue if len(ths_tmp) > 0: ths_len = ths_len + len(ths_tmp) for th in ths_tmp: ths.append(th) trs_set.add(tr) # 遍历每行中的element tds = tr.findChildren(recursive=False) for indtd, td in enumerate(tds): # 若有colspan 则补全同一行下一个位置 if 'colspan' in td.attrs: if str(re.sub("[^0-9]","",str(td['colspan'])))!="": col = int(re.sub("[^0-9]","",str(td['colspan']))) if col<100 and len(td.get_text())<1000: td['colspan'] = 1 for i in range(1, col, 1): td.insert_after(copy.copy(td)) for indtr, tr in enumerate(trs): ths_tmp = tr.findChildren('th', recursive=False) #不补全含有表格的tr if len(tr.findChildren('table'))>0: continue if len(ths_tmp) > 0: ths_len = ths_len + len(ths_tmp) for th in ths_tmp: ths.append(th) trs_set.add(tr) # 遍历每行中的element tds = tr.findChildren(recursive=False) for indtd, td in enumerate(tds): # 若有rowspan 则补全下一行同样位置 if 'rowspan' in td.attrs: if str(re.sub("[^0-9]","",str(td['rowspan'])))!="": row = int(re.sub("[^0-9]","",str(td['rowspan']))) td['rowspan'] = 1 for i in range(1, row, 1): # 获取下一行的所有td, 在对应的位置插入 if indtr+i= (indtd) and len(tds1)>0: if indtd > 0: tds1[indtd - 1].insert_after(copy.copy(td)) else: tds1[0].insert_before(copy.copy(td)) elif indtd-2>0 and len(tds1) > 0 and len(tds1) == indtd - 1: # 修正某些表格最后一列没补全 tds1[indtd-2].insert_after(copy.copy(td)) def getTable(tbody): #trs = tbody.findChildren('tr', recursive=False) fixSpan(tbody) trs = getTrs(tbody) inner_table = [] for tr in trs: tr_line = [] tds = tr.findChildren(['td','th'], recursive=False) if len(tds)==0: tr_line.append([re.sub('\xa0','',tr.get_text()),0]) # 2021/12/21 修复部分表格没有td 造成数据丢失 for td in tds: tr_line.append([re.sub('\xa0','',td.get_text()),0]) #tr_line.append([td.get_text(),0]) inner_table.append(tr_line) return inner_table def extract_products(list_data,_product,_param_pattern = "产品名称|设备材料|采购内存|标的名称|采购内容|(标的|维修|系统|报价构成|商品|产品|物料|物资|货物|设备|采购品|采购条目|物品|材料|印刷品?|采购|物装|配件|资产|耗材|清单|器材|仪器|器械|备件|拍卖物|标的物|物件|药品|药材|药械|货品|食品|食材|品目|^品名|气体|标项|分项|项目|计划|包组|标段|[分子]?包|子目|服务|招标|中标|成交|工程|招标内容)[\))的]?([、\w]{,4}名称|内容|描述)|标的|标项|项目$|商品|产品|物料|物资|货物|设备|采购品|采购条目|物品|材料|印刷品|物装|配件|资产|招标内容|耗材|清单|器材|仪器|器械|备件|拍卖物|标的物|物件|药品|药材|药械|货品|食品|食材|菜名|^品目$|^品名$|^名称|^内容$"): _product = standard_product(_product) list_result = [] list_table_products = [] for _data_i in range(len(list_data)): _data = list_data[_data_i] _type = _data["type"] _text = _data["text"] if _type=="table": list_table = _data["list_table"] if list_table is None: continue _check = True max_length = max([len(a) for a in list_table]) min_length = min([len(a) for a in list_table]) if min_length=len(line): continue cell = line[cell_i] cell_text = cell[0] head_cell_text += cell_text # print("===head_cell_text",head_cell_text) if re.search("招标人|采购人|项目编号|项目名称|金额|^\d+$",head_cell_text) is not None: list_head_index = [] for line in list_table: line_text = ",".join([cell[0] for cell in line]) for cell_i in range(len(line)): cell = line[cell_i] cell_text = cell[0] if cell_text is not None and _product is not None and len(cell_text)=0 and re.search("单价|数量|总价|规格|品牌|型号|用途|要求|采购量",line_text) is not None: list_head_index.append(cell_i) list_head_index = list(set(list_head_index)) if len(list_head_index)>0: has_number = False for cell_i in list_head_index: table_products = [] for line_i in range(_begin_index,len(list_table)): line = list_table[line_i] for _i in range(len(line)): cell = line[_i] cell_text = cell[0] if re.search("^\d+$",cell_text) is not None: has_number = True if cell_i>=len(line): continue cell = line[cell_i] cell_text = cell[0] if re.search(_param_pattern,cell_text) is None or has_number: if re.search("^[\da-zA-Z]+$",cell_text) is None: table_products.append(cell_text) if len(table_products)>0: logger.debug("table products %s"%(str(table_products))) if min([len(x) for x in table_products])>0 and max([len(x) for x in table_products])<=30: if re.search("招标人|代理人|预算|数量|交货期|品牌|产地","".join(table_products)) is None: list_table_products.append(table_products) _find = False for table_products in list_table_products: for _p in table_products: if is_similar(_product,_p,90): _find = True logger.debug("similar table_products %s"%(str(table_products))) list_result = list(set([a for a in table_products if len(a)>1 and len(a)<20 and re.search("费用|预算|合计|金额|万元|运费|^其他$",a) is None])) break if not _find: for table_products in list_table_products: list_result.extend(table_products) list_result = list(set([a for a in list_result if len(a)>1 and len(a)<30 and re.search("费用|预算|合计|金额|万元|运费",a) is None])) return list_result def get_childs(childs, max_depth=None): list_data = [] for _child in childs: list_data.append(_child) childs2 = _child.get("child_title",[]) if len(childs2)>0 and (max_depth==None or max_depth>0): for _child2 in childs2: if max_depth != None: list_data.extend(get_childs([_child2], max_depth-1)) else: list_data.extend(get_childs([_child2], None)) return list_data class Html2KVTree(): def __init__(self,_html,auto_merge_table=True,list_obj = []): if _html is None: _html = "" self.html = _html self.auto_merge_table = auto_merge_table if list_obj: self.list_obj = list_obj else: _tree = html_to_tree(html_content) self.list_obj = get_outobjs_from_tree(_tree) # for obj in self.list_obj: # print("obj",obj.get_text()[:20]) self.tree = self.buildParsetree(self.list_obj,[],auto_merge_table) # #识别目录树 # self.print_tree(self.tree,"-|") def get_soup_objs(self,soup,list_obj=None): if list_obj is None: list_obj = [] childs = soup.find_all(recursive=False) for _obj in childs: childs1 = _obj.find_all(recursive=False) if len(childs1)==0 or len(_obj.get_text())<40 or _obj.name=="table": list_obj.append(_obj) elif _obj.name=="p": list_obj.append(_obj) else: self.get_soup_objs(_obj,list_obj) return list_obj def fix_tree(self,_product): products = extract_products(self.tree,_product) if len(products)>0: self.tree = self.buildParsetree(self.list_obj,products,self.auto_merge_table) def print_tree(self,tree,append="",set_tree_id=None): if set_tree_id is None: set_tree_id = set() if append=="": for t in tree: logger.debug("%s text:%s title:%s title_text:%s before:%s after%s product:%s"%("==>",t["text"][:50],t["sentence_title"],t["sentence_title_text"],t["title_before"],t["title_after"],t["has_product"])) for t in tree: _id = id(t) if _id in set_tree_id: continue set_tree_id.add(_id) logger.info("%s text:%s title:%s title_text:%s before:%s after%s product:%s kv:%s"%(append,t["text"][:50],t["sentence_title"],t["sentence_title_text"],t["title_before"],t["title_after"],t["has_product"],str(t["kv"]))) childs = t["child_title"] self.print_tree(childs,append=append+"-|",set_tree_id=set_tree_id) def is_title_first(self,title): if title in ("一","1","Ⅰ","a","A"): return True return False def find_title_by_pattern(self,_text,_pattern="(^|★|▲|:|:|\s+)(?P(?P第?)(?P[一二三四五六七八九十ⅠⅡⅢⅣⅤⅥⅦⅧⅨⅩⅪⅫ]+)(?P[、章册包标部.::]+))|" \ "([\s★▲\*]*)(?P(?P[^一二三四五六七八九十\dⅠⅡⅢⅣⅤⅥⅦⅧⅨⅩⅪⅫ]{,3}?)(?P[ⅠⅡⅢⅣⅤⅥⅦⅧⅨⅩⅪⅫ]+)(?P[、章册包标部.::]+))|" \ "([\s★▲\*]*)(?P(?P[^一二三四五六七八九十\dⅠⅡⅢⅣⅤⅥⅦⅧⅨⅩⅪⅫ]{,3}?第?)(?P[一二三四五六七八九十]+)(?P[节章册部\.::、、]+))|" \ "([\s★▲\*]*)(?P(?P^)(?P[一二三四五六七八九十]+)(?P)[^一二三四五六七八九十节章册部\.::、])|" \ "([\s★▲\*]*)(?P(?P[^一二三四五六七八九十\dⅠⅡⅢⅣⅤⅥⅦⅧⅨⅩⅪⅫ]{,3}?\d{1,2}[\..、\s\-]\d{1,2}[\..、\s\-]\d{1,2}[\..、\s\-]\d{1,2}[\..、\s\-])(?P\d{1,2})(?P[\..、\s\-]?))|"\ "([\s★▲\*]*)(?P(?P[^一二三四五六七八九十\dⅠⅡⅢⅣⅤⅥⅦⅧⅨⅩⅪⅫ]{,3}?\d{1,2}[\..、\s\-]\d{1,2}[\..、\s\-]\d{1,2}[\..、\s\-])(?P\d{1,2})(?P[\..、\s\-]?))|" \ "([\s★▲\*]*)(?P(?P[^一二三四五六七八九十\dⅠⅡⅢⅣⅤⅥⅦⅧⅨⅩⅪⅫ]{,3}?\d{1,2}[\..、\s\-]\d{1,2}[\..、\s\-])(?P\d{1,2})(?P[\..、\s\-]?))|" \ "([\s★▲\*]*)(?P(?P[^一二三四五六七八九十\dⅠⅡⅢⅣⅤⅥⅦⅧⅨⅩⅪⅫ]{,3}?\d{1,2}[\..\s\-])(?P\d{1,2})(?P[\..包标::、\s\-]*))|" \ "(^[\s★▲\*]*)(?P(?P[^一二三四五六七八九十\dⅠⅡⅢⅣⅤⅥⅦⅧⅨⅩⅪⅫ]{,3}?包?)(?P\d{1,2})(?P[\..、\s\-包标]*))|" \ "([\s★▲\*]*)(?P(?P[^一二三四五六七八九十\dⅠⅡⅢⅣⅤⅥⅦⅧⅨⅩⅪⅫ]{,3}?[((]?)(?P\d{1,2})(?P[))包标\..::、]+))|" \ "([\s★▲\*]+)(?P(?P[^一二三四五六七八九十\dⅠⅡⅢⅣⅤⅥⅦⅧⅨⅩⅪⅫ]{,3}?[((]?)(?P[a-zA-Z]+)(?P[))包标\..::、]+))|" \ "([\s★▲\*]*)(?P(?P[^一二三四五六七八九十\dⅠⅡⅢⅣⅤⅥⅦⅧⅨⅩⅪⅫ]{,3}?[((]?)(?P[一二三四五六七八九十ⅠⅡⅢⅣⅤⅥⅦⅧⅨⅩⅪⅫ]+)(?P[))]))" ): _se = re.search(_pattern,_text) groups = [] if _se is not None: e = _se.end() if re.search('(时间|日期|编号|账号|号码|手机|价格|\w价|人民币|金额|得分|分值|总分|满分|最高得|扣|减|数量|评委)[::]?\d', _se.group(0)) or (re.search('\d[.::]?$', _se.group(0)) and re.search('^[\d年月日万元天个分秒台条A-Za-z]|^(小时)', _text[e:])): return None elif re.match('[二三四五六七八九十]\w{1,2}[市区县]|五金|四川|八疆|九龙|[一二三四五六七八九十][层天标包]', _text) and re.match('[一二三四五六七八九十]', _se.group(0)): # 289765335 排除三明市等开头作为大纲 return None elif re.search('^[\u4e00-\u9fa5]+[::]', _text[:e]): return None _gd = _se.groupdict() for k,v in _gd.items(): if v is not None: groups.append((k,v)) if len(groups): groups.sort(key=lambda x:x[0]) return groups return None def make_increase(self,_sort,_title,_add=1): if len(_title)==0 and _add==0: return "" if len(_title)==0 and _add==1: return _sort[0] _index = _sort.index(_title[-1]) next_index = (_index+_add)%len(_sort) next_chr = _sort[next_index] if _index==len(_sort)-1: _add = 1 else: _add = 0 return next_chr+self.make_increase(_sort,_title[:-1],_add) def get_next_title(self,_title): if re.search("^\d+$",_title) is not None: return str(int(_title)+1) if re.search("^[一二三四五六七八九十百]+$",_title) is not None: if _title[-1]=="十": return _title+"一" if _title[-1]=="百": return _title+"零一" if _title[-1]=="九": if len(_title)==1: return "十" if len(_title)==2: if _title[0]=="十": return "二十" if len(_title)==3: if _title[0]=="九": return "一百" else: _next_title = self.make_increase(['一','二','三','四','五','六','七','八','九','十'],re.sub("[十百]",'',_title[0])) return _next_title+"十" _next_title = self.make_increase(['一','二','三','四','五','六','七','八','九','十'],re.sub("[十百]",'',_title)) _next_title = list(_next_title) _next_title.reverse() if _next_title[-1]!="十": if len(_next_title)>=2: _next_title.insert(-1,'十') if len(_next_title)>=4: _next_title.insert(-3,'百') if _title[0]=="十": if _next_title=="十": _next_title = ["二","十"] _next_title.insert(0,"十") _next_title = "".join(_next_title) return _next_title if re.search("^[a-z]+$",_title) is not None: _next_title = self.make_increase([chr(i+ord('a')) for i in range(26)],_title) _next_title = list(_next_title) _next_title.reverse() return "".join(_next_title) if re.search("^[A-Z]+$",_title) is not None: _next_title = self.make_increase([chr(i+ord('A')) for i in range(26)],_title) _next_title = list(_next_title) _next_title.reverse() return "".join(_next_title) if re.search("^[ⅠⅡⅢⅣⅤⅥⅦⅧⅨⅩⅪⅫ]$",_title) is not None: _sort = ["Ⅰ","Ⅱ","Ⅲ","Ⅳ","Ⅴ","Ⅵ","Ⅶ","Ⅷ","Ⅸ","Ⅹ","Ⅺ","Ⅻ"] _index = _sort.index(_title) if _index10 and len(_text)<100: if _text not in dict_sentence_count: dict_sentence_count[_text] = 0 dict_sentence_count[_text] += 1 if re.search("\d+页",_text) is not None: illegal_sentence.add(_text) elif len(_text)<10: if re.search("第\d+页",_text) is not None: illegal_sentence.add(_text) sentence_groups = self.find_title_by_pattern(_text[:10]) if sentence_groups: # c062f53cf83401e671822003d63c1828print("sentence_groups",sentence_groups) sentence_title = sentence_groups[0][0] sentence_title_text = sentence_groups[0][1] title_index = sentence_groups[-2][1] title_before = sentence_groups[1][1].replace("(","(").replace(":",":").replace(":",";").replace(",",".").replace(",",".").replace("、",".") title_after = sentence_groups[-1][1].replace(")",")").replace(":",":").replace(":",";").replace(",",".").replace(",",".").replace("、",".") next_index = self.get_next_title(title_index) if title_before not in dict_before: dict_before[title_before] = 0 dict_before[title_before] += 1 for k,v in dict_sentence_count.items(): if v>10: illegal_sentence.add(k) return dict_before,illegal_sentence def is_page_no(self,sentence): if len(sentence)<10: if re.search("\d+页|^\-\d+\-$",sentence) is not None: return True def block_tree(self,childs): for child in childs: if not child["block"]: child["block"] = True childs2 = child["child_title"] self.block_tree(childs2) def buildParsetree(self,list_obj,products=[],auto_merge_table=True,auto_append=False): self.parseTree = None trees = [] list_length = [] for obj in list_obj[:200]: if obj.name!="table": list_length.append(len(obj.text)) if len(list_length)>0: max_length = max(list_length) else: max_length = 40 max_length = min(max_length,40) logger.debug("%s:%d"%("max_length",max_length)) list_data = [] last_table_index = None last_table_columns = None last_table = None dict_before,illegal_sentence = self.count_title_before(list_obj) for obj_i in range(len(list_obj)): obj = list_obj[obj_i] # logger.debug("==obj %s"%obj.text[:20]) _type = "sentence" _text = standard_product(obj.text) if obj.name=="table": _type = "table" _text = standard_product(str(obj)) _append = False sentence_title = None sentence_title_text = None sentence_groups = None title_index = None next_index = None parent_title = None title_before = None title_after = None title_next = None childs = [] # new sentence_index = obj.sentence_index wordOffset_begin = obj.wordOffset_begin wordOffset_end = obj.wordOffset_end sentences = obj.sentences list_kv = obj.get("kv",[]) table_id = obj.get("table_id") list_table = None block = False has_product = False position = obj.get("position",{}) if _type=="sentence": if _text in illegal_sentence: continue sentence_groups = self.find_title_by_pattern(_text[:10]) if sentence_groups: title_before = standard_title_context(sentence_groups[1][1]) title_after = sentence_groups[-1][1] sentence_title_text = sentence_groups[0][1] other_text = _text.replace(sentence_title_text,"") if (title_before in dict_before and dict_before[title_before]>1) or title_after!="": sentence_title = sentence_groups[0][0] title_index = sentence_groups[-2][1] next_index = self.get_next_title(title_index) other_text = _text.replace(sentence_title_text,"") for p in products: if other_text.strip()==p.strip(): has_product = True else: _fix = False for p in products: if other_text.strip()==p.strip(): title_before = "=产品" sentence_title = "title_0" sentence_title_text = p title_index = "0" title_after = "产品=" next_index = "0" _fix = True has_product = True break if not _fix: title_before = None title_after = None sentence_title_text = None else: if len(_text)<40 and re.search(_param_pattern,_text) is not None: for p in products: if _text.find(p)>=0: title_before = "=产品" sentence_title = "title_0" sentence_title_text = p title_index = "0" title_after = "产品=" next_index = "0" _fix = True has_product = True break # 合并两个非标题句子 20241106 注销,由于 485441521 招标内容结束位置不对 if auto_append: if _type=="sentence": if sentence_title is None and len(list_data)>0 and list_data[-1]["sentence_title"] is not None and list_data[-1]["line_width"]>=max_length*0.6: list_data[-1]["text"] += _text list_data[-1]["line_width"] = len(_text) update_kv_span(list_kv,len(_text)) list_data[-1]["kv"].extend(list_kv) list_data[-1]["sentences"].extend(sentences) _append = True elif sentence_title is None and len(list_data)>0 and _type==list_data[-1]["type"]: if list_data[-1]["line_width"]>=max_length*0.7: list_data[-1]["text"] += _text list_data[-1]["line_width"] = len(_text) update_kv_span(list_kv,len(_text)) list_data[-1]["kv"].extend(list_kv) list_data[-1]["sentences"].extend(sentences) _append = True if not _append: _data = {"type":_type,"tag":obj.get("tag"),"table_id":table_id, "text":_text,"sentences":sentences,"list_table":list_table, "line_width":len(_text),"sentence_title":sentence_title,"title_index":title_index, "sentence_title_text":sentence_title_text,"sentence_groups":sentence_groups,"parent_title":parent_title, "child_title":childs,"title_before":title_before,"title_after":title_after,"title_next":title_next,"next_index":next_index, "block":block,"has_product":has_product, "sentence_index":sentence_index,"wordOffset_begin":wordOffset_begin,"wordOffset_end":wordOffset_end, "kv":list_kv,"position":position } if sentence_title is not None: if len(list_data)>0: if self.is_title_first(title_index): for i in range(1,len(list_data)+1): _d = list_data[-i] if _d["sentence_title"] is not None: _data["parent_title"] = _d _d["child_title"].append(_data) break else: _find = False for i in range(1,len(list_data)+1): if _find: break _d = list_data[-i] if _d.get("sentence_title")==sentence_title and title_before==_d["title_before"] and title_after==_d["title_after"]: if _d["next_index"]==title_index and _d["title_next"] is None and not _d["block"]: _data["parent_title"] = _d["parent_title"] _d["title_next"] = _data if len(_d["child_title"])>0: _d["child_title"][-1]["title_next"] = "" self.block_tree(_d["child_title"]) if _d["parent_title"] is not None: _d["parent_title"]["child_title"].append(_data) _find = True break for i in range(1,len(list_data)+1): if _find: break _d = list_data[-i] if i==1 and not _d["block"] and _d.get("sentence_title")==sentence_title and title_before==_d["title_before"] and title_after==_d["title_after"]: _data["parent_title"] = _d["parent_title"] _d["title_next"] = _data if len(_d["child_title"])>0: _d["child_title"][-1]["title_next"] = "" self.block_tree(_d["child_title"]) if _d["parent_title"] is not None: _d["parent_title"]["child_title"].append(_data) _find = True break title_before = standard_title_context(title_before) title_after = standard_title_context(title_after) for i in range(1,len(list_data)+1): if _find: break _d = list_data[-i] if _d.get("sentence_title")==sentence_title and title_before==standard_title_context(_d["title_before"]) and title_after==standard_title_context(_d["title_after"]): if _d["next_index"]==title_index and _d["title_next"] is None and not _d["block"]: _data["parent_title"] = _d["parent_title"] _d["title_next"] = _data if len(_d["child_title"])>0: _d["child_title"][-1]["title_next"] = "" self.block_tree(_d["child_title"]) if _d["parent_title"] is not None: _d["parent_title"]["child_title"].append(_data) _find = True break for i in range(1,len(list_data)+1): if _find: break _d = list_data[-i] if not _d["block"] and _d.get("sentence_title")==sentence_title and title_before==standard_title_context(_d["title_before"]) and title_after==standard_title_context(_d["title_after"]): _data["parent_title"] = _d["parent_title"] _d["title_next"] = _data if len(_d["child_title"])>0: _d["child_title"][-1]["title_next"] = "" # self.block_tree(_d["child_title"]) if _d["parent_title"] is not None: _d["parent_title"]["child_title"].append(_data) _find = True break for i in range(1,min(len(list_data)+1,20)): if _find: break _d = list_data[-i] if not _d["block"] and _d.get("sentence_title")==sentence_title and title_before==standard_title_context(_d["title_before"]): _data["parent_title"] = _d["parent_title"] _d["title_next"] = _data if len(_d["child_title"])>0: _d["child_title"][-1]["title_next"] = "" # self.block_tree(_d["child_title"]) if _d["parent_title"] is not None: _d["parent_title"]["child_title"].append(_data) _find = True break if not _find: if len(list_data)>0: for i in range(1,len(list_data)+1): _d = list_data[-i] if _d.get("sentence_title") is not None: _data["parent_title"] = _d _d["child_title"].append(_data) break else: if len(list_data)>0: for i in range(1,len(list_data)+1): _d = list_data[-i] if _d.get("sentence_title") is not None: _data["parent_title"] = _d _d["child_title"].append(_data) break list_data.append(_data) for _data in list_data: childs = _data["child_title"] for c_i in range(len(childs)): cdata = childs[c_i] if cdata["has_product"]: continue else: if c_i>0: last_cdata = childs[c_i-1] if cdata["sentence_title"] is not None and last_cdata["sentence_title"] is not None and last_cdata["title_before"]==cdata["title_before"] and last_cdata["title_after"]==cdata["title_after"] and last_cdata["has_product"]: cdata["has_product"] = True if c_i0: last_cdata = childs[c_i-1] if cdata["sentence_title"] is not None and last_cdata["sentence_title"] is not None and last_cdata["title_before"]==cdata["title_before"] and last_cdata["title_after"]==cdata["title_after"] and last_cdata["has_product"]: cdata["has_product"] = True if c_i
辽宁机电职业技术学院食堂账户开立项目(二次)竞争性磋商公告

项目概况

辽宁机电职业技术学院食堂账户开立项目 采购项目的潜在供应商应在辽宁顺业工程咨询有限公司(丹东市振兴区纤维南路1-2-7号)获取采购文件,并于2024年11月26日 14点00分(北京时间)前提交响应文件。

一、项目基本情况

项目编号:LNSY-2024101702

项目名称:辽宁机电职业技术学院食堂账户开立项目

采购方式:竞争性磋商

预算金额:0.000000 万元(人民币)

最高限价(如有):0.000000 万元(人民币)

采购需求:

本项目为丹东市辽宁机电职业技术学院食堂账户开立项目采购项目,银行须提供给学校所需的包括日常资金结算服务、代收代付、转账汇款、账户对帐服务、年检服务、网银操作、安全保障等在内的一切配套基本服务。(具体详见第三章服务需求)

合同履行期限:5年

本项目(不接受 )联合体投标。

二、申请人的资格要求:

1.满足《中华人民共和国政府采购法》第二十二条规定;

2.落实政府采购政策需满足的资格要求:

3.本项目的特定资格要求:投标人应属于在中华人民共和国境内依法设立的国有商业银行、股份制商业银行、邮政储蓄银行、城市商业银行、农村商业银行、农村合作银行及政策性银行,并符合以下条件;(一)在甲方所在地设有分支机构;(二)在甲方所在地范围内依法开展经营活动,内部管理机制健全,具有较强的风险控制能力,近3年内在经营活动中无重大违法违规记录、未发生金融风险及重大违约事件。(三)投标人若为支行,须提供总行或丹东市分行针对本项目唯一授权书。各投标主体不得隶属于同一法人。不接受联合体投标。投标人若为总行或丹东市分行,须提供业务承办银行确认函(明确中标后承办本项目业务的银行名称)。(四)有专人负责办理相关业务。

三、获取采购文件

时间:2024年11月15日 至2024年11月25日,每天上午8:30至11:30,下午13:00至16:30。(北京时间,法定节假日除外)

地点:辽宁顺业工程咨询有限公司(丹东市振兴区纤维南路1-2-7号)

方式:现场或电子邮件领取

售价:¥500.0 元(人民币)

四、响应文件提交

截止时间:2024年11月26日 14点00分(北京时间)

地点:辽宁顺业工程咨询有限公司(丹东市振兴区纤维南路1-2-7号)

五、开启

时间:2024年11月26日 14点00分(北京时间)

地点:辽宁顺业工程咨询有限公司(丹东市振兴区纤维南路1-2-7号)

六、公告期限

自本公告发布之日起3个工作日。

七、其他补充事宜

(一)质疑与投诉

供应商认为自己的权益受到损害的,可以在知道或者应知其权益受到损害之日起七个工作日内,向采购代理机构或采购人提出质疑。

1、接收质疑函方式:书面纸质或电子质疑函

2、质疑函内容、格式:应符合《政府采购质疑和投诉办法》相关规定和财政部制定的《政府采购质疑函范本》格式,详见辽宁政府采购网。

质疑供应商对采购人、采购代理机构的答复不满意,或者采购人、采购代理机构未在规定时间内作出答复的,可以在答复期满后15个工作日内向本级财政部门提起投诉。

(二)购买采购文件时须提供以下材料(以下材料均须加盖单位公章):

  1. 法人或者其他组织的营业执照等主体证明文件或自然人的身份证明复印件(自然人身份证明仅限在自然人作为响应主体时使用);
  2. 法定代表人(或非法人组织负责人)身份证明书原件(附法定代表人身份证复印件)(自然人作为响应主体时不需提供);
  3. 授权委托书原件(附授权委托人身份证复印件)(法定代表人、非法人组织负责人、自然人本人购买采购文件的无需提供);

注:电子邮件方式领取采购文件的供应商,将上述材料加盖公章的扫描件发送至指定邮箱(lnsy9688@163.com)并致电0415-2199688,主题写明“供应商名称、项目名称、联系人、联系电话”,在领取采购文件截止时间前资料审查通过后,代理机构将采购文件电子版发送至供应商邮箱。

八、凡对本次采购提出询问,请按以下方式联系。

1.采购人信息

名 称:辽宁机电职业技术学院     

地址:丹东市振兴区洋河大街30号        

联系方式:王老师0415-3853804      

2.采购代理机构信息

名 称:辽宁顺业工程咨询有限公司            

地 址:辽宁顺业工程咨询有限公司(丹东市振兴区纤维南路1-2-7号)            

联系方式:吴平0415-2199688            

3.项目联系方式

项目联系人:吴平

电 话:  04152199688

 

公告概要:
公告信息:
采购项目名称 辽宁机电职业技术学院食堂账户开立项目
品目

服务/金融服务/银行服务/其他银行服务

采购单位 辽宁机电职业技术学院
行政区域 振兴区 公告时间 2024年11月14日 15:25
获取采购文件时间 2024年11月15日至2024年11月25日
每日上午:8:30 至 11:30下午:13:00 至 16:30(北京时间,法定节假日除外)
响应文件递交地点 辽宁顺业工程咨询有限公司(丹东市振兴区纤维南路1-2-7号)
响应文件开启时间 2024年11月26日 14:00
响应文件开启地点 辽宁顺业工程咨询有限公司(丹东市振兴区纤维南路1-2-7号)
预算金额 ¥0.000000万元(人民币)
联系人及联系方式:
项目联系人 吴平
项目联系电话 04152199688
采购单位 辽宁机电职业技术学院
采购单位地址 丹东市振兴区洋河大街30号
采购单位联系方式 王老师0415-3853804
代理机构名称 辽宁顺业工程咨询有限公司
代理机构地址 辽宁顺业工程咨询有限公司(丹东市振兴区纤维南路1-2-7号)
代理机构联系方式 吴平0415-2199688
""" _tree = html_to_tree(html_content) _pd = Html2KVTree(html_content) _pd.print_tree(_pd.tree,"-|") list_kv = _pd.extract_kv("获取采购文件时间") print(list_kv) #获取预处理后的所有句子,该句子与kv值对应 print(_pd.get_tree_sentence()) # soup = BeautifulSoup(html_content,"lxml") # table_tree = table_to_tree(soup) # print(json.dumps(table_tree,ensure_ascii=False))