#!/usr/bin/python3
# -*- coding: utf-8 -*-
# @Author  : bidikeji
# @Time    : 2021/1/25 0025 16:35
import re
import os
import time
import numpy as np
import tensorflow as tf
from BiddingKG.dl.common.Utils import *
from BiddingKG.dl.common.nerUtils import *
from keras.preprocessing.sequence import pad_sequences


def decode(logits, trans, sequence_lengths, tag_num):
    # Viterbi-decode the CRF logits of each sentence into a tag-id sequence.
    viterbi_sequences = []
    for logit, length in zip(logits, sequence_lengths):
        score = logit[:length]
        viterbi_seq, viterbi_score = viterbi_decode(score, trans)
        viterbi_sequences.append(viterbi_seq)
    return viterbi_sequences


class Punish_Extract():
    def __init__(self, model_file=os.path.dirname(__file__) + "/models/punish_code.pb"):
        print('model_file_path:', model_file)
        self.sess = tf.Session(graph=tf.Graph())
        self.code = ""
        self.punish_dicition = ""
        self.model_file = model_file  # frozen model for punishment-code prediction
        self.load_model()  # load the punishment-code prediction model

    # Load the punishment-code prediction model (frozen TensorFlow graph).
    def load_model(self):
        log("get model of time")
        with self.sess.as_default():
            with self.sess.graph.as_default():
                output_graph_def = tf.GraphDef()
                with open(self.model_file, 'rb') as f:
                    output_graph_def.ParseFromString(f.read())
                tf.import_graph_def(output_graph_def, name="")
                self.sess.run(tf.global_variables_initializer())
                self.char_input = self.sess.graph.get_tensor_by_name("char_input:0")
                self.length = self.sess.graph.get_tensor_by_name("length:0")
                self.trans = self.sess.graph.get_tensor_by_name("crf_loss/transitons:0")
                self.logits = self.sess.graph.get_tensor_by_name("CRF/output/logits:0")

    # Punishment-code prediction
    def predict_punishCode(self, list_sentences, MAX_AREA=5000):
        '''
        Predict the punishment code (document number) over the sentences of each article.
        :param list_sentences: list of articles, each a list of sentence objects
        :param MAX_AREA: maximum sentence length; longer sentences are truncated
        :return: punishment-code string; multiple codes are joined with ";"
        '''
        re_ner = re.compile("12+?3")
        article_ner_list = []
        count = 0
        with self.sess.as_default():
            with self.sess.graph.as_default():
                for sentences in list_sentences:
                    count += 1
                    # print(count)
                    sentences.sort(key=lambda x: len(x.sentence_text), reverse=True)
                    _begin_index = 0
                    while True:
                        MAX_LEN = len(sentences[_begin_index].sentence_text)
                        if MAX_LEN > MAX_AREA:
                            MAX_LEN = MAX_AREA
                        _LEN = MAX_AREA // MAX_LEN
                        # sentence_len = [len(sentence.sentence_text) for sentence in sentences[_begin_index:_begin_index+_LEN]]
                        sentence_len = [len(sentence.sentence_text) if len(sentence.sentence_text) <= MAX_LEN else MAX_LEN
                                        for sentence in sentences[_begin_index:_begin_index + _LEN]]
                        sentences_x = []
                        for sentence in sentences[_begin_index:_begin_index + _LEN]:
                            sentence = sentence.sentence_text
                            sentence = list(sentence)
                            sentence2id = [getIndexOfWord(word) for word in sentence]
                            sentences_x.append(sentence2id)
                        sentences_x = pad_sequences(sentences_x, maxlen=MAX_LEN, padding="post", truncating="post")
                        sentences_x = [np.array(x) for x in sentences_x]
                        _logits, _trans = self.sess.run([self.logits, self.trans],
                                                        feed_dict={self.char_input: np.array(sentences_x),
                                                                   self.length: sentence_len})
                        viterbi_sequence = decode(logits=_logits, trans=_trans,
                                                  sequence_lengths=sentence_len, tag_num=4)
                        ner_list = []
                        for _seq, sentence in zip(viterbi_sequence, sentences[_begin_index:_begin_index + _LEN]):
                            sentence = sentence.sentence_text
                            seq_id = ''.join([str(s) for s in _seq])
                            if re_ner.search(seq_id):
                                # print("sentence: ", sentence)
                                for _ner in re_ner.finditer(seq_id):
                                    start = _ner.start()
                                    end = _ner.end()
                                    n = sentence[start:end]
                                    # print(n, '<==>', start, end)
                                    # ner_list.append((n, start, end))
                                    ner_list.append(n)  # return only the entity text
                        # article_ner_list.append(ner_list)
                        article_ner_list.append(';'.join(set(ner_list)))
                        if _begin_index + _LEN >= len(sentences):
                            break
                        _begin_index += _LEN
        return article_ner_list[0]
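    # Illustrative note on the decoding above (an assumption, not stated in the original
    # code): with tag_num=4 the CRF emits tag ids 0-3, and the pattern "12+?3" reads like
    # a begin/inside/end scheme, i.e. "1" opens a punishment-code span, "2" continues it,
    # "3" closes it and "0" marks everything else. For a decoded sequence [0, 0, 1, 2, 2, 3, 0]
    # the joined string "0012230" matches with start()=2 and end()=6, so sentence[2:6] is
    # collected as one punishment code.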
    # Punishment category
    def get_punishType(self, x1, x2):
        '''Classify the announcement by its title and content.
        x1: title; x2: content; return: (matched keyword, category)'''
        # x1 = x1.replace('(','(').replace(')', ')').replace(' ','')
        # x2 = x2.replace('(', '(').replace(')', ')').replace(' ', '')
        '''Title regexes'''
        # unknown announcement types
        unknow = re.compile('采购方式|采购公告|采购招标|磋商公告|谈判公告|交易公告$|征集|征求|招标公告|竞标公告|中标公告|'
                            '成交公告|成交信息|流标公告|废标公告|城市管理考评|决算表|决算|预算|资格考试|招聘|选聘'
                            '|聘请|拟录用|无违规违法|无此项信息|暂无工程投标违法|管理办法|指导意见|无投诉|投诉办法'
                            '|公共资源交易情况|绩效评价|考试成绩|付息公告|不动产|办证|印发|转发')  # |结果公示 only partly belongs here
        # 投诉处理 (complaint handling)
        tscl = re.compile('投诉不予[处受]理|投诉不成立|终止投诉|投诉终止|不予受理|投诉事?项?的?处理')
        # 行政处罚 (administrative penalty)
        xzcf = re.compile('行政处罚|行政处理|政处罚|行政裁决|防罚|公罚|医罚|环罚|政罚|文罚|局罚|旅罚|财罚|运罚')
        # 监督检查 (supervision and inspection)
        jdjc = re.compile('(监督检查的?问?题?(处理|整改|记分|结果|决定|处罚))|监督处罚|调查处理|监督处理')
        # 严重违法 (serious violation)
        yzwf = re.compile('严重违法失信|黑名单|失信名单')
        # 不良行为 (misconduct)
        blxw = re.compile('((不良|失信|不诚信|差错|不规范|违规|违约|处罚|违法)(行为|记录|信息))|((违约|违规|违法)(处理|操作|情况|问题))'
                          '|通报批评|记分管理|迟到|早退|缺席|虚假材料|弄虚作假|履职不到位|诚信考核扣分|串通投标'
                          '|审核不通过|码一致|地址一致|扣分处理|扣分通知|扣[0-9]+分|责令整改|信用信息认定书$'
                          '|关于.{,30}的处罚|关于.{,10}的?考评通报|关于.{,30}扣分情况|不规范代理行为'
                          '|(取消|暂停|限制).{,50}((专家|评标|评委|投标|竞价|被抽取|中标|供应商|候选人)资格)'
                          '|(代理服?务?机构).{,10}(扣分)|(专家).{,30}(扣分|记分|处罚)|对.{,30}处理|冻结.{,30}账号')
        # 其他不良行为 (other misconduct)
        other = re.compile('质疑|代理机构进场交易情况|网上投诉办理|信用奖惩|信用奖罚|进场工作.{,5}考核'
                           '|举报处理|结果无效|成交无效|行政复议')

        '''Content regexes'''
        # 投诉处理 (complaint handling)
        tscl_c = re.compile('(投诉(人|单位)[1-9]?(名称)?[::])|(投诉事项[1-5一二三四五、]*部?分?(成立|予以受理))'
                            '|((驳回|撤回|撤销|终止)[^,。]{,60}(投诉|质疑))')
        # 行政处罚 (administrative penalty)
        xzcf_c = re.compile('((处理依据及结果|处理结果|处罚结果)).*行政处罚|如下行政处罚|行政处罚决定')
        # 诚信加分 (integrity bonus)
        cxjf_c = re.compile('处罚结果.*诚信加分')
        # 严重违法失信 (serious violation / dishonesty)
        yzwf_c = re.compile('工商部门严重违法失信起名单|严重违法失信的具体情形')  # |严重违法失信的具体情形
        # 不良行为 (misconduct)
        blxw_c = re.compile('(取消|暂停|限制).{,30}((专家|评标|评委|投标|采购|竞价|被抽取|中标|供应商)的?资格)'
                            '|(处罚结果|处罚情况).*(扣[1-9]*分|记分|不良行为|不良记录|不良信用|不诚信|扣除信用'
                            '|诚信档案|信用信息|取消.*资格|口头警告|处罚机关|责令改正|罚款|限制投标|暂扣|禁止'
                            '|暂停|封禁|暂无|行政处罚)|处罚结果'
                            '|处罚主题|禁止参与.{,10}政府采购活动|列入不良行为|处罚如下|如下处罚|违规处罚|处罚违规'
                            '|责令改正|责令整改|处罚依据|进行以下处理|处理依据及结果|处理结果|处罚决定书|'
                            '(不规范|不良|不诚信)行为记录')
        # 其他不良行为 (other misconduct)
        other_c = re.compile('质疑(人|单位)[1-9]?(名称)?:|公告期内受质疑')

        if re.search(unknow, x1):
            return re.search(unknow, x1).group(0), '未知类别'
        elif re.search(yzwf, x1):
            return re.search(yzwf, x1).group(0), '严重违法'
        elif re.search(yzwf_c, x2):
            return re.search(yzwf_c, x2).group(0), '严重违法'
        elif re.search(tscl, x1):
            return re.search(tscl, x1).group(0), '投诉处理'
        elif re.search(xzcf, x1):
            return re.search(xzcf, x1).group(0), '行政处罚'
        elif re.search(jdjc, x1):
            return re.search(jdjc, x1).group(0), '监督检查'
        elif re.search(blxw, x1):
            return re.search(blxw, x1).group(0), '不良行为'
        elif re.search(other, x1):
            return re.search(other, x1).group(0), '其他不良行为'
        elif re.search(tscl_c, x2):
            return re.search(tscl_c, x2).group(0), '投诉处理'
        elif re.search(xzcf_c, x2):
            return re.search(xzcf_c, x2).group(0), '行政处罚'
        elif re.search(cxjf_c, x2):
            return re.search(cxjf_c, x2).group(0), '诚信加分'
        elif re.search(blxw_c, x2):
            return re.search(blxw_c, x2).group(0), '不良行为'
        elif re.search(other_c, x2):
            return re.search(other_c, x2).group(0), '其他不良行为'
        return ' ', '未知类别'

    # Punishment decision
    def get_punishDecision(self, x, x2):
        '''Extract the punishment decision from the content with regexes.
        x: content; x2: punishment category; return: decision string'''
        rule1 = re.compile(
            '(((如下|以下|处理|研究|本机关|我机关|本局|我局)决定)|((决定|处理|处理意见|行政处罚|处罚)(如下|如下))'
            '|((以下|如下)(决定|处理|处理意见|行政处罚|处罚))|处理依据及结果|处理结果|处罚结果|处罚情况|限制行为'
            '|整改意见)[::].{5,}')
        rule2 = re.compile(
            '(((如下|以下|处理|研究|本机关|我机关|本局|我局)决定)|((决定|处理|处罚|处理意见)(如下|如下))'
            '|((以下|如下)(决定|处理|处理意见|处罚))|处理依据及结果|处理结果|处罚结果|处罚情况|限制行为'
            '|处罚内容)[:,,].{10,}')
        rule3 = re.compile('考评结果:?.*')
        rule4 = re.compile('(依据|根据)《.*》.*')
        if x2 == '未知类别':
            return ' '
        elif re.search(rule1, x[-int(len(x) * 0.4):]):
            return re.search(rule1, x[-int(len(x) * 0.4):]).group(0)
        elif re.search(rule1, x[-int(len(x) * 0.6):]):
            return re.search(rule1, x[-int(len(x) * 0.6):]).group(0)
        elif re.search(rule2, x[-int(len(x) * 0.7):]):
            return re.search(rule2, x[-int(len(x) * 0.7):]).group(0)
        elif re.search(rule3, x[-int(len(x) * 0.6):]):
            return re.search(rule3, x[-int(len(x) * 0.6):]).group(0)
        elif re.search(rule4, x[-int(len(x) * 0.4):]):
            return re.search(rule4, x[-int(len(x) * 0.4):]).group(0)
        else:
            return ' '
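    # Illustrative example for the decision extraction above (hypothetical text, not from
    # the original code): when the category is not '未知类别' and the content ends with
    # "……经研究决定:给予警告并处罚款5000元。", ``rule1`` matches
    # "研究决定:给予警告并处罚款5000元。" inside the last 40% of the text, and that span
    # is returned as the punishment decision.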
    # Whether the complaint was upheld
    def get_punishWhether(self, x1, x2, x3):
        '''Decide from the decision text whether the complaint was upheld.
        x1: decision string; x2: content; x3: punishment category; return: result string'''
        p1 = re.compile('(投诉|投拆|质疑|举报)(事项|内容|事实)?[^不,。]{,10}(成立|属实|予以受理|予以支持)|责令|废标|(中标|成交)[^,。]{,10}无效'
                        '|取消[^,。]{,60}资格|罚款|重新(组织|开展)?(招标|采购)|投诉成立|被投诉人存在违法违规行为'
                        '|采购活动违法|(中标|评标|成交)结果无效')
        p2 = re.compile('投诉不予[处受]理|((投诉|投拆|质疑|举报)(事项|内容|事实)?[^,。]{,10}(不成立|情?况?不属实|不予支持|缺乏事实依据))'
                        '|((驳回|撤回|撤销|终止)[^,。]*(投诉|质疑|诉求))|终止[^,。]{,20}(行政裁决|投诉处理|采购活动)|投诉终止|投诉无效'
                        '|予以驳回|不予受理|继续开展采购|被投诉人不存在违法违规行为|中标结果有效|投诉[^,。]{,10}不成立'
                        '|维持被投诉人|不支持[^,。]{,20}投诉|无确凿证据')
        if x3 != '投诉处理':
            return ' '
        elif re.search(p1, x1):
            return '投诉成立'
        elif re.search(p2, x1):
            return '投诉无效'
        elif re.search(p1, x2):
            return '投诉成立'
        elif re.search(p2, x2):
            return '投诉无效'
        return ' '

    # Law-enforcement institution and punishment time
    def get_institution(self, title, sentences_l, entity_l):
        '''
        Identify the law-enforcement institution and the punishment time by inspecting
        the text in front of each entity.
        :param title: article title
        :param sentences_l: sentence list of a single announcement
        :param entity_l: entity list of a single announcement
        :return: institution string and punishment-time string; multiple values joined with ";"
        '''
        institutions = []
        punishTimes = []
        institution_1 = re.compile("(?:处罚执行部门|认定部门|执法机关名称|执法单位|通报部门|处罚机关|处罚部门)[::]")
        punishTimes_1 = re.compile("(?:处罚日期|限制行为开始时间|曝光开始日期|处罚决定日期|处罚期限|处罚时间|处理日期|公告开始时间)[::]")

        # Use the keywords in front of an entity to decide whether it is the institution
        # or the punishment time.
        for ner in entity_l:
            if ner.entity_type == 'org':
                left = sentences_l[ner.sentence_index].sentence_text[
                       max(0, ner.wordOffset_begin - 15):ner.wordOffset_begin]
                if institution_1.search(left):
                    institutions.append(ner)
                elif institutions != [] and ner.sentence_index == institutions[-1].sentence_index and \
                        ner.wordOffset_begin - institutions[-1].wordOffset_end < 2 and \
                        sentences_l[ner.sentence_index].sentence_text[
                        institutions[-1].wordOffset_end:ner.wordOffset_begin] \
                        in ['', '、', '和', '及']:
                    institutions.append(ner)
            elif ner.entity_type == 'time':
                left = sentences_l[ner.sentence_index].sentence_text[
                       max(0, ner.wordOffset_begin - 15):ner.wordOffset_begin]
                if punishTimes_1.search(left):
                    punishTimes.append(ner)

        institution_title = re.compile("财政局|财政厅|监督管理局|公管局|公共资源局|委员会")
        institution_time = re.compile(
            "(^,?[\d一二三四五六七八九十]{4},?[/年-][\d一二三四五六七八九十]{1,2},?[/月-][\d一二三四五六七八九十]{1,2},?[/日-]?)")
        ins = ""
        ptime = ""
        # If no institution was found above, run NER on the title and keep an org entity
        # that matches the institution keywords.
        if institutions == [] and len(title) > 10:
            title_ners = getNers([title], useselffool=True)
            if title_ners[0]:
                for title_ner in title_ners[0]:
                    if title_ner[2] == 'org' and institution_title.search(title_ner[3]):
                        ins = title_ner[3]
                        break
        if punishTimes == [] or institutions == []:
            # If still missing, take one of the last org entities that is directly followed
            # by a date as the institution, and that date as the punishment time.
            for ner in [ner for ner in entity_l if ner.entity_type == 'org'][-5:][::-1]:
                right = sentences_l[ner.sentence_index].sentence_text[ner.wordOffset_end:ner.wordOffset_end + 16]
                if institution_time.search(right):
                    if ins == '':
                        ins = ner.entity_text
                    if ptime == '':
                        ptime = institution_time.search(right).group(1)
                    break
            # Otherwise, if the last time entity sits at the very end of the article,
            # use it as the punishment time.
            if ptime == '':
                n_time = [ner for ner in entity_l if ner.entity_type == 'time']
                if len(n_time) != 0:
                    ner = n_time[-1]
                    if ner.sentence_index == len(sentences_l) - 1:
                        textLong = len(sentences_l[ner.sentence_index].sentence_text)
                        if ner.wordOffset_end > textLong - 3 and len(ner.entity_text) > 3:
                            ptime = ner.entity_text
        institutions = [ner.entity_text for ner in institutions]
        punishTimes = [ner.entity_text for ner in punishTimes]
        if institutions == [] and ins != "":
            institutions.append(ins)
        if punishTimes == [] and ptime != "":
            punishTimes.append(ptime)
        return ";".join(institutions), ";".join(punishTimes)
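    # Illustrative example for the institution/time lookup above (hypothetical sentence,
    # not from the original code, and assuming the NER has already produced the org and
    # time entities): in "处罚机关:某某市财政局,处罚日期:2021年1月5日" the "处罚机关:"
    # prefix in front of the org entity satisfies ``institution_1`` and the "处罚日期:"
    # prefix in front of the time entity satisfies ``punishTimes_1``, so the two entities
    # are taken as the institution and the punishment time respectively.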
    # Complainant, respondent and punished party
    def get_complainant(self, punishType, sentences_l, entity_l):
        '''
        Locate the complainant and the complained-about / punished party from the
        announcement category, sentence list and entity list with regexes.
        :param punishType: punishment category of the announcement
        :param sentences_l: sentence list of a single announcement
        :param entity_l: entity list of a single announcement
        :return: complainant string, punished-party string
        '''
        complainants = []  # complainants
        punishPeople = []  # complained-about / punished parties
        size = 16
        # complainant / challenger keywords
        complainants_rule1 = re.compile(
            "(?:[^被]|^)(?:投[诉拆][人方]|质疑[人方]|质疑供应商|质疑单位|疑问[人方]|检举[人方]|举报[人方])[\d一二三四五六七八九十]?(\(.+?\))?(:?,?名称[\d一二三四五六七八九十]?)?(?:[::,]+.{0,3}$|$)")
        # punished / complained-about party keywords
        punishPeople_rule1 = re.compile(
            "(被投[诉拆][人方]|被检举[人方]|被举报[人方]|被处罚人|被处罚单位|行政相对人|单位名称|不良行为单位或个人|被查单位|处罚主题|企业|主体|违规对象|违规单位|当事人)[\d一二三四五六七八九十]?(\(.+?\))?(:?,?名称[\d一二三四五六七八九十]?)?(?:[::,]+.{0,3}$|$)")
        punishPeople_rule2_1 = re.compile(",$")
        punishPeople_rule2_2 = re.compile("^[::]")
        punishPeople_rule3_1 = re.compile("(?:关于|对)[^,。]*$")
        punishPeople_rule3_2 = re.compile("^[^,。]*(?:通报|处罚|披露|处理|信用奖惩|不良行为|不良记录)")

        punish_l = []  # grouped candidate entities
        tmp = []
        for ner in [ner for ner in entity_l if ner.entity_type in ['org', 'company', 'person']]:
            if tmp == []:
                tmp.append(ner)
            elif ner.entity_type == tmp[-1].entity_type and ner.sentence_index == tmp[-1].sentence_index and \
                    ner.wordOffset_begin - tmp[-1].wordOffset_end < 2 \
                    and sentences_l[ner.sentence_index].sentence_text[tmp[-1].wordOffset_end:ner.wordOffset_begin] in [
                        '', '、', '和', '及']:
                tmp.append(ner)
            elif ner.entity_type in ['org', 'company'] and tmp[-1].entity_type in ['org', 'company'] and \
                    ner.sentence_index == tmp[-1].sentence_index and ner.wordOffset_begin - tmp[-1].wordOffset_end < 2 \
                    and sentences_l[ner.sentence_index].sentence_text[tmp[-1].wordOffset_end:ner.wordOffset_begin] in [
                        '', '、', '和', '及']:
                tmp.append(ner)
            else:
                punish_l.append(tmp)
                tmp = [ner]
        if tmp:
            punish_l.append(tmp)  # keep the final group of entities as well

        for ner_l in punish_l:
            begin_index = ner_l[0].wordOffset_begin
            end_index = ner_l[-1].wordOffset_end
            left = sentences_l[ner_l[0].sentence_index].sentence_text[max(0, begin_index - size):begin_index]
            right = sentences_l[ner_l[0].sentence_index].sentence_text[end_index:end_index + size]
            if complainants_rule1.search(left):
                complainants.append(ner_l)
            elif punishPeople_rule1.search(left):
                punishPeople.append(ner_l)
            elif punishPeople_rule2_1.search(left) and punishPeople_rule2_2.search(right):
                if punishType == '投诉处理':
                    complainants.append(ner_l)
                else:
                    punishPeople.append(ner_l)
            elif punishPeople_rule3_1.search(left) and punishPeople_rule3_2.search(right):
                punishPeople.append(ner_l)
        complainants = set([it.entity_text for l in complainants for it in l])
        punishPeople = set([it.entity_text for l in punishPeople for it in l])
        return ';'.join(complainants), ';'.join(punishPeople)

    def get_punish_extracts(self, list_articles, list_sentences, list_entitys):
        list_result = []
        for article, list_sentence, list_entity in zip(list_articles, list_sentences, list_entitys):
            title = article.title
            text = article.content
            keyword, punishType = self.get_punishType(title, text)
            if punishType == "未知类别":
                punishType = ""
            # print('处罚类型:', punishType)
            punish_code = self.predict_punishCode(list_sentences)
            # print('处罚编号: ', punish_code)
            institutions, punishTimes = self.get_institution(title, list_sentence, list_entity)
            # print('执法机构:', institutions, '\n 处罚时间:', punishTimes)
            punishDecision = self.get_punishDecision(text, punishType)
            # print('处罚决定:', punishDecision)
            punishWhether = self.get_punishWhether(punishDecision, text, punishType)
            # print('投诉是否成立:', punishWhether)
            complainants, punishPeople = self.get_complainant(punishType, list_sentence, list_entity)
            # print('投诉人:%s 被投诉人:%s' % (complainants, punishPeople))
            punish_dic = {'punish_code': punish_code,
                          'punishType': punishType,
                          'punishDecision': punishDecision,
                          'complainants': complainants,
                          'punishPeople': punishPeople,
                          'punishWhether': punishWhether,
                          'institutions': institutions,
                          'punishTimes': punishTimes}
            # Drop empty fields; as written, the method returns while handling the first article.
            return {k: v for k, v in punish_dic.items() if v not in ['', ' ']}
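
# Note on the extractor output (derived from the code above, not additional documentation):
# get_punish_extracts returns a single dict with the keys punish_code, punishType,
# punishDecision, complainants, punishPeople, punishWhether, institutions and punishTimes,
# with empty values filtered out.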

if __name__ == "__main__":
    punish = Punish_Extract()

    import pandas as pd
    # with open('G:/失信数据/ALLDATA_re2-3.xlsx') as f:
    # df = pd.read_excel('G:/失信数据/ALLDATA_re2-3.xlsx', index=0)[2:10]
    # i = 89
    # predict('2', df.loc[i, 'PAGE_TITLE'],df.loc[i, 'PAGE_CONTENT'])
    # i = 92
    # predict('2', df.loc[i, 'PAGE_TITLE'],df.loc[i, 'PAGE_CONTENT'])

    # t1 = time.time()
    # for i in df.index:
    #     punish_code, punishType, punishDecision, complainants, punishPeople, punishWhether, institutions, punishTimes = \
    #         get_punish_extracts(i, df.loc[i, 'PAGE_TITLE'], df.loc[i, 'PAGE_CONTENT'])
    #     df.loc[i, '投诉人'] = complainants
    #     df.loc[i, '被投诉人'] = punishPeople
    #     df.loc[i, '执法机构'] = institutions
    #     df.loc[i, '处罚时间'] = punishTimes
    #     df.loc[i, '处罚编号'] = punish_code
    #     print('完成第%d篇'%i)
    # # df.to_excel('G:/失信数据/ALLDATA_re2-4.xlsx', encoding='utf-8',columns=[['PAGE_TITLE', 'PAGE_CONTENT',
    # #                                     '关键词', '类别', '处理决定', '投诉是否成立',
    # #                                     'DETAILLINK', 'sentences', 'PAGE_TIME', 'complainant', 'punishPeople',
    # #                                     'institution', 'punishTime', 'ner_test']])
    # t2 = time.time()
    # # df.to_excel('G:/失信数据/ALLDATA_re2-4.xlsx', encoding='utf-8',columns=['PAGE_TITLE', 'PAGE_CONTENT',
    # #                                     '关键词', '类别', '处理决定', '投诉是否成立',
    # #                                     'DETAILLINK', 'sentences', 'PAGE_TIME', 'complainant', '投诉人', 'punishPeople', '被投诉人',
    # #                                     'institution', '执法机构', 'punishTime', '处罚时间', 'ner_test', '处罚编号'])
    # df.to_excel('G:/失信数据/ALLDATA_re2-4.xlsx', encoding='utf-8',columns=['PAGE_TITLE', 'PAGE_CONTENT',
    #                                     '关键词', '类别', '处理决定', '投诉是否成立', '投诉人', '被投诉人', '执法机构', '处罚时间', '处罚编号',
    #                                     'DETAILLINK', 'sentences', 'PAGE_TIME'])
    # t3 = time.time()
    # print('处理耗时:%.4f, 保存耗时:%.4f'%(t2-t1, t3-t2))

    # s = '''兰州铁路公安局台式计算机比价采购成交公告 比价单号: BJ21012710021020 比价单名称: 兰州铁路公安局台式计算机比价采购 报价截止时间: 2021-01-29 00:00:00 采购单位: 兰州铁路公安局 采购内容: 台式计算机 采购数量: 1 成交供应商名称: 北京未来长盛科技有限公司 成交金额(元): 4896.00 供应商报价详情 序号 是否成交 供应商名称 企业性质 报价商品名称 单价(元) 总报价(元) 排名价(元) 报价时间 1 成交 北京未来长盛科技有限公司 台式计算机HP ProOne 400 G6 V802105105A I3/8G/1T/2G/23.8寸一体机 4896.00 4896.00 4896.00 2021-01-28 17:36:03 2 北京中际远华科贸有限公司 联想(Lenovo)启天M62G-D008/I5-9500/8G/1T/2G独显/刻录光驱/win10政府版/21.5寸/三年质保/商用台式计算机 4899.00 4899.00 4899.00 2021-01-27 17:51:48 3 金典高科(北京)科技有限公司 联想(Lenovo)启天M62G-D008/I5-9500/8G/1T/2G独显/刻录光驱/win10政府版/21.5寸/三年质保/商用台式计算机 4938.00 4938.00 4938.00 2021-01-28 17:36:07 4 福建兄弟文仪企业服务股份有限公司 联想(Lenovo)启天M62G-D008/I5-9500/8G/1T/2G独显/刻录光驱/win10政府版/21.5寸/三年质保/商用台式计算机 4950.00 4950.00 4950.00 2021-01-28 13:56:11 5 北京华信泰博科技有限公司 联想(Lenovo)启天M62G-D008/I5-9500/8G/1T/2G独显/刻录光驱/win10政府版/21.5寸/三年质保/商用台式计算机 4985.00 4985.00 4985.00 2021-01-27 17:22:43 6 北京思科派电子产品有限公司 联想(Lenovo)启天M62G-D008/I5-9500/8G/1T/2G独显/刻录光驱/win10政府版/21.5寸/三年质保/商用台式计算机 4989.00 4989.00 4989.00 2021-01-27 18:07:55 7 北京伟豪基业信息科技有限公司 联想(Lenovo)启天M62G-D008/I5-9500/8G/1T/2G独显/刻录光驱/win10政府版/21.5寸/三年质保/商用台式计算机 4999.00 4999.00 4999.00 2021-01-27 16:57:23 8 兰州亿佳科技有限公司 联想(Lenovo)启天M62G-D008/I5-9500/8G/1T/2G独显/刻录光驱/win10政府版/21.5寸/三年质保/商用台式计算机 4999.00 4999.00 4999.00 2021-01-28 14:52:45 9 北京瀚海浩达科技有限公司 联想(Lenovo)启天M62G-D008/I5-9500/8G/1T/2G独显/刻录光驱/win10政府版/21.5寸/三年质保/商用台式计算机 4999.00 4999.00 4999.00 2021-01-28 17:50:20 比价规则: 1.有比价单列表中商品在售的供应商可参与相应比价单的比价。 2.比价截止后,系统自动确定比价结果。 3.有效报价不足三家,系统自动废标。 4.有效报价满足三家,将自动确定最低价成交。 5.如成交供应商超过72小时未确认订单,视为该供应商放弃成交,可顺延至第二名成交。 6.小微企业、残疾人福利性单位、监狱企业将享受国家规定的价格优惠支持政策。 7.生产厂商和销售商同时满足价格支持政策方可享受价格优惠。
    # '''
    # # list_sentences = [s.split('。')]
    # punish_code = punish.predict_punishCode(list_sentences)
    # print(punish_code)

    # punish_code, punishType, punishDecision, complainants, punishPeople, punishWhether, institutions, punishTimes = \
    #     get_punish_extracts(text=s)
    # punish_dic = punish.get_punish_extracts(text=s)
    # print(punish_dic)
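
    # ------------------------------------------------------------------
    # Illustrative usage sketch (not part of the original script): how the
    # punishment-code predictor could be exercised with plain strings.
    # predict_punishCode only reads the ``sentence_text`` attribute of each
    # sentence object, so a namedtuple stand-in is enough for a quick test;
    # the real pipeline passes Sentence objects produced by BiddingKG's
    # preprocessing. The demo text below is made up.
    # ------------------------------------------------------------------
    # from collections import namedtuple
    # DemoSentence = namedtuple("DemoSentence", ["sentence_text"])
    # demo_text = "行政处罚决定书,处罚编号:某某罚〔2021〕1号,经查,当事人存在违法违规行为。"
    # demo_sentences = [[DemoSentence(sentence_text=t) for t in demo_text.split(',') if t]]
    # print(punish.predict_punishCode(demo_sentences))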