#!/usr/bin/python3
# -*- coding: utf-8 -*-
# @Author : bidikeji
# @Time : 2020/12/24 0024 15:23
import re
import os
import time
import tensorflow as tf
from BiddingKG.dl.common.Utils import *
from tensorflow.contrib.crf import crf_log_likelihood, viterbi_decode  # viterbi_decode is used by decode() below
from tensorflow.contrib.layers.python.layers import initializers
from keras.preprocessing.sequence import pad_sequences
import BiddingKG.dl.interface.Preprocessing as Preprocessing
from BiddingKG.dl.interface.Preprocessing import *


def decode(logits, trans, sequence_lengths, tag_num):
    viterbi_sequences = []
    for logit, length in zip(logits, sequence_lengths):
        score = logit[:length]
        viterbi_seq, viterbi_score = viterbi_decode(score, trans)
        viterbi_sequences.append(viterbi_seq)
    return viterbi_sequences
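
# Note on decode(): `logits` is assumed to be shaped [batch, max_len, tag_num]
# and `trans` to be the [tag_num, tag_num] CRF transition matrix fetched from
# the frozen graph (shapes inferred from the feed/fetch tensors used below);
# `tag_num` itself is unused and kept only for interface compatibility.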


class Punish_Extract():
    def __init__(self, model_file=os.path.dirname(__file__) + "/models/complaint_code.pb"):
        # print('model_file_path:', model_file)
        self.sess = tf.Session(graph=tf.Graph())
        self.code = ""
        self.punish_dicition = ""
        self.model_file = model_file  # model for predicting the punishment code
        self.load_model()

    # Load the punishment-code prediction model (frozen .pb graph)
    def load_model(self):
        with self.sess.as_default() as sess:
            with sess.graph.as_default():
                output_graph_def = tf.GraphDef()
                with open(self.model_file, 'rb') as f:
                    output_graph_def.ParseFromString(f.read())
                    tf.import_graph_def(output_graph_def, name="")
                    sess.run(tf.global_variables_initializer())
                    self.char_input = sess.graph.get_tensor_by_name('char_input:0')
                    self.length = sess.graph.get_tensor_by_name('length:0')
                    self.trans = sess.graph.get_tensor_by_name('crf_loss/transitons:0')
                    self.logits = sess.graph.get_tensor_by_name('CRF/output/logits:0')

    # Punishment-code prediction
    def predict_punishCode(self, list_sentences):
        # Tag ids in the decoded sequence: "1" opens a code entity, "2" continues
        # it and "3" closes it (a B/I/E-style scheme inferred from this regex).
        re_ner = re.compile("12+?3")
        article_ner_list = []
        count = 0
        with self.sess.as_default() as sess:
            with sess.graph.as_default():
                for sentences in list_sentences:
                    count += 1
                    # print(count)
                    sentence_len = [len(sentence.sentence_text) for sentence in sentences]
                    maxlen = max(sentence_len)
                    sentences_x = []
                    for sentence in sentences:
                        sentence = sentence.sentence_text
                        sentence = list(sentence)
                        sentence2id = [getIndexOfWord(word) for word in sentence]
                        sentences_x.append(sentence2id)
                    sentences_x = pad_sequences(sentences_x, maxlen=maxlen, padding="post", truncating="post")
                    sentences_x = [np.array(x) for x in sentences_x]
                    _logits, _trans = self.sess.run([self.logits, self.trans],
                                                    feed_dict={self.char_input: np.array(sentences_x),
                                                               self.length: sentence_len})
                    viterbi_sequence = decode(logits=_logits, trans=_trans, sequence_lengths=sentence_len, tag_num=4)
                    ner_list = []
                    for _seq, sentence in zip(viterbi_sequence, sentences):
                        sentence = sentence.sentence_text
                        seq_id = ''.join([str(s) for s in _seq])
                        if re_ner.search(seq_id):
                            # print("sentence: ", sentence)
                            for _ner in re_ner.finditer(seq_id):
                                start = _ner.start()
                                end = _ner.end()
                                n = sentence[start:end]
                                # print(n, '<==>', start, end)
                                # ner_list.append((n, start, end))
                                ner_list.append(n)  # keep only the entity text
                    # article_ner_list.append(ner_list)
                    article_ner_list.append(';'.join(set(ner_list)))
        return article_ner_list[0]  # note: only the first article's result is returned
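
    # A minimal usage sketch (it mirrors the __main__ demo at the bottom of
    # this file; the result is a ";"-joined set of code strings for the first
    # article only):
    #   _, list_sentences, _, _ = Preprocessing.get_preprocessed([['', text, "", "", ""]], useselffool=True)
    #   codes = Punish_Extract().predict_punishCode(list_sentences)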

    # Punishment type
    def get_punishType(self, x1, x2):
        '''Classify the article by its title and content.
        x1: title
        x2: content
        return: (matched keyword, category)'''
        # x1 = x1.replace('(', '(').replace(')', ')').replace(' ', '')
        # x2 = x2.replace('(', '(').replace(')', ')').replace(' ', '')
        '''Title patterns'''
        # Unknown announcement
        unknow = re.compile('采购方式|采购公告|磋商公告|谈判公告|交易公告$|征集|征求|招标公告|竞标公告|中标公告|'
                            '成交公告|成交信息|流标公告|废标公告|城市管理考评|决算表|决算|预算|资格考试|招聘|选聘'
                            '|聘请|拟录用|无违规违法|无此项信息|暂无工程投标违法|管理办法|指导意见|无投诉|投诉办法'
                            '|公共资源交易情况|绩效评价|考试成绩|付息公告|不动产|办证|印发|转发')  # fixed a missing '|'; some '结果公示' also belong here
        # Complaint handling
        tscl = re.compile('投诉不予[处受]理|投诉不成立|终止投诉|投诉终止|不予受理|投诉事?项?的?处理')
        # Administrative penalty
        xzcf = re.compile('行政处罚|行政处理|政处罚|行政裁决|防罚|公罚|医罚|环罚|政罚|文罚|局罚|旅罚|财罚|运罚')
        # Supervision and inspection
        jdjc = re.compile('(监督检查的?问?题?(处理|整改|记分|结果|决定|处罚))|监督处罚|调查处理|监督处理')
        # Serious violation
        yzwf = re.compile('严重违法失信|黑名单|失信名单')
        # Misconduct
        blxw = re.compile('((不良|失信|不诚信|差错|不规范|违规|违约|处罚|违法)(行为|记录|信息))|((违约|违规|违法)(处理|操作|情况|问题))'
                          '|通报批评|记分管理|迟到|早退|缺席|虚假材料|弄虚作假|履职不到位|诚信考核扣分|串通投标'
                          '|审核不通过|码一致|地址一致|扣分处理|扣分通知|扣[0-9]+分|责令整改|信用信息认定书$'
                          '|关于.{,30}的处罚|关于.{,10}的?考评通报|关于.{,30}扣分情况|不规范代理行为'
                          '|(取消|暂停|限制).{,50}((专家|评标|评委|投标|竞价|被抽取|中标|供应商|候选人)资格)'
                          '|(代理服?务?机构).{,10}(扣分)|(专家).{,30}(扣分|记分|处罚)|对.{,30}处理|冻结.{,30}账号')
        # Other misconduct
        other = re.compile('质疑|代理机构进场交易情况|网上投诉办理|信用奖惩|信用奖罚|进场工作.{,5}考核'
                           '|举报处理|结果无效|成交无效|行政复议')
        '''Content patterns'''
        # Complaint handling
        tscl_c = re.compile('(投诉(人|单位)[1-9]?(名称)?[::])|(投诉事项[1-5一二三四五、]*部?分?(成立|予以受理))'
                            '|((驳回|撤回|撤销|终止)[^,。]{,60}(投诉|质疑))')
        # Administrative penalty
        xzcf_c = re.compile('((处理依据及结果|处理结果|处罚结果)).*行政处罚|如下行政处罚|行政处罚决定')
        # Integrity bonus points
        cxjf_c = re.compile('处罚结果.*诚信加分')
        # Serious violation / loss of trust
        yzwf_c = re.compile('工商部门严重违法失信起名单|严重违法失信的具体情形')
        # Misconduct
        blxw_c = re.compile('(取消|暂停|限制).{,30}((专家|评标|评委|投标|采购|竞价|被抽取|中标|供应商)的?资格)'
                            '|(处罚结果|处罚情况).*(扣[1-9]*分|记分|不良行为|不良记录|不良信用|不诚信|扣除信用'
                            '|诚信档案|信用信息|取消.*资格|口头警告|处罚机关|责令改正|罚款|限制投标|暂扣|禁止'
                            '|暂停|封禁|暂无|行政处罚)|处罚结果'
                            '|处罚主题|禁止参与.{,10}政府采购活动|列入不良行为|处罚如下|如下处罚|违规处罚|处罚违规'
                            '|责令改正|责令整改|处罚依据|进行以下处理|处理依据及结果|处理结果|处罚决定书|'
                            '(不规范|不良|不诚信)行为记录')
        # Other misconduct
        other_c = re.compile('质疑(人|单位)[1-9]?(名称)?:|公告期内受质疑')
        # Title rules are checked before content rules; the first match wins.
        if re.search(unknow, x1):
            return re.search(unknow, x1).group(0), '未知类别'
        elif re.search(yzwf, x1):
            return re.search(yzwf, x1).group(0), '严重违法'
        elif re.search(yzwf_c, x2):
            return re.search(yzwf_c, x2).group(0), '严重违法'
        elif re.search(tscl, x1):
            return re.search(tscl, x1).group(0), '投诉处理'
        elif re.search(xzcf, x1):
            return re.search(xzcf, x1).group(0), '行政处罚'
        elif re.search(jdjc, x1):
            return re.search(jdjc, x1).group(0), '监督检查'
        elif re.search(blxw, x1):
            return re.search(blxw, x1).group(0), '不良行为'
        elif re.search(other, x1):
            return re.search(other, x1).group(0), '其他不良行为'
        elif re.search(tscl_c, x2):
            return re.search(tscl_c, x2).group(0), '投诉处理'
        elif re.search(xzcf_c, x2):
            return re.search(xzcf_c, x2).group(0), '行政处罚'
        elif re.search(cxjf_c, x2):
            return re.search(cxjf_c, x2).group(0), '诚信加分'
        elif re.search(blxw_c, x2):
            return re.search(blxw_c, x2).group(0), '不良行为'
        elif re.search(other_c, x2):
            return re.search(other_c, x2).group(0), '其他不良行为'
        return ' ', '未知类别'

    # Punishment decision
    def get_punishDecision(self, x, x2):
        '''Regex-match the handling decision in the article content.
        x: article content
        x2: punishment category
        return: decision string'''
        rule1 = re.compile(
            '(((如下|以下|处理|研究|本机关|我机关|本局|我局)决定)|((决定|处理|处理意见|行政处罚|处罚)(如下|如下))'
            '|((以下|如下)(决定|处理|处理意见|行政处罚|处罚))|处理依据及结果|处理结果|处罚结果|处罚情况|限制行为'
            '|整改意见)[::].{5,}')
        rule2 = re.compile(
            '(((如下|以下|处理|研究|本机关|我机关|本局|我局)决定)|((决定|处理|处罚|处理意见)(如下|如下))'
            '|((以下|如下)(决定|处理|处理意见|处罚))|处理依据及结果|处理结果|处罚结果|处罚情况|限制行为'
            '|处罚内容)[:,,].{10,}')
        rule3 = re.compile('考评结果:?.*')
        rule4 = re.compile('(依据|根据)《.*》.*')
        # Search progressively larger tail windows of the article text.
        if x2 == '未知类别':
            return ' '
        elif re.search(rule1, x[-int(len(x) * 0.4):]):
            return re.search(rule1, x[-int(len(x) * 0.4):]).group(0)
        elif re.search(rule1, x[-int(len(x) * 0.6):]):
            return re.search(rule1, x[-int(len(x) * 0.6):]).group(0)
        elif re.search(rule2, x[-int(len(x) * 0.7):]):
            return re.search(rule2, x[-int(len(x) * 0.7):]).group(0)
        elif re.search(rule3, x[-int(len(x) * 0.6):]):
            return re.search(rule3, x[-int(len(x) * 0.6):]).group(0)
        elif re.search(rule4, x[-int(len(x) * 0.4):]):
            return re.search(rule4, x[-int(len(x) * 0.4):]).group(0)
        else:
            return ' '

    # Whether the complaint is upheld
    def get_punishWhether(self, x1, x2, x3):
        '''Regex-match the handling decision to judge whether the complaint is upheld.
        x1: decision string
        x2: article content
        x3: punishment category
        return: whether the complaint is upheld'''
        p1 = re.compile('(投诉|投拆|质疑|举报)(事项|内容|事实)?[^不,。]{,10}(成立|属实|予以受理|予以支持)|责令|废标|(中标|成交)[^,。]{,10}无效'
                        '|取消[^,。]{,60}资格|罚款|重新(组织|开展)?(招标|采购)|投诉成立|被投诉人存在违法违规行为'
                        '|采购活动违法|(中标|评标|成交)结果无效')
        p2 = re.compile('投诉不予[处受]理|((投诉|投拆|质疑|举报)(事项|内容|事实)?[^,。]{,10}(不成立|情?况?不属实|不予支持|缺乏事实依据))'
                        '|((驳回|撤回|撤销|终止)[^,。]*(投诉|质疑|诉求))|终止[^,。]{,20}(行政裁决|投诉处理|采购活动)|投诉终止|投诉无效'
                        '|予以驳回|不予受理|继续开展采购|被投诉人不存在违法违规行为|中标结果有效|投诉[^,。]{,10}不成立'
                        '|维持被投诉人|不支持[^,。]{,20}投诉|无确凿证据')
        if x3 != '投诉处理':
            return ' '
        elif re.search(p1, x1):
            return '投诉成立'
        elif re.search(p2, x1):
            return '投诉无效'
        elif re.search(p1, x2):
            return '投诉成立'
        elif re.search(p2, x2):
            return '投诉无效'
        return ' '

    # Enforcement institution and punishment time
    def get_institution(self, title, sentences_l, entity_l):
        '''
        Judge whether an entity is the enforcement institution from the text in front of it.
        :param title: article title
        :param sentences_l: sentence list of one announcement
        :param entity_l: entity list of one announcement
        :return: institution and punishment-time strings; multiple values are joined with ";"
        '''
        institutions = []
        punishTimes = []
        institution_1 = re.compile("(?:处罚执行部门|认定部门|执法机关名称|执法单位|通报部门|处罚机关|处罚部门)[::]")
        punishTimes_1 = re.compile("(?:处罚日期|限制行为开始时间|曝光开始日期|处罚决定日期|处罚期限|处罚时间|处理日期|公告开始时间)[::]")
        # Use the keywords right before an entity to decide whether it is an
        # enforcement institution or a punishment time.
        for ner in entity_l:
            if ner.entity_type == 'org':
                left = sentences_l[ner.sentence_index].sentence_text[
                       max(0, ner.wordOffset_begin - 15):ner.wordOffset_begin]
                if institution_1.search(left):
                    institutions.append(ner)
                elif institutions != [] and ner.sentence_index == institutions[-1].sentence_index and \
                        ner.wordOffset_begin - institutions[-1].wordOffset_end < 2 and \
                        sentences_l[ner.sentence_index].sentence_text[
                            institutions[-1].wordOffset_end:ner.wordOffset_begin] \
                        in ['', '、', '和', '及']:  # slice order fixed: this is the gap text between the two entities
                    institutions.append(ner)
            elif ner.entity_type == 'time':
                left = sentences_l[ner.sentence_index].sentence_text[
                       max(0, ner.wordOffset_begin - 15):ner.wordOffset_begin]
                if punishTimes_1.search(left):
                    punishTimes.append(ner)
        institution_title = re.compile("财政局|财政厅|监督管理局|公管局|公共资源局|委员会")
        institution_time = re.compile(
            "(^,?[\d一二三四五六七八九十]{4},?[/年-][\d一二三四五六七八九十]{1,2},?[/月-][\d一二三四五六七八九十]{1,2},?[/日-]?)")
        ins = ""
        ptime = ""
        # If no institution was found above, look for an entity in the title
        # and regex-check it for institution keywords.
        if institutions == [] and len(title) > 10:
            title_ners = getNers([title], useselffool=True)
            if title_ners[0]:
                for title_ner in title_ners[0]:
                    if title_ner[2] == 'org' and institution_title.search(title_ner[3]):
                        ins = title_ner[3]
                        break
        if punishTimes == [] or institutions == []:
            # Still missing: if a date follows right after one of the last few
            # organization entities, take that entity and date as institution
            # and punishment time.
            for ner in [ner for ner in entity_l if ner.entity_type == 'org'][-5:][::-1]:
                right = sentences_l[ner.sentence_index].sentence_text[ner.wordOffset_end:ner.wordOffset_end + 16]
                if institution_time.search(right):
                    if ins == '':
                        ins = ner.entity_text
                    if ptime == '':
                        ptime = institution_time.search(right).group(1)
                    break
            # As a last resort, take the final time entity as the punishment
            # time when it sits at the very end of the article.
            if ptime == '':
                n_time = [ner for ner in entity_l if ner.entity_type == 'time']
                if len(n_time) != 0:
                    ner = n_time[-1]
                    if ner.sentence_index == len(sentences_l) - 1:
                        textLong = len(sentences_l[ner.sentence_index].sentence_text)
                        if ner.wordOffset_end > textLong - 3 and len(ner.entity_text) > 3:
                            ptime = ner.entity_text
        institutions = [ner.entity_text for ner in institutions]
        punishTimes = [ner.entity_text for ner in punishTimes]
        if institutions == [] and ins != "":
            institutions.append(ins)
        if punishTimes == [] and ptime != "":
            punishTimes.append(ptime)
        return ";".join(institutions), ";".join(punishTimes)

    # Complainant, respondent and punished party
    def get_complainant(self, punishType, sentences_l, entity_l):
        '''
        Find the complainant and the punished/complained-about party from the
        announcement category, sentence list and entity list via regexes.
        :param punishType: punishment category of the announcement
        :param sentences_l: sentence list of one announcement
        :param entity_l: entity list of one announcement
        :return: complainants, punished parties
        '''
        complainants = []  # complainants
        punishPeople = []  # respondents / punished parties
        size = 16
        # Complainant / challenger
        complainants_rule1 = re.compile(
            "(?:[^被]|^)(?:投[诉拆][人方]|质疑[人方]|质疑供应商|质疑单位|疑问[人方]|检举[人方]|举报[人方])[\d一二三四五六七八九十]?(\(.+?\))?(:?,?名称[\d一二三四五六七八九十]?)?(?:[::,]+.{0,3}$|$)")
        # Punished party / respondent
        punishPeople_rule1 = re.compile(
            "(被投[诉拆][人方]|被检举[人方]|被举报[人方]|被处罚人|被处罚单位|行政相对人|单位名称|不良行为单位或个人|被查单位|处罚主题|企业|主体|违规对象|违规单位|当事人)[\d一二三四五六七八九十]?(\(.+?\))?(:?,?名称[\d一二三四五六七八九十]?)?(?:[::,]+.{0,3}$|$)")
        punishPeople_rule2_1 = re.compile(",$")
        punishPeople_rule2_2 = re.compile("^[::]")
        punishPeople_rule3_1 = re.compile("(?:关于|对)[^,。]*$")
        punishPeople_rule3_2 = re.compile("^[^,。]*(?:通报|处罚|披露|处理|信用奖惩|不良行为|不良记录)")
        punish_l = []  # groups of adjacent entities
        tmp = []
        for ner in [ner for ner in entity_l if ner.entity_type in ['org', 'company', 'person']]:
            if tmp == []:
                tmp.append(ner)
            elif ner.entity_type == tmp[-1].entity_type and ner.sentence_index == tmp[-1].sentence_index and \
                    ner.wordOffset_begin - tmp[-1].wordOffset_end < 2 \
                    and sentences_l[ner.sentence_index].sentence_text[
                        tmp[-1].wordOffset_end:ner.wordOffset_begin] in ['', '、', '和', '及']:  # slice order fixed
                tmp.append(ner)
            elif ner.entity_type in ['org', 'company'] and tmp[-1].entity_type in ['org', 'company'] and \
                    ner.sentence_index == tmp[-1].sentence_index and ner.wordOffset_begin - tmp[-1].wordOffset_end < 2 \
                    and sentences_l[ner.sentence_index].sentence_text[
                        tmp[-1].wordOffset_end:ner.wordOffset_begin] in ['', '、', '和', '及']:
                tmp.append(ner)
            else:
                punish_l.append(tmp)
                tmp = [ner]
        if tmp:
            punish_l.append(tmp)  # flush the last group, which was previously dropped
        for ner_l in punish_l:
            begin_index = ner_l[0].wordOffset_begin
            end_index = ner_l[-1].wordOffset_end
            left = sentences_l[ner_l[0].sentence_index].sentence_text[max(0, begin_index - size):begin_index]
            right = sentences_l[ner_l[0].sentence_index].sentence_text[end_index:end_index + size]
            if complainants_rule1.search(left):
                complainants.append(ner_l)
            elif punishPeople_rule1.search(left):
                punishPeople.append(ner_l)
            elif punishPeople_rule2_1.search(left) and punishPeople_rule2_2.search(right):
                if punishType == '投诉处理':
                    complainants.append(ner_l)
                else:
                    punishPeople.append(ner_l)
            elif punishPeople_rule3_1.search(left) and punishPeople_rule3_2.search(right):
                punishPeople.append(ner_l)
        complainants = set([it.entity_text for l in complainants for it in l])
        punishPeople = set([it.entity_text for l in punishPeople for it in l])
        return ';'.join(complainants), ';'.join(punishPeople)

    def get_punish_extracts_backup(self, doc_id=' ', title=' ', text=' '):
        list_articles, list_sentences, list_entitys, _ = Preprocessing.get_preprocessed([[doc_id, text, "", "", ""]],
                                                                                        useselffool=True)
        # fixed: these calls used the module-level `punish` instance instead of self
        punish_code = self.predict_punishCode(list_sentences)
        # print('处罚编号: ', punish_code)
        institutions, punishTimes = self.get_institution(title, list_sentences[0], list_entitys[0])
        # print('执法机构:', institutions, '\n 处罚时间:', punishTimes)
        keyword, punishType = self.get_punishType(title, text)
        # print('处罚类型:', punishType)
        punishDecision = self.get_punishDecision(text, punishType)
        # print('处罚决定:', punishDecision)
        punishWhether = self.get_punishWhether(punishDecision, text, punishType)
        # print('投诉是否成立:', punishWhether)
        complainants, punishPeople = self.get_complainant(punishType, list_sentences[0], list_entitys[0])
        # print('投诉人:%s 被投诉人:%s' % (complainants, punishPeople))
        punish_dic = {'punish_code': punish_code,
                      'punishType': punishType,
                      'punishDecision': punishDecision,
                      'complainants': complainants,
                      'punishPeople': punishPeople,
                      'punishWhether': punishWhether,
                      'institutions': institutions,
                      'punishTimes': punishTimes}
        return punish_dic
        # return punish_code, punishType, punishDecision, complainants, punishPeople, punishWhether, institutions, punishTimes

    def get_punish_extracts(self, list_sentences, list_entitys, title=' ', text=' '):
        keyword, punishType = self.get_punishType(title, text)
        if punishType == "未知类别":
            return {'punishType': punishType}
        # print('处罚类型:', punishType)
        punish_code = self.predict_punishCode(list_sentences)
        # print('处罚编号: ', punish_code)
        institutions, punishTimes = self.get_institution(title, list_sentences[0], list_entitys[0])
        # print('执法机构:', institutions, '\n 处罚时间:', punishTimes)
        punishDecision = self.get_punishDecision(text, punishType)
        # print('处罚决定:', punishDecision)
        punishWhether = self.get_punishWhether(punishDecision, text, punishType)
        # print('投诉是否成立:', punishWhether)
        complainants, punishPeople = self.get_complainant(punishType, list_sentences[0], list_entitys[0])
        # print('投诉人:%s 被投诉人:%s' % (complainants, punishPeople))
        punish_dic = {'punish_code': punish_code,
                      'punishType': punishType,
                      'punishDecision': punishDecision,
                      'complainants': complainants,
                      'punishPeople': punishPeople,
                      'punishWhether': punishWhether,
                      'institutions': institutions,
                      'punishTimes': punishTimes}
        return punish_dic
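
# get_punish_extracts() above is the entry point: it short-circuits to
# {'punishType': '未知类别'} for unknown categories and otherwise returns a
# dict with code, type, decision, parties, institutions and times, as the
# demo below shows.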


if __name__ == "__main__":
    # punish = Punish_Extract(model_file='models/21-0.9990081295021194-0.3647936/model.ckpt')
    punish = Punish_Extract()

    import pandas as pd
    # with open('G:/失信数据/ALLDATA_re2-3.xlsx') as f:
    df = pd.read_excel('G:/失信数据/ALLDATA_re2-3.xlsx', index_col=0)[2:10]  # fixed: `index` is not a read_excel argument
    # i = 89
    # predict('2', df.loc[i, 'PAGE_TITLE'], df.loc[i, 'PAGE_CONTENT'])
    # i = 92
    # predict('2', df.loc[i, 'PAGE_TITLE'], df.loc[i, 'PAGE_CONTENT'])
    # t1 = time.time()
    # for i in df.index:
    #     punish_code, punishType, punishDecision, complainants, punishPeople, punishWhether, institutions, punishTimes = \
    #         get_punish_extracts(i, df.loc[i, 'PAGE_TITLE'], df.loc[i, 'PAGE_CONTENT'])
    #     df.loc[i, '投诉人'] = complainants
    #     df.loc[i, '被投诉人'] = punishPeople
    #     df.loc[i, '执法机构'] = institutions
    #     df.loc[i, '处罚时间'] = punishTimes
    #     df.loc[i, '处罚编号'] = punish_code
    #     print('完成第%d篇' % i)
    # t2 = time.time()
    # df.to_excel('G:/失信数据/ALLDATA_re2-4.xlsx', encoding='utf-8', columns=['PAGE_TITLE', 'PAGE_CONTENT',
    #             '关键词', '类别', '处理决定', '投诉是否成立', '投诉人', '被投诉人', '执法机构', '处罚时间', '处罚编号',
    #             'DETAILLINK', 'sentences', 'PAGE_TIME'])
    # t3 = time.time()
    # print('处理耗时:%.4f, 保存耗时:%.4f' % (t2 - t1, t3 - t2))

    s = '投诉处理公告,投诉人:张三。编号:厦财企〔2020〕12号,各有关单位:341号。处罚编号:厦财企〔2020〕12号,文章编号:京财采投字(2018)第42号。公告编号:闽建筑招〔2018〕5号。处罚编号:松公管监[2020]2号,'
    # list_sentences = [s.split('。')]
    # punish_code = punish.predict_punishCode(list_sentences)
    # print(punish_code)
    # punish_code, punishType, punishDecision, complainants, punishPeople, punishWhether, institutions, punishTimes = \
    #     get_punish_extracts(text=s)
    list_articles, list_sentences, list_entitys, _ = Preprocessing.get_preprocessed([['', s, "", "", ""]],
                                                                                    useselffool=True)
    punish_dic = punish.get_punish_extracts(list_sentences, list_entitys, text=s)
    print(punish_dic)