# test_data_fjs.py
  1. #coding:UTF-8
  2. # !/usr/bin/python
  3. # -*- coding: <utf-8> -*-
  4. import ast
  5. import copy
  6. import re
  7. import sys
  8. import os
  9. import time
  10. import codecs
  11. from datetime import datetime
  12. import psycopg2
  13. import pandas as pd
  14. sys.setrecursionlimit(1000000)
  15. sys.path.append(os.path.abspath("../.."))
  16. sys.path.append(os.path.abspath(".."))
  17. # 数据预处理,将数据转为BIO标注类型
  18. def Postgre2Data():
  19. # 连接postgresql数据库
  20. connect = psycopg2.connect(database="iepy", user="iepy_read", password="iepy_read", host="192.168.2.101",
  21. port="5432")
  22. cursor = connect.cursor()
  23. cursor1 = connect.cursor()
  24. # 执行语句:先筛选出已审核通过的用户和时间段
  25. # cursor1.execute("SELECT a.user, begin_time, end_time"
  26. # " FROM corpus_payroll a"
  27. # " ORDER BY a.user")
  28. #
  29. # rows1 = cursor1.fetchall()
  30. # # 循环,根据筛选条件循环查另一条SQL,并保存结果
  31. # result = []
  32. # for row in rows1:
  33. # # 执行语句:取语料库中的文章id,分词结果,句子分割index
  34. # cursor.execute("select human_identifier, tokens, sentences, text, edittime, edituser"
  35. # " from corpus_iedocument"
  36. # " where date(edittime) <= '" + row[2] + "'" +
  37. # " and date(edittime) >= '" + row[1] + "'" +
  38. # " and edituser = '" + row[0] + "'")
  39. # # + " limit 5")
  40. # # cursor.execute("select human_identifier, tokens, sentences, text, edittime, edituser"
  41. # # " from corpus_iedocument"
  42. # # " where date(edittime) >= '" + "2020-08-01" + "'" +
  43. # # " and date(edittime) <= '" + "2020-08-31" + "'" +
  44. # # " and edituser = '" + "test1" + "'" +
  45. # # " limit 10")
  46. #
  47. # # 获取SELECT 返回的元组
  48. # rows = cursor.fetchall()
  49. # for row in rows:
  50. # result.append(row)
  51. result = []
  52. cursor.execute("SELECT human_identifier, tokens, sentences, text, edittime, edituser"
  53. " FROM corpus_iedocument"
  54. " where edituser is not NULL")
  55. # + " limit 30")
  56. rows = cursor.fetchall()
  57. for row in rows:
  58. result.append(row)
  59. print(len(result))
  60. human_identifier = []
  61. tokens = []
  62. sentences = []
  63. text = []
  64. corpus_iedocument = []
  65. for row in result:
  66. human_identifier.append(row[0])
  67. s = row[1]
  68. s = s.replace("[", "").replace("]", "").replace("\'", "")
  69. ss = s.split(", ")
  70. sss = []
  71. for s1 in ss:
  72. sss.append(s1)
  73. tokens.append(sss)
  74. sentences.append(row[2])
  75. text.append(row[3])
  76. corpus_iedocument.append(human_identifier)
  77. corpus_iedocument.append(tokens)
  78. corpus_iedocument.append(sentences)
  79. corpus_iedocument.append(text)
  80. # print(corpus_iedocument[0])
  81. # 循环每个documentid,取出对应标注结果。
  82. # 返回二维列表,第一维是document,第二维是document_id和value
  83. brat_labeledbratannotation = []
  84. for i in range(len(corpus_iedocument[0])):
  85. document = []
  86. document_id = []
  87. value = []
  88. # 执行语句,取brat人工标注库中的文章id,标注的结果
  89. cursor.execute('select document_id, value from brat_bratannotation '
  90. + 'where document_id = \'' + corpus_iedocument[0][i] + '\'')
  91. rows = cursor.fetchall()
  92. for row in rows:
  93. if (row[1][0] != 'T'):
  94. continue
  95. # print(row[1][0])
  96. document_id.append(row[0])
  97. value.append(row[1])
  98. document.append(document_id)
  99. document.append(value)
  100. brat_labeledbratannotation.append(document)
  101. # 关闭游标
  102. cursor.close()
  103. # cursor1.close()
  104. # 关闭数据库连接
  105. connect.close()
  106. return corpus_iedocument, brat_labeledbratannotation
  107. def Text2Csv():
  108. corpus_iedocument, brat_labeledbratannotation = Postgre2Data()
  109. # text_df = pd.DataFrame(columns=("document_id", "text", "value"))
  110. text_list = []
  111. document_id_list = []
  112. manual_BIO_list = []
  113. category_list = []
  114. word_list = []
  115. # 循环:每篇Document
  116. for index in range(len(corpus_iedocument[3])):
  117. text = corpus_iedocument[3][index]
  118. document_id = brat_labeledbratannotation[index][0]
  119. manual_BIO = brat_labeledbratannotation[index][1]
  120. # 循环:处理人工标注的数据,结构化,取联系人类型和单词index,并对数组按单词index排序
  121. for j in range(len(manual_BIO)):
  122. categoryAndIndex = manual_BIO[j].replace(" ", " ").split(" ")[1:]
  123. category = categoryAndIndex[0]
  124. word = categoryAndIndex[-1]
  125. document_id_list.append(document_id[j])
  126. text_list.append(text)
  127. category_list.append(category)
  128. word_list.append(word)
  129. manual_BIO_list.append(categoryAndIndex)
  130. text_dict = {'document_id': document_id_list, 'text': text_list, 'word': word_list, 'category': category_list, 'categoryAndIndex': manual_BIO_list}
  131. text_df = pd.DataFrame(text_dict)
  132. # text_df.columns = ['document_id', 'text', 'word', 'category', 'categoryAndIndex']
  133. text_df.to_csv("C:\\Users\\admin\\Desktop\\text.csv")
  134. return
  135. def Csv2BidwayText():
  136. df = pd.read_csv("C:\\Users\\admin\\Desktop\\text.csv")
  137. df = df[df["category"] == "bidway"]
  138. df.columns = ["index", "category", "categoryAndIndex", "document_id", "text", "word"]
  139. df = df.reset_index()
  140. df = df[["document_id", "text", "categoryAndIndex", "word", "category"]]
  141. df.to_csv("C:\\Users\\admin\\Desktop\\bidway_text.csv")
  142. def Csv2ServiceTimeText():
  143. df = pd.read_csv("C:\\Users\\admin\\Desktop\\text.csv")
  144. df = df[df["category"] == "serviceTime"]
  145. df.columns = ["index", "category", "categoryAndIndex", "document_id", "text", "word"]
  146. df = df.reset_index()
  147. df = df[["document_id", "text", "categoryAndIndex", "word", "category"]]
  148. df.to_csv("C:\\Users\\admin\\Desktop\\serviceTime_text.csv")
  149. def data2BIOData():
  150. corpus_iedocument, brat_labeledbratannotation = Postgre2Data()
  151. # 单词list
  152. words_list = [0]
  153. words_list_all = []
  154. # 单词的BIO标注列表
  155. word_BIO_list_all = []
  156. # 句子列表
  157. sentences_list_all = []
  158. manual_BIO_list = []
  159. # 单词在句子中的index列表
  160. wordInSentencesIndex_list_all = []
  161. # 单词对应的句子的编号:0~句子条数
  162. wordInSentencesNumber_list_all = []
  163. # 单词对应的句子分词token列表
  164. wordInSentencesTokens_list_all = []
  165. # 循环:documment篇数
  166. for i in range(len(corpus_iedocument[0])):
  167. categoryAndIndex_list = []
  168. words_list[0] = corpus_iedocument[1][i]
  169. words_list_all.append(corpus_iedocument[1][i])
  170. manual_BIO_list = brat_labeledbratannotation[i][1]
  171. # 循环:处理人工标注的数据,结构化,取联系人类型和单词index,并对数组按单词index排序
  172. for data in manual_BIO_list:
  173. categoryAndIndex = data.replace(" ", " ").split(" ")[1:4]
  174. categoryAndIndex_list.append(categoryAndIndex)
  175. categoryAndIndex_list = sorted(categoryAndIndex_list, key=lambda c: int(c[1]), reverse=False)
  176. # 循环:将该篇Document的句子分出来
  177. index_begin = 0
  178. formatted_sentence_index = corpus_iedocument[2][i][1:-1].split(",")
  179. sentences_list = []
  180. for index in range(1, len(formatted_sentence_index)):
  181. s = corpus_iedocument[3][i][index_begin: int(formatted_sentence_index[index])]
  182. index_begin = int(formatted_sentence_index[index])
  183. sentences_list.append(s)
  184. sentences_list_all.append(sentences_list)
  185. # 处理数据,成为BIO标注类型,即每个单词都有一个对应的标注
  186. # 对每个人工标注循环找,并对index跨单词进行标注
  187. # 单个单词多个标注就用列表全部存储
  188. # 并对单词输出其所在句子的index,和所在句子的编号
  189. word_BIO_list = [[0]] * len(words_list[0])
  190. # 循环:一篇document中所有Label和下标
  191. for index in range(len(categoryAndIndex_list)):
  192. word_index = 0
  193. # 标识上一个标注是否为B,记录上个标识,并记录最后位置
  194. tag_flag = ""
  195. tag_index = 0
  196. # 单词index标识
  197. word_flag = 0
  198. # 循环:对一个Label和Index循环所有单词
  199. for word in words_list[0]:
  200. if word_index == int(categoryAndIndex_list[index][1]):
  201. # 如果原来有标注的类,就添加;没有则赋值
  202. if word_BIO_list[word_flag][0] != 0 \
  203. and ("B-" + categoryAndIndex_list[index][0]) not in word_BIO_list[word_flag]:
  204. word_BIO_list[word_flag].append("B-" + categoryAndIndex_list[index][0])
  205. else:
  206. word_BIO_list[word_flag] = ["B-" + categoryAndIndex_list[index][0]]
  207. tag_flag = categoryAndIndex_list[index][0]
  208. tag_index = int(categoryAndIndex_list[index][2])
  209. # print(word, " ", "B-"+categoryAndIndex_list[index][0])
  210. elif word_index < tag_index - 1 and tag_flag != "":
  211. if word_BIO_list[word_flag][0] != 0 \
  212. and ("I-" + tag_flag) not in word_BIO_list[word_flag]:
  213. word_BIO_list[word_flag].append("I-" + tag_flag)
  214. else:
  215. word_BIO_list[word_flag] = ["I-" + tag_flag]
  216. word_flag += 1
  217. word_index += len(word)
  218. # 有些空白word
  219. if word is None or word == "":
  220. word_index += 1
  221. # 循环:将其余Label置为O
  222. for index in range(len(word_BIO_list)):
  223. if word_BIO_list[index][0] == 0:
  224. word_BIO_list[index] = ["O"]
  225. word_BIO_list_all.append(word_BIO_list)
  226. # 输出每个单词在句子中的index和在第几条句子;之前的单词index是全文的index。
  227. # 并输出每个单词对应的句子的分词Tokens
  228. wordInSentencesIndex_list = []
  229. wordInSentencesNumber_list = []
  230. wordInSentencesTokens_list = []
  231. sentence_number = 0
  232. sentences_index_list = corpus_iedocument[2][i][1:-1].split(", ")
  233. word_index = 0
  234. # 循环:所有单词
  235. for index in range(len(words_list[0])):
  236. # 判断在第几个句子
  237. # print("word_index", word_index, sentence_number, len(sentences_index_list))
  238. if sentence_number + 1 >= len(sentences_index_list) or word_index < int(
  239. sentences_index_list[sentence_number + 1]):
  240. wordInSentencesNumber_list.append(sentence_number)
  241. else:
  242. sentence_number += 1
  243. if sentence_number >= len(sentences_index_list):
  244. break
  245. wordInSentencesNumber_list.append(sentence_number)
  246. # 输出该单词在该句子的index
  247. if sentences_index_list[sentence_number] == "":
  248. continue
  249. wordInSentence_begin_index = word_index - int(sentences_index_list[sentence_number])
  250. if words_list[0][index] is None or words_list[0][index] == "":
  251. wordInSentence_end_index = wordInSentence_begin_index + 1
  252. else:
  253. wordInSentence_end_index = wordInSentence_begin_index + len(words_list[0][index])
  254. wordInSentencesIndex_list.append(str(wordInSentence_begin_index) + "," + str(wordInSentence_end_index))
  255. # 根据句子编号输出句子Tokens
  256. if wordInSentencesNumber_list[index] < len(sentences_list):
  257. wordInSentencesTokens_list.append(sentences_list[wordInSentencesNumber_list[index]])
  258. else:
  259. wordInSentencesTokens_list.append(sentences_list[-1])
  260. # # 输出该单词在该句子的index
  261. # if sentences_index_list[sentence_number] == "":
  262. # # print("句子序号为'': ")
  263. # # print(sentences_index_list, len(sentences_index_list), sentence_number)
  264. # continue
  265. #
  266. # for j in range(len(sentences_list[wordInSentencesNumber_list[index]])):
  267. #
  268. # wordInSentence_begin_index = word_index - int(sentences_index_list[sentence_number])
  269. # if words_list[0][index] is None or words_list[0][index] == "":
  270. # wordInSentence_end_index = wordInSentence_begin_index + 1
  271. # else:
  272. # wordInSentence_end_index = wordInSentence_begin_index + len(words_list[0][index])
  273. # wordInSentencesIndex_list.append(str(wordInSentence_begin_index) + "," + str(wordInSentence_end_index))
  274. word_index += len(words_list[0][index])
  275. # 有些空白word
  276. if words_list[0][index] is None or words_list[0][index] == "":
  277. word_index += 1
  278. wordInSentencesIndex_list_all.append(wordInSentencesIndex_list)
  279. wordInSentencesNumber_list_all.append(wordInSentencesNumber_list)
  280. wordInSentencesTokens_list_all.append(wordInSentencesTokens_list)
  281. # print("wordInSentencesTokens_list", wordInSentencesTokens_list)
  282. return words_list_all, word_BIO_list_all, wordInSentencesIndex_list_all, wordInSentencesTokens_list_all
  283. def BIOData2TXT():
  284. words_list_all, word_BIO_list_all, \
  285. wordInSentencesIndex_list_all, wordInSentencesTokens_list_all = data2BIOData()
  286. print(words_list_all)
  287. print(type(word_BIO_list_all))
  288. print(len(wordInSentencesIndex_list_all))
  289. print(len(wordInSentencesTokens_list_all))
  290. file = open('C:\\Users\\admin\\Desktop\\BIOData_list.txt', 'w', encoding='utf-8')
  291. file.write(str([words_list_all, word_BIO_list_all, wordInSentencesIndex_list_all, wordInSentencesTokens_list_all]))
  292. file.close()
  293. return
  294. def TXT2BIOData():
  295. start_time = time.time()
  296. file = open('C:\\Users\\admin\\Desktop\\BIOData_list.txt', 'r', encoding='utf-8')
  297. str1 = file.read()
  298. list1 = ast.literal_eval(str1)
  299. file.close()
  300. # print(list1[0])
  301. # print(type(list1[1]))
  302. # print(len(list1[2]))
  303. # print(len(list1[3]))
  304. end_time = time.time()
  305. print("耗时:", end_time-start_time)
  306. return list1[0], list1[1], list1[2], list1[3]
  307. def BIOData2DataFrame():
  308. words_list_all, word_BIO_list_all, _, _ = data2BIOData()
  309. # print(words_list_all)
  310. # print(word_BIO_list_all)
  311. df = pd.DataFrame([words_list_all[0], word_BIO_list_all[0]])
  312. df = df.T
  313. for index in range(len(words_list_all)):
  314. if index == 0:
  315. continue
  316. df = df.append(pd.DataFrame([words_list_all[index], word_BIO_list_all[index]]).T)
  317. # print(df)
  318. df.columns = ["Word", "BIO"]
  319. df.to_csv("C:\\Users\\admin\\Desktop\\BIO.csv")
  320. def PersonBIOData2BIO_Sentence():
  321. words_list_all, word_BIO_list_all, _, _ = data2BIOData()
  322. # words_list_all, word_BIO_list_all, _, _ = TXT2BIOData()
  323. # df = pd.DataFrame([words_list_all[0], word_BIO_list_all[0]])
  324. # df = df.T
  325. df = pd.DataFrame()
  326. # 对每个Document
  327. for index in range(len(words_list_all)):
  328. list1 = word_BIO_list_all[index]
  329. new_list = []
  330. # 对每个BIO对
  331. for i in range(len(list1)):
  332. str1 = ""
  333. for j in range(len(list1[i])):
  334. if list1[i][j][2:8] == "person":
  335. if str1 == "":
  336. str1 = list1[i][j]
  337. elif str1 != "O":
  338. str1 = str1 + "," + list1[i][j]
  339. else:
  340. str1 = "O"
  341. new_list.append(str1)
  342. df = df.append(pd.DataFrame([words_list_all[index], new_list]).T)
  343. df.columns = ["Word", "BIO"]
  344. # 将I-person转为B-person,因为一个模型只判断一类
  345. # df["BIO"] = df["BIO"].apply(lambda x: "B" + x[1:] if x[0] == "I" else x)
  346. # print(df[df["BIO"]])
  347. # print(df)
  348. # df.to_csv("C:\\Users\\admin\\Desktop\\Person_BIO.csv")
  349. # 合并B-person和I-person为B-person
  350. tag_flag = ""
  351. delete_index_list = []
  352. df = df.reset_index()
  353. df = df[["Word", "BIO"]]
  354. for index, row in df.iterrows():
  355. if row["BIO"][0] == "B":
  356. tag_flag = row["BIO"]
  357. elif row["BIO"][0] == "I" and tag_flag != "":
  358. df["Word"].iloc[index-1] = df["Word"].iloc[index-1] + df["Word"].iloc[index]
  359. # df1["end_index"].iloc[index-1] = int(df1["end_index"].iloc[index-1]) + len(df["Word"].iloc[index])
  360. delete_index_list.append(index)
  361. else:
  362. tag_flag = ""
  363. df = df.drop(delete_index_list)
  364. # df1 = df1.drop(delete_index_list)
  365. # 取标注为person_person的词的前35个词,后3个词作为一个句子
  366. sentences = []
  367. for index in range(len(df["BIO"])):
  368. sentence = ""
  369. if df["BIO"].iloc[index] != "O":
  370. sentence1 = ""
  371. sentence2 = ""
  372. if index > 60 or len(df["BIO"]) - index < 60:
  373. for i in range(60, 0, -1):
  374. sentence1 = sentence1 + df["Word"].iloc[index - i] + " "
  375. for i in range(1, 61):
  376. sentence2 = sentence2 + df["Word"].iloc[index + i] + " "
  377. sentence = sentence + sentence1 + "||" + df["Word"].iloc[index] + "||" + sentence2
  378. else:
  379. sentence = None
  380. else:
  381. sentence = None
  382. sentences.append(sentence)
  383. df["Sentence"] = sentences
  384. # 舍弃BIO为O的行
  385. df = df.reset_index()
  386. df = df[["Word", "Sentence", "BIO"]]
  387. delete_index_list = []
  388. for index, row in df.iterrows():
  389. if row["BIO"] == "O":
  390. delete_index_list.append(index)
  391. df = df.drop(delete_index_list)
  392. df = df.reset_index()
  393. df = df[["Word", "Sentence", "BIO"]]
  394. # 判断类标签,0为人名,1为联系人,2为招标联系人,3为代理联系人,4为评审专家,5为其他非联系人
  395. df["Label"] = df["BIO"].apply(lambda x: 5 if x == "O" else (1 if x[9:] == "person" else (
  396. 2 if x[9:] == "tendereePerson" else (3 if x[9:] == "agencyPerson" else (0 if x[2:] == "person" else 4)))))
  397. df = df[["Word", "Label", "Sentence", "BIO"]]
  398. df.to_csv("C:\\Users\\admin\\Desktop\\Person_Sentence_all_60.csv")
  399. # df["Sentence"] = df["BIO"].apply(lambda x: x if x[9:] == "person" else x)
  400. def BIOData2PersonData():
  401. words_list_all, word_BIO_list_all, \
  402. wordInSentencesIndex_list_all, wordInSentencesTokens_list_all = data2BIOData()
  403. df = pd.DataFrame()
  404. # 循环:对每个Document
  405. for index in range(len(words_list_all)):
  406. list1 = word_BIO_list_all[index]
  407. new_list = []
  408. # 循环:一篇Document中的每个BIO对,判断Label是person的
  409. for i in range(len(list1)):
  410. str1 = ""
  411. for j in range(len(list1[i])):
  412. if list1[i][j][2:8] == "person":
  413. # print("==", list1[i][j])
  414. if str1 == "":
  415. str1 = list1[i][j]
  416. elif str1 != "O":
  417. str1 = str1 + "," + list1[i][j]
  418. else:
  419. str1 = "O"
  420. new_list.append(str1)
  421. df = df.append(pd.DataFrame([words_list_all[index], new_list]).T)
  422. df.columns = ["Word", "BIO"]
  423. # 循环:对每个Document
  424. df1 = pd.DataFrame()
  425. for index in range(len(words_list_all)):
  426. # 循环:一篇Document中的单词的begin_index,end_index,tokens
  427. begin_index = []
  428. end_index = []
  429. tokens = []
  430. for i in range(len(wordInSentencesIndex_list_all[index])):
  431. ss = wordInSentencesIndex_list_all[index][i].split(",")
  432. begin_index.append(ss[0])
  433. end_index.append(ss[1])
  434. tokens.append(wordInSentencesTokens_list_all[index][i])
  435. df1 = df1.append(pd.DataFrame([tokens, begin_index, end_index]).T)
  436. df1.columns = ["tokens", "begin_index", "end_index"]
  437. # print("df1.shape ", df1.shape)
  438. # print("df.shape ", df.shape)
  439. # 将I-person转为B-person,因为一个模型只判断一类
  440. # df["BIO"] = df["BIO"].apply(lambda x: "B" + x[1:] if x[0] == "I" else x)
  441. # 判断类标签,0为人名,1为联系人,2为招标联系人,3为代理联系人,4为评审专家,5为其他非联系人
  442. df["Label"] = df["BIO"].apply(lambda x: 5 if x == "O" else (1 if x[9:] == "person" else (
  443. 2 if x[9:] == "tendereePerson" else (3 if x[9:] == "agencyPerson" else (0 if x[2:] == "person" else 4)))))
  444. # 重置索引
  445. df = df.reset_index()
  446. df1 = df1.reset_index()
  447. # 合并B-person和I-person为B-person
  448. tag_flag = ""
  449. delete_index_list = []
  450. for index, row in df.iterrows():
  451. if row["BIO"][0] == "B":
  452. tag_flag = row["BIO"]
  453. elif row["BIO"][0] == "I" and tag_flag != "":
  454. df["Word"].iloc[index-1] = df["Word"].iloc[index-1] + df["Word"].iloc[index]
  455. df1["end_index"].iloc[index-1] = int(df1["end_index"].iloc[index-1]) + len(df["Word"].iloc[index])
  456. delete_index_list.append(index)
  457. else:
  458. tag_flag = ""
  459. df = df.drop(delete_index_list)
  460. df1 = df1.drop(delete_index_list)
  461. # 重置索引
  462. df = df.reset_index()
  463. df1 = df1.reset_index()
  464. df1 = pd.concat([df["Word"], df["Label"], df1["tokens"], df1["begin_index"], df1["end_index"]], axis=1)
  465. df1.columns = ["Word", "Label", "tokens", "begin_index", "end_index"]
  466. # 舍弃Label为5的行
  467. delete_index_list = []
  468. for index, row in df1.iterrows():
  469. if row["Label"] == 5:
  470. delete_index_list.append(index)
  471. df1 = df1.drop(delete_index_list)
  472. df1.reset_index()
  473. # 拼接列begin_index,end_index,tokens
  474. # begin_index = []
  475. # end_index = []
  476. # for index in range(len(wordInSentencesIndex_list_all)):
  477. # ss = wordInSentencesIndex_list_all[index].split(",")
  478. # begin_index.append(ss[0])
  479. # end_index.append(ss[1])
  480. # df["begin_index"] = pd.DataFrame(begin_index)
  481. # df
  482. # print(df1)
  483. df1.to_csv("C:\\Users\\admin\\Desktop\\Person_Data_all.csv")
  484. def BIOData2Bidway():
  485. words_list_all, word_BIO_list_all, \
  486. wordInSentencesIndex_list_all, wordInSentencesTokens_list_all = TXT2BIOData()
  487. df = pd.DataFrame()
  488. # 循环:对每个Document
  489. for index in range(len(words_list_all)):
  490. list1 = word_BIO_list_all[index]
  491. new_list = []
  492. # 循环:一篇Document中的每个BIO对,判断Label是bidway的
  493. for i in range(len(list1)):
  494. str1 = ""
  495. for j in range(len(list1[i])):
  496. if list1[i][j][2:8] == "bidway":
  497. # print("==", list1[i][j])
  498. if str1 == "":
  499. str1 = list1[i][j]
  500. elif str1 != "O":
  501. str1 = str1 + "," + list1[i][j]
  502. else:
  503. str1 = "O"
  504. new_list.append(str1)
  505. df = df.append(pd.DataFrame([words_list_all[index], new_list]).T)
  506. df.columns = ["Word", "BIO"]
  507. df.to_csv("C:\\Users\\admin\\Desktop\\Bidway_BIO.csv")
  508. return
  509. def BIOData2ServiceTime():
  510. words_list_all, word_BIO_list_all, \
  511. wordInSentencesIndex_list_all, wordInSentencesTokens_list_all = TXT2BIOData()
  512. df = pd.DataFrame()
  513. # 循环:对每个Document
  514. for index in range(len(words_list_all)):
  515. list1 = word_BIO_list_all[index]
  516. new_list = []
  517. # 循环:一篇Document中的每个BIO对,判断Label是bidway的
  518. for i in range(len(list1)):
  519. str1 = ""
  520. for j in range(len(list1[i])):
  521. if list1[i][j][2:] == "serviceTime":
  522. # print("==", list1[i][j])
  523. if str1 == "":
  524. str1 = list1[i][j]
  525. elif str1 != "O":
  526. str1 = str1 + "," + list1[i][j]
  527. else:
  528. str1 = "O"
  529. new_list.append(str1)
  530. df = df.append(pd.DataFrame([words_list_all[index], new_list]).T)
  531. df.columns = ["Word", "BIO"]
  532. df.to_csv("C:\\Users\\admin\\Desktop\\ServiceTime_BIO.csv")
  533. return
  534. def duplicateData(label, sample_rate):
  535. df = pd.read_csv("C:\\Users\\admin\\Desktop\\Person_Data_all_OverSample.csv")
  536. print(df.shape)
  537. df1 = df[df["Label"] == label]
  538. df1 = df1.sample(frac=sample_rate)
  539. df = df.append(df1)
  540. df.to_csv("C:\\Users\\admin\\Desktop\\Person_Data_all_OverSample.csv")
  541. print(df.shape)
  542. def resetAndShuffleData():
  543. df = pd.read_csv("C:\\Users\\admin\\Desktop\\Person_Sentence_Notest.csv")
  544. df = df.sample(frac=1).reset_index(drop=True)
  545. df = df.reset_index()
  546. # df = df[["Word", "Label", "tokens", "begin_index", "end_index"]]
  547. df = df[["Word", "Label", "Sentence", "BIO"]]
  548. df.to_csv("C:\\Users\\admin\\Desktop\\Person_Sentence_Notest.csv")
  549. def re_bidway():
  550. df = pd.read_csv("C:\\Users\\admin\\Desktop\\bidway_text.csv")
  551. reg = re.compile(u'(采购方式|竞价方式|招标方式|询价类型|交易方式|寻源策略|招标形式|询价方式'
  552. u'|发包方式|发包类型|开展方式|招标类型)(.*)'
  553. u'(公开招标|竞争性磋商|竞争性谈判|公开采购|单一来源'
  554. u'|电子书面竞投|邀请招标|定向公开|询价采购|抽签摇号'
  555. u'|网上电子投标|比质比价|询单|询比采购|比选|单一来源采购'
  556. u'|网上招标|其他'
  557. u'|竞谈竞价|网上直购|公开竞谈'
  558. u'|库内邀请|库内公开发包)')
  559. # reg = re.compile(u'(公开招标|竞争性磋商|竞争性谈判采购|公开采购|单一来源|网络竞价'
  560. # u'|竞争性谈判|公开询价|邀请招标|公开招募|公开询比价|电子书面竞投'
  561. # u'|网上电子投标|比质比价|定向询单|国内比选|电子竞价'
  562. # u'|公开招租|公开竞标方式|网上招标|公开招标|国内竞争性谈判'
  563. # u'|国内竞争性磋商|公开竞谈|定向询价|网上询价|网上竞价|公开比选|磋商采购|网上直购'
  564. # u'|库内邀请|询价采购|询比采购|分散采购|单一来源采购)')
  565. reg2 = re.compile(u'(采用|以|)'
  566. u'(公开招标|竞争性磋商|竞争性谈判|公开采购|单一来源'
  567. u'|竞争性谈判|询价|电子书面竞投|电子竞价'
  568. u'|网上电子投标|比质比价|询单|询比采购|比选|单一来源采购'
  569. u'|网上招标|分散采购'
  570. u'|竞谈竞价|网上直购|公开竞谈'
  571. u'|库内邀请)'
  572. u'(采购方式|方式)')
  573. reg1 = re.compile(
  574. # u'(公开招标|竞争性磋商|竞争性谈判采购|公开采购|单一来源采购|网络竞价|公开招商方式'
  575. # u'|竞争性谈判|公开询价|询价采购|邀请招标|公开招募|公开询比|电子书面竞投'
  576. # u'|网上电子投标|比质比价|定向询单|询比采购|国内比选|单一来源|公开选取|库内公开发包'
  577. # u'|公开招租|公开竞标方式|网上招标|公开招标|竞争性谈判|公开招投标'
  578. # u'|国内竞争性磋商|公开竞谈|定向询价|网上询价|网上竞价|公开比选|磋商采购|网上直购'
  579. # u'|国际公开竞争性招标)'
  580. u'(公开招标|竞争性磋商|竞争性谈判|公开采购|单一来源'
  581. u'|竞争性谈判|询价|电子书面竞投'
  582. u'|网上电子投标|比质比价|询单|询比采购|比选|单一来源采购'
  583. u'|网上招标|分散采购'
  584. u'|竞谈竞价|网上直购|公开竞谈'
  585. u'|库内邀请)'
  586. )
  587. reg1_not = re.compile(u'及单一来源|询价小组成员|除单一来源|竞争性谈判邀请函|询价记录')
  588. reg3 = re.compile(u'(采购方式:邀请|采购方式:公开|采购方式:询价|分散采购|公开招标|竞价|磋商|询比|竞标|邀请招标|公开招募|公开招租)')
  589. reg_standard = re.compile(u'(公开招标|竞争性磋商|竞争性谈判|单一来源'
  590. u'|竞争性谈判|询价|邀请招标|公开招募|询比|电子书面竞投'
  591. u'|网上电子投标|比质比价|询单|比选'
  592. u'|公开招租|网上招标|分散采购'
  593. u'|网上直购|公开竞谈|采购方式:邀请|采购方式:公开|采购方式:询价)'
  594. )
  595. text_list = df["text"].to_list()
  596. output_list = []
  597. for index in range(len(text_list)):
  598. input_str = text_list[index]
  599. # 把一些混淆的词先替换掉
  600. input_str = re.sub(reg1_not, "", input_str)
  601. match = reg.search(input_str)
  602. output_str = None
  603. # 根据正则表达式匹配
  604. if match:
  605. # 判断长度,截断
  606. if len(match.group()) >= 15:
  607. ss = re.split(",|\.|,|。|;|;", match.group())
  608. # 判断所需的字符串在哪一段
  609. for i in range(len(ss)):
  610. if re.search(reg1, ss[i]):
  611. output_str = ss[i]
  612. break
  613. else:
  614. output_str = match.group()
  615. else:
  616. match2 = re.search(reg2, input_str)
  617. if match2:
  618. output_str = match2.group()
  619. else:
  620. match1 = re.search(reg1, input_str)
  621. if match1:
  622. output_str = match1.group()
  623. # 再判断一次长度
  624. if output_str is not None:
  625. if len(output_str) >= 15:
  626. match2 = re.search(reg2, input_str)
  627. if match2:
  628. output_str = match2.group()
  629. if len(output_str) >= 15:
  630. match1 = re.search(reg1, input_str)
  631. if match1:
  632. output_str = match1.group()
  633. # 最后输出还为空,匹配一些易混淆的词
  634. if output_str is None:
  635. match3 = re.search(reg3, input_str)
  636. if match3:
  637. output_str = match3.group()
  638. if output_str is not None:
  639. if not re.search("分散采购|采购方式:邀请", output_str):
  640. # 公开采购转为公开招标
  641. output_str = re.sub("公开采购", "公开招标", output_str)
  642. # 去掉第一个字符冒号
  643. ss = re.split("::|:|:", output_str)
  644. output_str = ss[-1]
  645. # 去掉采购、方式、采用
  646. output_str = re.sub("(采购|方式|采用|出售|进行|直接(|现就本次|招标为)", "", output_str)
  647. # 使用标准标签过滤
  648. match4 = re.search(reg_standard, output_str)
  649. if match4:
  650. output_str = match4.group()
  651. output_list.append(output_str)
  652. df["re"] = pd.DataFrame(output_list)
  653. df.to_csv("C:\\Users\\admin\\Desktop\\bidway_text1.csv")
  654. def re_serviceTime():
  655. df = pd.read_csv("C:\\Users\\admin\\Desktop\\serviceTime_text.csv")
  656. # reg = re.compile(u'(周期|工期|服务期|服务时限|交货时间|履行期限|服务周期|交货期|供货期|合格工期'
  657. # u'|投标工期|设计工期|合格服务周期|总工期|施工工期|服务时间|流转期限|维护期限'
  658. # u'|完成时间|交付|服务期限|中标工期|项目周期|计划工期'
  659. # u')'
  660. # u'(.*)(日止|日内|年|年度|天|月|日|周内|年内)')
  661. reg0 = re.compile(u'(服务时间:|服务期限:)'
  662. u'([^至到]*)'
  663. u'(至|到)'
  664. u'([^日时]*)'
  665. u'(日|时)'
  666. )
  667. reg = re.compile(u'(周期|工期|服务期|服务时限|履行期限|服务周期|交货期|供货期|合格工期'
  668. u'|投标工期|设计工期|合格服务周期|总工期|施工工期|服务时间|流转期限|维护期限'
  669. u'|完成时间|交付|服务期限|中标工期|项目周期|计划工期'
  670. u')'
  671. # u'([^日止|日内|年|年度|月|日|周内|年内|\d+]*)'
  672. u'([^年月日\d+]*)'
  673. u'([\d+|一|二|三|四|五|六|七|八|九|十])'
  674. u'(日止|日内|年|年度|月|日|周内|年内|日历天|工作日|\d+日|\d+|起)'
  675. u'(个月|\(日历天\)|)')
  676. reg_not = re.compile(u'(工期延误|工期节点|工期管理|合同履行日期:见|服务期限截止|交付使用'
  677. u'|服务期限:1、|工期\(交货期\):|工期、)')
  678. reg1 = re.compile(u'(合同签订|签订合同|合同履行日期)'
  679. u'([^\d]*)'
  680. u'(\d+|一|二|三|四|五|六|七|八|九|十)'
  681. u'(个|)'
  682. u'(日止|日内|年|年度|月|日历天|日|周内|年内|工作日)'
  683. )
  684. reg2 = re.compile(u'(服务期限|履行期限|工期|服务期|维护期限|服务周期|工期,\(日历天\),'
  685. u'|服务期\(日历天\)|预定工期\(日历天\)|期限要求)'
  686. u'(:|:|)+'
  687. u'(\d+|一|二|三|四|五|六|七|八|九|十|两|贰|叁)'
  688. u'(日止|日内|年|年度|月|日历天|日|周内|年内|工作日|天|)'
  689. )
  690. text_list = df["text"].to_list()
  691. output_list = []
  692. for index in range(len(text_list)):
  693. input_str = text_list[index]
  694. input_str = re.sub(reg_not, "", input_str)
  695. output_str = ""
  696. unit = ""
  697. match0 = re.findall(reg0, input_str)
  698. if match0:
  699. ss = ""
  700. for i in range(len(match0)):
  701. s = ""
  702. for j in range(len(match0[i])):
  703. s = s + match0[i][j]
  704. ss = ss + s
  705. if i < len(match0)-1:
  706. ss = ss + " "
  707. output_str = ss
  708. # 太长的裁剪
  709. if len(output_str) >= 40:
  710. sss = output_str.split(",")
  711. output_str = sss[0]
  712. print("0: ", output_str)
  713. else:
  714. match = reg.findall(input_str)
  715. if match:
  716. ss = ""
  717. for i in range(len(match)):
  718. s = ""
  719. if "天" in match[i]:
  720. unit = "天"
  721. if "月" in match[i]:
  722. unit = "月"
  723. for j in range(2, len(match[i])):
  724. s = s + match[i][j] + unit
  725. ss = ss + s
  726. if i < len(match)-1:
  727. ss = ss + " "
  728. output_str = ss
  729. print(output_str)
  730. else:
  731. match1 = re.findall(reg1, input_str)
  732. if match1:
  733. ss = ""
  734. for i in range(len(match1)):
  735. s = ""
  736. if "天" in match[i]:
  737. unit = "天"
  738. if "月" in match[i]:
  739. unit = "月"
  740. for j in range(2, len(match1[i])):
  741. s = s + match1[i][j] + unit
  742. ss = ss + s
  743. if i < len(match1)-1:
  744. ss = ss + " "
  745. output_str = ss
  746. print("1: ", output_str)
  747. else:
  748. match2 = re.findall(reg2, input_str)
  749. if match2:
  750. ss = ""
  751. for i in range(len(match2)):
  752. s = ""
  753. for j in range(2, len(match2[i])):
  754. s = s + match2[i][j]
  755. ss = ss + s
  756. if i < len(match2)-1:
  757. ss = ss + " "
  758. output_str = ss
  759. print("2: ", output_str)
  760. output_list.append(output_str)
  761. # for index in range(len(text_list)):
  762. # input_str = text_list[index]
  763. # match = reg.search(input_str)
  764. # output_str = None
  765. # # 根据正则表达式匹配
  766. # if match:
  767. # # 匹配成功,先匹配冒号,再分割冒号后的第一个标点
  768. # match2 = re.search(u':|:', match.group())
  769. # if match2:
  770. # ss = re.split(",|\.|,|。|;|;", match.group()[match2.span()[0]:])
  771. # output_str = match.group()[:match2.span()[0]] + ss[0]
  772. # else:
  773. # ss = re.split(",|\.|,|。|;|;", match.group())
  774. # output_str = ss[0]
  775. #
  776. # # 再匹配一些特殊情况
  777. # # 匹配出太长的,就是需要截断
  778. # if len(output_str) >= 40:
  779. # ss = re.split(",|\.|,|。|;|;", output_str)
  780. # output_str = ss[0]
  781. # # 错误分类的:服务期限:1、资金来源:自筹资金
  782. # if re.search(u"来源|1、|资金", output_str):
  783. # output_str = None
  784. # # 有完成、交货这些字眼分割
  785. # if output_str is not None:
  786. # ss = re.split("完工|质量", output_str)
  787. # if len(ss) > 1:
  788. # output_str = ss[0]
  789. # else:
  790. # match1 = re.search(reg1, input_str)
  791. # if match1:
  792. # # 匹配成功,先匹配冒号,再分割冒号后的第一个标点
  793. # match2 = re.search(u':|:', match1.group())
  794. # if match2:
  795. # ss = re.split(",|\.|,|。|;|;", match1.group()[match2.span()[0]:])
  796. # output_str = match1.group()[:match2.span()[0]] + ss[0]
  797. # else:
  798. # ss = re.split(",|\.|,|。|;|;", match1.group())
  799. # output_str = ss[0]
  800. # # 再匹配一些特殊情况
  801. # # 匹配出太长的,就是需要截断
  802. # if len(output_str) >= 40:
  803. # ss = re.split(",|\.|,|。|;|;", output_str)
  804. # output_str = ss[0]
  805. df["re"] = pd.DataFrame(output_list)
  806. df = df[["document_id", "text", "categoryAndIndex", "word", "category", "re"]]
  807. df.to_csv("C:\\Users\\admin\\Desktop\\serviceTime_text1.csv")
  808. return
  809. def re_serviceTime2():
  810. df = pd.read_csv("C:\\Users\\admin\\Desktop\\serviceTime_text.csv")
  811. text_list = df["text"].to_list()
  812. output_list = []
  813. keyword = u'(' \
  814. u'工期/交货期/服务期|项目周期|工期\(交货期\)|计划工期|工期要求:|服务期|服务时限|履行期限|服务周期|供货期|合格工期' \
  815. u'|投标工期|设计工期|合格服务周期|总工期|服务时间|流转期限|维护期限|服务时限|交货期' \
  816. u'|完成时间|服务期限|中标工期|项目周期|期限要求|周期|工期:' \
  817. u')'
  818. # 替换 易混淆关键词
  819. reg_not = re.compile(u'(工期延误|工期节点|工期管理|合同履行日期:见|服务期限截止|交付使用'
  820. u'|服务期限:1、|工期、)|截止|合同签订日期:|保证金在合同签订'
  821. u'|工期情况|签订合同前,|计划工期内|服务期内|服务期限应按')
  822. # 匹配 特定词 + 数字
  823. # reg0 = re.compile(u'(工期/交货期/服务期|服务期限|服务期)'
  824. # u'(:)'
  825. # u'(\d+)')
  826. # 匹配 匹配 关键词 + 年月日时至年月日时止|年月日至年月日
  827. reg0 = re.compile(u'(服务期|服务期限|服务周期|服务时间)'
  828. u'([^至]*)'
  829. u'(至)'
  830. u'([^日天止]*)'
  831. u'(日|天|止)')
  832. # 匹配 特定词 + 数字 + 年月周天
  833. reg1 = re.compile(u'(工期/交货期/服务期|服务期限|服务期|工期,|工期要求|中介服务时限)'
  834. u'([^天年月日]*[\d+一二三四五六七两叁贰壹肆伍])'
  835. u'(天|个月|个日历天|年|日历天|日|\(日历天\)|\(天\))')
  836. # 匹配 特定词 + 数字 + 年月周天
  837. reg2 = re.compile(u'(合同签订|签订合同|合同履行日期)'
  838. u'([^\d年]*)'
  839. u'(\d+|一|二|三|四|五|六|七|八|九|十)'
  840. u'(个|)'
  841. u'(日止|日内|年|年度|月|日历天|日|周内|年内|工作日|天内)'
  842. )
  843. # 匹配 特定词 + (天/日历天) + 数字
  844. reg3 = re.compile(u'(工期,|工期|服务时间|服务期)'
  845. u'(\(日历天\),|\(日历天\)|\(天\))'
  846. u'([^\d+]*)'
  847. u'(\d+)')
  848. # 匹配 特定词 + (年) + 数字
  849. reg6 = re.compile(u'(服务期限)'
  850. u'(\(年\))'
  851. u'([^\d+]*)'
  852. u'(\d+)')
  853. # 匹配 关键词 + 数字 + 年/月/天
  854. reg4 = re.compile(keyword +
  855. u'([^天年月日]*)'
  856. u'([\d+一二三四五六七两叁贰壹肆伍])'
  857. u'(,|)'
  858. u'(天|个月|年|个日历天|日历天|日|\(日历天\)|\(天\))')
  859. # 匹配 关键词 + 年月日时至年月日时止
  860. # reg5 = re.compile(keyword +
  861. # u'([^至]*)'
  862. # u'(至)'
  863. # u'([^止]*)'
  864. # u'(止)')
  865. # 匹配 关键词 + 年月日至年月日
  866. # reg6 = re.compile(keyword +
  867. # u'([^至]*)'
  868. # u'(至)'
  869. # u'([^日天]*)'
  870. # u'(日|天)')
  871. # 匹配 优先级低的词 + 年月日
  872. reg5 = re.compile(u'(服务要求|服务时限)'
  873. u'([^年日]*)'
  874. u'(年|日)')
  875. for index in range(len(text_list)):
  876. # 初始化
  877. output_str = ""
  878. input_str = text_list[index]
  879. # 替换
  880. input_str = re.sub(reg_not, "", input_str)
  881. # 匹配
  882. if output_str == "":
  883. output_str = re_findAllResult(reg3, input_str, unit="天", index=2)
  884. if output_str == "":
  885. output_str = re_findAllResult(reg6, input_str, unit="年", index=2)
  886. if output_str == "":
  887. output_str0 = re_findAllResult(reg0, input_str, index=1)
  888. output_str1 = re_findAllResult(reg1, input_str, index=1)
  889. # 同时匹配两个表达式,如果一个是空就选另一个,两个皆不为空,判断长度
  890. if output_str0 == "" and output_str1 == "":
  891. output_str = ""
  892. elif output_str0 == "":
  893. output_str = output_str1
  894. elif output_str1 == "":
  895. output_str = output_str0
  896. else:
  897. if len(output_str0) >= 100:
  898. output_str = output_str1
  899. elif len(output_str0) >= len(output_str1):
  900. output_str = output_str0
  901. else:
  902. output_str = output_str1
  903. if output_str == "":
  904. output_str = re_findAllResult(reg2, input_str, index=2)
  905. if output_str == "":
  906. output_str = re_findAllResult(reg4, input_str, index=1)
  907. if output_str == "":
  908. output_str = re_findAllResult(reg5, input_str, index=1)
  909. # 将冒号删掉
  910. output_str = re.sub(":|:|限|交货期/服务期|,|\)|\(", "", output_str)
  911. # 字符串中包含断句符号,裁剪
  912. ss = re.split("。|,|;", output_str)
  913. output_str = ss[0]
  914. # 添加
  915. output_list.append(output_str)
  916. df["re"] = pd.DataFrame(output_list)
  917. df = df[["document_id", "text", "categoryAndIndex", "word", "category", "re"]]
  918. df.to_csv("C:\\Users\\admin\\Desktop\\serviceTime_text2.csv")
def re_serviceTime3():
    """Third-generation period extraction: one composite named-group regex
    (before + charac + before2 + center + after) plus a fallback
    ("... 止"), recording both the matched text and its character offsets,
    then re-slicing the ORIGINAL text by those offsets to build the "re"
    column of serviceTime_text4.csv.
    """
    df = pd.read_csv("C:\\Users\\admin\\Desktop\\serviceTime_text.csv")
    text_list = df["text"].to_list()
    # Accumulators: extracted strings and their [start, end) offsets.
    output_list = []
    text_index_list = []
    # Named group "before": the period keyword (longest variants first).
    before = '(?P<before>'\
    '工期/交货期/服务期|工期,\(日历天\)|工期\(交货期\)|合格工期\(天\)|服务期限\(年\)|工期\(天\)' \
    '|工期要求|项目周期|工期\(交货期\)|计划工期\(服务期限\)|服务时限|履行期限|服务周期|供货期' \
    '|合格工期|计划工期\(服务期\)|服务期\(日历天\)|服务,期|交货\(完工\)时间|交付\(服务、完工\)时间' \
    '|交货时间|工期\(日历天\)' \
    '|服务期限为|计划工期|工期要求|服务期限|服务期' \
    '|投标工期|设计工期|合格服务周期|总工期|服务时间|流转期限|维护期限|服务时限|交货期|服务要求' \
    '|完成时间|服务期限|中标工期|项目周期|期限要求|周期|工期|供货期|合同履行日期|计划周期' \
    ')'
    # Named group "before2": optional "from contract signing ..." prefix
    # (ends with an empty alternative, so it may match nothing).
    before2 = '(?P<before2>' \
    '合同签订后|合同签订之日起|约|自合同签订之日起|开工后|不超过|签订合同后|系统开发' \
    '|合同签订之日起至|自合同签订之日|合同签定后|自签订合同之日起|自合同签订起' \
    '|自合同签订生效之日起|自合同签订后不超过|中选后|均为|合同签订日至|本项目合同期|' \
    ')'
    # Named group "charac": optional colon/comma separators.
    charac = '(?P<charac>' \
    '[::,,]*' \
    ')'
    # Named group "center": a date range, a single date, or a number.
    center = '(?P<center>' \
    '[自]?\d+年\d+月\d+日至\d+年\d+月\d+日|\d+年\d+月\d+日|[\d一二三四五六七两叁贰壹肆伍]+' \
    ')'
    # Named group "after": optional unit suffix (may be empty).
    after = '(?P<after>' \
    '天|个月|年|个日历天|日历天|日|\(日历天\)|\(天\)|周内|,日历天|' \
    ')'
    reg = re.compile(before + charac + before2 + center + after)
    # Fallback: keyword + anything up to the first "止".
    reg1 = re.compile(before + charac + '(.*?止)')
    reg_not = re.compile(u'(工期延误|工期节点|工期管理|交付使用'
    u'|工期、)'
    u'|工期情况|划工期内|服务期内')
    reg_not1 = re.compile(u'(履行日期:见|服务期限应按|签订合同前,|服务期限应按'
    u'|务期限:1、|同签订日期:|证金在合同签|服务期限截止'
    u')')
    reg_not2 = re.compile(u'截止|1\.|1、')
    for index in range(len(text_list)):
        # Per-row init.
        output_str = ""
        input_str = text_list[index]
        # Mask confusable phrases with #-runs instead of deleting them —
        # presumably to keep character offsets aligned with the original
        # text (the #-run lengths mirror the typical phrase lengths), since
        # the offsets are later applied to row["text"].  NOTE(review): a
        # few alternatives (e.g. 工期、) differ in length from the mask, so
        # offsets can drift by a character there — confirm.
        input_str = re.sub(reg_not, "####", input_str)
        input_str = re.sub(reg_not1, "######", input_str)
        input_str = re.sub(reg_not2, "##", input_str)
        output_str, text_index = re_findAllResult(reg, input_str)
        if len(text_index) == 0:
            output_str, text_index = re_findAllResult(reg1, input_str)
        # Collect (offsets stringified so they survive the DataFrame round
        # trip and can be parsed back with ast.literal_eval below).
        output_list.append(output_str)
        text_index_list.append(str(text_index))
    df["text_index"] = pd.DataFrame(text_index_list)
    index_to_word = []
    for index, row in df.iterrows():
        i_list = ast.literal_eval(row["text_index"])
        word = ""
        for i in range(len(i_list)):
            # Re-slice the ORIGINAL text by the recorded offsets.
            word = word + row["text"][i_list[i][0]:i_list[i][1]]
            if i != len(i_list) - 1:
                word = word + " "
        if len(word) >= 120:
            # Too long to be a real period phrase — discard it.
            word = ""
            # NOTE(review): chained indexing (df[col].iloc[idx] = ...) may
            # write to a temporary Series and not update df — verify.
            df["text_index"].iloc[index] = []
        index_to_word.append(word)
    df["re"] = pd.DataFrame(index_to_word)
    df = df[["document_id", "text", "categoryAndIndex", "word", "category", "re", "text_index"]]
    df.to_csv("C:\\Users\\admin\\Desktop\\serviceTime_text4.csv")
  987. def re_findAllResult(reg, input, unit="", index=0):
  988. '''
  989. :param reg: 正则表达式
  990. :param input: 待匹配句子
  991. :param unit: 需要加的单位
  992. :param index: 字符串拼接的开始位置
  993. :return: 正则后的字符串
  994. '''
  995. match = re.findall(reg, input)
  996. output = ""
  997. if match:
  998. ss = ""
  999. for i in range(len(match)):
  1000. s = ""
  1001. for j in range(index, len(match[i])):
  1002. s = s + match[i][j]
  1003. if unit != "" and j == len(match[i])-1:
  1004. s = s + unit
  1005. ss = ss + s
  1006. if i < len(match)-1:
  1007. ss = ss + " "
  1008. output = ss
  1009. # 全文下标
  1010. text_index = []
  1011. match1 = re.finditer(reg, input)
  1012. for i in match1:
  1013. d = i.groupdict()
  1014. print(d)
  1015. if d.get("before") is not None:
  1016. front_len = len(d.get("before")) + len(d.get("charac"))
  1017. else:
  1018. front_len = 0
  1019. text_index.append([i.start()+front_len, i.end()])
  1020. return output, text_index
  1021. def calculateLen(ss, i):
  1022. front_len = 0
  1023. back_len = 0
  1024. print("------")
  1025. print(i)
  1026. print(ss)
  1027. for index in range(i):
  1028. print(ss[index], len(ss[index]))
  1029. front_len += len(ss[index])
  1030. for index in range(i+1, len(ss)):
  1031. back_len += len(ss[index])
  1032. return front_len, back_len
def test_re():
    """Manual scratch pad for trying the service-period regexes against
    sample tender-notice texts; currently runs reg4 over test_text4 and
    prints each match with its span.  Not an automated test.
    """
    keyword = u'(' \
    u'工期/交货期/服务期|项目周期|工期\(交货期\)|计划工期|工期要求:|服务期|服务时限|履行期限|服务周期|供货期|合格工期' \
    u'|投标工期|设计工期|合格服务周期|总工期|服务时间|流转期限|维护期限|服务时限|交货期' \
    u'|完成时间|服务期限|中标工期|项目周期|期限要求|周期|工期:' \
    u')'
    # Keyword + "... 至/到 ..." date-range form.
    reg0 = re.compile(u'(服务时间:|服务期限:)'
    u'([^至到]*)'
    u'(至|到)'
    u'([^日时]*)'
    u'(日|时)'
    )
    # Generic period keyword + filler + number + unit.
    reg = re.compile(u'(周期|工期|服务期|服务时限|履行期限|服务周期|交货期|供货期|合格工期'
    u'|投标工期|设计工期|合格服务周期|总工期|施工工期|服务时间|流转期限|维护期限'
    u'|完成时间|交付|服务期限|中标工期|项目周期|计划工期'
    u')'
    # u'([^日止|日内|年|年度|月|日|周内|年内|\d+]*)'
    u'([^年月日\d+]*)'
    u'([\d+|一|二|三|四|五|六|七|八|九|十])'
    u'(日止|日内|年|年度|月|日|周内|年内|日历天|工作日|\d+日|\d+|起*)'
    u'(个月|\(日历天\)|)')
    # Specific keyword + number + unit.
    reg1 = re.compile(u'(工期/交货期/服务期:|服务期限|服务期|工期,|工期要求|中介服务时限)'
    u'([^天年月日]*[\d+一二三四五六七两叁贰壹肆伍])'
    u'(天|个月|个日历天|年|日历天|日|\(日历天\)|\(天\))')
    # Keyword + mandatory colon(s) + number + unit.
    reg2 = re.compile(u'(服务期限|履行期限|工期|服务期|维护期限|服务周期|工期,\(日历天\),)'
    u'(:|:)+'
    u'(\d+|一|二|三|四|五|六|七|八|九|十|两|贰|叁)'
    u'(日止|日内|年|年度|月|日历天|日|周内|年内|工作日)'
    )
    s = u'(项目周期|周期|工期/交货期/服务期|服务期|服务时限|履行期限|服务周期|交货期|供货期|合格工期' \
    u'|投标工期|设计工期|合格服务周期|总工期|施工工期|服务时间|流转期限|维护期限' \
    u'|完成时间|交付|服务期限|中标工期|项目周期|计划工期)'
    reg3 = re.compile(s +
    u'([^天年月日]*)'
    u'([\d+一二三四五六七两叁贰壹肆伍])'
    u'(,|)'
    u'(天|个月|年|日历天|日|\(日历天\)|\(天\))')
    # Keyword + "... 至 ... 止" range.
    reg_00 = re.compile(u'(服务期限|工期|服务时间)'
    u'([^至]*)'
    u'(至)'
    u'([^止]*)'
    u'(止)')
    # Keyword + "... 至 ... 日" range.
    reg_01 = re.compile(u'(服务期限|工期|服务时间)'
    u'([^至]*)'
    u'(至)'
    u'([^日]*)'
    u'(日)')
    # Full keyword list + number + unit (the pattern exercised below).
    reg4 = re.compile(keyword +
    u'([^天年月日]*)'
    u'([\d+一二三四五六七两叁贰壹肆伍])'
    u'(,|)'
    u'(天|个月|年|个日历天|日历天|日|\(日历天\)|\(天\))')
    # Low-priority keywords + 年/日.
    reg5 = re.compile(u'(服务要求|服务时限)'
    u'([^年日]*)'
    u'(年|日)')
    # Sample texts copied from real tender notices.
    test_text0 = "保险服务期限:自2020年1月1日零时起至2021年12月31日24时止的自然年度" \
    " 服务时间:2020年05月25日至2020年08月08日"
    test_text = ",中标候选人公示快照。北京北方车辆集团有限公司原试验工段改扩建工程中标候选人公示,原试验工段改扩建工程,(招标项目编号:C1100000096007025006),于2020年05月21日在北京市市辖区西城区西便门内大街79号4号楼409进行了开标、评标等工作,现将本次评标结果推荐中标候选人公示如下:" \
    "标段(包)编号:C1100000096007025006001,标段(包)名称:原试验工段改扩建工程,第一名:北京永兴丰源建筑工程有限公司,投标报价(元):2,010,700.02,质量标准:合格工期(天):90,项目负责人姓名:周秋红相关证书名称:二级建造师相关证书编号:京211141545754,建筑工程施工总承包壹级,建筑装修装饰工程专业承包贰级," \
    "钢结构工程专业承包叁级,第二名:北京市九方弘业建筑工程有限责任公司,投标报价(元):1,988,322.19,质量标准:合格工期(天):90,项目负责人姓名:任敬科相关证书名称:二级建造师相关证书编号:01453994,建筑工程施工总承包叁级,钢结构工程专业承包叁级,第三名:河南德恒建设工程有限公司,投标报价(元):1,996,228.17,质量" \
    "标准:合格工期(天):90,项目负责人姓名:张献军相关证书名称:二级建造师相关证书编号:豫241141449543,建筑工程施工总承包贰级,公示期:2020年05月26日-2020年05月28日,特此公示!,对评标结果如有异议,请于2020年05月28日前在中国兵器电子招标投标交易平台上进行提出。联系人:" \
    "李茜,联系电话:13910533516,北京五环国际工程管理有限公司,2020年05月25日,"
    test_text1 = "服务时间:合同签订之日起90日历天,联系电话:13910533516,北京五环国际工程管理有限公司,2020年05月25日"
    test_text2 = "服务期限:两日 服务要求:1年 服务时限:中选后15个工作日完成"
    test_text3 = "工期/交货期/服务期:30天 标准:合格工期(天) 服务期限:两年。具体采购内容和要求详见招标文件年 项目周期:40日历天"
    test_text4 = u'''
,大庆禾工煤炭分质清洁利用项目-临时用电二期工程设备、物资采购中标候选人公示,更多咨询报价请点击:,大庆禾工煤炭分质清洁利用顶目-临时用电二期工程设备、物资釆购中标候选人,(招标编号:XYwZ-20200309-5),公示结束时间:2020年04月03日,、评标情况,标段(包)[001大庆禾工煤嶽分质清洁利用项目-临时用屯二期工程设备、物资采购,中标候选人基本情况,
中标候选人第1名:哈尔滨龙网电力设备有限公司,投标报价:19.98万元,质量,合格,工期/交货期/服务期:30天,中标候选人第2名:
哈尔滨昊龙电气没备制造有限公司,投标报价:19.87万元,质,量:合格,工期/交货期/服务期:30天,
中标侯选人第3名:江南电气有限公司,投标报价:20.13万元,质量:合格,工期,交货期/服务期:30天:2、中标候选人按照招标文件要求承诘的项目伉责人情况,中标侯选人(哈尔滨龙网电力设备有限公司)的项目负贵人:宋环宇身份证,10398309240912;,中标候选人(哈尔滨昊龙电气设各制造有限公司)的项目负贵人:尹水生身份证,2:0227197902120112,中标候选人(江南电气有限公司)的项目负贵人:秦世亮身份证,230104197410012334;,3、中标候选人响应招标文
件要求的资格能力条件,中标候选人(哈尔滨龙网电力设备有限公司)的资格能力条件:完全响应招标公告;中标选人(哈尔滨昊龙电气没备制造有公司)的资格能力条件:完伞响应招标公,告,中标候选人(江南电气有限公司)的资格能力条件:完仝响应招标公告,、提出异议的渠道和方式,以上结果公示三日,公示期间投标人或者其他利害关系人如有异议请以书面形式向招标,人提出;如无异议,预中标人即为中标人。三、其他,项目编号:-20200309-5,项目名称:大庆禾工煤炭分质清
沽划用项目-临时电二期工程设备、物资采购,计划供货期:合同签订后30日内供货,交货地点:施工现场地面交货,质量标准:符合国家及国家电网行业合格标准,招邡方式:公开招标,开标时间:2020华3月3日9时30分,公示起止日期:2020年4月1日至2020年±月3日,经评标委员会评审,中标候选人由高到低排序前三名为:第一名:晗尔滨龙网电力设备有限公司,第二名:晗尔滨昊龙电气设备制造有限公司,第三名:江南电气有限公司,点标有,经评标委员会评审,依法确定排名第一的
中标候选人为预中标人。预中标人为:晗尔滨龙网电力设备有限公司,颀中标价:¥199,800.00元,以上结果公示三日,公小期间投标人或者其他利害关系人如有异议请以书面形式向招标入提,出;如无异议,预中标人即为中标人。监督部门及联系方式:黑龙江北星电力有跟公罰、0459-6504811,四、监督部门,本招标项目的监督部门为黑龙江北星电力有限公司。五、联系方式,招标人:黑龙江北星电力有限公司,地址:大庆市让胡路区中买大街南段28号,联系人:卜先生,电话:0459-
6604811,电子邮件:418864qgq.com,招标代理机构:黑龙江省信亿招标有限公司,地址:哈尔滨市香坊区红滨大街1号516室,联系人:张海洋,电话;0451-55151625,电子邮件:xyzb5164163.com,招标人或其招标代理机构主要负责人(项目负贲人,(签名),1,招标人或其招标代理机构:与,盖章),
'''
    # Exercise reg4 against the long sample and dump each hit.
    s = re.finditer(reg4, test_text4)
    # s = re.sub(reg5, "", test_text2)
    # print(s)
    # print(s.span())
    # s = re.match("交货期/服务期:", "交货期/服务期:365天")
    # print(s.span())
    # if s:
    # print(s)
    # print("计划工期:3个月 工期:3个月".split(" "))
    for ss in s:
        # sss = (0, 0)
        print(ss.group())
        print(ss.span())
        # print(sss[1])
  1122. def re_Accuracy(filename):
  1123. df = pd.read_csv("C:\\Users\\admin\\Desktop\\"+filename+".csv")
  1124. flag = []
  1125. flag_1 = 0
  1126. flag_0 = 0
  1127. for index, row in df.iterrows():
  1128. if row["word"] == row["re"]:
  1129. flag.append(1)
  1130. flag_1 += 1
  1131. elif str(row["re"]) in row["word"] or row["word"] in str(row["re"]):
  1132. flag.append(1)
  1133. flag_1 += 1
  1134. else:
  1135. flag.append(0)
  1136. flag_0 += 1
  1137. print("Accuracy: ", flag_1/(flag_1+flag_0))
  1138. df["correct"] = flag
  1139. df.to_csv("C:\\Users\\admin\\Desktop\\"+filename+".csv")
  1140. def getTestData():
  1141. df = pd.read_csv("C:\\Users\\admin\\Desktop\\Person_Sentence_all_re_washed2.csv")
  1142. number0 = 500
  1143. number1 = 1500
  1144. number2 = 600
  1145. number3 = 600
  1146. number4 = 500
  1147. df0 = df[df["Label"] == 0][:number0]
  1148. df0_deleted = df[df["Label"] == 0][number0:]
  1149. df1 = df[df["Label"] == 1][:number1]
  1150. df1_deleted = df[df["Label"] == 1][number1:]
  1151. df2 = df[df["Label"] == 2][:number2]
  1152. df2_deleted = df[df["Label"] == 2][number2:]
  1153. df3 = df[df["Label"] == 3][:number3]
  1154. df3_deleted = df[df["Label"] == 3][number3:]
  1155. df4 = df[df["Label"] == 4][:number4]
  1156. df4_deleted = df[df["Label"] == 4][number4:]
  1157. df_test = pd.concat([df0, df1, df2, df3, df4])
  1158. df_deleted = pd.concat([df0_deleted, df1_deleted, df2_deleted, df3_deleted, df4_deleted])
  1159. df_test.columns = ["index", "Word", "Label", "Sentence", "BIO"]
  1160. df_test = df_test.reset_index()
  1161. df_test = df_test[["Word", "Label", "Sentence", "BIO"]]
  1162. df_deleted.columns = ["index", "Word", "Label", "Sentence", "BIO"]
  1163. df_deleted = df_deleted.reset_index()
  1164. df_deleted = df_deleted[["Word", "Label", "Sentence", "BIO"]]
  1165. df_test.to_csv("C:\\Users\\admin\\Desktop\\test2000.csv")
  1166. df_deleted.to_csv("C:\\Users\\admin\\Desktop\\Person_Sentence_Notest.csv")
  1167. def washData():
  1168. df = pd.read_csv("C:\\Users\\admin\\Desktop\\Person_Sentence_all_re_washed2.csv")
  1169. # Agency_person
  1170. # reg1 = re.compile(u'(代理 机构 :|代理 机构 名称 :|代理 机构 联系 方式 :|采购 代理 机构|询价 代理 :'
  1171. # u'|代理 公司 :|招标 代理 :).*(联系人).*(\|\|)')
  1172. # # reg1 = re.compile(u'(代理 机构 联系 方式 :|招标 代理 单位 :|代理 机构 名称 :|采购 代理 机构 信息'
  1173. # # u'|交易 代理 机构).*(\|\|)')
  1174. # reg2 = re.compile(u'招标 人|质疑|采购|报名|监督|发布|出售|技术|中标|项目 联系人|项目 负责人'
  1175. # u'|招标 联系人|招标 单位 联系人')
  1176. # reg3 = re.compile(u'地址。*地址')
  1177. # Tenderee_person
  1178. # reg1 = re.compile(u'(采购 人 :|招标 人 :|采购 单位 :|采购 单位 名称 :|采购 人 名称 :|采购 单位 联系 方式 :'
  1179. # u').*(联系人|联系 方式 :).*(\|\|)')
  1180. # reg1 = re.compile(u'(招标 联系人 :|招标 人 联系 方式|招标 联系人 及 地址 :|招标 人 联系人 :'
  1181. # u'|招标 单位 :|采购 人 信息|采购 人 名称 :|采购 单位 联系人 :|采购 人 联系人).*(\|\|)')
  1182. # reg1 = re.compile(u'(技术 部分|商务 部分).*(\|\|)')
  1183. # reg2 = re.compile(u'代理|质疑|供应商|法人|发布|监督|项目|技术|投诉|服务 中心|文件 编制|部门|组织')
  1184. # # 表格型的数据被压成一行,前后分别为招标联系人、代理联系人
  1185. # reg3 = re.compile(u'(联系人 :).*(联系人 :)')
  1186. # 评审专家
  1187. # reg1 = re.compile(u'(评审 专家 :|评审 专家 名单|专家 名单 :|专家 :|评审 委员会 成员 名单 :'
  1188. # u'|评委 姓名 :|评审 委员会).*(\|\|)')
  1189. # reg2 = re.compile(u'招标 人|质疑')
  1190. # person_person
  1191. # reg1 = re.compile(u'(项目 联系人 :|监督 管理 部门|出让 人 :|监督 :|中标 单位 :|竞价 开启|质疑|商务 咨询 :|项目 名称 :'
  1192. # u'|招标 管理 办公室|负责人 姓名 :|技术 负责人|项目 负责人|法定 代表人|发布人 :|招标 人员 :'
  1193. # u'|项目 负责人 联系 电话 :|项目 经理 :|代理 人员 :|商务 联系人|法人|咨询 电话 :|投诉 电话 :'
  1194. # u'|受理人 :|收件人 :|联络人 :|项目 咨询 联系人 :|项目 报名 联系 :|收货人 :|交易 单位 :'
  1195. # u'|质疑 答复 联系人 :|现场 联系人|项目 总监 :|质疑 联系人|联系 确认|标的 查看|接收人|联系人 :'
  1196. # u'|技术 支持|项目 总工|审核 人|监理 工程师 :).*(\|\|)')
  1197. # reg1 = re.compile(u'(项目 联系人 :|项目 单位 :|监督 管理 部门 名称 :|质疑 答复 联系人 :|成交 单位 :'
  1198. # u'|项目 负责人|供应商 地址 :|机构 联络人 :|技术 负责人 :|采购 管理 机构 :'
  1199. # u'|项目 联系人).*(\|\|)')
  1200. # reg1 = re.compile(u'(项目 单位 :|招标 服务 中心|采购 管理 办公室|项目 名称 :|采购 管理 机构 :'
  1201. # u'|发包 单位 :).*(联系人).*(\|\|)')
  1202. # reg1 = re.compile(u'(招标 组织 单位 :|审核 人 :|采管 办 联系 方式 :|采购 项目 联系 方式'
  1203. # u'|询价 书 名称|疑问|资格 审查|提出|采购 文件|公众 号|项目 联系人 :|技术 负责人'
  1204. # u'|发布 人 :|联系 确认).*(\|\|)')
  1205. # reg1 = re.compile(u'(法定 代表人 :|委托 代理人 :).*(\|\|)')
  1206. # reg1 = re.compile(u'(备注 :).*(\|\|)')
  1207. # reg2 = re.compile(u'磋商|编 制|公证|审核|谈判|评委|代理 机构 名称|代理 机构'
  1208. # u'|采购 人 :|招标 人|采购 单位|采购 单位 名称 :|采购 人 名称 :|采购 单位 联系 方式 :|招标 单位 :'
  1209. # u'|采购 人|招标 代理|从业|施工员|资料员|公证员|受让方|采购员|招标 单位|招标 联系人|釆购 单位'
  1210. # u'|姓名|习近平|开户 名称')
  1211. # reg1 = re.compile(u'(联系人 :).*(联系人 :).*(\|\|)')
  1212. reg1 = re.compile(u'(联系人 :).*(\|\|).*(联系人 :)')
  1213. reg2 = re.compile(u'代理|公司|地址|采购|电话|商务|招标|技术|项目|联系 方式|监督')
  1214. # person
  1215. # reg1 = re.compile(u'(备注 :|受让方|受让 单位 :|从业 人员 :|姓名 :|施工员|资料员|公证员 :|采购员 :|开户 名称).*(\|\|)')
  1216. # reg1 = re.compile(u'(安全员|施工员|材料员|质量员|质量检查员|质检员|造价员|资料员).*(\|\|)')
  1217. # reg2 = re.compile(u'招标|项目|负责')
  1218. ## 从其他类筛选出该类
  1219. # 查看筛选出的数据
  1220. # df = df[df["Label"] == 2]
  1221. # wash_list = []
  1222. # for index, row in df.iterrows():
  1223. # match = reg1.search(row["Sentence"])
  1224. # if match:
  1225. # match2 = reg2.search(match.group())
  1226. # # if not match2:
  1227. # if not match2:
  1228. # wash_list.append(row)
  1229. # df1 = pd.DataFrame(wash_list)
  1230. # df1.to_csv("C:\\Users\\admin\\Desktop\\Person_Sentence_all_temp.csv")
  1231. # 改标签
  1232. for index, row in df.iterrows():
  1233. if row["Label"] == 2:
  1234. match = reg1.search(row["Sentence"])
  1235. if match:
  1236. match2 = reg2.search(match.group())
  1237. if not match2:
  1238. # row["Label"] = 3
  1239. df["Label"].iloc[index] = 1
  1240. df = df[["Word", "Label", "Sentence", "BIO"]]
  1241. df.to_csv("C:\\Users\\admin\\Desktop\\Person_Sentence_all_re_washed2.csv")
  1242. ## 从该类筛选出不属于该类的
  1243. # 查看筛选出的数据
  1244. # df = df[df["Label"] == 1]
  1245. # wash_list = []
  1246. # for index, row in df.iterrows():
  1247. # match = reg1.search(row["Sentence"])
  1248. # if match:
  1249. # match2 = reg2.search(match.group())
  1250. # # if not match2:
  1251. # if not match2:
  1252. # # match3 = reg3.search(match.group())
  1253. # match3 = reg3.search(row["Sentence"])
  1254. # if not match3:
  1255. # wash_list.append(row)
  1256. # df1 = pd.DataFrame(wash_list)
  1257. # df1.to_csv("C:\\Users\\admin\\Desktop\\Person_Sentence_all_temp.csv")
  1258. # 改标签
  1259. # for index, row in df.iterrows():
  1260. # if row["Label"] == 1:
  1261. # match = reg1.search(row["Sentence"])
  1262. # if match:
  1263. # match2 = reg2.search(match.group())
  1264. # if not match2:
  1265. # # match3 = reg3.search(match.group())
  1266. # match3 = reg3.search(row["Sentence"])
  1267. # if not match3:
  1268. # df["Label"].iloc[index] = 3
  1269. #
  1270. # df = df[["Word", "Label", "Sentence", "BIO"]]
  1271. # df.to_csv("C:\\Users\\admin\\Desktop\\Person_Sentence_all_re_washed2.csv")
  1272. def relabel():
  1273. df = pd.read_csv("C:\\Users\\admin\\Desktop\\Person_Sentence_Notest.csv")
  1274. # df = pd.read_csv("C:\\Users\\admin\\Desktop\\test2000.csv")
  1275. df1 = df
  1276. for index, row in df.iterrows():
  1277. if row["Label"] == 1:
  1278. df1["Label"][index] == 3
  1279. if row["Label"] == 2:
  1280. df1["Label"][index] == 1
  1281. if row["Label"] == 3:
  1282. df1["Label"][index] == 2
  1283. df2 = df1
  1284. for index, row in df1.iterrows():
  1285. if row["Label"] == 1:
  1286. ss = row["Sentence"].split("||")
  1287. forward = ss[0][-30:]
  1288. if "。 联系人" in forward or ", 联系人" in forward \
  1289. or ", 联系 方式" in forward or "。 联系 方式" in forward:
  1290. df2["Label"][index] = 3
  1291. if row["Label"] == 2:
  1292. ss = row["Sentence"].split("||")
  1293. forward = ss[0][-30:]
  1294. if "。 联系人" in forward or ", 联系人" in forward \
  1295. or ", 联系 方式" in forward or "。 联系 方式" in forward:
  1296. df2["Label"][index] = 3
  1297. df2 = df2[["Word", "Label", "Sentence", "BIO"]]
  1298. df2.to_csv("C:\\Users\\admin\\Desktop\\Person_Sentence_Notest_new.csv")
  1299. # df2.to_csv("C:\\Users\\admin\\Desktop\\test2000_new.csv")
  1300. def relabel2():
  1301. df = pd.read_csv("C:\\Users\\admin\\Desktop\\Person_Sentence_Notest_new.csv")
  1302. # df = pd.read_csv("C:\\Users\\admin\\Desktop\\test2000.csv")
  1303. df1 = df
  1304. for index, row in df1.iterrows():
  1305. if row["Label"] == 3:
  1306. ss = row["Sentence"].split("||")
  1307. forward = ss[0][-20:]
  1308. if "采购 " in forward and "窗口" not in forward and "公司" not in forward \
  1309. and "窗口" not in forward and "文件" not in forward \
  1310. and "质疑" not in forward and "中心" not in forward\
  1311. and "处" not in forward:
  1312. # if "招标 " in forward:
  1313. print(forward)
  1314. df1["Label"][index] = 1
  1315. df1 = df1[["Word", "Label", "Sentence", "BIO"]]
  1316. # print(df1)
  1317. # df1.to_csv("C:\\Users\\admin\\Desktop\\Person_Sentence_Notest_new.csv")
if __name__ == "__main__":
    # Entry point of this data-preparation script.  The commented-out calls
    # below record the historical order of the pipeline stages (DB export,
    # BIO conversion, regex extraction, accuracy checks, relabelling);
    # only relabel2() is currently active.
    # Postgre2Data()
    # data2BIOData()
    # BIOData2DataFrame()
    # start_time = time.time()
    # print("开始:", start_time)
    # PersonBIOData2BIO_Sentence()
    # end_time = time.time()
    # print("耗时:", end_time-start_time)
    # start_time = time.time()
    # print("开始:", start_time)
    # BIOData2PersonData()
    # end_time = time.time()
    # print("耗时:", end_time-start_time)
    # print(datetime.strptime("2018-02-02", '%Y-%m-%d'))
    # print(len("二、公示期:2020年05月25日至2020年06月03日,三、该宗地双方已签订成交确认书,在30日内签订出让合同,"
    #           "相关事宜在合同中约定,四、联系方式,联系单位:惠州市公共资源交易中心仲恺分中心,单位地址:惠州仲恺高新区和畅五"
    #           "路人才服务大厦10楼,邮政编码:联系电话:0752-3278419,联系人:"))
    # duplicateData(3, 0.5)
    # resetAndShuffleData()
    # start_time = time.time()
    # BIOData2TXT()
    # end_time = time.time()
    # print("耗时:", end_time-start_time)
    # TXT2BIOData()
    # BIOData2Bidway()
    # BIOData2ServiceTime()
    # Text2Csv()
    # Csv2ServiceTimeText()
    # Csv2BidwayText()
    # re_serviceTime()
    # re_bidway()
    # Postgre2Data()
    # getTestData()
    # washData()
    # re_serviceTime2()
    # re_Accuracy("serviceTime_text1")
    # test_re()
    # re_serviceTime3()
    # relabel()
    relabel2()