
Merge remote-tracking branch 'origin/master' into master

Jiasheng 4 years ago
parent
commit
e40e59019b

BIN
BiddingKG/dl/interface/codename_savedmodel_tf/saved_model.pb


BIN
BiddingKG/dl/interface/codename_savedmodel_tf/variables/variables.data-00000-of-00001


BIN
BiddingKG/dl/interface/codename_savedmodel_tf/variables/variables.index


BIN
BiddingKG/dl/interface/codename_w2v_matrix.pk


+ 25 - 22
BiddingKG/dl/interface/predictor.py

@@ -218,7 +218,7 @@ class CodeNamePredict():
 
         result = []
         index_unk = self.word2index.get("<unk>")
-        index_pad = self.word2index.get("<pad>")
+        # index_pad = self.word2index.get("<pad>")
         if list_entitys is None:
             list_entitys = [[] for _ in range(len(list_sentences))]
         for list_sentence,list_entity in zip(list_sentences,list_entitys):
@@ -249,8 +249,8 @@ class CodeNamePredict():
                 _LEN = MAX_AREA//MAX_LEN
                 # prediction
 
-                # x = [[self.word2index.get(word,index_pad)for word in sentence.sentence_text[:MAX_AREA]]for sentence in list_sentence[_begin_index:_begin_index+_LEN]]
-                x = [[getIndexOfWord(word) for word in sentence.sentence_text[:MAX_AREA]]for sentence in list_sentence[_begin_index:_begin_index+_LEN]]
+                x = [[self.word2index.get(word,index_unk)for word in sentence.sentence_text[:MAX_AREA]]for sentence in list_sentence[_begin_index:_begin_index+_LEN]]
+                # x = [[getIndexOfWord(word) for word in sentence.sentence_text[:MAX_AREA]]for sentence in list_sentence[_begin_index:_begin_index+_LEN]]
                 x_len = [len(_x) if len(_x) < MAX_LEN else MAX_LEN for _x in x]
                 x = pad_sequences(x,maxlen=MAX_LEN,padding="post",truncating="post")
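 
Note: the change above makes out-of-vocabulary characters fall back to the <unk> index instead of the <pad> index; pad_sequences still fills with 0, which is the <pad> row of the embedding matrix. A minimal sketch of the new lookup, using a hypothetical toy vocabulary (not the real codename_vocab):

    # illustrative only: toy vocabulary with the assumed layout (row 0 = <pad>, row 1 = <unk>)
    from keras.preprocessing.sequence import pad_sequences

    word2index = {"<pad>": 0, "<unk>": 1, "项": 2, "目": 3}
    index_unk = word2index.get("<unk>")

    sentences = ["项目编号", "项目"]
    x = [[word2index.get(ch, index_unk) for ch in s] for s in sentences]
    # "编" and "号" are out-of-vocabulary and now map to 1 (<unk>) rather than 0 (<pad>)
    x = pad_sequences(x, maxlen=6, padding="post", truncating="post")  # pads with 0 (<pad>)
    print(x)  # [[2 3 1 1 0 0]
              #  [2 3 0 0 0 0]]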
 
@@ -1239,8 +1239,12 @@ def getBiLSTMCRFModel(MAX_LEN,vocab,EMBED_DIM,BiRNN_UNITS,chunk_tags,weights):
 
 from tensorflow.contrib.crf import crf_log_likelihood
 from tensorflow.contrib.layers.python.layers import initializers
-def BiLSTM_CRF_tfmodel(sess,weights):
-    BiRNN_Units = 200
+def BiLSTM_CRF_tfmodel(sess,embedding_weights):
+    '''
+    :param embedding_weights: pretrained character embedding matrix
+
+    '''
+    BiRNN_Unit = 100
     chunk_tags = {
         'O': 0,
         'PN_B': 1,
@@ -1252,31 +1256,32 @@ def BiLSTM_CRF_tfmodel(sess,weights):
     }
 
     def embedding_layer(input,keepprob):
-        embedding = tf.get_variable("embedding",initializer=np.array(weights,dtype=np.float32) if weights is not None else None,dtype=tf.float32)
+        # load the pretrained character embedding matrix
+        embedding = tf.get_variable(name="embedding",initializer=np.array(embedding_weights, dtype=np.float32),dtype=tf.float32)
         embedding = tf.nn.embedding_lookup(params=embedding,ids=input)
-        embedding = tf.nn.dropout(embedding,keepprob)
-        return embedding
+        embedding_drop = tf.nn.dropout(embedding,keepprob)
+        return embedding_drop
 
     def BiLSTM_Layer(input,length):
         with tf.variable_scope("BiLSTM"):
-            forward_cell = tf.contrib.rnn.BasicLSTMCell(BiRNN_Units//2,state_is_tuple=True)
-            backward_cell = tf.contrib.rnn.BasicLSTMCell(BiRNN_Units//2,state_is_tuple=True)
+            forward_cell = tf.contrib.rnn.BasicLSTMCell(BiRNN_Unit,state_is_tuple=True)
+            backward_cell = tf.contrib.rnn.BasicLSTMCell(BiRNN_Unit,state_is_tuple=True)
         output, _ = tf.nn.bidirectional_dynamic_rnn(forward_cell,backward_cell,input,dtype=tf.float32,sequence_length=length)
         output = tf.concat(output,2)
         return output
 
-    def CRF_layer(input,num_tags,BiRNN_Units,time_step,keepprob):
+    def CRF_layer(input,num_tags,BiRNN_Unit,time_step,keepprob):
         with tf.variable_scope("CRF"):
             with tf.variable_scope("hidden"):
-                w_hidden = tf.get_variable(name='w_hidden',shape=(BiRNN_Units,BiRNN_Units//2),dtype=tf.float32,
+                w_hidden = tf.get_variable(name='w_hidden',shape=(BiRNN_Unit*2,BiRNN_Unit),dtype=tf.float32,
                                            initializer=initializers.xavier_initializer(),regularizer=tf.contrib.layers.l2_regularizer(0.001))
-                b_hidden = tf.get_variable(name='b_hidden',shape=(BiRNN_Units//2),dtype=tf.float32,initializer=tf.zeros_initializer())
+                b_hidden = tf.get_variable(name='b_hidden',shape=(BiRNN_Unit),dtype=tf.float32,initializer=tf.zeros_initializer())
                 # print(input)
-                input_reshape = tf.reshape(input,shape=(-1,BiRNN_Units))
+                input_reshape = tf.reshape(input,shape=(-1,BiRNN_Unit*2))
                 hidden = tf.tanh(tf.nn.xw_plus_b(input_reshape,w_hidden,b_hidden))
                 hidden = tf.nn.dropout(hidden,keepprob)
             with tf.variable_scope("output"):
-                w_output = tf.get_variable(name='w_output',shape=(BiRNN_Units//2,num_tags),dtype=tf.float32,initializer=initializers.xavier_initializer(),regularizer=tf.contrib.layers.l2_regularizer(0.001))
+                w_output = tf.get_variable(name='w_output',shape=(BiRNN_Unit,num_tags),dtype=tf.float32,initializer=initializers.xavier_initializer(),regularizer=tf.contrib.layers.l2_regularizer(0.001))
                 b_output = tf.get_variable(name='b_output',shape=(num_tags),dtype=tf.float32,initializer=tf.zeros_initializer())
                 pred = tf.nn.xw_plus_b(hidden,w_output,b_output)
                 logits_ = tf.reshape(pred,shape=(-1,time_step,num_tags),name='logits')
@@ -1299,7 +1304,7 @@ def BiLSTM_CRF_tfmodel(sess,weights):
         batch_size = _shape[0]
         step_size = _shape[-1]
         bilstm = BiLSTM_Layer(_embedding,length)
-        _logits = CRF_layer(bilstm,num_tags=len(chunk_tags),BiRNN_Units=BiRNN_Units,time_step=step_size,keepprob=keepprob)
+        _logits = CRF_layer(bilstm,num_tags=len(chunk_tags),BiRNN_Unit=BiRNN_Unit,time_step=step_size,keepprob=keepprob)
         crf_loss,trans = layer_loss(_logits,true_target=target,num_tags=len(chunk_tags),length=length)
         global_step = tf.Variable(0,trainable=False)
         with tf.variable_scope("optimizer"):
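 
Note: the rename from BiRNN_Units (total width, halved per direction) to BiRNN_Unit (width per direction) leaves every tensor shape unchanged: the LSTM cells still have 100 units each, the concatenated BiLSTM output is still 200 wide, and w_hidden/w_output keep their (200, 100) and (100, num_tags) shapes. A quick illustrative sanity check:

    # old parameterization: total width, split across the two directions
    BiRNN_Units = 200
    old_cell_units = BiRNN_Units // 2                      # 100 per direction
    old_w_hidden_shape = (BiRNN_Units, BiRNN_Units // 2)   # (200, 100)

    # new parameterization: width per direction, doubled after concat
    BiRNN_Unit = 100
    new_cell_units = BiRNN_Unit                            # 100 per direction
    new_w_hidden_shape = (BiRNN_Unit * 2, BiRNN_Unit)      # (200, 100)

    assert old_cell_units == new_cell_units
    assert old_w_hidden_shape == new_w_hidden_shape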
@@ -1403,13 +1408,12 @@ def initialize_uninitialized(sess):
       
 def save_codename_model():
     # filepath = "../projectCode/models/model_project_"+str(60)+"_"+str(200)+".hdf5"
-    filepath = "models_tf/32-L0.565985563055-F0.8640033553528363-P0.85770792130738-R0.8703918876095912/model.ckpt"
+    filepath = "../projectCode/models_tf/59-L0.471516189943-F0.8802154826344823-P0.8789179683459191-R0.8815168335321886/model.ckpt"
     vocabpath = "../projectCode/models/vocab.pk"
     classlabelspath = "../projectCode/models/classlabels.pk"
     # vocab = load(vocabpath)
     # class_labels = load(classlabelspath)
-    vocab_model = getModel_word()
-    vocab, w2v_matrix = getVocabAndMatrix(vocab_model, Embedding_size=60)
+    w2v_matrix = load('codename_w2v_matrix.pk')
     graph = tf.get_default_graph()
     with graph.as_default() as g:
         ''''''
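 
Note: save_codename_model now loads the pickled embedding matrix directly instead of rebuilding it from the word model. Presumably codename_w2v_matrix.pk has the row layout that get_word_matrix() in the new projectCodeAndName_tf.py produces for w2v_matrix.pk: row 0 is an all-zero <pad> vector, row 1 is a randomly initialized <unk> vector, and the pretrained word2vec rows follow. A sketch of that layout with placeholder sizes (the 5000-row matrix below is made up for illustration):

    import numpy as np

    embedding_size = 60                                  # matches Embedding_size=60 used elsewhere
    pretrained = np.random.rand(5000, embedding_size)    # stand-in for the word2vec rows

    pad_0 = np.zeros((1, embedding_size), dtype=float)           # row 0: <pad>
    unk_1 = np.random.normal(-0.25, 0.25, (1, embedding_size))   # row 1: <unk>
    w2v_matrix = np.concatenate((pad_0, unk_1, pretrained), axis=0)
    assert w2v_matrix.shape == (5002, embedding_size)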
@@ -1426,7 +1430,7 @@ def save_codename_model():
         saver = tf.train.Saver()
         saver.restore(sess, filepath)
 
-        print("logits",sess.run(logits))
+        # print("logits",sess.run(logits))
         
         # print("#",sess.run("time_distributed_1/kernel:0"))
 
@@ -1558,7 +1562,6 @@ def save_timesplit_model():
 if __name__=="__main__":
     #save_role_model()
     # save_codename_model()
-    save_codename_model()
     #save_money_model()
     #save_person_model()
     #save_form_model()
@@ -1578,4 +1581,4 @@ if __name__=="__main__":
         _data = np.transpose(x,[1,0,2,3])
         y = sess.run(outputs,feed_dict={input0:_data[0],input1:_data[1]})
         print(np.argmax(y,-1))
-    '''
+    '''

BIN
BiddingKG/dl/projectCode/data/old_datas.pk


BIN
BiddingKG/dl/projectCode/data/old_datas2.pk


+ 61 - 0
BiddingKG/dl/projectCode/models_tf/59-L0.471516189943-F0.8802154826344823-P0.8789179683459191-R0.8815168335321886/checkpoint

@@ -0,0 +1,61 @@
+model_checkpoint_path: "model.ckpt"
+all_model_checkpoint_paths: "../0-L10.006233995-F0.03876257920238539-P0.09609856262833676-R0.02427763656170566/model.ckpt"
+all_model_checkpoint_paths: "../1-L6.84851114722-F0.5330160618679358-P0.5871193210184723-R0.488042745240442/model.ckpt"
+all_model_checkpoint_paths: "../2-L4.77856940716-F0.6571756216261573-P0.7130371449380918-R0.6094309280489703/model.ckpt"
+all_model_checkpoint_paths: "../3-L3.93697547972-F0.7137330238266764-P0.7348093616086144-R0.6938320278051564/model.ckpt"
+all_model_checkpoint_paths: "../4-L3.39920926508-F0.7495491615751083-P0.77892145893936-R0.7223115630025418/model.ckpt"
+all_model_checkpoint_paths: "../5-L2.97453830155-F0.7764748706481892-P0.7783569641367807-R0.7746018571354464/model.ckpt"
+all_model_checkpoint_paths: "../6-L2.57735574128-F0.7894283701635836-P0.7940898593323535-R0.7848212896197541/model.ckpt"
+all_model_checkpoint_paths: "../7-L2.27607896494-F0.8001250032552932-P0.8033678485514067-R0.7969082326088084/model.ckpt"
+all_model_checkpoint_paths: "../8-L1.99757229899-F0.8073182878561507-P0.8159372681996397-R0.798879493697152/model.ckpt"
+all_model_checkpoint_paths: "../9-L1.84951582204-F0.8122270742358079-P0.8091536243822076-R0.8153239611972818/model.ckpt"
+all_model_checkpoint_paths: "../10-L1.65668609769-F0.8183186752238333-P0.8335306144409771-R0.8036520205426155/model.ckpt"
+all_model_checkpoint_paths: "../11-L1.56750526948-F0.8164458001179095-P0.8069517632752331-R0.8261658971831717/model.ckpt"
+all_model_checkpoint_paths: "../12-L1.36027826321-F0.8230691504521078-P0.8221532091097309-R0.823987134927634/model.ckpt"
+all_model_checkpoint_paths: "../13-L1.28698327816-F0.8240014402921737-P0.8171291573148337-R0.8309902993204337/model.ckpt"
+all_model_checkpoint_paths: "../14-L1.16556329351-F0.8312013130813122-P0.8349209672354234-R0.8275146547699331/model.ckpt"
+all_model_checkpoint_paths: "../15-L1.08147207643-F0.8329565827422247-P0.8344091618948465-R0.8315090522384189/model.ckpt"
+all_model_checkpoint_paths: "../16-L1.0146959307-F0.8337136929460581-P0.833583985894311-R0.8338434403693521/model.ckpt"
+all_model_checkpoint_paths: "../17-L0.961632904206-F0.8367626177875306-P0.8328707986432315-R0.8406909788867563/model.ckpt"
+all_model_checkpoint_paths: "../18-L0.913268781957-F0.8380274424842671-P0.8333418825339831-R0.8427659905586969/model.ckpt"
+all_model_checkpoint_paths: "../19-L0.850334277787-F0.8402594033722439-P0.840346598868884-R0.8401722259687711/model.ckpt"
+all_model_checkpoint_paths: "../20-L0.822678584247-F0.8435514694488921-P0.839907431216251-R0.8472272656533693/model.ckpt"
+all_model_checkpoint_paths: "../21-L0.790172839941-F0.843918918918919-P0.8455449669322501-R0.8422991129325103/model.ckpt"
+all_model_checkpoint_paths: "../22-L0.758756984737-F0.8459801147426079-P0.8467054666389524-R0.8452560045650257/model.ckpt"
+all_model_checkpoint_paths: "../23-L0.756163409967-F0.84577917849608-P0.8493408662900188-R0.8422472376407117/model.ckpt"
+all_model_checkpoint_paths: "../24-L0.708014662135-F0.8456414111304078-P0.845093772666045-R0.846189759817399/model.ckpt"
+all_model_checkpoint_paths: "../25-L0.686367843132-F0.8489429120565165-P0.8500910273081924-R0.847797893863153/model.ckpt"
+all_model_checkpoint_paths: "../26-L0.657114656661-F0.8522927233205673-P0.8503123870501368-R0.8542823053379676/model.ckpt"
+all_model_checkpoint_paths: "../27-L0.648682179654-F0.8494626481330635-P0.860702875399361-R0.8385122166312186/model.ckpt"
+all_model_checkpoint_paths: "../28-L0.611405575699-F0.852578826477344-P0.8579984239558708-R0.8472272656533693/model.ckpt"
+all_model_checkpoint_paths: "../29-L0.635048215166-F0.8497140436348231-P0.8677735114379969-R0.8323909321989936/model.ckpt"
+all_model_checkpoint_paths: "../30-L0.596159002403-F0.8514374066406352-P0.8603431839847474-R0.8427141152668983/model.ckpt"
+all_model_checkpoint_paths: "../31-L0.597045630448-F0.8536629236580414-P0.8457739307535641-R0.8617004720651553/model.ckpt"
+all_model_checkpoint_paths: "../32-L0.606067512029-F0.8501524390243902-P0.8330926654384305-R0.8679255070809774/model.ckpt"
+all_model_checkpoint_paths: "../33-L0.563953632895-F0.8556004694223497-P0.8602894902454373-R0.8509622866628624/model.ckpt"
+all_model_checkpoint_paths: "../34-L0.571696721763-F0.8542524096545498-P0.875-R0.8344659438709343/model.ckpt"
+all_model_checkpoint_paths: "../35-L0.545786692095-F0.8557780635678852-P0.866075223119422-R0.8457228821912123/model.ckpt"
+all_model_checkpoint_paths: "../36-L0.553951665368-F0.8603841691375531-P0.8552901373795366-R0.8655392436582455/model.ckpt"
+all_model_checkpoint_paths: "../37-L0.555397928401-F0.8595798452061287-P0.8726281468811802-R0.8469160139025782/model.ckpt"
+all_model_checkpoint_paths: "../38-L0.533997088596-F0.8607837055417701-P0.8578125804955953-R0.863775483737096/model.ckpt"
+all_model_checkpoint_paths: "../39-L0.541332862553-F0.8596840975310536-P0.8472970930525467-R0.8724386574674483/model.ckpt"
+all_model_checkpoint_paths: "../40-L0.531230105091-F0.8630179827990618-P0.8671764521028649-R0.8588992063080355/model.ckpt"
+all_model_checkpoint_paths: "../41-L0.535626546963-F0.860876013193894-P0.8488301734570391-R0.8732686621362246/model.ckpt"
+all_model_checkpoint_paths: "../42-L0.492028302599-F0.8663548715804876-P0.8727626868814488-R0.8600404627276028/model.ckpt"
+all_model_checkpoint_paths: "../43-L0.529730340127-F0.8607368394140057-P0.8486085904416213-R0.873216786844426/model.ckpt"
+all_model_checkpoint_paths: "../44-L0.516164395271-F0.86547728903927-P0.8554345072206739-R0.8757586761425533/model.ckpt"
+all_model_checkpoint_paths: "../45-L0.492590245116-F0.8684380406434786-P0.8645758354755784-R0.8723349068838512/model.ckpt"
+all_model_checkpoint_paths: "../46-L0.492299298722-F0.86872516685278-P0.8697867914716588-R0.8676661306219847/model.ckpt"
+all_model_checkpoint_paths: "../47-L0.479341025987-F0.8723470499714597-P0.8726187386452116-R0.8720755304248586/model.ckpt"
+all_model_checkpoint_paths: "../48-L0.505483413852-F0.8712250712250713-P0.8699632752288833-R0.8724905327592468/model.ckpt"
+all_model_checkpoint_paths: "../49-L0.499564943958-F0.8717264044366847-P0.8629962889532815-R0.8806349535716138/model.ckpt"
+all_model_checkpoint_paths: "../50-L0.473125779116-F0.8697128824098023-P0.8760526315789474-R0.863464231986305/model.ckpt"
+all_model_checkpoint_paths: "../51-L0.492250957025-F0.869896849164299-P0.8585792239288601-R0.8815168335321886/model.ckpt"
+all_model_checkpoint_paths: "../52-L0.476339325369-F0.8720430384308476-P0.8654710811363172-R0.8787155677750688/model.ckpt"
+all_model_checkpoint_paths: "../53-L0.526518405863-F0.8675383158918201-P0.8559527415934566-R0.879441821860248/model.ckpt"
+all_model_checkpoint_paths: "../54-L0.488412128798-F0.8744224465037042-P0.8702219482120839-R0.8786636924832703/model.ckpt"
+all_model_checkpoint_paths: "../55-L0.474271196838-F0.8756879667192062-P0.8723743822075782-R0.8790268195258598/model.ckpt"
+all_model_checkpoint_paths: "../56-L0.473662256611-F0.8749903043152262-P0.8722164948453608-R0.8777818125226955/model.ckpt"
+all_model_checkpoint_paths: "../57-L0.491734801614-F0.8746457828842289-P0.8687375262269075-R0.8806349535716138/model.ckpt"
+all_model_checkpoint_paths: "../58-L0.471698897114-F0.8778131690251162-P0.8836268068331143-R0.8720755304248586/model.ckpt"
+all_model_checkpoint_paths: "model.ckpt"

BIN
BiddingKG/dl/projectCode/models_tf/59-L0.471516189943-F0.8802154826344823-P0.8789179683459191-R0.8815168335321886/model.ckpt.data-00000-of-00001


BIN
BiddingKG/dl/projectCode/models_tf/59-L0.471516189943-F0.8802154826344823-P0.8789179683459191-R0.8815168335321886/model.ckpt.index


BIN
BiddingKG/dl/projectCode/models_tf/59-L0.471516189943-F0.8802154826344823-P0.8789179683459191-R0.8815168335321886/model.ckpt.meta


+ 796 - 0
BiddingKG/dl/projectCode/projectCodeAndName_tf.py

@@ -0,0 +1,796 @@
+import tensorflow as tf
+from tensorflow.contrib.crf import crf_log_likelihood
+from tensorflow.contrib.layers.python.layers import initializers
+import numpy as np
+import pandas as pd
+import os
+import psycopg2
+import re
+import pickle
+from BiddingKG.dl.common.Utils import *
+from keras.preprocessing.sequence import pad_sequences
+
+def get_data():
+    # with open("viewTrain.txt",'r',encoding='utf-8') as f1,open("viewTest.txt",'r',encoding='utf-8') as f2:
+    #     rows1 = f1.readlines()
+    #     rows2 = f2.readlines()
+    #     rows = rows1 + rows2
+    #     sentence = []
+    #     sentence_label = []
+    #     sentences_and_labels = []
+    #     for row in rows:
+    #         if row[1]!='#':
+    #             sentence.append(row[0])
+    #             sentence_label.append(row[2:-1])
+    #         else:
+    #             sentences_and_labels.append((sentence,sentence_label))
+    #             sentence = []
+    #             sentence_label = []
+    #     print(sentences_and_labels)
+    #     save(sentences_and_labels,"data/old_datas.pk")
+    conn = psycopg2.connect(dbname="iepy",user="postgres",password="postgres",host="192.168.2.101")
+    user_list = [
+        ["test1","2020-08-01","2020-11-25"],
+        ["test11","2020-08-01","2020-11-25"],
+        ["test12","2020-08-01","2020-11-25"],
+        ["test17","2020-08-01","2020-10-31"],
+        ["test19","2020-08-01","2020-11-25"],
+        ["test2","2020-08-01","2020-11-25"],
+        ["test3","2020-08-01","2020-11-25"],
+        ["test7","2020-08-01","2020-11-25"],
+        ["test8","2020-08-01","2020-11-25"],
+        ["test9","2020-08-01","2020-11-25"],
+    ]
+    db_data = []
+    for u in user_list:
+        cur1 = conn.cursor()
+        sql = "SELECT B.document_id,A.text,A.sentences,B.value " \
+              "FROM corpus_iedocument A,brat_bratannotation B " \
+              "WHERE A.human_identifier = B.document_id " \
+              "AND A.edituser = '%s' " \
+              "AND A.edittime >= '%s':: date " \
+              "AND A.edittime <= '%s':: date "
+              # "ORDER  BY B.document_id"
+        cur1.execute(sql % (u[0], u[1], u[2]))
+        db_data.extend(cur1.fetchall())
+        cur1.close()
+    # print(len(db_data))
+    # print(db_data[0])
+    columns = ['document_id','text', 'sentences', 'value']
+    df = pd.DataFrame(db_data, columns=columns)
+    df = df[df['value'].str.contains('^T')]
+    # df = df[df['value'].str.contains('code|name|org|company')]
+    df = df[df['value'].str.contains('code|name')]
+    df = df.reset_index(drop=True)
+    value_split = df['value'].str.split(expand=True)
+    value_split.columns = ['_', 'entity_type', 'begin', 'end', 'entity_text']
+    value_split = value_split.drop('_', axis=1)
+    df = pd.concat([df, value_split], axis=1)
+    df = df.drop('value', axis=1)
+    df['begin'] = [int(_) for _ in df['begin']]
+    df['end'] = [int(_) for _ in df['end']]
+    code_left_list = []
+    for begin,text,entity_type in zip(df['begin'],df['text'],df['entity_type']):
+        code_left = ''
+        if entity_type == 'code':
+            code_left = text[max(0,begin-8):begin]
+        code_left_list.append(code_left)
+    df['code_left'] = code_left_list
+    df.to_excel("C:\\Users\\admin\\Desktop\\项目编号和名称\\Code&Name_dbData.xlsx")
+    conn.close()
+
+def data_process():
+    data = pd.read_excel("C:\\Users\\admin\\Desktop\\项目编号和名称\\Code&Name_dbData.xlsx",index_col=0)
+    data['sentences'] = [sentences[1:-1].split(',') for sentences in data['sentences']]
+    data['sentences'] = [[int(s) for s in sentences] for sentences in data['sentences']]
+    memory_set = set()
+    id_list = []
+    text_list = []
+    text_tagLabels = dict()
+    for _id, _text, _sentences in zip(data['document_id'], data['text'], data['sentences']):
+        if _id not in memory_set:
+            memory_set.add(_id)
+            text_list.append(_text)
+            id_list.append(_id)
+            text_tagLabels[_id] = [[],[]]
+    re_drop = re.compile("((?:公开)?招标?|中标(?:结果)?|结果|公[告示]?|招标公告?|中标公[告示]?|候选人公[告示]|终止|"
+                         "[流废]标|资格预审|预审|成交(?:结果)?|交易|交易信息|入围|合同|通知书)$")
+    re_errorCode = re.compile("账号|身份证|机构编号|代理机构|品目|单位编[号码]|索引号|标准[^项目]*$|资产编号|型号|序列号"
+                              "|宗地编号|地块编号|监测编号|不动产证")
+    # |备案[^,.;,。;]*[号码]
+    for id, text, sentences, entity_type, begin, end, entity_text, code_left in zip(data['document_id'], data['text'],
+                                                                             data['sentences'], data['entity_type'],
+                                                                             data['begin'], data['end'],
+                                                                             data['entity_text'], data['code_left']):
+        if entity_type == 'name':
+            if re_drop.search(entity_text):
+                name_2 = re_drop.sub('', re_drop.sub('', entity_text))
+                entity_text = name_2
+            text_tagLabels[id][0].append(entity_text)
+        if entity_type == 'code':
+            if not re_errorCode.search(str(code_left)):
+                text_tagLabels[id][1].append(entity_text)
+    train_data = []
+    max_len = 400
+    def hasNotBeenLabeled(items,code_begin,code):
+        for i in range(code_begin,code_begin+len(code)):
+            if items[i]!="O":
+                return False
+        return True
+    count = 0
+    for id,text in zip(id_list,text_list):
+        count += 1
+        print(count)
+        names = text_tagLabels[id][0]
+        names = list(set(names))
+        names.sort(key=lambda x: len(x), reverse=True)
+        codes = text_tagLabels[id][1]
+        codes = list(set(codes))
+        codes.sort(key=lambda x: len(x), reverse=True)
+        sentences = text.split('。')
+        for sentence in sentences:
+            l = len(sentence)
+            if l==0:
+                continue
+            elif l > max_len:
+                l = max_len
+                sentence = sentence[:400]
+            sentence_label = ['O']*l
+            code_find_flag = False
+            name_find_flag = False
+            if names:
+                for name in names:
+                    name_begins = findAllIndex(name,sentence)
+                    for name_begin in name_begins:
+                        if hasNotBeenLabeled(sentence_label,name_begin,name):
+                            for j in range(name_begin,name_begin+len(name)):
+                                if j==name_begin:
+                                    sentence_label[j] = "PN_B"
+                                elif j==name_begin+len(name)-1:
+                                    sentence_label[j] = "PN_E"
+                                else:
+                                    sentence_label[j] = "PN_M"
+                        name_find_flag = True
+            if codes:
+                for code in codes:
+                    code_begins = findAllIndex(code,sentence)
+                    for code_begin in code_begins:
+                        if hasNotBeenLabeled(sentence_label,code_begin,code):
+                            for j in range(code_begin,code_begin+len(code)):
+                                if j==code_begin:
+                                    sentence_label[j] = "PC_B"
+                                elif j==code_begin+len(code)-1:
+                                    sentence_label[j] = "PC_E"
+                                else:
+                                    sentence_label[j] = "PC_M"
+                        code_find_flag = True
+            if code_find_flag or name_find_flag:
+                train_data.append([sentence,sentence_label])
+            else:
+                if np.random.random() <= 0.75:
+                    train_data.append([sentence,sentence_label])
+    print(len(train_data))
+    save(train_data,'train_data_new.pk')
+
+def add_data_process():
+    def hasNotBeenLabeled(items, code_begin, code):
+        for i in range(code_begin, code_begin + len(code)):
+            if items[i] != "O":
+                return False
+        return True
+    train_data = []
+    max_len = 400
+    data_path = "C:\\Users\\admin\\Desktop\\项目编号和名称\\补充数据\\data_"
+    data_names = ["合同编号","出让公告","询价编号","询价单编号","出让成交公示",
+                  "的通知","公告编号","交易编号","询价单号","房产英文类项目名称",
+                  "挂牌编号","申购单号","订单编号","询价书编号"]
+    for data_name in data_names:
+        data = pd.read_csv(data_path+data_name+"_process.csv",index_col=0,encoding='utf-8')
+        count = 0
+        for text,_name,_code in zip(data['text'],data['pj_name'],data['pj_code']):
+            count += 1
+            print(count)
+            names = str(_name).split('+')
+            names.sort(key=lambda x: len(x), reverse=True)
+            codes = str(_code).split('+')
+            codes.sort(key=lambda x: len(x), reverse=True)
+            sentences = text.split('。')
+            for sentence in sentences:
+                l = len(sentence)
+                if l == 0:
+                    continue
+                elif l > max_len:
+                    l = max_len
+                    sentence = sentence[:400]
+                sentence_label = ['O'] * l
+                if names:
+                    for name in names:
+                        name_begins = findAllIndex(name, sentence)
+                        for name_begin in name_begins:
+                            if hasNotBeenLabeled(sentence_label, name_begin, name):
+                                for j in range(name_begin, name_begin + len(name)):
+                                    if j == name_begin:
+                                        sentence_label[j] = "PN_B"
+                                    elif j == name_begin + len(name) - 1:
+                                        sentence_label[j] = "PN_E"
+                                    else:
+                                        sentence_label[j] = "PN_M"
+                if codes:
+                    for code in codes:
+                        code_begins = findAllIndex(code, sentence)
+                        for code_begin in code_begins:
+                            if hasNotBeenLabeled(sentence_label, code_begin, code):
+                                for j in range(code_begin, code_begin + len(code)):
+                                    if j == code_begin:
+                                        sentence_label[j] = "PC_B"
+                                    elif j == code_begin + len(code) - 1:
+                                        sentence_label[j] = "PC_E"
+                                    else:
+                                        sentence_label[j] = "PC_M"
+                train_data.append([sentence, sentence_label])
+    d = load('train_data_new.pk')
+    print(len(d))
+    train_data = d + train_data
+    print(len(train_data))
+    print('ok')
+    save(train_data, 'train_data_new2.pk')
+
+def train2():
+    chunk_tags = {
+        'O':0,
+        'PN_B':1,
+        'PN_M':2,
+        'PN_E':3,
+        'PC_B':4,
+        'PC_M':5,
+        'PC_E':6,
+    }
+    # load the pretrained character embedding matrix
+    w2v_matrix = load('w2v_matrix.pk')
+    # print(w2v_matrix[:3])
+    vocab = load('codename_vocab.pk')
+    word2index = dict((w, i) for i, w in enumerate(np.array(vocab)))
+    print(vocab[:2])
+    MAXLEN = 400
+
+    data_x = []
+    data_y = []
+    data1 = load('train_data_new2.pk')
+    for _data in data1:
+        _x = list(_data[0])
+        _x = [word2index.get(_,word2index.get('<unk>')) for _ in _x]
+        _y = _data[1]
+        data_x.append(_x)
+        data_y.append(_y)
+    # append the old annotated data
+    old_datas = load("data/old_datas2.pk")
+    for old_data in old_datas:
+        data_x.append([word2index.get(word,word2index.get('<unk>')) for word in old_data[0]])
+        data_y.append(old_data[1])
+    print("total samples:",len(data_x))
+    data_x = np.array([np.array(x) for x in data_x])
+    x_len = [MAXLEN if len(x) > MAXLEN else len(x) for x in data_x]
+    data_y = np.array([np.array([chunk_tags[_] for _ in y]) for y in data_y])
+    data_x = pad_sequences(data_x, maxlen=MAXLEN, padding="post", truncating="post")
+    data_y = pad_sequences(data_y, maxlen=MAXLEN, padding="post", truncating="post")
+    indices = np.random.permutation(data_x.shape[0])
+    count = len(data_x)
+    test_count = int(0.2 * count)
+    test_idx, train_idx = indices[:test_count], indices[test_count:]
+    train_x, test_x = data_x[train_idx, :], data_x[test_idx, :]
+    train_y, test_y = data_y[train_idx, :], data_y[test_idx, :]
+    train_x_len = np.array([x_len[idx] for idx in train_idx])
+    test_x_len = np.array([x_len[idx] for idx in test_idx])
+    print("training set size:",len(train_x))
+    print("test set size:",len(test_x))
+    # save([test_x,test_y,test_x_len],'my_test_data.pk')
+    with tf.Session(graph=tf.Graph()) as sess:
+        char_input,logits,target,keepprob,length,crf_loss,trans,train_op = BiLSTM_CRF_tfmodel(sess,embedding_weights=w2v_matrix)
+        sess.run(tf.global_variables_initializer())
+        epochs = 150
+        saver = tf.train.Saver(max_to_keep=max(epochs,10))
+        batch_size = 1024
+        _test_loss = 10000.
+        _test_f1 = 0.
+        for epoch in range(epochs):
+            batch_nums = 0
+            for x_batch,y_batch,x_len_batch in batch_iter(train_x,train_y,train_x_len,batch_size=batch_size):
+                train_loss,_ = sess.run([crf_loss,train_op],feed_dict={char_input:x_batch,target:y_batch,length:x_len_batch,keepprob:0.7})
+                batch_nums += 1
+                print("--epoch:" + str(epoch))
+                print("--"+str(batch_nums)+"batch_train--", "loss:", train_loss)
+            test_loss_sum = 0.
+            test_sum = 0
+            acc_sum = 0.
+            precision_1 = 0
+            precision_2 = 0
+            recall_1 = 0
+            recall_2 = 0
+            for test_xbatch,test_ybatch,test_xlen in batch_iter(test_x,test_y,test_x_len,batch_size=batch_size):
+                test_loss,_logits,_trans = sess.run([crf_loss,logits,trans],feed_dict={char_input:test_xbatch,target:test_ybatch,length:test_xlen,keepprob:1.0})
+                acc,_precision,_recall = getAcc(test_ybatch, _logits, _trans, test_xlen)
+                batch_len = len(test_xbatch)
+                test_sum += batch_len
+                acc_sum += acc*batch_len
+                precision_1 += _precision[0]
+                precision_2 += _precision[1]
+                recall_1 += _recall[0]
+                recall_2 += _recall[1]
+                test_loss_sum += test_loss*batch_len
+            print("==>epoch:" + str(epoch)+"have_done")
+            epoch_test_loss = test_loss_sum/test_sum
+            epoch_test_acc = acc_sum/test_sum
+            test_precision = precision_1/precision_2
+            test_recall = recall_1/recall_2
+            test_f1 = ner_f1_score(test_precision,test_recall)
+            print("--test --"," acc:",epoch_test_acc,'test_loss:',epoch_test_loss)
+            print('test_precision:',test_precision,'test_recall',test_recall,'test_f1',test_f1)
+            # if test_f1 > _test_f1:
+            #     _test_f1 = test_f1
+            print("Saving-"+str(epoch)+"-model,test_loss:"+str(epoch_test_loss),'test_f1',test_f1)
+            saver.save(sess,"models_tf/"+str(epoch)+"-L"+str(epoch_test_loss)+"-F"+str(test_f1)+"-P"+str(test_precision)+"-R"+str(test_recall)+"/model.ckpt")
+
+def BiLSTM_CRF_tfmodel(sess,embedding_weights):
+    '''
+    :param embedding_weights: pretrained character embedding matrix
+
+    '''
+    BiRNN_Unit = 100
+    chunk_tags = {
+        'O': 0,
+        'PN_B': 1,
+        'PN_M': 2,
+        'PN_E': 3,
+        'PC_B': 4,
+        'PC_M': 5,
+        'PC_E': 6,
+    }
+
+    def embedding_layer(input,keepprob):
+        # load the pretrained character embedding matrix
+        embedding = tf.get_variable(name="embedding",initializer=np.array(embedding_weights, dtype=np.float32),dtype=tf.float32)
+        embedding = tf.nn.embedding_lookup(params=embedding,ids=input)
+        embedding_drop = tf.nn.dropout(embedding,keepprob)
+        return embedding_drop
+
+    def BiLSTM_Layer(input,length):
+        with tf.variable_scope("BiLSTM"):
+            forward_cell = tf.contrib.rnn.BasicLSTMCell(BiRNN_Unit,state_is_tuple=True)
+            backward_cell = tf.contrib.rnn.BasicLSTMCell(BiRNN_Unit,state_is_tuple=True)
+        output, _ = tf.nn.bidirectional_dynamic_rnn(forward_cell,backward_cell,input,dtype=tf.float32,sequence_length=length)
+        output = tf.concat(output,2)
+        return output
+
+    def CRF_layer(input,num_tags,BiRNN_Unit,time_step,keepprob):
+        with tf.variable_scope("CRF"):
+            with tf.variable_scope("hidden"):
+                w_hidden = tf.get_variable(name='w_hidden',shape=(BiRNN_Unit*2,BiRNN_Unit),dtype=tf.float32,
+                                           initializer=initializers.xavier_initializer(),regularizer=tf.contrib.layers.l2_regularizer(0.001))
+                b_hidden = tf.get_variable(name='b_hidden',shape=(BiRNN_Unit),dtype=tf.float32,initializer=tf.zeros_initializer())
+                # print(input)
+                input_reshape = tf.reshape(input,shape=(-1,BiRNN_Unit*2))
+                hidden = tf.tanh(tf.nn.xw_plus_b(input_reshape,w_hidden,b_hidden))
+                hidden = tf.nn.dropout(hidden,keepprob)
+            with tf.variable_scope("output"):
+                w_output = tf.get_variable(name='w_output',shape=(BiRNN_Unit,num_tags),dtype=tf.float32,initializer=initializers.xavier_initializer(),regularizer=tf.contrib.layers.l2_regularizer(0.001))
+                b_output = tf.get_variable(name='b_output',shape=(num_tags),dtype=tf.float32,initializer=tf.zeros_initializer())
+                pred = tf.nn.xw_plus_b(hidden,w_output,b_output)
+                logits_ = tf.reshape(pred,shape=(-1,time_step,num_tags),name='logits')
+        return logits_
+
+    def layer_loss(input,true_target,num_tags,length):
+        with tf.variable_scope("crf_loss"):
+            trans = tf.get_variable(name='transitons',shape=(num_tags,num_tags),dtype=tf.float32,initializer=initializers.xavier_initializer())
+            log_likelihood,trans = crf_log_likelihood(inputs=input,tag_indices=true_target,transition_params=trans,sequence_lengths=length)
+            return tf.reduce_mean(-log_likelihood),trans
+
+    with sess.graph.as_default():
+        char_input = tf.placeholder(name='char_input',shape=(None,None),dtype=tf.int32)
+        target = tf.placeholder(name='target',shape=(None,None),dtype=tf.int32)
+        length = tf.placeholder(name='length',shape=(None,),dtype=tf.int32)
+        keepprob = tf.placeholder(name='keepprob',dtype=tf.float32)
+
+        _embedding = embedding_layer(char_input,keepprob)
+        _shape = tf.shape(char_input)
+        batch_size = _shape[0]
+        step_size = _shape[-1]
+        bilstm = BiLSTM_Layer(_embedding,length)
+        _logits = CRF_layer(bilstm,num_tags=len(chunk_tags),BiRNN_Unit=BiRNN_Unit,time_step=step_size,keepprob=keepprob)
+        crf_loss,trans = layer_loss(_logits,true_target=target,num_tags=len(chunk_tags),length=length)
+        global_step = tf.Variable(0,trainable=False)
+        with tf.variable_scope("optimizer"):
+            opt = tf.train.AdamOptimizer(0.002)
+            grads_vars = opt.compute_gradients(crf_loss)
+            capped_grads_vars = [[tf.clip_by_value(g,-5,5),v] for g,v in grads_vars]
+            train_op = opt.apply_gradients(capped_grads_vars,global_step)
+            return char_input,_logits,target,keepprob,length,crf_loss,trans,train_op
+
+
+def batch_iter(x, y,x_len, batch_size=256):
+    '''
+    :param x: content2id
+    :param y: label2id
+    :param batch_size: number of sentences per training batch
+    :return:
+    '''
+    data_len = len(x)
+    num_batch = int((data_len - 1) / batch_size) + 1 # number of batches per epoch
+    indices = np.random.permutation(data_len) # random permutation of the samples
+    x = x[indices]
+    y = y[indices]
+    x_len = x_len[indices]
+    for i in range(num_batch):
+        start_id = batch_size * i
+        end_id = min(batch_size*(i+1), data_len)
+        yield x[start_id:end_id], y[start_id:end_id],x_len[start_id:end_id]
+from sklearn.metrics import accuracy_score
+def getAcc(y_batch,logits,trans,lengths):
+    index = 0
+    small = -1000.0
+
+    preds = []
+    true_tags = []
+    for score, length in zip(logits, lengths):
+        score = score[:length]
+        path, _ = tf.contrib.crf.viterbi_decode(score, trans)
+        preds += path[0:]
+        index += 1
+
+    for y, length in zip(y_batch, lengths):
+        y = y.tolist()
+        true_tags += y[: length]
+    _preds = list(preds)
+    _true_tags = list(true_tags)
+    acc = accuracy_score(np.reshape(true_tags,(-1)), np.reshape(preds,(-1)))
+    precision_1,precision_2,_ = ner_precision(_preds,_true_tags)
+    recall_1,recall_2,_ = ner_recall(_preds,_true_tags)
+
+    return acc,[precision_1,precision_2],[recall_1,recall_2]
+
+
+def decode(logits, trans, sequence_lengths, tag_num):
+    viterbi_sequences = []
+    for logit, length in zip(logits, sequence_lengths):
+        score = logit[:length]
+        viterbi_seq, viterbi_score = viterbi_decode(score, trans)
+        viterbi_sequences.append(viterbi_seq)
+    return viterbi_sequences
+
+
+def new_process():
+    data = pd.read_csv("C:\\Users\\admin\\Desktop\\项目编号和名称\\data_询价书编号.csv",index_col=0,encoding='utf-8')
+    text_list = []
+    for id,text in zip(data['id'],data['text']):
+        # id_list.append(id)
+        text_list.append(text)
+    page_content = get_article1(text_list)
+    data['text'] = page_content
+    data.to_csv('C:\\Users\\admin\\Desktop\\项目编号和名称\\data_询价书编号_process.csv')
+
+def new_test_code():
+    data = pd.read_csv("C:\\Users\\admin\\Desktop\\code_test_process2.csv",index_col=0)
+    sentences_list = []
+    for text in data['text']:
+        sentences = text.split("。")
+        sentences_list.append(sentences)
+    model_path = "models_tf/27-0.984184712668-0.598231307426/model.ckpt"
+    name_list,code_list = predict_CodeName(sentences_list,model_path)
+    data['code'] = code_list
+    data['name'] = name_list
+    data.to_csv("C:\\Users\\admin\\Desktop\\code_test结果2-3.csv")
+
+
+def predict_CodeName(articles,model_path):
+
+    w2v_matrix = load('w2v_matrix.pk')
+    vocab = load('codename_vocab.pk')
+    word2index = dict((w, i) for i, w in enumerate(np.array(vocab)))
+
+    model_path = model_path
+    sess = tf.Session(graph=tf.Graph())
+    with sess:
+        char_input, logits, target, keepprob,length, crf_loss, trans, train_op = BiLSTM_CRF_tfmodel(sess, w2v_matrix)
+        sess.run(tf.global_variables_initializer())
+        saver = tf.train.Saver()
+        saver.restore(sess, model_path)
+        re_name = re.compile("12*3")
+        re_code = re.compile("45*6")
+        article_name_list = []
+        article_code_list = []
+        count = 0
+        for sentences in articles:
+            if len(sentences)>500:
+                sentences = sentences[:500]
+            # print(len(sentences))
+            count += 1
+            print(count)
+            sentence_len = [ min(len(sentence),2000) for sentence in sentences]
+            # maxlen = max(sentence_len)
+            maxlen = max(sentence_len)
+            sentences_x = []
+            for sentence in sentences:
+                sentence = list(sentence)
+                sentence2id = [word2index.get(word,word2index.get('<unk>')) for word in sentence]
+                sentences_x.append(sentence2id)
+            sentences_x = pad_sequences(sentences_x,maxlen=maxlen,padding="post", truncating="post")
+            sentences_x = [np.array(x) for x in sentences_x]
+
+            _logits,_trans = sess.run([logits,trans],feed_dict={char_input:np.array(sentences_x),length:sentence_len,keepprob:1.0})
+
+            viterbi_sequence = decode(logits=_logits,trans=_trans,sequence_lengths=sentence_len,tag_num=7)
+            # print("==",_logits)
+            name_list = []
+            code_list = []
+            sentence_index = 0
+            for _seq,sentence in zip(viterbi_sequence,sentences):
+                seq_id = ''.join([str(s) for s in _seq])
+                if re_name.search(seq_id):
+                    for _name in re_name.finditer(seq_id):
+                        start = _name.start()
+                        end = _name.end()
+                        n = sentence[start:end]
+                        name_list.append((n,start + sentence_index,end + sentence_index))
+                if re_code.search(seq_id):
+                    for _code in re_code.finditer(seq_id):
+                        start = _code.start()
+                        end = _code.end()
+                        c = sentence[start:end]
+                        # print(n,'<==>',start,end)
+                        code_list.append((c,start + sentence_index,end + sentence_index))
+                sentence_index += len(sentence)
+            article_name_list.append(name_list)
+            article_code_list.append(code_list)
+    return article_name_list,article_code_list
+from BiddingKG.dl.interface.Preprocessing import *
+# process raw announcement web pages
+def get_article1(articles,cost_time = dict(),useselffool=True):
+    '''
+    :param articles: source HTML of the articles to process
+    :param useselffool: whether to use selffool
+    :return: list_articles
+    '''
+
+    list_articles = []
+    for article in articles:
+        a_time = time.time()
+        sourceContent = article
+        # table processing
+        key_preprocess = "tableToText"
+        start_time = time.time()
+        article_processed = segment(tableToText(BeautifulSoup(sourceContent,"lxml")))
+
+        # log(article_processed)
+
+        if key_preprocess not in cost_time:
+            cost_time[key_preprocess] = 0
+        cost_time[key_preprocess] += time.time()-start_time
+
+        #article_processed = article[1]
+        list_articles.append(article_processed)
+        print(time.time()-a_time)
+    return list_articles
+# sentence splitting
+def get_sentences1(list_articles,useselffool=True,cost_time=dict()):
+    '''
+
+    :param list_articles: preprocessed article text
+    :return: list_sentences
+    '''
+
+    list_sentences = []
+    for article in list_articles:
+        a_time = time.time()
+        list_sentences_temp = []
+        # table processing
+        key_preprocess = "tableToText"
+        start_time = time.time()
+        article_processed = article
+
+
+        if key_preprocess not in cost_time:
+            cost_time[key_preprocess] = 0
+        cost_time[key_preprocess] += time.time()-start_time
+
+        # NLP processing
+        if article_processed is not None and len(article_processed)!=0:
+            split_patten = "。"
+            sentences = []
+            _begin = 0
+            sentences_set = set()
+            for _iter in re.finditer(split_patten,article_processed):
+                _sen = article_processed[_begin:_iter.span()[1]]
+                if len(_sen)>0 and _sen not in sentences_set:
+                    sentences.append(_sen)
+                    sentences_set.add(_sen)
+                _begin = _iter.span()[1]
+            _sen = article_processed[_begin:]
+            if len(_sen)>0 and _sen not in sentences_set:
+                sentences.append(_sen)
+                sentences_set.add(_sen)
+
+
+            '''
+            tokens_all = fool.cut(sentences)
+            #pos_all = fool.LEXICAL_ANALYSER.pos(tokens_all)
+            #ner_tag_all = fool.LEXICAL_ANALYSER.ner_labels(sentences,tokens_all)
+            ner_entitys_all = fool.ner(sentences)
+            '''
+            # rate-limited execution
+            key_nerToken = "nerToken"
+            start_time = time.time()
+            # tokens_all = getTokens(sentences,useselffool=useselffool)
+            if key_nerToken not in cost_time:
+                cost_time[key_nerToken] = 0
+            cost_time[key_nerToken] += time.time()-start_time
+
+
+            for sentence_index in range(len(sentences)):
+
+                sentence_text = sentences[sentence_index]
+
+
+                list_sentences_temp.append(sentence_text)
+
+        if len(list_sentences_temp)==0:
+            list_sentences_temp.append(sentence_text)
+        list_sentences.append(list_sentences_temp)
+        print('2:',time.time()-a_time)
+    return list_sentences
+
+def _find_tag(labels,B_label,M_label,E_label):
+    result = []
+    ner_begin = 0
+    ner_end = 0
+    for num in range(len(labels)):
+        if labels[num] == B_label:
+            ner_begin = num
+            continue
+        if labels[num] == M_label and labels[num-1] == B_label:
+            continue
+        if labels[num] == M_label and labels[num-1] == M_label:
+            continue
+        if labels[num] == E_label:
+            if labels[num-1] == M_label or labels[num-1] == B_label:
+                ner_end = num+1
+                result.append((ner_begin,ner_end))
+        ner_begin = 0
+        ner_end = 0
+    return result
+
+
+def find_all_tag(labels):
+    # tags = [("PN_B","PN_M","PN_E"),("PC_B","PC_M","PC_E")]
+    tags = [(1,2,3),(4,5,6)]
+    result = []
+    for tag in tags:
+        res = _find_tag(labels,B_label=tag[0],M_label=tag[1],E_label=tag[2])
+        result.append(res)
+    return result
+
+
+def ner_precision(pre_labels,true_labels):
+    '''
+    :param pre_tags: list
+    :param true_tags: list
+    :return:
+    '''
+    pre = []
+
+    pre_result = find_all_tag(pre_labels)
+    for item in pre_result:
+        for _item in item:
+            if pre_labels[_item[0]:_item[1]] == true_labels[_item[0]:_item[1]]:
+                pre.append(1)
+            else:
+                pre.append(0)
+    _sum = sum(pre)
+    _l = len(pre)
+    if not _l:
+        _l = 0.0001
+    return _sum,_l,_sum/_l
+
+
+
+
+def ner_recall(pre_labels,true_labels):
+    '''
+    :param pre_tags: list
+    :param true_tags: list
+    :return:
+    '''
+    recall = []
+
+    true_result = find_all_tag(true_labels)
+    for item in true_result:
+        for _item in item:
+            if pre_labels[_item[0]:_item[1]] == true_labels[_item[0]:_item[1]]:
+                recall.append(1)
+            else:
+                recall.append(0)
+    _sum = sum(recall)
+    _l = len(recall)
+    if not _l:
+        _l = 0.0001
+    return _sum, _l, _sum/_l
+
+
+def ner_f1_score(precision,recall):
+    _temp = precision+recall
+    if not _temp:
+        _temp = 0.0001
+    return (2*precision*recall)/(_temp)
+
+def old_data_update():
+    data = load('data/old_datas.pk')
+    # print(len(data))
+    re_code = re.compile("(?:(?:公告|合同)[^,,。:;]{,3}编号[::]*|寻源单据?号|计划[编文]?号|交易编[号码]|询价单编?[码号]|采购项目编号)([\-\d\w\(\)\(\)\[\]\【\】号]{3,})",re.A)
+    index = 0
+    updat_list = []
+    for d in data:
+        sentence = ''.join(d[0])
+        label = d[1]
+        if re_code.search(sentence):
+            for item in re_code.finditer(sentence):
+                begin,end = item.span()
+                # print(sentence[max(0,begin-8):end])
+                # print(sentence[begin:end])
+                la = label[begin:end]
+                if 'PC_B' not in la:
+                    updat_list.append(index)
+        index += 1
+    updat_list = list(set(updat_list))
+    print(len(updat_list))
+    for u in updat_list:
+        item = data[u]
+        sentence = ''.join(item[0])
+        label = item[1]
+        re_res = re_code.findall(sentence)
+        for res in re_res:
+            begin = findAllIndex(res,sentence)
+            for b in begin:
+                e = b + len(res)
+                label[b] = 'PC_B'
+                label[e-1] = 'PC_E'
+                for i in range(b+1,e-1):
+                    label[i] = 'PC_M'
+        data[u] = (item[0],label)
+        # print(sentence)
+        # print('---')
+        # print(label)
+        save(data,'data/old_datas2.pk')
+
+def get_word_matrix():
+    # get the pretrained character embeddings
+    vocab_model = getModel_word()
+    _, w2v_matrix = getVocabAndMatrix(vocab_model, Embedding_size=60)
+    # drop the original first row (the all-zero <pad> row)
+    w2v_matrix = w2v_matrix[1:]
+    # <pad>
+    pad_0 = np.zeros((1, w2v_matrix.shape[1]), dtype=float)
+    # <unk>
+    unk_1 = np.random.normal(-0.25, 0.25, (1, w2v_matrix.shape[1]))
+    w2v_matrix = np.concatenate((pad_0, unk_1, w2v_matrix), axis=0)
+    print(w2v_matrix[:3])
+    save(w2v_matrix,"w2v_matrix.pk")
+
+if __name__ == '__main__':
+    # get_data()
+    # data_process()
+    # add_data_process()
+    # train2()
+    # test2()
+    # new_test()
+    # new_process()
+    # new_test_code()
+    # get_word_matrix()
+    # old_data_update()
+
+    # model_path = "models_tf/76-L0.472526232355-F0.8848208266348597-P0.8845455959355073-R0.8850962286662862/model.ckpt"
+    model_path = "models_tf/59-L0.471516189943-F0.8802154826344823-P0.8789179683459191-R0.8815168335321886/model.ckpt"
+    text = '''[X2002185]2020年11月麻城市生活垃圾焚烧发电项目厂前区零星计划
+    '''
+    name_list, code_list = predict_CodeName([text.split('。')], model_path)
+    print(name_list)
+    print(code_list)
+
+    pass
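 
Note: predict_CodeName decodes spans by joining the per-character tag ids into a string and matching it with regular expressions: ids 1/2/3 are PN_B/PN_M/PN_E (project name) and 4/5/6 are PC_B/PC_M/PC_E (project code), so "12*3" and "45*6" match complete name and code spans, and the match offsets are character offsets into the sentence. A self-contained sketch with a hand-made tag sequence (the sentence and tags below are invented for illustration):

    import re

    re_name = re.compile("12*3")   # PN_B (PN_M)* PN_E
    re_code = re.compile("45*6")   # PC_B (PC_M)* PC_E

    sentence = "项目名称:测试项目,编号:AB-123。"
    tags = [0,0,0,0,0, 1,2,2,3, 0,0,0,0, 4,5,5,5,5,6, 0]   # one tag id per character
    seq_id = ''.join(str(t) for t in tags)

    for m in re_name.finditer(seq_id):
        print("name:", sentence[m.start():m.end()])   # -> 测试项目
    for m in re_code.finditer(seq_id):
        print("code:", sentence[m.start():m.end()])   # -> AB-123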

BIN
BiddingKG/dl/projectCode/train_data_new2.pk


BIN
BiddingKG/dl/projectCode/w2v_matrix.pk