123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185 |
- #!/usr/bin/python3
- # -*- coding: utf-8 -*-
- # @Author : bidikeji
- # @Time : 2021/7/28 0028 11:32
- import os
- import sys
- import h5py
- from keras import models,layers,losses,optimizers
- sys.path.append(os.path.abspath("../../.."))
- import pandas as pd
- import math
- from keras.callbacks import ModelCheckpoint
- from BiddingKG.dl.interface.modelFactory import Model_role_classify_word
- from BiddingKG.dl.common.Utils import *
- import tensorflow as tf
# Fixed token-sequence length fed to each of the three model inputs.
seq_len = 20

# Role name -> class id (6 classes total).
lb2id = {
    '招标人': 0,      # tenderer
    '代理人': 1,      # agent
    '中标人': 2,      # winning bidder
    '第二候选人': 3,  # second candidate
    '第三候选人': 4,  # third candidate
    '其他角色': 5,    # other role
}
def getBiLSTMModel(input_shape, vocab, embedding_weights, classes, use_am=False):
    """Build and compile the 3-branch BiLSTM role classifier.

    input_shape is (n_inputs, seq_len, embed_dim).  The three branches use
    BiLSTM widths 64/8/16 respectively, so n_inputs is expected to be 3
    (left context, center, right context).  `use_am` is accepted for
    interface compatibility but unused here.
    """
    inputs = [layers.Input(shape=(input_shape[1],), dtype=tf.int32, name="input%d" % (i))
              for i in range(input_shape[0])]
    # One character-embedding layer shared by all branches, optionally
    # initialised from pretrained weights.
    shared_embedding = layers.Embedding(
        len(vocab), input_shape[2],
        weights=[embedding_weights] if embedding_weights is not None else None,
        mask_zero=True, trainable=True, name="char_embeding")
    embedded = [shared_embedding(inp) for inp in inputs]
    # Branch-specific BiLSTM widths: wide for the left context, narrow for
    # the center/right windows.
    branches = [
        layers.Bidirectional(layers.LSTM(64, dropout=0.2, recurrent_dropout=0.5))(embedded[0]),
        layers.Bidirectional(layers.LSTM(8, dropout=0.2, recurrent_dropout=0.5))(embedded[1]),
        layers.Bidirectional(layers.LSTM(16, dropout=0.2, recurrent_dropout=0.5))(embedded[2]),
    ]
    merged = layers.Dropout(0.5)(layers.concatenate(branches))
    out = layers.Dense(classes, activation="softmax")(merged)
    model = models.Model(inputs, out)
    model.compile(optimizer=optimizers.Adam(lr=0.001),
                  loss=losses.categorical_crossentropy,
                  metrics=[precision, recall, f1_score])
    model.summary()
    return model
def labeling(label, out_len=6):
    """One-hot encode *label* as a float vector of length *out_len*."""
    return np.array([1.0 if i == label else 0.0 for i in range(out_len)])
def word2id(df, seq_len=seq_len):
    """Encode a labelled DataFrame into model-ready arrays.

    df must have columns "left", "center", "right" and "label".  When the
    labels are still the raw role strings, they are mapped IN PLACE to
    class ids via lb2id — callers rely on this side effect when they later
    compare df["label"] against integer predictions (see test()).

    Returns (x, y) where x has shape (3, n_samples, seq_len) — one slice
    per context window — and y is the (n_samples, 6) one-hot label matrix.

    Fix: dropped the unused test_x/test_y locals and the dead commented-out
    return; behavior is unchanged.
    """
    xs = []
    ys = []
    # Map role-name labels to ids exactly once; afterwards df["label"]
    # holds ints, so the set comparison fails on a second call and the
    # mapping is not reapplied.
    if set(df['label']) == set(lb2id):
        df['label'] = df['label'].apply(lambda x: lb2id[x])
    for before, text, after, label in zip(df["left"], df["center"], df["right"], df["label"]):
        # Excel blanks come back as NaN; treat them as empty context.
        before = str(before) if str(before) != "nan" else ""
        text = str(text)
        after = str(after) if str(after) != "nan" else ""
        x = encodeInput([before, text, after], word_len=seq_len, word_flag=True, userFool=False)
        xs.append(x)
        ys.append(labeling(label))
    # (n, 3, seq_len) -> (3, n, seq_len) so each input branch gets its own slice.
    return np.transpose(np.array(xs), (1, 0, 2)), np.array(ys)
def train():
    """Train the 3-input BiLSTM role classifier from the Excel splits.

    Reads traindata/df_train.xlsx and traindata/df_test.xlsx, encodes both
    with word2id, and fits for up to 600 epochs, checkpointing the weights
    with the lowest validation loss under log/.
    """
    df_train = pd.read_excel('traindata/df_train.xlsx')
    df_test = pd.read_excel('traindata/df_test.xlsx')
    train_x, train_y = word2id(df_train)
    test_x, test_y = word2id(df_test)
    with tf.Session() as sess:
        vocab, matrix = getVocabAndMatrix(getModel_word())
        model = getBiLSTMModel(input_shape=(3, seq_len, 60), vocab=vocab, embedding_weights=matrix, classes=6)
        print("loading weights")
        # model.load_weights("log/ep378-loss0.178-val_loss0.117-f1_score0.965.h5",by_name=True, skip_mismatch=True)
        # Keep only the best checkpoint (min val_loss); the filename embeds
        # the epoch and metric values for later inspection.
        callback = ModelCheckpoint(
            filepath="log/" + "ep{epoch:03d}-loss{loss:.3f}-val_loss{val_loss:.3f}-f1_score{val_f1_score:.3f}.h5",
            monitor="val_loss", save_best_only=True, save_weights_only=True, mode="min")
        # train_x/test_x are (3, n, seq_len); split into the three model inputs.
        model.fit(x=[train_x[0], train_x[1], train_x[2]], y=train_y, batch_size=128, epochs=600, callbacks=[callback],
                  validation_data=[[test_x[0], test_x[1], test_x[2]], test_y])
def test():
    """Compare the new BiLSTM model's predictions against the legacy model.

    Loads a labelled evaluation spreadsheet, runs both the legacy
    Model_role_classify_word (50-char windows) and the newly trained
    20-char model, and writes per-row predictions, max probabilities and
    agreement flags back to a new Excel file.
    """
    # df_val = pd.read_excel('traindata/df_val.xlsx')
    # df_val = pd.read_excel('traindata/兼职标注数据_test29.xlsx')
    # df_val = pd.read_excel('traindata/兼职标注数据_test3_predict.xlsx')
    df_val = pd.read_excel('traindata/兼职标注数据_test22_待测试数据.xlsx')
    df_val.reset_index(drop=True, inplace=True)
    # NOTE: this first word2id call also maps df_val['label'] to int ids in
    # place, which the label-comparison columns below depend on.
    val_x, val_y = word2id(df_val, seq_len=seq_len)
    # val_x = np.transpose(np.array(train_x), (1, 0, 2))
    # Re-encode with the legacy 50-char window for the old model.
    old_x, old_y = word2id(df_val, seq_len=50)
    old_x = np.transpose(np.array(old_x), (1, 0, 2))
    role_old = Model_role_classify_word()
    with tf.Session() as sess:
        vocab, matrix = getVocabAndMatrix(getModel_word())
        model = getBiLSTMModel(input_shape=(3, seq_len, 60), vocab=vocab, embedding_weights=matrix, classes=6)
        print("loading weights")
        # model.load_weights("log/ep378-loss0.178-val_loss0.117-f1_score0.965.h5",by_name=True, skip_mismatch=True)
        # model.load_weights("log/ep006-loss0.174-val_loss0.234-f1_score0.917.h5",by_name=True, skip_mismatch=True)
        # model.load_weights("log/ep010-loss0.107-val_loss0.114-f1_score0.966.h5",by_name=True, skip_mismatch=True)
        model.load_weights("log/ep014-loss0.091-val_loss0.110-f1_score0.968.h5",by_name=True, skip_mismatch=True)
        lg_old = role_old.predict(old_x)
        df_val['pred_old'] = pd.DataFrame(np.argmax(lg_old, axis=1))
        df_val['prob_old'] = pd.DataFrame(np.amax(lg_old, axis=1))
        logit = model.predict([val_x[0], val_x[1], val_x[2]])
        print('新模型预测结果',logit[:3])  # "new model predictions"
        print('旧模型预测结果:',lg_old[:3])  # "old model predictions:"
        df_val['pred_new'] = pd.DataFrame(np.argmax(logit, axis=-1))
        df_val['prob_new'] = pd.DataFrame(np.amax(logit, axis=1))
        # df_val['new=new3'] = df_val.apply(lambda x: 1 if x['pred_new3'] == x['pred_new2'] else 0, axis=1)
        # Agreement flags: new vs old model, and each model vs the gold label.
        df_val['new=old'] = df_val.apply(lambda x: 1 if x['pred_new'] == x['pred_old'] else 0, axis=1)
        df_val['old=lb'] = df_val.apply(lambda x: 1 if x['label'] == x['pred_old'] else 0, axis=1)
        df_val['new=lb'] = df_val.apply(lambda x: 1 if x['pred_new'] == x['label'] else 0, axis=1)
        # df_val.to_excel('traindata/df_val_predict.xlsx')
        # df_val.to_excel('traindata/兼职标注数据_test29_predict.xlsx')
        # df_val.to_excel('traindata/兼职标注数据_test3_predict.xlsx')
        df_val.to_excel('traindata/兼职标注数据_test22_待测试数据_predict.xlsx')
        print('')
def get_savedModel():
    """Export the trained role classifier as a TF SavedModel for serving.

    Builds the model in a fresh graph/session, loads the best trained
    weights, and writes a "serve"-tagged SavedModel with the three named
    inputs and the softmax output under role_savedmodel2021-8-5/.
    """
    sess = tf.Session(graph=tf.Graph())
    with sess.as_default():
        with sess.graph.as_default():
            vocab, matrix = getVocabAndMatrix(getModel_word())
            model = getBiLSTMModel(input_shape=(3, seq_len, 60), vocab=vocab, embedding_weights=matrix, classes=6)
            sess.run(tf.global_variables_initializer())
            # model.load_weights(filepath="log/ep009-loss0.057-val_loss0.076-f1_score0.978.h5")
            # model.load_weights(filepath="log/ep010-loss0.107-val_loss0.114-f1_score0.966.h5")  # best model trained Jul 30 (20-char windows)
            model.load_weights(filepath="../../dl_dev/role/log/ep015-loss0.090-val_loss0.113-f1_score0.967.h5")  # retrained Aug 5 after revising some tenderer labels (20-char windows)
            tf.saved_model.simple_save(session=sess,
                                       export_dir="role_savedmodel2021-8-5",
                                       inputs={"input0": model.input[0],
                                               "input1": model.input[1],
                                               "input2": model.input[2]},
                                       outputs={"outputs": model.output})
def predict_pb():
    """Smoke-test the exported SavedModel: load it, run one spreadsheet, print logits.

    Loads the "serve"-tagged SavedModel written by get_savedModel(), resolves
    its input/output tensors through the default serving signature, and feeds
    the encoded validation data through the graph.
    """
    df_val = pd.read_excel('traindata/df_val.xlsx')
    old_x, old_y = word2id(df_val, seq_len=20)
    # old_x = np.transpose(np.array(old_x), (1, 0, 2))
    sess_role = tf.Session()
    with sess_role.as_default() as sess:
        with sess_role.graph.as_default():
            meta_graph_def = tf.saved_model.loader.load(sess=sess_role, tags=["serve"],
                                                        export_dir="role_savedmodel2021-8-5")
            signature_key = tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY
            signature_def = meta_graph_def.signature_def
            # Resolve the concrete graph tensors behind the signature's
            # named inputs/outputs.
            input0 = sess_role.graph.get_tensor_by_name(signature_def[signature_key].inputs["input0"].name)
            input1 = sess_role.graph.get_tensor_by_name(signature_def[signature_key].inputs["input1"].name)
            input2 = sess_role.graph.get_tensor_by_name(signature_def[signature_key].inputs["input2"].name)
            output = sess_role.graph.get_tensor_by_name(
                signature_def[signature_key].outputs["outputs"].name)
            model_role = [[input0, input1, input2], output]  # mirrors the production [inputs, output] wiring
            lg_old = sess_role.run(output, feed_dict={input0: old_x[0],
                                                      input1: old_x[1],
                                                      input2: old_x[2]})
            print(lg_old[:3])
if __name__ == "__main__":
    # Uncomment the stage to run: training, evaluation, export, or
    # SavedModel smoke test.
    # train()
    # test()
    get_savedModel()
    # predict_pb()
|