#!/usr/bin/python3
# -*- coding: utf-8 -*-
# @Author : bidikeji
# @Time : 2021/1/13 0013 10:12
# from BiddingKG.dl.common.Utils import getVocabAndMatrix, getModel_word
from BiddingKG.dl.product.data_util import matrix, vocab, input_from_line, result_to_json, get_ner
import tensorflow as tf
import numpy as np
from tensorflow.contrib.crf import crf_log_likelihood
from tensorflow.contrib.crf import viterbi_decode
from tensorflow.contrib.layers.python.layers import initializers
# word_model = getModel_word()
class Product_Model(object):
    def __init__(self):
        self.char_dim = 60
        self.lstm_dim = 128
        self.num_tags = 4
        self.lr = 0.001
        self.clip = 5.0
        self.dropout_rate = 0.5
        # vocab, matrix = getVocabAndMatrix(word_model, Embedding_size=60)
        self.matrix = matrix
        # self.word2id = {k: v for v, k in enumerate(self.vocab)}
        self.num_chars = len(vocab) + 1  # one extra (randomly initialized) row beyond the pretrained vocabulary
        self.emb_matrix = np.random.random((self.num_chars, self.char_dim))
        self.emb_matrix[:self.num_chars - 1, :] = self.matrix
        self.global_step = tf.Variable(0, trainable=False)
        self.best_dev_f1 = tf.Variable(0.0, trainable=False)
        self.initializer = initializers.xavier_initializer()
        self.char_inputs = tf.placeholder(dtype=tf.int32, shape=[None, None], name='CharInputs')
        self.targets = tf.placeholder(dtype=tf.int32, shape=[None, None], name='Targets')
        self.dropout = tf.placeholder(dtype=tf.float32, name='Dropout')

        used = tf.sign(tf.abs(self.char_inputs))
        length = tf.reduce_sum(used, reduction_indices=1)
        self.lengths = tf.cast(length, tf.int32)
        self.batch_size = tf.shape(self.char_inputs)[0]
        self.num_steps = tf.shape(self.char_inputs)[1]
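        # The true sequence lengths are recovered from the inputs themselves:
        # tf.sign(tf.abs(ids)) is a 0/1 mask, which assumes id 0 is reserved
        # for padding.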
        with tf.variable_scope("char_embedding"):
            self.char_lookup = tf.get_variable(
                name="char_embedding",
                # shape=[self.num_chars, self.char_dim],
                initializer=np.array(self.emb_matrix, dtype=np.float32)
            )
            embed = tf.nn.embedding_lookup(self.char_lookup, self.char_inputs)
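            # The lookup table is initialized from the pretrained matrix but
            # remains trainable, so the embeddings are fine-tuned along with
            # the rest of the network.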
        with tf.variable_scope("char_BiLSTM"):
            lstm_cell = {}
            for direction in ["forward", "backward"]:
                with tf.variable_scope(direction):
                    lstm_cell[direction] = tf.contrib.rnn.BasicLSTMCell(self.lstm_dim, state_is_tuple=True)
            outputs, final_states = tf.nn.bidirectional_dynamic_rnn(
                lstm_cell["forward"],
                lstm_cell["backward"],
                embed,
                dtype=tf.float32,
                sequence_length=self.lengths
            )
            outputs = tf.concat(outputs, axis=2)
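            # Concatenating the forward and backward outputs yields a tensor
            # of shape [batch_size, num_steps, 2 * lstm_dim].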
        with tf.variable_scope("project"):
            with tf.variable_scope("hidden"):
                W = tf.get_variable("W", shape=[self.lstm_dim * 2, self.lstm_dim],
                                    dtype=tf.float32, initializer=self.initializer)
                b = tf.get_variable("b", shape=[self.lstm_dim],
                                    dtype=tf.float32, initializer=self.initializer)
                output = tf.reshape(outputs, shape=[-1, 2 * self.lstm_dim])
                hidden = tf.tanh(tf.nn.xw_plus_b(output, W, b))
                hidden = tf.nn.dropout(hidden, keep_prob=self.dropout)  # apply dropout (self.dropout is fed as the keep probability)
            with tf.variable_scope("logits"):
                W = tf.get_variable("W", shape=[self.lstm_dim, self.num_tags],
                                    dtype=tf.float32, initializer=self.initializer)
                b = tf.get_variable("b", shape=[self.num_tags])
                pred = tf.nn.xw_plus_b(hidden, W, b)
                self.logits = tf.reshape(pred, [-1, self.num_steps, self.num_tags])
        with tf.variable_scope("crf_loss"):
            small = -1000.0
            start_logits = tf.concat(
                [small * tf.ones(shape=[self.batch_size, 1, self.num_tags]), tf.zeros(shape=[self.batch_size, 1, 1])], axis=-1
            )
            pad_logits = tf.cast(small * tf.ones([self.batch_size, self.num_steps, 1]), tf.float32)
            logits = tf.concat([self.logits, pad_logits], axis=-1)
            logits = tf.concat([start_logits, logits], axis=1)
            targets = tf.concat([tf.cast(self.num_tags * tf.ones([self.batch_size, 1]), tf.int32), self.targets], axis=-1)
            self.trans = tf.get_variable(
                name="transitions",
                shape=[self.num_tags + 1, self.num_tags + 1],
                initializer=self.initializer
            )
            log_likelihood, self.trans = crf_log_likelihood(
                inputs=logits,
                tag_indices=targets,
                transition_params=self.trans,
                sequence_lengths=self.lengths + 1
            )
            self.loss = tf.reduce_mean(-log_likelihood)
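            # The padding above introduces an artificial "start" tag (index
            # num_tags): every real timestep gets a -1000 logit for it, and a
            # synthetic first timestep that can only take the start tag is
            # prepended to both logits and targets. This lets the CRF learn
            # transition scores out of the sequence start, which is why
            # sequence_lengths is lengths + 1.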
        with tf.variable_scope("optimizer"):
            self.opt = tf.train.AdamOptimizer(learning_rate=self.lr)
            grads_vars = self.opt.compute_gradients(self.loss)
            capped_grads_vars = [[tf.clip_by_value(g, -self.clip, self.clip), v] for g, v in grads_vars]
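            # Gradients are clipped element-wise to [-clip, clip]; clipping by
            # global norm (tf.clip_by_global_norm) would be a common
            # alternative that preserves gradient direction.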
            self.train_op = self.opt.apply_gradients(capped_grads_vars, self.global_step)
        self.saver = tf.train.Saver(tf.global_variables(), max_to_keep=5)
    def create_feed_dict(self, run_type, batch):
        '''
        :param run_type: one of 'train', 'dev' or 'predict'
        :param batch: list of train/evaluate data
        :return: structured data to feed
        '''
        _, chars, tags = batch
        feed_dict = {
            self.char_inputs: np.asarray(chars),
            self.dropout: 1.0
        }
        assert run_type in ['train', 'dev', 'predict']
        if run_type == 'train':
            feed_dict[self.targets] = np.asarray(tags)
            feed_dict[self.dropout] = self.dropout_rate  # fed as the keep probability during training
        elif run_type == 'dev':
            feed_dict[self.targets] = np.asarray(tags)
        return feed_dict
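    # In 'predict' mode no targets are fed and the keep probability stays at
    # 1.0 (dropout disabled); 'train' lowers it to dropout_rate (0.5).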
    def run_step(self, sess, run_type, batch):
        assert run_type in ['train', 'dev', 'predict']
        feed_dict = self.create_feed_dict(run_type, batch)
        if run_type == 'train':
            global_step, loss, _ = sess.run(
                [self.global_step, self.loss, self.train_op],
                feed_dict=feed_dict
            )
            return global_step, loss
        elif run_type == 'dev':
            lengths, logits, loss = sess.run([self.lengths, self.logits, self.loss], feed_dict)
            return lengths, logits, loss
        else:
            lengths, logits = sess.run([self.lengths, self.logits], feed_dict)
            return lengths, logits
    def run_step_backup(self, sess, is_train, batch):
        # Deprecated boolean-flag version of run_step, kept for reference; the
        # flag is mapped onto the run_type strings create_feed_dict now expects.
        feed_dict = self.create_feed_dict('train' if is_train else 'dev', batch)
        if is_train:
            global_step, loss, _ = sess.run(
                [self.global_step, self.loss, self.train_op],
                feed_dict=feed_dict
            )
            return global_step, loss
        else:
            lengths, logits, loss = sess.run([self.lengths, self.logits, self.loss], feed_dict)
            return lengths, logits, loss
    def decode(self, logits, lengths, trans):
        paths = []
        small = -1000.0
        start = np.asarray([[small] * self.num_tags + [0]])
        for score, length in zip(logits, lengths):
            score = score[:length]
            pad = small * np.ones([length, 1])
            padded_logits = np.concatenate([score, pad], axis=1)
            padded_logits = np.concatenate([start, padded_logits], axis=0)
            path, _ = viterbi_decode(padded_logits, trans)
            paths.append(path[1:])
        return paths
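    # decode mirrors the training-time augmentation: each sentence gets the
    # extra "start" tag column plus a synthetic first step before Viterbi
    # decoding, and path[1:] drops that synthetic step again.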
    def evaluate(self, sess, data_manager, id_to_tag):
        results = []
        trans = self.trans.eval()
        Precision = []
        Recall = []
        F1 = []
        loss = []
        pred_num = 0
        gold_num = 0
        equal_num = 0
        for batch in data_manager.iter_batch():
            strings = batch[0]
            tags = batch[-1]
            # lengths, scores, batch_loss = self.run_step(sess, False, batch)
            lengths, scores, batch_loss = self.run_step(sess, 'dev', batch)
            loss.append(batch_loss)
            batch_paths = self.decode(scores, lengths, trans)
            for i in range(len(strings)):
                result = []
                string = strings[i][:lengths[i]]
                gold = [id_to_tag[int(x)] for x in tags[i][:lengths[i]]]
                pred = [id_to_tag[int(x)] for x in batch_paths[i][:lengths[i]]]
                gold_ner = get_ner("".join(gold))
                pred_ner = get_ner("".join(pred))
                # print('gold entities:', gold_ner)
                # print('predicted entities:', pred_ner)
                pred_num += len(pred_ner)
                gold_num += len(gold_ner)
                equal_num += len(gold_ner & pred_ner)
                # Per-sentence macro-averaged alternative, kept for reference:
                # precision_temp = len(gold_ner & pred_ner) / (len(pred_ner) + 1e-10)
                # recall_temp = len(gold_ner & pred_ner) / (len(gold_ner) + 1e-10)
                # f1_temp = 2 * (precision_temp * recall_temp) / (precision_temp + recall_temp + 1e-10)
                # Precision.append(precision_temp)
                # Recall.append(recall_temp)
                # F1.append(f1_temp)
                # for char, gold, pred in zip(string, gold, pred):
                #     result.append(" ".join([char, gold, pred]))
                # results.append(result)
        # with open('evaluate_result.txt', 'w', encoding='utf-8') as f:
        #     for rs in results:
        #         for line in rs:
        #             f.write(line + '\n')
        #         f.write('\n')
        # return sum(F1) / len(F1), sum(Precision) / len(Precision), sum(Recall) / len(Recall)
        precision = equal_num / (pred_num + 1e-10)
        recall = equal_num / (gold_num + 1e-10)
        f1 = 2 * (precision * recall) / (precision + recall + 1e-10)
        return f1, precision, recall, np.mean(loss)
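    # Evaluation is entity-level and micro-averaged: entities are recovered
    # from the concatenated tag strings by get_ner, and precision/recall are
    # computed from counts accumulated over the whole dev set (the 1e-10
    # terms guard against division by zero).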
    def evaluate_line(self, sess, line):
        trans = self.trans.eval(session=sess)
        # lengths, scores = self.run_step(sess, False, input_from_line(line))
        lengths, scores = self.run_step(sess, 'predict', input_from_line(line))
        batch_paths = self.decode(scores, lengths, trans)
        tags = batch_paths[0]  # batch_paths[0][:lengths] would be wrong here: lengths is an array, not a scalar
        return result_to_json(line, tags)
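
if __name__ == '__main__':
    # Minimal usage sketch (not part of the original module): it assumes a
    # trained checkpoint exists (the path below is hypothetical) and that
    # input_from_line/result_to_json behave as used in evaluate_line.
    model = Product_Model()
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        # model.saver.restore(sess, 'models/product_ner/best_model')  # hypothetical checkpoint path
        print(model.evaluate_line(sess, '采购台式电脑及打印机一批'))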