#!/usr/bin/python3
# -*- coding: utf-8 -*-
# @Author : bidikeji
# @Time : 2021/1/13 0013 10:12
# from BiddingKG.dl.common.Utils import getVocabAndMatrix,getModel_word
from BiddingKG.dl.product.data_util import matrix, vocab, input_from_line, result_to_json, get_ner
import tensorflow as tf
import numpy as np
from tensorflow.contrib.crf import crf_log_likelihood
from tensorflow.contrib.crf import viterbi_decode
from tensorflow.contrib.layers.python.layers import initializers
# word_model = getModel_word()
class Product_Model(object):
    def __init__(self):
        self.char_dim = 60
        self.lstm_dim = 128
        self.num_tags = 4
        self.lr = 0.001
        self.clip = 5.0
        self.dropout_rate = 0.5
        # vocab, matrix = getVocabAndMatrix(word_model, Embedding_size=60)
        self.matrix = matrix
        # self.word2id = {k:v for v,k in enumerate(self.vocab)}
        self.num_chars = len(vocab) + 1  # reserve one extra row for out-of-vocabulary chars
        self.emb_matrix = np.random.random((self.num_chars, self.char_dim))
        self.emb_matrix[:self.num_chars-1, :] = self.matrix
        self.global_step = tf.Variable(0, trainable=False)
        self.best_dev_f1 = tf.Variable(0.0, trainable=False)
        self.initializer = initializers.xavier_initializer()
        self.char_inputs = tf.placeholder(dtype=tf.int32, shape=[None, None], name='CharInputs')
        self.targets = tf.placeholder(dtype=tf.int32, shape=[None, None], name='Targets')
        self.dropout = tf.placeholder(dtype=tf.float32, name='Dropout')  # fed as keep_prob
        # real sequence lengths: count the non-zero (non-padding) ids per row
        used = tf.sign(tf.abs(self.char_inputs))
        length = tf.reduce_sum(used, axis=1)
        self.lengths = tf.cast(length, tf.int32)
        self.batch_size = tf.shape(self.char_inputs)[0]
        self.num_steps = tf.shape(self.char_inputs)[1]

        with tf.variable_scope("char_embedding"):
            self.char_lookup = tf.get_variable(
                name="char_embedding",
                # shape=[self.num_chars, self.char_dim],
                initializer=np.array(self.emb_matrix, dtype=np.float32)
            )
            embed = tf.nn.embedding_lookup(self.char_lookup, self.char_inputs)
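        # embed: [batch_size, num_steps, char_dim]; the extra OOV row stays randomly initialized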
        with tf.variable_scope("char_BiLSTM"):
            lstm_cell = {}
            for direction in ["forward", "backward"]:
                with tf.variable_scope(direction):
                    lstm_cell[direction] = tf.contrib.rnn.BasicLSTMCell(self.lstm_dim, state_is_tuple=True)
            outputs, final_states = tf.nn.bidirectional_dynamic_rnn(
                lstm_cell["forward"],
                lstm_cell["backward"],
                embed,
                dtype=tf.float32,
                sequence_length=self.lengths
            )
            outputs = tf.concat(outputs, axis=2)
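        # outputs: [batch_size, num_steps, 2*lstm_dim] after concatenating the forward and backward passes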
        with tf.variable_scope("project"):
            with tf.variable_scope("hidden"):
                W = tf.get_variable("W", shape=[self.lstm_dim*2, self.lstm_dim],
                                    dtype=tf.float32, initializer=self.initializer)
                b = tf.get_variable("b", shape=[self.lstm_dim],
                                    dtype=tf.float32, initializer=self.initializer)
                output = tf.reshape(outputs, shape=[-1, 2*self.lstm_dim])
                hidden = tf.tanh(tf.nn.xw_plus_b(output, W, b))
                hidden = tf.nn.dropout(hidden, keep_prob=self.dropout)  # apply dropout
            with tf.variable_scope("logits"):
                W = tf.get_variable("W", shape=[self.lstm_dim, self.num_tags],
                                    dtype=tf.float32, initializer=self.initializer)
                b = tf.get_variable("b", shape=[self.num_tags])
                pred = tf.nn.xw_plus_b(hidden, W, b)
                self.logits = tf.reshape(pred, [-1, self.num_steps, self.num_tags])
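        # self.logits: [batch_size, num_steps, num_tags] per-character emission scores for the CRF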
        with tf.variable_scope("crf_loss"):
            small = -1000.0
            start_logits = tf.concat(
                [small*tf.ones(shape=[self.batch_size, 1, self.num_tags]), tf.zeros(shape=[self.batch_size, 1, 1])], axis=-1
            )
            pad_logits = tf.cast(small*tf.ones([self.batch_size, self.num_steps, 1]), tf.float32)
            logits = tf.concat([self.logits, pad_logits], axis=-1)
            logits = tf.concat([start_logits, logits], axis=1)
            targets = tf.concat([tf.cast(self.num_tags*tf.ones([self.batch_size, 1]), tf.int32), self.targets], axis=-1)
            self.trans = tf.get_variable(
                name="transitions",
                shape=[self.num_tags+1, self.num_tags+1],
                initializer=self.initializer
            )
            log_likelihood, self.trans = crf_log_likelihood(
                inputs=logits,
                tag_indices=targets,
                transition_params=self.trans,
                sequence_lengths=self.lengths+1
            )
            self.loss = tf.reduce_mean(-log_likelihood)
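        # Note on the block above: the tag set is augmented with a virtual <start>
        # tag (index num_tags). Each sequence is prefixed with one extra step whose
        # only plausible tag is <start> (score 0 vs. -1000 for the real tags), hence
        # the [num_tags+1, num_tags+1] transition matrix and sequence_lengths+1.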
        with tf.variable_scope("optimizer"):
            self.opt = tf.train.AdamOptimizer(learning_rate=self.lr)
            grads_vars = self.opt.compute_gradients(self.loss)
            capped_grads_vars = [[tf.clip_by_value(g, -self.clip, self.clip), v] for g, v in grads_vars]
            self.train_op = self.opt.apply_gradients(capped_grads_vars, self.global_step)

        self.saver = tf.train.Saver(tf.global_variables(), max_to_keep=5)
    def create_feed_dict(self, run_type, batch):
        '''
        :param run_type: one of 'train', 'dev', 'predict'
        :param batch: list of train/evaluate data
        :return: structured data to feed
        '''
        _, chars, tags = batch
        feed_dict = {
            self.char_inputs: np.asarray(chars),
            self.dropout: 1.0
        }
        assert run_type in ['train', 'dev', 'predict']
        if run_type == 'train':
            feed_dict[self.targets] = np.asarray(tags)
            feed_dict[self.dropout] = self.dropout_rate  # keep_prob during training
        elif run_type == 'dev':
            feed_dict[self.targets] = np.asarray(tags)
        return feed_dict
    def run_step(self, sess, run_type, batch):
        assert run_type in ['train', 'dev', 'predict']
        feed_dict = self.create_feed_dict(run_type, batch)
        if run_type == 'train':
            global_step, loss, _ = sess.run(
                [self.global_step, self.loss, self.train_op],
                feed_dict=feed_dict
            )
            return global_step, loss
        elif run_type == 'dev':
            lengths, logits, loss = sess.run([self.lengths, self.logits, self.loss], feed_dict)
            return lengths, logits, loss
        else:
            lengths, logits = sess.run([self.lengths, self.logits], feed_dict)
            return lengths, logits
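    # Return values differ by run_type:
    #   'train'   -> (global_step, loss)
    #   'dev'     -> (lengths, logits, loss)
    #   'predict' -> (lengths, logits)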
    def run_step_backup(self, sess, is_train, batch):
        # legacy version kept for reference; map the old boolean flag onto the
        # run_type values that create_feed_dict now expects
        feed_dict = self.create_feed_dict('train' if is_train else 'dev', batch)
        if is_train:
            global_step, loss, _ = sess.run(
                [self.global_step, self.loss, self.train_op],
                feed_dict=feed_dict
            )
            return global_step, loss
        else:
            lengths, logits, loss = sess.run([self.lengths, self.logits, self.loss], feed_dict)
            return lengths, logits, loss
    def decode(self, logits, lengths, trans_matrix):
        # Viterbi-decode each sequence, reproducing the start-tag augmentation used
        # in the crf_loss block: prepend a start step and pad a start-tag column.
        paths = []
        small = -1000.0
        start = np.asarray([[small]*self.num_tags + [0]])
        for score, length in zip(logits, lengths):
            score = score[:length]
            pad = small * np.ones([length, 1])
            padded_logits = np.concatenate([score, pad], axis=1)
            padded_logits = np.concatenate([start, padded_logits], axis=0)
            path, _ = viterbi_decode(padded_logits, trans_matrix)
            paths.append(path[1:])  # drop the virtual start step
        return paths
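    # Minimal decoding sketch (assumes a live session and restored weights):
    #   lengths, logits = model.run_step(sess, 'predict', batch)
    #   paths = model.decode(logits, lengths, model.trans.eval(session=sess))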
    def evaluate(self, sess, data_manager, id_to_tag):
        results = []
        trans = self.trans.eval()
        Precision = []
        Recall = []
        F1 = []
        loss = []
        pred_num = 0
        gold_num = 0
        equal_num = 0
        for batch in data_manager.iter_batch():
            strings = batch[0]
            tags = batch[-1]
            # lengths, scores, batch_loss = self.run_step(sess, False, batch)
            lengths, scores, batch_loss = self.run_step(sess, 'dev', batch)
            loss.append(batch_loss)
            batch_paths = self.decode(scores, lengths, trans)
            for i in range(len(strings)):
                result = []
                string = strings[i][:lengths[i]]
                gold = [id_to_tag[int(x)] for x in tags[i][:lengths[i]]]
                pred = [id_to_tag[int(x)] for x in batch_paths[i][:lengths[i]]]
                gold_ner = get_ner("".join(gold))
                pred_ner = get_ner("".join(pred))
                # print('gold entities:', gold_ner)
                # print('predicted entities:', pred_ner)
                pred_num += len(pred_ner)
                gold_num += len(gold_ner)
                equal_num += len(gold_ner & pred_ner)
                # precision_temp = len(gold_ner&pred_ner)/(len(pred_ner)+1e-10)
                # recall_temp = len(gold_ner&pred_ner)/(len(gold_ner)+1e-10)
                # f1_temp = 2*(precision_temp*recall_temp)/(precision_temp+recall_temp+1e-10)
                # Precision.append(precision_temp)
                # Recall.append(recall_temp)
                # F1.append(f1_temp)
                # for char, gold, pred in zip(string, gold, pred):
                #     result.append(" ".join([char, gold, pred]))
                # results.append(result)
        # with open('evaluate_result.txt','w', encoding='utf-8') as f:
        #     for rs in results:
        #         for line in rs:
        #             f.write(line+'\n')
        #         f.write('\n')
        # return sum(F1)/len(F1),sum(Precision)/len(Precision),sum(Recall)/len(Recall)
        # micro-averaged, entity-level metrics accumulated over the whole dataset
        precision = equal_num/(pred_num+1e-10)
        recall = equal_num/(gold_num+1e-10)
        f1 = 2*(precision*recall)/(precision+recall+1e-10)
        return f1, precision, recall, np.mean(loss)
    def evaluate_line(self, sess, line):
        trans = self.trans.eval(session=sess)
        # lengths, scores = self.run_step(sess, False, input_from_line(line))
        lengths, scores = self.run_step(sess, 'predict', input_from_line(line))
        batch_paths = self.decode(scores, lengths, trans)
        tags = batch_paths[0]  # note: batch_paths[0][:lengths] would be wrong, lengths is a list
        return result_to_json(line, tags)
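
if __name__ == '__main__':
    # Minimal usage sketch, not part of the original module: build the graph,
    # restore trained weights and tag one sentence. The checkpoint path below
    # is a hypothetical placeholder.
    model = Product_Model()
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        model.saver.restore(sess, 'model/product.ckpt')  # hypothetical path
        print(model.evaluate_line(sess, '采购护理床、轮椅等医疗设备'))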