product_model.py

#!/usr/bin/python3
# -*- coding: utf-8 -*-
# @Author : bidikeji
# @Time : 2021/1/13 0013 10:12
# from BiddingKG.dl.common.Utils import getVocabAndMatrix,getModel_word
from BiddingKG.dl.product.data_util import matrix, vocab, input_from_line, result_to_json, get_ner
import tensorflow as tf
import numpy as np
from tensorflow.contrib.crf import crf_log_likelihood
from tensorflow.contrib.crf import viterbi_decode
from tensorflow.contrib.layers.python.layers import initializers

# word_model = getModel_word()
class Product_Model(object):
    def __init__(self):
        self.char_dim = 60
        self.lstm_dim = 120  # 128 120
        # self.num_tags = 4
        self.num_tags = 7
        self.lr = 0.001
        self.clip = 5.0
        self.dropout_rate = 0.5
        # vocab, matrix = getVocabAndMatrix(word_model, Embedding_size=60)
        self.matrix = matrix
        # self.word2id = {k:v for v,k in enumerate(self.vocab)}
        self.num_chars = len(vocab) + 1
        self.emb_matrix = np.random.random((self.num_chars, self.char_dim))
        self.emb_matrix[:self.num_chars - 1, :] = self.matrix
        self.global_step = tf.Variable(0, trainable=False)
        self.best_dev_f1 = tf.Variable(0.0, trainable=False)
        self.initializer = initializers.xavier_initializer()

        self.char_inputs = tf.placeholder(dtype=tf.int32, shape=[None, None], name='CharInputs')
        self.targets = tf.placeholder(dtype=tf.int32, shape=[None, None], name='Targets')
        self.dropout = tf.placeholder(dtype=tf.float32, name='Dropout')  # keep probability, despite the name
        # self.lengths = tf.placeholder(dtype=tf.int32, shape=[None],name='lengths')

        used = tf.sign(tf.abs(self.char_inputs))
        length = tf.reduce_sum(used, axis=1)
        self.lengths = tf.cast(length, tf.int32)
        self.batch_size = tf.shape(self.char_inputs)[0]
        self.num_steps = tf.shape(self.char_inputs)[1]
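        # (Note on the mask above: this assumes padding id 0, so
        # tf.sign(tf.abs(.)) is 1 for real tokens and 0 for padding, and the
        # row sum recovers each sequence's true length.)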
        with tf.variable_scope("char_embedding"):
            self.char_lookup = tf.get_variable(
                name="char_embedding",
                # shape=[self.num_chars, self.char_dim],
                initializer=np.array(self.emb_matrix, dtype=np.float32)
            )
            embed = tf.nn.embedding_lookup(self.char_lookup, self.char_inputs)
        with tf.variable_scope("char_BiLSTM"):
            lstm_cell = {}
            for direction in ["forward", "backward"]:
                with tf.variable_scope(direction):
                    lstm_cell[direction] = tf.contrib.rnn.BasicLSTMCell(self.lstm_dim, state_is_tuple=True)
            outputs, final_states = tf.nn.bidirectional_dynamic_rnn(
                lstm_cell["forward"],
                lstm_cell["backward"],
                embed,
                dtype=tf.float32,
                sequence_length=self.lengths
            )
            outputs = tf.concat(outputs, axis=2)
        with tf.variable_scope("project"):
            with tf.variable_scope("hidden"):
                W = tf.get_variable("W", shape=[self.lstm_dim * 2, self.lstm_dim],
                                    dtype=tf.float32, initializer=self.initializer)
                b = tf.get_variable("b", shape=[self.lstm_dim],
                                    dtype=tf.float32, initializer=self.initializer)
                output = tf.reshape(outputs, shape=[-1, 2 * self.lstm_dim])
                hidden = tf.tanh(tf.nn.xw_plus_b(output, W, b))
                hidden = tf.nn.dropout(hidden, keep_prob=self.dropout)  # apply dropout (self.dropout is the keep probability)
            with tf.variable_scope("logits"):
                W = tf.get_variable("W", shape=[self.lstm_dim, self.num_tags],
                                    dtype=tf.float32, initializer=self.initializer)
                b = tf.get_variable("b", shape=[self.num_tags])
                pred = tf.nn.xw_plus_b(hidden, W, b)
                self.logits = tf.reshape(pred, [-1, self.num_steps, self.num_tags])
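        # The CRF below augments the tag set with a synthetic "start" tag
        # (index num_tags): every real position gets a large negative score
        # for it, and a start step scoring only that tag is prepended, so the
        # transition matrix learns transitions out of the sentence start.
        # decode() mirrors exactly this padding at inference time.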
        with tf.variable_scope("crf_loss"):
            small = -1000.0
            start_logits = tf.concat(
                [small * tf.ones(shape=[self.batch_size, 1, self.num_tags]), tf.zeros(shape=[self.batch_size, 1, 1])], axis=-1
            )
            pad_logits = tf.cast(small * tf.ones([self.batch_size, self.num_steps, 1]), tf.float32)
            logits = tf.concat([self.logits, pad_logits], axis=-1)
            logits = tf.concat([start_logits, logits], axis=1)
            targets = tf.concat([tf.cast(self.num_tags * tf.ones([self.batch_size, 1]), tf.int32), self.targets], axis=-1)
            self.trans = tf.get_variable(
                name="transitions",
                shape=[self.num_tags + 1, self.num_tags + 1],
                initializer=self.initializer
            )
            log_likelihood, self.trans = crf_log_likelihood(
                inputs=logits,
                tag_indices=targets,
                transition_params=self.trans,
                sequence_lengths=self.lengths + 1
            )
            self.loss = tf.reduce_mean(-log_likelihood)
        with tf.variable_scope("optimizer"):
            self.opt = tf.train.AdamOptimizer(learning_rate=self.lr)
            grads_vars = self.opt.compute_gradients(self.loss)
            capped_grads_vars = [[tf.clip_by_value(g, -self.clip, self.clip), v] for g, v in grads_vars]
            self.train_op = self.opt.apply_gradients(capped_grads_vars, self.global_step)
        self.saver = tf.train.Saver(tf.global_variables(), max_to_keep=5)

    def create_feed_dict(self, run_type, batch):
        '''
        :param run_type: one of 'train', 'dev', 'predict'
        :param batch: list of train/evaluate data
        :return: structured data to feed
        '''
        _, chars, tags = batch
        feed_dict = {
            self.char_inputs: np.asarray(chars),
            self.dropout: 1.0
        }
        assert run_type in ['train', 'dev', 'predict']
        if run_type == 'train':
            feed_dict[self.targets] = np.asarray(tags)
            feed_dict[self.dropout] = self.dropout_rate
        elif run_type == 'dev':
            feed_dict[self.targets] = np.asarray(tags)
        return feed_dict
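
    # --- illustrative batch layout (an assumption inferred from the
    # unpacking above, not stated in the original file):
    # batch = (strings, chars, tags), where `chars` and `tags` are
    # zero-padded int matrices of shape [batch_size, max_len]; pad id 0
    # lets self.lengths recover the true sequence lengths.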

    def run_step(self, sess, run_type, batch):
        assert run_type in ['train', 'dev', 'predict']
        feed_dict = self.create_feed_dict(run_type, batch)
        if run_type == 'train':
            global_step, loss, _ = sess.run(
                [self.global_step, self.loss, self.train_op],
                feed_dict=feed_dict
            )
            return global_step, loss
        elif run_type == 'dev':
            lengths, logits, loss = sess.run([self.lengths, self.logits, self.loss], feed_dict)
            return lengths, logits, loss
        else:
            lengths, logits = sess.run([self.lengths, self.logits], feed_dict)
            return lengths, logits

    def run_step_backup(self, sess, is_train, batch):
        # legacy variant kept for reference; create_feed_dict now expects a
        # run_type string rather than a boolean, so this path is unused
        feed_dict = self.create_feed_dict(is_train, batch)
        if is_train:
            global_step, loss, _ = sess.run(
                [self.global_step, self.loss, self.train_op],
                feed_dict=feed_dict
            )
            return global_step, loss
        else:
            lengths, logits, loss = sess.run([self.lengths, self.logits, self.loss], feed_dict)
            return lengths, logits, loss

    def decode(self, logits, lengths, trans):
        # `trans` was named `matrix` originally, which shadowed the
        # module-level `matrix` import
        paths = []
        small = -1000.0
        start = np.asarray([[small] * self.num_tags + [0]])
        for score, length in zip(logits, lengths):
            score = score[:length]
            pad = small * np.ones([length, 1])
            padded = np.concatenate([score, pad], axis=1)
            padded = np.concatenate([start, padded], axis=0)
            path, _ = viterbi_decode(padded, trans)
            paths.append(path[1:])  # drop the synthetic start step
        return paths
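
    # --- toy illustration of decode() (an assumption, not in the original
    # file): with num_tags = 7 and one sequence of length 3,
    #   scores = np.random.rand(1, 3, 7); trans = np.zeros((8, 8))
    #   model.decode(scores, [3], trans)  ->  one path of 3 tag ids in [0, 6]
    # The padded column and prepended start row match the crf_loss layout.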

    def evaluate(self, sess, data_manager, id_to_tag):
        results = []
        trans = self.trans.eval()
        Precision = []
        Recall = []
        F1 = []
        loss = []
        pred_num = 0
        gold_num = 0
        equal_num = 0
        for batch in data_manager.iter_batch():
            strings = batch[0]
            tags = batch[-1]
            # lengths, scores, batch_loss = self.run_step(sess, False, batch)
            lengths, scores, batch_loss = self.run_step(sess, 'dev', batch)
            loss.append(batch_loss)
            batch_paths = self.decode(scores, lengths, trans)
            for i in range(len(strings)):
                result = []
                string = strings[i][:lengths[i]]
                gold = [id_to_tag[int(x)] for x in tags[i][:lengths[i]]]
                pred = [id_to_tag[int(x)] for x in batch_paths[i][:lengths[i]]]
                gold_ner = get_ner("".join(gold))
                pred_ner = get_ner("".join(pred))
                # print('gold entities:', gold_ner)
                # print('predicted entities:', pred_ner)
                pred_num += len(pred_ner)
                gold_num += len(gold_ner)
                equal_num += len(gold_ner & pred_ner)
                # precision_temp = len(gold_ner&pred_ner)/(len(pred_ner)+1e-10)
                # recall_temp = len(gold_ner&pred_ner)/(len(gold_ner)+1e-10)
                # f1_temp = 2*(precision_temp*recall_temp)/(precision_temp+recall_temp+1e-10)
                # Precision.append(precision_temp)
                # Recall.append(recall_temp)
                # F1.append(f1_temp)
                if gold_ner != pred_ner:
                    for char, g, p in zip(string, gold, pred):
                        result.append(" ".join([char, g, p]))
                    # print(result)
                results.append(result)
        with open('evaluate_result.txt', 'w', encoding='utf-8') as f:
            for rs in results:
                for line in rs:
                    f.write(line + '\n')
                f.write('\n')
        # return sum(F1)/len(F1),sum(Precision)/len(Precision),sum(Recall)/len(Recall)
        # micro-averaged entity-level metrics over the whole dev set
        precision = equal_num / (pred_num + 1e-10)
        recall = equal_num / (gold_num + 1e-10)
        f1 = 2 * (precision * recall) / (precision + recall + 1e-10)
        return f1, precision, recall, np.mean(loss)

    def evaluate_line(self, sess, line):
        trans = self.trans.eval(session=sess)
        # lengths, scores = self.run_step(sess, False, input_from_line(line))
        lengths, scores = self.run_step(sess, 'predict', input_from_line(line))
        batch_paths = self.decode(scores, lengths, trans)
        tags = batch_paths[0]  # note: batch_paths[0][:lengths] would be wrong here, since lengths is a list
        return result_to_json(line, tags)
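
# Minimal usage sketch (an assumption, not part of the original file; the
# checkpoint path and example line below are hypothetical).
if __name__ == '__main__':
    model = Product_Model()
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        # Restore a trained checkpoint before predicting, e.g.:
        # model.saver.restore(sess, 'model/product_ner.ckpt')
        print(model.evaluate_line(sess, '采购台式电脑一批'))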