#!/usr/bin/env python
# -*- coding: utf-8 -*-
import tensorflow as tf
# these contrib imports are required below (rnn.BasicLSTMCell,
# crf_log_likelihood, initializers.xavier_initializer)
from tensorflow.contrib import rnn
from tensorflow.contrib.crf import crf_log_likelihood
from tensorflow.contrib.layers.python.layers import initializers
import numpy as np
from BiddingKG.dl.common.Utils import viterbi_decode
from zipfile import ZipFile
import json
from BiddingKG.dl.common.Utils import *
import os

class BiLSTM(object):

    def __init__(self):
        config = {'lstm_dim': 100,
                  'num_chars': 6591,
                  'num_tags': 25,
                  'char_dim': 100,
                  'lr': 0.00002,
                  'input_dropout_keep': 1.0,
                  'optimizer': 'adam',
                  'clip': 5}
        self.config = config
        self.lstm_dim = config["lstm_dim"]
        self.num_chars = config["num_chars"]
        self.num_tags = config["num_tags"]
        self.char_dim = config["char_dim"]
        self.lr = config["lr"]

        self.graph = tf.get_default_graph()
        self.sess = tf.Session(graph=self.graph)

        self.char_to_id, self.id_to_seg = _load_map_file(
            os.path.dirname(__file__) + "/data/map.zip", "char_map", "ner_map")
        self.id_to_tag = {int(k): v for k, v in self.id_to_seg.items()}
        self.tag_to_id = {v: int(k) for k, v in self.id_to_seg.items()}

        # self.char_embeding = tf.get_variable(name="char_embeding", initializer=embeddings)
        self.char_embeding = tf.get_variable(name="char_embeding", shape=(self.num_chars, self.char_dim))
        # prepend an all-zero row as a placeholder; the fool release and its source code differ here
        self.const = tf.constant(value=0, dtype=tf.float32, shape=[1, 100])
        self.char_embeding = tf.concat([self.const, self.char_embeding], 0)

        self.global_step = tf.Variable(0, trainable=False)
        self.initializer = initializers.xavier_initializer()

        self.char_inputs = tf.placeholder(dtype=tf.int32, shape=[None, None], name="char_inputs")
        self.targets = tf.placeholder(dtype=tf.int32, shape=[None, None], name="targets")
        self.dropout = tf.placeholder(dtype=tf.float32, name="dropout")
        self.lengths = tf.placeholder(dtype=tf.int32, shape=[None], name="lengths")
        # self.middle_dropout_keep_prob = tf.placeholder_with_default(1.0, [], name="middle_dropout_keep_prob")
        # self.hidden_dropout_keep_prob = tf.placeholder_with_default(1.0, [], name="hidden_dropout_keep_prob")
        self.input_dropout_keep_prob = tf.placeholder_with_default(config["input_dropout_keep"], [],
                                                                   name="input_dropout_keep_prob")

        self.batch_size = tf.shape(self.char_inputs)[0]
        self.num_steps = tf.shape(self.char_inputs)[-1]

        # forward
        embedding = self.embedding_layer(self.char_inputs)
        lstm_inputs = tf.nn.dropout(embedding, self.input_dropout_keep_prob)
        # bi-directional lstm layer
        lstm_outputs = self.bilstm_layer(lstm_inputs)
        # logits for tags
        self.project_layer(lstm_outputs)
        # loss of the model
        self.loss = self.loss_layer(self.logits, self.lengths)

        with tf.variable_scope("optimizer"):
            optimizer = self.config["optimizer"]
            if optimizer == "sgd":
                self.opt = tf.train.GradientDescentOptimizer(self.lr)
            elif optimizer == "adam":
                self.opt = tf.train.AdamOptimizer(self.lr)
            elif optimizer == "adagrad":
                self.opt = tf.train.AdagradOptimizer(self.lr)
            else:
                raise KeyError("unsupported optimizer: %s" % optimizer)
            grads_vars = self.opt.compute_gradients(self.loss)
            capped_grads_vars = [[tf.clip_by_value(g, -self.config["clip"], self.config["clip"]), v]
                                 for g, v in grads_vars]
            self.train_op = self.opt.apply_gradients(capped_grads_vars, self.global_step)

    def embedding_layer(self, char_inputs):
        with tf.variable_scope("char_embedding"), tf.device('/cpu:0'):
            embed = tf.nn.embedding_lookup(self.char_embeding, char_inputs)
            return embed

    def bilstm_layer(self, lstm_inputs, name=None):
        with tf.variable_scope("char_bilstm" if not name else name):
            lstm_fw_cell = rnn.BasicLSTMCell(self.lstm_dim, state_is_tuple=True)
            lstm_bw_cell = rnn.BasicLSTMCell(self.lstm_dim, state_is_tuple=True)
            outputs, _ = tf.nn.bidirectional_dynamic_rnn(lstm_fw_cell, lstm_bw_cell, lstm_inputs,
                                                         dtype=tf.float32, sequence_length=self.lengths)
            # concatenate forward/backward outputs -> [batch, steps, 2 * lstm_dim]
            return tf.concat(outputs, axis=2)

    def project_layer(self, lstm_outputs, name=None):
        """Project BiLSTM outputs to per-tag scores, stored in self.logits."""
        with tf.variable_scope("project" if not name else name):
            with tf.variable_scope("hidden"):
                w_tanh = tf.get_variable("w_tanh", shape=[self.lstm_dim * 2, self.lstm_dim],
                                         dtype=tf.float32, initializer=self.initializer,
                                         regularizer=tf.contrib.layers.l2_regularizer(0.001))
                b_tanh = tf.get_variable("b_tanh", shape=[self.lstm_dim], dtype=tf.float32,
                                         initializer=tf.zeros_initializer())
                output = tf.reshape(lstm_outputs, shape=[-1, self.lstm_dim * 2])
                hidden = tf.tanh(tf.nn.xw_plus_b(output, w_tanh, b_tanh))
                drop_hidden = tf.nn.dropout(hidden, self.dropout)
            # project to score of tags
            with tf.variable_scope("output"):
                w_out = tf.get_variable("w_out", shape=[self.lstm_dim, self.num_tags],
                                        dtype=tf.float32, initializer=self.initializer,
                                        regularizer=tf.contrib.layers.l2_regularizer(0.001))
                b_out = tf.get_variable("b_out", shape=[self.num_tags], dtype=tf.float32,
                                        initializer=tf.zeros_initializer())
                pred = tf.nn.xw_plus_b(drop_hidden, w_out, b_out, name="pred")
            self.logits = tf.reshape(pred, [-1, self.num_steps, self.num_tags], name="logits")

    def loss_layer(self, project_logits, lengths, name=None):
        with tf.variable_scope("crf_loss" if not name else name):
            small = -1000.0
            # one extra step whose score is 0 for the virtual start tag and -1000 for real tags
            start_logits = tf.concat(
                [small * tf.ones(shape=[self.batch_size, 1, self.num_tags]),
                 tf.zeros(shape=[self.batch_size, 1, 1])],
                axis=-1)
            pad_logits = tf.cast(small * tf.ones([self.batch_size, self.num_steps, 1]), tf.float32)
            logits = tf.concat([project_logits, pad_logits], axis=-1)
            logits = tf.concat([start_logits, logits], axis=1)
            targets = tf.concat(
                [tf.cast(self.num_tags * tf.ones([self.batch_size, 1]), tf.int32), self.targets], axis=-1)
            self.trans = tf.get_variable(
                "transitions",
                shape=[self.num_tags + 1, self.num_tags + 1],
                initializer=self.initializer)
            log_likelihood, self.trans = crf_log_likelihood(
                inputs=logits,
                tag_indices=targets,
                transition_params=self.trans,
                sequence_lengths=lengths + 1)
            return tf.reduce_mean(-log_likelihood)
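
    # Note on the augmentation above: tag index num_tags acts as a virtual
    # "start" state. loss_layer() prepends one step scoring 0 for that tag and
    # -1000 for all real tags, and the module-level decode() below rebuilds the
    # same augmented score matrix at inference, so training and Viterbi
    # decoding share one [num_tags + 1, num_tags + 1] transition matrix.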

    def getNodes(self):
        return self.char_inputs, self.targets, self.lengths, self.dropout, \
               self.logits, self.trans, self.loss, self.train_op

    def initVariables(self):
        dict_tensor_values = getTensorValues()
        with self.graph.as_default():
            init_op = tf.global_variables_initializer()
            self.sess.run(init_op)
            '''
            trainable_variables = tf.trainable_variables()
            for item in trainable_variables:
                print(item.name, "prefix/" + item.name in dict_tensor_values.keys())
                self.sess.run(tf.assign(item, dict_tensor_values["prefix/" + item.name]))
                print((self.sess.run(item) == dict_tensor_values["prefix/" + item.name]).all())
            '''
            for _key in dict_tensor_values.keys():
                # strip the leading "prefix/" added by load_graph() when the frozen graph was imported
                self.sess.run(tf.assign(self.graph.get_tensor_by_name(_key[7:]), dict_tensor_values[_key]))
            # print(self.sess.run(tf.nn.embedding_lookup(self.char_embeding, np.array([[1]], dtype=np.int32))))
            # print(self.sess.run(self.char_embeding))
        return self

    def restore(self):
        print("restore weights")
        saver = tf.train.Saver()
        path_add = "0-11/"
        saver.restore(self.sess, os.path.dirname(__file__) + '/model/' + path_add + 'model.ckpt')
        '''
        path_add = "0-4/"
        saver.restore(self.sess, os.path.dirname(__file__) + '/model-server/' + path_add + 'model.ckpt')
        '''
        return self

    def predict(self, sess, sents):
        inputs = []
        lengths = [len(text) for text in sents]
        max_len = max(lengths)
        for sent in sents:
            # map characters to ids, falling back to the <OOV> id, then pad with 0
            sent_ids = [self.char_to_id.get(w, self.char_to_id.get("<OOV>")) for w in sent]
            sent_ids += [0] * (max_len - len(sent_ids))
            inputs.append(sent_ids)
        inputs = np.array(inputs, dtype=np.int32)
        feed_dict = {
            self.char_inputs: inputs,
            self.lengths: lengths,
            self.dropout: 1.0
        }
        logits, trans = sess.run([self.logits, self.trans], feed_dict=feed_dict)
        path = decode(logits, trans, lengths, self.num_tags)
        labels = [[self.id_to_tag.get(l) for l in p] for p in path]
        return labels

    def ner(self, text_list):
        text_list = _check_input(text_list)
        ner_labels = self.predict(self.sess, text_list)
        # print(ner_labels)
        # labels follow the B/M/E/S_type scheme; "O" marks characters outside any entity
        all_entitys = []
        for ti, text in enumerate(text_list):
            ens = []
            entity = ""
            i = 0
            ner_label = ner_labels[ti]
            chars = list(text)
            for label, word in zip(ner_label, chars):
                i += 1
                if label == "O":
                    continue
                lt = label.split("_")[1]  # entity type
                lb = label.split("_")[0]  # position tag: B/M/E/S
                if lb == "S":
                    ens.append((i, i + 1, lt, word))
                elif lb == "B":
                    entity = ""
                    entity += word
                elif lb == "M":
                    entity += word
                elif lb == "E":
                    entity += word
                    ens.append((i - len(entity), i + 1, lt, entity))
                    entity = ""
            if entity:
                # flush an entity left open at the end of the sentence
                ens.append((i - len(entity), i + 1, lt, entity))
            all_entitys.append(ens)
        return all_entitys
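
# A minimal usage sketch (an assumption, not part of the original file): it
# presumes data/map.zip and the checkpoint under model/0-11/ exist next to
# this module.
#
#   model = BiLSTM().restore()
#   entities = model.ner(["..."])   # -> [[(start, end, tag_type, text), ...]]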


def _check_input(text, ignore=False):
    if not text:
        return []
    if not isinstance(text, list):
        text = [text]
    null_index = [i for i, t in enumerate(text) if not t]
    if null_index and not ignore:
        raise Exception("null text in input")
    return text


def _load_map_file(path, char_map_name, id_map_name):
    with ZipFile(path) as myzip:
        with myzip.open('all_map.json') as myfile:
            content = myfile.readline()
            content = content.decode()
            data = json.loads(content)
            return data.get(char_map_name), data.get(id_map_name)
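
# Structure of all_map.json, as inferred from the calls above (an assumption,
# not a documented spec): a single JSON line whose top-level keys include
# "char_map" (char -> id) and "ner_map" (id -> tag name).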


def decode(logits, trans, sequence_lengths, tag_num):
    viterbi_sequences = []
    small = -1000.0
    # virtual start step: -1000 for every real tag, 0 for the extra start tag
    start = np.asarray([[small] * tag_num + [0]])
    for logit, length in zip(logits, sequence_lengths):
        score = logit[:length]
        # append the start-tag column, then prepend the start step,
        # mirroring the augmentation done in BiLSTM.loss_layer()
        pad = small * np.ones([length, 1])
        score = np.concatenate([score, pad], axis=1)
        score = np.concatenate([start, score], axis=0)
        viterbi_seq, viterbi_score = viterbi_decode(score, trans)
        # drop the virtual start step from the decoded path
        viterbi_sequences.append(viterbi_seq[1:])
    return viterbi_sequences
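
# A hedged, self-contained sketch of decode()'s input contract; the all-zero
# scores below are illustrative only, and it assumes viterbi_decode from
# BiddingKG.dl.common.Utils behaves like standard (score, transitions)
# Viterbi decoding: logits is [batch, steps, tag_num], trans is
# [tag_num + 1, tag_num + 1], and each returned path has `length` labels.
def _demo_decode(tag_num=4, steps=3):
    fake_logits = np.zeros((1, steps, tag_num), dtype=np.float32)
    fake_trans = np.zeros((tag_num + 1, tag_num + 1), dtype=np.float32)
    return decode(fake_logits, fake_trans, [steps], tag_num)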


def load_graph(path="D://Anaconda3.4//envs//dl_nlp//fool//ner.pb"):
    with tf.gfile.GFile(path, mode='rb') as f:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(f.read())
    with tf.Graph().as_default() as graph:
        tf.import_graph_def(graph_def, name="prefix")
    return graph


def printModel():
    with tf.gfile.GFile("D://Anaconda3.4//envs//dl_nlp//fool//ner.pb", mode='rb') as f:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(f.read())
        for i, n in enumerate(graph_def.node):
            print("Name of the node - %s" % n.name)
    with tf.Graph().as_default() as graph:
        tf.import_graph_def(graph_def)
        # trans = graph.get_tensor_by_name("prefix/crf_loss/transitions:0")
        # logits = graph.get_tensor_by_name("prefix/project/logits:0")
        # y_target = tf.placeholder()
        summaryWriter = tf.summary.FileWriter('log/', graph)
        # tf.Graph().get_operations()


def getTensorValues():
    # cache the extracted weights so the frozen graph is only read once
    tensor_file = 'dict_tensor_values.pk'
    if os.path.exists(tensor_file):
        return load(tensor_file)
    graph = load_graph()
    with graph.as_default():
        list_tensor_names = ["prefix/char_embeding:0",
                             "prefix/char_bilstm/bidirectional_rnn/fw/basic_lstm_cell/kernel:0",
                             "prefix/char_bilstm/bidirectional_rnn/fw/basic_lstm_cell/bias:0",
                             "prefix/char_bilstm/bidirectional_rnn/bw/basic_lstm_cell/kernel:0",
                             "prefix/char_bilstm/bidirectional_rnn/bw/basic_lstm_cell/bias:0",
                             "prefix/project/hidden/w_tanh:0",
                             "prefix/project/hidden/b_tanh:0",
                             "prefix/project/output/w_out:0",
                             "prefix/project/output/b_out:0",
                             "prefix/crf_loss/transitions:0"]
        dict_tensor_values = dict()
        sess = tf.Session()
        for tensor_name in list_tensor_names:
            dict_tensor_values[tensor_name] = sess.run(graph.get_tensor_by_name(tensor_name))
            # print(np.shape(dict_tensor_values[tensor_name]))
        sess.close()
        save(dict_tensor_values, tensor_file)
    return dict_tensor_values


if __name__ == "__main__":
    # printModel()
    getTensorValues()