#!/usr/bin/env python
# -*- coding: utf-8 -*-
import tensorflow as tf
from tensorflow.contrib import rnn
from tensorflow.contrib.crf import crf_log_likelihood
from tensorflow.contrib.layers.python.layers import initializers
import numpy as np
import os
import json
from zipfile import ZipFile
from BiddingKG.dl.common.Utils import viterbi_decode
from BiddingKG.dl.common.Utils import *  # also provides save/load used below


class BiLSTM(object):
    def __init__(self):
        config = {'lstm_dim': 100,
                  'num_chars': 6591,
                  'num_tags': 25,
                  'char_dim': 100,
                  'lr': 0.00002,
                  'input_dropout_keep': 1.0,
                  'optimizer': 'adam',
                  'clip': 5}

        self.config = config
        self.lstm_dim = config["lstm_dim"]
        self.num_chars = config["num_chars"]
        self.num_tags = config["num_tags"]
        self.char_dim = config["char_dim"]
        self.lr = config["lr"]

        self.graph = tf.get_default_graph()
        self.sess = tf.Session(graph=self.graph)
        self.char_to_id, self.id_to_seg = _load_map_file(os.path.dirname(__file__) + "/data/map.zip", "char_map", "ner_map")
        self.id_to_tag = {int(k): v for k, v in self.id_to_seg.items()}
        self.tag_to_id = {v: int(k) for k, v in self.id_to_seg.items()}

        # self.char_embeding = tf.get_variable(name="char_embeding", initializer=embeddings)
        self.char_embeding = tf.get_variable(name="char_embeding", shape=(self.num_chars, self.char_dim))

        # Prepend an all-zero row as a placeholder slot: the released fool
        # weights and the source code disagree by one embedding row.
        self.const = tf.constant(value=0, dtype=tf.float32, shape=[1, 100])
        self.char_embeding = tf.concat([self.const, self.char_embeding], 0)

        self.global_step = tf.Variable(0, trainable=False)
        self.initializer = initializers.xavier_initializer()
        self.char_inputs = tf.placeholder(dtype=tf.int32, shape=[None, None], name="char_inputs")
        self.targets = tf.placeholder(dtype=tf.int32, shape=[None, None], name="targets")
        self.dropout = tf.placeholder(dtype=tf.float32, name="dropout")
        self.lengths = tf.placeholder(dtype=tf.int32, shape=[None, ], name="lengths")
        # self.middle_dropout_keep_prob = tf.placeholder_with_default(1.0, [], name="middle_dropout_keep_prob")
        # self.hidden_dropout_keep_prob = tf.placeholder_with_default(1.0, [], name="hidden_dropout_keep_prob")
        self.input_dropout_keep_prob = tf.placeholder_with_default(config["input_dropout_keep"], [], name="input_dropout_keep_prob")
        self.batch_size = tf.shape(self.char_inputs)[0]
        self.num_steps = tf.shape(self.char_inputs)[-1]

        # forward
        embedding = self.embedding_layer(self.char_inputs)
        lstm_inputs = tf.nn.dropout(embedding, self.input_dropout_keep_prob)
        # bi-directional lstm layer
        lstm_outputs = self.bilstm_layer(lstm_inputs)
        # logits for tags
        self.project_layer(lstm_outputs)
        # loss of the model
        self.loss = self.loss_layer(self.logits, self.lengths)

        with tf.variable_scope("optimizer"):
            optimizer = self.config["optimizer"]
            if optimizer == "sgd":
                self.opt = tf.train.GradientDescentOptimizer(self.lr)
            elif optimizer == "adam":
                self.opt = tf.train.AdamOptimizer(self.lr)
            elif optimizer == "adagrad":
                self.opt = tf.train.AdagradOptimizer(self.lr)
            else:
                raise KeyError("unsupported optimizer: %s" % optimizer)
            # clip each gradient element-wise to [-clip, clip] before applying
            grads_vars = self.opt.compute_gradients(self.loss)
            capped_grads_vars = [[tf.clip_by_value(g, -self.config["clip"], self.config["clip"]), v] for g, v in grads_vars]
            self.train_op = self.opt.apply_gradients(capped_grads_vars, self.global_step)
    def embedding_layer(self, char_inputs):
        with tf.variable_scope("char_embedding"), tf.device('/cpu:0'):
            embed = tf.nn.embedding_lookup(self.char_embeding, char_inputs)
        return embed
    def bilstm_layer(self, lstm_inputs, name=None):
        with tf.variable_scope("char_bilstm" if not name else name):
            lstm_fw_cell = rnn.BasicLSTMCell(self.lstm_dim, state_is_tuple=True)
            lstm_bw_cell = rnn.BasicLSTMCell(self.lstm_dim, state_is_tuple=True)
            outputs, _ = tf.nn.bidirectional_dynamic_rnn(lstm_fw_cell, lstm_bw_cell, lstm_inputs, dtype=tf.float32, sequence_length=self.lengths)
        return tf.concat(outputs, axis=2)
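    # bilstm_layer output has shape [batch_size, num_steps, 2 * lstm_dim]:
    # the forward and backward hidden states concatenated per character.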
    def project_layer(self, lstm_outputs, name=None):
        """Project BiLSTM outputs to per-character tag scores (self.logits)."""
        with tf.variable_scope("project" if not name else name):
            with tf.variable_scope("hidden"):
                w_tanh = tf.get_variable("w_tanh", shape=[self.lstm_dim * 2, self.lstm_dim],
                                         dtype=tf.float32, initializer=self.initializer, regularizer=tf.contrib.layers.l2_regularizer(0.001))
                b_tanh = tf.get_variable("b_tanh", shape=[self.lstm_dim], dtype=tf.float32,
                                         initializer=tf.zeros_initializer())
                output = tf.reshape(lstm_outputs, shape=[-1, self.lstm_dim * 2])
                hidden = tf.tanh(tf.nn.xw_plus_b(output, w_tanh, b_tanh))
                drop_hidden = tf.nn.dropout(hidden, self.dropout)
            # project to score of tags
            with tf.variable_scope("output"):
                w_out = tf.get_variable("w_out", shape=[self.lstm_dim, self.num_tags],
                                        dtype=tf.float32, initializer=self.initializer, regularizer=tf.contrib.layers.l2_regularizer(0.001))
                b_out = tf.get_variable("b_out", shape=[self.num_tags], dtype=tf.float32,
                                        initializer=tf.zeros_initializer())
                pred = tf.nn.xw_plus_b(drop_hidden, w_out, b_out, name="pred")
            self.logits = tf.reshape(pred, [-1, self.num_steps, self.num_tags], name="logits")
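    # project_layer stores per-character tag scores in self.logits with
    # shape [batch_size, num_steps, num_tags].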
    def loss_layer(self, project_logits, lengths, name=None):
        with tf.variable_scope("crf_loss" if not name else name):
            small = -1000.0
            # Augment every sequence with a synthetic start step: the start
            # step scores `small` for all real tags and 0 for the extra start
            # tag, and every real step scores `small` for the start tag.
            start_logits = tf.concat(
                [small * tf.ones(shape=[self.batch_size, 1, self.num_tags]), tf.zeros(shape=[self.batch_size, 1, 1])],
                axis=-1)
            pad_logits = tf.cast(small * tf.ones([self.batch_size, self.num_steps, 1]), tf.float32)
            logits = tf.concat([project_logits, pad_logits], axis=-1)
            logits = tf.concat([start_logits, logits], axis=1)
            targets = tf.concat(
                [tf.cast(self.num_tags * tf.ones([self.batch_size, 1]), tf.int32), self.targets], axis=-1)
            self.trans = tf.get_variable(
                "transitions",
                shape=[self.num_tags + 1, self.num_tags + 1],
                initializer=self.initializer)
            log_likelihood, self.trans = crf_log_likelihood(
                inputs=logits,
                tag_indices=targets,
                transition_params=self.trans,
                sequence_lengths=lengths + 1)
        return tf.reduce_mean(-log_likelihood)
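    # Shape bookkeeping for the augmentation above: with num_tags = 25,
    # logits of shape [B, T, 25] become [B, T + 1, 26] (one synthetic start
    # step, one extra start tag) and targets become [B, T + 1] with tag 25
    # prepended, so crf_log_likelihood runs with sequence_lengths = lengths + 1.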

    def getNodes(self):
        return (self.char_inputs, self.targets, self.lengths, self.dropout,
                self.logits, self.trans, self.loss, self.train_op)
    def initVariables(self):
        dict_tensor_values = getTensorValues()
        with self.graph.as_default():
            init_op = tf.global_variables_initializer()
            self.sess.run(init_op)

            '''
            trainable_variables = tf.trainable_variables()
            for item in trainable_variables:
                print(item.name, "prefix/" + item.name in dict_tensor_values.keys())
                self.sess.run(tf.assign(item, dict_tensor_values["prefix/" + item.name]))
                print((self.sess.run(item) == dict_tensor_values["prefix/" + item.name]).all())
            '''

            # copy each dumped tensor into this graph; _key[7:] strips the
            # "prefix/" namespace added by tf.import_graph_def in load_graph
            for _key in dict_tensor_values.keys():
                self.sess.run(tf.assign(self.graph.get_tensor_by_name(_key[7:]), dict_tensor_values[_key]))

            # print(self.sess.run(tf.nn.embedding_lookup(self.char_embeding, np.array([[1]], dtype=np.int32))))
            # print(self.sess.run(self.char_embeding))
        return self

    def restore(self):
        print("restore weights")
        saver = tf.train.Saver()

        path_add = "0-11/"
        saver.restore(self.sess, os.path.dirname(__file__) + '/model/' + path_add + 'model.ckpt')
        '''
        path_add = "0-4/"
        saver.restore(self.sess, os.path.dirname(__file__) + '/model-server/' + path_add + 'model.ckpt')
        '''
        return self

    def predict(self, sess, sents):
        inputs = []
        lengths = [len(text) for text in sents]
        max_len = max(lengths)
        for sent in sents:
            # map characters to ids, falling back to the <OOV> id
            sent_ids = [self.char_to_id.get(w, self.char_to_id.get("<OOV>")) for w in sent]
            # right-pad every sentence to the batch maximum with id 0
            padding = [0] * (max_len - len(sent_ids))
            sent_ids += padding
            inputs.append(sent_ids)
        inputs = np.array(inputs, dtype=np.int32)
        feed_dict = {
            self.char_inputs: inputs,
            self.lengths: lengths,
            self.dropout: 1.0
        }

        logits, trans = sess.run([self.logits, self.trans], feed_dict=feed_dict)
        path = decode(logits, trans, lengths, self.num_tags)
        labels = [[self.id_to_tag.get(l) for l in p] for p in path]
        return labels
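    # Illustrative call (sentences and labels made up): predict(self.sess,
    # ["招标", "中标公告"]) pads both inputs to the longer length and returns one
    # label list per sentence, e.g. [["O", "O"], ["B_xx", "E_xx", "O", "O"]].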

    def ner(self, text_list):
        text_list = _check_input(text_list)
        ner_labels = self.predict(self.sess, text_list)
        all_entitys = []
        for ti, text in enumerate(text_list):
            ens = []
            entity = ""
            i = 0
            ner_label = ner_labels[ti]
            chars = list(text)
            # decode BMES/O labels of the form "B_type" into entity spans
            for label, word in zip(ner_label, chars):
                i += 1
                if label == "O":
                    continue
                lb, lt = label.split("_")[0], label.split("_")[1]
                if lb == "S":        # single-character entity
                    ens.append((i, i + 1, lt, word))
                elif lb == "B":      # entity begins
                    entity = ""
                    entity += word
                elif lb == "M":      # entity continues
                    entity += word
                elif lb == "E":      # entity ends
                    entity += word
                    ens.append((i - len(entity), i + 1, lt, entity))
                    entity = ""
            if entity:
                # flush an entity left open at the end of the sentence
                ens.append((i - len(entity), i + 1, lt, entity))
            all_entitys.append(ens)
        return all_entitys
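# Worked example for BiLSTM.ner (labels are illustrative): for chars "ABCDE"
# tagged ["B_k", "M_k", "E_k", "O", "S_j"], the loop above emits
# [(0, 4, "k", "ABC"), (5, 6, "j", "E")] -- spans are (i - len(entity), i + 1)
# with the 1-based running counter i, exactly as the code computes them.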

def _check_input(text, ignore=False):
    if not text:
        return []
    if not isinstance(text, list):
        text = [text]
    null_index = [i for i, t in enumerate(text) if not t]
    if null_index and not ignore:
        raise Exception("null text in input")
    return text

def _load_map_file(path, char_map_name, id_map_name):
    with ZipFile(path) as myzip:
        with myzip.open('all_map.json') as myfile:
            content = myfile.readline()
            content = content.decode()
            data = json.loads(content)
            return data.get(char_map_name), data.get(id_map_name)
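# all_map.json inside map.zip is expected to carry both maps under the key
# names passed in from __init__, e.g. (values illustrative):
# {"char_map": {"<OOV>": 1, "中": 2, ...}, "ner_map": {"0": "O", "1": "B_xx", ...}}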

def decode(logits, trans, sequence_lengths, tag_num):
    viterbi_sequences = []
    small = -1000.0
    start = np.asarray([[small] * tag_num + [0]])
    for logit, length in zip(logits, sequence_lengths):
        score = logit[:length]
        pad = small * np.ones([length, 1])
        score = np.concatenate([score, pad], axis=1)
        score = np.concatenate([start, score], axis=0)
        viterbi_seq, viterbi_score = viterbi_decode(score, trans)
        viterbi_sequences.append(viterbi_seq[1:])
    return viterbi_sequences
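# decode() re-applies the training-time augmentation: each [length, tag_num]
# score matrix grows to [length + 1, tag_num + 1] (a start row from `start`
# plus a heavily penalized extra tag column) so it matches the learned
# [num_tags + 1, num_tags + 1] transition matrix; the synthetic first step
# is then dropped via viterbi_seq[1:].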

def load_graph(path="D://Anaconda3.4//envs//dl_nlp//fool//ner.pb"):
    # default path points at a local copy of the fool ner.pb frozen graph
    with tf.gfile.GFile(path, mode='rb') as f:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(f.read())
    with tf.Graph().as_default() as graph:
        tf.import_graph_def(graph_def, name="prefix")
    return graph

def printModel():
    with tf.gfile.GFile("D://Anaconda3.4//envs//dl_nlp//fool//ner.pb", mode='rb') as f:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(f.read())
        for i, n in enumerate(graph_def.node):
            print("Name of the node - %s" % n.name)
    with tf.Graph().as_default() as graph:
        tf.import_graph_def(graph_def)
        # trans = graph.get_tensor_by_name("prefix/crf_loss/transitions:0")
        # logits = graph.get_tensor_by_name("prefix/project/logits:0")
        summaryWriter = tf.summary.FileWriter('log/', graph)
        # tf.Graph().get_operations()

def getTensorValues():
    # dump the weights of the frozen fool graph once and cache them on disk
    tensor_file = 'dict_tensor_values.pk'
    if os.path.exists(tensor_file):
        return load(tensor_file)
    graph = load_graph()
    with graph.as_default():
        list_tensor_names = ["prefix/char_embeding:0",
                             "prefix/char_bilstm/bidirectional_rnn/fw/basic_lstm_cell/kernel:0",
                             "prefix/char_bilstm/bidirectional_rnn/fw/basic_lstm_cell/bias:0",
                             "prefix/char_bilstm/bidirectional_rnn/bw/basic_lstm_cell/kernel:0",
                             "prefix/char_bilstm/bidirectional_rnn/bw/basic_lstm_cell/bias:0",
                             "prefix/project/hidden/w_tanh:0",
                             "prefix/project/hidden/b_tanh:0",
                             "prefix/project/output/w_out:0",
                             "prefix/project/output/b_out:0",
                             "prefix/crf_loss/transitions:0"]
        dict_tensor_values = dict()
        sess = tf.Session()
        for tensor_name in list_tensor_names:
            dict_tensor_values[tensor_name] = sess.run(graph.get_tensor_by_name(tensor_name))
        sess.close()
    save(dict_tensor_values, tensor_file)
    return dict_tensor_values

if __name__ == "__main__":
    # printModel()
    getTensorValues()
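    # Hedged usage sketch: after getTensorValues() has cached the fool
    # weights, something like the following should run end-to-end
    # (assumes data/map.zip is present; the sample sentence is illustrative):
    # model = BiLSTM().initVariables()
    # print(model.ner(["南京市第一人民医院招标公告"]))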
|