@@ -74,8 +74,8 @@ class CodeNamePredict():
         id_PN_B = self.class_labels.index("PN_B")
         id_PN_M = self.class_labels.index("PN_M")
         id_PN_E = self.class_labels.index("PN_E")
-        self.PC_pattern = re.compile(str(id_PC_B)+str(id_PC_M)+"+"+str(id_PC_E)+"?")
-        self.PN_pattern = re.compile(str(id_PN_B)+str(id_PN_M)+"+"+str(id_PN_E)+"?")
+        self.PC_pattern = re.compile(str(id_PC_B)+str(id_PC_M)+"*"+str(id_PC_E))
+        self.PN_pattern = re.compile(str(id_PN_B)+str(id_PN_M)+"*"+str(id_PN_E))
         print("pc",self.PC_pattern)
         print("pn",self.PN_pattern)
         self.word2index = dict((w,i) for i,w in enumerate(np.array(self.vocab)))
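Note on the pattern change above: the old form `B M+ E?` required at least one middle tag and tolerated a missing end tag, so it could match a truncated span like "45"; the new form `B M* E` allows zero middle tags but insists on an explicit end tag. A minimal sketch, assuming PC_B/PC_M/PC_E map to single-digit label ids 4/5/6 as in the chunk_tags dict introduced later in this diff:

    import re

    # Hypothetical ids: PC_B=4, PC_M=5, PC_E=6 (all single digits, so
    # matching over the joined tag-id string is unambiguous).
    new_pattern = re.compile("45*6")   # B M* E  -- end tag now required
    old_pattern = re.compile("45+6?")  # B M+ E? -- end tag was optional

    join_predict = "0455604600"  # toy tag sequence joined into a string
    print([m.group() for m in new_pattern.finditer(join_predict)])  # ['4556', '46']
    print([m.group() for m in old_pattern.finditer(join_predict)])  # ['4556'] only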
@@ -100,14 +100,18 @@ class CodeNamePredict():
             log("get model of codename")
             with self.sess_codename.as_default():
                 with self.sess_codename.graph.as_default():
-                    meta_graph_def = tf.saved_model.loader.load(self.sess_codename, ["serve"], export_dir=os.path.dirname(__file__)+"/codename_savedmodel")
+                    meta_graph_def = tf.saved_model.loader.load(self.sess_codename, ["serve"], export_dir=os.path.dirname(__file__)+"/codename_savedmodel_tf")
                     signature_key = tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY
                     signature_def = meta_graph_def.signature_def
                     self.inputs = self.sess_codename.graph.get_tensor_by_name(signature_def[signature_key].inputs["inputs"].name)
-                    self.outputs = self.sess_codename.graph.get_tensor_by_name(signature_def[signature_key].outputs["outputs"].name)
-                    return self.inputs,self.outputs
+                    self.inputs_length = self.sess_codename.graph.get_tensor_by_name(signature_def[signature_key].inputs["inputs_length"].name)
+                    self.keepprob = self.sess_codename.graph.get_tensor_by_name(signature_def[signature_key].inputs["keepprob"].name)
+                    self.logits = self.sess_codename.graph.get_tensor_by_name(signature_def[signature_key].outputs["logits"].name)
+                    self.trans = self.sess_codename.graph.get_tensor_by_name(signature_def[signature_key].outputs["trans"].name)
+
+                    return self.inputs,self.inputs_length,self.keepprob,self.logits,self.trans
         else:
-            return self.inputs,self.outputs
+            return self.inputs,self.inputs_length,self.keepprob,self.logits,self.trans
         '''
         if self.model is None:
             self.model = self.getBiLSTMCRFModel(self.MAX_LEN, self.vocab, self.EMBED_DIM, self.BiRNN_UNITS, self.class_labels,weights=None)
@@ -198,7 +202,14 @@ class CodeNamePredict():
             else:
                 result = symbol_dict.get(rightfinds[0])+data
             return result
-
+
+    def decode(self,logits, trans, sequence_lengths, tag_num):
+        viterbi_sequences = []
+        for logit, length in zip(logits, sequence_lengths):
+            score = logit[:length]
+            viterbi_seq, viterbi_score = viterbi_decode(score, trans)
+            viterbi_sequences.append(viterbi_seq)
+        return viterbi_sequences

     def predict(self,list_sentences,list_entitys=None,MAX_AREA = 5000):
         #@summary: get the code and name of each article
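Reviewer note: `decode` relies on `viterbi_decode` being in scope (in TF 1.x that is `tensorflow.contrib.crf.viterbi_decode`, which works on plain numpy arrays), and the `tag_num` argument is currently unused. A minimal shape check with made-up values:

    import numpy as np
    from tensorflow.contrib.crf import viterbi_decode

    logits = np.random.rand(2, 10, 7)  # (batch, time, num_tags)
    trans = np.random.rand(7, 7)       # CRF transition matrix
    lengths = [10, 6]                  # true (unpadded) sequence lengths

    for logit, length in zip(logits, lengths):
        path, score = viterbi_decode(logit[:length], trans)
        assert len(path) == length     # one tag id per real token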
@@ -228,7 +239,7 @@ class CodeNamePredict():
             list_sentence.sort(key=lambda x:len(x.sentence_text),reverse=True)
             _begin_index = 0

-            item = [doc_id,{"code":[],"name":""}]
+            item = {"code":[],"name":""}
             code_set = set()
             dict_name_freq_score = dict()
             while(True):
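The result item loses its `[doc_id, ...]` wrapper here, so every later access changes from `item[1][...]` to `item[...]`; the consumer in RoleRulePredictor is updated to match further down in this diff. A toy illustration of the new shape (values made up):

    item = {"code": [], "name": ""}
    item["code"].append("XYZ-2020-001")   # hypothetical project code
    item["name"] = "example project"
    # old shape: [doc_id, {"code": [...], "name": "..."}]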
@@ -237,10 +248,13 @@ class CodeNamePredict():
                 MAX_LEN = MAX_AREA
                 _LEN = MAX_AREA//MAX_LEN
                 # prediction
-                x = [[self.word2index.get(word,index_unk)for word in sentence.sentence_text[:MAX_AREA]]for sentence in list_sentence[_begin_index:_begin_index+_LEN]]
+
+                # x = [[self.word2index.get(word,index_pad)for word in sentence.sentence_text[:MAX_AREA]]for sentence in list_sentence[_begin_index:_begin_index+_LEN]]
+                x = [[getIndexOfWord(word) for word in sentence.sentence_text[:MAX_AREA]]for sentence in list_sentence[_begin_index:_begin_index+_LEN]]
+                x_len = [len(_x) if len(_x) < MAX_LEN else MAX_LEN for _x in x]
                 x = pad_sequences(x,maxlen=MAX_LEN,padding="post",truncating="post")
+
                 if USE_PAI_EAS:
-
                     request = tf_predict_pb2.PredictRequest()
                     request.inputs["inputs"].dtype = tf_predict_pb2.DT_INT32
                     request.inputs["inputs"].array_shape.dim.extend(np.shape(x))
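Minor readability suggestion for the `x_len` line above; the following is behaviorally equivalent:

    x_len = [min(len(_x), MAX_LEN) for _x in x]  # clamp each true length to the padded width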
@@ -256,15 +270,20 @@ class CodeNamePredict():
                     predict_y = self.sess_codename.run(t_output,feed_dict={t_input:x})
                 else:
                     with self.sess_codename.as_default():
-                        t_input,t_output = self.getModel()
-                        predict_y = self.sess_codename.run(t_output,feed_dict={t_input:x})
+                        t_input,t_input_length,t_keepprob,t_logits,t_trans = self.getModel()
+                        _logits,_trans = self.sess_codename.run([t_logits,t_trans],feed_dict={t_input:x,
+                                                                                              t_input_length:x_len,
+                                                                                              t_keepprob:1.0})
+                        predict_y = self.decode(_logits,_trans,x_len,7)
+                        # print('==========',_logits)
+
                 '''
                 for item11 in np.argmax(predict_y,-1):
                     print(item11)
                 print(predict_y)
                 '''
                 # print(predict_y)
-                for sentence,predict in zip(list_sentence[_begin_index:_begin_index+_LEN],np.argmax(predict_y,-1)):
+                for sentence,predict in zip(list_sentence[_begin_index:_begin_index+_LEN],np.array(predict_y)):
                     pad_sentence = sentence.sentence_text[:MAX_LEN]
                     join_predict = "".join([str(s) for s in predict])
                     # print(pad_sentence)
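Reviewer note: since `decode` returns per-sentence tag paths of differing lengths, `np.array(predict_y)` generally yields a dtype=object array (and NumPy 1.24+ raises unless dtype=object is passed explicitly); iterating the plain list would work just as well, because the string join accepts any iterable. Tiny demonstration with toy values:

    import numpy as np

    predict_y = [[0, 4, 5, 6], [0, 1]]        # ragged viterbi paths
    arr = np.array(predict_y, dtype=object)   # explicit dtype avoids the NumPy 1.24+ error
    for path in arr:
        print("".join(str(t) for t in path))  # "0456", then "01"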
@@ -323,7 +342,7 @@ class CodeNamePredict():

                     if the_code not in code_set:
                         code_set.add(the_code)
-                        item[1]['code'] = list(code_set)
+                        item['code'] = list(code_set)
                     for iter in re.finditer(self.PN_pattern,join_predict):
                         _name = self.fitDataByRule(pad_sentence[iter.span()[0]:iter.span()[1]])

@@ -352,27 +371,28 @@ class CodeNamePredict():
             list_name_freq_score = []

             # 2020/11/23 rule adjustment for large websites
-            name_re1 = '(项目|工程|招标|合同|标项|标的|计划|询价|询价单|询价通知书|申购)(名称|标题|主题)[::\s]+([^,。:;]{2,60})[,。]'
-            for sentence in list_sentence:
-                # pad_sentence = sentence.sentence_text
-                othername = re.search(name_re1, sentence.sentence_text)
-                if othername != None:
-                    project_name = othername.group(3)
-                    beg = find_index([project_name], sentence.sentence_text)[0]
-                    end = beg + len(project_name)
-                    _name = self.fitDataByRule(sentence.sentence_text[beg:end])
-                    # add name to entitys
-                    _entity = Entity(doc_id=sentence.doc_id, entity_id="%s_%s_%s_%s" % (
-                        sentence.doc_id, sentence.sentence_index, beg, end), entity_text=_name,
-                        entity_type="name", sentence_index=sentence.sentence_index, begin_index=0,
-                        end_index=0, wordOffset_begin=beg, wordOffset_end=end)
-                    list_entity.append(_entity)
-                    w = 1
-                    if _name not in dict_name_freq_score:
-                        # dict_name_freq_score[_name] = [1,len(re.findall(pattern_score,_name))+len(_name)*0.1]
-                        dict_name_freq_score[_name] = [1, (len(re.findall(pattern_score, _name)) + len(_name) * 0.05) * w]
-                    else:
-                        dict_name_freq_score[_name][0] += 1
+            if len(dict_name_freq_score) == 0:
+                name_re1 = '(项目|工程|招标|合同|标项|标的|计划|询价|询价单|询价通知书|申购)(名称|标题|主题)[::\s]+([^,。:;]{2,60})[,。]'
+                for sentence in list_sentence:
+                    # pad_sentence = sentence.sentence_text
+                    othername = re.search(name_re1, sentence.sentence_text)
+                    if othername != None:
+                        project_name = othername.group(3)
+                        beg = find_index([project_name], sentence.sentence_text)[0]
+                        end = beg + len(project_name)
+                        _name = self.fitDataByRule(sentence.sentence_text[beg:end])
+                        # add name to entitys
+                        _entity = Entity(doc_id=sentence.doc_id, entity_id="%s_%s_%s_%s" % (
+                            sentence.doc_id, sentence.sentence_index, beg, end), entity_text=_name,
+                            entity_type="name", sentence_index=sentence.sentence_index, begin_index=0,
+                            end_index=0, wordOffset_begin=beg, wordOffset_end=end)
+                        list_entity.append(_entity)
+                        w = 1
+                        if _name not in dict_name_freq_score:
+                            # dict_name_freq_score[_name] = [1,len(re.findall(pattern_score,_name))+len(_name)*0.1]
+                            dict_name_freq_score[_name] = [1, (len(re.findall(pattern_score, _name)) + len(_name) * 0.05) * w]
+                        else:
+                            dict_name_freq_score[_name][0] += 1
             # othername = re.search(name_re1, sentence.sentence_text)
             # if othername != None:
             #     _name = othername.group(3)
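The block above is unchanged apart from being re-indented under `if len(dict_name_freq_score) == 0:`, so the regex fallback only fires when the model produced no name candidates. For reference, what `name_re1` captures in group(3), using a hypothetical sentence:

    import re

    name_re1 = '(项目|工程|招标|合同|标项|标的|计划|询价|询价单|询价通知书|申购)(名称|标题|主题)[::\s]+([^,。:;]{2,60})[,。]'
    m = re.search(name_re1, "项目名称:某某小学教学楼改造工程,")
    print(m.group(3))  # -> 某某小学教学楼改造工程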
@@ -386,7 +406,7 @@ class CodeNamePredict():
             # print(list_name_freq_score)
             if len(list_name_freq_score)>0:
                 list_name_freq_score.sort(key=lambda x:x[1][0]*x[1][1],reverse=True)
-                item[1]['name'] = list_name_freq_score[0][0]
+                item['name'] = list_name_freq_score[0][0]
                 # if list_name_freq_score[0][1][0]>1:
                 #     item[1]['name'] = list_name_freq_score[0][0]
                 # else:
@@ -394,7 +414,7 @@ class CodeNamePredict():
             #         item[1]["name"] = list_name_freq_score[0][0]

             # the code below uses regexes to pick up project codes the model failed to recognize
-            if item[1]['code'] == []:
+            if item['code'] == []:
                 for sentence in list_sentence:
                     # othercode = re.search('(采购计划编号|询价编号)[\))]?[::]?([\[\]a-zA-Z0-9\-]{5,30})', sentence.sentence_text)
                     # if othercode != None:
@@ -402,7 +422,7 @@ class CodeNamePredict():
                     # 2020/11/23 rule adjustment for large websites
                     othercode = re.search('(项目|采购|招标|品目|询价|竞价|询价单|磋商|订单|账单|交易|文件|计划|场次|标的|标段|标包|分包|标段\(包\)|招标文件|合同|通知书|公告)(单号|编号|标号|编码|代码|备案号|号)[::\s]+([^,。;:、]{8,30}[a-zA-Z0-9\号])[\),。]', sentence.sentence_text)
                     if othercode != None:
-                        item[1]['code'].append(othercode.group(3))
+                        item['code'].append(othercode.group(3))
             result.append(item)

             list_sentence.sort(key=lambda x: x.sentence_index,reverse=False)
@@ -884,7 +904,7 @@ class RoleRulePredictor():


         for article,list_entity,list_sentence,list_codename in zip(list_articles,list_entitys,list_sentences,list_codenames):
-            list_name = list_codename[1]["name"]
+            list_name = list_codename["name"]
             list_name = self._check_input(list_name)+[article.title]
             for p_entity in list_entity:

@@ -1168,8 +1188,8 @@ class TimePredictor():
                 return
             points_entitys = datas[1]
             with self.sess.as_default():
-                predict_y = self.sess.run(self.outputs_code, feed_dict={self.inputs_code[0]:datas[0][0]
-                                                                        ,self.inputs_code[1]:datas[0][1]})
+                predict_y = limitRun(self.sess,[self.outputs_code], feed_dict={self.inputs_code[0]:datas[0][0]
+                                                                               ,self.inputs_code[1]:datas[0][1]})[0]
                 for i in range(len(predict_y)):
                     entity = points_entitys[i]
                     label = np.argmax(predict_y[i])
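Reviewer note: `limitRun` is a project helper defined elsewhere in this repo; judging purely from the call site, it appears to run the fetches in bounded batches so a very large feed does not exhaust memory, returning one array per fetch. A hypothetical sketch of that intent (signature and batching behavior are assumptions, not the real implementation):

    import numpy as np

    def limitRun(sess, fetches, feed_dict, MAX_BATCH=1024):
        # Run `fetches` over the feed in fixed-size chunks and concatenate.
        keys = list(feed_dict.keys())
        total = len(feed_dict[keys[0]])
        results = [[] for _ in fetches]
        for start in range(0, total, MAX_BATCH):
            part = {k: feed_dict[k][start:start+MAX_BATCH] for k in keys}
            outs = sess.run(fetches, feed_dict=part)
            for i, out in enumerate(outs):
                results[i].extend(out)
        return [np.array(r) for r in results]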
@@ -1217,6 +1237,78 @@ def getBiLSTMCRFModel(MAX_LEN,vocab,EMBED_DIM,BiRNN_UNITS,chunk_tags,weights):
     model.compile(optimizer = 'adam', loss = crf.loss_function, metrics = [crf.accuracy])
     return model

+from tensorflow.contrib.crf import crf_log_likelihood
+from tensorflow.contrib.layers.python.layers import initializers
+def BiLSTM_CRF_tfmodel(sess,weights):
+    BiRNN_Units = 200
+    chunk_tags = {
+        'O': 0,
+        'PN_B': 1,
+        'PN_M': 2,
+        'PN_E': 3,
+        'PC_B': 4,
+        'PC_M': 5,
+        'PC_E': 6,
+    }
+
+    def embedding_layer(input,keepprob):
+        embedding = tf.get_variable("embedding",initializer=np.array(weights,dtype=np.float32) if weights is not None else None,dtype=tf.float32)
+        embedding = tf.nn.embedding_lookup(params=embedding,ids=input)
+        embedding = tf.nn.dropout(embedding,keepprob)
+        return embedding
+
+    def BiLSTM_Layer(input,length):
+        with tf.variable_scope("BiLSTM"):
+            forward_cell = tf.contrib.rnn.BasicLSTMCell(BiRNN_Units//2,state_is_tuple=True)
+            backward_cell = tf.contrib.rnn.BasicLSTMCell(BiRNN_Units//2,state_is_tuple=True)
+            output, _ = tf.nn.bidirectional_dynamic_rnn(forward_cell,backward_cell,input,dtype=tf.float32,sequence_length=length)
+            output = tf.concat(output,2)
+        return output
+
+    def CRF_layer(input,num_tags,BiRNN_Units,time_step,keepprob):
+        with tf.variable_scope("CRF"):
+            with tf.variable_scope("hidden"):
+                w_hidden = tf.get_variable(name='w_hidden',shape=(BiRNN_Units,BiRNN_Units//2),dtype=tf.float32,
+                                           initializer=initializers.xavier_initializer(),regularizer=tf.contrib.layers.l2_regularizer(0.001))
+                b_hidden = tf.get_variable(name='b_hidden',shape=(BiRNN_Units//2),dtype=tf.float32,initializer=tf.zeros_initializer())
+                # print(input)
+                input_reshape = tf.reshape(input,shape=(-1,BiRNN_Units))
+                hidden = tf.tanh(tf.nn.xw_plus_b(input_reshape,w_hidden,b_hidden))
+                hidden = tf.nn.dropout(hidden,keepprob)
+            with tf.variable_scope("output"):
+                w_output = tf.get_variable(name='w_output',shape=(BiRNN_Units//2,num_tags),dtype=tf.float32,initializer=initializers.xavier_initializer(),regularizer=tf.contrib.layers.l2_regularizer(0.001))
+                b_output = tf.get_variable(name='b_output',shape=(num_tags),dtype=tf.float32,initializer=tf.zeros_initializer())
+                pred = tf.nn.xw_plus_b(hidden,w_output,b_output)
+                logits_ = tf.reshape(pred,shape=(-1,time_step,num_tags),name='logits')
+        return logits_
+
+    def layer_loss(input,true_target,num_tags,length):
+        with tf.variable_scope("crf_loss"):
+            trans = tf.get_variable(name='transitons',shape=(num_tags,num_tags),dtype=tf.float32,initializer=initializers.xavier_initializer())
+            log_likelihood,trans = crf_log_likelihood(inputs=input,tag_indices=true_target,transition_params=trans,sequence_lengths=length)
+        return tf.reduce_mean(-log_likelihood),trans
+
+    with sess.graph.as_default():
+        char_input = tf.placeholder(name='char_input',shape=(None,None),dtype=tf.int32)
+        target = tf.placeholder(name='target',shape=(None,None),dtype=tf.int32)
+        length = tf.placeholder(name='length',shape=(None,),dtype=tf.int32)
+        keepprob = tf.placeholder(name='keepprob',dtype=tf.float32)
+
+        _embedding = embedding_layer(char_input,keepprob)
+        _shape = tf.shape(char_input)
+        batch_size = _shape[0]
+        step_size = _shape[-1]
+        bilstm = BiLSTM_Layer(_embedding,length)
+        _logits = CRF_layer(bilstm,num_tags=len(chunk_tags),BiRNN_Units=BiRNN_Units,time_step=step_size,keepprob=keepprob)
+        crf_loss,trans = layer_loss(_logits,true_target=target,num_tags=len(chunk_tags),length=length)
+        global_step = tf.Variable(0,trainable=False)
+        with tf.variable_scope("optimizer"):
+            opt = tf.train.AdamOptimizer(0.002)
+            grads_vars = opt.compute_gradients(crf_loss)
+            capped_grads_vars = [[tf.clip_by_value(g,-5,5),v] for g,v in grads_vars]
+            train_op = opt.apply_gradients(capped_grads_vars,global_step)
+        return char_input,_logits,target,keepprob,length,crf_loss,trans,train_op
+
 import h5py
 def h5_to_graph(sess,graph,h5file):

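Reviewer notes on the new graph builder: `batch_size` is computed but never used, and the misspelled variable name 'transitons' should stay as-is, since the checkpoint restored below was trained under that name. A hedged usage sketch under TF 1.x with tf.contrib available (embedding matrix and feed values are made up):

    import numpy as np
    import tensorflow as tf

    sess = tf.Session()
    char_input, logits, target, keepprob, length, crf_loss, trans, train_op = \
        BiLSTM_CRF_tfmodel(sess, weights=np.random.rand(5000, 60).astype(np.float32))
    sess.run(tf.global_variables_initializer())
    _, loss = sess.run([train_op, crf_loss],
                       feed_dict={char_input: np.zeros((2, 30), np.int32),
                                  target: np.zeros((2, 30), np.int32),
                                  length: [30, 25],
                                  keepprob: 0.8})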
@@ -1310,40 +1402,48 @@ def initialize_uninitialized(sess):


 def save_codename_model():
-    filepath = "../projectCode/models/model_project_"+str(60)+"_"+str(200)+".hdf5"
+    # filepath = "../projectCode/models/model_project_"+str(60)+"_"+str(200)+".hdf5"
+    filepath = "models_tf/32-L0.565985563055-F0.8640033553528363-P0.85770792130738-R0.8703918876095912/model.ckpt"
     vocabpath = "../projectCode/models/vocab.pk"
     classlabelspath = "../projectCode/models/classlabels.pk"
-    vocab = load(vocabpath)
-    class_labels = load(classlabelspath)
+    # vocab = load(vocabpath)
+    # class_labels = load(classlabelspath)
+    vocab_model = getModel_word()
+    vocab, w2v_matrix = getVocabAndMatrix(vocab_model, Embedding_size=60)
     graph = tf.get_default_graph()
     with graph.as_default() as g:
         ''''''
-        model = getBiLSTMCRFModel(None, vocab, 60, 200, class_labels,weights=None)
+        # model = getBiLSTMCRFModel(None, vocab, 60, 200, class_labels,weights=None)
         #model = models.load_model(filepath,custom_objects={'precision':precision,'recall':recall,'f1_score':f1_score,"CRF":CRF,"loss":CRF.loss_function})

-        #sess = tf.Session(graph=g)
-        sess = tf.keras.backend.get_session()
-
+        sess = tf.Session(graph=g)
+        # sess = tf.keras.backend.get_session()
+        char_input, logits, target, keepprob, length, crf_loss, trans, train_op = BiLSTM_CRF_tfmodel(sess, w2v_matrix)
         #with sess.as_default():
         sess.run(tf.global_variables_initializer())
-        print(sess.run("time_distributed_1/kernel:0"))
-        model.load_weights(filepath)
-
-
-
-        print("#",sess.run("time_distributed_1/kernel:0"))
+        # print(sess.run("time_distributed_1/kernel:0"))
+        # model.load_weights(filepath)
+        saver = tf.train.Saver()
+        saver.restore(sess, filepath)
+
+        print("logits", sess.run(logits, feed_dict={char_input: [[0]], length: [1], keepprob: 1.0}))  # dummy feed: logits depends on the placeholders, so a bare sess.run(logits) would fail

-        x = load("codename_x.pk")
+        # print("#",sess.run("time_distributed_1/kernel:0"))
+
+        # x = load("codename_x.pk")
         #y = model.predict(x)
-        y = sess.run(model.output,feed_dict={model.input:x})
+        # y = sess.run(model.output,feed_dict={model.input:x})

-        for item in np.argmax(y,-1):
-            print(item)
+        # for item in np.argmax(y,-1):
+        #     print(item)
         tf.saved_model.simple_save(
             sess,
-            "./codename_savedmodel/",
-            inputs={"inputs": model.input},
-            outputs={"outputs": model.output}
+            "./codename_savedmodel_tf/",
+            inputs={"inputs": char_input,
+                    "inputs_length":length,
+                    'keepprob':keepprob},
+            outputs={"logits": logits,
+                     "trans":trans}
         )


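Once `save_codename_model()` has been run, the exported signature can be verified with the standard TF 1.x SavedModel CLI (directory name as in this diff):

    saved_model_cli show --dir ./codename_savedmodel_tf --tag_set serve --signature_def serving_default

which should list inputs `inputs`, `inputs_length`, `keepprob` and outputs `logits`, `trans`, matching what `getModel()` looks up above.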
@@ -1457,12 +1557,13 @@ def save_timesplit_model():

 if __name__=="__main__":
     #save_role_model()
-    #save_codename_model()
+    # save_codename_model()
+    save_codename_model()
     #save_money_model()
     #save_person_model()
     #save_form_model()
     #save_codesplit_model()
-    save_timesplit_model()
+    # save_timesplit_model()
     '''
     with tf.Session(graph=tf.Graph()) as sess:
         from tensorflow.python.saved_model import tag_constants