1234567891011121314151617181920212223242526272829303132333435363738394041424344454647484950515253545556575859606162636465666768697071727374757677787980818283848586878889 |
'''
Created on 2019-03-26

@author: User
'''
- import os
- import sys
- sys.path.append(os.path.abspath("../.."))
- from BiddingKG.dl.common.Utils import *
- from keras import layers
- from keras import models
- from keras import losses,optimizers
- import tensorflow as tf
def getBiLSTMModel(input_shape=(100, 60)):
    """Build and compile a BiLSTM two-class sequence classifier.

    BUGFIX: the original used ``Input(shape=(100,))`` — a 2-D (batch, 100)
    tensor — but ``layers.LSTM`` requires a 3-D (batch, timesteps, features)
    input and raises at graph-construction time.  The default shape now
    matches the (100, 60) input used by ``getTextCNNModel`` in this file.

    :param input_shape: (timesteps, features) of the input sequences;
        defaults to (100, 60) for backward-compatible callers.
    :return: a compiled keras ``Model`` mapping (batch, *input_shape)
        to (batch, 2) softmax probabilities.
    """
    seq_input = layers.Input(shape=input_shape)

    # Final hidden states of the forward and backward LSTMs, concatenated
    # (return_sequences=False -> a single 256-dim vector per sample).
    lstm = layers.Bidirectional(layers.LSTM(128, return_sequences=False))(seq_input)

    hidden = layers.Dense(48, activation="relu")(lstm)

    # Two-class softmax head.
    out = layers.Dense(2, activation="softmax")(hidden)

    model = models.Model(inputs=seq_input, outputs=out)

    # precision/recall/f1_score come from BiddingKG.dl.common.Utils
    # via the star import at the top of the file.
    model.compile(optimizer=optimizers.Adam(lr=0.01),
                  loss=losses.categorical_crossentropy,
                  metrics=[precision, recall, f1_score])
    model.summary()
    return model
def getTextCNNModel():
    """Build and compile a TextCNN two-class classifier.

    Three parallel Conv1D branches (kernel sizes 10/15/20) are each
    average-pooled over their full output length, concatenated, flattened
    and fed through a small dense head.

    BUGFIX: ``layers.merge(list_pool, mode="concat")`` was removed in
    Keras 2; ``layers.concatenate`` is the supported functional-API
    equivalent and produces the same concatenation along the last axis.

    :return: a compiled keras ``Model`` mapping (batch, 100, 60) inputs
        to (batch, 2) softmax probabilities.
    """
    seq_input = layers.Input(shape=(100, 60))

    list_pool = []
    for size in [10, 15, 20]:
        c = layers.Conv1D(filters=4, kernel_size=size, strides=1,
                          activation="relu")(seq_input)
        # Pool over the entire valid-convolution length -> one vector
        # per filter (a global average pool for this branch).
        p = layers.AvgPool1D(pool_size=int(c.shape[1]))(c)
        list_pool.append(p)

    concat = layers.concatenate(list_pool)

    flatten = layers.Flatten()(concat)

    hidden = layers.Dense(12, activation="relu")(flatten)

    out = layers.Dense(2, activation="softmax")(hidden)

    model = models.Model(inputs=seq_input, outputs=out)
    # precision/recall/f1_score come from BiddingKG.dl.common.Utils
    # via the star import at the top of the file.
    model.compile(optimizer=optimizers.Adadelta(),
                  loss=losses.categorical_crossentropy,
                  metrics=[precision, recall, f1_score])
    model.summary()
    return model
def get_context_form_model(vocab_len, char_dim, lstm_dim, context_dim):
    """Build a TF1 graph embedding a window of 9 context tokens (30 chars each).

    Fixes vs the original (which could not even construct the graph):
    - ``tf.placeholder`` requires a dtype as its first argument, and
      ``tf.nn.embedding_lookup`` needs integer ids, so the input is int32.
    - ``tf.nn.static_bidirectional_rnn`` takes a python list of per-timestep
      [batch, char_dim] tensors (``tf.unstack``), not one 3-D tensor, and
      needs an explicit ``dtype`` when no initial state is supplied.
    - The returned fw/bw states are ``LSTMStateTuple``s; their ``.h``
      members are concatenated to obtain the [-1, lstm_dim*2] final outputs
      the original comment describes.
    - TF1 ``tf.matmul`` cannot multiply a 3-D activation by a 2-D weight;
      ``tf.tensordot`` performs that projection.

    :param vocab_len: size of the character vocabulary.
    :param char_dim: character-embedding dimensionality.
    :param lstm_dim: hidden units per LSTM direction.
    :param context_dim: output dimensionality of the context projection.
    :return: (input placeholder, context embedding [batch, 9, context_dim],
        center position output [batch, 1, lstm_dim*2]).
        NOTE(review): the original returned nothing (it looks unfinished);
        exposing the graph tensors is backward compatible since old callers
        could only have ignored the ``None`` result.
    """
    # [batch, 9 context positions, 30 character ids per position]
    char_input = tf.placeholder(dtype=tf.int32, shape=[None, 9, 30], name="input")
    with tf.variable_scope("embedding"):
        embedding = tf.get_variable("char_embedding", shape=[vocab_len, char_dim])
        embedding_input = tf.nn.embedding_lookup(embedding, char_input)
    with tf.variable_scope("bi_rnn"):
        # Fold the 9 positions into the batch axis: [batch*9, 30, char_dim].
        resize_input = tf.reshape(embedding_input, [-1, 30, char_dim])
        forward_cell = tf.contrib.rnn.BasicLSTMCell(lstm_dim, state_is_tuple=True)
        backward_cell = tf.contrib.rnn.BasicLSTMCell(lstm_dim, state_is_tuple=True)
        # static_bidirectional_rnn expects a length-30 list of [batch*9, char_dim].
        step_inputs = tf.unstack(resize_input, 30, axis=1)
        outputs, forward_state, backward_state = tf.nn.static_bidirectional_rnn(
            forward_cell, backward_cell, step_inputs, dtype=tf.float32)
        # Final hidden states of both directions: [batch*9, lstm_dim*2].
        bi_output = tf.concat([forward_state.h, backward_state.h], -1)
    with tf.variable_scope("context"):
        # Restore the 9-position axis: [batch, 9, lstm_dim*2].
        context_out = tf.reshape(bi_output, [-1, 9, lstm_dim * 2])
        context_v = tf.get_variable("context_v", shape=[lstm_dim * 2, context_dim])
        # 3-D x 2-D projection -> [batch, 9, context_dim].
        context_emb = tf.nn.relu(
            tf.tensordot(context_out, context_v, axes=[[2], [0]]))
        # The center (5th of 9) position's BiLSTM output: [batch, 1, lstm_dim*2].
        center_out = tf.slice(context_out, [0, 4, 0], [-1, 1, -1])
    return char_input, context_emb, center_out
if __name__ == "__main__":
    # Smoke-build the TextCNN model when this module is run as a script.
    # getBiLSTMModel()
    getTextCNNModel()
|