from keras import layers, models

from BiddingKG.dl.table_head.models.my_average_pooling import MyAveragePooling1D
from BiddingKG.dl.table_head.models.self_attention import SeqSelfAttention


def get_model(input_shape, output_shape):
    # Inputs: two padded token-id sequences (e.g. a table cell and a neighbouring cell)
    input_1 = layers.Input(shape=input_shape[1:], dtype="float32")
    input_2 = layers.Input(shape=input_shape[1:], dtype="float32")

    # Embedding: vocabulary size 6624, 32-dim vectors; mask_zero=True masks the padding id 0
    embed_1 = layers.Embedding(input_dim=6624, output_dim=32,
                               input_length=input_shape[1], mask_zero=True)(input_1)
    embed_2 = layers.Embedding(input_dim=6624, output_dim=32,
                               input_length=input_shape[1], mask_zero=True)(input_2)

    # Bi-LSTM: 16 units per direction; return_sequences=True keeps every timestep for attention
    bi_lstm_1 = layers.Bidirectional(layers.LSTM(16, return_sequences=True))(embed_1)
    bi_lstm_2 = layers.Bidirectional(layers.LSTM(16, return_sequences=True))(embed_2)

    # Self-attention over each Bi-LSTM output sequence
    self_attention_1 = SeqSelfAttention(attention_activation='sigmoid')(bi_lstm_1)
    self_attention_2 = SeqSelfAttention(attention_activation='sigmoid')(bi_lstm_2)

    # Concatenate the two attended sequences along the feature axis
    concat = layers.concatenate([self_attention_1, self_attention_2])

    # Per-timestep Dense + Softmax classification
    output = layers.Dense(output_shape[0], activation="softmax")(concat)

    # Mask-aware mean pooling over the time axis collapses the sequence to one prediction
    output = MyAveragePooling1D(axis=1)(output)

    model = models.Model(inputs=[input_1, input_2], outputs=output)
    model.summary()
    return model
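For context, here is a minimal sketch of how get_model might be driven, assuming sequences padded to length 30 and a two-class output. The shapes, optimizer, loss, and dummy data below are illustrative assumptions, not values taken from the BiddingKG project:

import numpy as np

# Assumed shapes for illustration: (number of inputs, padded sequence length)
input_shape = (2, 30)
output_shape = (2,)  # e.g. table-header / not-header

model = get_model(input_shape, output_shape)
model.compile(optimizer="adam", loss="categorical_crossentropy", metrics=["accuracy"])

# Dummy batch of 8 samples: token ids in [1, 6623], with 0 reserved for padding
x1 = np.random.randint(1, 6624, size=(8, 30))
x2 = np.random.randint(1, 6624, size=(8, 30))
y = np.eye(2)[np.random.randint(0, 2, size=8)]  # one-hot labels for the softmax output

model.fit([x1, x2], y, epochs=1, batch_size=4)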