import sys
import os
import numpy as np

sys.path.append(os.path.abspath(os.path.dirname(__file__)))

from keras.layers import Lambda, Dense, Reshape, Bidirectional, LSTM, Conv2D, BatchNormalization, LeakyReLU, Masking
from keras.preprocessing.sequence import pad_sequences
from models.layer_utils import BatchReshape1, BatchReshape2, MyPadding, MySplit, BatchReshape3, \
    BatchReshape4, BatchReshape5, BatchReshape6
from keras import layers, models, Sequential
import keras.backend as K
import tensorflow as tf
from models.my_average_pooling import MyAveragePooling1D
from models.self_attention import SeqSelfAttention, MySelfAttention
from models.u_net import u_net_small


def model_1(input_shape, output_shape):
    # Input (batch, 10, 60)
    input_1 = layers.Input(shape=input_shape[1:], dtype="float32")
    input_2 = layers.Input(shape=input_shape[1:], dtype="float32")
    input_3 = layers.Input(shape=input_shape[1:], dtype="float32")
    input_4 = layers.Input(shape=input_shape[1:], dtype="float32")
    input_5 = layers.Input(shape=input_shape[1:], dtype="float32")
    input_6 = layers.Input(shape=input_shape[1:], dtype="float32")

    # ----------- Three box sequence -----------
    # Concat (batch, 30, 60)
    concat_1 = layers.concatenate([input_1, input_2, input_3], axis=-2, name='seq_concat')
    concat_2 = layers.concatenate([input_4, input_5, input_6], axis=-2)
    # Bi-LSTM (batch, 30, 128)
    bi_lstm_1 = layers.Bidirectional(layers.LSTM(64, return_sequences=True))(concat_1)
    bi_lstm_2 = layers.Bidirectional(layers.LSTM(64, return_sequences=True))(concat_2)
    # Self-Attention (batch, 30, 128)
    self_attention_1 = SeqSelfAttention(attention_activation='sigmoid')(bi_lstm_1)
    self_attention_2 = SeqSelfAttention(attention_activation='sigmoid')(bi_lstm_2)
    # Dense (batch, 30, 1)
    dense_1 = layers.Dense(output_shape[0], activation="relu")(self_attention_1)
    dense_2 = layers.Dense(output_shape[0], activation="relu")(self_attention_2)
    # Squeeze (batch, 30)
    squeeze_1 = Lambda(lambda x: K.squeeze(x, axis=-1))(dense_1)
    squeeze_2 = Lambda(lambda x: K.squeeze(x, axis=-1))(dense_2)

    # ----------- One box feature -----------
    # Bi-LSTM (batch, 10, 128)
    bi_lstm = layers.Bidirectional(layers.LSTM(64, return_sequences=True))(input_2)
    # Self-Attention (batch, 10, 128)
    self_attention = SeqSelfAttention(attention_activation='sigmoid')(bi_lstm)
    # mask mean pooling
    # pool_1 = MyAveragePooling1D(axis=-1)(self_attention_1)
    # Dense (batch, 10, 1)
    dense = layers.Dense(output_shape[0], activation="relu")(self_attention)
    # Squeeze (batch, 10) - one box feature
    squeeze = Lambda(lambda x: K.squeeze(x, axis=-1))(dense)

    # ----------- Three box sequence & One box feature -----------
    # Dense (batch, 1)
    concat = layers.concatenate([squeeze, squeeze_1, squeeze_2])
    output = layers.Dense(64, activation='relu')(concat)
    output = layers.Dense(1, activation="sigmoid", name='output')(output)

    model = models.Model(inputs=[input_1, input_2, input_3, input_4, input_5, input_6],
                         outputs=output)
    # model.summary()
    return model
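

# Minimal usage sketch for model_1 (an assumption, not part of the original training code):
# each of the six inputs is a (batch, 10, 60) block of character embeddings and
# output_shape is (1,), which is what the Dense(output_shape[0]) + squeeze above expects.
def _demo_model_1():
    model = model_1(input_shape=(None, 10, 60), output_shape=(1,))
    model.compile(optimizer='adam', loss='binary_crossentropy')
    # Six dummy input blocks and one binary label per sample.
    boxes = [np.zeros((2, 10, 60), dtype='float32') for _ in range(6)]
    labels = np.zeros((2, 1), dtype='float32')
    model.fit(boxes, labels, batch_size=2, epochs=1)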


def model_1_small(input_shape, output_shape):
    # Input (batch, 10, 60)
    input_1 = layers.Input(shape=input_shape[1:], dtype="float32")
    input_2 = layers.Input(shape=input_shape[1:], dtype="float32")
    input_3 = layers.Input(shape=input_shape[1:], dtype="float32")
    input_4 = layers.Input(shape=input_shape[1:], dtype="float32")
    input_5 = layers.Input(shape=input_shape[1:], dtype="float32")
    input_6 = layers.Input(shape=input_shape[1:], dtype="float32")

    # ----------- Three box sequence -----------
    # Concat (batch, 30, 60)
    concat_1 = layers.concatenate([input_1, input_2, input_3], axis=-2, name='seq_concat')
    concat_2 = layers.concatenate([input_4, input_5, input_6], axis=-2)
    # Bi-LSTM (batch, 30, 64)
    bi_lstm_1 = layers.Bidirectional(layers.LSTM(32, return_sequences=True))(concat_1)
    bi_lstm_2 = layers.Bidirectional(layers.LSTM(32, return_sequences=True))(concat_2)
    # Self-Attention (batch, 30, 64)
    self_attention_1 = SeqSelfAttention(attention_activation='sigmoid')(bi_lstm_1)
    self_attention_2 = SeqSelfAttention(attention_activation='sigmoid')(bi_lstm_2)
    # Dense (batch, 30, 1)
    dense_1 = layers.Dense(output_shape[0], activation="relu")(self_attention_1)
    dense_2 = layers.Dense(output_shape[0], activation="relu")(self_attention_2)
    # Squeeze (batch, 30)
    squeeze_1 = Lambda(lambda x: K.squeeze(x, axis=-1))(dense_1)
    squeeze_2 = Lambda(lambda x: K.squeeze(x, axis=-1))(dense_2)

    # ----------- One box feature -----------
    # Bi-LSTM (batch, 10, 64)
    bi_lstm = layers.Bidirectional(layers.LSTM(32, return_sequences=True))(input_2)
    # Self-Attention (batch, 10, 64)
    self_attention = SeqSelfAttention(attention_activation='sigmoid')(bi_lstm)
    # mask mean pooling
    # pool_1 = MyAveragePooling1D(axis=-1)(self_attention_1)
    # Dense (batch, 10, 1)
    dense = layers.Dense(output_shape[0], activation="relu")(self_attention)
    # Squeeze (batch, 10) - one box feature
    squeeze = Lambda(lambda x: K.squeeze(x, axis=-1))(dense)

    # ----------- Three box sequence & One box feature -----------
    # Dense (batch, 1)
    concat = layers.concatenate([squeeze, squeeze_1, squeeze_2])
    output = layers.Dense(32, activation='relu')(concat)
    output = layers.Dense(1, activation="sigmoid", name='output')(output)

    model = models.Model(inputs=[input_1, input_2, input_3, input_4, input_5, input_6],
                         outputs=output)
    # model.summary()
    return model


def model_1_tiny(input_shape, output_shape):
    # Input (batch, 10, 60)
    input_1 = layers.Input(shape=input_shape[1:], dtype="float32")
    input_2 = layers.Input(shape=input_shape[1:], dtype="float32")
    input_3 = layers.Input(shape=input_shape[1:], dtype="float32")
    input_4 = layers.Input(shape=input_shape[1:], dtype="float32")
    input_5 = layers.Input(shape=input_shape[1:], dtype="float32")
    input_6 = layers.Input(shape=input_shape[1:], dtype="float32")

    # ----------- Three box sequence -----------
    # Concat (batch, 30, 60)
    concat_1 = layers.concatenate([input_1, input_2, input_3], axis=-2, name='seq_concat')
    concat_2 = layers.concatenate([input_4, input_5, input_6], axis=-2)
    # Bi-LSTM (batch, 30, 32)
    bi_lstm_1 = layers.Bidirectional(layers.LSTM(16, return_sequences=True))(concat_1)
    bi_lstm_2 = layers.Bidirectional(layers.LSTM(16, return_sequences=True))(concat_2)
    # Self-Attention (batch, 30, 32)
    self_attention_1 = SeqSelfAttention(attention_activation='sigmoid')(bi_lstm_1)
    self_attention_2 = SeqSelfAttention(attention_activation='sigmoid')(bi_lstm_2)
    # Dense (batch, 30, 1)
    dense_1 = layers.Dense(output_shape[0], activation="relu")(self_attention_1)
    dense_2 = layers.Dense(output_shape[0], activation="relu")(self_attention_2)
    # Squeeze (batch, 30)
    squeeze_1 = Lambda(lambda x: K.squeeze(x, axis=-1))(dense_1)
    squeeze_2 = Lambda(lambda x: K.squeeze(x, axis=-1))(dense_2)

    # ----------- One box feature -----------
    # Bi-LSTM (batch, 10, 32)
    bi_lstm = layers.Bidirectional(layers.LSTM(16, return_sequences=True))(input_2)
    # Self-Attention (batch, 10, 32)
    self_attention = SeqSelfAttention(attention_activation='sigmoid')(bi_lstm)
    # mask mean pooling
    # pool_1 = MyAveragePooling1D(axis=-1)(self_attention_1)
    # Dense (batch, 10, 1)
    dense = layers.Dense(output_shape[0], activation="relu")(self_attention)
    # Squeeze (batch, 10) - one box feature
    squeeze = Lambda(lambda x: K.squeeze(x, axis=-1))(dense)

    # ----------- Three box sequence & One box feature -----------
    # Dense (batch, 1)
    concat = layers.concatenate([squeeze, squeeze_1, squeeze_2])
    output = layers.Dense(16, activation='relu')(concat)
    output = layers.Dense(1, activation="sigmoid", name='output')(output)

    model = models.Model(inputs=[input_1, input_2, input_3, input_4, input_5, input_6],
                         outputs=output)
    # model.summary()
    return model


def model_2(input_shape, output_shape):
    # input_shape = (None, None, 10, 60)
    # (batch_size, row_num, col_num, character_num, character_embedding)
    hidden_size = 64
    attention_size = 64
    character_num = 10
    character_embed = 60
    cell_embed = 1

    # Input
    input_1 = layers.Input(shape=input_shape, dtype="float32", name="input_1")
    input_2 = layers.Input(shape=(None, None, None, None), dtype="int32", name="input_2")
    # batch = tf.shape(_input)[0]
    height = tf.shape(input_2)[1]
    width = tf.shape(input_2)[2]
    pad_height = tf.shape(input_2)[3]
    pad_width = tf.shape(input_2)[4]
    # print("batch, height, width", batch, height, width)

    # Reshape
    reshape = BatchReshape1(character_num, character_embed)(input_1)
    print("model_2_0", reshape)
    # Bi-LSTM + Attention
    bi_lstm = Bidirectional(LSTM(hidden_size))(reshape)
    print("model_2_1", bi_lstm)
    # bi_lstm = Bidirectional(LSTM(hidden_size, return_sequences=True))(reshape)
    # self_attention = SeqSelfAttention(attention_activation='sigmoid')(bi_lstm)
    # trans = Lambda(lambda x: tf.transpose(x, (0, 2, 1)))(self_attention)
    # dense = Dense(1, activation='relu')(trans)
    # squeeze = Lambda(lambda x: tf.squeeze(x, -1))(dense)
    dense = Dense(1, activation="sigmoid")(bi_lstm)
    print("model_2_2", dense)
    # reshape = Lambda(batch_reshape, output_shape=(height, width, cell_embed))(dense)
    reshape = BatchReshape2(cell_embed)([input_1, dense])
    print("model_2_3", reshape)
    # squeeze_1 = Lambda(lambda x: K.squeeze(x, axis=-1), name="output_1")(reshape)
    # print("model_2_4", squeeze)

    # Padding
    padding = MyPadding(pad_height, pad_width, cell_embed)(reshape)
    # padding = reshape
    print("model_2_4", padding)
    # U-Net
    # u_net = u_net_small(padding)
    # print("model_2_5", u_net)

    # Conv 5*5
    conv = Conv2D(1, (5, 5), padding='same')(padding)
    bn = BatchNormalization()(conv)
    relu = LeakyReLU(alpha=0.)(bn)
    conv = Conv2D(1, (5, 5), padding='same')(relu)
    bn = BatchNormalization()(conv)
    relu = LeakyReLU(alpha=0.)(bn)
    conv = Conv2D(1, (5, 5), padding='same')(relu)
    bn = BatchNormalization()(conv)
    relu_1 = LeakyReLU(alpha=0.)(bn)
    # Conv 3*3
    conv = Conv2D(1, (3, 3), padding='same')(padding)
    bn = BatchNormalization()(conv)
    relu = LeakyReLU(alpha=0.)(bn)
    conv = Conv2D(1, (3, 3), padding='same')(relu)
    bn = BatchNormalization()(conv)
    relu = LeakyReLU(alpha=0.)(bn)
    conv = Conv2D(1, (3, 3), padding='same')(relu)
    bn = BatchNormalization()(conv)
    relu_2 = LeakyReLU(alpha=0.)(bn)
    # Conv 1*1
    conv = Conv2D(1, (1, 1), padding='same')(padding)
    bn = BatchNormalization()(conv)
    relu = LeakyReLU(alpha=0.)(bn)
    conv = Conv2D(1, (1, 1), padding='same')(relu)
    bn = BatchNormalization()(conv)
    relu = LeakyReLU(alpha=0.)(bn)
    conv = Conv2D(1, (1, 1), padding='same')(relu)
    bn = BatchNormalization()(conv)
    relu_3 = LeakyReLU(alpha=0.)(bn)
    # conv = Conv2D(cell_embed, (3, 3), padding='same')(relu)
    # bn = BatchNormalization()(conv)
    # relu_2 = LeakyReLU(alpha=0.)(bn)

    # Merge
    # print("model_2_5", relu_1, relu_2)
    merge = layers.Concatenate(axis=-1)([relu_1, relu_2, relu_3])
    # merge = u_net
    # merge = relu
    dense = layers.Dense(1, activation='sigmoid')(merge)
    squeeze_2 = Lambda(lambda x: K.squeeze(x, axis=-1))(dense)
    # Split
    split = MySplit(height, width, name="output")(squeeze_2)

    model = models.Model(inputs=[input_1, input_2], outputs=split)
    model.summary(line_length=120)
    return model
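

# Note on model_2's inputs (a reading of the code above, not documented in the original):
# input_1 carries the table as (batch, row_num, col_num, character_num, character_embed),
# while input_2 is only inspected through tf.shape(); its 2nd/3rd dims supply the true
# row/col counts used by MySplit and its 4th/5th dims the padded height/width for MyPadding.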


def model_3(input_shape, output_shape):
    # (batch_size, row_num, col_num, character_num, character_embedding)
    hidden_size = 16
    attention_size = 2 * hidden_size
    character_num = 20
    character_embed = 60
    cell_embed = 2 * hidden_size
    pad_len = 100
    mask_timestamps = pad_len

    # Input
    input_1 = layers.Input(shape=input_shape, dtype="float32", name="input_1")
    input_2 = layers.Input(shape=(None, None, None, None), dtype="int32", name="input_2")

    # Reshape
    reshape = BatchReshape1(character_num, character_embed)(input_1)
    print("model_3_0", reshape)
    # Bi-LSTM
    bi_lstm = Bidirectional(LSTM(hidden_size, return_sequences=True))(reshape)
    bi_lstm = Bidirectional(LSTM(hidden_size, return_sequences=False))(bi_lstm)
    print("model_3_1", bi_lstm)
    # Reshape
    reshape = BatchReshape2(cell_embed)([input_1, bi_lstm])
    print("model_3_3", reshape)

    # Rows Reshape
    reshape_1 = BatchReshape3(cell_embed)(reshape)
    # Cols Reshape
    trans = Lambda(lambda x: tf.transpose(x, (0, 2, 1, 3)))(reshape)
    reshape_2 = BatchReshape3(cell_embed)(trans)
    # All boxes Reshape
    reshape_3 = BatchReshape5(cell_embed)(reshape)
    # Masking
    # mask_1 = Masking(mask_value=-1, input_shape=(mask_timestamps, cell_embed))(pad_1)
    # mask_2 = Masking(mask_value=-1, input_shape=(mask_timestamps, cell_embed))(pad_2)
    # print("model_3_4", mask_1)
    # Padding
    # pad_1 = MyPadding()

    # Bi-LSTM
    # bi_lstm = Bidirectional(LSTM(hidden_size, return_sequences=True))
    # bi_lstm_1 = bi_lstm(reshape_1)
    # bi_lstm_2 = bi_lstm(reshape_2)
    bi_lstm_1 = Bidirectional(LSTM(hidden_size, return_sequences=True))(reshape_1)
    bi_lstm_2 = Bidirectional(LSTM(hidden_size, return_sequences=True))(reshape_2)
    # bi_lstm_1 = LSTM(2*hidden_size, return_sequences=True)(reshape_1)
    # print("model_3_4", bi_lstm_1)
    # bi_lstm_2 = LSTM(2*hidden_size, return_sequences=True)(reshape_2)
    # self_attention_1 = MySelfAttention(output_dim=attention_size)(bi_lstm_1)
    # self_attention_2 = MySelfAttention(output_dim=attention_size)(bi_lstm_2)
    # Bi-LSTM + Attention
    bi_lstm_3 = Bidirectional(LSTM(hidden_size, return_sequences=True))(reshape_3)
    # bi_lstm_3 = LSTM(2*hidden_size, return_sequences=True)(reshape_3)
    # self_attention_3 = MySelfAttention(output_dim=attention_size)(bi_lstm_3)
    # print("model_3_5", bi_lstm_1)

    # Reshape
    reshape_1 = BatchReshape4(cell_embed)([reshape, bi_lstm_1])
    reshape_2 = BatchReshape4(cell_embed)([trans, bi_lstm_2])
    reshape_2 = Lambda(lambda x: tf.transpose(x, (0, 2, 1, 3)))(reshape_2)
    reshape_3 = BatchReshape6(cell_embed)([reshape, bi_lstm_3])
    print("model_3_6", reshape_1)

    # Merge
    merge = layers.Concatenate(axis=-1)([reshape, reshape_1, reshape_2, reshape_3])
    dense = layers.Dense(hidden_size, activation='relu')(merge)
    dense = layers.Dense(1, activation='sigmoid')(dense)
    squeeze = Lambda(lambda x: K.squeeze(x, axis=-1), name="output")(dense)

    model = models.Model(inputs=[input_1, input_2], outputs=squeeze)
    model.summary(line_length=110)
    return model


def get_model(input_shape, output_shape, model_id):
    if model_id == 1:
        return model_1_tiny(input_shape, output_shape)
    elif model_id == 2:
        return model_2(input_shape, output_shape)
    elif model_id == 3:
        return model_3(input_shape, output_shape)
    else:
        raise ValueError("No such model: {}".format(model_id))
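

# Usage sketch (hypothetical shapes): model_id 1 builds the tiny per-box model, which expects
# six (batch, 10, 60) inputs, while ids 2 and 3 build the table models that take the 5-D
# input_1 / input_2 tensors described above.
# model = get_model(input_shape=(None, 10, 60), output_shape=(1,), model_id=1)
# model.compile(optimizer='adam', loss='binary_crossentropy')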


def test_layer():
    # pad_sequences works on NumPy arrays, not symbolic tensors, so pad inside the
    # graph with tf.pad instead and mask the padded (-1) timesteps afterwards.
    maxlen = 100
    model = Sequential()
    model.add(Lambda(lambda x: tf.pad(x, [[0, 0], [0, maxlen - tf.shape(x)[1]], [0, 0]],
                                      constant_values=-1.0),
                     input_shape=(5, 8)))
    model.add(Masking(mask_value=-1))
    model.add(LSTM(32, return_sequences=True))
    model.compile(optimizer='sgd', loss='mse')
    x = np.zeros([1, 5, 8])
    print(x.shape)
    # The LSTM now returns one 32-dim vector per padded timestep.
    y = np.zeros([1, maxlen, 32])
    model.summary()
    model.fit(x, y, batch_size=32, epochs=10)


if __name__ == "__main__":
    test_layer()