# model.py

import sys
import os
import numpy as np
sys.path.append(os.path.abspath(os.path.dirname(__file__)))
from keras.layers import Lambda, Dense, Reshape, Bidirectional, LSTM, Conv2D, BatchNormalization, LeakyReLU, Masking
from keras.preprocessing.sequence import pad_sequences
from models.layer_utils import BatchReshape1, BatchReshape2, MyPadding, MySplit, BatchReshape3, \
    BatchReshape4, BatchReshape5, BatchReshape6
from keras import layers, models, Sequential
import keras.backend as K
import tensorflow as tf
from models.my_average_pooling import MyAveragePooling1D
from models.self_attention import SeqSelfAttention, MySelfAttention
from models.u_net import u_net_small


def model_1(input_shape, output_shape):
    # Input (batch, 10, 60)
    input_1 = layers.Input(shape=input_shape[1:], dtype="float32")
    input_2 = layers.Input(shape=input_shape[1:], dtype="float32")
    input_3 = layers.Input(shape=input_shape[1:], dtype="float32")
    input_4 = layers.Input(shape=input_shape[1:], dtype="float32")
    input_5 = layers.Input(shape=input_shape[1:], dtype="float32")
    input_6 = layers.Input(shape=input_shape[1:], dtype="float32")
    # ----------- Three box sequence -----------
    # Concat (batch, 30, 60)
    concat_1 = layers.concatenate([input_1, input_2, input_3], axis=-2, name='seq_concat')
    concat_2 = layers.concatenate([input_4, input_5, input_6], axis=-2)
    # Bi-LSTM (batch, 30, 128)
    bi_lstm_1 = layers.Bidirectional(layers.LSTM(64, return_sequences=True))(concat_1)
    bi_lstm_2 = layers.Bidirectional(layers.LSTM(64, return_sequences=True))(concat_2)
    # Self-Attention (batch, 30, 128)
    self_attention_1 = SeqSelfAttention(attention_activation='sigmoid')(bi_lstm_1)
    self_attention_2 = SeqSelfAttention(attention_activation='sigmoid')(bi_lstm_2)
    # Dense (batch, 30, 1)
    dense_1 = layers.Dense(output_shape[0], activation="relu")(self_attention_1)
    dense_2 = layers.Dense(output_shape[0], activation="relu")(self_attention_2)
    # Squeeze (batch, 30)
    squeeze_1 = Lambda(lambda x: K.squeeze(x, axis=-1))(dense_1)
    squeeze_2 = Lambda(lambda x: K.squeeze(x, axis=-1))(dense_2)
    # ----------- One box feature -----------
    # Bi-LSTM (batch, 10, 128)
    bi_lstm = layers.Bidirectional(layers.LSTM(64, return_sequences=True))(input_2)
    # Self-Attention (batch, 10, 128)
    self_attention = SeqSelfAttention(attention_activation='sigmoid')(bi_lstm)
    # mask mean pooling
    # pool_1 = MyAveragePooling1D(axis=-1)(self_attention_1)
    # Dense (batch, 10, 1)
    dense = layers.Dense(output_shape[0], activation="relu")(self_attention)
    # Squeeze (batch, 10) - one box feature
    squeeze = Lambda(lambda x: K.squeeze(x, axis=-1))(dense)
    # ----------- Three box sequence & One box feature -----------
    # Dense (batch, 1)
    concat = layers.concatenate([squeeze, squeeze_1, squeeze_2])
    output = layers.Dense(64, activation='relu')(concat)
    output = layers.Dense(1, activation="sigmoid", name='output')(output)
    model = models.Model(inputs=[input_1, input_2, input_3, input_4, input_5, input_6],
                         outputs=output)
    # model.summary()
    return model
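

# A minimal usage sketch for model_1 (assumptions: the shape comments above
# imply input_shape=(batch, 10, 60) and output_shape=(1,); the batch size of 4
# and the random inputs are illustrative only, not from the original training code).
def _demo_model_1():
    model = model_1(input_shape=(None, 10, 60), output_shape=(1,))
    model.compile(optimizer="adam", loss="binary_crossentropy")
    # Six box inputs: two groups of three (10 chars, 60-dim embedding) sequences.
    xs = [np.random.rand(4, 10, 60).astype("float32") for _ in range(6)]
    print(model.predict(xs).shape)  # (4, 1)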


def model_1_small(input_shape, output_shape):
    # Input (batch, 10, 60)
    input_1 = layers.Input(shape=input_shape[1:], dtype="float32")
    input_2 = layers.Input(shape=input_shape[1:], dtype="float32")
    input_3 = layers.Input(shape=input_shape[1:], dtype="float32")
    input_4 = layers.Input(shape=input_shape[1:], dtype="float32")
    input_5 = layers.Input(shape=input_shape[1:], dtype="float32")
    input_6 = layers.Input(shape=input_shape[1:], dtype="float32")
    # ----------- Three box sequence -----------
    # Concat (batch, 30, 60)
    concat_1 = layers.concatenate([input_1, input_2, input_3], axis=-2, name='seq_concat')
    concat_2 = layers.concatenate([input_4, input_5, input_6], axis=-2)
    # Bi-LSTM (batch, 30, 64)
    bi_lstm_1 = layers.Bidirectional(layers.LSTM(32, return_sequences=True))(concat_1)
    bi_lstm_2 = layers.Bidirectional(layers.LSTM(32, return_sequences=True))(concat_2)
    # Self-Attention (batch, 30, 64)
    self_attention_1 = SeqSelfAttention(attention_activation='sigmoid')(bi_lstm_1)
    self_attention_2 = SeqSelfAttention(attention_activation='sigmoid')(bi_lstm_2)
    # Dense (batch, 30, 1)
    dense_1 = layers.Dense(output_shape[0], activation="relu")(self_attention_1)
    dense_2 = layers.Dense(output_shape[0], activation="relu")(self_attention_2)
    # Squeeze (batch, 30)
    squeeze_1 = Lambda(lambda x: K.squeeze(x, axis=-1))(dense_1)
    squeeze_2 = Lambda(lambda x: K.squeeze(x, axis=-1))(dense_2)
    # ----------- One box feature -----------
    # Bi-LSTM (batch, 10, 64)
    bi_lstm = layers.Bidirectional(layers.LSTM(32, return_sequences=True))(input_2)
    # Self-Attention (batch, 10, 64)
    self_attention = SeqSelfAttention(attention_activation='sigmoid')(bi_lstm)
    # mask mean pooling
    # pool_1 = MyAveragePooling1D(axis=-1)(self_attention_1)
    # Dense (batch, 10, 1)
    dense = layers.Dense(output_shape[0], activation="relu")(self_attention)
    # Squeeze (batch, 10) - one box feature
    squeeze = Lambda(lambda x: K.squeeze(x, axis=-1))(dense)
    # ----------- Three box sequence & One box feature -----------
    # Dense (batch, 1)
    concat = layers.concatenate([squeeze, squeeze_1, squeeze_2])
    output = layers.Dense(32, activation='relu')(concat)
    output = layers.Dense(1, activation="sigmoid", name='output')(output)
    model = models.Model(inputs=[input_1, input_2, input_3, input_4, input_5, input_6],
                         outputs=output)
    # model.summary()
    return model


def model_1_tiny(input_shape, output_shape):
    # Input (batch, 10, 60)
    input_1 = layers.Input(shape=input_shape[1:], dtype="float32")
    input_2 = layers.Input(shape=input_shape[1:], dtype="float32")
    input_3 = layers.Input(shape=input_shape[1:], dtype="float32")
    input_4 = layers.Input(shape=input_shape[1:], dtype="float32")
    input_5 = layers.Input(shape=input_shape[1:], dtype="float32")
    input_6 = layers.Input(shape=input_shape[1:], dtype="float32")
    # ----------- Three box sequence -----------
    # Concat (batch, 30, 60)
    concat_1 = layers.concatenate([input_1, input_2, input_3], axis=-2, name='seq_concat')
    concat_2 = layers.concatenate([input_4, input_5, input_6], axis=-2)
    # Bi-LSTM (batch, 30, 32)
    bi_lstm_1 = layers.Bidirectional(layers.LSTM(16, return_sequences=True))(concat_1)
    bi_lstm_2 = layers.Bidirectional(layers.LSTM(16, return_sequences=True))(concat_2)
    # Self-Attention (batch, 30, 32)
    self_attention_1 = SeqSelfAttention(attention_activation='sigmoid')(bi_lstm_1)
    self_attention_2 = SeqSelfAttention(attention_activation='sigmoid')(bi_lstm_2)
    # Dense (batch, 30, 1)
    dense_1 = layers.Dense(output_shape[0], activation="relu")(self_attention_1)
    dense_2 = layers.Dense(output_shape[0], activation="relu")(self_attention_2)
    # Squeeze (batch, 30)
    squeeze_1 = Lambda(lambda x: K.squeeze(x, axis=-1))(dense_1)
    squeeze_2 = Lambda(lambda x: K.squeeze(x, axis=-1))(dense_2)
    # ----------- One box feature -----------
    # Bi-LSTM (batch, 10, 32)
    bi_lstm = layers.Bidirectional(layers.LSTM(16, return_sequences=True))(input_2)
    # Self-Attention (batch, 10, 32)
    self_attention = SeqSelfAttention(attention_activation='sigmoid')(bi_lstm)
    # mask mean pooling
    # pool_1 = MyAveragePooling1D(axis=-1)(self_attention_1)
    # Dense (batch, 10, 1)
    dense = layers.Dense(output_shape[0], activation="relu")(self_attention)
    # Squeeze (batch, 10) - one box feature
    squeeze = Lambda(lambda x: K.squeeze(x, axis=-1))(dense)
    # ----------- Three box sequence & One box feature -----------
    # Dense (batch, 1)
    concat = layers.concatenate([squeeze, squeeze_1, squeeze_2])
    output = layers.Dense(16, activation='relu')(concat)
    output = layers.Dense(1, activation="sigmoid", name='output')(output)
    model = models.Model(inputs=[input_1, input_2, input_3, input_4, input_5, input_6],
                         outputs=output)
    # model.summary()
    return model


def model_2(input_shape, output_shape):
    # input_shape = (None, None, 10, 60)
    # (batch_size, row_num, col_num, character_num, character_embedding)
    hidden_size = 64
    attention_size = 64
    character_num = 10
    character_embed = 60
    cell_embed = 1
    # Input
    input_1 = layers.Input(shape=input_shape, dtype="float32", name="input_1")
    input_2 = layers.Input(shape=(None, None, None, None), dtype="int32", name="input_2")
    # batch = tf.shape(_input)[0]
    height = tf.shape(input_2)[1]
    width = tf.shape(input_2)[2]
    pad_height = tf.shape(input_2)[3]
    pad_width = tf.shape(input_2)[4]
    # print("batch, height, width", batch, height, width)
    # Reshape
    reshape = BatchReshape1(character_num, character_embed)(input_1)
    print("model_2_0", reshape)
    # Bi-LSTM + Attention
    bi_lstm = Bidirectional(LSTM(hidden_size))(reshape)
    print("model_2_1", bi_lstm)
    # bi_lstm = Bidirectional(LSTM(hidden_size, return_sequences=True))(reshape)
    # self_attention = SeqSelfAttention(attention_activation='sigmoid')(bi_lstm)
    # trans = Lambda(lambda x: tf.transpose(x, (0, 2, 1)))(self_attention)
    # dense = Dense(1, activation='relu')(trans)
    # squeeze = Lambda(lambda x: tf.squeeze(x, -1))(dense)
    dense = Dense(1, activation="sigmoid")(bi_lstm)
    print("model_2_2", dense)
    # reshape = Lambda(batch_reshape, output_shape=(height, width, cell_embed))(dense)
    reshape = BatchReshape2(cell_embed)([input_1, dense])
    print("model_2_3", reshape)
    # squeeze_1 = Lambda(lambda x: K.squeeze(x, axis=-1), name="output_1")(reshape)
    # print("model_2_4", squeeze)
    # Padding
    padding = MyPadding(pad_height, pad_width, cell_embed)(reshape)
    # padding = reshape
    print("model_2_4", padding)
    # U-Net
    # u_net = u_net_small(padding)
    # print("model_2_5", u_net)
    # Conv 5*5
    conv = Conv2D(1, (5, 5), padding='same')(padding)
    bn = BatchNormalization()(conv)
    relu = LeakyReLU(alpha=0.)(bn)
    conv = Conv2D(1, (5, 5), padding='same')(relu)
    bn = BatchNormalization()(conv)
    relu = LeakyReLU(alpha=0.)(bn)
    conv = Conv2D(1, (5, 5), padding='same')(relu)
    bn = BatchNormalization()(conv)
    relu_1 = LeakyReLU(alpha=0.)(bn)
    # Conv 3*3
    conv = Conv2D(1, (3, 3), padding='same')(padding)
    bn = BatchNormalization()(conv)
    relu = LeakyReLU(alpha=0.)(bn)
    conv = Conv2D(1, (3, 3), padding='same')(relu)
    bn = BatchNormalization()(conv)
    relu = LeakyReLU(alpha=0.)(bn)
    conv = Conv2D(1, (3, 3), padding='same')(relu)
    bn = BatchNormalization()(conv)
    relu_2 = LeakyReLU(alpha=0.)(bn)
    # Conv 1*1
    conv = Conv2D(1, (1, 1), padding='same')(padding)
    bn = BatchNormalization()(conv)
    relu = LeakyReLU(alpha=0.)(bn)
    conv = Conv2D(1, (1, 1), padding='same')(relu)
    bn = BatchNormalization()(conv)
    relu = LeakyReLU(alpha=0.)(bn)
    conv = Conv2D(1, (1, 1), padding='same')(relu)
    bn = BatchNormalization()(conv)
    relu_3 = LeakyReLU(alpha=0.)(bn)
    # conv = Conv2D(cell_embed, (3, 3), padding='same')(relu)
    # bn = BatchNormalization()(conv)
    # relu_2 = LeakyReLU(alpha=0.)(bn)
    # Merge
    # print("model_2_5", relu_1, relu_2)
    merge = layers.Concatenate(axis=-1)([relu_1, relu_2, relu_3])
    # merge = u_net
    # merge = relu
    dense = layers.Dense(1, activation='sigmoid')(merge)
    squeeze_2 = Lambda(lambda x: K.squeeze(x, axis=-1))(dense)
    # Split
    split = MySplit(height, width, name="output")(squeeze_2)
    model = models.Model(inputs=[input_1, input_2], outputs=split)
    model.summary(line_length=120)
    return model
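

# Construction sketch for model_2 (assumptions: per the comment above, input_1
# carries (rows, cols, 10 chars, 60-dim embeddings) per sample, and input_2 is a
# rank-5 int32 tensor whose dims 1-4 supply the real and padded grid sizes via
# tf.shape; the custom BatchReshape*/MyPadding/MySplit layers are assumed to
# accept fully dynamic row/col dims).
def _demo_model_2():
    return model_2(input_shape=(None, None, 10, 60), output_shape=(1,))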


def model_3(input_shape, output_shape):
    # (batch_size, row_num, col_num, character_num, character_embedding)
    hidden_size = 16
    attention_size = 2 * hidden_size
    character_num = 20
    character_embed = 60
    cell_embed = 2 * hidden_size
    pad_len = 100
    mask_timestamps = pad_len
    # Input
    input_1 = layers.Input(shape=input_shape, dtype="float32", name="input_1")
    input_2 = layers.Input(shape=(None, None, None, None), dtype="int32", name="input_2")
    # Reshape
    reshape = BatchReshape1(character_num, character_embed)(input_1)
    print("model_3_0", reshape)
    # Bi-LSTM
    bi_lstm = Bidirectional(LSTM(hidden_size, return_sequences=True))(reshape)
    bi_lstm = Bidirectional(LSTM(hidden_size, return_sequences=False))(bi_lstm)
    print("model_3_1", bi_lstm)
    # Reshape
    reshape = BatchReshape2(cell_embed)([input_1, bi_lstm])
    print("model_3_3", reshape)
    # Rows Reshape
    reshape_1 = BatchReshape3(cell_embed)(reshape)
    # Cols Reshape
    trans = Lambda(lambda x: tf.transpose(x, (0, 2, 1, 3)))(reshape)
    reshape_2 = BatchReshape3(cell_embed)(trans)
    # All boxes Reshape
    reshape_3 = BatchReshape5(cell_embed)(reshape)
    # Masking
    # mask_1 = Masking(mask_value=-1, input_shape=(mask_timestamps, cell_embed))(pad_1)
    # mask_2 = Masking(mask_value=-1, input_shape=(mask_timestamps, cell_embed))(pad_2)
    # print("model_3_4", mask_1)
    # Padding
    # pad_1 = MyPadding()
    # Bi-LSTM
    # bi_lstm = Bidirectional(LSTM(hidden_size, return_sequences=True))
    # bi_lstm_1 = bi_lstm(reshape_1)
    # bi_lstm_2 = bi_lstm(reshape_2)
    bi_lstm_1 = Bidirectional(LSTM(hidden_size, return_sequences=True))(reshape_1)
    bi_lstm_2 = Bidirectional(LSTM(hidden_size, return_sequences=True))(reshape_2)
    # bi_lstm_1 = LSTM(2*hidden_size, return_sequences=True)(reshape_1)
    # print("model_3_4", bi_lstm_1)
    # bi_lstm_2 = LSTM(2*hidden_size, return_sequences=True)(reshape_2)
    # self_attention_1 = MySelfAttention(output_dim=attention_size)(bi_lstm_1)
    # self_attention_2 = MySelfAttention(output_dim=attention_size)(bi_lstm_2)
    # Bi-LSTM + Attention
    bi_lstm_3 = Bidirectional(LSTM(hidden_size, return_sequences=True))(reshape_3)
    # bi_lstm_3 = LSTM(2*hidden_size, return_sequences=True)(reshape_3)
    # self_attention_3 = MySelfAttention(output_dim=attention_size)(bi_lstm_3)
    # print("model_3_5", bi_lstm_1)
    # Reshape
    reshape_1 = BatchReshape4(cell_embed)([reshape, bi_lstm_1])
    reshape_2 = BatchReshape4(cell_embed)([trans, bi_lstm_2])
    reshape_2 = Lambda(lambda x: tf.transpose(x, (0, 2, 1, 3)))(reshape_2)
    reshape_3 = BatchReshape6(cell_embed)([reshape, bi_lstm_3])
    print("model_3_6", reshape_1)
    # Merge
    merge = layers.Concatenate(axis=-1)([reshape, reshape_1, reshape_2, reshape_3])
    dense = layers.Dense(hidden_size, activation='relu')(merge)
    dense = layers.Dense(1, activation='sigmoid')(dense)
    squeeze = Lambda(lambda x: K.squeeze(x, axis=-1), name="output")(dense)
    model = models.Model(inputs=[input_1, input_2], outputs=squeeze)
    model.summary(line_length=110)
    return model
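

# Construction sketch for model_3, mirroring _demo_model_2 (assumption: the same
# 5-D grid-of-boxes input convention, but with character_num=20 per the
# constants above).
def _demo_model_3():
    return model_3(input_shape=(None, None, 20, 60), output_shape=(1,))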


def get_model(input_shape, output_shape, model_id):
    if model_id == 1:
        return model_1_tiny(input_shape, output_shape)
    elif model_id == 2:
        return model_2(input_shape, output_shape)
    elif model_id == 3:
        return model_3(input_shape, output_shape)
    else:
        raise ValueError("No such model: {}".format(model_id))
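

# Usage sketch for get_model (assumptions: the (None, 10, 60) input shape and
# (1,) output shape match the model_1 family above; model_id=1 selects the tiny
# variant, per the dispatch in get_model).
def _demo_get_model():
    model = get_model(input_shape=(None, 10, 60), output_shape=(1,), model_id=1)
    model.compile(optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"])
    model.summary()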


def test_layer():
    model = Sequential()
    # pad_sequences operates on numpy arrays, not symbolic tensors, so it cannot
    # run inside a Lambda; pad the time axis to 100 steps with tf.pad instead
    # (post-padding with the mask value -1; the 5-step input here never needs
    # truncation). Masking is applied after padding, since Lambda layers do not
    # propagate masks.
    model.add(Lambda(lambda x: tf.pad(x, [[0, 0], [0, 100 - tf.shape(x)[1]], [0, 0]],
                                      constant_values=-1.0),
                     input_shape=(5, 8)))
    model.add(Masking(mask_value=-1))
    model.add(LSTM(32, return_sequences=True))
    model.compile(optimizer='sgd', loss='mse')
    x = np.zeros([1, 5, 8])
    print(x.shape)
    # After padding, the LSTM sees 100 timesteps, so the target is (1, 100, 32).
    y = np.zeros([1, 100, 32])
    model.summary()
    model.fit(x, y, batch_size=32, epochs=10)


if __name__ == "__main__":
    test_layer()