@@ -0,0 +1,725 @@
+# -*- coding: utf-8 -*-
+"""
+Created on Tue Jun 21 10:53:51 2022
+model
+@author: fangjiasheng
+"""
+import os
+import sys
+sys.path.append(os.path.dirname(os.path.abspath(__file__)))
+from isr.post_process import yolo_eval
+import time
+from functools import wraps
+from keras.layers import Lambda, Dense, Conv2D, BatchNormalization, LeakyReLU, \
+    MaxPooling2D, UpSampling2D, concatenate, Concatenate, Layer, GlobalAveragePooling2D, Multiply
+from keras import Input, Model
+import tensorflow as tf
+import numpy as np
+from keras.regularizers import l2
+from isr.utils import compose
+
+
+def seal_model_se(input_shape, output_shape, cls_num=3):
+    inputs = Input(shape=input_shape)
+    use_bias = False
+
+    # # 256
+    # down0 = Conv2D(16, (3, 3), padding='same', use_bias=use_bias)(inputs)
+    # down0 = BatchNormalization()(down0)
+    # down0 = LeakyReLU(alpha=0.1)(down0)
+    # down0 = Conv2D(16, (1, 1), padding='same', use_bias=use_bias)(down0)
+    # down0 = BatchNormalization()(down0)
+    # down0 = LeakyReLU(alpha=0.1)(down0)
+    # down0_pool = MaxPooling2D((2, 2), strides=(2, 2))(down0)
+
+    # 128
+    down1 = Conv2D(16, (3, 3), padding='same', use_bias=use_bias)(inputs)
+    down1 = BatchNormalization()(down1)
+    down1 = LeakyReLU(alpha=0.1)(down1)
+    down1 = Conv2D(16, (3, 3), padding='same', use_bias=use_bias)(down1)
+    down1 = BatchNormalization()(down1)
+    down1 = LeakyReLU(alpha=0.1)(down1)
+    down1 = Conv2D(16, (1, 1), padding='same', use_bias=use_bias)(down1)
+    down1 = BatchNormalization()(down1)
+    down1 = LeakyReLU(alpha=0.1)(down1)
+    down1 = SeBlock()(down1)
+    down1_pool = MaxPooling2D((2, 2), strides=(2, 2))(down1)
+
+    # 64
+    down2 = Conv2D(32, (3, 3), padding='same', use_bias=use_bias)(down1_pool)
+    down2 = BatchNormalization()(down2)
+    down2 = LeakyReLU(alpha=0.1)(down2)
+    down2 = Conv2D(32, (3, 3), padding='same', use_bias=use_bias)(down2)
+    down2 = BatchNormalization()(down2)
+    down2 = LeakyReLU(alpha=0.1)(down2)
+    down2 = Conv2D(32, (1, 1), padding='same', use_bias=use_bias)(down2)
+    down2 = BatchNormalization()(down2)
+    down2 = LeakyReLU(alpha=0.1)(down2)
+    down2 = SeBlock()(down2)
+    down2_pool = MaxPooling2D((2, 2), strides=(2, 2))(down2)
+
+    # 32
+    down3 = Conv2D(64, (3, 3), padding='same', use_bias=use_bias)(down2_pool)
+    down3 = BatchNormalization()(down3)
+    down3 = LeakyReLU(alpha=0.1)(down3)
+    down3 = Conv2D(64, (3, 3), padding='same', use_bias=use_bias)(down3)
+    down3 = BatchNormalization()(down3)
+    down3 = LeakyReLU(alpha=0.1)(down3)
+    down3 = Conv2D(64, (1, 1), padding='same', use_bias=use_bias)(down3)
+    down3 = BatchNormalization()(down3)
+    down3 = LeakyReLU(alpha=0.1)(down3)
+    down3 = SeBlock()(down3)
+    down3_pool = MaxPooling2D((2, 2), strides=(2, 2))(down3)
+
+    # 16
+    center = Conv2D(64, (3, 3), padding='same', use_bias=use_bias)(down3_pool)
+    center = BatchNormalization()(center)
+    center = LeakyReLU(alpha=0.1)(center)
+    center = Conv2D(64, (3, 3), padding='same', use_bias=use_bias)(center)
+    center = BatchNormalization()(center)
+    center = LeakyReLU(alpha=0.1)(center)
+    center = Conv2D(64, (1, 1), padding='same', use_bias=use_bias)(center)
+    center = BatchNormalization()(center)
+    center = LeakyReLU(alpha=0.1)(center)
+    center = SeBlock()(center)
+
+    # 32
+    up3 = UpSampling2D((2, 2))(center)
+    up3 = concatenate([down3, up3], axis=3)
+    up3 = Conv2D(64, (3, 3), padding='same', use_bias=use_bias)(up3)
+    up3 = BatchNormalization()(up3)
+    up3 = LeakyReLU(alpha=0.1)(up3)
+    up3 = Conv2D(64, (3, 3), padding='same', use_bias=use_bias)(up3)
+    up3 = BatchNormalization()(up3)
+    up3 = LeakyReLU(alpha=0.1)(up3)
+    up3 = Conv2D(64, (3, 3), padding='same', use_bias=use_bias)(up3)
+    up3 = BatchNormalization()(up3)
+    up3 = LeakyReLU(alpha=0.1)(up3)
+    up3 = Conv2D(64, (1, 1), padding='same', use_bias=use_bias)(up3)
+    up3 = BatchNormalization()(up3)
+    up3 = LeakyReLU(alpha=0.1)(up3)
+    up3 = SeBlock()(up3)
+
+    # 64
+    up2 = UpSampling2D((2, 2))(up3)
+    up2 = concatenate([down2, up2], axis=3)
+    up2 = Conv2D(32, (3, 3), padding='same', use_bias=use_bias)(up2)
+    up2 = BatchNormalization()(up2)
+    up2 = LeakyReLU(alpha=0.1)(up2)
+    up2 = Conv2D(32, (3, 3), padding='same', use_bias=use_bias)(up2)
+    up2 = BatchNormalization()(up2)
+    up2 = LeakyReLU(alpha=0.1)(up2)
+    up2 = Conv2D(32, (3, 3), padding='same', use_bias=use_bias)(up2)
+    up2 = BatchNormalization()(up2)
+    up2 = LeakyReLU(alpha=0.1)(up2)
+    up2 = Conv2D(32, (1, 1), padding='same', use_bias=use_bias)(up2)
+    up2 = BatchNormalization()(up2)
+    up2 = LeakyReLU(alpha=0.1)(up2)
+    up2 = SeBlock()(up2)
+
+    # 128
+    up1 = UpSampling2D((2, 2))(up2)
+    # use the Concatenate layer (not K.concatenate) so the functional graph stays valid
+    up1 = concatenate([down1, up1], axis=3)
+    up1 = Conv2D(16, (3, 3), padding='same', use_bias=use_bias)(up1)
+    up1 = BatchNormalization()(up1)
+    up1 = LeakyReLU(alpha=0.1)(up1)
+    up1 = Conv2D(16, (3, 3), padding='same', use_bias=use_bias)(up1)
+    up1 = BatchNormalization()(up1)
+    up1 = LeakyReLU(alpha=0.1)(up1)
+    up1 = Conv2D(16, (3, 3), padding='same', use_bias=use_bias)(up1)
+    up1 = BatchNormalization()(up1)
+    up1 = LeakyReLU(alpha=0.1)(up1)
+    up1 = Conv2D(16, (1, 1), padding='same', use_bias=use_bias)(up1)
+    up1 = BatchNormalization()(up1)
+    up1 = LeakyReLU(alpha=0.1)(up1)
+    up1 = SeBlock()(up1)
+
+    # # 256
+    # up0 = UpSampling2D((2, 2))(up1)
+    # up0 = concatenate([down0, up0], axis=3)
+    # up0 = Conv2D(16, (3, 3), padding='same', use_bias=use_bias)(up0)
+    # up0 = BatchNormalization()(up0)
+    # up0 = LeakyReLU(alpha=0.1)(up0)
+    # up0 = Conv2D(16, (3, 3), padding='same', use_bias=use_bias)(up0)
+    # up0 = BatchNormalization()(up0)
+    # up0 = LeakyReLU(alpha=0.1)(up0)
+    # up0 = Conv2D(16, (1, 1), padding='same', use_bias=use_bias)(up0)
+    # up0 = BatchNormalization()(up0)
+    # up0 = LeakyReLU(alpha=0.1)(up0)
+
+    classify = Conv2D(cls_num, (1, 1), activation='sigmoid')(up1)
+    # classify = Dense(cls_num, activation="softmax")(up1)
+    model = Model(inputs=inputs, outputs=classify)
+
+    # model.summary(line_length=100)
+    return model
+
+
+def seal_model(input_shape, output_shape, cls_num=3):
+    inputs = Input(shape=input_shape)
+    use_bias = False
+
+    # # 256
+    # down0 = Conv2D(16, (3, 3), padding='same', use_bias=use_bias)(inputs)
+    # down0 = BatchNormalization()(down0)
+    # down0 = LeakyReLU(alpha=0.1)(down0)
+    # down0 = Conv2D(16, (1, 1), padding='same', use_bias=use_bias)(down0)
+    # down0 = BatchNormalization()(down0)
+    # down0 = LeakyReLU(alpha=0.1)(down0)
+    # down0_pool = MaxPooling2D((2, 2), strides=(2, 2))(down0)
+
+    # 128
+    down1 = Conv2D(16, (3, 3), padding='same', use_bias=use_bias)(inputs)
+    down1 = BatchNormalization()(down1)
+    down1 = LeakyReLU(alpha=0.1)(down1)
+    down1 = Conv2D(16, (3, 3), padding='same', use_bias=use_bias)(down1)
+    down1 = BatchNormalization()(down1)
+    down1 = LeakyReLU(alpha=0.1)(down1)
+    down1 = Conv2D(16, (1, 1), padding='same', use_bias=use_bias)(down1)
+    down1 = BatchNormalization()(down1)
+    down1 = LeakyReLU(alpha=0.1)(down1)
+    down1_pool = MaxPooling2D((2, 2), strides=(2, 2))(down1)
+
+    # 64
+    down2 = Conv2D(32, (3, 3), padding='same', use_bias=use_bias)(down1_pool)
+    down2 = BatchNormalization()(down2)
+    down2 = LeakyReLU(alpha=0.1)(down2)
+    down2 = Conv2D(32, (3, 3), padding='same', use_bias=use_bias)(down2)
+    down2 = BatchNormalization()(down2)
+    down2 = LeakyReLU(alpha=0.1)(down2)
+    down2 = Conv2D(32, (1, 1), padding='same', use_bias=use_bias)(down2)
+    down2 = BatchNormalization()(down2)
+    down2 = LeakyReLU(alpha=0.1)(down2)
+    down2_pool = MaxPooling2D((2, 2), strides=(2, 2))(down2)
+
+    # 32
+    down3 = Conv2D(64, (3, 3), padding='same', use_bias=use_bias)(down2_pool)
+    down3 = BatchNormalization()(down3)
+    down3 = LeakyReLU(alpha=0.1)(down3)
+    down3 = Conv2D(64, (3, 3), padding='same', use_bias=use_bias)(down3)
+    down3 = BatchNormalization()(down3)
+    down3 = LeakyReLU(alpha=0.1)(down3)
+    down3 = Conv2D(64, (1, 1), padding='same', use_bias=use_bias)(down3)
+    down3 = BatchNormalization()(down3)
+    down3 = LeakyReLU(alpha=0.1)(down3)
+    down3_pool = MaxPooling2D((2, 2), strides=(2, 2))(down3)
+
+    # 16
+    center = Conv2D(64, (3, 3), padding='same', use_bias=use_bias)(down3_pool)
+    center = BatchNormalization()(center)
+    center = LeakyReLU(alpha=0.1)(center)
+    center = Conv2D(64, (3, 3), padding='same', use_bias=use_bias)(center)
+    center = BatchNormalization()(center)
+    center = LeakyReLU(alpha=0.1)(center)
+    center = Conv2D(64, (1, 1), padding='same', use_bias=use_bias)(center)
+    center = BatchNormalization()(center)
+    center = LeakyReLU(alpha=0.1)(center)
+
+    # 32
+    up3 = UpSampling2D((2, 2))(center)
+    up3 = concatenate([down3, up3], axis=3)
+    up3 = Conv2D(64, (3, 3), padding='same', use_bias=use_bias)(up3)
+    up3 = BatchNormalization()(up3)
+    up3 = LeakyReLU(alpha=0.1)(up3)
+    up3 = Conv2D(64, (3, 3), padding='same', use_bias=use_bias)(up3)
+    up3 = BatchNormalization()(up3)
+    up3 = LeakyReLU(alpha=0.1)(up3)
+    up3 = Conv2D(64, (3, 3), padding='same', use_bias=use_bias)(up3)
+    up3 = BatchNormalization()(up3)
+    up3 = LeakyReLU(alpha=0.1)(up3)
+    up3 = Conv2D(64, (1, 1), padding='same', use_bias=use_bias)(up3)
+    up3 = BatchNormalization()(up3)
+    up3 = LeakyReLU(alpha=0.1)(up3)
+
+    # 64
+    up2 = UpSampling2D((2, 2))(up3)
+    up2 = concatenate([down2, up2], axis=3)
+    up2 = Conv2D(32, (3, 3), padding='same', use_bias=use_bias)(up2)
+    up2 = BatchNormalization()(up2)
+    up2 = LeakyReLU(alpha=0.1)(up2)
+    up2 = Conv2D(32, (3, 3), padding='same', use_bias=use_bias)(up2)
+    up2 = BatchNormalization()(up2)
+    up2 = LeakyReLU(alpha=0.1)(up2)
+    up2 = Conv2D(32, (3, 3), padding='same', use_bias=use_bias)(up2)
+    up2 = BatchNormalization()(up2)
+    up2 = LeakyReLU(alpha=0.1)(up2)
+    up2 = Conv2D(32, (1, 1), padding='same', use_bias=use_bias)(up2)
+    up2 = BatchNormalization()(up2)
+    up2 = LeakyReLU(alpha=0.1)(up2)
+
+    # 128
+    up1 = UpSampling2D((2, 2))(up2)
+    # use the Concatenate layer (not K.concatenate) so the functional graph stays valid
+    up1 = concatenate([down1, up1], axis=3)
+    up1 = Conv2D(16, (3, 3), padding='same', use_bias=use_bias)(up1)
+    up1 = BatchNormalization()(up1)
+    up1 = LeakyReLU(alpha=0.1)(up1)
+    up1 = Conv2D(16, (3, 3), padding='same', use_bias=use_bias)(up1)
+    up1 = BatchNormalization()(up1)
+    up1 = LeakyReLU(alpha=0.1)(up1)
+    up1 = Conv2D(16, (3, 3), padding='same', use_bias=use_bias)(up1)
+    up1 = BatchNormalization()(up1)
+    up1 = LeakyReLU(alpha=0.1)(up1)
+    up1 = Conv2D(16, (1, 1), padding='same', use_bias=use_bias)(up1)
+    up1 = BatchNormalization()(up1)
+    up1 = LeakyReLU(alpha=0.1)(up1)
+
+    # # 256
+    # up0 = UpSampling2D((2, 2))(up1)
+    # up0 = concatenate([down0, up0], axis=3)
+    # up0 = Conv2D(16, (3, 3), padding='same', use_bias=use_bias)(up0)
+    # up0 = BatchNormalization()(up0)
+    # up0 = LeakyReLU(alpha=0.1)(up0)
+    # up0 = Conv2D(16, (3, 3), padding='same', use_bias=use_bias)(up0)
+    # up0 = BatchNormalization()(up0)
+    # up0 = LeakyReLU(alpha=0.1)(up0)
+    # up0 = Conv2D(16, (1, 1), padding='same', use_bias=use_bias)(up0)
+    # up0 = BatchNormalization()(up0)
+    # up0 = LeakyReLU(alpha=0.1)(up0)
+
+    classify = Conv2D(cls_num, (1, 1), activation='sigmoid')(up1)
+    # classify = Dense(cls_num, activation="softmax")(up1)
+    model = Model(inputs=inputs, outputs=classify)
+
+    model.summary(line_length=100)
+    return model
+
+
+def seal_model_small(input_shape, output_shape, cls_num=3):
+    inputs = Input(shape=input_shape)
+    use_bias = False
+
+    # 128
+    down1 = Conv2D(16, (3, 3), padding='same', use_bias=use_bias)(inputs)
+    down1 = BatchNormalization()(down1)
+    down1 = LeakyReLU(alpha=0.1)(down1)
+    down1 = Conv2D(16, (1, 1), padding='same', use_bias=use_bias)(down1)
+    down1 = BatchNormalization()(down1)
+    down1 = LeakyReLU(alpha=0.1)(down1)
+    down1_pool = MaxPooling2D((2, 2), strides=(2, 2))(down1)
+
+    # 64
+    down2 = Conv2D(32, (3, 3), padding='same', use_bias=use_bias)(down1_pool)
+    down2 = BatchNormalization()(down2)
+    down2 = LeakyReLU(alpha=0.1)(down2)
+    down2 = Conv2D(32, (1, 1), padding='same', use_bias=use_bias)(down2)
+    down2 = BatchNormalization()(down2)
+    down2 = LeakyReLU(alpha=0.1)(down2)
+    down2_pool = MaxPooling2D((2, 2), strides=(2, 2))(down2)
+
+    # 32
+    down3 = Conv2D(64, (3, 3), padding='same', use_bias=use_bias)(down2_pool)
+    down3 = BatchNormalization()(down3)
+    down3 = LeakyReLU(alpha=0.1)(down3)
+    down3 = Conv2D(64, (1, 1), padding='same', use_bias=use_bias)(down3)
+    down3 = BatchNormalization()(down3)
+    down3 = LeakyReLU(alpha=0.1)(down3)
+    down3_pool = MaxPooling2D((2, 2), strides=(2, 2))(down3)
+
+    # 16
+    center = Conv2D(64, (3, 3), padding='same', use_bias=use_bias)(down3_pool)
+    center = BatchNormalization()(center)
+    center = LeakyReLU(alpha=0.1)(center)
+    center = Conv2D(64, (1, 1), padding='same', use_bias=use_bias)(center)
+    center = BatchNormalization()(center)
+    center = LeakyReLU(alpha=0.1)(center)
+
+    # 32
+    up3 = UpSampling2D((2, 2))(center)
+    up3 = concatenate([down3, up3], axis=3)
+    up3 = Conv2D(64, (3, 3), padding='same', use_bias=use_bias)(up3)
+    up3 = BatchNormalization()(up3)
+    up3 = LeakyReLU(alpha=0.1)(up3)
+    up3 = Conv2D(64, (1, 1), padding='same', use_bias=use_bias)(up3)
+    up3 = BatchNormalization()(up3)
+    up3 = LeakyReLU(alpha=0.1)(up3)
+
+    # 64
+    up2 = UpSampling2D((2, 2))(up3)
+    up2 = concatenate([down2, up2], axis=3)
+    up2 = Conv2D(32, (3, 3), padding='same', use_bias=use_bias)(up2)
+    up2 = BatchNormalization()(up2)
+    up2 = LeakyReLU(alpha=0.1)(up2)
+    up2 = Conv2D(32, (1, 1), padding='same', use_bias=use_bias)(up2)
+    up2 = BatchNormalization()(up2)
+    up2 = LeakyReLU(alpha=0.1)(up2)
+
+    # 128
+    up1 = UpSampling2D((2, 2))(up2)
+    # use the Concatenate layer (not K.concatenate) so the functional graph stays valid
+    up1 = concatenate([down1, up1], axis=3)
+    up1 = Conv2D(16, (3, 3), padding='same', use_bias=use_bias)(up1)
+    up1 = BatchNormalization()(up1)
+    up1 = LeakyReLU(alpha=0.1)(up1)
+    up1 = Conv2D(16, (1, 1), padding='same', use_bias=use_bias)(up1)
+    up1 = BatchNormalization()(up1)
+    up1 = LeakyReLU(alpha=0.1)(up1)
+
+    classify = Conv2D(cls_num, (1, 1), activation='sigmoid')(up1)
+    # classify = Dense(cls_num, activation="softmax")(up1)
+    model = Model(inputs=inputs, outputs=classify)
+
+    model.summary(line_length=100)
+    return model
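+
+
+def _demo_seal_model():
+    # Hedged usage sketch, not part of the original training pipeline: the
+    # 128x128 RGB input size, the Adam optimizer and the binary cross-entropy
+    # loss are illustrative assumptions, not values taken from this repo.
+    model = seal_model_small(input_shape=(128, 128, 3), output_shape=(128, 128, 3), cls_num=3)
+    model.compile(optimizer="adam", loss="binary_crossentropy")
+    # per-pixel sigmoid output: one map per class, same spatial size as the input
+    pred = model.predict(np.zeros((1, 128, 128, 3), dtype=np.float32))
+    print(pred.shape)  # (1, 128, 128, 3)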
+
+
+class SeBlock(Layer):
+    def __init__(self, reduction=4, **kwargs):
+        super(SeBlock, self).__init__(**kwargs)
+        self.reduction = reduction
+
+    def build(self, input_shape):
+        # create the sub-layers when this layer is built, and register their
+        # weights manually, otherwise the layer reports 0 parameters
+        self.pool = GlobalAveragePooling2D(keepdims=True, name="my_pool")
+        self.dense_1 = Dense(int(input_shape[-1]) // self.reduction, use_bias=False, activation="relu", name='my_dense_1')
+        self.dense_2 = Dense(int(input_shape[-1]), use_bias=False, activation="hard_sigmoid", name='my_dense_2')
+        # the Dense layers must be built first: their weight lists stay empty until then
+        self.dense_1.build(input_shape)
+        self.dense_2.build((input_shape[0], input_shape[1], input_shape[2], int(input_shape[-1]) // self.reduction))
+        self._trainable_weights += self.dense_1._trainable_weights
+        self._trainable_weights += self.dense_2._trainable_weights
+        super(SeBlock, self).build(input_shape)
+
+    def call(self, inputs):
+        x = self.pool(inputs)
+        x = self.dense_1(x)
+        x = self.dense_2(x)
+        # reweight the input channels with the learned attention vector
+        return Multiply()([inputs, x])
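+
+
+def _demo_se_block():
+    # Hedged sketch of what SeBlock computes: squeeze (global average pooling,
+    # keepdims=True) -> excite (two Dense layers with a x4 channel reduction)
+    # -> channel-wise rescaling of the input. The 32x32x16 input shape is an
+    # assumption for illustration only.
+    x = Input(shape=(32, 32, 16))
+    y = SeBlock(reduction=4)(x)  # output shape equals the input shape
+    Model(x, y).summary(line_length=100)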
+
+
+VGG_MEAN = [103.939, 116.779, 123.68]
+
+
+class Vgg16:
+    def __init__(self, vgg16_npy_path=None):
+        if vgg16_npy_path is None:
+            # path = inspect.getfile(Vgg16)
+            # path = os.path.abspath(os.path.join(path, os.pardir))
+            # path = os.path.join(path, "vgg16.npy")
+            # vgg16_npy_path = path
+            # print(path)
+            raise ValueError("there is no vgg16.npy file!")
+
+        self.data_dict = np.load(vgg16_npy_path, encoding='latin1', allow_pickle=True).item()
+        print("npy file loaded")
+
+    def build(self, bgr):
+        """
+        load variables from npy to build the VGG
+
+        :param bgr: bgr image [batch, height, width, 3] with values scaled to [0, 1]
+        """
+
+        start_time = time.time()
+        print("build model started")
+        bgr_scaled = bgr * 255.0
+
+        # Convert RGB to BGR
+        # red, green, blue = tf.split(axis=3, num_or_size_splits=3, value=rgb_scaled)
+        # print("red", red)
+        # assert red.get_shape().as_list()[1:] == [224, 224, 1]
+        # assert green.get_shape().as_list()[1:] == [224, 224, 1]
+        # assert blue.get_shape().as_list()[1:] == [224, 224, 1]
+        # bgr = tf.concat(axis=3, values=[
+        #     blue - VGG_MEAN[0],
+        #     green - VGG_MEAN[1],
+        #     red - VGG_MEAN[2],
+        # ])
+        # assert bgr.get_shape().as_list()[1:] == [224, 224, 3]
+
+        self.conv1_1 = self.conv_layer(bgr_scaled, "conv1_1")
+        self.conv1_2 = self.conv_layer(self.conv1_1, "conv1_2")
+        self.pool1 = self.max_pool(self.conv1_2, 'pool1')
+
+        self.conv2_1 = self.conv_layer(self.pool1, "conv2_1")
+        self.conv2_2 = self.conv_layer(self.conv2_1, "conv2_2")
+        self.pool2 = self.max_pool(self.conv2_2, 'pool2')
+
+        self.conv3_1 = self.conv_layer(self.pool2, "conv3_1")
+        self.conv3_2 = self.conv_layer(self.conv3_1, "conv3_2")
+        self.conv3_3 = self.conv_layer(self.conv3_2, "conv3_3")
+        self.pool3 = self.max_pool(self.conv3_3, 'pool3')
+
+        self.conv4_1 = self.conv_layer(self.pool3, "conv4_1")
+        self.conv4_2 = self.conv_layer(self.conv4_1, "conv4_2")
+        self.conv4_3 = self.conv_layer(self.conv4_2, "conv4_3")
+        self.pool4 = self.max_pool(self.conv4_3, 'pool4')
+
+        self.conv5_1 = self.conv_layer(self.pool4, "conv5_1")
+        self.conv5_2 = self.conv_layer(self.conv5_1, "conv5_2")
+        self.conv5_3 = self.conv_layer(self.conv5_2, "conv5_3")
+        self.pool5 = self.max_pool(self.conv5_3, 'pool5')
+
+        # self.fc6 = self.fc_layer(self.pool5, "fc6")
+        # # assert self.fc6.get_shape().as_list()[1:] == [4096]
+        # self.relu6 = tf.nn.relu(self.fc6)
+        #
+        # self.fc7 = self.fc_layer(self.relu6, "fc7")
+        # self.relu7 = tf.nn.relu(self.fc7)
+        #
+        # self.fc8 = self.fc_layer(self.relu7, "fc8")
+        #
+        # self.prob = tf.nn.softmax(self.fc8, name="prob")
+
+        # self.data_dict = None
+        print("build model finished: %ds" % (time.time() - start_time))
+
+    def avg_pool(self, bottom, name):
+        return tf.nn.avg_pool(bottom, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME', name=name)
+
+    def max_pool(self, bottom, name):
+        return tf.nn.max_pool(bottom, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME', name=name)
+
+    def conv_layer(self, bottom, name):
+        with tf.compat.v1.variable_scope(name):
+            filt = self.get_conv_filter(name)
+
+            conv = tf.nn.conv2d(bottom, filt, [1, 1, 1, 1], padding='SAME')
+
+            conv_biases = self.get_bias(name)
+            bias = tf.nn.bias_add(conv, conv_biases)
+
+            relu = tf.nn.relu(bias)
+            return relu
+
+    def fc_layer(self, bottom, name):
+        with tf.compat.v1.variable_scope(name):
+            shape = bottom.get_shape().as_list()
+            dim = 1
+            for d in shape[1:]:
+                dim *= d
+            x = tf.reshape(bottom, [-1, dim])
+
+            weights = self.get_fc_weight(name)
+            biases = self.get_bias(name)
+
+            # Fully connected layer. Note that the '+' operation automatically
+            # broadcasts the biases.
+            fc = tf.nn.bias_add(tf.matmul(x, weights), biases)
+
+            return fc
+
+    def get_conv_filter(self, name):
+        return tf.constant(self.data_dict[name][0], name="filter")
+
+    def get_bias(self, name):
+        return tf.constant(self.data_dict[name][1], name="biases")
+
+    def get_fc_weight(self, name):
+        return tf.constant(self.data_dict[name][0], name="weights")
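+
+
+def _demo_vgg16_features():
+    # Hedged usage sketch: Vgg16 is a fixed feature extractor rebuilt from
+    # pretrained .npy weights (e.g. for a perceptual loss). 'vgg16.npy' is a
+    # placeholder path, not a file shipped with this change.
+    vgg = Vgg16('vgg16.npy')
+    vgg.build(tf.zeros([1, 224, 224, 3]))  # expects values scaled to [0, 1]
+    print(vgg.pool5.shape)  # (1, 7, 7, 512): 224 halved by the 5 max-pools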
+
+
+class Vgg19:
+    def __init__(self, vgg19_npy_path=None):
+        if vgg19_npy_path is None:
+            raise ValueError("there is no vgg19.npy file!")
+
+        self.data_dict = np.load(vgg19_npy_path, encoding='latin1', allow_pickle=True).item()
+
+    def build(self, bgr):
+        """
+        load variables from npy to build the VGG
+        :param bgr: bgr image [batch, height, width, 3] with values scaled to [0, 1]
+        """
+        bgr = bgr * 255.0
+        # bgr = bgr - np.array(VGG_MEAN).reshape((1, 1, 1, 3))
+
+        self.conv1_1 = self.conv_layer(bgr, "conv1_1")
+        self.conv1_2 = self.conv_layer(self.conv1_1, "conv1_2")
+        self.pool1 = self.max_pool(self.conv1_2, 'pool1')
+
+        self.conv2_1 = self.conv_layer(self.pool1, "conv2_1")
+        self.conv2_2 = self.conv_layer(self.conv2_1, "conv2_2")
+        self.pool2 = self.max_pool(self.conv2_2, 'pool2')
+
+        self.conv3_1 = self.conv_layer(self.pool2, "conv3_1")
+        self.conv3_2 = self.conv_layer(self.conv3_1, "conv3_2")
+        self.conv3_3 = self.conv_layer(self.conv3_2, "conv3_3")
+        self.conv3_4 = self.conv_layer(self.conv3_3, "conv3_4")
+        self.pool3 = self.max_pool(self.conv3_4, 'pool3')
+
+        self.conv4_1 = self.conv_layer(self.pool3, "conv4_1")
+        self.conv4_2 = self.conv_layer(self.conv4_1, "conv4_2")
+        self.conv4_3 = self.conv_layer(self.conv4_2, "conv4_3")
+        self.conv4_4 = self.conv_layer(self.conv4_3, "conv4_4")
+        self.pool4 = self.max_pool(self.conv4_4, 'pool4')
+
+        self.conv5_1 = self.conv_layer(self.pool4, "conv5_1")
+        self.conv5_2 = self.conv_layer(self.conv5_1, "conv5_2")
+        self.conv5_3 = self.conv_layer(self.conv5_2, "conv5_3")
+        self.conv5_4 = self.conv_layer(self.conv5_3, "conv5_4")
+        self.pool5 = self.max_pool(self.conv5_4, 'pool5')
+
+    def avg_pool(self, bottom, name):
+        return tf.nn.avg_pool(bottom, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME', name=name)
+
+    def max_pool(self, bottom, name):
+        return tf.nn.max_pool(bottom, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME', name=name)
+
+    def conv_layer(self, bottom, name):
+        with tf.compat.v1.variable_scope(name):
+            filt = self.get_conv_filter(name)
+
+            conv = tf.nn.conv2d(bottom, filt, [1, 1, 1, 1], padding='SAME')
+
+            conv_biases = self.get_bias(name)
+            bias = tf.nn.bias_add(conv, conv_biases)
+
+            relu = tf.nn.relu(bias)
+            return relu
+
+    def fc_layer(self, bottom, name):
+        with tf.compat.v1.variable_scope(name):
+            shape = bottom.get_shape().as_list()
+            dim = 1
+            for d in shape[1:]:
+                dim *= d
+            x = tf.reshape(bottom, [-1, dim])
+
+            weights = self.get_fc_weight(name)
+            biases = self.get_bias(name)
+
+            # Fully connected layer. Note that the '+' operation automatically
+            # broadcasts the biases.
+            fc = tf.nn.bias_add(tf.matmul(x, weights), biases)
+
+            return fc
+
+    def get_conv_filter(self, name):
+        return tf.constant(self.data_dict[name][0], name="filter")
+
+    def get_bias(self, name):
+        return tf.constant(self.data_dict[name][1], name="biases")
+
+    def get_fc_weight(self, name):
+        return tf.constant(self.data_dict[name][0], name="weights")
+
+
+def tiny_yolo_body(inputs, num_anchors, num_classes):
+    """Create Tiny YOLO_v3 model CNN body in keras."""
+    x1 = compose(
+        DarknetConv2D_BN_Leaky(16, (3, 3)),
+        MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same'),
+        DarknetConv2D_BN_Leaky(32, (3, 3)),
+        MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same'),
+        DarknetConv2D_BN_Leaky(64, (3, 3)),
+        MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same'),
+        DarknetConv2D_BN_Leaky(128, (3, 3)),
+        MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same'),
+        DarknetConv2D_BN_Leaky(256, (3, 3)))(inputs)
+
+    x2 = compose(
+        MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same'),
+        DarknetConv2D_BN_Leaky(512, (3, 3)),
+        MaxPooling2D(pool_size=(2, 2), strides=(1, 1), padding='same'),
+        DarknetConv2D_BN_Leaky(1024, (3, 3)),
+        DarknetConv2D_BN_Leaky(256, (1, 1)))(x1)
+
+    y1 = compose(
+        DarknetConv2D_BN_Leaky(512, (3, 3)),
+        DarknetConv2D(num_anchors*(num_classes+5), (1, 1)))(x2)
+
+    x2 = compose(
+        DarknetConv2D_BN_Leaky(128, (1, 1)),
+        UpSampling2D(2))(x2)
+
+    y2 = compose(
+        Concatenate(),
+        DarknetConv2D_BN_Leaky(256, (3, 3)),
+        DarknetConv2D(num_anchors*(num_classes+5), (1, 1)))([x2, x1])
+
+    return Model(inputs, [y1, y2])
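+
+
+def _demo_tiny_yolo_body():
+    # Hedged sketch: the body returns two detection heads, one at stride 32
+    # and one at stride 16, each with num_anchors*(num_classes+5) channels.
+    # The 416 input size and the 3-anchor/3-class split are assumptions.
+    m = tiny_yolo_body(Input(shape=(416, 416, 3)), num_anchors=3, num_classes=3)
+    print([o.shape for o in m.outputs])  # [(None, 13, 13, 24), (None, 26, 26, 24)]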
+
+
+def tinier_yolo_se_body(inputs, num_anchors, num_classes):
+    """Create a slimmer Tiny YOLO_v3 body with SE attention blocks in Keras."""
+    x1 = compose(
+        DarknetConv2D_BN_Leaky(8, (3, 3)),
+        MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same'),
+        DarknetConv2D_BN_Leaky(16, (3, 3)),
+        MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same'),
+        DarknetConv2D_BN_Leaky(32, (3, 3)),
+        MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same'),
+        DarknetConv2D_BN_Leaky(64, (3, 3)),
+        MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same'),
+        DarknetConv2D_BN_Leaky(128, (3, 3)),
+    )(inputs)
+    x1 = SeBlock()(x1)
+
+    x2 = compose(
+        MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same'),
+        DarknetConv2D_BN_Leaky(256, (3, 3)),
+        MaxPooling2D(pool_size=(2, 2), strides=(1, 1), padding='same'),
+        DarknetConv2D_BN_Leaky(512, (3, 3)),
+        DarknetConv2D_BN_Leaky(128, (1, 1)),
+    )(x1)
+    x2 = SeBlock()(x2)
+
+    y1 = compose(
+        DarknetConv2D_BN_Leaky(256, (3, 3)),
+        DarknetConv2D(num_anchors*(num_classes+5), (1, 1))
+    )(x2)
+    y1 = SeBlock()(y1)
+
+    x2 = compose(
+        DarknetConv2D_BN_Leaky(64, (1, 1)),
+        UpSampling2D(2)
+    )(x2)
+    x2 = SeBlock()(x2)
+
+    y2 = compose(
+        Concatenate(),
+        DarknetConv2D_BN_Leaky(128, (3, 3)),
+        DarknetConv2D(num_anchors*(num_classes+5), (1, 1))
+    )([x2, x1])
+    y2 = SeBlock()(y2)
+
+    model = Model(inputs, [y1, y2])
+    model.summary(line_length=120)
+    return model
+
+
+@wraps(Conv2D)
+def DarknetConv2D(*args, **kwargs):
+    """Wrapper to set Darknet parameters for Convolution2D."""
+    darknet_conv_kwargs = {'kernel_regularizer': l2(5e-4),
+                           'padding': 'valid' if kwargs.get('strides') == (2, 2) else 'same'}
+    darknet_conv_kwargs.update(kwargs)
+    return Conv2D(*args, **darknet_conv_kwargs)
+
+
+def DarknetConv2D_BN_Leaky(*args, **kwargs):
+    """Darknet Convolution2D followed by BatchNormalization and LeakyReLU."""
+    no_bias_kwargs = {'use_bias': False}
+    no_bias_kwargs.update(kwargs)
+    return compose(
+        DarknetConv2D(*args, **no_bias_kwargs),
+        BatchNormalization(),
+        LeakyReLU(alpha=0.1))
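+
+
+def _demo_darknet_block():
+    # Hedged sketch: DarknetConv2D_BN_Leaky returns a composed callable
+    # (Conv2D -> BatchNormalization -> LeakyReLU) built with compose(), so it
+    # is applied to a tensor like a single layer. The shapes are illustrative.
+    x = Input(shape=(208, 208, 3))
+    y = DarknetConv2D_BN_Leaky(64, (3, 3), strides=(2, 2))(x)  # stride 2 switches padding to 'valid'
+    Model(x, y).summary(line_length=100)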
+
+
+def get_tiny_inference_model(anchors, num_classes, weights_path='models/tiny_yolo_weights.h5'):
+    """Create the inference model for Tiny YOLOv3."""
+    image_input = Input(shape=(None, None, 3))
+    image_shape = Input(shape=(2,), dtype='int64', name='image_shape')
+    num_anchors = len(anchors)
+
+    model_body = tiny_yolo_body(image_input, num_anchors//2, num_classes)
+    print('Create Tiny YOLOv3 model with {} anchors and {} classes.'.format(num_anchors, num_classes))
+
+    model_body.load_weights(weights_path)
+    print('Load weights {}.'.format(weights_path))
+
+    boxes, scores, classes = Lambda(yolo_eval,
+                                    name='yolo_eval',
+                                    arguments={'anchors': anchors,
+                                               'num_classes': num_classes}
+                                    )([model_body.output, image_shape])
+    # boxes, scores, classes = yolo_eval([model_body.output, image_shape], anchors, num_classes)
+    model = Model([model_body.input, image_shape], [boxes, scores, classes])
+    # model.summary(120)
+    return model
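+
+
+def _demo_tiny_inference():
+    # Hedged usage sketch. The six anchor pairs are the stock Tiny-YOLOv3
+    # defaults, used here purely as an illustration; num_classes=3 and the
+    # default weights path are assumptions, and the .h5 file is not shipped
+    # with this change.
+    anchors = np.array([[10, 14], [23, 27], [37, 58], [81, 82], [135, 169], [344, 319]])
+    model = get_tiny_inference_model(anchors, num_classes=3)
+    # inference takes the image batch plus each image's original (height, width):
+    # boxes, scores, classes = model.predict([image_batch, np.array([[h, w]])])
+    return model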
+