- # -*- coding: utf-8 -*-
- """
- Created on Tue Jun 21 10:53:51 2022
- model
- @author: fangjiasheng
- """
- import os
- import sys
- sys.path.append(os.path.dirname(os.path.abspath(__file__)))
- from keras.layers import Lambda, Dense, Reshape, Conv2D, BatchNormalization, LeakyReLU, Masking, MaxPool2D, \
- MaxPooling2D, UpSampling2D, concatenate, Activation, GlobalAveragePooling2D, DepthwiseConv2D, Add
- from keras import layers, models, Sequential, Input, Model
- import keras.backend as K
def direction_model(input_shape, output_shape):
    """Build the direction classifier; currently backed by `cnn_model`.

    Alternative backbones kept for reference:
        mobile_net_v3_tiny(input_shape, output_shape)
        fpn(input_shape, output_shape)
    """
    return cnn_model(input_shape, output_shape)
def cnn_model(input_shape, output_shape):
    """Plain CNN classifier: a stack of Conv-BN-LeakyReLU blocks, each
    followed by 2x2 max pooling, then a (6, 6) pooling stage, a softmax
    Dense head, and two squeezes that drop the singleton spatial axes.

    NOTE(review): the output path pools the final activation directly with
    MaxPool2D((6, 6)); the last in-loop MaxPool2D is therefore not on the
    path to the output. Reproduced as-is — confirm whether intentional.
    """
    num_blocks = 6

    _input = Input(shape=input_shape, dtype="float32", name="input")

    # Stem: one conv block plus 2x2 pooling.
    x = Conv2D(16, (3, 3), padding='same')(_input)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha=0.)(x)  # alpha=0 -> plain ReLU behavior
    x = MaxPool2D()(x)

    act = x
    for _ in range(num_blocks):
        act = Conv2D(16, (3, 3), padding='same')(x)
        act = BatchNormalization()(act)
        act = LeakyReLU(alpha=0.)(act)
        x = MaxPool2D()(act)

    # Collapse the remaining spatial extent, then classify per position.
    pooled = MaxPool2D((6, 6))(act)
    probs = layers.Dense(output_shape, activation='softmax')(pooled)

    # Remove the two 1-sized spatial axes: (b, 1, 1, out) -> (b, out).
    probs = Lambda(lambda t: K.squeeze(t, axis=1))(probs)
    probs = Lambda(lambda t: K.squeeze(t, axis=1))(probs)
    return Model(inputs=_input, outputs=probs)
def cnn_model_240314(input_shape, output_shape):
    """Variant of `cnn_model` dated 2024-03-14: identical topology but with
    5 (instead of 6) repeated conv blocks after the stem.

    NOTE(review): as in `cnn_model`, the final in-loop MaxPool2D output is
    not on the output path — the (6, 6) pooling reads the activation
    directly. Reproduced as-is; confirm whether intentional.
    """
    num_blocks = 5

    _input = Input(shape=input_shape, dtype="float32", name="input")

    # Stem block.
    x = Conv2D(16, (3, 3), padding='same')(_input)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha=0.)(x)  # alpha=0 -> plain ReLU behavior
    x = MaxPool2D()(x)

    act = x
    for _ in range(num_blocks):
        act = Conv2D(16, (3, 3), padding='same')(x)
        act = BatchNormalization()(act)
        act = LeakyReLU(alpha=0.)(act)
        x = MaxPool2D()(act)

    pooled = MaxPool2D((6, 6))(act)
    probs = layers.Dense(output_shape, activation='softmax')(pooled)

    # (b, 1, 1, out) -> (b, out)
    probs = Lambda(lambda t: K.squeeze(t, axis=1))(probs)
    probs = Lambda(lambda t: K.squeeze(t, axis=1))(probs)
    return Model(inputs=_input, outputs=probs)
def fpn(input_shape, output_shape):
    """FPN-style classifier: three downsampling stages, two top-down
    lateral merges, and three GAP heads whose logits are summed before a
    final softmax.
    """

    def _stage(tensor, filters):
        # Two Conv(3x3)-BN-LeakyReLU blocks followed by 2x2 max pooling.
        for _ in range(2):
            tensor = Conv2D(filters, (3, 3), padding='same')(tensor)
            tensor = BatchNormalization()(tensor)
            tensor = LeakyReLU(alpha=0.)(tensor)
        return MaxPool2D()(tensor)

    _input = Input(shape=input_shape, dtype="float32", name="input")
    p1 = _stage(_input, 8)   # 192 -> 96
    p2 = _stage(p1, 16)      # 96 -> 48
    p3 = _stage(p2, 32)      # 48 -> 24

    # Top-down pathway: 1x1 lateral conv + upsampled coarser map.
    merged_23 = Add()([Conv2D(32, (1, 1))(p2), UpSampling2D((2, 2))(p3)])
    merged_12 = Add()([Conv2D(16, (1, 1))(p1), UpSampling2D((2, 2))(p2)])

    # Per-level heads: GAP -> reshape to 1x1 map -> two 1x1 convs -> logits.
    head_logits = []
    for feat, channels in ((p3, 32), (merged_23, 32), (merged_12, 16)):
        vec = GlobalAveragePooling2D()(feat)
        vec = Reshape((1, 1, channels))(vec)
        vec = Conv2D(64, (1, 1), padding='same')(vec)
        vec = Conv2D(output_shape, (1, 1), padding='same')(vec)
        head_logits.append(Reshape((output_shape,))(vec))

    summed = Add()(head_logits)
    return Model(_input, Activation('softmax')(summed))
def tiny_unet(input_shape, output_shape):
    """Small U-Net: three-level encoder/decoder with skip connections and a
    per-pixel softmax head of `output_shape` classes.

    Fix: the last skip connection used `K.concatenate` (a backend tensor
    op) instead of the `concatenate` layer used by the other two skips.
    Backend ops on symbolic Keras tensors bypass the layer graph, which
    breaks model construction/serialization in standalone Keras — replaced
    with the `concatenate` layer for correctness and consistency.
    """
    use_bias = False

    def _conv_block(x, filters):
        # Conv(3x3)-BN-LeakyReLU followed by Conv(1x1)-BN-LeakyReLU.
        x = Conv2D(filters, (3, 3), padding='same', use_bias=use_bias)(x)
        x = BatchNormalization()(x)
        x = LeakyReLU(alpha=0.1)(x)
        x = Conv2D(filters, (1, 1), padding='same', use_bias=use_bias)(x)
        x = BatchNormalization()(x)
        x = LeakyReLU(alpha=0.1)(x)
        return x

    inputs = Input(shape=input_shape)

    # Encoder (spatial size halves at each level: 128 -> 64 -> 32 -> 16).
    down1 = _conv_block(inputs, 16)
    down1_pool = MaxPooling2D((2, 2), strides=(2, 2))(down1)
    down2 = _conv_block(down1_pool, 32)
    down2_pool = MaxPooling2D((2, 2), strides=(2, 2))(down2)
    down3 = _conv_block(down2_pool, 64)
    down3_pool = MaxPooling2D((2, 2), strides=(2, 2))(down3)

    # Bottleneck.
    center = _conv_block(down3_pool, 64)

    # Decoder with channel-axis skip concatenations.
    up3 = UpSampling2D((2, 2))(center)
    up3 = concatenate([down3, up3], axis=3)
    up3 = _conv_block(up3, 64)

    up2 = UpSampling2D((2, 2))(up3)
    up2 = concatenate([down2, up2], axis=3)
    up2 = _conv_block(up2, 32)

    up1 = UpSampling2D((2, 2))(up2)
    # Was: K.concatenate([down1, up1], axis=3) — backend op, not a layer.
    up1 = concatenate([down1, up1], axis=3)
    up1 = _conv_block(up1, 16)

    classify = Conv2D(output_shape, (1, 1), activation='softmax')(up1)
    return Model(inputs=inputs, outputs=classify)
|