model_260.py

"""YOLO_v3 Model Defined in Keras."""
from functools import wraps

import numpy as np
import tensorflow as tf
from keras import backend as K, Input
# keras2.6.0 and keras2.1.5
# from keras.engine import Layer
from keras.layers import Conv2D, Add, ZeroPadding2D, UpSampling2D, Concatenate, MaxPooling2D, Dense, \
    GlobalAveragePooling2D, Multiply, Lambda, Layer
from keras.layers.advanced_activations import LeakyReLU
# keras2.6.0 and keras2.1.5
# from keras.layers.normalization import BatchNormalization
from tensorflow.keras.layers import BatchNormalization
from keras.models import Model
from keras.regularizers import l2

from click_captcha.utils import compose


def yolo_net(input_shape, anchors, num_classes, load_pretrained=True,
             weights_path='models/tiny_yolo_weights.h5'):
    """Create the training model for Tiny YOLOv3 (single-channel input)."""
    # get a new session
    # ops.reset_default_graph()
    K.clear_session()
    image_input = Input(shape=(None, None, 1))
    h, w = input_shape
    num_anchors = len(anchors)
    y_true = [Input(shape=(h//{0: 32, 1: 16}[l], w//{0: 32, 1: 16}[l],
                           num_anchors//2, num_classes+5)) for l in range(2)]
    model_body = tiny_yolo_body(image_input, num_anchors//2, num_classes)
    print('Create Tiny YOLOv3 model with {} anchors and {} classes.'.format(num_anchors, num_classes))
    if load_pretrained:
        model_body.load_weights(weights_path)
        print('Load weights {}.'.format(weights_path))
    model_loss = Lambda(yolo_loss, output_shape=(1,), name='yolo_loss',
                        arguments={'anchors': anchors, 'num_classes': num_classes, 'ignore_thresh': .5})(
        [*model_body.output, *y_true])
    model = Model([model_body.input, *y_true], model_loss)
    model.summary(120)
    return model


def yolo_net_char(input_shape, anchors, num_classes, load_pretrained=True,
                  weights_path='models/tiny_yolo_weights.h5'):
    """Create the training model for Tiny YOLOv3 (three-channel input)."""
    # get a new session
    # ops.reset_default_graph()
    K.clear_session()
    image_input = Input(shape=(None, None, 3))
    h, w = input_shape
    num_anchors = len(anchors)
    y_true = [Input(shape=(h//{0: 32, 1: 16}[l], w//{0: 32, 1: 16}[l],
                           num_anchors//2, num_classes+5)) for l in range(2)]
    model_body = tiny_yolo_body(image_input, num_anchors//2, num_classes)
    print('Create Tiny YOLOv3 model with {} anchors and {} classes.'.format(num_anchors, num_classes))
    if load_pretrained:
        model_body.load_weights(weights_path)
        print('Load weights {}.'.format(weights_path))
    model_loss = Lambda(yolo_loss, output_shape=(1,), name='yolo_loss',
                        arguments={'anchors': anchors, 'num_classes': num_classes, 'ignore_thresh': .5})(
        [*model_body.output, *y_true])
    model = Model([model_body.input, *y_true], model_loss)
    model.summary(120)
    return model
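

# Usage sketch (added for illustration, not part of the original training code):
# how the training model above is typically compiled. The anchor values and the
# class count are placeholder assumptions; the Lambda layer named 'yolo_loss'
# already outputs the loss, so the Keras loss simply passes the prediction through.
def _example_compile_training_model():
    example_anchors = np.array([[10, 14], [23, 27], [37, 58],
                                [81, 82], [135, 169], [344, 319]], dtype='float32')
    model = yolo_net_char(input_shape=(416, 416), anchors=example_anchors,
                          num_classes=10, load_pretrained=False)
    model.compile(optimizer='adam',
                  loss={'yolo_loss': lambda y_true, y_pred: y_pred})
    return model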


@wraps(Conv2D)
def DarknetConv2D(*args, **kwargs):
    """Wrapper to set Darknet parameters for Convolution2D."""
    darknet_conv_kwargs = {'kernel_regularizer': l2(5e-4)}
    darknet_conv_kwargs['padding'] = 'valid' if kwargs.get('strides') == (2, 2) else 'same'
    darknet_conv_kwargs.update(kwargs)
    return Conv2D(*args, **darknet_conv_kwargs)


def DarknetConv2D_BN_Leaky(*args, **kwargs):
    """Darknet Convolution2D followed by BatchNormalization and LeakyReLU."""
    no_bias_kwargs = {'use_bias': False}
    no_bias_kwargs.update(kwargs)
    return compose(
        DarknetConv2D(*args, **no_bias_kwargs),
        BatchNormalization(),
        LeakyReLU(alpha=0.1))


def resblock_body(x, num_filters, num_blocks):
    '''A series of resblocks starting with a downsampling Convolution2D'''
    # Darknet uses left and top padding instead of 'same' mode
    x = ZeroPadding2D(((1, 0), (1, 0)))(x)
    x = DarknetConv2D_BN_Leaky(num_filters, (3, 3), strides=(2, 2))(x)
    for i in range(num_blocks):
        y = compose(
            DarknetConv2D_BN_Leaky(num_filters//2, (1, 1)),
            DarknetConv2D_BN_Leaky(num_filters, (3, 3)))(x)
        x = Add()([x, y])
    return x
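

# Shape sketch (illustrative, not part of the original file): resblock_body first
# downsamples by 2 via the zero-padded stride-2 convolution, then stacks
# `num_blocks` residual units at the new resolution. The 256x256x32 input below
# is an arbitrary placeholder.
def _example_resblock_shapes():
    feature_in = Input(shape=(256, 256, 32))
    feature_out = resblock_body(feature_in, num_filters=64, num_blocks=1)
    # feature_out has shape (None, 128, 128, 64): spatial size halved, 64 filters.
    return Model(feature_in, feature_out)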


class SeBlock(Layer):
    """Squeeze-and-excitation block that re-weights feature-map channels."""

    def __init__(self, reduction=4, **kwargs):
        super(SeBlock, self).__init__(**kwargs)
        self.reduction = reduction

    def build(self, input_shape):
        # Sub-layers have to be created when the layer is built.
        # Manually register the dense layers' weights with this custom layer,
        # otherwise its parameter count shows up as 0.
        self.pool = GlobalAveragePooling2D(name="my_pool")
        self.expand_1 = Lambda(lambda x: K.expand_dims(x, axis=1))
        self.expand_2 = Lambda(lambda x: K.expand_dims(x, axis=1))
        self.dense_1 = Dense(int(input_shape[-1]) // self.reduction, use_bias=False, activation="relu", name='my_dense_1')
        self.dense_2 = Dense(int(input_shape[-1]), use_bias=False, activation="hard_sigmoid", name='my_dense_2')
        # Needed separately below keras 2.2.0; not required on keras 2.6.0:
        # self.dense_1.build((input_shape[0], 1, 1, input_shape[-1]))
        # self.dense_2.build((input_shape[0], 1, 1, input_shape[-1] // self.reduction))
        self._trainable_weights += self.dense_1._trainable_weights
        self._trainable_weights += self.dense_2._trainable_weights
        super(SeBlock, self).build(input_shape)

    def call(self, inputs):
        x = self.pool(inputs)
        x = self.expand_1(x)
        x = self.expand_2(x)
        x = self.dense_1(x)
        x = self.dense_2(x)
        # Weight the input channels by the learned excitation.
        return Multiply()([inputs, x])
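

# Usage sketch (illustrative assumption): SeBlock can be dropped after any
# feature map; it keeps the tensor shape and only re-weights the channels with
# a learned squeeze-and-excitation gate. The 52x52x256 shape is a placeholder.
def _example_se_block():
    feature_in = Input(shape=(52, 52, 256))
    feature_out = SeBlock(reduction=4)(feature_in)  # same shape as feature_in
    return Model(feature_in, feature_out)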


def darknet_body(x):
    '''Darknet body having 52 Convolution2D layers'''
    x = DarknetConv2D_BN_Leaky(32, (3, 3))(x)
    x = resblock_body(x, 64, 1)
    x = resblock_body(x, 128, 2)
    x = resblock_body(x, 256, 8)
    x = resblock_body(x, 512, 8)
    x = resblock_body(x, 1024, 4)
    return x


def make_last_layers(x, num_filters, out_filters):
    '''6 Conv2D_BN_Leaky layers followed by a Conv2D_linear layer'''
    x = compose(
        DarknetConv2D_BN_Leaky(num_filters, (1, 1)),
        DarknetConv2D_BN_Leaky(num_filters*2, (3, 3)),
        DarknetConv2D_BN_Leaky(num_filters, (1, 1)),
        DarknetConv2D_BN_Leaky(num_filters*2, (3, 3)),
        DarknetConv2D_BN_Leaky(num_filters, (1, 1)))(x)
    y = compose(
        DarknetConv2D_BN_Leaky(num_filters*2, (3, 3)),
        DarknetConv2D(out_filters, (1, 1)))(x)
    return x, y


def yolo_body(inputs, num_anchors, num_classes):
    """Create YOLO_V3 model CNN body in Keras."""
    darknet = Model(inputs, darknet_body(inputs))
    x, y1 = make_last_layers(darknet.output, 512, num_anchors*(num_classes+5))

    x = compose(
        DarknetConv2D_BN_Leaky(256, (1, 1)),
        UpSampling2D(2))(x)
    x = Concatenate()([x, darknet.layers[152].output])
    x, y2 = make_last_layers(x, 256, num_anchors*(num_classes+5))

    x = compose(
        DarknetConv2D_BN_Leaky(128, (1, 1)),
        UpSampling2D(2))(x)
    x = Concatenate()([x, darknet.layers[92].output])
    x, y3 = make_last_layers(x, 128, num_anchors*(num_classes+5))

    return Model(inputs, [y1, y2, y3])


def tiny_yolo_body(inputs, num_anchors, num_classes):
    """Create Tiny YOLO_v3 model CNN body in keras."""
    x1 = compose(
        DarknetConv2D_BN_Leaky(16, (3, 3)),
        MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same'),
        DarknetConv2D_BN_Leaky(32, (3, 3)),
        MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same'),
        DarknetConv2D_BN_Leaky(64, (3, 3)),
        MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same'),
        DarknetConv2D_BN_Leaky(128, (3, 3)),
        MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same'),
        DarknetConv2D_BN_Leaky(256, (3, 3)))(inputs)
    x2 = compose(
        MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same'),
        DarknetConv2D_BN_Leaky(512, (3, 3)),
        MaxPooling2D(pool_size=(2, 2), strides=(1, 1), padding='same'),
        DarknetConv2D_BN_Leaky(1024, (3, 3)),
        DarknetConv2D_BN_Leaky(256, (1, 1)))(x1)
    y1 = compose(
        DarknetConv2D_BN_Leaky(512, (3, 3)),
        DarknetConv2D(num_anchors*(num_classes+5), (1, 1)))(x2)

    x2 = compose(
        DarknetConv2D_BN_Leaky(128, (1, 1)),
        UpSampling2D(2))(x2)
    y2 = compose(
        Concatenate(),
        DarknetConv2D_BN_Leaky(256, (3, 3)),
        DarknetConv2D(num_anchors*(num_classes+5), (1, 1)))([x2, x1])

    model = Model(inputs, [y1, y2])
    model.summary(120)
    return model
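

# Grid-size sketch (illustrative, not part of the original file): for a 416x416
# input the coarse head y1 is predicted on a 13x13 grid (stride 32) and the fine
# head y2 on a 26x26 grid (stride 16), which matches the {0: 32, 1: 16} divisors
# used for the y_true Inputs in yolo_net/yolo_net_char. Anchor and class counts
# below are placeholders.
def _example_tiny_body_grids():
    image_input = Input(shape=(416, 416, 3))
    body = tiny_yolo_body(image_input, num_anchors=3, num_classes=10)
    # body.output[0] shape: (None, 13, 13, 45); body.output[1] shape: (None, 26, 26, 45)
    return body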


def tiny_yolo_se_body(inputs, num_anchors, num_classes):
    """Create Tiny YOLO_v3 model CNN body with SeBlock channel attention in keras."""
    x1 = compose(
        DarknetConv2D_BN_Leaky(16, (3, 3)),
        MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same'),
        DarknetConv2D_BN_Leaky(32, (3, 3)),
        MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same'),
        DarknetConv2D_BN_Leaky(64, (3, 3)),
        MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same'),
        DarknetConv2D_BN_Leaky(128, (3, 3)),
        MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same'),
        DarknetConv2D_BN_Leaky(256, (3, 3)),
    )(inputs)
    x1 = SeBlock()(x1)
    x2 = compose(
        MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same'),
        DarknetConv2D_BN_Leaky(512, (3, 3)),
        MaxPooling2D(pool_size=(2, 2), strides=(1, 1), padding='same'),
        DarknetConv2D_BN_Leaky(1024, (3, 3)),
        DarknetConv2D_BN_Leaky(256, (1, 1)),
    )(x1)
    x2 = SeBlock()(x2)
    y1 = compose(
        DarknetConv2D_BN_Leaky(512, (3, 3)),
        DarknetConv2D(num_anchors*(num_classes+5), (1, 1))
    )(x2)
    y1 = SeBlock()(y1)

    x2 = compose(
        DarknetConv2D_BN_Leaky(128, (1, 1)),
        UpSampling2D(2)
    )(x2)
    x2 = SeBlock()(x2)
    y2 = compose(
        Concatenate(),
        DarknetConv2D_BN_Leaky(256, (3, 3)),
        DarknetConv2D(num_anchors*(num_classes+5), (1, 1))
    )([x2, x1])
    y2 = SeBlock()(y2)

    model = Model(inputs, [y1, y2])
    model.summary(120)
    return model


def tinier_yolo_se_body(inputs, num_anchors, num_classes):
    """Create a narrower Tiny YOLO_v3 body (halved filter counts) with SeBlock channel attention in keras."""
    x1 = compose(
        DarknetConv2D_BN_Leaky(8, (3, 3)),
        MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same'),
        DarknetConv2D_BN_Leaky(16, (3, 3)),
        MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same'),
        DarknetConv2D_BN_Leaky(32, (3, 3)),
        MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same'),
        DarknetConv2D_BN_Leaky(64, (3, 3)),
        MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same'),
        DarknetConv2D_BN_Leaky(128, (3, 3)),
    )(inputs)
    x1 = SeBlock()(x1)
    x2 = compose(
        MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same'),
        DarknetConv2D_BN_Leaky(256, (3, 3)),
        MaxPooling2D(pool_size=(2, 2), strides=(1, 1), padding='same'),
        DarknetConv2D_BN_Leaky(512, (3, 3)),
        DarknetConv2D_BN_Leaky(128, (1, 1)),
    )(x1)
    x2 = SeBlock()(x2)
    y1 = compose(
        DarknetConv2D_BN_Leaky(256, (3, 3)),
        DarknetConv2D(num_anchors*(num_classes+5), (1, 1))
    )(x2)
    y1 = SeBlock()(y1)

    x2 = compose(
        DarknetConv2D_BN_Leaky(64, (1, 1)),
        UpSampling2D(2)
    )(x2)
    x2 = SeBlock()(x2)
    y2 = compose(
        Concatenate(),
        DarknetConv2D_BN_Leaky(128, (3, 3)),
        DarknetConv2D(num_anchors*(num_classes+5), (1, 1))
    )([x2, x1])
    y2 = SeBlock()(y2)

    model = Model(inputs, [y1, y2])
    model.summary(120)
    return model


def yolo_head(feats, anchors, num_classes, input_shape, calc_loss=False):
    """Convert final layer features to bounding box parameters."""
    num_anchors = len(anchors)
    # Reshape to batch, height, width, num_anchors, box_params.
    anchors_tensor = K.reshape(K.constant(anchors), [1, 1, 1, num_anchors, 2])

    grid_shape = K.shape(feats)[1:3]  # height, width
    grid_y = K.tile(K.reshape(K.arange(0, stop=grid_shape[0]), [-1, 1, 1, 1]),
                    [1, grid_shape[1], 1, 1])
    grid_x = K.tile(K.reshape(K.arange(0, stop=grid_shape[1]), [1, -1, 1, 1]),
                    [grid_shape[0], 1, 1, 1])
    grid = K.concatenate([grid_x, grid_y])
    grid = K.cast(grid, K.dtype(feats))

    feats = K.reshape(
        feats, [-1, grid_shape[0], grid_shape[1], num_anchors, num_classes + 5])

    # Adjust predictions to each spatial grid point and anchor size.
    box_xy = (K.sigmoid(feats[..., :2]) + grid) / K.cast(grid_shape[::-1], K.dtype(feats))
    box_wh = K.exp(feats[..., 2:4]) * anchors_tensor / K.cast(input_shape[::-1], K.dtype(feats))
    box_confidence = K.sigmoid(feats[..., 4:5])
    box_class_probs = K.sigmoid(feats[..., 5:])

    if calc_loss == True:
        return grid, feats, box_xy, box_wh
    return box_xy, box_wh, box_confidence, box_class_probs
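

# Decoding sketch (illustrative assumption, not part of the original file):
# yolo_head turns raw head activations into normalized boxes, i.e.
# box_xy = (sigmoid(t_xy) + cell_index) / grid_size and
# box_wh = exp(t_wh) * anchor / input_size. The anchors and class count below
# are placeholders for the coarse 13x13 head of a tiny model.
def _example_decode_head(feats):
    example_anchors = np.array([[81, 82], [135, 169], [344, 319]], dtype='float32')
    return yolo_head(feats, example_anchors, num_classes=10,
                     input_shape=K.constant([416, 416]))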


def yolo_correct_boxes(box_xy, box_wh, input_shape, image_shape):
    '''Get corrected boxes'''
    box_yx = box_xy[..., ::-1]
    box_hw = box_wh[..., ::-1]
    input_shape = K.cast(input_shape, K.dtype(box_yx))
    image_shape = K.cast(image_shape, K.dtype(box_yx))
    new_shape = K.round(image_shape * K.min(input_shape/image_shape))
    offset = (input_shape-new_shape)/2./input_shape
    scale = input_shape/new_shape
    box_yx = (box_yx - offset) * scale
    box_hw *= scale

    box_mins = box_yx - (box_hw / 2.)
    box_maxes = box_yx + (box_hw / 2.)
    boxes = K.concatenate([
        box_mins[..., 0:1],   # y_min
        box_mins[..., 1:2],   # x_min
        box_maxes[..., 0:1],  # y_max
        box_maxes[..., 1:2]   # x_max
    ])

    # Scale boxes back to original image shape.
    boxes *= K.concatenate([image_shape, image_shape])
    return boxes


def yolo_boxes_and_scores(feats, anchors, num_classes, input_shape, image_shape):
    '''Process Conv layer output'''
    box_xy, box_wh, box_confidence, box_class_probs = yolo_head(feats,
        anchors, num_classes, input_shape)
    boxes = yolo_correct_boxes(box_xy, box_wh, input_shape, image_shape)
    boxes = K.reshape(boxes, [-1, 4])
    box_scores = box_confidence * box_class_probs
    box_scores = K.reshape(box_scores, [-1, num_classes])
    return boxes, box_scores


def yolo_eval(yolo_outputs,
              anchors,
              num_classes,
              image_shape,
              max_boxes=20,
              score_threshold=.6,
              iou_threshold=.5):
    """Evaluate YOLO model on given input and return filtered boxes."""
    num_layers = len(yolo_outputs)
    print("yolo_outputs", yolo_outputs[0])
    print("num_layers", num_layers)
    print("image_shape", image_shape)
    anchor_mask = [[6, 7, 8], [3, 4, 5], [0, 1, 2]] if num_layers == 3 else [[3, 4, 5], [1, 2, 3]]  # default setting
    input_shape = K.shape(yolo_outputs[0])[1:3] * 32
    boxes = []
    box_scores = []
    for l in range(num_layers):
        _boxes, _box_scores = yolo_boxes_and_scores(yolo_outputs[l],
            anchors[anchor_mask[l]], num_classes, input_shape, image_shape)
        boxes.append(_boxes)
        box_scores.append(_box_scores)
    boxes = K.concatenate(boxes, axis=0)
    box_scores = K.concatenate(box_scores, axis=0)

    mask = box_scores >= score_threshold
    max_boxes_tensor = K.constant(max_boxes, dtype='int32')
    boxes_ = []
    scores_ = []
    classes_ = []
    for c in range(num_classes):
        # TODO: use keras backend instead of tf.
        class_boxes = tf.boolean_mask(boxes, mask[:, c])
        class_box_scores = tf.boolean_mask(box_scores[:, c], mask[:, c])
        nms_index = tf.image.non_max_suppression(
            class_boxes, class_box_scores, max_boxes_tensor, iou_threshold=iou_threshold)
        class_boxes = K.gather(class_boxes, nms_index)
        class_box_scores = K.gather(class_box_scores, nms_index)
        classes = K.ones_like(class_box_scores, 'int32') * c
        boxes_.append(class_boxes)
        scores_.append(class_box_scores)
        classes_.append(classes)
    boxes_ = K.concatenate(boxes_, axis=0)
    scores_ = K.concatenate(scores_, axis=0)
    classes_ = K.concatenate(classes_, axis=0)

    return boxes_, scores_, classes_
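

# Inference sketch (illustrative, not part of the original file): wiring a tiny
# body together with yolo_eval. The anchors, class count, image shape and score
# threshold are placeholder assumptions.
def _example_eval_graph():
    example_anchors = np.array([[10, 14], [23, 27], [37, 58],
                                [81, 82], [135, 169], [344, 319]], dtype='float32')
    image_input = Input(shape=(None, None, 3))
    body = tiny_yolo_body(image_input, num_anchors=3, num_classes=10)
    boxes, scores, classes = yolo_eval(body.output, example_anchors, num_classes=10,
                                       image_shape=K.constant([720., 1280.]),
                                       score_threshold=.3)
    return body, boxes, scores, classes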


def preprocess_true_boxes(true_boxes, input_shape, anchors, num_classes):
    '''Preprocess true boxes to training input format

    Parameters
    ----------
    true_boxes: array, shape=(m, T, 5)
        Absolute x_min, y_min, x_max, y_max, class_id relative to input_shape.
    input_shape: array-like, hw, multiples of 32
    anchors: array, shape=(N, 2), wh
    num_classes: integer

    Returns
    -------
    y_true: list of array, shape like yolo_outputs, xywh are relative values

    '''
    assert (true_boxes[..., 4] < num_classes).all(), 'class id must be less than num_classes'
    num_layers = len(anchors)//3  # default setting
    anchor_mask = [[6, 7, 8], [3, 4, 5], [0, 1, 2]] if num_layers == 3 else [[3, 4, 5], [1, 2, 3]]

    true_boxes = np.array(true_boxes, dtype='float32')
    input_shape = np.array(input_shape, dtype='int32')
    boxes_xy = (true_boxes[..., 0:2] + true_boxes[..., 2:4]) // 2
    boxes_wh = true_boxes[..., 2:4] - true_boxes[..., 0:2]
    true_boxes[..., 0:2] = boxes_xy/input_shape[::-1]
    true_boxes[..., 2:4] = boxes_wh/input_shape[::-1]

    m = true_boxes.shape[0]
    grid_shapes = [input_shape//{0: 32, 1: 16, 2: 8}[l] for l in range(num_layers)]
    y_true = [np.zeros((m, grid_shapes[l][0], grid_shapes[l][1], len(anchor_mask[l]), 5+num_classes),
                       dtype='float32') for l in range(num_layers)]

    # Expand dim to apply broadcasting.
    anchors = np.expand_dims(anchors, 0)
    anchor_maxes = anchors / 2.
    anchor_mins = -anchor_maxes
    valid_mask = boxes_wh[..., 0] > 0

    for b in range(m):
        # Discard zero rows.
        wh = boxes_wh[b, valid_mask[b]]
        if len(wh) == 0:
            continue
        # Expand dim to apply broadcasting.
        wh = np.expand_dims(wh, -2)
        box_maxes = wh / 2.
        box_mins = -box_maxes

        intersect_mins = np.maximum(box_mins, anchor_mins)
        intersect_maxes = np.minimum(box_maxes, anchor_maxes)
        intersect_wh = np.maximum(intersect_maxes - intersect_mins, 0.)
        intersect_area = intersect_wh[..., 0] * intersect_wh[..., 1]
        box_area = wh[..., 0] * wh[..., 1]
        anchor_area = anchors[..., 0] * anchors[..., 1]
        iou = intersect_area / (box_area + anchor_area - intersect_area)

        # Find best anchor for each true box
        best_anchor = np.argmax(iou, axis=-1)

        for t, n in enumerate(best_anchor):
            for l in range(num_layers):
                if n in anchor_mask[l]:
                    i = np.floor(true_boxes[b, t, 0]*grid_shapes[l][1]).astype('int32')
                    j = np.floor(true_boxes[b, t, 1]*grid_shapes[l][0]).astype('int32')
                    k = anchor_mask[l].index(n)
                    c = true_boxes[b, t, 4].astype('int32')
                    y_true[l][b, j, i, k, 0:4] = true_boxes[b, t, 0:4]
                    y_true[l][b, j, i, k, 4] = 1
                    y_true[l][b, j, i, k, 5+c] = 1

    return y_true
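

# Target-encoding sketch (illustrative assumption, not part of the original
# file): turning one absolute pixel box (x_min, y_min, x_max, y_max, class_id)
# on a 416x416 input into the two Tiny YOLOv3 target grids. The anchors and the
# class count are placeholders.
def _example_encode_targets():
    example_anchors = np.array([[10, 14], [23, 27], [37, 58],
                                [81, 82], [135, 169], [344, 319]], dtype='float32')
    true_boxes = np.array([[[100, 120, 180, 200, 2]]], dtype='float32')  # shape (m=1, T=1, 5)
    y_true = preprocess_true_boxes(true_boxes, input_shape=(416, 416),
                                   anchors=example_anchors, num_classes=10)
    # y_true[0] shape: (1, 13, 13, 3, 15); y_true[1] shape: (1, 26, 26, 3, 15)
    return y_true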


def box_iou(b1, b2):
    '''Return iou tensor

    Parameters
    ----------
    b1: tensor, shape=(i1,...,iN, 4), xywh
    b2: tensor, shape=(j, 4), xywh

    Returns
    -------
    iou: tensor, shape=(i1,...,iN, j)

    '''
    # Expand dim to apply broadcasting.
    b1 = K.expand_dims(b1, -2)
    b1_xy = b1[..., :2]
    b1_wh = b1[..., 2:4]
    b1_wh_half = b1_wh/2.
    b1_mins = b1_xy - b1_wh_half
    b1_maxes = b1_xy + b1_wh_half

    # Expand dim to apply broadcasting.
    b2 = K.expand_dims(b2, 0)
    b2_xy = b2[..., :2]
    b2_wh = b2[..., 2:4]
    b2_wh_half = b2_wh/2.
    b2_mins = b2_xy - b2_wh_half
    b2_maxes = b2_xy + b2_wh_half

    intersect_mins = K.maximum(b1_mins, b2_mins)
    intersect_maxes = K.minimum(b1_maxes, b2_maxes)
    intersect_wh = K.maximum(intersect_maxes - intersect_mins, 0.)
    intersect_area = intersect_wh[..., 0] * intersect_wh[..., 1]
    b1_area = b1_wh[..., 0] * b1_wh[..., 1]
    b2_area = b2_wh[..., 0] * b2_wh[..., 1]
    iou = intersect_area / (b1_area + b2_area - intersect_area)

    return iou


def yolo_loss(args, anchors, num_classes, ignore_thresh=.5, print_loss=False):
    """Return yolo_loss tensor

    Parameters
    ----------
    yolo_outputs: list of tensor, the output of yolo_body or tiny_yolo_body
    y_true: list of array, the output of preprocess_true_boxes
    anchors: array, shape=(N, 2), wh
    num_classes: integer
    ignore_thresh: float, the iou threshold whether to ignore object confidence loss

    Returns
    -------
    loss: tensor, shape=(1,)

    """
    num_layers = len(anchors)//3  # default setting
    yolo_outputs = args[:num_layers]
    y_true = args[num_layers:]
    anchor_mask = [[6, 7, 8], [3, 4, 5], [0, 1, 2]] if num_layers == 3 else [[3, 4, 5], [1, 2, 3]]
    input_shape = K.cast(K.shape(yolo_outputs[0])[1:3] * 32, K.dtype(y_true[0]))
    grid_shapes = [K.cast(K.shape(yolo_outputs[l])[1:3], K.dtype(y_true[0])) for l in range(num_layers)]
    loss = 0
    m = K.shape(yolo_outputs[0])[0]  # batch size, tensor
    mf = K.cast(m, K.dtype(yolo_outputs[0]))

    for l in range(num_layers):
        object_mask = y_true[l][..., 4:5]
        true_class_probs = y_true[l][..., 5:]

        grid, raw_pred, pred_xy, pred_wh = yolo_head(yolo_outputs[l],
            anchors[anchor_mask[l]], num_classes, input_shape, calc_loss=True)
        pred_box = K.concatenate([pred_xy, pred_wh])

        # Darknet raw box to calculate loss.
        raw_true_xy = y_true[l][..., :2]*grid_shapes[l][::-1] - grid
        raw_true_wh = K.log(y_true[l][..., 2:4] / anchors[anchor_mask[l]] * input_shape[::-1])
        raw_true_wh = K.switch(object_mask, raw_true_wh, K.zeros_like(raw_true_wh))  # avoid log(0)=-inf
        box_loss_scale = 2 - y_true[l][..., 2:3]*y_true[l][..., 3:4]

        # Find ignore mask, iterate over each of batch.
        ignore_mask = tf.TensorArray(K.dtype(y_true[0]), size=1, dynamic_size=True)
        object_mask_bool = K.cast(object_mask, 'bool')

        def loop_body(b, ignore_mask):
            true_box = tf.boolean_mask(y_true[l][b, ..., 0:4], object_mask_bool[b, ..., 0])
            iou = box_iou(pred_box[b], true_box)
            best_iou = K.max(iou, axis=-1)
            ignore_mask = ignore_mask.write(b, K.cast(best_iou < ignore_thresh, K.dtype(true_box)))
            return b+1, ignore_mask

        # keras2.6.0 and keras2.1.5
        # _, ignore_mask = K.control_flow_ops.while_loop(lambda b, *args: b < m, loop_body, [0, ignore_mask])
        _, ignore_mask = tf.while_loop(lambda b, *args: b < m, loop_body, [0, ignore_mask])
        ignore_mask = ignore_mask.stack()
        ignore_mask = K.expand_dims(ignore_mask, -1)

        # K.binary_crossentropy is helpful to avoid exp overflow.
        xy_loss = object_mask * box_loss_scale * K.binary_crossentropy(raw_true_xy, raw_pred[..., 0:2], from_logits=True)
        wh_loss = object_mask * box_loss_scale * 0.5 * K.square(raw_true_wh-raw_pred[..., 2:4])
        confidence_loss = object_mask * K.binary_crossentropy(object_mask, raw_pred[..., 4:5], from_logits=True) + \
            (1-object_mask) * K.binary_crossentropy(object_mask, raw_pred[..., 4:5], from_logits=True) * ignore_mask
        class_loss = object_mask * K.binary_crossentropy(true_class_probs, raw_pred[..., 5:], from_logits=True)

        xy_loss = K.sum(xy_loss) / mf
        wh_loss = K.sum(wh_loss) / mf
        confidence_loss = K.sum(confidence_loss) / mf
        class_loss = K.sum(class_loss) / mf
        loss += xy_loss + wh_loss + confidence_loss + class_loss
        # loss += (xy_loss + wh_loss + confidence_loss) * 2
        # loss += xy_loss + confidence_loss + 2*wh_loss
        # loss += xy_loss * 10 + wh_loss * 10 + confidence_loss
        if print_loss:
            loss = tf.Print(loss, [loss, xy_loss, wh_loss, confidence_loss, class_loss, K.sum(ignore_mask)], message='loss: ')
    return loss


def new_yolo_loss(input_shape, anchors, num_classes, ignore_thresh=.8, print_loss=False):
    """Return a yolo_loss function usable as a standard Keras loss (closure over anchors and classes)."""
    def yolo_loss_fixed(y_true, y_pred):
        num_layers = len(anchors)//3  # default setting
        yolo_outputs = y_pred
        print("y_true.shape", y_true.shape)
        print("y_pred.shape", y_pred.shape)
        anchor_mask = [[6, 7, 8], [3, 4, 5], [0, 1, 2]] if num_layers == 3 else [[3, 4, 5], [1, 2, 3]]
        input_shape = K.cast(K.shape(yolo_outputs[0])[1:3] * 32, K.dtype(y_true[0]))
        grid_shapes = [K.cast(K.shape(yolo_outputs[l])[1:3], K.dtype(y_true[0])) for l in range(num_layers)]
        loss = 0
        m = K.shape(yolo_outputs[0])[0]  # batch size, tensor
        mf = K.cast(m, K.dtype(yolo_outputs[0]))

        for l in range(num_layers):
            object_mask = y_true[l][..., 4:5]
            true_class_probs = y_true[l][..., 5:]

            grid, raw_pred, pred_xy, pred_wh = yolo_head(yolo_outputs[l],
                anchors[anchor_mask[l]], num_classes, input_shape, calc_loss=True)
            pred_box = K.concatenate([pred_xy, pred_wh])

            # Darknet raw box to calculate loss.
            raw_true_xy = y_true[l][..., :2]*grid_shapes[l][::-1] - grid
            raw_true_wh = K.log(y_true[l][..., 2:4] / anchors[anchor_mask[l]] * input_shape[::-1])
            raw_true_wh = K.switch(object_mask, raw_true_wh, K.zeros_like(raw_true_wh))  # avoid log(0)=-inf
            box_loss_scale = 2 - y_true[l][..., 2:3]*y_true[l][..., 3:4]

            # Find ignore mask, iterate over each of batch.
            ignore_mask = tf.TensorArray(K.dtype(y_true[0]), size=1, dynamic_size=True)
            object_mask_bool = K.cast(object_mask, 'bool')

            def loop_body(b, ignore_mask):
                true_box = tf.boolean_mask(y_true[l][b, ..., 0:4], object_mask_bool[b, ..., 0])
                iou = box_iou(pred_box[b], true_box)
                best_iou = K.max(iou, axis=-1)
                ignore_mask = ignore_mask.write(b, K.cast(best_iou < ignore_thresh, K.dtype(true_box)))
                return b+1, ignore_mask

            # K.control_flow_ops only exists on old Keras versions; use tf.while_loop as above.
            # _, ignore_mask = K.control_flow_ops.while_loop(lambda b, *args: b < m, loop_body, [0, ignore_mask])
            _, ignore_mask = tf.while_loop(lambda b, *args: b < m, loop_body, [0, ignore_mask])
            ignore_mask = ignore_mask.stack()
            ignore_mask = K.expand_dims(ignore_mask, -1)

            # K.binary_crossentropy is helpful to avoid exp overflow.
            xy_loss = object_mask * box_loss_scale * K.binary_crossentropy(raw_true_xy, raw_pred[..., 0:2], from_logits=True)
            wh_loss = object_mask * box_loss_scale * 0.5 * K.square(raw_true_wh-raw_pred[..., 2:4])
            confidence_loss = object_mask * K.binary_crossentropy(object_mask, raw_pred[..., 4:5], from_logits=True) + \
                (1-object_mask) * K.binary_crossentropy(object_mask, raw_pred[..., 4:5], from_logits=True) * ignore_mask
            class_loss = object_mask * K.binary_crossentropy(true_class_probs, raw_pred[..., 5:], from_logits=True)

            xy_loss = K.sum(xy_loss) / mf
            wh_loss = K.sum(wh_loss) / mf
            confidence_loss = K.sum(confidence_loss) / mf
            class_loss = K.sum(class_loss) / mf
            # loss += xy_loss + wh_loss + confidence_loss + class_loss
            loss += (xy_loss + wh_loss + confidence_loss) * 2
            if print_loss:
                loss = tf.Print(loss, [loss, xy_loss, wh_loss, confidence_loss, class_loss, K.sum(ignore_mask)], message='loss: ')
        return loss

    # h, w = input_shape
    # y_true = [Input(shape=(h//{0: 32, 1: 16}[l], w//{0: 32, 1: 16}[l],
    #                        len(anchors)//2, num_classes+5)) for l in range(2)]
    return yolo_loss_fixed