# -*- coding: utf-8 -*-
"""
Created on Tue Jun 24 10:53:51 2022
train
@author: fangjiasheng
"""
import copy
import json
import os
import random
import sys

# make the project package importable when the script is run directly
sys.path.append(os.path.dirname(os.path.abspath(__file__)) + "/../")
sys.path.append(os.path.dirname(os.path.abspath(__file__)))

from click_captcha.utils import get_classes, get_anchors
from click_captcha.model_260 import yolo_net

os.environ["CUDA_VISIBLE_DEVICES"] = "1"
# training requires an older Keras version
from tensorflow.keras.optimizers import Adam
# from keras.optimizer_v2.adam import Adam
# from keras.optimizers import Adam
# from tensorflow.keras.callbacks import ModelCheckpoint, ReduceLROnPlateau
from keras.callbacks import ModelCheckpoint, ReduceLROnPlateau
from glob import glob
from click_captcha.pre_process import gen_yolo_char
from keras.metrics import mse

PRETRAINED = False  # set True to load the weights from weight_path into the model
CHECKPOINT = False  # not referenced later in this script
random.seed(42)
if __name__ == '__main__':
    annotation_path = '../data/detect/map.txt'
    weight_path = 'models/e20-loss116.38.h5'
    log_dir = 'yolo_data/logs/000/'
    classes_path = 'yolo_data/my_classes.txt'
    anchors_path = 'yolo_data/my_anchors.txt'

    class_names = get_classes(classes_path)
    num_classes = len(class_names)
    anchors = get_anchors(anchors_path)
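    # Hedged sketch of the expected yolo_data files (an assumption based on the
    # standard keras-yolo3 conventions, not confirmed by this repo):
    #   my_classes.txt - one class name per line
    #   my_anchors.txt - a single line of comma-separated "w,h" pairs,
    #                    e.g. "10,13, 16,30, 33,23, ..."
    # get_anchors() is assumed to return an (N, 2) float array; uncomment to verify:
    # assert anchors.ndim == 2 and anchors.shape[1] == 2, anchors.shape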
    # input size must be a multiple of 32; given as (height, width)
    input_shape = (160, 256)
    # default setting: 6 anchors corresponds to the tiny-YOLO variant
    is_tiny_version = len(anchors) == 6
    model = yolo_net(input_shape, anchors, num_classes,
                     load_pretrained=PRETRAINED,
                     weights_path=weight_path)
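    # Hedged note (assumption): following the usual keras-yolo3 training setup,
    # yolo_net presumably returns a training model whose single output is the YOLO
    # loss value, computed inside a 'yolo_loss' Lambda layer. That would be why the
    # compile() call below uses the pass-through loss
    # {'yolo_loss': lambda y_true, y_pred: y_pred}.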
    # hold out 10% of the annotation lines for validation
    val_split = 0.1
    with open(annotation_path) as f:
        lines = f.readlines()
    random.shuffle(lines)
    # lines = lines[:100]
    num_val = int(len(lines) * val_split)
    num_train = len(lines) - num_val
    print('total:', len(lines), 'train:', num_train, 'test:', num_val)
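    # Hedged sketch of one map.txt annotation line (assumption: the usual
    # keras-yolo3 format of "image_path box1 box2 ...", each box written as
    # "x_min,y_min,x_max,y_max,class_id"); the path below is purely illustrative:
    # ../data/detect/imgs/0001.jpg 24,31,78,85,3 120,40,170,92,7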
    # save weights every 2 epochs, keeping only the best val_loss so far
    file_path = 'models/e{epoch:02d}-loss{val_loss:.2f}-char.h5'
    checkpoint = ModelCheckpoint(file_path, monitor='val_loss',
                                 save_weights_only=True, save_best_only=True, period=2)
    # halve the learning rate when val_loss plateaus for 10 epochs
    reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.5, patience=10, verbose=1)
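    # Note: the `period` argument is deprecated in newer tf.keras releases
    # (replaced by `save_freq`); it still works with the older standalone Keras
    # this script targets.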
    # model.compile(optimizer=Adam(lr=0.003), loss={'yolo_loss': lambda y_true, y_pred: y_pred},
    #               metrics=['acc', mse])

    # batch size and resulting steps per epoch
    batch_size = 32
    steps_per_epoch = max(1, num_train // batch_size)

    # data loaders: generators yielding batches for training and validation
    train_loader = gen_yolo_char(lines[:num_train], batch_size, input_shape, anchors, num_classes)
    test_loader = gen_yolo_char(lines[num_train:], batch_size, input_shape, anchors, num_classes)
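    # Hedged sketch (assumption): gen_yolo_char is presumably a keras-yolo3-style
    # data generator that loops forever and yields
    #     [image_batch, y_true_scale1, y_true_scale2, ...], dummy_zeros
    # where the ground-truth tensors feed the 'yolo_loss' layer and dummy_zeros is
    # a zero array of shape (batch_size,) matching the model's loss output.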
    # Optionally train with frozen layers first, to get a stable loss.
    # model.fit_generator(train_loader,
    #                     steps_per_epoch=steps_per_epoch,
    #                     callbacks=[reduce_lr],
    #                     validation_data=test_loader,
    #                     validation_steps=max(1, num_val // batch_size),
    #                     epochs=20,
    #                     max_queue_size=50)

    # Unfreeze all layers and continue training to fine-tune the whole network.
    for i in range(len(model.layers)):
        model.layers[i].trainable = True
    print('Unfreeze all of the layers.')

    # pass-through loss: see the note after the yolo_net() call above
    model.compile(optimizer=Adam(lr=0.003), loss={'yolo_loss': lambda y_true, y_pred: y_pred},
                  metrics=[mse])

    # fit_generator is deprecated in TF 2.x (model.fit accepts generators directly),
    # but is kept here for the older Keras this script targets.
    model.fit_generator(train_loader,
                        steps_per_epoch=steps_per_epoch,
                        validation_data=test_loader,
                        validation_steps=max(1, num_val // batch_size),
                        epochs=500,
                        callbacks=[checkpoint, reduce_lr])