"""Train a character-classification CNN for click-captcha recognition.

Globs grayscale character-crop JPEGs, splits them 90/10 into train/test
with a seeded shuffle, and fits ``cnn_net_small`` with categorical
cross-entropy, checkpointing the best weights by validation accuracy
and halving the learning rate on plateaus.
"""
import os
import random
import sys
from glob import glob

# Pin training to the first GPU before TensorFlow is imported.
os.environ["CUDA_VISIBLE_DEVICES"] = "0"

import tensorflow as tf

tf.compat.v1.disable_eager_execution()

# Make both this directory and the project root importable.
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(os.path.dirname(os.path.abspath(__file__)) + "/../")

from keras.callbacks import ReduceLROnPlateau, ModelCheckpoint
from keras.losses import BinaryCrossentropy, mse, CategoricalCrossentropy, MSE
from keras.optimizer_v2.adam import Adam

from click_captcha.metrics import precision, recall, f1
from click_captcha.loss import focal_loss, contrastive_loss, l2_focal_loss, l1_focal_loss
from click_captcha.model import siamese_net, mobile_net, cnn_net, cnn_net_small, cnn_net_tiny, cnn_net_tiny_dropout
from click_captcha.pre_process import gen_char

# Set True to warm-start from a previous checkpoint (path hard-coded below).
PRETRAINED = False
random.seed(42)  # deterministic train/test split across runs

image_shape = (40, 40, 1)  # single-channel 40x40 character crops
project_root = os.path.dirname(os.path.abspath(__file__)) + "/../"
class_num = 5649  # size of the character vocabulary (output classes)
data_path = 'click_simple'

if __name__ == "__main__":
    model = cnn_net_small(input_shape=image_shape, output_shape=class_num)
    if PRETRAINED:
        _path = "./models/e130-acc0.87-char.h5"
        # by_name/skip_mismatch lets the checkpoint partially load into a
        # model whose layer shapes may have changed.
        model.load_weights(_path, skip_mismatch=True, by_name=True)
        print("read pretrained model", _path)
    else:
        print("no pretrained")

    paths = glob("../data/" + data_path + "/*.jpg")

    # Seeded shuffle, then a 90/10 train/test split.
    random.shuffle(paths)
    trainP = paths[:int(len(paths) * 0.9)]
    testP = paths[int(len(paths) * 0.9):]
    print('total:', len(paths), 'train:', len(trainP), 'test:', len(testP))

    batch_size = 32
    steps_per_epoch = max(1, len(trainP) // batch_size)
    validation_steps = max(1, len(testP) // batch_size)

    # Where model weights are saved; keep only the best epoch by val_acc.
    filepath = 'models/e{epoch:02d}-acc{val_acc:.2f}-char.h5'
    check_pointer = ModelCheckpoint(filepath=filepath, monitor='val_acc',
                                    verbose=0, save_weights_only=True,
                                    save_best_only=True, mode="max",
                                    save_freq='epoch')
    # Halve the LR after 10 epochs without val_acc improvement.
    rlu = ReduceLROnPlateau(monitor='val_acc', factor=0.5, patience=10,
                            verbose=1, mode='max', cooldown=0, min_lr=0)

    # `learning_rate` is the supported keyword; `lr` is deprecated.
    model.compile(optimizer=Adam(learning_rate=0.0003),
                  loss=CategoricalCrossentropy(),
                  metrics=['acc', f1])

    # Generators yield (image batch, one-hot label batch) pairs.
    train_loader = gen_char(trainP, batch_size=batch_size, shape=image_shape,
                            cls_num=class_num, data_path=data_path)
    test_loader = gen_char(testP, batch_size=batch_size, shape=image_shape,
                           cls_num=class_num, data_path=data_path)

    model.fit(train_loader,
              steps_per_epoch=steps_per_epoch,
              callbacks=[check_pointer, rlu],
              validation_data=test_loader,
              validation_steps=validation_steps,
              epochs=1000,
              max_queue_size=1000,
              use_multiprocessing=True,
              workers=10)