import os
import random
import sys
from glob import glob

# select the GPU before TensorFlow is imported
os.environ["CUDA_VISIBLE_DEVICES"] = "1"

import tensorflow as tf
# tf.compat.v1.disable_eager_execution()

sys.path.append(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(os.path.dirname(os.path.abspath(__file__)) + "/../")

from keras.callbacks import ReduceLROnPlateau, ModelCheckpoint
from keras.losses import BinaryCrossentropy, mse, CategoricalCrossentropy, MSE
from keras.optimizer_v2.adam import Adam

from click_captcha.metrics import precision, recall, f1
from click_captcha.loss import focal_loss, contrastive_loss, l2_focal_loss, l1_focal_loss, perceptual_loss
from click_captcha.model import u_net_denoise
from click_captcha.pre_process import gen_equation, gen_equation2, gen_equation_denoise
PRETRAINED = False
random.seed(42)

# input image shape: (height, width, channels)
image_shape = (32, 192, 1)
project_root = os.path.dirname(os.path.abspath(__file__)) + "/../"
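
# train the denoising U-Net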
if __name__ == "__main__":
    model = u_net_denoise(input_shape=image_shape, class_num=image_shape[2])

    if PRETRAINED:
        _path = "./models/e130-acc0.87-char.h5"
        model.load_weights(_path, skip_mismatch=True, by_name=True)
        print("read pretrained model", _path)
    else:
        print("no pretrained")
    # batch size
    batch_size = 32

    # where to save model weight checkpoints
    filepath = 'models/e{epoch:02d}-loss{val_loss:.2f}-denoise.h5'
    check_pointer = ModelCheckpoint(filepath=filepath, monitor='val_loss', verbose=0,
                                    save_weights_only=True, save_best_only=True,
                                    mode="min", save_freq='epoch')
    rlu = ReduceLROnPlateau(monitor='val_loss', factor=0.5, patience=10,
                            verbose=1, mode='min', cooldown=0, min_lr=0)
    model.compile(optimizer=Adam(learning_rate=0.0003), loss=perceptual_loss(),
                  metrics=['acc', precision, recall, f1])
    # data generators for training and validation
    train_loader = gen_equation_denoise(None, batch_size=batch_size, shape=image_shape)
    test_loader = gen_equation_denoise(None, batch_size=batch_size, shape=image_shape)

    # train
    steps_per_epoch = 1000
    validation_steps = int(steps_per_epoch * 0.1)
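    # best weights (lowest val_loss) are written to models/ whenever val_loss improves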
    model.fit(train_loader,
              steps_per_epoch=steps_per_epoch,
              callbacks=[check_pointer, rlu],
              validation_data=test_loader,
              validation_steps=validation_steps,
              epochs=1000,
              max_queue_size=100,
              use_multiprocessing=False)