import os
import random
import sys

# pin training to the first GPU and make the project packages importable
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(os.path.dirname(os.path.abspath(__file__)) + "/../")

from keras.callbacks import ReduceLROnPlateau, ModelCheckpoint
from keras.losses import BinaryCrossentropy
from keras.optimizer_v2.adam import Adam

from click_captcha.metrics import f1
from click_captcha.model import text_cnn_phrase
from click_captcha.pre_process import gen_phrase
PRETRAINED = False
random.seed(42)
project_root = os.path.dirname(os.path.abspath(__file__)) + "/../"
vocabulary_len = 5792  # vocabulary size (one-hot depth of each token)
sequence_len = 6       # tokens per phrase sample
if __name__ == "__main__":
    model = text_cnn_phrase((sequence_len, vocabulary_len))
    if PRETRAINED:
        _path = "./models/e08-f10.86-phrase.h5"
        model.load_weights(_path, skip_mismatch=True, by_name=True)
        print("read pretrained model", _path)
    else:
        print("no pretrained")
    # read the phrase mapping file and split it into train/test (90/10)
    with open("../data/phrase/map2.txt", "r") as f:
        map_list = f.readlines()
    random.shuffle(map_list)
    testP = map_list[:int(len(map_list) * 0.1)]
    trainP = map_list[int(len(map_list) * 0.1):]
    print('total:', len(map_list), 'train:', len(trainP), 'test:', len(testP))
    # steps per epoch derived from the split sizes
    batch_size = 64
    steps_per_epoch = max(1, len(trainP) // batch_size)
    validation_steps = max(1, len(testP) // batch_size)
    # where model weights are saved; the name encodes epoch and validation F1
    filepath = 'models/e{epoch:02d}-f1{val_f1:.2f}-phrase.h5'
    check_pointer = ModelCheckpoint(filepath=filepath, monitor='val_f1', verbose=0,
                                    save_weights_only=True, save_best_only=True,
                                    mode="max", save_freq='epoch')
    # halve the learning rate when validation F1 plateaus for 10 epochs
    rlu = ReduceLROnPlateau(monitor='val_f1', factor=0.5, patience=10,
                            verbose=1, mode='max', cooldown=0, min_lr=0)
    model.compile(optimizer=Adam(learning_rate=0.003), loss=BinaryCrossentropy(),
                  metrics=['acc', f1])
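    # `f1` above is the project's custom Keras metric. As a rough sketch only
    # (assumption: the real implementation lives in click_captcha/metrics.py
    # and may differ), a batch-wise F1 can be computed like this:
    #
    #     import keras.backend as K
    #
    #     def f1(y_true, y_pred):
    #         y_pred = K.round(K.clip(y_pred, 0, 1))
    #         tp = K.sum(y_true * y_pred)
    #         p = tp / (K.sum(y_pred) + K.epsilon())
    #         r = tp / (K.sum(y_true) + K.epsilon())
    #         return 2 * p * r / (p + r + K.epsilon())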
    # data loaders
    train_loader = gen_phrase(trainP, batch_size=batch_size, shape=(vocabulary_len, sequence_len))
    test_loader = gen_phrase(testP, batch_size=batch_size, shape=(vocabulary_len, sequence_len))
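    # The loaders are assumed to yield (inputs, labels) batches indefinitely,
    # with inputs shaped to match the model's (sequence_len, vocabulary_len)
    # input; note the `shape` argument above passes the dimensions in the
    # opposite order, which is taken here to be gen_phrase's own convention.
    # To spot-check one batch (this consumes it from the generator):
    #
    #     x, y = next(train_loader)
    #     print(x.shape, y.shape)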
- # train
- model.fit_generator(train_loader,
- steps_per_epoch=steps_per_epoch,
- callbacks=[check_pointer],
- validation_data=test_loader,
- validation_steps=validation_steps,
- epochs=1000)
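    # To resume training later, set PRETRAINED = True and point _path at the
    # newest checkpoint under models/ (e.g. the highest-F1 *-phrase.h5 file).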