
# -*- coding: utf-8 -*-
# @Time : 2020/5/19 21:44
# @Author : xiangjing
import os
import sys
import pathlib

# add the torchocr root directory to the Python path
__dir__ = pathlib.Path(os.path.abspath(__file__))
sys.path.append(str(__dir__))
sys.path.append(str(__dir__.parent.parent))
import random
import time
import shutil
import traceback
from importlib import import_module

import numpy as np
import torch
from tqdm import tqdm
from torch import nn

from torchocr.networks import build_model, build_loss
from torchocr.postprocess import build_post_process
from torchocr.datasets import build_dataloader
from torchocr.utils import get_logger, weight_init, load_checkpoint, save_checkpoint
from torchocr.metrics import DetMetric


def parse_args():
    import argparse
    parser = argparse.ArgumentParser(description='train')
    parser.add_argument('--config', type=str, default='config/cfg_det_db.py', help='train config file path')
    args = parser.parse_args()
    # parse the .py config file
    config_path = os.path.abspath(os.path.expanduser(args.config))
    assert os.path.isfile(config_path)
    if config_path.endswith('.py'):
        module_name = os.path.basename(config_path)[:-3]
        config_dir = os.path.dirname(config_path)
        sys.path.insert(0, config_dir)
        mod = import_module(module_name)
        sys.path.pop(0)
        return mod.config
        # cfg_dict = {
        #     name: value
        #     for name, value in mod.__dict__.items()
        #     if not name.startswith('__')
        # }
        # return cfg_dict
    else:
        raise IOError('Only .py config files are supported for now!')
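
# parse_args returns the `config` object defined inside the config module. Judging
# from the accesses below, it supports both attribute and item lookup
# (cfg.train_options, cfg['model'], cfg.dataset.train, ...). A minimal sketch of
# the expected keys — the values are illustrative assumptions, not the shipped
# config/cfg_det_db.py:
#   config.SEED = 927
#   config.model = {...}                              # consumed by build_model
#   config.optimizer = {'type': 'Adam', 'lr': 0.001}  # consumed by build_optimizer
#   config.loss = {...}                               # consumed by build_loss
#   config.post_process = {...}                       # consumed by build_post_process
#   config.dataset.train / config.dataset.eval        # consumed by build_dataloader
#   config.train_options = {'device': 'cuda:0', 'epochs': 100, 'print_interval': 10,
#                           'val_interval': 1, 'checkpoint_save_dir': './output',
#                           'ckpt_save_type': 'HighestAcc', 'ckpt_save_epoch': 1,
#                           'fine_tune_stage': [...], 'resume_from': '',
#                           'third_party_name': ''}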


def set_random_seed(seed, use_cuda=True, deterministic=False):
    """Set random seed.

    Args:
        seed (int): Seed to be used.
        use_cuda (bool): Whether training depends on CUDA, i.e. whether the
            CUDA RNGs need seeding as well.
        deterministic (bool): Whether to set the deterministic option for
            CUDNN backend, i.e., set `torch.backends.cudnn.deterministic`
            to True and `torch.backends.cudnn.benchmark` to False.
            Default: False.
    """
    random.seed(seed)
    np.random.seed(seed)
    # seed the CPU RNG unconditionally; the CUDA RNGs only matter when CUDA is in use
    torch.manual_seed(seed)
    if use_cuda:
        torch.cuda.manual_seed_all(seed)
        if deterministic:
            torch.backends.cudnn.deterministic = True
            torch.backends.cudnn.benchmark = False


def build_optimizer(params, config):
    """Build a torch.optim optimizer from the config.

    Note that `config.pop('type')` mutates the passed-in dict; every remaining
    entry is forwarded to the optimizer constructor as a keyword argument.

    Returns:
        a torch.optim optimizer instance
    """
    from torch import optim
    opt_type = config.pop('type')
    opt = getattr(optim, opt_type)(params, **config)
    return opt
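
# Example with a hypothetical config (any class name from torch.optim works as `type`):
#   build_optimizer(net.parameters(), {'type': 'Adam', 'lr': 1e-3, 'weight_decay': 1e-4})
# is equivalent to torch.optim.Adam(net.parameters(), lr=1e-3, weight_decay=1e-4).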


def adjust_learning_rate(optimizer, base_lr, iter, all_iters, factor, warmup_iters=0, warmup_factor=1.0 / 3):
    """Learning-rate decay with warmup.

    :param optimizer: the optimizer
    :param base_lr: initial learning rate
    :param iter: current iteration
    :param all_iters: total number of iterations
    :param factor: power of the polynomial decay
    :param warmup_iters: number of warmup iterations
    :param warmup_factor: fraction of base_lr to start the warmup from
    :return: the learning rate that was applied to the optimizer
    """
    if iter < warmup_iters:
        alpha = float(iter) / warmup_iters
        rate = warmup_factor * (1 - alpha) + alpha
    else:
        rate = np.power(1.0 - iter / float(all_iters + 1), factor)
    lr = rate * base_lr
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr
    return lr
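
# The schedule in closed form, derived from the two branches above:
#   iter < warmup_iters:  lr = base_lr * (warmup_factor * (1 - a) + a),  a = iter / warmup_iters
#                         (a linear ramp from warmup_factor * base_lr up to base_lr)
#   otherwise:            lr = base_lr * (1 - iter / (all_iters + 1)) ** factor  ("poly" decay)
# For example, with base_lr=0.001, factor=0.9 and all_iters=10000, at iter=5000
# the rate is (1 - 5000/10001) ** 0.9 ≈ 0.536, so lr ≈ 5.4e-4.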


def get_fine_tune_params(net, finetune_stage):
    """Collect the parameters that should be optimized.

    Args:
        net: the network (wrapped in nn.DataParallel in main(), hence net.module below)
        finetune_stage: iterable of sub-module names to fine-tune
    Returns:
        the list of parameters to optimize
    """
    to_return_parameters = []
    for stage in finetune_stage:
        attr = getattr(net.module, stage, None)
        if attr is None:
            # skip stage names that do not exist on the model instead of crashing
            continue
        for element in attr.parameters():
            to_return_parameters.append(element)
    return to_return_parameters
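
# A DB-style detector is typically built from `backbone`, `neck` and `head`
# sub-modules, so a full fine-tune would use (assumed value, check your config):
#   train_options['fine_tune_stage'] = ['backbone', 'neck', 'head']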


def evaluate(net, val_loader, to_use_device, logger, post_process, metric):
    """Evaluate the model on the validation set.

    :param net: the network
    :param val_loader: validation dataloader
    :param to_use_device: device
    :param logger: logger instance
    :param post_process: post-processing instance
    :param metric: object that computes recall/precision/etc. from network outputs and labels
    :return: a dict containing recall, precision and hmean,
             e.g.: {
                 'recall': 0,
                 'precision': 0.99,
                 'hmean': 0.9999,
             }
    """
    logger.info('start evaluate')
    net.eval()
    raw_metrics = []
    total_frame = 0.0
    total_time = 0.0
    with torch.no_grad():
        for batch_data in tqdm(val_loader):
            start = time.time()
            output = net(batch_data['img'].to(to_use_device))
            boxes, scores = post_process(output.cpu().numpy(), batch_data['shape'])
            total_frame += batch_data['img'].size()[0]
            total_time += time.time() - start
            raw_metric = metric(batch_data, (boxes, scores))
            raw_metrics.append(raw_metric)
    metrics = metric.gather_measure(raw_metrics)
    net.train()
    result_dict = {'recall': metrics['recall'].avg, 'precision': metrics['precision'].avg,
                   'hmean': metrics['fmeasure'].avg}
    for k, v in result_dict.items():
        logger.info(f'{k}: {v}')
    logger.info('FPS: {}'.format(total_frame / total_time))
    return result_dict


def train(net, optimizer, loss_func, train_loader, eval_loader, to_use_device,
          cfg, global_state, logger, post_process):
    """Training loop.

    :param net: the network
    :param optimizer: the optimizer
    :param loss_func: the loss function
    :param train_loader: training dataloader
    :param eval_loader: validation dataloader
    :param to_use_device: device
    :param cfg: the config used for this run
    :param global_state: global training state, e.g. cur_epoch, cur_iter and best-model info
    :param logger: logger instance
    :param post_process: post-processing instance
    :return: None
    """
    train_options = cfg.train_options
    metric = DetMetric()
    # ===>
    logger.info('Training...')
    # ===> parameters for loss logging
    all_step = len(train_loader)
    logger.info(f'train dataset has {len(train_loader.dataset)} samples, {all_step} batches in dataloader')
    logger.info(f'eval dataset has {len(eval_loader.dataset)} samples, {len(eval_loader)} batches in dataloader')
    if len(global_state) > 0:
        best_model = global_state['best_model']
        start_epoch = global_state['start_epoch']
        global_step = global_state['global_step']
    else:
        best_model = {'recall': 0, 'precision': 0, 'hmean': 0, 'best_model_epoch': 0}
        start_epoch = 0
        global_step = 0
    # start training
    base_lr = cfg['optimizer']['lr']
    all_iters = all_step * train_options['epochs']
    warmup_iters = 3 * all_step
    try:
        for epoch in range(start_epoch, train_options['epochs']):  # traverse each epoch
            net.train()  # train mode
            train_loss = 0.
            start = time.time()
            for i, batch_data in enumerate(train_loader):  # traverse each batch in the epoch
                current_lr = adjust_learning_rate(optimizer, base_lr, global_step, all_iters, 0.9,
                                                  warmup_iters=warmup_iters)
                # move the batch tensors to the target device
                for key, value in batch_data.items():
                    if value is not None:
                        if isinstance(value, torch.Tensor):
                            batch_data[key] = value.to(to_use_device)
                # zero the gradients and backpropagate
                optimizer.zero_grad()
                output = net(batch_data['img'].to(to_use_device))
                loss_dict = loss_func(output, batch_data)
                loss_dict['loss'].backward()
                optimizer.step()
                # accumulate the loss for logging
                train_loss += loss_dict['loss'].item()
                loss_str = 'loss: {:.4f} - '.format(loss_dict.pop('loss').item())
                for idx, (key, value) in enumerate(loss_dict.items()):
                    loss_dict[key] = value.item()
                    loss_str += '{}: {:.4f}'.format(key, loss_dict[key])
                    if idx < len(loss_dict) - 1:
                        loss_str += ' - '
                if (i + 1) % train_options['print_interval'] == 0:
                    interval_batch_time = time.time() - start
                    logger.info(f"[{epoch}/{train_options['epochs']}] - "
                                f"[{i + 1}/{all_step}] - "
                                f"lr:{current_lr} - "
                                f"{loss_str} - "
                                f"time:{interval_batch_time:.4f}")
                    start = time.time()
                global_step += 1
            logger.info(f'train_loss: {train_loss / len(train_loader)}')
            if (epoch + 1) % train_options['val_interval'] == 0:
                global_state['start_epoch'] = epoch
                global_state['best_model'] = best_model
                global_state['global_step'] = global_step
                net_save_path = f"{train_options['checkpoint_save_dir']}/latest.pth"
                save_checkpoint(net_save_path, net, optimizer, logger, cfg, global_state=global_state)
                if train_options['ckpt_save_type'] == 'HighestAcc':
                    # validate and keep the checkpoint with the best hmean
                    eval_dict = evaluate(net, eval_loader, to_use_device, logger, post_process, metric)
                    if eval_dict['hmean'] > best_model['hmean']:
                        best_model.update(eval_dict)
                        best_model['best_model_epoch'] = epoch
                        best_model['models'] = net_save_path
                        global_state['start_epoch'] = epoch
                        global_state['best_model'] = best_model
                        global_state['global_step'] = global_step
                        net_save_path = f"{train_options['checkpoint_save_dir']}/best.pth"
                        save_checkpoint(net_save_path, net, optimizer, logger, cfg, global_state=global_state)
                elif train_options['ckpt_save_type'] == 'FixedEpochStep' and epoch % train_options['ckpt_save_epoch'] == 0:
                    shutil.copy(net_save_path, net_save_path.replace('latest.pth', f'{epoch}.pth'))
                best_str = 'current best, '
                for k, v in best_model.items():
                    best_str += '{}: {}, '.format(k, v)
                logger.info(best_str)
    except KeyboardInterrupt:
        # save a final checkpoint on manual interrupt (os is already imported at module level)
        save_checkpoint(os.path.join(train_options['checkpoint_save_dir'], 'final.pth'), net, optimizer, logger, cfg,
                        global_state=global_state)
    except Exception:
        error_msg = traceback.format_exc()
        logger.error(error_msg)
    finally:
        for k, v in best_model.items():
            logger.info(f'{k}: {v}')


def main():
    # ===> read the config file
    cfg = parse_args()
    os.makedirs(cfg.train_options['checkpoint_save_dir'], exist_ok=True)
    logger = get_logger('torchocr', log_file=os.path.join(cfg.train_options['checkpoint_save_dir'], 'train.log'))
    # ===> print the training config
    train_options = cfg.train_options
    logger.info(cfg)
    # ===>
    to_use_device = torch.device(
        train_options['device'] if torch.cuda.is_available() and ('cuda' in train_options['device']) else 'cpu')
    set_random_seed(cfg['SEED'], 'cuda' in train_options['device'], deterministic=True)
    # ===> build network
    net = build_model(cfg['model'])
    # ===> initialize the model and move it to the target device
    if not cfg['model']['backbone']['pretrained']:  # initialize weights only when no pretrained backbone is used
        net.apply(weight_init)
    # if torch.cuda.device_count() > 1:
    net = nn.DataParallel(net)
    net = net.to(to_use_device)
    net.train()
    # ===> get fine tune layers
    params_to_train = get_fine_tune_params(net, train_options['fine_tune_stage'])
    # ===> solver and lr scheduler
    # pass the collected fine-tune parameters so that only those stages are optimized
    optimizer = build_optimizer(params_to_train, cfg['optimizer'])
    # ===> whether to resume from checkpoint
    resume_from = train_options['resume_from']
    if resume_from:
        net, _resumed_optimizer, global_state = load_checkpoint(net, resume_from, to_use_device, optimizer,
                                                                third_name=train_options['third_party_name'])
        if _resumed_optimizer:
            optimizer = _resumed_optimizer
        logger.info(f'net resume from {resume_from}')
    else:
        global_state = {}
        logger.info('net trained from scratch.')
    # ===> loss function
    loss_func = build_loss(cfg['loss'])
    loss_func = loss_func.to(to_use_device)
    # ===> data loader
    train_loader = build_dataloader(cfg.dataset.train)
    eval_loader = build_dataloader(cfg.dataset.eval)
    # ===> post process
    post_process = build_post_process(cfg['post_process'])
    # ===> train
    train(net, optimizer, loss_func, train_loader, eval_loader, to_use_device, cfg, global_state, logger, post_process)
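
# Typical invocation, assuming the script is run from the repository root
# (the default --config already points at config/cfg_det_db.py):
#   python det_train.py --config config/cfg_det_db.py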


if __name__ == '__main__':
    main()