3 Commits fccc997e45 ... 80d5ca4829

Autor SHA1 Nachricht Datum
  znj 80d5ca4829 OCR_pytorch模型更新 vor 1 Jahr
  znj 932a19a6e1 Merge branch 'master' of http://192.168.2.103:3000/fangjiasheng/FORMAT_CONVERSION_MAXCOMPUTE vor 1 Jahr
  znj 1fae4ceaba merge vor 1 Jahr

+ 17 - 0
format_convert/utils.py

@@ -38,6 +38,23 @@ if get_platform() == "Linux":
     import resource
 import math
 
+from shapely.geometry import Polygon
+
def has_intersection(poly1, poly2):
    """Check whether two quadrilaterals intersect.

    Args:
        poly1, poly2: list of (x, y) vertex tuples describing one
            quadrilateral each, e.g. [(x1, y1), (x2, y2), (x3, y3), (x4, y4)].

    Returns:
        bool: True if the two polygons share any area or touch at a point
        or edge, False otherwise.
    """
    # Wrap both vertex lists in Shapely polygon objects and delegate the
    # geometric test to Shapely's intersects() predicate (true when the
    # polygons overlap or merely touch).
    return Polygon(poly1).intersects(Polygon(poly2))
 
 def judge_error_code(_list, code=[0, -1, -2, -3, -4, -5, -6, -7, -8, -9, -10, -11, -12, -13, -14, -15, -16]):
     """

BIN
ocr/model/pytorch/det/det.pth


BIN
ocr/model/pytorch/rec/rec.pth


+ 60 - 10
ocr/paddleocr.py

@@ -193,6 +193,47 @@ def parse_args(mMain=True, add_help=True):
         else:
             use_gpu = True
 
+        # return argparse.Namespace(
+        #     use_gpu=use_gpu,
+        #     ir_optim=True,
+        #     use_tensorrt=False,
+        #     gpu_mem=8000,
+        #     image_dir='',
+        #     det_algorithm='DB',
+        #     det_model_dir=None,
+        #     det_limit_side_len=1280,
+        #     det_limit_type='max',
+        #     det_db_thresh=0.1,
+        #     # det_db_box_thresh 漏行 调小
+        #     det_db_box_thresh=0.1,
+        #     # det_db_unclip_ratio 检测框的贴近程度
+        #     det_db_unclip_ratio=2.5,
+        #     # 对文字膨胀操作
+        #     use_dilation=False,
+        #     det_east_score_thresh=0.8,
+        #     det_east_cover_thresh=0.1,
+        #     det_east_nms_thresh=0.2,
+        #     rec_algorithm='CRNN',
+        #     rec_model_dir=None,
+        #     rec_image_shape="3, 32, 1000",
+        #     rec_char_type='ch',
+        #     rec_batch_num=30,
+        #     max_text_length=128,
+        #     rec_char_dict_path='ocr/ppocr/utils/ppocr_keys_v1.txt',
+        #     use_space_char=True,
+        #     drop_score=0.5,
+        #     cls_model_dir=None,
+        #     cls_image_shape="3, 32, 1000",
+        #     label_list=['0', '180'],
+        #     cls_batch_num=30,
+        #     cls_thresh=0.9,
+        #     enable_mkldnn=False,
+        #     use_zero_copy_run=True,
+        #     use_pdserving=False,
+        #     lang='ch',
+        #     det=True,
+        #     rec=True,
+        #     use_angle_cls=False)
         return argparse.Namespace(
             use_gpu=use_gpu,
             ir_optim=True,
@@ -203,11 +244,14 @@ def parse_args(mMain=True, add_help=True):
             det_model_dir=None,
             det_limit_side_len=1280,
             det_limit_type='max',
-            det_db_thresh=0.1,
+            # det_db_thresh=0.1,
+            det_db_thresh=0.2,# torch
             # det_db_box_thresh 漏行 调小
-            det_db_box_thresh=0.1,
+            # det_db_box_thresh=0.1,
+            det_db_box_thresh=0.5,# torch
             # det_db_unclip_ratio 检测框的贴近程度
-            det_db_unclip_ratio=2.5,
+            # det_db_unclip_ratio=2.5,
+            det_db_unclip_ratio=2.5,# torch
             # 对文字膨胀操作
             use_dilation=False,
             det_east_score_thresh=0.8,
@@ -219,8 +263,10 @@ def parse_args(mMain=True, add_help=True):
             rec_char_type='ch',
             rec_batch_num=30,
             max_text_length=128,
-            rec_char_dict_path='ocr/ppocr/utils/ppocr_keys_v1.txt',
-            use_space_char=True,
+            # rec_char_dict_path='ocr/ppocr/utils/ppocr_keys_v1.txt',
+            # use_space_char=True,
+            rec_char_dict_path='ocr/ppocr/utils/char_std_7551.txt',
+            use_space_char=False,
             drop_score=0.5,
             cls_model_dir=None,
             cls_image_shape="3, 32, 1000",
@@ -256,20 +302,24 @@ class PaddleOCR(predict_system.TextSystem):
 
         # init model dir
         if postprocess_params.det_model_dir is None:
+            # postprocess_params.det_model_dir = os.path.join(
+            #     BASE_DIR, '{}/det'.format(VERSION))
             postprocess_params.det_model_dir = os.path.join(
-                BASE_DIR, '{}/det'.format(VERSION))
+                BASE_DIR, 'pytorch/det/det.pth') # torch
         if postprocess_params.rec_model_dir is None:
+            # postprocess_params.rec_model_dir = os.path.join(
+            #     BASE_DIR, '{}/rec/{}'.format(VERSION, lang))
             postprocess_params.rec_model_dir = os.path.join(
-                BASE_DIR, '{}/rec/{}'.format(VERSION, lang))
+                BASE_DIR, 'pytorch/rec/rec.pth')# torch
         if postprocess_params.cls_model_dir is None:
             postprocess_params.cls_model_dir = os.path.join(
                 BASE_DIR, '{}/cls'.format(VERSION))
         logger.info(postprocess_params)
 
         # download model
-        maybe_download(postprocess_params.det_model_dir, model_urls['det'])
-        maybe_download(postprocess_params.rec_model_dir,
-                       model_urls['rec'][lang]['url'])
+        # maybe_download(postprocess_params.det_model_dir, model_urls['det'])
+        # maybe_download(postprocess_params.rec_model_dir,
+        #                model_urls['rec'][lang]['url'])
         maybe_download(postprocess_params.cls_model_dir, model_urls['cls'])
 
         if postprocess_params.det_algorithm not in SUPPORT_DET_MODEL:

+ 23 - 1
ocr/ppocr/postprocess/db_postprocess.py

@@ -69,7 +69,8 @@ class DBPostProcess(object):
             if sside < self.min_size:
                 continue
             points = np.array(points)
-            score = self.box_score_fast(pred, points.reshape(-1, 2))
+            # score = self.box_score_fast(pred, points.reshape(-1, 2)) # fast 近似计算得分
+            score = self.box_score_slow(pred, contour) # slow
             if self.box_thresh > score:
                 continue
 
@@ -133,6 +134,27 @@ class DBPostProcess(object):
         cv2.fillPoly(mask, box.reshape(1, -1, 2).astype(np.int32), 1)
         return cv2.mean(bitmap[ymin:ymax + 1, xmin:xmax + 1], mask)[0]
 
+    def box_score_slow(self, bitmap, contour):
+        '''
+        box_score_slow: use polyon mean score as the mean score
+        '''
+        h, w = bitmap.shape[:2]
+        contour = contour.copy()
+        contour = np.reshape(contour, (-1, 2))
+
+        xmin = np.clip(np.min(contour[:, 0]), 0, w - 1)
+        xmax = np.clip(np.max(contour[:, 0]), 0, w - 1)
+        ymin = np.clip(np.min(contour[:, 1]), 0, h - 1)
+        ymax = np.clip(np.max(contour[:, 1]), 0, h - 1)
+
+        mask = np.zeros((ymax - ymin + 1, xmax - xmin + 1), dtype=np.uint8)
+
+        contour[:, 0] = contour[:, 0] - xmin
+        contour[:, 1] = contour[:, 1] - ymin
+
+        cv2.fillPoly(mask, contour.reshape(1, -1, 2).astype(np.int32), 1)
+        return cv2.mean(bitmap[ymin:ymax + 1, xmin:xmax + 1], mask)[0]
+
     def __call__(self, outs_dict, shape_list):
         pred = outs_dict['maps']
         if isinstance(pred, paddle.Tensor):

+ 7551 - 0
ocr/ppocr/utils/char_std_7551.txt

@@ -0,0 +1,7551 @@
+'
+疗
+绚
+诚
+娇
+溜
+题
+贿
+者
+廖
+更
+纳
+加
+奉
+公
+一
+就
+汴
+计
+与
+路
+房
+原
+妇
+2
+0
+8
+-
+7
+其
+>
+:
+]
+,
+,
+骑
+刈
+全
+消
+昏
+傈
+安
+久
+钟
+嗅
+不
+影
+处
+驽
+蜿
+资
+关
+椤
+地
+瘸
+专
+问
+忖
+票
+嫉
+炎
+韵
+要
+月
+田
+节
+陂
+鄙
+捌
+备
+拳
+伺
+眼
+网
+盎
+大
+傍
+心
+东
+愉
+汇
+蹿
+科
+每
+业
+里
+航
+晏
+字
+平
+录
+先
+1
+3
+彤
+鲶
+产
+稍
+督
+腴
+有
+象
+岳
+注
+绍
+在
+泺
+文
+定
+核
+名
+水
+过
+理
+让
+偷
+率
+等
+这
+发
+”
+为
+含
+肥
+酉
+相
+鄱
+七
+编
+猥
+锛
+日
+镀
+蒂
+掰
+倒
+辆
+栾
+栗
+综
+涩
+州
+雌
+滑
+馀
+了
+机
+块
+司
+宰
+甙
+兴
+矽
+抚
+保
+用
+沧
+秩
+如
+收
+息
+滥
+页
+疑
+埠
+!
+!
+姥
+异
+橹
+钇
+向
+下
+跄
+的
+椴
+沫
+国
+绥
+獠
+报
+开
+民
+蜇
+何
+分
+凇
+长
+讥
+藏
+掏
+施
+羽
+中
+讲
+派
+嘟
+人
+提
+浼
+间
+世
+而
+古
+多
+倪
+唇
+饯
+控
+庚
+首
+赛
+蜓
+味
+断
+制
+觉
+技
+替
+艰
+溢
+潮
+夕
+钺
+外
+摘
+枋
+动
+双
+单
+啮
+户
+枇
+确
+锦
+曜
+杜
+或
+能
+效
+霜
+盒
+然
+侗
+电
+晁
+放
+步
+鹃
+新
+杖
+蜂
+吒
+濂
+瞬
+评
+总
+隍
+对
+独
+合
+也
+是
+府
+青
+天
+诲
+墙
+组
+滴
+级
+邀
+帘
+示
+已
+时
+骸
+仄
+泅
+和
+遨
+店
+雇
+疫
+持
+巍
+踮
+境
+只
+亨
+目
+鉴
+崤
+闲
+体
+泄
+杂
+作
+般
+轰
+化
+解
+迂
+诿
+蛭
+璀
+腾
+告
+版
+服
+省
+师
+小
+规
+程
+线
+海
+办
+引
+二
+桧
+牌
+砺
+洄
+裴
+修
+图
+痫
+胡
+许
+犊
+事
+郛
+基
+柴
+呼
+食
+研
+奶
+律
+蛋
+因
+葆
+察
+戏
+褒
+戒
+再
+李
+骁
+工
+貂
+油
+鹅
+章
+啄
+休
+场
+给
+睡
+纷
+豆
+器
+捎
+说
+敏
+学
+会
+浒
+设
+诊
+格
+廓
+查
+来
+霓
+室
+溆
+¢
+诡
+寥
+焕
+舜
+柒
+狐
+回
+戟
+砾
+厄
+实
+翩
+尿
+五
+入
+径
+惭
+喹
+股
+宇
+篝
+|
+;
+美
+期
+云
+九
+祺
+扮
+靠
+锝
+槌
+系
+企
+酰
+阊
+暂
+蚕
+忻
+豁
+本
+羹
+执
+条
+钦
+H
+獒
+限
+进
+季
+楦
+于
+芘
+玖
+铋
+茯
+未
+答
+粘
+括
+样
+精
+欠
+矢
+甥
+帷
+嵩
+扣
+令
+仔
+风
+皈
+行
+支
+部
+蓉
+刮
+站
+蜡
+救
+钊
+汗
+松
+嫌
+成
+可
+.
+鹤
+院
+从
+交
+政
+怕
+活
+调
+球
+局
+验
+髌
+第
+韫
+谗
+串
+到
+圆
+年
+米
+/
+*
+友
+忿
+检
+区
+看
+自
+敢
+刃
+个
+兹
+弄
+流
+留
+同
+没
+齿
+星
+聆
+轼
+湖
+什
+三
+建
+蛔
+儿
+椋
+汕
+震
+颧
+鲤
+跟
+力
+情
+璺
+铨
+陪
+务
+指
+族
+训
+滦
+鄣
+濮
+扒
+商
+箱
+十
+召
+慷
+辗
+所
+莞
+管
+护
+臭
+横
+硒
+嗓
+接
+侦
+六
+露
+党
+馋
+驾
+剖
+高
+侬
+妪
+幂
+猗
+绺
+骐
+央
+酐
+孝
+筝
+课
+徇
+缰
+门
+男
+西
+项
+句
+谙
+瞒
+秃
+篇
+教
+碲
+罚
+声
+呐
+景
+前
+富
+嘴
+鳌
+稀
+免
+朋
+啬
+睐
+去
+赈
+鱼
+住
+肩
+愕
+速
+旁
+波
+厅
+健
+茼
+厥
+鲟
+谅
+投
+攸
+炔
+数
+方
+击
+呋
+谈
+绩
+别
+愫
+僚
+躬
+鹧
+胪
+炳
+招
+喇
+膨
+泵
+蹦
+毛
+结
+5
+4
+谱
+识
+陕
+粽
+婚
+拟
+构
+且
+搜
+任
+潘
+比
+郢
+妨
+醪
+陀
+桔
+碘
+扎
+选
+哈
+骷
+楷
+亿
+明
+缆
+脯
+监
+睫
+逻
+婵
+共
+赴
+淝
+凡
+惦
+及
+达
+揖
+谩
+澹
+减
+焰
+蛹
+番
+祁
+柏
+员
+禄
+怡
+峤
+龙
+白
+叽
+生
+闯
+起
+细
+装
+谕
+竟
+聚
+钙
+上
+导
+渊
+按
+艾
+辘
+挡
+耒
+盹
+饪
+臀
+记
+邮
+蕙
+受
+各
+医
+搂
+普
+滇
+朗
+茸
+带
+翻
+酚
+(
+光
+堤
+墟
+蔷
+万
+幻
+瑙
+辈
+昧
+盏
+亘
+蛀
+吉
+铰
+请
+子
+假
+闻
+税
+井
+诩
+哨
+嫂
+好
+面
+琐
+校
+馊
+鬣
+缂
+营
+访
+炖
+占
+农
+缀
+否
+经
+钚
+棵
+趟
+张
+亟
+吏
+茶
+谨
+捻
+论
+迸
+堂
+玉
+信
+吧
+瞠
+乡
+姬
+寺
+咬
+溏
+苄
+皿
+意
+赉
+宝
+尔
+钰
+艺
+特
+唳
+踉
+都
+荣
+倚
+登
+荐
+丧
+奇
+涵
+批
+炭
+近
+符
+傩
+感
+道
+着
+菊
+虹
+仲
+众
+懈
+濯
+颞
+眺
+南
+释
+北
+缝
+标
+既
+茗
+整
+撼
+迤
+贲
+挎
+耱
+拒
+某
+妍
+卫
+哇
+英
+矶
+藩
+治
+他
+元
+领
+膜
+遮
+穗
+蛾
+飞
+荒
+棺
+劫
+么
+市
+火
+温
+拈
+棚
+洼
+转
+果
+奕
+卸
+迪
+伸
+泳
+斗
+邡
+侄
+涨
+屯
+萋
+胭
+氡
+崮
+枞
+惧
+冒
+彩
+斜
+手
+豚
+随
+旭
+淑
+妞
+形
+菌
+吲
+沱
+争
+驯
+歹
+挟
+兆
+柱
+传
+至
+包
+内
+响
+临
+红
+功
+弩
+衡
+寂
+禁
+老
+棍
+耆
+渍
+织
+害
+氵
+渑
+布
+载
+靥
+嗬
+虽
+苹
+咨
+娄
+库
+雉
+榜
+帜
+嘲
+套
+瑚
+亲
+簸
+欧
+边
+6
+腿
+旮
+抛
+吹
+瞳
+得
+镓
+梗
+厨
+继
+漾
+愣
+憨
+士
+策
+窑
+抑
+躯
+襟
+脏
+参
+贸
+言
+干
+绸
+鳄
+穷
+藜
+音
+折
+详
+)
+举
+悍
+甸
+癌
+黎
+谴
+死
+罩
+迁
+寒
+驷
+袖
+媒
+蒋
+掘
+模
+纠
+恣
+观
+祖
+蛆
+碍
+位
+稿
+主
+澧
+跌
+筏
+京
+锏
+帝
+贴
+证
+糠
+才
+黄
+鲸
+略
+炯
+饱
+四
+出
+园
+犀
+牧
+容
+汉
+杆
+浈
+汰
+瑷
+造
+虫
+瘩
+怪
+驴
+济
+应
+花
+沣
+谔
+夙
+旅
+价
+矿
+以
+考
+s
+u
+呦
+晒
+巡
+茅
+准
+肟
+瓴
+詹
+仟
+褂
+译
+桌
+混
+宁
+怦
+郑
+抿
+些
+余
+鄂
+饴
+攒
+珑
+群
+阖
+岔
+琨
+藓
+预
+环
+洮
+岌
+宀
+杲
+瀵
+最
+常
+囡
+周
+踊
+女
+鼓
+袭
+喉
+简
+范
+薯
+遐
+疏
+粱
+黜
+禧
+法
+箔
+斤
+遥
+汝
+奥
+直
+贞
+撑
+置
+绱
+集
+她
+馅
+逗
+钧
+橱
+魉
+[
+恙
+躁
+唤
+9
+旺
+膘
+待
+脾
+惫
+购
+吗
+依
+盲
+度
+瘿
+蠖
+俾
+之
+镗
+拇
+鲵
+厝
+簧
+续
+款
+展
+啃
+表
+剔
+品
+钻
+腭
+损
+清
+锶
+统
+涌
+寸
+滨
+贪
+链
+吠
+冈
+伎
+迥
+咏
+吁
+览
+防
+迅
+失
+汾
+阔
+逵
+绀
+蔑
+列
+川
+凭
+努
+熨
+揪
+利
+俱
+绉
+抢
+鸨
+我
+即
+责
+膦
+易
+毓
+鹊
+刹
+玷
+岿
+空
+嘞
+绊
+排
+术
+估
+锷
+违
+们
+苟
+铜
+播
+肘
+件
+烫
+审
+鲂
+广
+像
+铌
+惰
+铟
+巳
+胍
+鲍
+康
+憧
+色
+恢
+想
+拷
+尤
+疳
+知
+S
+Y
+F
+D
+A
+峄
+裕
+帮
+握
+搔
+氐
+氘
+难
+墒
+沮
+雨
+叁
+缥
+悴
+藐
+湫
+娟
+苑
+稠
+颛
+簇
+后
+阕
+闭
+蕤
+缚
+怎
+佞
+码
+嘤
+蔡
+痊
+舱
+螯
+帕
+赫
+昵
+升
+烬
+岫
+、
+疵
+蜻
+髁
+蕨
+隶
+烛
+械
+丑
+盂
+梁
+强
+鲛
+由
+拘
+揉
+劭
+龟
+撤
+钩
+呕
+孛
+费
+妻
+漂
+求
+阑
+崖
+秤
+甘
+通
+深
+补
+赃
+坎
+床
+啪
+承
+吼
+量
+暇
+钼
+烨
+阂
+擎
+脱
+逮
+称
+P
+神
+属
+矗
+华
+届
+狍
+葑
+汹
+育
+患
+窒
+蛰
+佼
+静
+槎
+运
+鳗
+庆
+逝
+曼
+疱
+克
+代
+官
+此
+麸
+耧
+蚌
+晟
+例
+础
+榛
+副
+测
+唰
+缢
+迹
+灬
+霁
+身
+岁
+赭
+扛
+又
+菡
+乜
+雾
+板
+读
+陷
+徉
+贯
+郁
+虑
+变
+钓
+菜
+圾
+现
+琢
+式
+乐
+维
+渔
+浜
+左
+吾
+脑
+钡
+警
+T
+啵
+拴
+偌
+漱
+湿
+硕
+止
+骼
+魄
+积
+燥
+联
+踢
+玛
+则
+窿
+见
+振
+畿
+送
+班
+钽
+您
+赵
+刨
+印
+讨
+踝
+籍
+谡
+舌
+崧
+汽
+蔽
+沪
+酥
+绒
+怖
+财
+帖
+肱
+私
+莎
+勋
+羔
+霸
+励
+哼
+帐
+将
+帅
+渠
+纪
+婴
+娩
+岭
+厘
+滕
+吻
+伤
+坝
+冠
+戊
+隆
+瘁
+介
+涧
+物
+黍
+并
+姗
+奢
+蹑
+掣
+垸
+锴
+命
+箍
+捉
+病
+辖
+琰
+眭
+迩
+艘
+绌
+繁
+寅
+若
+毋
+思
+诉
+类
+诈
+燮
+轲
+酮
+狂
+重
+反
+职
+筱
+县
+委
+磕
+绣
+奖
+晋
+濉
+志
+徽
+肠
+呈
+獐
+坻
+口
+片
+碰
+几
+村
+柿
+劳
+料
+获
+亩
+惕
+晕
+厌
+号
+罢
+池
+正
+鏖
+煨
+家
+棕
+复
+尝
+懋
+蜥
+锅
+岛
+扰
+队
+坠
+瘾
+钬
+@
+卧
+疣
+镇
+譬
+冰
+彷
+频
+黯
+据
+垄
+采
+八
+缪
+瘫
+型
+熹
+砰
+楠
+襁
+箐
+但
+嘶
+绳
+啤
+拍
+盥
+穆
+傲
+洗
+盯
+塘
+怔
+筛
+丿
+台
+恒
+喂
+葛
+永
+¥
+烟
+酒
+桦
+书
+砂
+蚝
+缉
+态
+瀚
+袄
+圳
+轻
+蛛
+超
+榧
+遛
+姒
+奘
+铮
+右
+荽
+望
+偻
+卡
+丶
+氰
+附
+做
+革
+索
+戚
+坨
+桷
+唁
+垅
+榻
+岐
+偎
+坛
+莨
+山
+殊
+微
+骇
+陈
+爨
+推
+嗝
+驹
+澡
+藁
+呤
+卤
+嘻
+糅
+逛
+侵
+郓
+酌
+德
+摇
+※
+鬃
+被
+慨
+殡
+羸
+昌
+泡
+戛
+鞋
+河
+宪
+沿
+玲
+鲨
+翅
+哽
+源
+铅
+语
+照
+邯
+址
+荃
+佬
+顺
+鸳
+町
+霭
+睾
+瓢
+夸
+椁
+晓
+酿
+痈
+咔
+侏
+券
+噎
+湍
+签
+嚷
+离
+午
+尚
+社
+锤
+背
+孟
+使
+浪
+缦
+潍
+鞅
+军
+姹
+驶
+笑
+鳟
+鲁
+》
+孽
+钜
+绿
+洱
+礴
+焯
+椰
+颖
+囔
+乌
+孔
+巴
+互
+性
+椽
+哞
+聘
+昨
+早
+暮
+胶
+炀
+隧
+低
+彗
+昝
+铁
+呓
+氽
+藉
+喔
+癖
+瑗
+姨
+权
+胱
+韦
+堑
+蜜
+酋
+楝
+砝
+毁
+靓
+歙
+锲
+究
+屋
+喳
+骨
+辨
+碑
+武
+鸠
+宫
+辜
+烊
+适
+坡
+殃
+培
+佩
+供
+走
+蜈
+迟
+翼
+况
+姣
+凛
+浔
+吃
+飘
+债
+犟
+金
+促
+苛
+崇
+坂
+莳
+畔
+绂
+兵
+蠕
+斋
+根
+砍
+亢
+欢
+恬
+崔
+剁
+餐
+榫
+快
+扶
+濒
+缠
+鳜
+当
+彭
+驭
+浦
+篮
+昀
+锆
+秸
+钳
+弋
+娣
+瞑
+夷
+龛
+苫
+拱
+致
+%
+嵊
+障
+隐
+弑
+初
+娓
+抉
+汩
+累
+蓖
+"
+唬
+助
+苓
+昙
+押
+毙
+破
+城
+郧
+逢
+嚏
+獭
+瞻
+溱
+婿
+赊
+跨
+恼
+璧
+萃
+姻
+貉
+灵
+炉
+密
+氛
+陶
+砸
+谬
+衔
+点
+琛
+沛
+枳
+层
+岱
+诺
+脍
+榈
+埂
+征
+冷
+裁
+打
+蹴
+素
+瘘
+逞
+蛐
+聊
+激
+腱
+萘
+踵
+飒
+蓟
+吆
+取
+咙
+簋
+涓
+矩
+曝
+挺
+揣
+座
+你
+史
+舵
+焱
+尘
+苏
+笈
+脚
+溉
+榨
+诵
+樊
+邓
+焊
+义
+庶
+儋
+蟋
+蒲
+赦
+呷
+杞
+诠
+豪
+还
+试
+颓
+茉
+太
+除
+紫
+逃
+痴
+草
+充
+鳕
+珉
+祗
+墨
+渭
+烩
+蘸
+慕
+璇
+镶
+穴
+嵘
+恶
+骂
+险
+绋
+幕
+碉
+肺
+戳
+刘
+潞
+秣
+纾
+潜
+銮
+洛
+须
+罘
+销
+瘪
+汞
+兮
+屉
+r
+林
+厕
+质
+探
+划
+狸
+殚
+善
+煊
+烹
+〒
+锈
+逯
+宸
+辍
+泱
+柚
+袍
+远
+蹋
+嶙
+绝
+峥
+娥
+缍
+雀
+徵
+认
+镱
+谷
+=
+贩
+勉
+撩
+鄯
+斐
+洋
+非
+祚
+泾
+诒
+饿
+撬
+威
+晷
+搭
+芍
+锥
+笺
+蓦
+候
+琊
+档
+礁
+沼
+卵
+荠
+忑
+朝
+凹
+瑞
+头
+仪
+弧
+孵
+畏
+铆
+突
+衲
+车
+浩
+气
+茂
+悖
+厢
+枕
+酝
+戴
+湾
+邹
+飚
+攘
+锂
+写
+宵
+翁
+岷
+无
+喜
+丈
+挑
+嗟
+绛
+殉
+议
+槽
+具
+醇
+淞
+笃
+郴
+阅
+饼
+底
+壕
+砚
+弈
+询
+缕
+庹
+翟
+零
+筷
+暨
+舟
+闺
+甯
+撞
+麂
+茌
+蔼
+很
+珲
+捕
+棠
+角
+阉
+媛
+娲
+诽
+剿
+尉
+爵
+睬
+韩
+诰
+匣
+危
+糍
+镯
+立
+浏
+阳
+少
+盆
+舔
+擘
+匪
+申
+尬
+铣
+旯
+抖
+赘
+瓯
+居
+哮
+游
+锭
+茏
+歌
+坏
+甚
+秒
+舞
+沙
+仗
+劲
+潺
+阿
+燧
+郭
+嗖
+霏
+忠
+材
+奂
+耐
+跺
+砀
+输
+岖
+媳
+氟
+极
+摆
+灿
+今
+扔
+腻
+枝
+奎
+药
+熄
+吨
+话
+q
+额
+慑
+嘌
+协
+喀
+壳
+埭
+视
+著
+於
+愧
+陲
+翌
+峁
+颅
+佛
+腹
+聋
+侯
+咎
+叟
+秀
+颇
+存
+较
+罪
+哄
+岗
+扫
+栏
+钾
+羌
+己
+璨
+枭
+霉
+煌
+涸
+衿
+键
+镝
+益
+岢
+奏
+连
+夯
+睿
+冥
+均
+糖
+狞
+蹊
+稻
+爸
+刿
+胥
+煜
+丽
+肿
+璃
+掸
+跚
+灾
+垂
+樾
+濑
+乎
+莲
+窄
+犹
+撮
+战
+馄
+软
+络
+显
+鸢
+胸
+宾
+妲
+恕
+埔
+蝌
+份
+遇
+巧
+瞟
+粒
+恰
+剥
+桡
+博
+讯
+凯
+堇
+阶
+滤
+卖
+斌
+骚
+彬
+兑
+磺
+樱
+舷
+两
+娱
+福
+仃
+差
+找
+桁
+净
+把
+阴
+污
+戬
+雷
+碓
+蕲
+楚
+罡
+焖
+抽
+妫
+咒
+仑
+闱
+尽
+邑
+菁
+爱
+贷
+沥
+鞑
+牡
+嗉
+崴
+骤
+塌
+嗦
+订
+拮
+滓
+捡
+锻
+次
+坪
+杩
+臃
+箬
+融
+珂
+鹗
+宗
+枚
+降
+鸬
+妯
+阄
+堰
+盐
+毅
+必
+杨
+崃
+俺
+甬
+状
+莘
+货
+耸
+菱
+腼
+铸
+唏
+痤
+孚
+澳
+懒
+溅
+翘
+疙
+杷
+淼
+缙
+骰
+喊
+悉
+砻
+坷
+艇
+赁
+界
+谤
+纣
+宴
+晃
+茹
+归
+饭
+梢
+铡
+街
+抄
+肼
+鬟
+苯
+颂
+撷
+戈
+炒
+咆
+茭
+瘙
+负
+仰
+客
+琉
+铢
+封
+卑
+珥
+椿
+镧
+窨
+鬲
+寿
+御
+袤
+铃
+萎
+砖
+餮
+脒
+裳
+肪
+孕
+嫣
+馗
+嵇
+恳
+氯
+江
+石
+褶
+冢
+祸
+阻
+狈
+羞
+银
+靳
+透
+咳
+叼
+敷
+芷
+啥
+它
+瓤
+兰
+痘
+懊
+逑
+肌
+往
+捺
+坊
+甩
+呻
+沦
+忘
+膻
+祟
+菅
+剧
+崆
+智
+坯
+臧
+霍
+墅
+攻
+眯
+倘
+拢
+骠
+铐
+庭
+岙
+瓠
+缺
+泥
+迢
+捶
+?
+?
+郏
+喙
+掷
+沌
+纯
+秘
+种
+听
+绘
+固
+螨
+团
+香
+盗
+妒
+埚
+蓝
+拖
+旱
+荞
+铀
+血
+遏
+汲
+辰
+叩
+拽
+幅
+硬
+惶
+桀
+漠
+措
+泼
+唑
+齐
+肾
+念
+酱
+虚
+屁
+耶
+旗
+砦
+闵
+婉
+馆
+拭
+绅
+韧
+忏
+窝
+醋
+葺
+顾
+辞
+倜
+堆
+辋
+逆
+玟
+贱
+疾
+董
+惘
+倌
+锕
+淘
+嘀
+莽
+俭
+笏
+绑
+鲷
+杈
+择
+蟀
+粥
+嗯
+驰
+逾
+案
+谪
+褓
+胫
+哩
+昕
+颚
+鲢
+绠
+躺
+鹄
+崂
+儒
+俨
+丝
+尕
+泌
+啊
+萸
+彰
+幺
+吟
+骄
+苣
+弦
+脊
+瑰
+诛
+镁
+析
+闪
+剪
+侧
+哟
+框
+螃
+守
+嬗
+燕
+狭
+铈
+缮
+概
+迳
+痧
+鲲
+俯
+售
+笼
+痣
+扉
+挖
+满
+咋
+援
+邱
+扇
+歪
+便
+玑
+绦
+峡
+蛇
+叨
+泽
+胃
+斓
+喋
+怂
+坟
+猪
+该
+蚬
+炕
+弥
+赞
+棣
+晔
+娠
+挲
+狡
+创
+疖
+铕
+镭
+稷
+挫
+弭
+啾
+翔
+粉
+履
+苘
+哦
+楼
+秕
+铂
+土
+锣
+瘟
+挣
+栉
+习
+享
+桢
+袅
+磨
+桂
+谦
+延
+坚
+蔚
+噗
+署
+谟
+猬
+钎
+恐
+嬉
+雒
+倦
+衅
+亏
+璩
+睹
+刻
+殿
+王
+算
+雕
+麻
+丘
+柯
+骆
+丸
+塍
+谚
+添
+鲈
+垓
+桎
+蚯
+芥
+予
+飕
+镦
+谌
+窗
+醚
+菀
+亮
+搪
+莺
+蒿
+羁
+足
+J
+真
+轶
+悬
+衷
+靛
+翊
+掩
+哒
+炅
+掐
+冼
+妮
+l
+谐
+稚
+荆
+擒
+犯
+陵
+虏
+浓
+崽
+刍
+陌
+傻
+孜
+千
+靖
+演
+矜
+钕
+煽
+杰
+酗
+渗
+伞
+栋
+俗
+泫
+戍
+罕
+沾
+疽
+灏
+煦
+芬
+磴
+叱
+阱
+榉
+湃
+蜀
+叉
+醒
+彪
+租
+郡
+篷
+屎
+良
+垢
+隗
+弱
+陨
+峪
+砷
+掴
+颁
+胎
+雯
+绵
+贬
+沐
+撵
+隘
+篙
+暖
+曹
+陡
+栓
+填
+臼
+彦
+瓶
+琪
+潼
+哪
+鸡
+摩
+啦
+俟
+锋
+域
+耻
+蔫
+疯
+纹
+撇
+毒
+绶
+痛
+酯
+忍
+爪
+赳
+歆
+嘹
+辕
+烈
+册
+朴
+钱
+吮
+毯
+癜
+娃
+谀
+邵
+厮
+炽
+璞
+邃
+丐
+追
+词
+瓒
+忆
+轧
+芫
+谯
+喷
+弟
+半
+冕
+裙
+掖
+墉
+绮
+寝
+苔
+势
+顷
+褥
+切
+衮
+君
+佳
+嫒
+蚩
+霞
+佚
+洙
+逊
+镖
+暹
+唛
+&
+殒
+顶
+碗
+獗
+轭
+铺
+蛊
+废
+恹
+汨
+崩
+珍
+那
+杵
+曲
+纺
+夏
+薰
+傀
+闳
+淬
+姘
+舀
+拧
+卷
+楂
+恍
+讪
+厩
+寮
+篪
+赓
+乘
+灭
+盅
+鞣
+沟
+慎
+挂
+饺
+鼾
+杳
+树
+缨
+丛
+絮
+娌
+臻
+嗳
+篡
+侩
+述
+衰
+矛
+圈
+蚜
+匕
+筹
+匿
+濞
+晨
+叶
+骋
+郝
+挚
+蚴
+滞
+增
+侍
+描
+瓣
+吖
+嫦
+蟒
+匾
+圣
+赌
+毡
+癞
+恺
+百
+曳
+需
+篓
+肮
+庖
+帏
+卿
+驿
+遗
+蹬
+鬓
+骡
+歉
+芎
+胳
+屐
+禽
+烦
+晌
+寄
+媾
+狄
+翡
+苒
+船
+廉
+终
+痞
+殇
+々
+畦
+饶
+改
+拆
+悻
+萄
+£
+瓿
+乃
+訾
+桅
+匮
+溧
+拥
+纱
+铍
+骗
+蕃
+龋
+缬
+父
+佐
+疚
+栎
+醍
+掳
+蓄
+x
+惆
+颜
+鲆
+榆
+〔
+猎
+敌
+暴
+谥
+鲫
+贾
+罗
+玻
+缄
+扦
+芪
+癣
+落
+徒
+臾
+恿
+猩
+托
+邴
+肄
+牵
+春
+陛
+耀
+刊
+拓
+蓓
+邳
+堕
+寇
+枉
+淌
+啡
+湄
+兽
+酷
+萼
+碚
+濠
+萤
+夹
+旬
+戮
+梭
+琥
+椭
+昔
+勺
+蜊
+绐
+晚
+孺
+僵
+宣
+摄
+冽
+旨
+萌
+忙
+蚤
+眉
+噼
+蟑
+付
+契
+瓜
+悼
+颡
+壁
+曾
+窕
+颢
+澎
+仿
+俑
+浑
+嵌
+浣
+乍
+碌
+褪
+乱
+蔟
+隙
+玩
+剐
+葫
+箫
+纲
+围
+伐
+决
+伙
+漩
+瑟
+刑
+肓
+镳
+缓
+蹭
+氨
+皓
+典
+畲
+坍
+铑
+檐
+塑
+洞
+倬
+储
+胴
+淳
+戾
+吐
+灼
+惺
+妙
+毕
+珐
+缈
+虱
+盖
+羰
+鸿
+磅
+谓
+髅
+娴
+苴
+唷
+蚣
+霹
+抨
+贤
+唠
+犬
+誓
+逍
+庠
+逼
+麓
+籼
+釉
+呜
+碧
+秧
+氩
+摔
+霄
+穸
+纨
+辟
+妈
+映
+完
+牛
+缴
+嗷
+炊
+恩
+荔
+茆
+掉
+紊
+慌
+莓
+羟
+阙
+萁
+磐
+另
+蕹
+辱
+鳐
+湮
+吡
+吩
+唐
+睦
+垠
+舒
+圜
+冗
+瞿
+溺
+芾
+囱
+匠
+僳
+汐
+菩
+饬
+漓
+黑
+霰
+浸
+濡
+窥
+毂
+蒡
+兢
+驻
+鹉
+芮
+诙
+迫
+雳
+厂
+忐
+臆
+猴
+鸣
+蚪
+栈
+箕
+羡
+渐
+莆
+捍
+眈
+哓
+趴
+蹼
+埕
+嚣
+骛
+宏
+淄
+斑
+噜
+严
+瑛
+垃
+椎
+诱
+压
+庾
+绞
+焘
+廿
+抡
+迄
+棘
+夫
+纬
+锹
+眨
+瞌
+侠
+脐
+竞
+瀑
+孳
+骧
+遁
+姜
+颦
+荪
+滚
+萦
+伪
+逸
+粳
+爬
+锁
+矣
+役
+趣
+洒
+颔
+诏
+逐
+奸
+甭
+惠
+攀
+蹄
+泛
+尼
+拼
+阮
+鹰
+亚
+颈
+惑
+勒
+际
+肛
+爷
+刚
+钨
+丰
+养
+冶
+鲽
+辉
+蔻
+画
+覆
+皴
+妊
+麦
+返
+醉
+皂
+擀
+酶
+凑
+粹
+悟
+诀
+硖
+港
+卜
+z
+杀
+涕
+舍
+铠
+抵
+弛
+段
+敝
+镐
+奠
+拂
+轴
+跛
+袱
+e
+t
+沉
+菇
+俎
+薪
+峦
+秭
+蟹
+历
+盟
+菠
+寡
+液
+肢
+喻
+染
+裱
+悱
+抱
+氙
+赤
+捅
+猛
+跑
+氮
+谣
+仁
+尺
+辊
+窍
+烙
+衍
+架
+擦
+倏
+璐
+瑁
+币
+楞
+胖
+夔
+趸
+邛
+惴
+饕
+虔
+蝎
+哉
+贝
+宽
+辫
+炮
+扩
+饲
+籽
+魏
+菟
+锰
+伍
+猝
+末
+琳
+哚
+蛎
+邂
+呀
+姿
+鄞
+却
+歧
+仙
+恸
+椐
+森
+牒
+寤
+袒
+婆
+虢
+雅
+钉
+朵
+贼
+欲
+苞
+寰
+故
+龚
+坭
+嘘
+咫
+礼
+硷
+兀
+睢
+汶
+’
+铲
+烧
+绕
+诃
+浃
+钿
+哺
+柜
+讼
+颊
+璁
+腔
+洽
+咐
+脲
+簌
+筠
+镣
+玮
+鞠
+谁
+兼
+姆
+挥
+梯
+蝴
+谘
+漕
+刷
+躏
+宦
+弼
+b
+垌
+劈
+麟
+莉
+揭
+笙
+渎
+仕
+嗤
+仓
+配
+怏
+抬
+错
+泯
+镊
+孰
+猿
+邪
+仍
+秋
+鼬
+壹
+歇
+吵
+炼
+<
+尧
+射
+柬
+廷
+胧
+霾
+凳
+隋
+肚
+浮
+梦
+祥
+株
+堵
+退
+L
+鹫
+跎
+凶
+毽
+荟
+炫
+栩
+玳
+甜
+沂
+鹿
+顽
+伯
+爹
+赔
+蛴
+徐
+匡
+欣
+狰
+缸
+雹
+蟆
+疤
+默
+沤
+啜
+痂
+衣
+禅
+w
+i
+h
+辽
+葳
+黝
+钗
+停
+沽
+棒
+馨
+颌
+肉
+吴
+硫
+悯
+劾
+娈
+马
+啧
+吊
+悌
+镑
+峭
+帆
+瀣
+涉
+咸
+疸
+滋
+泣
+翦
+拙
+癸
+钥
+蜒
++
+尾
+庄
+凝
+泉
+婢
+渴
+谊
+乞
+陆
+锉
+糊
+鸦
+淮
+I
+B
+N
+晦
+弗
+乔
+庥
+葡
+尻
+席
+橡
+傣
+渣
+拿
+惩
+麋
+斛
+缃
+矮
+蛏
+岘
+鸽
+姐
+膏
+催
+奔
+镒
+喱
+蠡
+摧
+钯
+胤
+柠
+拐
+璋
+鸥
+卢
+荡
+倾
+^
+_
+珀
+逄
+萧
+塾
+掇
+贮
+笆
+聂
+圃
+冲
+嵬
+M
+滔
+笕
+值
+炙
+偶
+蜱
+搐
+梆
+汪
+蔬
+腑
+鸯
+蹇
+敞
+绯
+仨
+祯
+谆
+梧
+糗
+鑫
+啸
+豺
+囹
+猾
+巢
+柄
+瀛
+筑
+踌
+沭
+暗
+苁
+鱿
+蹉
+脂
+蘖
+牢
+热
+木
+吸
+溃
+宠
+序
+泞
+偿
+拜
+檩
+厚
+朐
+毗
+螳
+吞
+媚
+朽
+担
+蝗
+橘
+畴
+祈
+糟
+盱
+隼
+郜
+惜
+珠
+裨
+铵
+焙
+琚
+唯
+咚
+噪
+骊
+丫
+滢
+勤
+棉
+呸
+咣
+淀
+隔
+蕾
+窈
+饨
+挨
+煅
+短
+匙
+粕
+镜
+赣
+撕
+墩
+酬
+馁
+豌
+颐
+抗
+酣
+氓
+佑
+搁
+哭
+递
+耷
+涡
+桃
+贻
+碣
+截
+瘦
+昭
+镌
+蔓
+氚
+甲
+猕
+蕴
+蓬
+散
+拾
+纛
+狼
+猷
+铎
+埋
+旖
+矾
+讳
+囊
+糜
+迈
+粟
+蚂
+紧
+鲳
+瘢
+栽
+稼
+羊
+锄
+斟
+睁
+桥
+瓮
+蹙
+祉
+醺
+鼻
+昱
+剃
+跳
+篱
+跷
+蒜
+翎
+宅
+晖
+嗑
+壑
+峻
+癫
+屏
+狠
+陋
+袜
+途
+憎
+祀
+莹
+滟
+佶
+溥
+臣
+约
+盛
+峰
+磁
+慵
+婪
+拦
+莅
+朕
+鹦
+粲
+裤
+哎
+疡
+嫖
+琵
+窟
+堪
+谛
+嘉
+儡
+鳝
+斩
+郾
+驸
+酊
+妄
+胜
+贺
+徙
+傅
+噌
+钢
+栅
+庇
+恋
+匝
+巯
+邈
+尸
+锚
+粗
+佟
+蛟
+薹
+纵
+蚊
+郅
+绢
+锐
+苗
+俞
+篆
+淆
+膀
+鲜
+煎
+诶
+秽
+寻
+涮
+刺
+怀
+噶
+巨
+褰
+魅
+灶
+灌
+桉
+藕
+谜
+舸
+薄
+搀
+恽
+借
+牯
+痉
+渥
+愿
+亓
+耘
+杠
+柩
+锔
+蚶
+钣
+珈
+喘
+蹒
+幽
+赐
+稗
+晤
+莱
+泔
+扯
+肯
+菪
+裆
+腩
+豉
+疆
+骜
+腐
+倭
+珏
+唔
+粮
+亡
+润
+慰
+伽
+橄
+玄
+誉
+醐
+胆
+龊
+粼
+塬
+陇
+彼
+削
+嗣
+绾
+芽
+妗
+垭
+瘴
+爽
+薏
+寨
+龈
+泠
+弹
+赢
+漪
+猫
+嘧
+涂
+恤
+圭
+茧
+烽
+屑
+痕
+巾
+赖
+荸
+凰
+腮
+畈
+亵
+蹲
+偃
+苇
+澜
+艮
+换
+骺
+烘
+苕
+梓
+颉
+肇
+哗
+悄
+氤
+涠
+葬
+屠
+鹭
+植
+竺
+佯
+诣
+鲇
+瘀
+鲅
+邦
+移
+滁
+冯
+耕
+癔
+戌
+茬
+沁
+巩
+悠
+湘
+洪
+痹
+锟
+循
+谋
+腕
+鳃
+钠
+捞
+焉
+迎
+碱
+伫
+急
+榷
+奈
+邝
+卯
+辄
+皲
+卟
+醛
+畹
+忧
+稳
+雄
+昼
+缩
+阈
+睑
+扌
+耗
+曦
+涅
+捏
+瞧
+邕
+淖
+漉
+铝
+耦
+禹
+湛
+喽
+莼
+琅
+诸
+苎
+纂
+硅
+始
+嗨
+傥
+燃
+臂
+赅
+嘈
+呆
+贵
+屹
+壮
+肋
+亍
+蚀
+卅
+豹
+腆
+邬
+迭
+浊
+}
+童
+螂
+捐
+圩
+勐
+触
+寞
+汊
+壤
+荫
+膺
+渌
+芳
+懿
+遴
+螈
+泰
+蓼
+蛤
+茜
+舅
+枫
+朔
+膝
+眙
+避
+梅
+判
+鹜
+璜
+牍
+缅
+垫
+藻
+黔
+侥
+惚
+懂
+踩
+腰
+腈
+札
+丞
+唾
+慈
+顿
+摹
+荻
+琬
+~
+斧
+沈
+滂
+胁
+胀
+幄
+莜
+Z
+匀
+鄄
+掌
+绰
+茎
+焚
+赋
+萱
+谑
+汁
+铒
+瞎
+夺
+蜗
+野
+娆
+冀
+弯
+篁
+懵
+灞
+隽
+芡
+脘
+俐
+辩
+芯
+掺
+喏
+膈
+蝈
+觐
+悚
+踹
+蔗
+熠
+鼠
+呵
+抓
+橼
+峨
+畜
+缔
+禾
+崭
+弃
+熊
+摒
+凸
+拗
+穹
+蒙
+抒
+祛
+劝
+闫
+扳
+阵
+醌
+踪
+喵
+侣
+搬
+仅
+荧
+赎
+蝾
+琦
+买
+婧
+瞄
+寓
+皎
+冻
+赝
+箩
+莫
+瞰
+郊
+笫
+姝
+筒
+枪
+遣
+煸
+袋
+舆
+痱
+涛
+母
+〇
+启
+践
+耙
+绲
+盘
+遂
+昊
+搞
+槿
+诬
+纰
+泓
+惨
+檬
+亻
+越
+C
+o
+憩
+熵
+祷
+钒
+暧
+塔
+阗
+胰
+咄
+娶
+魔
+琶
+钞
+邻
+扬
+杉
+殴
+咽
+弓
+髻
+】
+吭
+揽
+霆
+拄
+殖
+脆
+彻
+岩
+芝
+勃
+辣
+剌
+钝
+嘎
+甄
+佘
+皖
+伦
+授
+徕
+憔
+挪
+皇
+庞
+稔
+芜
+踏
+溴
+兖
+卒
+擢
+饥
+鳞
+煲
+‰
+账
+颗
+叻
+斯
+捧
+鳍
+琮
+讹
+蛙
+纽
+谭
+酸
+兔
+莒
+睇
+伟
+觑
+羲
+嗜
+宜
+褐
+旎
+辛
+卦
+诘
+筋
+鎏
+溪
+挛
+熔
+阜
+晰
+鳅
+丢
+奚
+灸
+呱
+献
+陉
+黛
+鸪
+甾
+萨
+疮
+拯
+洲
+疹
+辑
+叙
+恻
+谒
+允
+柔
+烂
+氏
+逅
+漆
+拎
+惋
+扈
+湟
+纭
+啕
+掬
+擞
+哥
+忽
+涤
+鸵
+靡
+郗
+瓷
+扁
+廊
+怨
+雏
+钮
+敦
+E
+懦
+憋
+汀
+拚
+啉
+腌
+岸
+f
+痼
+瞅
+尊
+咀
+眩
+飙
+忌
+仝
+迦
+熬
+毫
+胯
+篑
+茄
+腺
+凄
+舛
+碴
+锵
+诧
+羯
+後
+漏
+汤
+宓
+仞
+蚁
+壶
+谰
+皑
+铄
+棰
+罔
+辅
+晶
+苦
+牟
+闽
+\
+烃
+饮
+聿
+丙
+蛳
+朱
+煤
+涔
+鳖
+犁
+罐
+荼
+砒
+淦
+妤
+黏
+戎
+孑
+婕
+瑾
+戢
+钵
+枣
+捋
+砥
+衩
+狙
+桠
+稣
+阎
+肃
+梏
+诫
+孪
+昶
+婊
+衫
+嗔
+侃
+塞
+蜃
+樵
+峒
+貌
+屿
+欺
+缫
+阐
+栖
+诟
+珞
+荭
+吝
+萍
+嗽
+恂
+啻
+蜴
+磬
+峋
+俸
+豫
+谎
+徊
+镍
+韬
+魇
+晴
+U
+囟
+猜
+蛮
+坐
+囿
+伴
+亭
+肝
+佗
+蝠
+妃
+胞
+滩
+榴
+氖
+垩
+苋
+砣
+扪
+馏
+姓
+轩
+厉
+夥
+侈
+禀
+垒
+岑
+赏
+钛
+辐
+痔
+披
+纸
+碳
+“
+坞
+蠓
+挤
+荥
+沅
+悔
+铧
+帼
+蒌
+蝇
+a
+p
+y
+n
+g
+哀
+浆
+瑶
+凿
+桶
+馈
+皮
+奴
+苜
+佤
+伶
+晗
+铱
+炬
+优
+弊
+氢
+恃
+甫
+攥
+端
+锌
+灰
+稹
+炝
+曙
+邋
+亥
+眶
+碾
+拉
+萝
+绔
+捷
+浍
+腋
+姑
+菖
+凌
+涞
+麽
+锢
+桨
+潢
+绎
+镰
+殆
+锑
+渝
+铬
+困
+绽
+觎
+匈
+糙
+暑
+裹
+鸟
+盔
+肽
+迷
+綦
+亳
+佝
+俘
+钴
+觇
+骥
+仆
+疝
+跪
+婶
+郯
+瀹
+唉
+脖
+踞
+针
+晾
+忒
+扼
+瞩
+叛
+椒
+疟
+嗡
+邗
+肆
+跆
+玫
+忡
+捣
+咧
+唆
+艄
+蘑
+潦
+笛
+阚
+沸
+泻
+掊
+菽
+贫
+斥
+髂
+孢
+镂
+赂
+麝
+鸾
+屡
+衬
+苷
+恪
+叠
+希
+粤
+爻
+喝
+茫
+惬
+郸
+绻
+庸
+撅
+碟
+宄
+妹
+膛
+叮
+饵
+崛
+嗲
+椅
+冤
+搅
+咕
+敛
+尹
+垦
+闷
+蝉
+霎
+勰
+败
+蓑
+泸
+肤
+鹌
+幌
+焦
+浠
+鞍
+刁
+舰
+乙
+竿
+裔
+。
+茵
+函
+伊
+兄
+丨
+娜
+匍
+謇
+莪
+宥
+似
+蝽
+翳
+酪
+翠
+粑
+薇
+祢
+骏
+赠
+叫
+Q
+噤
+噻
+竖
+芗
+莠
+潭
+俊
+羿
+耜
+O
+郫
+趁
+嗪
+囚
+蹶
+芒
+洁
+笋
+鹑
+敲
+硝
+啶
+堡
+渲
+揩
+携
+宿
+遒
+颍
+扭
+棱
+割
+萜
+蔸
+葵
+琴
+捂
+饰
+衙
+耿
+掠
+募
+岂
+窖
+涟
+蔺
+瘤
+柞
+瞪
+怜
+匹
+距
+楔
+炜
+哆
+秦
+缎
+幼
+茁
+绪
+痨
+恨
+楸
+娅
+瓦
+桩
+雪
+嬴
+伏
+榔
+妥
+铿
+拌
+眠
+雍
+缇
+‘
+卓
+搓
+哌
+觞
+噩
+屈
+哧
+髓
+咦
+巅
+娑
+侑
+淫
+膳
+祝
+勾
+姊
+莴
+胄
+疃
+薛
+蜷
+胛
+巷
+芙
+芋
+熙
+闰
+勿
+窃
+狱
+剩
+钏
+幢
+陟
+铛
+慧
+靴
+耍
+k
+浙
+浇
+飨
+惟
+绗
+祜
+澈
+啼
+咪
+磷
+摞
+诅
+郦
+抹
+跃
+壬
+吕
+肖
+琏
+颤
+尴
+剡
+抠
+凋
+赚
+泊
+津
+宕
+殷
+倔
+氲
+漫
+邺
+涎
+怠
+$
+垮
+荬
+遵
+俏
+叹
+噢
+饽
+蜘
+孙
+筵
+疼
+鞭
+羧
+牦
+箭
+潴
+c
+眸
+祭
+髯
+啖
+坳
+愁
+芩
+驮
+倡
+巽
+穰
+沃
+胚
+怒
+凤
+槛
+剂
+趵
+嫁
+v
+邢
+灯
+鄢
+桐
+睽
+檗
+锯
+槟
+婷
+嵋
+圻
+诗
+蕈
+颠
+遭
+痢
+芸
+怯
+馥
+竭
+锗
+徜
+恭
+遍
+籁
+剑
+嘱
+苡
+龄
+僧
+桑
+潸
+弘
+澶
+楹
+悲
+讫
+愤
+腥
+悸
+谍
+椹
+呢
+桓
+葭
+攫
+阀
+翰
+躲
+敖
+柑
+郎
+笨
+橇
+呃
+魁
+燎
+脓
+葩
+磋
+垛
+玺
+狮
+沓
+砜
+蕊
+锺
+罹
+蕉
+翱
+虐
+闾
+巫
+旦
+茱
+嬷
+枯
+鹏
+贡
+芹
+汛
+矫
+绁
+拣
+禺
+佃
+讣
+舫
+惯
+乳
+趋
+疲
+挽
+岚
+虾
+衾
+蠹
+蹂
+飓
+氦
+铖
+孩
+稞
+瑜
+壅
+掀
+勘
+妓
+畅
+髋
+W
+庐
+牲
+蓿
+榕
+练
+垣
+唱
+邸
+菲
+昆
+婺
+穿
+绡
+麒
+蚱
+掂
+愚
+泷
+涪
+漳
+妩
+娉
+榄
+讷
+觅
+旧
+藤
+煮
+呛
+柳
+腓
+叭
+庵
+烷
+阡
+罂
+蜕
+擂
+猖
+咿
+媲
+脉
+【
+沏
+貅
+黠
+熏
+哲
+烁
+坦
+酵
+兜
+潇
+撒
+剽
+珩
+圹
+乾
+摸
+樟
+帽
+嗒
+襄
+魂
+轿
+憬
+锡
+〕
+喃
+皆
+咖
+隅
+脸
+残
+泮
+袂
+鹂
+珊
+囤
+捆
+咤
+误
+徨
+闹
+淙
+芊
+淋
+怆
+囗
+拨
+梳
+渤
+R
+G
+绨
+蚓
+婀
+幡
+狩
+麾
+谢
+唢
+裸
+旌
+伉
+纶
+裂
+驳
+砼
+咛
+澄
+樨
+蹈
+宙
+澍
+倍
+貔
+操
+勇
+蟠
+摈
+砧
+虬
+够
+缁
+悦
+藿
+撸
+艹
+摁
+淹
+豇
+虎
+榭
+吱
+d
+喧
+荀
+踱
+侮
+奋
+偕
+饷
+犍
+惮
+坑
+璎
+徘
+宛
+妆
+袈
+倩
+窦
+昂
+荏
+乖
+K
+怅
+撰
+鳙
+牙
+袁
+酞
+X
+痿
+琼
+闸
+雁
+趾
+荚
+虻
+涝
+《
+杏
+韭
+偈
+烤
+绫
+鞘
+卉
+症
+遢
+蓥
+诋
+杭
+荨
+匆
+竣
+簪
+辙
+敕
+虞
+丹
+缭
+咩
+黟
+m
+淤
+瑕
+咂
+铉
+硼
+茨
+嶂
+痒
+畸
+敬
+涿
+粪
+窘
+熟
+叔
+嫔
+盾
+忱
+裘
+憾
+梵
+赡
+珙
+咯
+娘
+庙
+溯
+胺
+葱
+痪
+摊
+荷
+卞
+乒
+髦
+寐
+铭
+坩
+胗
+枷
+爆
+溟
+嚼
+羚
+砬
+轨
+惊
+挠
+罄
+竽
+菏
+氧
+浅
+楣
+盼
+枢
+炸
+阆
+杯
+谏
+噬
+淇
+渺
+俪
+秆
+墓
+泪
+跻
+砌
+痰
+垡
+渡
+耽
+釜
+讶
+鳎
+煞
+呗
+韶
+舶
+绷
+鹳
+缜
+旷
+铊
+皱
+龌
+檀
+霖
+奄
+槐
+艳
+蝶
+旋
+哝
+赶
+骞
+蚧
+腊
+盈
+丁
+`
+蜚
+矸
+蝙
+睨
+嚓
+僻
+鬼
+醴
+夜
+彝
+磊
+笔
+拔
+栀
+糕
+厦
+邰
+纫
+逭
+纤
+眦
+膊
+馍
+躇
+烯
+蘼
+冬
+诤
+暄
+骶
+哑
+瘠
+」
+臊
+丕
+愈
+咱
+螺
+擅
+跋
+搏
+硪
+谄
+笠
+淡
+嘿
+骅
+谧
+鼎
+皋
+姚
+歼
+蠢
+驼
+耳
+胬
+挝
+涯
+狗
+蒽
+孓
+犷
+凉
+芦
+箴
+铤
+孤
+嘛
+坤
+V
+茴
+朦
+挞
+尖
+橙
+诞
+搴
+碇
+洵
+浚
+帚
+蜍
+漯
+柘
+嚎
+讽
+芭
+荤
+咻
+祠
+秉
+跖
+埃
+吓
+糯
+眷
+馒
+惹
+娼
+鲑
+嫩
+讴
+轮
+瞥
+靶
+褚
+乏
+缤
+宋
+帧
+删
+驱
+碎
+扑
+俩
+俄
+偏
+涣
+竹
+噱
+皙
+佰
+渚
+唧
+斡
+#
+镉
+刀
+崎
+筐
+佣
+夭
+贰
+肴
+峙
+哔
+艿
+匐
+牺
+镛
+缘
+仡
+嫡
+劣
+枸
+堀
+梨
+簿
+鸭
+蒸
+亦
+稽
+浴
+{
+衢
+束
+槲
+j
+阁
+揍
+疥
+棋
+潋
+聪
+窜
+乓
+睛
+插
+冉
+阪
+苍
+搽
+「
+蟾
+螟
+幸
+仇
+樽
+撂
+慢
+跤
+幔
+俚
+淅
+覃
+觊
+溶
+妖
+帛
+侨
+曰
+妾
+泗
+:
+瀘
+風
+(
+)
+紅
+紗
+瑭
+雲
+頭
+鶏
+財
+許
+樂
+焗
+麗
+—
+;
+滙
+東
+榮
+繪
+興
+…
+門
+業
+楊
+國
+顧
+盤
+寳
+龍
+鳳
+島
+誌
+緣
+結
+銭
+萬
+勝
+祎
+璟
+優
+歡
+臨
+時
+購
+★
+藍
+昇
+鐵
+觀
+勅
+農
+聲
+畫
+兿
+術
+發
+劉
+記
+專
+耑
+園
+書
+壴
+種
+●
+褀
+號
+銀
+匯
+敟
+锘
+葉
+橪
+廣
+進
+蒄
+鑽
+阝
+祙
+貢
+鍋
+豊
+夬
+喆
+團
+閣
+開
+燁
+賓
+館
+酡
+沔
+順
+硚
+劵
+饸
+陽
+車
+湓
+復
+萊
+氣
+軒
+華
+堃
+迮
+纟
+戶
+馬
+學
+裡
+電
+嶽
+獨
+燘
+袪
+環
+臺
+灣
+専
+賣
+孖
+聖
+攝
+線
+傢
+俬
+夢
+達
+莊
+喬
+貝
+薩
+劍
+羅
+壓
+棛
+饦
+尃
+璈
+囍
+醫
+鷄
+髙
+嬰
+啓
+約
+隹
+潔
+賴
+藝
+寶
+籣
+麺
+嶺
+√
+義
+網
+峩
+長
+∧
+魚
+機
+構
+②
+鳯
+偉
+㙟
+畵
+鴿
+詩
+溝
+嚞
+屌
+藔
+佧
+玥
+蘭
+織
+點
+砭
+鴨
+鋪
+銘
+廳
+弍
+創
+湯
+坶
+℃
+卩
+骝
+烜
+荘
+當
+潤
+扞
+係
+懷
+碶
+钅
+蚨
+讠
+☆
+叢
+爲
+埗
+涫
+塗
+→
+楽
+現
+鯨
+愛
+瑪
+鈺
+忄
+悶
+藥
+飾
+樓
+視
+孬
+燚
+苪
+師
+①
+丼
+锽
+韓
+標
+兒
+閏
+匋
+張
+漢
+髪
+會
+閑
+檔
+習
+裝
+の
+峯
+菘
+輝
+雞
+釣
+億
+浐
+姌
+饹
+晞
+廰
+嵯
+鷹
+負
+飲
+絲
+冚
+楗
+澤
+綫
+區
+←
+質
+靑
+揚
+③
+滬
+統
+産
+協
+﹑
+乸
+畐
+經
+運
+際
+洺
+岽
+為
+粵
+諾
+崋
+豐
+碁
+齋
+誠
+訂
+勑
+雙
+陳
+無
+泩
+媄
+夌
+刂
+嘢
+耄
+燴
+暃
+壽
+媽
+靈
+抻
+體
+唻
+冮
+甹
+鎮
+錦
+蜛
+蠄
+尓
+駕
+戀
+飬
+逹
+倫
+貴
+極
+寬
+磚
+嶪
+職
+間
+剎
+伈
+課
+飛
+橋
+瘊
+№
+譜
+骓
+圗
+滘
+縣
+粿
+咅
+養
+濤
+彳
+Ⅱ
+啰
+㴪
+見
+矞
+薬
+糁
+邨
+鲮
+顔
+罱
+選
+話
+贏
+氪
+俵
+競
+瑩
+繡
+枱
+綉
+獅
+爾
+™
+麵
+戋
+淩
+徳
+個
+劇
+場
+務
+簡
+寵
+實
+膠
+轱
+圖
+築
+嘣
+樹
+㸃
+營
+耵
+孫
+饃
+鄺
+飯
+麯
+遠
+輸
+坫
+孃
+乚
+閃
+鏢
+㎡
+題
+廠
+關
+↑
+爺
+將
+軍
+連
+篦
+覌
+參
+箸
+窠
+棽
+寕
+夀
+爰
+歐
+呙
+閥
+頡
+熱
+雎
+垟
+裟
+凬
+勁
+帑
+馕
+夆
+疌
+枼
+馮
+貨
+蒤
+樸
+彧
+旸
+靜
+龢
+暢
+㐱
+鳥
+珺
+鏡
+灡
+爭
+堷
+廚
+騰
+診
+蘇
+褔
+凱
+頂
+豕
+亞
+帥
+嘬
+⊥
+仺
+桖
+複
+饣
+絡
+穂
+顏
+棟
+納
+濟
+親
+設
+計
+攵
+埌
+烺
+頤
+燦
+蓮
+撻
+節
+講
+濱
+濃
+娽
+洳
+朿
+燈
+鈴
+護
+膚
+铔
+過
+補
+坋
+闿
+餘
+缐
+铞
+貿
+铪
+桼
+趙
+鍊
+㐂
+垚
+菓
+揸
+捲
+鐘
+滏
+爍
+輪
+燜
+鴻
+鮮
+動
+鹞
+鷗
+丄
+慶
+鉌
+翥
+飮
+腸
+漁
+覺
+來
+熘
+昴
+翏
+鲱
+圧
+鄉
+萭
+頔
+爐
+嫚
+貭
+類
+聯
+幛
+輕
+訓
+鑒
+夋
+锨
+芃
+珣
+扙
+嵐
+銷
+處
+語
+誘
+苝
+歸
+儀
+燒
+楿
+內
+粢
+葒
+奧
+麥
+礻
+滿
+蠔
+穵
+瞭
+態
+鱬
+榞
+硂
+鄭
+黃
+煙
+祐
+奓
+逺
+瑄
+獲
+聞
+薦
+讀
+這
+樣
+決
+問
+啟
+們
+執
+説
+轉
+單
+隨
+唘
+帶
+倉
+庫
+還
+贈
+尙
+皺
+■
+餅
+產
+∈
+報
+狀
+楓
+賠
+琯
+嗮
+禮
+傳
+≤
+嗞
+≥
+換
+咭
+↓
+曬
+応
+寫
+終
+様
+純
+費
+療
+聨
+凍
+壐
+郵
+黒
+∫
+製
+塊
+調
+軽
+確
+撃
+級
+馴
+Ⅲ
+涇
+繹
+數
+碼
+證
+狒
+処
+劑
+晧
+賀
+衆
+櫥
+兩
+陰
+絶
+對
+鯉
+憶
+◎
+蕒
+煖
+頓
+測
+試
+鼽
+僑
+碩
+妝
+帯
+≈
+鐡
+舖
+權
+喫
+倆
+該
+悅
+俫
+貼
+淨
+濕
+針
+適
+備
+給
+謢
+強
+觸
+衛
+與
+⊙
+緯
+變
+殺
+∩
+幚
+價
+▲
+離
+飄
+烏
+関
+閟
+邏
+輯
+鍵
+驗
+訣
+導
+歷
+屆
+層
+▼
+儱
+錄
+熳
+艦
+吋
+錶
+辧
+飼
+顯
+④
+禦
+販
+気
+対
+枰
+閩
+紀
+幹
+瞓
+貊
+淚
+△
+眞
+墊
+獻
+褲
+縫
+緑
+亜
+鉅
+餠
+{
+}
+◆
+蘆
+薈
+█
+◇
+溫
+彈
+晳
+粧
+犸
+穩
+訊
+崬
+凖
+熥
+舊
+條
+紋
+圍
+Ⅳ
+筆
+尷
+難
+雜
+錯
+綁
+識
+頰
+鎖
+艶
+□
+殁
+殼
+⑧
+├
+鵬
+糝
+綱
+盜
+饅
+醬
+籤
+蓋
+釀
+鹽
+據
+辦
+彐
+婦
+獸
+鲩
+伱
+蒟
+蒻
+齊
+袆
+腦
+寧
+凈
+妳
+煥
+詢
+偽
+謹
+啫
+鯽
+騷
+鱸
+損
+傷
+鎻
+髮
+買
+冏
+儥
+両
+∞
+載
+喰
+羙
+悵
+燙
+曉
+員
+組
+徹
+艷
+痠
+鋼
+鼙
+縮
+細
+嚒
+爯
+≠
+維
+鱻
+壇
+厍
+帰
+浥
+犇
+薡
+軎
+應
+醜
+刪
+緻
+鶴
+賜
+噁
+軌
+尨
+镔
+鷺
+槗
+彌
+葚
+濛
+請
+溇
+緹
+賢
+訪
+獴
+瑅
+資
+縤
+陣
+蕟
+栢
+韻
+祼
+恁
+伢
+謝
+劃
+涑
+總
+衖
+踺
+砋
+籃
+駿
+苼
+瘋
+昽
+紡
+驊
+腎
+響
+杋
+剛
+嚴
+禪
+歓
+槍
+傘
+檸
+檫
+炣
+勢
+鏜
+鎢
+銑
+尐
+減
+奪
+惡
+僮
+婭
+臘
+殻
+鉄
+∑
+蛲
+焼
+緖
+續
+紹
+懮
+⑤
+⑥
+⑦
+媪
+韂
+⑨
+⑩
+觽
+髃
+遽
+骃
+頉
+狎
+曩
+苌
+弒
+赀
+娡
+赟
+柰
+愍
+畤
+菑
+蚡
+鲧
+踰
+鬻
+笞
+阏
+橐
+哙
+馔
+遑
+圉
+轸
+彘
+驺
+豨
+扃
+逡
+苻
+曪
+焻
+彀
+恚
+絷
+郄
+赍
+薨
+雠
+鴈
+舁
+聒
+蒯
+廪
+闼
+辔
+诳
+黥
+顼
+辇
+筮
+媵
+瞽
+缗
+徼
+鼋
+箧
+龁
+醮
+瘗
+礶
+繇
+檄
+僖
+卬
+爇
+髡
+儣
+驩
+捽
+贽
+牝
+杼
+噫
+缯
+赧
+诮
+瘳
+獘
+篃
+絜
+杓
+溍
+笥
+鸱
+觥
+椟
+溲
+鞫
+猢
+笄
+翕
+嗥
+卺
+夡
+奭
+棂
+樗
+狲
+怙
+哂
+抟
+轵
+彊
+嬖
+僦
+裰
+舄
+拊
+旃
+俛
+瘥
+禳
+愆
+陬
+墀
+聩
+僊
+眛
+阍
+毐
+刭
+喾
+唿
+缑
+迨
+愦
+牖
+嚭
+邾
+悒
+殽
+斫
+兕
+铙
+镪
+踣
+胙
+臱
+骈
+旄
+豢
+帔
+僭
+忤
+棹
+诎
+氇
+獾
+殂
+倨
+詈
+頫
+掾
+鸩
+氆
+辎
+罴
+鄜
+珪
+曷
+膑
+牂
+捱
+怵
+怛
+觌
+舂
+廨
+怍
+欷
+汧
+鼍
+喟
+殓
+蓺
+奁
+鄗
+悝
+袴
+僇
+酹
+搒
+跽
+姁
+鞮
+纥
+梃
+卮
+肣
+湎
+揄
+迕
+汜
+髫
+炷
+汭
+挈
+蝄
+噙
+歔
+撺
+欤
+冑
+蹻
+鲠
+傒
+醦
+隰
+掼
+琖
+駆
+暲
+犒
+甑
+楫
+嫪
+裀
+贳
+劬
+龏
+酎
+逋
+眇
+佻
+幞
+鉏
+磔
+殄
+浞
+衽
+裾
+廛
+芈
+燔
+伛
+縠
+虮
+祓
+筰
+喁
+俦
+褫
+僰
+旻
+搢
+茕
+柈
+绖
+畑
+鳏
+溷
+楯
+祇
+怼
+褊
+缧
+齮
+蓐
+怿
+豳
+犴
+窋
+酆
+谶
+讙
+镬
+襦
+纮
+舐
+黙
+縯
+蹀
+枥
+豸
+揶
+闇
+焒
+匳
+髭
+鲰
+筴
+弁
+揆
+跸
+搠
+缞
+旒
+屣
+孱
+槁
+榼
+夤
+埶
+愠
+欻
+刽
+刎
+骖
+冁
+釂
+麤
+珰
+谮
+埒
+耎
+噉
+蟜
+秏
+呶
+悞
+猱
+镵
+鸮
+趺
+簏
+坼
+凫
+诂
+骀
+谲
+薮
+亶
+黾
+螫
+嶲
+茀
+蓍
+遘
+乩
+褴
+郈
+踽
+叵
+伋
+襆
+伧
+醳
+鄠
+圄
+楮
+迓
+锱
+腉
+纡
+愀
+滈
+杪
+椀
+懑
+劓
+囫
+脔
+巉
+缒
+蝼
+醢
+嗫
+勖
+噭
+猊
+儇
+觳
+缟
+郐
+剜
+徭
+愎
+魋
+殛
+篾
+躞
+纔
+粝
+穑
+钲
+徂
+棓
+囵
+怫
+屦
+歘
+缱
+荦
+愬
+嗛
+铩
+馐
+媸
+曛
+蹰
+窭
+亹
+駹
+嫜
+姞
+赇
+樭
+澙
+笮
+孀
+狻
+榇
+侪
+盍
+堙
+毶
+癀
+镞
+酤
+譄
+薜
+郿
+埽
+阃
+遶
+酺
+辂
+鷪
+貋
+刳
+恫
+挹
+铳
+蒍
+纻
+旘
+耨
+翮
+洹
+坌
+捭
+睒
+轺
+崚
+仫
+庑
+邽
+麃
+縻
+瞋
+螭
+埤
+啁
+讦
+妁
+桞
+匏
+杌
+魑
+峇
+斄
+缶
+酩
+酢
+潏
+韪
+侔
+郪
+踔
+皁
+蜔
+魍
+祧
+粜
+晡
+蹩
+畎
+啱
+窳
+瞾
+舡
+葴
+耋
+鲐
+踧
+遫
+踟
+溊
+觜
+涒
+茔
+谸
+跬
+浿
+轘
+郇
+姮
+奡
+钤
+俅
+獬
+儆
+餍
+胾
+碛
+魭
+喑
+哏
+嶓
+俳
+蟭
+躅
+羖
+羑
+雩
+焜
+鸷
+箦
+铚
+缳
+酇
+罃
+罅
+庳
+褛
+罥
+蒺
+禨
+戕
+岬
+痍
+窴
+邠
+诨
+狁
+顒
+戆
+窎
+儙
+螾
+镕
+跣
+繻
+赜
+槃
+趄
+嬛
+睚
+跹
+壖
+戗
+沬
+畼
+嚋
+珮
+娀
+谇
+欃
+龂
+鲋
+鹆
+郕
+疴
+讧
+惇
+跂
+扢
+赪
+鈇
+釐
+槊
+寘
+暾
+莩
+钹
+犨
+刓
+逶
+澝
+嬃
+黡
+沕
+恝
+洟
+緃
+媢
+霣
+慝
+炟
+皤
+囐
+瞀
+烝
+瓻
+醵
+殪
+樯
+缵
+伻
+玊
+觚
+踯
+噔
+忪
+峣
+搤
+嗾
+鞚
+巂
+蘧
+榱
+锾
+隳
+饟
+馎
+驵
+骘
+髀
+髑
+鮼
+鲔
+鹘
+鹚
+刖
+啐
+嘭
+嚬
+嚰
+圯
+嫄
+寖
+嶶
+帇
+幤
+悫
+慙
+揜
+撝
+昃
+玕
+璆
+玃
+猃
+狃
+祊
+燹
+燠
+熛
+窣
+窬
+糌
+紬
+濩
+飧
+肸
+臬
+荜
+襜
+觖
+豭
+贇
+檠
+檇
+邘
+鄏
+鑙
+氅
+柢
+悭
+鄳
+蒗
+虺
+沇
+薤
+墠
+唶
+骍
+帨
+逖
+鹣
+臛
+鹖
+磛
+弢
+懜
+闟
+遹
+垝
+杅
+笤
+佈
+嚅
+蝮
+谳
+眢
+∵
+枵
+騳
+嗌
+玦
+嗄
+劙
+騠
+蚰
+趱
+珅
+洫
+颀
+趹
+蛩
+馓
+轫
+叡
+蒉
+睪
+漦
+胝
+瘐
+逦
+嶷
+傕
+斲
+瘵
+縢
+渖
+灊
+訇
+歃
+讵
+嫱
+狝
+脁
+堌
+塩
+茞
+嶋
+檑
+佺
+皞
+竩
+暘
+訸
+亷
+皊
+澛
+酂
+壎
+戡
+橦
+嬿
+錡
+柵
+蜾
+鉍
+玱
+虓
+钖
+缲
+鵾
+栌
+鞒
+锒
+樑
+赑
+泘
+垱
+貟
+崙
+沄
+廼
+鲖
+夼
+钌
+擖
+棻
+菂
+淏
+湲
+晙
+鶄
+潆
+箅
+甡
+炘
+溦
+崑
+铓
+芏
+颋
+飏
+俣
+琲
+鎔
+誊
+秈
+筲
+耖
+柟
+玢
+洑
+埇
+琤
+桯
+洧
+湜
+枧
+紘
+伭
+岺
+倥
+郃
+镫
+堉
+埸
+摺
+窺
+捩
+潁
+愷
+氫
+卲
+铴
+霂
+阌
+韜
+玓
+茚
+仂
+冾
+钸
+褙
+硋
+龑
+蘋
+卻
+訚
+硊
+矬
+堨
+镙
+炤
+黉
+燀
+捃
+娒
+沚
+铗
+陸
+轹
+垾
+苈
+絨
+鏮
+茳
+辻
+仵
+澔
+胨
+燉
+浬
+鑑
+轳
+牮
+袷
+炻
+燊
+霈
+垵
+裢
+倖
+貮
+瑀
+芨
+浉
+闶
+鳢
+砩
+铼
+鏐
+瑸
+筘
+濬
+钭
+範
+琭
+箨
+昫
+耩
+缡
+岵
+迺
+暎
+蔴
+轾
+糸
+塥
+馇
+圊
+睥
+鈊
+铫
+俤
+砟
+韨
+鶯
+塅
+犄
+矼
+骉
+翃
+璠
+鋆
+牤
+湧
+劢
+瑱
+圬
+菉
+镡
+崟
+笪
+廘
+硐
+辚
+囝
+滹
+埏
+俙
+靭
+琇
+聶
+澴
+蘅
+褡
+笳
+桫
+烔
+磙
+诖
+倞
+鞥
+璘
+樘
+苧
+郉
+翀
+焩
+酴
+曌
+夿
+劼
+饫
+掛
+蔵
+枟
+鮦
+麴
+岠
+焓
+缷
+駜
+漴
+舣
+蛱
+凃
+翚
+婥
+銛
+禇
+圪
+柃
+艽
+镆
+橺
+鞔
+舨
+蝥
+钫
+漈
+鄧
+玎
+洸
+蒨
+驖
+贶
+谠
+舳
+蒔
+僕
+棬
+側
+锞
+頣
+琎
+锍
+鉮
+崾
+浛
+埝
+邙
+甍
+硎
+菰
+蓁
+浯
+韡
+苳
+硭
+鎳
+翾
+鷇
+艋
+鹍
+禢
+埴
+昉
+桴
+査
+琍
+垐
+忭
+枨
+釭
+瘛
+淠
+漷
+泃
+蚈
+妉
+舾
+∮
+沺
+撖
+菭
+奤
+犭
+竝
+骢
+湔
+锜
+嶝
+挏
+沨
+蕻
+朊
+枘
+梶
+逷
+顸
+竑
+踅
+佾
+瑢
+鹁
+朣
+屺
+闩
+槠
+甦
+玠
+玭
+勍
+汎
+迴
+厐
+剀
+胓
+勔
+侉
+澥
+鼐
+嘏
+仉
+柽
+澂
+塚
+陔
+堽
+玙
+伲
+鋉

+ 435 - 0
ocr/tools/infer/predict_det_pytorch.py

@@ -0,0 +1,435 @@
+# encoding=utf8
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import io
+import logging
+import os
+import sys
+sys.path.append(os.path.dirname(os.path.abspath(__file__)) + "/../../../")
+import requests
+from format_convert import _global
+from format_convert.utils import judge_error_code, log, namespace_to_dict, get_platform, file_lock
+
+os.environ["FLAGS_allocator_strategy"] = 'auto_growth'
+import cv2
+import numpy as np
+import time
+import sys
+os.environ['FLAGS_eager_delete_tensor_gb'] = '0'
+import ocr.tools.infer.utility as utility
+from ocr.ppocr.utils.logging import get_logger
+from ocr.ppocr.utils.utility import get_image_file_list, check_and_read_gif
+from ocr.ppocr.data import create_operators, transform
+from ocr.ppocr.postprocess import build_post_process
+from format_convert.max_compute_config import max_compute
+
+import torch
+from torch import nn
+from ocr.tools.infer.torch_det_model import DB_ResNet_18
+import gc
+
+MAX_COMPUTE = max_compute
+logger = get_logger()
+
+
class TextDetector(object):
    """Text detector backed by a local PyTorch DB_ResNet_18 model.

    Pipeline: resize/normalize the input image, forward it through the
    detection network, then post-process the output probability map into
    quadrilateral text boxes.
    """

    # Legacy counter kept from the old Paddle predictor interface.
    shrink_memory_count = 0

    def __init__(self, args):
        """Build pre/post-processing operators and load the detection model.

        :param args: argparse.Namespace carrying the det_* configuration
                     (algorithm name, model path, thresholds, ...).
        """
        self.args = args
        self.det_algorithm = args.det_algorithm
        pre_process_list = [{
            'DetResizeForTest': None
        }, {
            'NormalizeImage': {
                'std': [0.229, 0.224, 0.225],
                'mean': [0.485, 0.456, 0.406],
                'scale': '1./255.',
                'order': 'hwc'
            }
        }, {
            'ToCHWImage': None
        }, {
            'KeepKeys': {
                'keep_keys': ['image', 'shape']
            }
        }]
        postprocess_params = {}
        if self.det_algorithm == "DB":
            postprocess_params['name'] = 'DBPostProcess'
            postprocess_params["thresh"] = args.det_db_thresh
            postprocess_params["box_thresh"] = args.det_db_box_thresh
            postprocess_params["max_candidates"] = 1000
            postprocess_params["unclip_ratio"] = args.det_db_unclip_ratio
            postprocess_params["use_dilation"] = args.use_dilation
        elif self.det_algorithm == "EAST":
            postprocess_params['name'] = 'EASTPostProcess'
            postprocess_params["score_thresh"] = args.det_east_score_thresh
            postprocess_params["cover_thresh"] = args.det_east_cover_thresh
            postprocess_params["nms_thresh"] = args.det_east_nms_thresh
        elif self.det_algorithm == "SAST":
            pre_process_list[0] = {
                'DetResizeForTest': {
                    'resize_long': args.det_limit_side_len
                }
            }
            postprocess_params['name'] = 'SASTPostProcess'
            postprocess_params["score_thresh"] = args.det_sast_score_thresh
            postprocess_params["nms_thresh"] = args.det_sast_nms_thresh
            self.det_sast_polygon = args.det_sast_polygon
            if self.det_sast_polygon:
                postprocess_params["sample_pts_num"] = 6
                postprocess_params["expand_scale"] = 1.2
                postprocess_params["shrink_ratio_of_width"] = 0.2
            else:
                postprocess_params["sample_pts_num"] = 2
                postprocess_params["expand_scale"] = 1.0
                postprocess_params["shrink_ratio_of_width"] = 0.3
        else:
            logger.info("unknown det_algorithm:{}".format(self.det_algorithm))
            sys.exit(0)

        self.preprocess_op = create_operators(pre_process_list)
        self.postprocess_op = build_post_process(postprocess_params)

        det_model_path = args.det_model_dir
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        model = DB_ResNet_18()
        model_state_dict = torch.load(det_model_path, self.device)['state_dict']
        if str(self.device) == 'cpu':
            # On CPU, push near-zero weights out of the subnormal range --
            # presumably to avoid slow denormal float arithmetic during
            # inference (TODO confirm the measured speed-up).
            for name, value in model_state_dict.items():
                if get_platform() != "Windows":
                    value = value.double()
                value = torch.where((value < 1.0e-23) & (value > 0.0), 1.0e-23, value)
                value = torch.where((value > -1.0e-23) & (value < 0.0), -1.0e-23, value)
                model_state_dict[name] = value

        model.load_state_dict(model_state_dict)

        self.predictor = model
        self.predictor.to(self.device)
        self.predictor.eval()

    def order_points_clockwise(self, pts):
        """Order 4 points as top-left, top-right, bottom-right, bottom-left.

        reference from: https://github.com/jrosebr1/imutils/blob/master/imutils/perspective.py
        """
        # sort the points based on their x-coordinates
        xSorted = pts[np.argsort(pts[:, 0]), :]

        # grab the left-most and right-most points from the sorted
        # x-coordinate points
        leftMost = xSorted[:2, :]
        rightMost = xSorted[2:, :]

        # now, sort the left-most coordinates according to their
        # y-coordinates so we can grab the top-left and bottom-left
        # points, respectively
        leftMost = leftMost[np.argsort(leftMost[:, 1]), :]
        (tl, bl) = leftMost

        rightMost = rightMost[np.argsort(rightMost[:, 1]), :]
        (tr, br) = rightMost

        rect = np.array([tl, tr, br, bl], dtype="float32")
        return rect

    def clip_det_res(self, points, img_height, img_width):
        """Clamp box vertices (in place) to the image bounds."""
        for pno in range(points.shape[0]):
            points[pno, 0] = int(min(max(points[pno, 0], 0), img_width - 1))
            points[pno, 1] = int(min(max(points[pno, 1], 0), img_height - 1))
        return points

    def filter_tag_det_res(self, dt_boxes, image_shape):
        """Normalize vertex order, clip to the image and drop tiny boxes.

        Boxes whose clipped width or height is <= 3 px are discarded.
        """
        img_height, img_width = image_shape[0:2]
        dt_boxes_new = []
        for box in dt_boxes:
            box = self.order_points_clockwise(box)
            box = self.clip_det_res(box, img_height, img_width)
            rect_width = int(np.linalg.norm(box[0] - box[1]))
            rect_height = int(np.linalg.norm(box[0] - box[3]))
            if rect_width <= 3 or rect_height <= 3:
                continue
            dt_boxes_new.append(box)
        dt_boxes = np.array(dt_boxes_new)
        return dt_boxes

    def filter_tag_det_res_only_clip(self, dt_boxes, image_shape):
        """Clip boxes to the image bounds without reordering or filtering."""
        img_height, img_width = image_shape[0:2]
        dt_boxes_new = []
        for box in dt_boxes:
            box = self.clip_det_res(box, img_height, img_width)
            dt_boxes_new.append(box)
        dt_boxes = np.array(dt_boxes_new)
        return dt_boxes

    def __call__(self, img):
        """Detect text boxes in *img*.

        :param img: HWC BGR numpy image.
        :return: (dt_boxes, elapse) -- an (N, 4, 2) array of quads and the
                 prediction time in seconds, or (None, 0) if preprocessing
                 rejects the image.
        :raises RuntimeError: re-raised from the model forward pass (e.g.
                 GPU out of memory) after the CUDA cache is cleared.
        """
        ori_im = img.copy()
        data = {'image': img}
        data = transform(data, self.preprocess_op)
        img, shape_list = data
        if img is None:
            return None, 0
        img = np.expand_dims(img, axis=0)
        shape_list = np.expand_dims(shape_list, axis=0)
        img = img.copy()
        starttime = time.time()

        img = torch.from_numpy(img).float()
        img = img.to(self.device)
        try:
            # Serialize prediction of large images through a file lock so
            # several workers cannot exhaust GPU memory at the same time.
            if ori_im.shape[0] > 1024 and ori_im.shape[1] > 1024 and get_platform() != "Windows" and not max_compute:
                time2 = time.time()
                lock_file_sub = 'ocr'
                lock_file = os.path.abspath(os.path.dirname(__file__)) + "/" + lock_file_sub + ".lock"
                f = file_lock(lock_file)
                log("get file_lock " + lock_file_sub + " time " + str(time.time()-time2))
                try:
                    with torch.no_grad():
                        out = self.predictor(img)
                finally:
                    # Always release the lock, even if prediction raises;
                    # the old code leaked the lock file handle on error.
                    f.close()
            else:
                with torch.no_grad():
                    out = self.predictor(img)
        except RuntimeError:
            log("ocr/tools/infer/predict_det.py predict.run error! maybe no gpu memory!")
            log("predictor shrink memory!")
            if str(self.device) != 'cpu':
                torch.cuda.empty_cache()
                gc.collect()
            # Bare raise keeps the original exception and traceback; the
            # old `raise RuntimeError` discarded both.
            raise

        out = out.cpu().numpy()

        preds = {}
        preds['maps'] = out

        post_result = self.postprocess_op(preds, shape_list)
        dt_boxes = post_result[0]['points']
        # det_sast_polygon only exists when the SAST branch ran in __init__;
        # the short-circuit on det_algorithm keeps this safe for DB/EAST.
        if self.det_algorithm == "SAST" and self.det_sast_polygon:
            dt_boxes = self.filter_tag_det_res_only_clip(dt_boxes, ori_im.shape)
        else:
            dt_boxes = self.filter_tag_det_res(dt_boxes, ori_im.shape)
        elapse = time.time() - starttime

        # Proactively release cached GPU memory after each prediction.
        if str(self.device) != 'cpu':
            torch.cuda.empty_cache()

        return dt_boxes, elapse
+
+
class TextDetector2(object):
    """DB text detector that delegates the forward pass to a remote GPU
    worker (via redis) and performs pre/post-processing locally.

    Only the 'DB' algorithm is supported; any other value exits the process.
    """

    # Kept for interface parity with TextDetector.
    shrink_memory_count = 0

    def __init__(self, args):
        """Build the local preprocessing operators and the DB postprocessor."""
        self.args = args
        self.det_algorithm = args.det_algorithm
        pre_process_list = [{
            'DetResizeForTest': None
        }, {
            'NormalizeImage': {
                'std': [0.229, 0.224, 0.225],
                'mean': [0.485, 0.456, 0.406],
                'scale': '1./255.',
                'order': 'hwc'
            }
        }, {
            'ToCHWImage': None
        }, {
            'KeepKeys': {
                'keep_keys': ['image', 'shape']
            }
        }]
        if self.det_algorithm != "DB":
            logger.info("unknown det_algorithm:{}".format(self.det_algorithm))
            sys.exit(0)
        postprocess_params = {
            'name': 'DBPostProcess',
            "thresh": args.det_db_thresh,
            "box_thresh": args.det_db_box_thresh,
            "max_candidates": 1000,
            "unclip_ratio": args.det_db_unclip_ratio,
            "use_dilation": args.use_dilation,
        }

        self.preprocess_op = create_operators(pre_process_list)
        self.postprocess_op = build_post_process(postprocess_params)

    def order_points_clockwise(self, pts):
        """Order 4 points as top-left, top-right, bottom-right, bottom-left.

        reference from: https://github.com/jrosebr1/imutils/blob/master/imutils/perspective.py
        """
        # Split the x-sorted points into the two left-most and two
        # right-most vertices, then rank each pair by y to fix corners.
        by_x = pts[np.argsort(pts[:, 0]), :]
        left_pair = by_x[:2, :]
        right_pair = by_x[2:, :]
        tl, bl = left_pair[np.argsort(left_pair[:, 1]), :]
        tr, br = right_pair[np.argsort(right_pair[:, 1]), :]
        return np.array([tl, tr, br, bl], dtype="float32")

    def clip_det_res(self, points, img_height, img_width):
        """Clamp box vertices (in place) to the image bounds."""
        for idx in range(points.shape[0]):
            x, y = points[idx, 0], points[idx, 1]
            points[idx, 0] = int(min(max(x, 0), img_width - 1))
            points[idx, 1] = int(min(max(y, 0), img_height - 1))
        return points

    def filter_tag_det_res(self, dt_boxes, image_shape):
        """Normalize vertex order, clip to the image and drop tiny boxes."""
        img_height, img_width = image_shape[0:2]
        kept = []
        for quad in dt_boxes:
            quad = self.clip_det_res(self.order_points_clockwise(quad),
                                     img_height, img_width)
            rect_w = int(np.linalg.norm(quad[0] - quad[1]))
            rect_h = int(np.linalg.norm(quad[0] - quad[3]))
            # Discard degenerate boxes (<= 3 px on either side).
            if rect_w > 3 and rect_h > 3:
                kept.append(quad)
        return np.array(kept)

    def filter_tag_det_res_only_clip(self, dt_boxes, image_shape):
        """Clip boxes to the image bounds without reordering or filtering."""
        img_height, img_width = image_shape[0:2]
        return np.array([self.clip_det_res(quad, img_height, img_width)
                         for quad in dt_boxes])

    def __call__(self, img):
        """Detect text boxes in *img* via the remote GPU interface.

        :param img: HWC BGR numpy image.
        :return: (dt_boxes, elapse), or (None, 0) if preprocessing fails.
        :raises requests.exceptions.RequestException: when the GPU call
                 returns an error code.
        """
        from format_convert.convert_need_interface import from_gpu_interface_redis
        # Local preprocessing.
        original = img.copy()
        batch = transform({'image': img}, self.preprocess_op)
        img, shape_list = batch
        if img is None:
            return None, 0
        img = np.expand_dims(img, axis=0).copy()
        shape_list = np.expand_dims(shape_list, axis=0)
        started = time.time()

        # Remote forward pass on the GPU worker.
        payload = {"inputs": img,
                   "args": str(namespace_to_dict(self.args)),
                   "md5": _global.get("md5")}
        result = from_gpu_interface_redis(payload, model_type="ocr", predictor_type="det")
        if judge_error_code(result):
            logging.error("from_gpu_interface failed! " + str(result))
            raise requests.exceptions.RequestException

        remote_preds = result.get("preds")
        gpu_time = result.get("gpu_time")

        # Local postprocessing into quadrilateral boxes.
        if self.det_algorithm != 'DB':
            raise NotImplementedError
        preds = {'maps': remote_preds}

        post_result = self.postprocess_op(preds, shape_list)
        boxes = self.filter_tag_det_res(post_result[0]['points'], original.shape)
        elapse = time.time() - started

        log("ocr model predict time - det - time " + str(gpu_time))
        return boxes, elapse
+
+
if __name__ == "__main__":
    # CLI entry point: run detection on every image under --image_dir and
    # save a visualisation of the detected boxes per image.
    cli_args = utility.parse_args()
    detector = TextDetector(cli_args)
    save_dir = "./inference_results"
    if not os.path.exists(save_dir):
        os.makedirs(save_dir)
    processed = 0
    accumulated = 0
    for image_file in get_image_file_list(cli_args.image_dir):
        img, is_gif = check_and_read_gif(image_file)
        if not is_gif:
            img = cv2.imread(image_file)
        if img is None:
            logger.info("error in loading image:{}".format(image_file))
            continue
        boxes, cost = detector(img)
        # The first prediction is excluded from the average (warm-up).
        if processed > 0:
            accumulated += cost
        processed += 1
        logger.info("Predict time of {}: {}".format(image_file, cost))
        drawn = utility.draw_text_det_res(boxes, image_file)
        out_name = os.path.split(image_file)[-1]
        out_path = os.path.join(save_dir, "det_res_{}".format(out_name))
        cv2.imwrite(out_path, drawn)
        logger.info("The visualized image saved in {}".format(out_path))
    if processed > 1:
        logger.info("Avg Time: {}".format(accumulated / (processed - 1)))

+ 464 - 0
ocr/tools/infer/predict_rec_pytorch.py

@@ -0,0 +1,464 @@
+# encoding=utf8
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import io
+import logging
+import os
+import sys
+# __dir__ = os.path.dirname(os.path.abspath(__file__))
+import zlib
+
+import requests
+# sys.path.append(__dir__)
+# sys.path.append(os.path.abspath(os.path.join(__dir__, '../..')))
+
+sys.path.append(os.path.dirname(os.path.abspath(__file__)) + "/../../../")
+os.environ["FLAGS_allocator_strategy"] = 'auto_growth'
+import cv2
+import numpy as np
+import math
+import time
+import traceback
+os.environ['FLAGS_eager_delete_tensor_gb'] = '0'
+import paddle
+
+import ocr.tools.infer.utility as utility
+from ocr.ppocr.postprocess import build_post_process
+from ocr.ppocr.utils.logging import get_logger
+from ocr.ppocr.utils.utility import get_image_file_list, check_and_read_gif
+
+from format_convert.utils import judge_error_code, log, namespace_to_dict,get_platform
+from format_convert import _global
+
+import torch
+from torch import nn
+from ocr.tools.infer.torch_rec_model import Rec_ResNet_34
+import gc
+
+logger = get_logger()
+
+
class TextRecognizer(object):
    """CRNN/CTC text recognizer backed by a local PyTorch Rec_ResNet_34 model.

    Crops are batched by similar aspect ratio, resized/normalized to the
    model input shape and decoded with CTCLabelDecode.
    """

    # Legacy counter kept from the old Paddle predictor interface.
    shrink_memory_count = 0

    def __init__(self, args):
        """Load the torch recognition model and build the CTC decoder.

        :param args: argparse.Namespace carrying the rec_* configuration
                     (image shape, char dict path, batch size, ...).
        """
        self.rec_image_shape = [int(v) for v in args.rec_image_shape.split(",")]
        self.character_type = args.rec_char_type
        self.rec_batch_num = args.rec_batch_num
        self.rec_algorithm = args.rec_algorithm
        postprocess_params = {
            'name': 'CTCLabelDecode',
            "character_type": args.rec_char_type,
            "character_dict_path": args.rec_char_dict_path,
            # Deliberately overrides args.use_space_char: spaces are always
            # dropped by this recognizer.
            "use_space_char": False
        }
        self.postprocess_op = build_post_process(postprocess_params)

        rec_model_path = args.rec_model_dir
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        model = Rec_ResNet_34()
        model_state_dict = torch.load(rec_model_path, self.device)['state_dict']
        if str(self.device) == 'cpu':
            # On CPU, push near-zero weights out of the subnormal range --
            # presumably to avoid slow denormal float arithmetic during
            # inference (TODO confirm the measured speed-up).
            for name, value in model_state_dict.items():
                if get_platform() != "Windows":
                    value = value.double()
                value = torch.where((value < 1.0e-23) & (value > 0.0), 1.0e-23, value)
                value = torch.where((value > -1.0e-23) & (value < 0.0), -1.0e-23, value)
                model_state_dict[name] = value

        model.load_state_dict(model_state_dict)

        self.predictor = model
        self.predictor.to(self.device)
        self.predictor.eval()

    def resize_norm_img(self, img, max_wh_ratio):
        """Resize *img* to the model input height and normalize to [-1, 1].

        :param img: HWC crop with img.shape[2] == rec_image_shape[0].
        :param max_wh_ratio: widest width/height ratio in the current batch;
                             for 'ch' models it sets the padded batch width.
        :return: (C, H, W) float32 array, right-padded with zeros.  When
                 max_wh_ratio < 0.1 (degenerate crop, height more than 10x
                 the width) an all-zero tensor is returned.
        """
        h, w = img.shape[:2]
        imgC, imgH, imgW = self.rec_image_shape
        assert imgC == img.shape[2]
        if max_wh_ratio < 0.1:
            # Skip degenerate crops entirely.  Fix: the old code fell
            # through to the normalization below and crashed with
            # UnboundLocalError on resized_image.
            resized_w = None
            resized_image = None
        else:
            if self.character_type == "ch":
                imgW = int((32 * max_wh_ratio))

            ratio = w / float(h)
            if math.ceil(imgH * ratio) > imgW:
                resized_w = imgW
            else:
                resized_w = int(math.ceil(imgH * ratio))

            try:
                resized_image = cv2.resize(img, (resized_w, imgH))
            except Exception:
                log("predict_rec.py resize_norm_img resize shape " + str((resized_w, imgH, imgW, h, w, ratio, max_wh_ratio)) + ' ' + str(self.rec_image_shape))
                raise
            resized_image = resized_image.astype('float32')
            resized_image = resized_image.transpose((2, 0, 1)) / 255
            resized_image -= 0.5
            resized_image /= 0.5
        padding_im = np.zeros((imgC, imgH, imgW), dtype=np.float32)
        if resized_w is not None:
            padding_im[:, :, 0:resized_w] = resized_image
        return padding_im

    def resize_norm_img_srn(self, img, image_shape):
        """Resize *img* for SRN input: grayscale, left-aligned on a black
        canvas of (imgH, imgW), returned as a (1, H, W) float32 array."""
        imgC, imgH, imgW = image_shape

        img_black = np.zeros((imgH, imgW))
        im_hei = img.shape[0]
        im_wid = img.shape[1]

        # Snap the width to 1x/2x/3x the height, capping at imgW.
        if im_wid <= im_hei * 1:
            img_new = cv2.resize(img, (imgH * 1, imgH))
        elif im_wid <= im_hei * 2:
            img_new = cv2.resize(img, (imgH * 2, imgH))
        elif im_wid <= im_hei * 3:
            img_new = cv2.resize(img, (imgH * 3, imgH))
        else:
            img_new = cv2.resize(img, (imgW, imgH))

        img_np = np.asarray(img_new)
        img_np = cv2.cvtColor(img_np, cv2.COLOR_BGR2GRAY)
        img_black[:, 0:img_np.shape[1]] = img_np
        img_black = img_black[:, :, np.newaxis]

        row, col, c = img_black.shape
        c = 1

        return np.reshape(img_black, (c, row, col)).astype(np.float32)

    def srn_other_inputs(self, image_shape, num_heads, max_text_length):
        """Build the positional and attention-bias tensors SRN expects.

        :return: [encoder_word_pos (1, H/8*W/8, 1) int64,
                  gsrm_word_pos (1, max_text_length, 1) int64,
                  gsrm_slf_attn_bias1/2 (1, num_heads, T, T) float32]
                 where the biases mask the upper/lower triangles with -1e9.
        """
        imgC, imgH, imgW = image_shape
        feature_dim = int((imgH / 8) * (imgW / 8))

        encoder_word_pos = np.array(range(0, feature_dim)).reshape(
            (feature_dim, 1)).astype('int64')
        gsrm_word_pos = np.array(range(0, max_text_length)).reshape(
            (max_text_length, 1)).astype('int64')

        gsrm_attn_bias_data = np.ones((1, max_text_length, max_text_length))
        gsrm_slf_attn_bias1 = np.triu(gsrm_attn_bias_data, 1).reshape(
            [-1, 1, max_text_length, max_text_length])
        gsrm_slf_attn_bias1 = np.tile(
            gsrm_slf_attn_bias1,
            [1, num_heads, 1, 1]).astype('float32') * [-1e9]

        gsrm_slf_attn_bias2 = np.tril(gsrm_attn_bias_data, -1).reshape(
            [-1, 1, max_text_length, max_text_length])
        gsrm_slf_attn_bias2 = np.tile(
            gsrm_slf_attn_bias2,
            [1, num_heads, 1, 1]).astype('float32') * [-1e9]

        encoder_word_pos = encoder_word_pos[np.newaxis, :]
        gsrm_word_pos = gsrm_word_pos[np.newaxis, :]

        return [
            encoder_word_pos, gsrm_word_pos, gsrm_slf_attn_bias1,
            gsrm_slf_attn_bias2
        ]

    def process_image_srn(self, img, image_shape, num_heads, max_text_length):
        """Full SRN preprocessing: normalized image plus auxiliary inputs."""
        norm_img = self.resize_norm_img_srn(img, image_shape)
        norm_img = norm_img[np.newaxis, :]

        [encoder_word_pos, gsrm_word_pos, gsrm_slf_attn_bias1, gsrm_slf_attn_bias2] = \
            self.srn_other_inputs(image_shape, num_heads, max_text_length)

        gsrm_slf_attn_bias1 = gsrm_slf_attn_bias1.astype(np.float32)
        gsrm_slf_attn_bias2 = gsrm_slf_attn_bias2.astype(np.float32)
        encoder_word_pos = encoder_word_pos.astype(np.int64)
        gsrm_word_pos = gsrm_word_pos.astype(np.int64)

        return (norm_img, encoder_word_pos, gsrm_word_pos, gsrm_slf_attn_bias1,
                gsrm_slf_attn_bias2)

    def __call__(self, img_list):
        """Recognize a list of cropped text-line images.

        :param img_list: list of HWC BGR numpy crops.
        :return: (rec_res, elapse) where rec_res[i] is [text, confidence]
                 for img_list[i] and elapse is the total prediction time.
        """
        img_num = len(img_list)
        # Sort crops by aspect ratio so each batch shares a similar width,
        # minimizing padding; results are written back at pre-sort indices.
        width_list = []
        for img in img_list:
            width_list.append(img.shape[1] / float(img.shape[0]))
        indices = np.argsort(np.array(width_list))

        rec_res = [['', 0.0]] * img_num
        batch_num = self.rec_batch_num
        elapse = 0
        for beg_img_no in range(0, img_num, batch_num):
            end_img_no = min(img_num, beg_img_no + batch_num)
            norm_img_batch = []
            max_wh_ratio = 0
            for ino in range(beg_img_no, end_img_no):
                h, w = img_list[indices[ino]].shape[0:2]
                wh_ratio = w * 1.0 / h
                max_wh_ratio = max(max_wh_ratio, wh_ratio)
            for ino in range(beg_img_no, end_img_no):
                if self.rec_algorithm != "SRN":
                    norm_img = self.resize_norm_img(img_list[indices[ino]],
                                                    max_wh_ratio)
                    norm_img = norm_img[np.newaxis, :]
                    norm_img_batch.append(norm_img)
                else:
                    # SRN preprocessing is not wired into the torch path.
                    pass
            norm_img_batch = np.concatenate(norm_img_batch)
            norm_img_batch = norm_img_batch.copy()

            if self.rec_algorithm == "SRN":
                # SRN inference is not supported by the torch predictor.
                pass
            else:
                starttime = time.time()
                tensor = torch.from_numpy(norm_img_batch).float()
                start_time = time.time()
                tensor = tensor.to(self.device)
                with torch.no_grad():
                    out = self.predictor(tensor)
                logging.info("ocr model predict time - rec" + str(time.time()-start_time))
                out = out.cpu().numpy()
                preds = out

            rec_result = self.postprocess_op(preds)
            for rno in range(len(rec_result)):
                # Map batch-local results back to original list positions.
                rec_res[indices[beg_img_no + rno]] = rec_result[rno]
            elapse += time.time() - starttime
            # Release cached GPU memory between batches.
            if str(self.device) != 'cpu':
                torch.cuda.empty_cache()
        return rec_res, elapse
+
+
class TextRecognizer2(object):
    """Text recognizer that offloads model inference to a remote GPU service.

    Crops are preprocessed locally (resize / normalize / pad), shipped in
    batches to the GPU interface over redis, and the raw predictions are
    decoded locally with the CTC post-processor.
    """

    shrink_memory_count = 0

    def __init__(self, args):
        """Store recognition settings and build the CTC decoder; no local
        predictor is created — inference happens on the GPU service."""
        self.rec_image_shape = [int(dim) for dim in args.rec_image_shape.split(",")]
        self.character_type = args.rec_char_type
        self.rec_batch_num = args.rec_batch_num
        self.rec_algorithm = args.rec_algorithm
        self.postprocess_op = build_post_process({
            'name': 'CTCLabelDecode',
            "character_type": args.rec_char_type,
            "character_dict_path": args.rec_char_dict_path,
            "use_space_char": args.use_space_char,
        })
        self.args = args

    def resize_norm_img(self, img, max_wh_ratio):
        """Resize *img* to the recognizer input height, normalize to [-1, 1]
        and right-pad with zeros up to the batch-wide target width."""
        target_c, target_h, target_w = self.rec_image_shape
        assert target_c == img.shape[2]
        if self.character_type == "ch":
            # Chinese model: target width scales with the widest crop in the batch.
            target_w = int((32 * max_wh_ratio))
        h, w = img.shape[:2]
        aspect = w / float(h)
        if math.ceil(target_h * aspect) > target_w:
            resized_w = target_w
        else:
            resized_w = int(math.ceil(target_h * aspect))
        scaled = cv2.resize(img, (resized_w, target_h)).astype('float32')
        scaled = scaled.transpose((2, 0, 1)) / 255
        scaled -= 0.5
        scaled /= 0.5
        padded = np.zeros((target_c, target_h, target_w), dtype=np.float32)
        padded[:, :, 0:resized_w] = scaled
        return padded

    def __call__(self, img_list):
        """Recognize text in every crop of *img_list* via the GPU interface.

        Returns (rec_res, elapse): one [text, score] pair per input crop in
        the original order, plus the accumulated wall-clock time.

        Raises requests.exceptions.RequestException when the GPU interface
        reports an error code.
        """
        from format_convert.convert_need_interface import from_gpu_interface_redis
        img_num = len(img_list)
        # Sorting by aspect ratio lets each batch pad to a similar width.
        ratios = [im.shape[1] / float(im.shape[0]) for im in img_list]
        indices = np.argsort(np.array(ratios))

        rec_res = [['', 0.0] for _ in range(img_num)]
        batch_size = self.rec_batch_num
        elapse = 0
        all_gpu_time = 0
        for start in range(0, img_num, batch_size):
            end = min(img_num, start + batch_size)
            # The widest crop in the batch determines the padded width.
            max_wh_ratio = 0
            for pos in range(start, end):
                h, w = img_list[indices[pos]].shape[0:2]
                max_wh_ratio = max(max_wh_ratio, w * 1.0 / h)
            batch = [
                self.resize_norm_img(img_list[indices[pos]], max_wh_ratio)[np.newaxis, :]
                for pos in range(start, end)
            ]
            norm_img_batch = np.concatenate(batch).copy()
            starttime = time.time()

            # Remote inference on the GPU service.
            _dict = {"inputs": norm_img_batch, "args": str(namespace_to_dict(self.args)), "md5": _global.get("md5")}
            result = from_gpu_interface_redis(_dict, model_type="ocr", predictor_type="rec")
            if judge_error_code(result):
                logging.error("from_gpu_interface failed! " + str(result))
                raise requests.exceptions.RequestException

            preds = result.get("preds")
            gpu_time = result.get("gpu_time")
            all_gpu_time += round(gpu_time, 2)

            # Decode CTC output and scatter results back to original order.
            for offset, decoded in enumerate(self.postprocess_op(preds)):
                rec_res[indices[start + offset]] = decoded
            elapse += time.time() - starttime

        log("ocr model predict time - rec - time " + str(all_gpu_time) + " - num " + str(img_num))
        return rec_res, elapse
+
+
def main(args):
    """Run TextRecognizer over every image found under ``args.image_dir``.

    Unreadable images are skipped with a log line; a recognition failure
    logs the traceback plus a hint about fixed-shape TPS models and exits.
    """
    image_file_list = get_image_file_list(args.image_dir)
    text_recognizer = TextRecognizer(args)
    valid_image_file_list, img_list = [], []
    for image_file in image_file_list:
        # GIFs need a dedicated reader; everything else goes through cv2.
        img, flag = check_and_read_gif(image_file)
        if not flag:
            img = cv2.imread(image_file)
        if img is None:
            logger.info("error in loading image:{}".format(image_file))
            continue
        valid_image_file_list.append(image_file)
        img_list.append(img)
    try:
        rec_res, predict_time = text_recognizer(img_list)
    except:
        logger.info(traceback.format_exc())
        logger.info(
            "ERROR!!!! \n"
            "Please read the FAQ:https://github.com/PaddlePaddle/PaddleOCR#faq \n"
            "If your model has tps module:  "
            "TPS does not support variable shape.\n"
            "Please set --rec_image_shape='3,32,100' and --rec_char_type='en' ")
        exit()
    for image_file, result in zip(valid_image_file_list, rec_res):
        logger.info("Predicts of {}:{}".format(image_file, result))
    logger.info("Total predict time for {} images, cost: {:.3f}".format(
        len(img_list), predict_time))
+
+
# CLI entry point: parse the shared inference arguments and run recognition.
if __name__ == "__main__":
    main(utility.parse_args())

+ 64 - 3
ocr/tools/infer/predict_system.py

@@ -28,12 +28,15 @@ import time
 from PIL import Image
 os.environ['FLAGS_eager_delete_tensor_gb'] = '0'
 import utility as utility
-import ocr.tools.infer.predict_rec as predict_rec
-import ocr.tools.infer.predict_det as predict_det
+# import ocr.tools.infer.predict_rec as predict_rec
+import ocr.tools.infer.predict_rec_pytorch as predict_rec # pytorch rec model
+# import ocr.tools.infer.predict_det as predict_det
+import ocr.tools.infer.predict_det_pytorch as predict_det # pytorch det model
 import ocr.tools.infer.predict_cls as predict_cls
 from ocr.ppocr.utils.utility import get_image_file_list, check_and_read_gif
 from ocr.ppocr.utils.logging import get_logger
 from ocr.tools.infer.utility import draw_ocr_box_txt
+from format_convert.utils import has_intersection
 
 logger = get_logger()
 
@@ -76,7 +79,8 @@ class TextSystem(object):
             borderMode=cv2.BORDER_REPLICATE,
             flags=cv2.INTER_CUBIC)
         dst_img_height, dst_img_width = dst_img.shape[0:2]
-        if dst_img_height * 1.0 / dst_img_width >= 1.5:
+        # if dst_img_height * 1.0 / dst_img_width >= 1.5:
+        if dst_img_height * 1.0 / dst_img_width >= 2.0:
             dst_img = np.rot90(dst_img)
         return dst_img
 
@@ -87,6 +91,8 @@ class TextSystem(object):
             logger.info(bno, rec_res[bno])
 
     def __call__(self, img):
+        # cv2.imshow('img',img)
+        # cv2.waitKey(0)
         ori_im = img.copy()
         dt_boxes, elapse = self.text_detector(img)
         logger.info("dt_boxes num : {}, elapse : {}".format(
@@ -112,6 +118,12 @@ class TextSystem(object):
             len(rec_res), elapse))
         # self.print_draw_crop_rec_res(img_crop_list, rec_res)
         filter_boxes, filter_rec_res = [], []
+
+        # dt_boxes 上下重合检测框修正
+        # t1 = time.time()
+        dt_boxes = boxex_points_fixup(dt_boxes)
+        # print("boxex_points_fixup cost:",time.time()-t1)
+
         for box, rec_reuslt in zip(dt_boxes, rec_res):
             text, score = rec_reuslt
             if score >= self.drop_score:
@@ -119,6 +131,53 @@ class TextSystem(object):
                 filter_rec_res.append(rec_reuslt)
         return filter_boxes, filter_rec_res
 
def boxex_points_fixup(dt_boxes):
    """Fix vertically overlapping detection boxes in place.

    For every pair of quadrilateral boxes whose polygons intersect and whose
    bottom edge (earlier box) overlaps the top edge (later box) along x by at
    least 55% of the longer edge, both edges are snapped to a shared mid
    y-coordinate so the boxes no longer overlap.

    NOTE(review): assumes ``dt_boxes`` is ordered roughly top-to-bottom, with
    box[2]/box[3] being the bottom corners and box[0]/box[1] the top corners
    of each quad — confirm against the detector's point ordering.

    :param dt_boxes: sequence of 4x2 numpy arrays; mutated in place.
    :return: the same ``dt_boxes`` object.
    """
    # Convert every detection box to an axis-aligned rectangle (kept disabled).
    # for i in range(len(dt_boxes)):
    #     box1 = dt_boxes[i]
    #     x_list = [box1[0][0],box1[1][0],box1[2][0],box1[3][0]]
    #     y_list = [box1[0][1],box1[1][1],box1[2][1],box1[3][1]]
    #     x_max = max(x_list)
    #     x_min = min(x_list)
    #     y_max = max(y_list)
    #     y_min = min(y_list)
    #     dt_boxes[i] = np.array([[x_min,y_min],[x_max,y_min],[x_max,y_max],[x_min,y_max]])


    for i in range(len(dt_boxes)):
        box1 = dt_boxes[i]
        box1_point3 = box1[2]
        box1_point4 = box1[3]  # the two corner points of the quad's bottom edge
        bottom_line = (min(box1_point3[0],box1_point4[0]),max(box1_point3[0],box1_point4[0]))
        bottom_line_len = abs(bottom_line[1]-bottom_line[0])

        for j in range(i+1,len(dt_boxes)):
            box2 = dt_boxes[j]
            box2_point1 = box2[0]
            box2_point2 = box2[1]  # the two corner points of the quad's top edge
            top_line = (min(box2_point1[0], box2_point2[0]), max(box2_point1[0], box2_point2[0]))
            top_line_len = abs(top_line[1]-top_line[0])
            if has_intersection(box1, box2):  # do the two quads intersect at all?
                if not (min(top_line)>=max(bottom_line) or min(bottom_line)>=max(top_line)):  # edges overlap along the x axis
                    # Mid y of the overlap: average of both edges' mean y values.
                    mid_y = ((box2_point1[1] + box2_point2[1]) / 2 + (box1_point3[1] + box1_point4[1]) / 2) // 2
                    # NOTE(review): this truthiness guard only skips mid_y == 0.0;
                    # presumably intended as a degenerate-value check — confirm intent.
                    if not mid_y:
                        continue
                    max_line_len = max(bottom_line_len,top_line_len)
                    # Length of the x-axis overlap between the bottom and top edges.
                    cross_line_len = bottom_line_len + top_line_len - \
                                     (max(bottom_line[1],bottom_line[0],top_line[1],top_line[0]) - min(bottom_line[1],bottom_line[0],top_line[1],top_line[0]))
                    # print(cross_line_len,max_line_len,cross_line_len/max_line_len)
                    if cross_line_len/max_line_len>=0.55:  # overlap ratio threshold
                        # Snap both edges onto the shared mid line (in-place mutation).
                        box1[2] = [box1_point3[0],mid_y]
                        box1[3] = [box1_point4[0],mid_y]
                        box2[0] = [box2_point1[0],mid_y]
                        box2[1] = [box2_point2[0],mid_y]
                        break  # box1's bottom edge is fixed; move on to the next box



    return dt_boxes
+
 
 def sorted_boxes(dt_boxes):
     """
@@ -187,3 +246,5 @@ def main(args):
 
 if __name__ == "__main__":
     main(utility.parse_args())
+
+    pass

+ 358 - 0
ocr/tools/infer/torch_det_model.py

@@ -0,0 +1,358 @@
+import torch
+import torch.nn as nn
+import numpy as np
+import math
+import os
+from torch.nn import functional as F
+import torch.nn.init as init
+import logging
+
+
class HSwish(nn.Module):
    """Hard-swish activation: ``x * relu6(x + 3) / 6``."""

    def forward(self, x):
        gate = F.relu6(x + 3, inplace=True)
        return x * gate / 6
+
class ConvBNACT(nn.Module):
    """Conv2d -> BatchNorm2d -> optional activation.

    Args:
        in_channels, out_channels, kernel_size, stride, padding, groups:
            passed straight to ``nn.Conv2d`` (bias disabled because BN follows).
        act: ``'relu'``, ``'hard_swish'`` or ``None`` for no activation.

    Raises:
        ValueError: for an unsupported ``act`` string. (The original code
            silently left ``self.act`` unset and crashed later in forward()
            with an AttributeError.)
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, groups=1, act=None):
        super().__init__()
        self.conv = nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size,
                              stride=stride, padding=padding, groups=groups,
                              bias=False)
        self.bn = nn.BatchNorm2d(out_channels)
        if act == 'relu':
            self.act = nn.ReLU(inplace=True)
        elif act == 'hard_swish':
            self.act = HSwish()
        elif act is None:
            self.act = None
        else:
            # Fail fast instead of deferring an AttributeError to forward().
            raise ValueError('unsupported act: {}'.format(act))

    def forward(self, x):
        x = self.conv(x)
        x = self.bn(x)
        if self.act is not None:
            x = self.act(x)
        return x
+
+
class ConvBNACTWithPool(nn.Module):
    """2x2 average-pool followed by Conv2d -> BatchNorm2d -> optional ReLU.

    Any non-None ``act`` selects ReLU; ``None`` disables the activation.
    """

    def __init__(self, in_channels, out_channels, kernel_size, groups=1, act=None):
        super().__init__()
        # A ceil_mode pooling variant was tried and left disabled upstream.
        self.pool = nn.AvgPool2d(kernel_size=2, stride=2, padding=0)
        self.conv = nn.Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            stride=1,
            padding=(kernel_size - 1) // 2,
            groups=groups,
            bias=False,
        )
        self.bn = nn.BatchNorm2d(out_channels)
        self.act = None if act is None else nn.ReLU(inplace=True)

    def forward(self, x):
        out = self.bn(self.conv(self.pool(x)))
        return out if self.act is None else self.act(out)
+
+
class ShortCut(nn.Module):
    """Residual shortcut branch: identity, 1x1 conv, or pooled 1x1 conv.

    A projection is required whenever the channel count or stride changes;
    the first block of the network uses a strided 1x1 conv, later blocks
    use avg-pool + 1x1 conv. Otherwise the input passes through unchanged.
    """

    def __init__(self, in_channels, out_channels, stride, name, if_first=False):
        super().__init__()
        assert name is not None, 'shortcut must have name'
        self.name = name
        needs_projection = in_channels != out_channels or stride != 1
        if needs_projection and not if_first:
            self.conv = ConvBNACTWithPool(in_channels=in_channels, out_channels=out_channels,
                                          kernel_size=1, groups=1, act=None)
        elif needs_projection or if_first:
            self.conv = ConvBNACT(in_channels=in_channels, out_channels=out_channels, kernel_size=1,
                                  stride=stride, padding=0, groups=1, act=None)
        else:
            self.conv = None

    def forward(self, x):
        # Identity when no projection module was created.
        return x if self.conv is None else self.conv(x)
+
+
class BottleneckBlock(nn.Module):
    """ResNet bottleneck: 1x1 reduce -> 3x3 -> 1x1 expand (x4), plus shortcut."""

    def __init__(self, in_channels, out_channels, stride, if_first, name):
        super().__init__()
        assert name is not None, 'bottleneck must have name'
        self.name = name
        expanded = out_channels * 4
        self.conv0 = ConvBNACT(in_channels=in_channels, out_channels=out_channels,
                               kernel_size=1, stride=1, padding=0, groups=1, act='relu')
        self.conv1 = ConvBNACT(in_channels=out_channels, out_channels=out_channels,
                               kernel_size=3, stride=stride, padding=1, groups=1, act='relu')
        self.conv2 = ConvBNACT(in_channels=out_channels, out_channels=expanded,
                               kernel_size=1, stride=1, padding=0, groups=1, act=None)
        self.shortcut = ShortCut(in_channels=in_channels, out_channels=expanded,
                                 stride=stride, if_first=if_first, name=f'{name}_branch1')
        self.relu = nn.ReLU(inplace=True)
        self.output_channels = expanded

    def forward(self, x):
        out = self.conv2(self.conv1(self.conv0(x)))
        return self.relu(out + self.shortcut(x))
+
+
class BasicBlock(nn.Module):
    """ResNet basic block: two 3x3 convs plus a (possibly projected) shortcut."""

    def __init__(self, in_channels, out_channels, stride, if_first, name):
        super().__init__()
        assert name is not None, 'block must have name'
        self.name = name
        self.conv0 = ConvBNACT(in_channels=in_channels, out_channels=out_channels,
                               kernel_size=3, stride=stride, padding=1, groups=1, act='relu')
        self.conv1 = ConvBNACT(in_channels=out_channels, out_channels=out_channels,
                               kernel_size=3, stride=1, padding=1, groups=1, act=None)
        self.shortcut = ShortCut(in_channels=in_channels, out_channels=out_channels,
                                 stride=stride, name=f'{name}_branch1', if_first=if_first)
        self.relu = nn.ReLU(inplace=True)
        self.output_channels = out_channels

    def forward(self, x):
        out = self.conv1(self.conv0(x))
        out = out + self.shortcut(x)
        return self.relu(out)
+
class ResNet(nn.Module):
    def __init__(self, in_channels, layers, out_indices=(0, 1, 2, 3), pretrained=True, **kwargs):
        """
        The ResNet-vd backbone network for the detection module.

        Args:
            in_channels: number of channels of the input image (3 for BGR/RGB).
            layers: ResNet depth; must be one of 18/34/50/101/152/200.
            out_indices: which of the four stages to return from forward().
                Fixed: was a mutable list default ``[0, 1, 2, 3]`` (shared
                mutable-default pitfall); now an immutable tuple — callers
                passing lists still work since it is only read.
            pretrained: if True, try to load imagenet weights from ./weights.
            **kwargs: 'use_supervised' selects an alternative checkpoint.
        """
        super().__init__()
        supported_layers = {
            18: {'depth': [2, 2, 2, 2], 'block_class': BasicBlock},
            34: {'depth': [3, 4, 6, 3], 'block_class': BasicBlock},
            50: {'depth': [3, 4, 6, 3], 'block_class': BottleneckBlock},
            101: {'depth': [3, 4, 23, 3], 'block_class': BottleneckBlock},
            152: {'depth': [3, 8, 36, 3], 'block_class': BottleneckBlock},
            200: {'depth': [3, 12, 48, 3], 'block_class': BottleneckBlock}
        }
        assert layers in supported_layers, \
            "supported layers are {} but input layer is {}".format(supported_layers, layers)
        depth = supported_layers[layers]['depth']
        block_class = supported_layers[layers]['block_class']
        self.use_supervised = kwargs.get('use_supervised', False)
        self.out_indices = out_indices
        num_filters = [64, 128, 256, 512]
        # "vd" deep stem: three 3x3 convs followed by a max-pool.
        self.conv1 = nn.Sequential(
            ConvBNACT(in_channels=in_channels, out_channels=32, kernel_size=3, stride=2, padding=1, act='relu'),
            ConvBNACT(in_channels=32, out_channels=32, kernel_size=3, stride=1, padding=1, act='relu'),
            ConvBNACT(in_channels=32, out_channels=64, kernel_size=3, stride=1, padding=1, act='relu')
        )
        self.pool1 = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)

        self.stages = nn.ModuleList()
        self.out_channels = []
        tmp_channels = []
        in_ch = 64
        for block_index in range(len(depth)):
            block_list = []
            for i in range(depth[block_index]):
                # Block names follow the paddle/caffe convention so pretrained
                # checkpoints map onto the same parameter names.
                if layers >= 50:
                    if layers in [101, 152, 200] and block_index == 2:
                        if i == 0:
                            conv_name = "res" + str(block_index + 2) + "a"
                        else:
                            conv_name = "res" + str(block_index + 2) + "b" + str(i)
                    else:
                        conv_name = "res" + str(block_index + 2) + chr(97 + i)
                else:
                    conv_name = f'res{str(block_index + 2)}{chr(97 + i)}'
                # The first block of every stage except stage 0 downsamples.
                block_list.append(block_class(in_channels=in_ch, out_channels=num_filters[block_index],
                                              stride=2 if i == 0 and block_index != 0 else 1,
                                              if_first=block_index == i == 0, name=conv_name))
                in_ch = block_list[-1].output_channels
            tmp_channels.append(in_ch)
            self.stages.append(nn.Sequential(*block_list))
        for idx, ch in enumerate(tmp_channels):
            if idx in self.out_indices:
                self.out_channels.append(ch)
        logger = logging.getLogger('torchocr')
        if pretrained:
            ckpt_path = f'./weights/resnet{layers}_vd.pth'
            if os.path.exists(ckpt_path):
                logger.info('load imagenet weights')
                self.load_state_dict(torch.load(ckpt_path))
            else:
                logger.info(f'{ckpt_path} not exists')
        if self.use_supervised:
            ckpt_path = f'./weights/res_supervised_140w_387e.pth'
            if os.path.exists(ckpt_path):
                logger.info('load supervised weights')
                self.load_state_dict(torch.load(ckpt_path))
            else:
                logger.info(f'{ckpt_path} not exists')

    def forward(self, x):
        """Return the feature maps of the stages listed in ``out_indices``."""
        x = self.conv1(x)
        x = self.pool1(x)
        out = []
        for idx, stage in enumerate(self.stages):
            x = stage(x)
            if idx in self.out_indices:
                out.append(x)
        return out
+
def weights_init(m):
    """Initializer for use with ``Module.apply``.

    Conv2d / ConvTranspose2d weights get Kaiming-normal init (biases, when
    present, normal init); BatchNorm2d weights are drawn from N(1, 0.02)
    with a zero bias. Every other module type is left untouched.
    """
    if isinstance(m, (nn.Conv2d, nn.ConvTranspose2d)):
        init.kaiming_normal_(m.weight.data)
        if m.bias is not None:
            init.normal_(m.bias.data)
    elif isinstance(m, nn.BatchNorm2d):
        init.normal_(m.weight.data, mean=1, std=0.02)
        init.constant_(m.bias.data, 0)
+
class DB_fpn(nn.Module):
    """FPN neck for DB: fuses four backbone stages into one feature map.

    Each input is reduced to ``out_channels`` with a 1x1 conv, merged
    top-down by upsample-and-add, smoothed by 3x3 convs down to
    ``out_channels // 4`` each, then upsampled to the finest resolution
    and concatenated (back to ``out_channels`` channels in total).
    """

    def __init__(self, in_channels, out_channels=256, **kwargs):
        super().__init__()
        self.out_channels = out_channels
        quarter = self.out_channels // 4
        # 1x1 reduce layers, one per backbone stage.
        self.in2_conv = nn.Conv2d(in_channels[0], self.out_channels, kernel_size=1, bias=False)
        self.in3_conv = nn.Conv2d(in_channels[1], self.out_channels, kernel_size=1, bias=False)
        self.in4_conv = nn.Conv2d(in_channels[2], self.out_channels, kernel_size=1, bias=False)
        self.in5_conv = nn.Conv2d(in_channels[3], self.out_channels, kernel_size=1, bias=False)
        # 3x3 smooth layers.
        self.p5_conv = nn.Conv2d(self.out_channels, quarter, kernel_size=3, padding=1, bias=False)
        self.p4_conv = nn.Conv2d(self.out_channels, quarter, kernel_size=3, padding=1, bias=False)
        self.p3_conv = nn.Conv2d(self.out_channels, quarter, kernel_size=3, padding=1, bias=False)
        self.p2_conv = nn.Conv2d(self.out_channels, quarter, kernel_size=3, padding=1, bias=False)
        for layer in (self.in2_conv, self.in3_conv, self.in4_conv, self.in5_conv,
                      self.p5_conv, self.p4_conv, self.p3_conv, self.p2_conv):
            layer.apply(weights_init)

    def _interpolate_add(self, x, y):
        # Upsample the coarser map 2x and add it to the finer one.
        return F.interpolate(x, scale_factor=2) + y

    def _interpolate_cat(self, p2, p3, p4, p5):
        # Bring every level to p2's resolution and stack along channels.
        p3 = F.interpolate(p3, scale_factor=2)
        p4 = F.interpolate(p4, scale_factor=4)
        p5 = F.interpolate(p5, scale_factor=8)
        return torch.cat([p5, p4, p3, p2], dim=1)

    def forward(self, x):
        c2, c3, c4, c5 = x
        in5 = self.in5_conv(c5)
        in4 = self.in4_conv(c4)
        in3 = self.in3_conv(c3)
        in2 = self.in2_conv(c2)

        # Top-down pathway.
        out4 = self._interpolate_add(in5, in4)
        out3 = self._interpolate_add(out4, in3)
        out2 = self._interpolate_add(out3, in2)

        return self._interpolate_cat(self.p2_conv(out2), self.p3_conv(out3),
                                     self.p4_conv(out4), self.p5_conv(in5))
+
class Head(nn.Module):
    """DB prediction head: 3x3 conv plus two stride-2 deconvs with a sigmoid.

    Maps an (N, in_channels, H, W) feature map to an (N, 1, 4H, 4W)
    probability map with values in [0, 1]. Attribute names are kept so
    existing checkpoints still load.
    """

    def __init__(self, in_channels):
        super().__init__()
        reduced = in_channels // 4
        self.conv1 = nn.Conv2d(in_channels=in_channels, out_channels=reduced, kernel_size=3, padding=1,
                               bias=False)
        self.conv_bn1 = nn.BatchNorm2d(reduced)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = nn.ConvTranspose2d(in_channels=reduced, out_channels=reduced, kernel_size=2,
                                        stride=2)
        self.conv_bn2 = nn.BatchNorm2d(reduced)
        self.conv3 = nn.ConvTranspose2d(in_channels=reduced, out_channels=1, kernel_size=2, stride=2)

    def forward(self, x):
        out = self.relu(self.conv_bn1(self.conv1(x)))
        out = self.relu(self.conv_bn2(self.conv2(out)))
        return torch.sigmoid(self.conv3(out))
+
class DBHead(nn.Module):
    """
    Differentiable Binarization (DB) head for text detection,
    see https://arxiv.org/abs/1911.08947.

    In eval mode only the shrink (probability) map is produced; in training
    mode the threshold map and the differentiable binary map are computed
    as well and concatenated along the channel dimension.
    """

    def __init__(self, in_channels, k=50):
        super().__init__()
        self.k = k
        self.binarize = Head(in_channels)
        self.thresh = Head(in_channels)
        self.binarize.apply(weights_init)
        self.thresh.apply(weights_init)

    def step_function(self, x, y):
        # Differentiable approximation of a step function: a sigmoid-shaped
        # curve with steepness k centered at x == y.
        return torch.reciprocal(1 + torch.exp(-self.k * (x - y)))

    def forward(self, x):
        prob_map = self.binarize(x)
        if not self.training:
            # Inference only needs the probability map.
            return prob_map
        thresh_map = self.thresh(x)
        approx_binary = self.step_function(prob_map, thresh_map)
        return torch.cat((prob_map, thresh_map, approx_binary), dim=1)
+
class DB_ResNet_18(nn.Module):
    """Full DB text detector: ResNet-18 backbone -> FPN neck -> DB head."""

    def __init__(self, ):
        super().__init__()
        self.backbone = ResNet(in_channels=3, layers=18, pretrained=False)
        self.neck = DB_fpn(in_channels=self.backbone.out_channels, out_channels=256)
        self.head = DBHead(self.neck.out_channels)

    def forward(self, x):
        # backbone features -> fused FPN map -> probability map(s)
        return self.head(self.neck(self.backbone(x)))
+
+
+

+ 311 - 0
ocr/tools/infer/torch_rec_model.py

@@ -0,0 +1,311 @@
+import torch
+import torch.nn as nn
+import numpy as np
+import math
+from torch.nn import functional as F
+
+
class HSwish(nn.Module):
    """Hard-Swish activation: x * relu6(x + 3) / 6 (as used in MobileNetV3)."""

    def forward(self, x):
        return x * F.relu6(x + 3, inplace=True) / 6
+
class ConvBNACT(nn.Module):
    """Conv2d -> BatchNorm2d -> optional activation.

    args:
        act: 'relu', 'hard_swish', or None for no activation.

    raises:
        ValueError: if ``act`` is not one of the supported values.
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, groups=1, act=None):
        super().__init__()
        self.conv = nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size,
                              stride=stride, padding=padding, groups=groups,
                              bias=False)  # bias is redundant before BatchNorm
        self.bn = nn.BatchNorm2d(out_channels)
        if act == 'relu':
            self.act = nn.ReLU()
        elif act == 'hard_swish':
            self.act = HSwish()
        elif act is None:
            self.act = None
        else:
            # BUGFIX: an unknown act string used to leave self.act unset and the
            # failure only surfaced later as an AttributeError in forward(); fail fast.
            raise ValueError("unsupported activation: {!r}".format(act))

    def forward(self, x):
        x = self.conv(x)
        x = self.bn(x)
        if self.act is not None:
            x = self.act(x)
        return x
+
+
class ConvBNACTWithPool(nn.Module):
    """AvgPool (carries the stride) -> Conv2d -> BatchNorm2d -> optional ReLU.

    Used in shortcut branches (ResNet-vd style): the pooling performs the
    spatial downsampling so the convolution itself stays stride-1.
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride=1, groups=1, act=None):
        super().__init__()
        self.pool = nn.AvgPool2d(kernel_size=stride, stride=stride, padding=0, ceil_mode=True)
        self.conv = nn.Conv2d(in_channels, out_channels, kernel_size,
                              stride=1, padding=(kernel_size - 1) // 2,
                              groups=groups, bias=False)
        self.bn = nn.BatchNorm2d(out_channels)
        # Any non-None act maps to ReLU on this path (hard_swish is not used here).
        self.act = None if act is None else nn.ReLU()

    def forward(self, x):
        out = self.bn(self.conv(self.pool(x)))
        return out if self.act is None else self.act(out)
+
+
class ShortCut(nn.Module):
    """Residual shortcut branch.

    Projects the input with a 1x1 conv whenever the channel count or the
    height stride changes (or for the very first block); otherwise it is the
    identity. Non-first downsampling blocks use avg-pool + conv
    (ResNet-vd style) instead of a strided conv.
    """

    def __init__(self, in_channels, out_channels, stride, name, if_first=False):
        super().__init__()
        assert name is not None, 'shortcut must have name'
        self.name = name
        # stride is a (h, w) tuple here; only the height stride decides projection.
        needs_projection = in_channels != out_channels or stride[0] != 1
        if needs_projection and not if_first:
            self.conv = ConvBNACTWithPool(in_channels=in_channels, out_channels=out_channels,
                                          kernel_size=1, stride=stride, groups=1, act=None)
        elif needs_projection or if_first:
            self.conv = ConvBNACT(in_channels=in_channels, out_channels=out_channels,
                                  kernel_size=1, stride=stride, padding=0, groups=1, act=None)
        else:
            self.conv = None

    def forward(self, x):
        return x if self.conv is None else self.conv(x)
+
+
class BasicBlock(nn.Module):
    """ResNet basic residual block: two 3x3 convs plus a shortcut connection."""

    def __init__(self, in_channels, out_channels, stride, if_first, name):
        super().__init__()
        assert name is not None, 'block must have name'
        self.name = name
        self.conv0 = ConvBNACT(in_channels=in_channels, out_channels=out_channels,
                               kernel_size=3, stride=stride, padding=1, groups=1, act='relu')
        self.conv1 = ConvBNACT(in_channels=out_channels, out_channels=out_channels,
                               kernel_size=3, stride=1, padding=1, groups=1, act=None)
        self.shortcut = ShortCut(in_channels=in_channels, out_channels=out_channels,
                                 stride=stride, name=f'{name}_branch1', if_first=if_first)
        self.relu = nn.ReLU()
        self.output_channels = out_channels

    def forward(self, x):
        residual = self.shortcut(x)
        out = self.conv1(self.conv0(x))
        return self.relu(out + residual)
+
+
class BottleneckBlock(nn.Module):
    """ResNet bottleneck block: 1x1 reduce -> 3x3 -> 1x1 expand (x4) + shortcut."""

    def __init__(self, in_channels, out_channels, stride, if_first, name):
        super().__init__()
        assert name is not None, 'bottleneck must have name'
        self.name = name
        self.conv0 = ConvBNACT(in_channels=in_channels, out_channels=out_channels,
                               kernel_size=1, stride=1, padding=0, groups=1, act='relu')
        self.conv1 = ConvBNACT(in_channels=out_channels, out_channels=out_channels,
                               kernel_size=3, stride=stride, padding=1, groups=1, act='relu')
        self.conv2 = ConvBNACT(in_channels=out_channels, out_channels=out_channels * 4,
                               kernel_size=1, stride=1, padding=0, groups=1, act=None)
        self.shortcut = ShortCut(in_channels=in_channels, out_channels=out_channels * 4,
                                 stride=stride, if_first=if_first, name=f'{name}_branch1')
        self.relu = nn.ReLU()
        # Expansion factor 4, as in standard ResNet bottlenecks.
        self.output_channels = out_channels * 4

    def forward(self, x):
        residual = self.shortcut(x)
        out = self.conv2(self.conv1(self.conv0(x)))
        return self.relu(out + residual)
+
+
class ResNet(nn.Module):
    """ResNet backbone for text recognition.

    Height is downsampled aggressively (stride (2, 1) in every stage after the
    first, plus a final 2x2 max-pool) while width is mostly preserved, so a
    32-px-high input ends with H == 1, ready for sequence decoding.

    args:
        in_channels: channels of the input image (3 for RGB).
        layers: network depth; one of 18/34/50/101/152/200.
        **kwargs: extra config arguments (e.g. ``pretrained``) are accepted
            and ignored.
    """

    def __init__(self, in_channels, layers, **kwargs):
        super().__init__()
        supported_layers = {
            18: {'depth': [2, 2, 2, 2], 'block_class': BasicBlock},
            34: {'depth': [3, 4, 6, 3], 'block_class': BasicBlock},
            50: {'depth': [3, 4, 6, 3], 'block_class': BottleneckBlock},
            101: {'depth': [3, 4, 23, 3], 'block_class': BottleneckBlock},
            152: {'depth': [3, 8, 36, 3], 'block_class': BottleneckBlock},
            200: {'depth': [3, 12, 48, 3], 'block_class': BottleneckBlock}
        }
        # BUGFIX: the message used to dump the whole config dict; list the
        # supported depths instead.
        assert layers in supported_layers, "supported layers are {} but input layer is {}".format(
            list(supported_layers), layers)

        depth = supported_layers[layers]['depth']
        block_class = supported_layers[layers]['block_class']

        num_filters = [64, 128, 256, 512]
        # Deep stem: three 3x3 convs instead of a single 7x7 (ResNet-vc/vd style).
        self.conv1 = nn.Sequential(
            ConvBNACT(in_channels=in_channels, out_channels=32, kernel_size=3, stride=1, padding=1, act='relu'),
            ConvBNACT(in_channels=32, out_channels=32, kernel_size=3, stride=1, act='relu', padding=1),
            ConvBNACT(in_channels=32, out_channels=64, kernel_size=3, stride=1, act='relu', padding=1)
        )

        self.pool1 = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)

        self.stages = nn.ModuleList()
        in_ch = 64
        for block_index in range(len(depth)):
            block_list = []
            for i in range(depth[block_index]):
                # Reproduce the original Paddle-style block naming so converted
                # checkpoints keep matching names.
                if layers >= 50:
                    if layers in [101, 152, 200] and block_index == 2:
                        if i == 0:
                            conv_name = "res" + str(block_index + 2) + "a"
                        else:
                            conv_name = "res" + str(block_index + 2) + "b" + str(i)
                    else:
                        conv_name = "res" + str(block_index + 2) + chr(97 + i)
                else:
                    conv_name = f'res{str(block_index + 2)}{chr(97 + i)}'
                # First block of each stage after the first downsamples height only.
                if i == 0 and block_index != 0:
                    stride = (2, 1)
                else:
                    stride = (1, 1)
                block_list.append(block_class(in_channels=in_ch, out_channels=num_filters[block_index],
                                              stride=stride,
                                              if_first=block_index == i == 0, name=conv_name))
                in_ch = block_list[-1].output_channels
            self.stages.append(nn.Sequential(*block_list))
        self.out_channels = in_ch
        self.out = nn.MaxPool2d(kernel_size=2, stride=2, padding=0)

    def forward(self, x):
        x = self.conv1(x)
        x = self.pool1(x)
        for stage in self.stages:
            x = stage(x)
        x = self.out(x)
        return x
+
+
class Im2Seq(nn.Module):
    """Collapse a (B, C, 1, W) feature map into a (B, W, C) sequence.

    The feature height must already be 1; each width position becomes one
    time step for the downstream sequence model.
    """

    def __init__(self, in_channels, **kwargs):
        super().__init__()
        self.out_channels = in_channels

    def forward(self, x):
        batch, channels, height, width = x.shape
        assert height == 1
        seq = x.reshape(batch, channels, height * width)
        return seq.permute((0, 2, 1))
+
+
+
class CTCHead(nn.Module):
    """CTC classification head: projects each time step to class logits.

    args:
        in_channels: feature size per time step.
        out_channels: number of classes (charset size + 1 for the CTC blank).
        fc_decay: kept for config compatibility; no weight decay is applied here.
        mid_channels: optional bottleneck size; when set, two FC layers are used.
        return_feats: when True, forward returns ``(logits, feats)`` where
            ``feats`` is the first FC layer's output (or the raw input when
            ``mid_channels`` is None); when False (default) only the logits.
    """

    def __init__(self,
                 in_channels=192,
                 out_channels=6624,
                 fc_decay=0.0004,
                 mid_channels=None,
                 return_feats=False,
                 **kwargs):
        super(CTCHead, self).__init__()
        if mid_channels is None:
            self.fc = nn.Linear(
                in_channels,
                out_channels)
        else:
            self.fc1 = nn.Linear(
                in_channels,
                mid_channels)
            self.fc2 = nn.Linear(
                mid_channels,
                out_channels)
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.mid_channels = mid_channels
        self.return_feats = return_feats
        self.apply(self._init_weights)
        print('---------model weight inits-----------')

    def _init_weights(self, m):
        # Uniform init in [-1/sqrt(in_channels), 1/sqrt(in_channels)] for all Linears.
        if isinstance(m, nn.Linear):
            stdv = 1.0 / math.sqrt(self.in_channels * 1.0)
            nn.init.uniform_(m.weight, -stdv, stdv)
            nn.init.uniform_(m.bias, -stdv, stdv)

    def forward(self, x):
        if self.mid_channels is None:
            predicts = self.fc(x)
        else:
            x = self.fc1(x)
            predicts = self.fc2(x)

        # BUGFIX: return_feats was previously ignored — the tuple was built and
        # immediately discarded via result[0]. Honor the flag; the default
        # (False) behavior is unchanged.
        if self.return_feats:
            return predicts, x
        return predicts
+
+
class EncoderWithRNN(nn.Module):
    """Two-layer bidirectional LSTM sequence encoder.

    Output feature size is ``2 * hidden_size`` (forward + backward directions).
    """

    def __init__(self, in_channels, **kwargs):
        super(EncoderWithRNN, self).__init__()
        hidden_size = kwargs.get('hidden_size', 256)
        self.out_channels = 2 * hidden_size
        self.lstm = nn.LSTM(in_channels, hidden_size,
                            bidirectional=True, num_layers=2, batch_first=True)

    def forward(self, x):
        # Compact the LSTM weights in contiguous memory (no-op on CPU).
        self.lstm.flatten_parameters()
        out, _ = self.lstm(x)
        return out
+
class SequenceEncoder(nn.Module):
    """Turn a (B, C, 1, W) feature map into a per-time-step feature sequence.

    ``encoder_type='reshape'`` only flattens the map (Im2Seq); ``'rnn'``
    additionally runs a bidirectional LSTM over the resulting sequence.
    """

    def __init__(self, in_channels, encoder_type='rnn', **kwargs):
        super(SequenceEncoder, self).__init__()
        self.encoder_reshape = Im2Seq(in_channels)
        self.out_channels = self.encoder_reshape.out_channels
        if encoder_type == 'reshape':
            self.only_reshape = True
        else:
            support_encoder_dict = {
                'reshape': Im2Seq,
                'rnn': EncoderWithRNN
            }
            assert encoder_type in support_encoder_dict, '{} must in {}'.format(
                encoder_type, support_encoder_dict.keys())
            # Extra kwargs (e.g. hidden_size) are forwarded to the encoder.
            self.encoder = support_encoder_dict[encoder_type](
                self.encoder_reshape.out_channels, **kwargs)
            self.out_channels = self.encoder.out_channels
            self.only_reshape = False

    def forward(self, x):
        seq = self.encoder_reshape(x)
        return seq if self.only_reshape else self.encoder(seq)
+
+
class Rec_ResNet_34(nn.Module):
    """CRNN-style recognizer: ResNet-34 backbone -> BiLSTM neck -> CTC head.

    args:
        class_nums: charset size; defaults to 7551 when falsy. One extra
            output unit is reserved for the CTC blank symbol.
    """

    def __init__(self, class_nums=None):
        super(Rec_ResNet_34, self).__init__()
        hidden_size = 256
        self.backbone = ResNet(in_channels=3, layers=34)
        self.neck = SequenceEncoder(in_channels=512, encoder_type='rnn', hidden_size=hidden_size)
        self.class_nums = class_nums if class_nums else 7551
        # BiLSTM output is 2 * hidden_size wide; +1 class for the CTC blank.
        self.head = CTCHead(in_channels=hidden_size * 2, out_channels=self.class_nums + 1, mid_channels=None)

    def forward(self, x):
        return self.head(self.neck(self.backbone(x)))