utility.py

# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import logging
import os
import sys
import time

sys.path.append(os.path.dirname(os.path.abspath(__file__)) + "/../../../")

import cv2
import numpy as np
import json
from PIL import Image, ImageDraw, ImageFont
import math

os.environ['FLAGS_eager_delete_tensor_gb'] = '0'

from paddle import inference

def parse_args(return_parse=False):
    def str2bool(v):
        return v.lower() in ("true", "t", "1")

    parser = argparse.ArgumentParser()
    # params for prediction engine
    parser.add_argument("--use_gpu", type=str2bool, default=True)
    parser.add_argument("--ir_optim", type=str2bool, default=True)
    parser.add_argument("--use_tensorrt", type=str2bool, default=False)
    parser.add_argument("--use_fp16", type=str2bool, default=False)
    parser.add_argument("--gpu_mem", type=int, default=500)

    # params for text detector
    parser.add_argument("--image_dir", type=str)
    parser.add_argument("--det_algorithm", type=str, default='DB')
    parser.add_argument("--det_model_dir", type=str)
    parser.add_argument("--det_limit_side_len", type=float, default=960)
    parser.add_argument("--det_limit_type", type=str, default='max')

    # DB params
    parser.add_argument("--det_db_thresh", type=float, default=0.3)
    parser.add_argument("--det_db_box_thresh", type=float, default=0.5)
    parser.add_argument("--det_db_unclip_ratio", type=float, default=1.6)
    parser.add_argument("--max_batch_size", type=int, default=10)
    # use str2bool rather than bool: argparse's bool() treats any non-empty
    # string (including "False") as True
    parser.add_argument("--use_dilation", type=str2bool, default=False)

    # EAST params
    parser.add_argument("--det_east_score_thresh", type=float, default=0.8)
    parser.add_argument("--det_east_cover_thresh", type=float, default=0.1)
    parser.add_argument("--det_east_nms_thresh", type=float, default=0.2)

    # SAST params
    parser.add_argument("--det_sast_score_thresh", type=float, default=0.5)
    parser.add_argument("--det_sast_nms_thresh", type=float, default=0.2)
    parser.add_argument("--det_sast_polygon", type=str2bool, default=False)

    # params for text recognizer
    parser.add_argument("--rec_algorithm", type=str, default='CRNN')
    parser.add_argument("--rec_model_dir", type=str)
    parser.add_argument("--rec_image_shape", type=str, default="3, 32, 320")
    parser.add_argument("--rec_char_type", type=str, default='ch')
    parser.add_argument("--rec_batch_num", type=int, default=6)
    parser.add_argument("--max_text_length", type=int, default=25)
    parser.add_argument(
        "--rec_char_dict_path",
        type=str,
        default="./ppocr/utils/ppocr_keys_v1.txt")
    parser.add_argument("--use_space_char", type=str2bool, default=True)
    parser.add_argument(
        "--vis_font_path", type=str, default="./doc/fonts/simfang.ttf")
    parser.add_argument("--drop_score", type=float, default=0.5)

    # params for text classifier
    parser.add_argument("--use_angle_cls", type=str2bool, default=False)
    parser.add_argument("--cls_model_dir", type=str)
    parser.add_argument("--cls_image_shape", type=str, default="3, 48, 192")
    parser.add_argument("--label_list", type=list, default=['0', '180'])
    parser.add_argument("--cls_batch_num", type=int, default=6)
    parser.add_argument("--cls_thresh", type=float, default=0.9)

    parser.add_argument("--enable_mkldnn", type=str2bool, default=False)
    parser.add_argument("--use_pdserving", type=str2bool, default=False)

    if return_parse:
        return parser
    return parser.parse_args()
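
# Illustrative sketch (added, not part of the original module): `return_parse=True`
# hands back the bare parser, which makes it easy to build an args namespace in code
# instead of from sys.argv. The model directories below are hypothetical placeholders.
def _example_build_args():
    parser = parse_args(return_parse=True)
    return parser.parse_args([
        "--det_model_dir", "./inference/det/",  # placeholder path
        "--rec_model_dir", "./inference/rec/",  # placeholder path
        "--use_gpu", "false",
    ])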

def create_predictor(args, mode, logger):
    if mode == "det":
        model_dir = args.det_model_dir
    elif mode == 'cls':
        model_dir = args.cls_model_dir
    else:
        model_dir = args.rec_model_dir

    if model_dir is None:
        logger.info("not find {} model file path {}".format(mode, model_dir))
        sys.exit(0)
    model_file_path = model_dir + "/inference.pdmodel"
    params_file_path = model_dir + "/inference.pdiparams"
    if not os.path.exists(model_file_path):
        logger.info("not find model file path {}".format(model_file_path))
        sys.exit(0)
    if not os.path.exists(params_file_path):
        logger.info("not find params file path {}".format(params_file_path))
        sys.exit(0)

    config = inference.Config(model_file_path, params_file_path)

    if args.use_gpu:
        config.enable_use_gpu(args.gpu_mem, 0)
        if args.use_tensorrt:
            config.enable_tensorrt_engine(
                precision_mode=inference.PrecisionType.Half
                if args.use_fp16 else inference.PrecisionType.Float32,
                max_batch_size=args.max_batch_size)
    else:
        config.disable_gpu()
        config.set_cpu_math_library_num_threads(1)
        if args.enable_mkldnn:
            # limit the mkldnn shape cache to avoid memory leak
            config.set_mkldnn_cache_capacity(1)
            config.enable_mkldnn()
            # TODO LDOUBLEV: fix mkldnn bug when batch_size > 1
            # config.set_mkldnn_op({'conv2d', 'depthwise_conv2d', 'pool2d', 'batch_norm'})
            args.rec_batch_num = 1

    # config.disable_gpu()
    # config.enable_use_gpu(args.gpu_mem, 0)

    config.enable_memory_optim()
    config.set_cpu_math_library_num_threads(1)
    config.disable_glog_info()

    config.switch_use_feed_fetch_ops(False)
    config.switch_specify_input_names(True)

    config.delete_pass("conv_transpose_eltwiseadd_bn_fuse_pass")
    config.delete_pass('conv_elementwise_add_act_fuse_pass')
    config.delete_pass('conv_elementwise_add2_act_fuse_pass')
    # config.switch_use_feed_fetch_ops(False)

    # create predictor
    predictor = inference.create_predictor(config)
    input_names = predictor.get_input_names()
    for name in input_names:
        input_tensor = predictor.get_input_handle(name)
    output_names = predictor.get_output_names()
    output_tensors = []
    for output_name in output_names:
        output_tensor = predictor.get_output_handle(output_name)
        output_tensors.append(output_tensor)
    return predictor, input_tensor, output_tensors
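
# Illustrative sketch (added, not part of the original module): wiring parse_args and
# create_predictor together for the detection model. Any logging.Logger satisfies the
# `logger` argument; the model directory is a placeholder and must contain
# inference.pdmodel / inference.pdiparams for this to run.
def _example_create_det_predictor():
    logger = logging.getLogger(__name__)
    args = parse_args(return_parse=True).parse_args(
        ["--det_model_dir", "./inference/det/", "--use_gpu", "false"])  # placeholders
    predictor, input_tensor, output_tensors = create_predictor(args, "det", logger)
    return predictor, input_tensor, output_tensors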

def draw_text_det_res(dt_boxes, img_path):
    src_im = cv2.imread(img_path)
    for box in dt_boxes:
        box = np.array(box).astype(np.int32).reshape(-1, 2)
        cv2.polylines(src_im, [box], True, color=(255, 255, 0), thickness=2)
    return src_im

def resize_img(img, input_size=600):
    """
    resize img and limit the longest side of the image to input_size
    """
    img = np.array(img)
    im_shape = img.shape
    im_size_max = np.max(im_shape[0:2])
    im_scale = float(input_size) / float(im_size_max)
    img = cv2.resize(img, None, None, fx=im_scale, fy=im_scale)
    return img
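
# Example of the scaling rule above (added for illustration): a 1200x800 image is
# scaled by 600/1200 = 0.5, giving 600x400, so the longest side equals input_size.
def _example_resize_img():
    img = np.zeros((800, 1200, 3), dtype=np.uint8)  # synthetic H x W x C image
    out = resize_img(img, input_size=600)
    assert out.shape[:2] == (400, 600)
    return out.shape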

def draw_ocr(image,
             boxes,
             txts=None,
             scores=None,
             drop_score=0.5,
             font_path="./doc/simfang.ttf"):
    """
    Visualize the results of OCR detection and recognition
    args:
        image(Image|array): RGB image
        boxes(list): boxes with shape(N, 4, 2)
        txts(list): the recognized texts
        scores(list): the corresponding scores of the texts
        drop_score(float): only boxes with scores greater than drop_score will be visualized
        font_path: the path of the font used to draw text
    return(array):
        the visualized img
    """
    if scores is None:
        scores = [1] * len(boxes)
    box_num = len(boxes)
    for i in range(box_num):
        if scores is not None and (scores[i] < drop_score or
                                   math.isnan(scores[i])):
            continue
        box = np.reshape(np.array(boxes[i]), [-1, 1, 2]).astype(np.int64)
        image = cv2.polylines(np.array(image), [box], True, (255, 0, 0), 2)
    if txts is not None:
        img = np.array(resize_img(image, input_size=600))
        txt_img = text_visual(
            txts,
            scores,
            img_h=img.shape[0],
            img_w=600,
            threshold=drop_score,
            font_path=font_path)
        img = np.concatenate([np.array(img), np.array(txt_img)], axis=1)
        return img
    return image
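
# Illustrative sketch (added, not part of the original module): drawing detection
# boxes and the recognized text side by side with draw_ocr. Boxes are 4-point
# polygons in image coordinates; the image path and font path are placeholders.
def _example_draw_ocr():
    image = Image.open("./doc/imgs/example.jpg").convert("RGB")  # placeholder image
    boxes = [[[60, 10], [300, 10], [300, 50], [60, 50]]]
    txts = ["hello"]
    scores = [0.98]
    vis = draw_ocr(image, boxes, txts, scores,
                   font_path="./doc/fonts/simfang.ttf")  # placeholder font
    cv2.imwrite("example_draw_ocr.jpg", cv2.cvtColor(vis, cv2.COLOR_RGB2BGR))
    return vis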

def draw_ocr_box_txt(image,
                     boxes,
                     txts,
                     scores=None,
                     drop_score=0.5,
                     font_path="./doc/simfang.ttf"):
    h, w = image.height, image.width
    img_left = image.copy()
    img_right = Image.new('RGB', (w, h), (255, 255, 255))

    import random

    random.seed(0)
    draw_left = ImageDraw.Draw(img_left)
    draw_right = ImageDraw.Draw(img_right)
    for idx, (box, txt) in enumerate(zip(boxes, txts)):
        if scores is not None and scores[idx] < drop_score:
            continue
        color = (random.randint(0, 255), random.randint(0, 255),
                 random.randint(0, 255))
        draw_left.polygon(box, fill=color)
        draw_right.polygon(
            [
                box[0][0], box[0][1], box[1][0], box[1][1], box[2][0],
                box[2][1], box[3][0], box[3][1]
            ],
            outline=color)
        box_height = math.sqrt((box[0][0] - box[3][0])**2 +
                               (box[0][1] - box[3][1])**2)
        box_width = math.sqrt((box[0][0] - box[1][0])**2 +
                              (box[0][1] - box[1][1])**2)
        if box_height > 2 * box_width:
            font_size = max(int(box_width * 0.9), 10)
            font = ImageFont.truetype(font_path, font_size, encoding="utf-8")
            cur_y = box[0][1]
            for c in txt:
                char_size = font.getsize(c)
                draw_right.text(
                    (box[0][0] + 3, cur_y), c, fill=(0, 0, 0), font=font)
                cur_y += char_size[1]
        else:
            font_size = max(int(box_height * 0.8), 10)
            font = ImageFont.truetype(font_path, font_size, encoding="utf-8")
            draw_right.text(
                [box[0][0], box[0][1]], txt, fill=(0, 0, 0), font=font)
    img_left = Image.blend(image, img_left, 0.5)
    img_show = Image.new('RGB', (w * 2, h), (255, 255, 255))
    img_show.paste(img_left, (0, 0, w, h))
    img_show.paste(img_right, (w, 0, w * 2, h))
    return np.array(img_show)

def str_count(s):
    """
    Count the visual width of a string in full-width characters:
    a single English letter, digit or space counts as half of a
    Chinese character.
    args:
        s(string): the input string
    return(int):
        the visual width of the string
    """
    import string
    count_zh = count_pu = 0
    s_len = len(s)
    en_dg_count = 0
    for c in s:
        if c in string.ascii_letters or c.isdigit() or c.isspace():
            en_dg_count += 1
        elif c.isalpha():
            count_zh += 1
        else:
            count_pu += 1
    return s_len - math.ceil(en_dg_count / 2)
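
# Worked examples of the counting rule above (added for illustration):
#   str_count("Hello 世界")  ->  8 - ceil(6 / 2) == 5   (5 letters + 1 space count as half)
#   str_count("OCR2021")     ->  7 - ceil(7 / 2) == 3   (letters and digits only)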

def text_visual(texts,
                scores,
                img_h=400,
                img_w=600,
                threshold=0.,
                font_path="./doc/simfang.ttf"):
    """
    create new blank img and draw txt on it
    args:
        texts(list): the texts to be drawn
        scores(list|None): corresponding score of each txt
        img_h(int): the height of the blank img
        img_w(int): the width of the blank img
        threshold(float): texts with a score below threshold are skipped
        font_path: the path of the font used to draw text
    return(array):
        the drawn img
    """
    if scores is not None:
        assert len(texts) == len(
            scores), "The number of txts and corresponding scores must match"

    def create_blank_img():
        blank_img = np.ones(shape=[img_h, img_w], dtype=np.int8) * 255
        blank_img[:, img_w - 1:] = 0
        blank_img = Image.fromarray(blank_img).convert("RGB")
        draw_txt = ImageDraw.Draw(blank_img)
        return blank_img, draw_txt

    blank_img, draw_txt = create_blank_img()

    font_size = 20
    txt_color = (0, 0, 0)
    font = ImageFont.truetype(font_path, font_size, encoding="utf-8")

    gap = font_size + 5
    txt_img_list = []
    count, index = 1, 0
    for idx, txt in enumerate(texts):
        index += 1
        if scores[idx] < threshold or math.isnan(scores[idx]):
            index -= 1
            continue
        first_line = True
        while str_count(txt) >= img_w // font_size - 4:
            tmp = txt
            txt = tmp[:img_w // font_size - 4]
            if first_line:
                new_txt = str(index) + ': ' + txt
                first_line = False
            else:
                new_txt = ' ' + txt
            draw_txt.text((0, gap * count), new_txt, txt_color, font=font)
            txt = tmp[img_w // font_size - 4:]
            if count >= img_h // gap - 1:
                txt_img_list.append(np.array(blank_img))
                blank_img, draw_txt = create_blank_img()
                count = 0
            count += 1
        if first_line:
            new_txt = str(index) + ': ' + txt + ' ' + '%.3f' % (scores[idx])
        else:
            new_txt = " " + txt + " " + '%.3f' % (scores[idx])
        draw_txt.text((0, gap * count), new_txt, txt_color, font=font)
        # whether add new blank img or not
        if count >= img_h // gap - 1 and idx + 1 < len(texts):
            txt_img_list.append(np.array(blank_img))
            blank_img, draw_txt = create_blank_img()
            count = 0
        count += 1
    txt_img_list.append(np.array(blank_img))
    if len(txt_img_list) == 1:
        blank_img = np.array(txt_img_list[0])
    else:
        blank_img = np.concatenate(txt_img_list, axis=1)
    return np.array(blank_img)
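
# Illustrative sketch (added, not part of the original module): rendering a short
# result list onto a blank canvas with text_visual. The font path is a placeholder;
# a CJK-capable TrueType font is needed for Chinese text.
def _example_text_visual():
    txts = ["hello world", "paddle ocr"]
    scores = [0.98, 0.87]
    canvas = text_visual(
        txts, scores, img_h=400, img_w=600,
        threshold=0.5, font_path="./doc/fonts/simfang.ttf")
    return canvas  # ndarray of shape (400, 600, 3) when one column suffices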

def base64_to_cv2(b64str):
    # decode a base64-encoded image string into a BGR ndarray
    import base64
    data = base64.b64decode(b64str.encode('utf8'))
    data = np.frombuffer(data, np.uint8)
    data = cv2.imdecode(data, cv2.IMREAD_COLOR)
    return data

def draw_boxes(image, boxes, scores=None, drop_score=0.5):
    if scores is None:
        scores = [1] * len(boxes)
    for (box, score) in zip(boxes, scores):
        if score < drop_score:
            continue
        box = np.reshape(np.array(box), [-1, 1, 2]).astype(np.int64)
        image = cv2.polylines(np.array(image), [box], True, (255, 0, 0), 2)
    return image

if __name__ == '__main__':
    test_img = "./doc/test_v2"
    predict_txt = "./doc/predict.txt"
    f = open(predict_txt, 'r')
    data = f.readlines()
    img_path, anno = data[0].strip().split('\t')
    img_name = os.path.basename(img_path)
    img_path = os.path.join(test_img, img_name)
    image = Image.open(img_path)

    data = json.loads(anno)
    boxes, txts, scores = [], [], []
    for dic in data:
        boxes.append(dic['points'])
        txts.append(dic['transcription'])
        scores.append(round(dic['scores'], 3))

    new_img = draw_ocr(image, boxes, txts, scores)

    cv2.imwrite(img_name, new_img)