predict_system.py

# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
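
"""Inference pipeline that chains text detection, optional text-angle
classification and text recognition, then visualizes the results."""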

import os
import sys

# Make the package roots importable when this file is run as a script.
__dir__ = os.path.dirname(os.path.abspath(__file__))
sys.path.append(__dir__)
sys.path.append(os.path.abspath(os.path.join(__dir__, '../..')))
sys.path.append(os.path.abspath(os.path.join(__dir__, '../../..')))

# PaddlePaddle allocator flag: grow device memory on demand instead of
# pre-allocating it.
os.environ["FLAGS_allocator_strategy"] = 'auto_growth'
# print("sys.path", sys.path)

import cv2
import copy
import numpy as np
import time
from PIL import Image

# Release intermediate tensors as soon as they are no longer needed.
os.environ['FLAGS_eager_delete_tensor_gb'] = '0'

import utility as utility
import ocr.tools.infer.predict_rec as predict_rec
import ocr.tools.infer.predict_det as predict_det
import ocr.tools.infer.predict_cls as predict_cls
from ocr.ppocr.utils.utility import get_image_file_list, check_and_read_gif
from ocr.ppocr.utils.logging import get_logger
from ocr.tools.infer.utility import draw_ocr_box_txt

logger = get_logger()


class TextSystem(object):
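    """Full OCR pipeline: detect text boxes, optionally correct their
    orientation with an angle classifier, recognize the text in each box,
    and drop results whose recognition score is below drop_score."""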

    def __init__(self, args):
        self.text_detector = predict_det.TextDetector(args)
        self.text_recognizer = predict_rec.TextRecognizer(args)
        self.use_angle_cls = args.use_angle_cls
        self.drop_score = args.drop_score
        if self.use_angle_cls:
            self.text_classifier = predict_cls.TextClassifier(args)

    def get_rotate_crop_image(self, img, points):
        '''
        img_height, img_width = img.shape[0:2]
        left = int(np.min(points[:, 0]))
        right = int(np.max(points[:, 0]))
        top = int(np.min(points[:, 1]))
        bottom = int(np.max(points[:, 1]))
        img_crop = img[top:bottom, left:right, :].copy()
        points[:, 0] = points[:, 0] - left
        points[:, 1] = points[:, 1] - top
        '''
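        # The four corner points (assumed to be ordered top-left, top-right,
        # bottom-right, bottom-left, as PaddleOCR-style detectors emit them)
        # are warped onto an axis-aligned rectangle whose size is taken from
        # the quad's edge lengths; clearly tall crops are then rotated so the
        # recognizer always sees roughly horizontal text.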
        img_crop_width = int(
            max(
                np.linalg.norm(points[0] - points[1]),
                np.linalg.norm(points[2] - points[3])))
        img_crop_height = int(
            max(
                np.linalg.norm(points[0] - points[3]),
                np.linalg.norm(points[1] - points[2])))
        pts_std = np.float32([[0, 0], [img_crop_width, 0],
                              [img_crop_width, img_crop_height],
                              [0, img_crop_height]])
        M = cv2.getPerspectiveTransform(points, pts_std)
        dst_img = cv2.warpPerspective(
            img,
            M, (img_crop_width, img_crop_height),
            borderMode=cv2.BORDER_REPLICATE,
            flags=cv2.INTER_CUBIC)
        dst_img_height, dst_img_width = dst_img.shape[0:2]
        if dst_img_height * 1.0 / dst_img_width >= 1.5:
            dst_img = np.rot90(dst_img)
        return dst_img

    def print_draw_crop_rec_res(self, img_crop_list, rec_res):
        # Debug helper (see the commented-out call in __call__): dump each
        # cropped text region to ./output and log its recognition result.
        bbox_num = len(img_crop_list)
        for bno in range(bbox_num):
            cv2.imwrite("./output/img_crop_%d.jpg" % bno, img_crop_list[bno])
            logger.info("{}, {}".format(bno, rec_res[bno]))

    def __call__(self, img):
        """Run detection, optional angle classification and recognition on a
        BGR image; return the surviving boxes and their (text, score) pairs."""
        ori_im = img.copy()
        dt_boxes, elapse = self.text_detector(img)
        if dt_boxes is None:
            return None, None
        logger.info("dt_boxes num : {}, elapse : {}".format(
            len(dt_boxes), elapse))
        img_crop_list = []
        dt_boxes = sorted_boxes(dt_boxes)
        for bno in range(len(dt_boxes)):
            tmp_box = copy.deepcopy(dt_boxes[bno])
            img_crop = self.get_rotate_crop_image(ori_im, tmp_box)
            img_crop_list.append(img_crop)
        if self.use_angle_cls:
            img_crop_list, angle_list, elapse = self.text_classifier(
                img_crop_list)
            logger.info("cls num : {}, elapse : {}".format(
                len(img_crop_list), elapse))
        rec_res, elapse = self.text_recognizer(img_crop_list)
        logger.info("rec_res num : {}, elapse : {}".format(
            len(rec_res), elapse))
        # self.print_draw_crop_rec_res(img_crop_list, rec_res)
        filter_boxes, filter_rec_res = [], []
        for box, rec_result in zip(dt_boxes, rec_res):
            text, score = rec_result
            if score >= self.drop_score:
                filter_boxes.append(box)
                filter_rec_res.append(rec_result)
        return filter_boxes, filter_rec_res


def sorted_boxes(dt_boxes):
    """
    Sort text boxes in order from top to bottom, left to right
    args:
        dt_boxes(array): detected text boxes with shape [N, 4, 2]
    return:
        list of N sorted boxes, each with shape [4, 2]
    """
    num_boxes = dt_boxes.shape[0]
    sorted_boxes = sorted(dt_boxes, key=lambda x: (x[0][1], x[0][0]))
    _boxes = list(sorted_boxes)
    for i in range(num_boxes - 1):
        # Boxes whose top-left y coordinates are within 10 px of each other
        # are treated as the same text line and re-ordered left to right.
        if abs(_boxes[i + 1][0][1] - _boxes[i][0][1]) < 10 and \
                (_boxes[i + 1][0][0] < _boxes[i][0][0]):
            tmp = _boxes[i]
            _boxes[i] = _boxes[i + 1]
            _boxes[i + 1] = tmp
    return _boxes
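

# Illustrative example (made-up coordinates, not real detector output): two
# boxes whose top-left corners sit at y=30 and y=33 fall within the 10 px
# tolerance above, so sorted_boxes() orders them purely by x:
#
#   boxes = np.array([[[120, 30], [180, 30], [180, 50], [120, 50]],
#                     [[ 10, 33], [ 70, 33], [ 70, 53], [ 10, 53]]],
#                    dtype=np.float32)
#   sorted_boxes(boxes)  # -> the box starting at x=10 comes first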


def main(args):
    image_file_list = get_image_file_list(args.image_dir)
    text_sys = TextSystem(args)
    is_visualize = True
    font_path = args.vis_font_path
    drop_score = args.drop_score
    for image_file in image_file_list:
        img, flag = check_and_read_gif(image_file)
        if not flag:
            img = cv2.imread(image_file)
        if img is None:
            logger.info("error in loading image: {}".format(image_file))
            continue
        starttime = time.time()
        dt_boxes, rec_res = text_sys(img)
        elapse = time.time() - starttime
        logger.info("Predict time of %s: %.3fs" % (image_file, elapse))
        for text, score in rec_res:
            logger.info("{}, {:.3f}".format(text, score))
        if is_visualize:
            image = Image.fromarray(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
            boxes = dt_boxes
            txts = [rec_res[i][0] for i in range(len(rec_res))]
            scores = [rec_res[i][1] for i in range(len(rec_res))]
            draw_img = draw_ocr_box_txt(
                image,
                boxes,
                txts,
                scores,
                drop_score=drop_score,
                font_path=font_path)

            draw_img_save = "./inference_results/"
            if not os.path.exists(draw_img_save):
                os.makedirs(draw_img_save)
            # draw_ocr_box_txt returns an RGB array; flip channels back to
            # BGR before handing it to cv2.imwrite.
            cv2.imwrite(
                os.path.join(draw_img_save, os.path.basename(image_file)),
                draw_img[:, :, ::-1])
            logger.info("The visualized image saved in {}".format(
                os.path.join(draw_img_save, os.path.basename(image_file))))


if __name__ == "__main__":
    main(utility.parse_args())
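
# Example invocation (a sketch: the exact flag names depend on what this
# project's utility.parse_args() exposes; those below follow the usual
# PaddleOCR inference flags and are assumptions, not verified against this repo):
#
#   python predict_system.py --image_dir="./imgs" \
#       --det_model_dir="./inference/det" --rec_model_dir="./inference/rec" \
#       --use_angle_cls=True --cls_model_dir="./inference/cls"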