# convert_image.py
import logging
import os
import sys
import traceback

sys.path.append(os.path.dirname(__file__) + "/../")

import cv2
from pdfminer.layout import LTLine

from format_convert import get_memory_info
from format_convert.utils import judge_error_code, add_div, LineTable, get_table_html
from format_convert.table_correct import get_rotated_image
from format_convert.convert_need_interface import from_otr_interface, from_ocr_interface
  12. def image_process(image_np, image_path, is_from_pdf, use_ocr=True):
  13. from format_convert.convert_tree import _Table, _Sentence
  14. def get_cluster(t_list, b_list, axis):
  15. zip_list = list(zip(t_list, b_list))
  16. if len(zip_list) == 0:
  17. return t_list, b_list
  18. if len(zip_list[0]) > 0:
  19. zip_list.sort(key=lambda x: x[1][axis][1])
  20. cluster_list = []
  21. margin = 5
  22. for text, bbox in zip_list:
  23. _find = 0
  24. for cluster in cluster_list:
  25. if abs(cluster[1] - bbox[axis][1]) <= margin:
  26. cluster[0].append([text, bbox])
  27. cluster[1] = bbox[axis][1]
  28. _find = 1
  29. break
  30. if not _find:
  31. cluster_list.append([[[text, bbox]], bbox[axis][1]])
  32. new_text_list = []
  33. new_bbox_list = []
  34. for cluster in cluster_list:
  35. # print("=============convert_image")
  36. # print("cluster_list", cluster)
  37. center_y = 0
  38. for text, bbox in cluster[0]:
  39. center_y += bbox[axis][1]
  40. center_y = int(center_y / len(cluster[0]))
  41. for text, bbox in cluster[0]:
  42. bbox[axis][1] = center_y
  43. new_text_list.append(text)
  44. new_bbox_list.append(bbox)
  45. # print("cluster_list", cluster)
  46. return new_text_list, new_bbox_list
  47. def merge_textbox(textbox_list, in_objs):
  48. delete_obj = []
  49. threshold = 5
  50. for k in range(len(textbox_list)):
  51. tb1 = textbox_list[k]
  52. if tb1 not in in_objs and tb1 not in delete_obj:
  53. for m in range(k+1, len(textbox_list)):
  54. tb2 = textbox_list[m]
  55. if abs(tb1.bbox[1]-tb2.bbox[1]) <= threshold \
  56. and abs(tb1.bbox[3]-tb2.bbox[3]) <= threshold:
  57. if tb1.bbox[0] <= tb2.bbox[0]:
  58. tb1.text = tb1.text + tb2.text
  59. else:
  60. tb1.text = tb2.text + tb1.text
  61. tb1.bbox[0] = min(tb1.bbox[0], tb2.bbox[0])
  62. tb1.bbox[2] = max(tb1.bbox[2], tb2.bbox[2])
  63. delete_obj.append(tb2)
  64. for _obj in delete_obj:
  65. if _obj in textbox_list:
  66. textbox_list.remove(_obj)
  67. return textbox_list
  68. logging.info("into image_preprocess")
  69. try:
  70. # 图片倾斜校正,写入原来的图片路径
  71. print("image_process", image_path)
  72. g_r_i = get_rotated_image(image_np, image_path)
  73. if g_r_i == [-1]:
  74. return [-1]
  75. image_np = cv2.imread(image_path)
  76. if image_np is None:
  77. return []
  78. # otr需要图片resize成模型所需大小, 写入另一个路径
  79. best_h, best_w = get_best_predict_size(image_np)
  80. image_resize = cv2.resize(image_np, (best_w, best_h), interpolation=cv2.INTER_AREA)
  81. image_resize_path = image_path.split(".")[0] + "_resize_otr." + image_path.split(".")[-1]
  82. cv2.imwrite(image_resize_path, image_resize)
  83. # 调用otr模型接口
  84. with open(image_resize_path, "rb") as f:
  85. image_bytes = f.read()
  86. list_line = from_otr_interface(image_bytes, is_from_pdf)
  87. if judge_error_code(list_line):
  88. return list_line
  89. # otr resize后得到的bbox根据比例还原
  90. ratio = (image_np.shape[0]/best_h, image_np.shape[1]/best_w)
  91. for i in range(len(list_line)):
  92. point = list_line[i]
  93. list_line[i] = [int(point[0]*ratio[1]), int(point[1]*ratio[0]),
  94. int(point[2]*ratio[1]), int(point[3]*ratio[0])]
  95. # ocr图片过大内存溢出,需resize
  96. threshold = 3000
  97. if image_np.shape[0] >= threshold or image_np.shape[1] >= threshold:
  98. best_h, best_w = get_best_predict_size2(image_np, threshold)
  99. image_resize = cv2.resize(image_np, (best_w, best_h), interpolation=cv2.INTER_AREA)
  100. image_resize_path = image_path.split(".")[0] + "_resize_ocr." + image_path.split(".")[-1]
  101. cv2.imwrite(image_resize_path, image_resize)
  102. # 调用ocr模型接口
  103. with open(image_resize_path, "rb") as f:
  104. image_bytes = f.read()
  105. text_list, bbox_list = from_ocr_interface(image_bytes, True)
  106. if judge_error_code(text_list):
  107. return text_list
  108. # ocr resize后的bbox还原
  109. ratio = (image_np.shape[0]/best_h, image_np.shape[1]/best_w)
  110. for i in range(len(bbox_list)):
  111. point = bbox_list[i]
  112. bbox_list[i] = [[int(point[0][0]*ratio[1]), int(point[0][1]*ratio[0])],
  113. [int(point[1][0]*ratio[1]), int(point[1][1]*ratio[0])],
  114. [int(point[2][0]*ratio[1]), int(point[2][1]*ratio[0])],
  115. [int(point[3][0]*ratio[1]), int(point[3][1]*ratio[0])]]
  116. # 对文字框的y进行聚类
  117. text_list, bbox_list = get_cluster(text_list, bbox_list, 0)
  118. text_list, bbox_list = get_cluster(text_list, bbox_list, 2)
  119. # 调用现成方法形成表格
  120. try:
  121. from format_convert.convert_tree import TableLine
  122. list_lines = []
  123. for line in list_line:
  124. list_lines.append(LTLine(1, (line[0], line[1]), (line[2], line[3])))
  125. from format_convert.convert_tree import TextBox
  126. list_text_boxes = []
  127. for i in range(len(bbox_list)):
  128. bbox = bbox_list[i]
  129. b_text = text_list[i]
  130. list_text_boxes.append(TextBox([bbox[0][0], bbox[0][1],
  131. bbox[2][0], bbox[2][1]], b_text))
  132. lt = LineTable()
  133. tables, obj_in_table, _ = lt.recognize_table(list_text_boxes, list_lines, False)
  134. # 合并同一行textbox
  135. list_text_boxes = merge_textbox(list_text_boxes, obj_in_table)
  136. obj_list = []
  137. for table in tables:
  138. obj_list.append(_Table(table["table"], table["bbox"]))
  139. for text_box in list_text_boxes:
  140. if text_box not in obj_in_table:
  141. obj_list.append(_Sentence(text_box.get_text(), text_box.bbox))
  142. return obj_list
  143. except:
  144. traceback.print_exc()
  145. return [-8]
  146. except Exception as e:
  147. logging.info("image_preprocess error")
  148. print("image_preprocess", traceback.print_exc())
  149. return [-1]
  150. @get_memory_info.memory_decorator
  151. def picture2text(path, html=False):
  152. logging.info("into picture2text")
  153. try:
  154. # 判断图片中表格
  155. img = cv2.imread(path)
  156. if img is None:
  157. return [-3]
  158. text = image_process(img, path)
  159. if judge_error_code(text):
  160. return text
  161. if html:
  162. text = add_div(text)
  163. return [text]
  164. except Exception as e:
  165. logging.info("picture2text error!")
  166. print("picture2text", traceback.print_exc())
  167. return [-1]
  168. def get_best_predict_size(image_np, times=64):
  169. sizes = []
  170. for i in range(1, 100):
  171. if i*times <= 1300:
  172. sizes.append(i*times)
  173. sizes.sort(key=lambda x: x, reverse=True)
  174. min_len = 10000
  175. best_height = sizes[0]
  176. for height in sizes:
  177. if abs(image_np.shape[0] - height) < min_len:
  178. min_len = abs(image_np.shape[0] - height)
  179. best_height = height
  180. min_len = 10000
  181. best_width = sizes[0]
  182. for width in sizes:
  183. if abs(image_np.shape[1] - width) < min_len:
  184. min_len = abs(image_np.shape[1] - width)
  185. best_width = width
  186. return best_height, best_width
  187. def get_best_predict_size2(image_np, threshold=3000):
  188. h, w = image_np.shape[:2]
  189. scale = threshold / max(h, w)
  190. h = int(h * scale)
  191. w = int(w * scale)
  192. return h, w
  193. class ImageConvert:
  194. def __init__(self, path, unique_type_dir):
  195. from format_convert.convert_tree import _Document
  196. self._doc = _Document(path)
  197. self.path = path
  198. self.unique_type_dir = unique_type_dir
  199. def init_package(self):
  200. # 各个包初始化
  201. try:
  202. with open(self.path, "rb") as f:
  203. self.image = f.read()
  204. except:
  205. logging.info("cannot open image!")
  206. traceback.print_exc()
  207. self._doc.error_code = [-3]
  208. def convert(self):
  209. from format_convert.convert_tree import _Page, _Image
  210. self.init_package()
  211. if self._doc.error_code is not None:
  212. return
  213. _page = _Page(None, 0)
  214. _image = _Image(self.image, self.path)
  215. _page.add_child(_image)
  216. self._doc.add_child(_page)
  217. def get_html(self):
  218. try:
  219. self.convert()
  220. except:
  221. traceback.print_exc()
  222. self._doc.error_code = [-1]
  223. if self._doc.error_code is not None:
  224. return self._doc.error_code
  225. return self._doc.get_html()