# convert_image.py

import logging
import os
import sys
sys.path.append(os.path.dirname(__file__) + "/../")
from pdfminer.layout import LTLine
import traceback
import cv2
from format_convert import get_memory_info
from format_convert.utils import judge_error_code, add_div, LineTable, get_table_html
from format_convert.table_correct import get_rotated_image
from format_convert.convert_need_interface import from_otr_interface, from_ocr_interface
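

# image_process: convert one image into structured objects.
# Pipeline: skew correction (get_rotated_image) -> resize + OTR table-line
# detection (from_otr_interface) -> OCR (from_ocr_interface) -> y-axis
# clustering of text boxes -> table assembly via LineTable, returning a list
# of _Table/_Sentence objects, or an error-code list such as [-1]/[-8].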
def image_process(image_np, image_path, use_ocr=True):
    from format_convert.convert_tree import _Table, _Sentence

    def get_cluster(t_list, b_list, axis):
        zip_list = list(zip(t_list, b_list))
        if len(zip_list) > 0:
            zip_list.sort(key=lambda x: x[1][axis][1])
        cluster_list = []
        margin = 5
        for text, bbox in zip_list:
            _find = 0
            for cluster in cluster_list:
                if abs(cluster[1] - bbox[axis][1]) <= margin:
                    cluster[0].append([text, bbox])
                    cluster[1] = bbox[axis][1]
                    _find = 1
                    break
            if not _find:
                cluster_list.append([[[text, bbox]], bbox[axis][1]])
        new_text_list = []
        new_bbox_list = []
        for cluster in cluster_list:
            # print("=============convert_image")
            # print("cluster_list", cluster)
            center_y = 0
            for text, bbox in cluster[0]:
                center_y += bbox[axis][1]
            center_y = int(center_y / len(cluster[0]))
            for text, bbox in cluster[0]:
                bbox[axis][1] = center_y
                new_text_list.append(text)
                new_bbox_list.append(bbox)
            # print("cluster_list", cluster)
        return new_text_list, new_bbox_list
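
    # merge_textbox: merge OCR text boxes whose top (bbox[1]) and bottom
    # (bbox[3]) edges differ by at most `threshold` pixels, i.e. boxes lying on
    # the same text line, concatenating their text left-to-right. Boxes that
    # already belong to a table cell (in_objs) are skipped as merge seeds.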
    def merge_textbox(textbox_list, in_objs):
        delete_obj = []
        threshold = 5
        for k in range(len(textbox_list)):
            tb1 = textbox_list[k]
            if tb1 not in in_objs and tb1 not in delete_obj:
                for m in range(k + 1, len(textbox_list)):
                    tb2 = textbox_list[m]
                    if abs(tb1.bbox[1] - tb2.bbox[1]) <= threshold \
                            and abs(tb1.bbox[3] - tb2.bbox[3]) <= threshold:
                        if tb1.bbox[0] <= tb2.bbox[0]:
                            tb1.text = tb1.text + tb2.text
                        else:
                            tb1.text = tb2.text + tb1.text
                        tb1.bbox[0] = min(tb1.bbox[0], tb2.bbox[0])
                        tb1.bbox[2] = max(tb1.bbox[2], tb2.bbox[2])
                        delete_obj.append(tb2)
        for _obj in delete_obj:
            if _obj in textbox_list:
                textbox_list.remove(_obj)
        return textbox_list
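
    # Main body of image_process: the nested helpers above are used for y-axis
    # clustering and same-line textbox merging.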
    logging.info("into image_preprocess")
    try:
        # Correct image skew and write the result back to the original image path
        print("image_process", image_path)
        g_r_i = get_rotated_image(image_np, image_path)
        if g_r_i == [-1]:
            return [-1]

        # OTR needs a resized image, written to a separate path
        image_np = cv2.imread(image_path)
        if image_np is None:
            return []
        best_h, best_w = get_best_predict_size(image_np)
        image_resize = cv2.resize(image_np, (best_w, best_h), interpolation=cv2.INTER_AREA)
        # image_resize_path = image_path[:-4] + "_resize" + image_path[-4:]
        image_resize_path = image_path.split(".")[0] + "_resize." + image_path.split(".")[-1]
        cv2.imwrite(image_resize_path, image_resize)

        # Call the OTR model interface (table line detection)
        with open(image_resize_path, "rb") as f:
            image_bytes = f.read()
        list_line = from_otr_interface(image_bytes)
        if judge_error_code(list_line):
            return list_line

        # Map the bboxes detected on the resized image back to the original scale
        ratio = (image_np.shape[0] / best_h, image_np.shape[1] / best_w)
        for i in range(len(list_line)):
            point = list_line[i]
            list_line[i] = [int(point[0] * ratio[1]), int(point[1] * ratio[0]),
                            int(point[2] * ratio[1]), int(point[3] * ratio[0])]

        # Call the OCR model interface
        with open(image_path, "rb") as f:
            image_bytes = f.read()
        text_list, bbox_list = from_ocr_interface(image_bytes, True)
        if judge_error_code(text_list):
            return text_list

        # Cluster the text boxes on their y coordinates
        text_list, bbox_list = get_cluster(text_list, bbox_list, 0)
        # text_list, bbox_list = get_cluster(text_list, bbox_list, 1)
        text_list, bbox_list = get_cluster(text_list, bbox_list, 2)
        # text_list, bbox_list = get_cluster(text_list, bbox_list, 3)

        # Build tables with the existing LineTable method
        try:
            from format_convert.convert_tree import TableLine
            list_lines = []
            for line in list_line:
                list_lines.append(LTLine(1, (line[0], line[1]), (line[2], line[3])))

            from format_convert.convert_tree import TextBox
            list_text_boxes = []
            for i in range(len(bbox_list)):
                bbox = bbox_list[i]
                b_text = text_list[i]
                list_text_boxes.append(TextBox([bbox[0][0], bbox[0][1],
                                                bbox[2][0], bbox[2][1]], b_text))

            lt = LineTable()
            tables, obj_in_table, _ = lt.recognize_table(list_text_boxes, list_lines, False)

            # Merge textboxes that sit on the same line
            list_text_boxes = merge_textbox(list_text_boxes, obj_in_table)

            obj_list = []
            for table in tables:
                obj_list.append(_Table(table["table"], table["bbox"]))
            for text_box in list_text_boxes:
                if text_box not in obj_in_table:
                    obj_list.append(_Sentence(text_box.get_text(), text_box.bbox))
            return obj_list
        except:
            traceback.print_exc()
            return [-8]
    except Exception as e:
        logging.info("image_preprocess error")
        print("image_preprocess", traceback.print_exc())
        return [-1]
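

# picture2text: top-level entry point for a single image file. Reads the image,
# delegates to image_process, and optionally wraps the result with add_div for
# HTML output. Errors are signalled by returning code lists such as [-1]/[-3].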
@get_memory_info.memory_decorator
def picture2text(path, html=False):
    logging.info("into picture2text")
    try:
        # Detect tables and text in the image
        img = cv2.imread(path)
        if img is None:
            return [-3]
        text = image_process(img, path)
        if judge_error_code(text):
            return text
        if html:
            text = add_div(text)
        return [text]
    except Exception as e:
        logging.info("picture2text error!")
        print("picture2text", traceback.print_exc())
        return [-1]
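

# get_best_predict_size: among multiples of `times` (default 64) up to 3000,
# pick the values closest to the image height and width; the result is used to
# resize the image before calling the OTR interface.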
def get_best_predict_size(image_np, times=64):
    sizes = []
    for i in range(1, 100):
        if i * times <= 3000:
            sizes.append(i * times)
    sizes.sort(reverse=True)

    min_len = 10000
    best_height = sizes[0]
    for height in sizes:
        if abs(image_np.shape[0] - height) < min_len:
            min_len = abs(image_np.shape[0] - height)
            best_height = height

    min_len = 10000
    best_width = sizes[0]
    for width in sizes:
        if abs(image_np.shape[1] - width) < min_len:
            min_len = abs(image_np.shape[1] - width)
            best_width = width

    return best_height, best_width
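

# ImageConvert wraps a single image file in the _Document/_Page/_Image tree used
# elsewhere in format_convert; get_html() drives the conversion and returns
# either the rendered HTML or the document's error code list.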
class ImageConvert:
    def __init__(self, path, unique_type_dir):
        from format_convert.convert_tree import _Document
        self._doc = _Document(path)
        self.path = path
        self.unique_type_dir = unique_type_dir

    def init_package(self):
        # Initialize the package: read the raw image bytes
        try:
            with open(self.path, "rb") as f:
                self.image = f.read()
        except:
            logging.info("cannot open image!")
            traceback.print_exc()
            self._doc.error_code = [-3]

    def convert(self):
        from format_convert.convert_tree import _Page, _Image
        self.init_package()
        if self._doc.error_code is not None:
            return
        _page = _Page(None, 0)
        _image = _Image(self.image, self.path)
        _page.add_child(_image)
        self._doc.add_child(_page)

    def get_html(self):
        try:
            self.convert()
        except:
            traceback.print_exc()
            self._doc.error_code = [-1]
        if self._doc.error_code is not None:
            return self._doc.error_code
        return self._doc.get_html()
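

if __name__ == "__main__":
    # Minimal usage sketch (not part of the original module): convert one image
    # to HTML with picture2text. The file name below is a placeholder, and the
    # OCR/OTR services behind from_ocr_interface/from_otr_interface must be
    # reachable for the call to succeed.
    result = picture2text("example.png", html=True)
    if judge_error_code(result):
        print("conversion failed, error code:", result)
    else:
        print(result)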