# convert_image.py
# encoding=utf8
import copy
import inspect
import io
import logging
import os
import re
import sys
import time
from glob import glob
import requests
import numpy as np
from PIL import Image
sys.path.append(os.path.dirname(__file__) + "/../")
from pdfminer.layout import LTLine
import traceback
import cv2
from isr.pre_process import count_red_pixel
from format_convert.utils import judge_error_code, add_div, LineTable, get_table_html, get_logger, log, \
    memory_decorator, pil_resize, np2bytes, ocr_cant_read, get_garble_code2, line_iou, image_rotate
from format_convert.convert_need_interface import from_otr_interface, from_ocr_interface, from_gpu_interface_redis, \
    from_idc_interface, from_isr_interface
from format_convert.table_correct import get_rotated_image
from botr.extract_table import get_table
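
# Pipeline overview (summary of image_process below, for orientation): slice very tall
# images, cap the overall resolution, strip red seals (isr), run OCR, correct the
# orientation with the idc model when the OCR output looks unreadable, detect table
# lines (otr), assemble bordered tables, detect borderless tables (botr), then merge
# the remaining same-row text boxes into sentences.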


def image_process(image_np, image_path, is_from_pdf=False, is_from_docx=False,
                  b_table_from_text=False, pdf_obj_list=[], pdf_layout_size=(), is_reverse=False):
    from format_convert.convert_tree import _Table, _Sentence

    def get_cluster(t_list, b_list, axis):
        zip_list = list(zip(t_list, b_list))
        if len(zip_list) == 0:
            return t_list, b_list
        if len(zip_list[0]) > 0:
            zip_list.sort(key=lambda x: x[1][axis][1])
        cluster_list = []
        margin = 5
        for text, bbox in zip_list:
            _find = 0
            for cluster in cluster_list:
                if abs(cluster[1] - bbox[axis][1]) <= margin:
                    cluster[0].append([text, bbox])
                    cluster[1] = bbox[axis][1]
                    _find = 1
                    break
            if not _find:
                cluster_list.append([[[text, bbox]], bbox[axis][1]])
        new_text_list = []
        new_bbox_list = []
        for cluster in cluster_list:
            # print("=============convert_image")
            # print("cluster_list", cluster)
            center_y = 0
            for text, bbox in cluster[0]:
                center_y += bbox[axis][1]
            center_y = int(center_y / len(cluster[0]))
            for text, bbox in cluster[0]:
                bbox[axis][1] = center_y
                new_text_list.append(text)
                new_bbox_list.append(bbox)
            # print("cluster_list", cluster)
        return new_text_list, new_bbox_list

    def merge_textbox(textbox_list, in_objs):
        delete_obj = []
        threshold = 5
        textbox_list.sort(key=lambda x: x.bbox[0])
        for k in range(len(textbox_list)):
            tb1 = textbox_list[k]
            if tb1 not in in_objs and tb1 not in delete_obj:
                for m in range(k+1, len(textbox_list)):
                    tb2 = textbox_list[m]
                    if tb2 in in_objs:
                        continue
                    if abs(tb1.bbox[1]-tb2.bbox[1]) <= threshold \
                            and abs(tb1.bbox[3]-tb2.bbox[3]) <= threshold:
                        if tb1.bbox[0] <= tb2.bbox[0]:
                            tb1.text = tb1.text + tb2.text
                        else:
                            tb1.text = tb2.text + tb1.text
                        tb1.bbox[0] = min(tb1.bbox[0], tb2.bbox[0])
                        tb1.bbox[2] = max(tb1.bbox[2], tb2.bbox[2])
                        delete_obj.append(tb2)
        for _obj in delete_obj:
            if _obj in textbox_list:
                textbox_list.remove(_obj)
        return textbox_list
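
    # Example for merge_textbox (illustrative numbers): two non-table boxes whose tops and
    # bottoms differ by at most 5 px, e.g. [10, 100, 50, 120] and [60, 102, 90, 121], are
    # merged into one textbox spanning x = 10..90 with the texts concatenated left to right.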

    def resize_process(_image_np):
        # Overall resolution cap
        threshold = 2048
        if _image_np.shape[0] > threshold or _image_np.shape[1] > threshold:
            h, w = get_best_predict_size2(_image_np, threshold=threshold)
            log("global image resize " + str(_image_np.shape[:2]) + " -> " + str(h) + "," + str(w))
            _image_np = pil_resize(_image_np, h, w)
        return _image_np

    def idc_process(_image_np, return_angle=False):
        # Skew correction used to be done here and written back to the original image path
        # print("image_process", image_path)
        # g_r_i = get_rotated_image(_image_np, image_path)
        # if judge_error_code(g_r_i):
        #     if is_from_docx:
        #         return []
        #     else:
        #         return g_r_i
        # _image_np = cv2.imread(image_path)
        # if _image_np is None:
        #     return []
        # return _image_np
        # if _image_np is None:
        #     return []

        # Skew correction with the idc model
        h, w = get_best_predict_size2(_image_np, 1080)
        image_resize = pil_resize(_image_np, h, w)
        # image_resize_path = image_path.split(".")[0] + "_resize_idc." + image_path.split(".")[-1]
        # cv2.imwrite(image_resize_path, image_resize)
        # with open(image_resize_path, "rb") as f:
        #     image_bytes = f.read()
        image_bytes = np2bytes(image_resize)
        angle = from_idc_interface(image_bytes)
        log('idc_process angle ' + str(angle))
        if judge_error_code(angle):
            if return_angle:
                if is_from_docx:
                    return [], []
                else:
                    return angle, angle
            else:
                if is_from_docx:
                    return []
                else:
                    return angle
        # Rotate by the detected angle
        # _image_pil = Image.fromarray(_image_np)
        # _image_np = np.array(_image_pil.rotate(angle, expand=1))
        _image_np = image_rotate(_image_np, angle)
        # Write back to disk
        # idc_path = image_path.split(".")[0] + "_idc." + image_path.split(".")[-1]
        # cv2.imwrite(idc_path, image_np)
        if return_angle:
            return _image_np, angle
        return _image_np

    def isr_process(_image_np):
        log("isr_process image shape " + str(_image_np.shape))
        image_np_copy = copy.deepcopy(_image_np)
        # Remove seals with the isr model
        _isr_time = time.time()
        if count_red_pixel(_image_np):
            # Only run the model when there are enough red pixels
            image_bytes = np2bytes(_image_np)
            _image_np = from_isr_interface(image_bytes)
            if judge_error_code(_image_np):
                if is_from_docx:
                    return []
                else:
                    return _image_np
            # [1] means no seal was detected; fall back to the original image
            if isinstance(_image_np, list) and _image_np == [1]:
                log("no seals detected!")
                _image_np = image_np_copy
        log("isr total time " + str(time.time()-_isr_time))
        return _image_np

    def ocr_process(_image_np, _threshold=2048):
        log("ocr_process image shape " + str(_image_np.shape))
        # OCR runs out of memory on oversized images, so resize them first.
        # Large images are shrunk proportionally and small ones are kept as-is;
        # stretching everything to a fixed size such as 1024 blows up GPU memory.
        ratio = (1, 1)
        if _image_np.shape[0] > _threshold or _image_np.shape[1] > _threshold:
            origin_h, origin_w = _image_np.shape[:2]
            best_h, best_w = get_best_predict_size2(_image_np, _threshold)
            _image_np = pil_resize(_image_np, best_h, best_w)
            log("ocr_process image resize " + str(_image_np.shape))
            ratio = (origin_h/best_h, origin_w/best_w)
        # Lock OCR for large images to avoid running out of GPU memory
        # if _image_np.shape[0] >= 1024 and _image_np.shape[1] >= 1024:
        #     file_lock = True
        # else:
        #     file_lock = False

        # Call the OCR model interface
        image_bytes = np2bytes(_image_np)
        text_list, bbox_list = from_ocr_interface(image_bytes, is_table=1)
        if judge_error_code(text_list):
            return text_list, text_list
        # Scale bboxes back to the original size: width ratio for x, height ratio for y
        for i in range(len(bbox_list)):
            point = bbox_list[i]
            bbox_list[i] = [[int(point[0][0]*ratio[1]), int(point[0][1]*ratio[0])],
                            [int(point[1][0]*ratio[1]), int(point[1][1]*ratio[0])],
                            [int(point[2][0]*ratio[1]), int(point[2][1]*ratio[0])],
                            [int(point[3][0]*ratio[1]), int(point[3][1]*ratio[0])]]

        # Remove watermark text, judged by whether the detected box is an axis-aligned rectangle
        temp_text_list = []
        temp_bbox_list = []
        water_mark_dict = {}
        for i in range(len(bbox_list)):
            bbox = bbox_list[i]
            text = text_list[i]
            if len(re.findall('[\u4e00-\u9fa5]', text)) == len(text):
                if (abs(bbox[0][1] - bbox[1][1]) <= 2 and abs(bbox[2][1] - bbox[3][1]) <= 2) \
                        or (abs(bbox[0][0] - bbox[3][0]) <= 4 and abs(bbox[2][0] - bbox[1][0]) <= 4):
                    temp_text_list.append(text)
                    temp_bbox_list.append(bbox)
                else:
                    if text in water_mark_dict.keys():
                        water_mark_dict[text] += [bbox]
                    else:
                        water_mark_dict[text] = [bbox]
            else:
                temp_text_list.append(text)
                temp_bbox_list.append(bbox)
        # Only treat a text as a watermark if it occurs often enough
        for text in water_mark_dict.keys():
            bbox_list = water_mark_dict.get(text)
            if len(bbox_list) < 3:
                for bbox in bbox_list:
                    temp_text_list.append(text)
                    temp_bbox_list.append(bbox)
        text_list = temp_text_list
        bbox_list = temp_bbox_list
        return text_list, bbox_list

    def otr_process(_image_np):
        log("otr_process image shape " + str(_image_np.shape))
        # The otr model detects table lines; the image must be resized to the model input size
        best_h, best_w = get_best_predict_size(_image_np)
        image_resize = pil_resize(_image_np, best_h, best_w)
        # image_resize_path = image_path.split(".")[0] + "_resize_otr." + image_path.split(".")[-1]
        # cv2.imwrite(image_resize_path, image_resize)
        # Call the otr model interface
        # with open(image_resize_path, "rb") as f:
        #     image_bytes = f.read()
        image_bytes = np2bytes(image_resize)
        list_line = from_otr_interface(image_bytes, is_from_pdf)
        if judge_error_code(list_line):
            if is_from_docx:
                return []
            else:
                return list_line
        # Scale the bboxes from the otr resize back to the original size
        start_time = time.time()
        ratio = (_image_np.shape[0]/best_h, _image_np.shape[1]/best_w)
        for i in range(len(list_line)):
            point = list_line[i]
            list_line[i] = [int(point[0]*ratio[1]), int(point[1]*ratio[0]),
                            int(point[2]*ratio[1]), int(point[3]*ratio[0])]
        log("otr resize bbox recover " + str(time.time()-start_time))
        return list_line
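
    # Worked example for the bbox rescaling above (illustrative numbers): a 2560x1280 (h x w)
    # image gets a 1280x1280 model input from get_best_predict_size, so
    # ratio = (2560/1280, 1280/1280) = (2.0, 1.0); a detected endpoint at x=100, y=200 in the
    # resized image maps back to x = 100*ratio[1] = 100 and y = 200*ratio[0] = 400.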

    def botr_process(_image_np, table_list2, text_list2, box_list2, text_box_list2, obj_in_table_list2,
                     from_pdf=False, pdf_obj_list=[], pdf_layout_size=()):
        if from_pdf:
            # Cross-check the OCR result against the pdf objects; for now the pdf-extracted text is used
            h_ratio = _image_np.shape[0] / pdf_layout_size[1]
            w_ratio = _image_np.shape[1] / pdf_layout_size[0]
            pdf_text_list = []
            pdf_box_list = []
            for obj in pdf_obj_list:
                # pdf coordinates are vertically flipped
                obj.bbox = (obj.bbox[0], pdf_layout_size[1]-obj.bbox[1],
                            obj.bbox[2], pdf_layout_size[1]-obj.bbox[3])
                # Adjust the coordinates by the size ratio of the two pages
                obj.bbox = (obj.bbox[0]*w_ratio, obj.bbox[1]*h_ratio,
                            obj.bbox[2]*w_ratio, obj.bbox[3]*h_ratio)
                # Drop watermark text
                text = re.sub('[\n ]', '', obj.get_text())
                if len(text) == 1 and abs(obj.bbox[0] - obj.bbox[2]) >= 70:
                    continue
                pdf_box_list.append([[int(obj.bbox[0]), int(obj.bbox[3])],
                                     [],
                                     [int(obj.bbox[2]), int(obj.bbox[1])],
                                     []])
                pdf_text_list.append(re.sub('[\n]', '', obj.get_text()))
            pdf_text_box_list = get_text_box_obj(pdf_text_list, pdf_box_list)
            text_list2 = pdf_text_list
            box_list2 = pdf_box_list
            text_box_list2 = pdf_text_box_list
        _text_box_list, _table_list, _obj_in_table_list = get_table(_image_np, table_list2, text_list2, box_list2, text_box_list2)
        # Save the borderless-table image for later inspection
        if _table_list:
            try:
                save_b_table(_image_np, text_box_list2, from_pdf)
            except:
                pass
        # print('_text_box_list', _text_box_list)
        # print('_table_list', _table_list)
        if from_pdf:
            text_box_list2 = []
            table_list2 = []
        if _table_list and _text_box_list:
            text_box_list2 += _text_box_list
            text_box_list2 = list(set(text_box_list2))
            # table_list2 += _table_list
            # obj_in_table_list2 = obj_in_table_list2.union(_obj_in_table_list)
        return text_box_list2, _table_list, _obj_in_table_list

    def table_process(list_line, list_text_boxes, _image_np):
        # Build tables with the existing method
        try:
            if list_line:
                # Drop short vertical lines that cut through the middle of a text bbox
                temp_list = []
                for line in list_line:
                    find_cnt = 0
                    if abs(line[0]-line[2]) < abs(line[1]-line[3]) and abs(line[1] - line[3]) <= _image_np.shape[0] / 20:
                        for t_obj in list_text_boxes:
                            # if not (t_obj.bbox[1] <= line[1] <= t_obj.bbox[3] or t_obj.bbox[1] <= line[3] <= t_obj.bbox[3]):
                            #     continue
                            if line_iou([[t_obj.bbox[1], 0], [t_obj.bbox[3], 0]], [[line[1], 0], [line[3], 0]]) < 0.3:
                                continue
                            if abs(t_obj.bbox[0]-t_obj.bbox[2])/5 + min(t_obj.bbox[0], t_obj.bbox[2]) <= line[0] <= abs(t_obj.bbox[0]-t_obj.bbox[2])/5*4 + min(t_obj.bbox[0], t_obj.bbox[2]) and (t_obj.bbox[0]-t_obj.bbox[2]) <= 60:
                                # print('match', line[0], t_obj.bbox[0], t_obj.bbox[2], t_obj.get_text())
                                find_cnt += 1
                                if find_cnt >= 2:
                                    break
                    if find_cnt >= 2:
                        continue
                    temp_list.append(line)
                list_line = temp_list

                from format_convert.convert_tree import TableLine
                list_lines = []
                for line in list_line:
                    list_lines.append(LTLine(1, (line[0], line[1]), (line[2], line[3])))
                lt = LineTable()
                tables, obj_in_table, _, connect_textbox_list = lt.recognize_table(list_text_boxes, list_lines,
                                                                                   sourceP_LB=False, splited=False,
                                                                                   from_pdf=is_from_pdf,
                                                                                   is_reverse=is_reverse)
                # Some textboxes need to be split
                if connect_textbox_list:
                    list_text_boxes = table_textbox_split(_image_np, connect_textbox_list, list_text_boxes)
                    # Re-run table recognition with the new textboxes
                    tables, obj_in_table, _, connect_textbox_list = lt.recognize_table(list_text_boxes, list_lines,
                                                                                       sourceP_LB=False, splited=True,
                                                                                       from_pdf=is_from_pdf,
                                                                                       is_reverse=is_reverse)
                if not tables:
                    return list_text_boxes, tables, obj_in_table
                return list_text_boxes, tables, obj_in_table
            else:
                return list_text_boxes, [], set()
        except:
            traceback.print_exc()
            return [-8], [-8], [-8]

    def slice_process(_image_np):
        slice_flag = need_image_slice(image_np)
        log("need_image_slice " + str(slice_flag) + " " + str(image_np.shape))
        _image_np_list = [_image_np]
        if slice_flag:
            # Split the tall image
            _image_np_list = image_slice_new(_image_np)
            angle_dict = {}
            for im in _image_np_list:
                _, angle = idc_process(im, return_angle=True)
                if angle in [0, 360]:
                    angle = 0
                if angle in angle_dict.keys():
                    angle_dict[angle] += 1
                else:
                    angle_dict[angle] = 1
            # idc is not very accurate; if 0 degrees shows up, use it directly
            if 0 in angle_dict.keys():
                log('image_slice 0 in angle_dict')
                angle = 0
            else:
                angle_list = [[key, value] for key, value in angle_dict.items()]
                angle_list.sort(key=lambda x: x[1])
                log('image_slice angle_list ' + str(angle_list))
                angle = angle_list[-1][0]
            for i in range(len(_image_np_list)):
                _image_np_list[i] = image_rotate(_image_np_list[i], angle)
            if angle in [180]:
                _image_np_list.reverse()
            if len(_image_np_list) < 1:
                log("image_slice failed!")
                _image_np_list = [_image_np]
        return _image_np_list

    def get_text_box_obj(_text_list, _bbox_list):
        from format_convert.convert_tree import TextBox
        _text_box_list = []
        for i in range(len(_bbox_list)):
            bbox = _bbox_list[i]
            b_text = _text_list[i]
            _text_box_list.append(TextBox([bbox[0][0], bbox[0][1],
                                           bbox[2][0], bbox[2][1]], b_text))
        return _text_box_list

    def save_b_table(image_np2, text_box_list2, from_pdf=False):
        _start_time = time.time()
        _path = '/data/fangjiasheng/format_conversion_maxcompute/save_b_table'
        # _path = 'D:/Project/format_conversion_maxcompute/save_b_table'
        max_index = 20000
        if os.path.exists(_path):
            file_list = glob(_path + '/*')
            if file_list:
                file_index_list = [int(re.split('[/.\\\\-]', x)[-3]) for x in file_list]
                file_index_list.sort(key=lambda x: x)
                index = file_index_list[-1] + 1
            else:
                index = 0
            if index > max_index:
                return
            # File md5
            from format_convert import _global
            _md5 = _global.get("md5")
            _image_path = _path + '/' + str(index) + '-' + str(_md5) + '.png'
            cv2.imwrite(_image_path, image_np2)
            log('save b_table image success!')
            # if from_pdf:
            #     _file_path = _path + '/' + str(_md5) + '-' + str(index) + '.txt'
            #     new_text_box_list2 = [str(x) + '\n' for x in text_box_list2]
            #     with open(_file_path, 'w') as f:
            #         f.writelines(new_text_box_list2)
            #     log('save b_table txt success!')
        log('save_b_table cost: ' + str(time.time()-_start_time))

    def table_textbox_split(image_np2, connect_textbox_list, textbox_list):
        """
        Text from two table cells was recognized by OCR as one textbox;
        it has to be split before it can be placed into the table correctly.
        :return:
        """
        split_bbox_list = []
        split_text_list = []
        splited_textbox_list = []
        for textbox in connect_textbox_list:
            bbox = textbox.bbox
            bbox = [[bbox[0], bbox[1]], [], [bbox[2], bbox[3]], []]
            sub_image_np = image_np2[int(bbox[0][1]):int(bbox[2][1]), int(bbox[0][0]):int(bbox[2][0]), :]
            split_index_list = []
            # Scan the image columns from left to right
            for i in range(5, sub_image_np.shape[1]-5):
                # A table separator line: the whole column is dark pixels
                if np.where(sub_image_np[:, i, 0] < 200)[0].size >= sub_image_np.shape[0]:
                    split_index_list.append(i)
            # Check the gap between neighbouring lines and deduplicate
            if len(split_index_list) > 1:
                last_index = split_index_list[0]
                temp_list = []
                delete_list = []
                for index in split_index_list[1:]:
                    if index in delete_list:
                        continue
                    if index - last_index <= 5:
                        delete_list.append(index)
                    else:
                        last_index = index
                        temp_list.append(last_index)
                split_index_list = temp_list
            # Zero, or two or more, separators means something is wrong; skip this textbox
            if len(split_index_list) == 0 or len(split_index_list) >= 2:
                # print('len(split_index_list)', len(split_index_list), split_index_list)
                continue
            else:
                # Split the image at the separator index and re-run OCR
                split_index_list.insert(0, 0)
                print('split_index_list1', split_index_list)
                for _i, index in enumerate(split_index_list):
                    if _i == len(split_index_list) - 1:
                        split_image_np = sub_image_np[:, index:, :]
                        split_bbox_list.append([[bbox[0][0]+index, bbox[0][1]], [], [bbox[2][0], bbox[2][1]], []])
                    else:
                        next_index = split_index_list[_i+1]
                        split_image_np = sub_image_np[:, index:next_index, :]
                        split_bbox_list.append([[bbox[0][0]+index, bbox[0][1]], [], [bbox[0][0]+next_index, bbox[2][1]], []])
                    # OCR the split piece
                    split_image_bytes = np2bytes(split_image_np)
                    text_list2, bbox_list2 = from_ocr_interface(split_image_bytes, is_table=1, only_rec=1)
                    # print('text_list2', text_list2)
                    # print('bbox_list2', split_bbox_list)
                    if judge_error_code(text_list2):
                        text2 = ''
                    else:
                        if text_list2:
                            text2 = text_list2[0]
                        else:
                            text2 = ''
                    split_text_list.append(text2)
                splited_textbox_list.append(textbox)
        if split_text_list and split_bbox_list:
            split_textbox_list = get_text_box_obj(split_text_list, split_bbox_list)
            for tb in splited_textbox_list:
                if tb in textbox_list:
                    textbox_list.remove(tb)
            textbox_list += split_textbox_list
        return textbox_list

    log("into image_preprocess")
    try:
        if image_np is None:
            log("image_preprocess image_np is None")
            return []
        if image_np.shape[0] <= 20 or image_np.shape[1] <= 20:
            log('image_np.shape[0] <= 20 or image_np.shape[1] <= 20')
            return []

        if not b_table_from_text:
            # Check whether the tall image needs to be sliced
            idc_flag = False
            image_np_list = slice_process(image_np)
            if len(image_np_list) > 1:
                idc_flag = True
            reverse_flag = 0
            table_textbox_list = []
            for image_np in image_np_list:
                # Overall resolution cap
                image_np = resize_process(image_np)
                # Seal removal
                image_np = isr_process(image_np)
                if isinstance(image_np, list):
                    return image_np
                # Text recognition
                text_list, box_list = ocr_process(image_np)
                if judge_error_code(text_list):
                    return text_list
                # Check whether OCR read the image correctly
                # print('ocr_cant_read(text_list, box_list)', ocr_cant_read(text_list, box_list), idc_flag, text_list)
                if ocr_cant_read(text_list, box_list) and not idc_flag:
                    # Orientation classification
                    image_np, angle = idc_process(image_np, return_angle=True)
                    if isinstance(image_np, list):
                        return image_np
                    # If the angle is unchanged, rotate 180 degrees (currently disabled)
                    if angle in [0, 360]:
                        pass
                        # log('ocr_cant_read image_rotate 180')
                        # image_np = image_rotate(image_np, angle=180)
                        # reverse_flag = 1
                        # image_pil = Image.fromarray(image_np)
                        # image_np = np.array(image_pil.rotate(180, expand=1))
                    # cv2.imshow("idc_process", image_np)
                    # cv2.waitKey(0)
                    # Text recognition again
                    text_list1, box_list_1 = ocr_process(image_np)
                    if judge_error_code(text_list1):
                        return text_list1
                    if len(text_list1) > 0 and ocr_cant_read(text_list1, box_list_1) and is_from_pdf:
                        return [-16]
                    # Compare character counts and keep the longer result
                    # print("ocr process", len("".join(text_list)), len("".join(text_list1)))
                    if len("".join(text_list)) < len("".join(text_list1)):
                        text_list = text_list1
                        box_list = box_list_1
                # Table line detection
                line_list = otr_process(image_np)
                if judge_error_code(line_list):
                    return line_list
                # Build TextBox objects
                text_box_list = get_text_box_obj(text_list, box_list)
                # for t in text_box_list:
                #     print('text_box0', t.get_text())
                # Table construction
                text_box_list, table_list, obj_in_table_list = table_process(line_list, text_box_list, image_np)
                # for t in text_box_list:
                #     print('text_box1', t.get_text())
                # print('table_list', table_list)
                # for t in obj_in_table_list:
                #     print('obj_text_box2', t.get_text())
                if judge_error_code(table_list):
                    return table_list
                # Borderless table recognition
                start_time = time.time()
                text_box_list, b_table_list, b_obj_in_table_list = botr_process(image_np, table_list,
                                                                                text_list, box_list,
                                                                                text_box_list,
                                                                                obj_in_table_list,
                                                                                b_table_from_text,
                                                                                pdf_obj_list,
                                                                                pdf_layout_size,
                                                                                )
                log('botr process cost: ' + str(time.time()-start_time))
                # Merge same-row TextBoxes that are not part of a table
                text_box_list = merge_textbox(text_box_list, obj_in_table_list)
                table_textbox_list.append([table_list, b_table_list, obj_in_table_list, text_box_list])

            if reverse_flag:
                table_textbox_list.reverse()
                for i in range(len(image_np_list)):
                    image_np_list[i] = image_rotate(image_np_list[i], angle=180)
                image_np_list.reverse()
            # index = 0
            # for image_np in image_np_list:
            #     cv2.imshow(str(index) + '.jpg', image_np)
            #     cv2.waitKey(0)
            #     index += 1

            # Object construction
            all_obj_list = []
            _add_y = 0
            for table_list, b_table_list, obj_in_table_list, text_box_list in table_textbox_list:
                obj_list = []
                for table in table_list:
                    _table_bbox = [table["bbox"][0], table["bbox"][1] + _add_y]
                    _table = _Table(table["table"], _table_bbox)
                    obj_list.append(_table)
                for table in b_table_list:
                    _table_bbox = [table["bbox"][0], table["bbox"][1] + _add_y]
                    _table = _Table(table["table"], _table_bbox)
                    obj_list.append(_table)
                for text_box in text_box_list:
                    if text_box not in obj_in_table_list:
                        text_box.bbox[1] += _add_y
                        obj_list.append(_Sentence(text_box.get_text(), text_box.bbox))
                # Adjust y when there are multiple image slices
                if len(image_np_list) > 1:
                    list_y = []
                    for obj in obj_list:
                        obj.y += _add_y
                        list_y.append(obj.y)
                    if len(list_y) > 0:
                        _add_y += max(list_y)
                # Merge
                all_obj_list += obj_list

        # Borderless-table image generated from text
        else:
            all_obj_list = []
            table_list = []
            text_list = []
            box_list = []
            text_box_list = []
            obj_in_table_list = set()
            # Table line detection
            line_list = otr_process(image_np)
            if judge_error_code(line_list):
                return line_list
            # Build TextBox objects
            text_box_list = get_text_box_obj(text_list, box_list)
            # Table construction
            text_box_list, table_list, obj_in_table_list = table_process(line_list, text_box_list, image_np)
            if judge_error_code(table_list):
                return table_list
            # Borderless table recognition
            start_time = time.time()
            text_box_list, table_list, obj_in_table_list = botr_process(image_np, table_list,
                                                                        text_list, box_list,
                                                                        text_box_list,
                                                                        obj_in_table_list,
                                                                        b_table_from_text,
                                                                        pdf_obj_list,
                                                                        pdf_layout_size,
                                                                        )
            log('botr process cost: ' + str(time.time()-start_time))
            # Merge same-row TextBoxes that are not part of a table
            text_box_list = merge_textbox(text_box_list, obj_in_table_list)
            # Object construction
            obj_list = []
            # print('table_list', table_list)
            for table in table_list:
                _table = _Table(table["table"], table["bbox"])
                obj_list.append(_table)
            for text_box in text_box_list:
                if text_box not in obj_in_table_list:
                    obj_list.append(_Sentence(text_box.get_text(), text_box.bbox))
            # Merge
            all_obj_list += obj_list
        return all_obj_list
    except Exception as e:
        log("image_preprocess error")
        traceback.print_exc()
        return [-1]


@memory_decorator
def picture2text(path, html=False):
    log("into picture2text")
    try:
        # Detect tables in the image
        img = cv2.imread(path)
        if img is None:
            return [-3]
        text = image_process(img, path)
        if judge_error_code(text):
            return text
        if html:
            text = add_div(text)
        return [text]
    except Exception as e:
        log("picture2text error!")
        print("picture2text", traceback.print_exc())
        return [-1]
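
# Usage sketch for picture2text (the path below is hypothetical):
#   result = picture2text("/tmp/sample.jpg", html=True)
#   # returns [text] on success (html when html=True), or an error code list such as [-3] or [-1]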


def get_best_predict_size(image_np, times=64):
    sizes = []
    for i in range(1, 100):
        if i*times <= 1300:
            sizes.append(i*times)
    sizes.sort(key=lambda x: x, reverse=True)
    min_len = 10000
    best_height = sizes[0]
    for height in sizes:
        if abs(image_np.shape[0] - height) < min_len:
            min_len = abs(image_np.shape[0] - height)
            best_height = height
    min_len = 10000
    best_width = sizes[0]
    for width in sizes:
        if abs(image_np.shape[1] - width) < min_len:
            min_len = abs(image_np.shape[1] - width)
            best_width = width
    return best_height, best_width


def get_best_predict_size2(image_np, threshold=3000):
    h, w = image_np.shape[:2]
    scale = threshold / max(h, w)
    h = int(h * scale)
    w = int(w * scale)
    return h, w
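
# Example for get_best_predict_size2 (illustrative numbers): a 4000x3000 image with
# threshold=2048 gives scale = 2048 / 4000 = 0.512, so the returned size is (2048, 1536);
# the longer side is capped at the threshold and the aspect ratio is preserved.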


def image_slice(image_np):
    """
    Slice the image if the height is too large.
    :return:
    """
    _sum = np.average(image_np, axis=1)
    list_white_line = []
    list_ave = list(_sum)
    for _i in range(len(list_ave)):
        if (list_ave[_i] > 250).all():
            list_white_line.append(_i)
    set_white_line = set(list_white_line)
    width = image_np.shape[1]
    height = image_np.shape[0]
    list_images = []
    _begin = 0
    _end = 0
    while 1:
        if _end > height:
            break
        _end += width
        while 1:
            if _begin in set_white_line:
                break
            if _begin > height:
                break
            _begin += 1
        _image = image_np[_begin:_end, ...]
        list_images.append(_image)
        _begin = _end
    log("image_slice into %d parts" % (len(list_images)))
    return list_images


def image_slice_new(image_np):
    """
    Split a very tall image into page-sized parts.
    :return:
    """
    height, width = image_np.shape[:2]
    image_origin = copy.deepcopy(image_np)

    # Remove black borders
    image_np = remove_black_border(image_np)
    # 1. Convert to grayscale
    image_np = cv2.cvtColor(image_np, cv2.COLOR_BGR2GRAY)
    # 2. Binarize
    ret, binary = cv2.threshold(image_np, 125, 255, cv2.THRESH_BINARY_INV)
    # 3. Kernel for the dilation/erosion operations
    kernal = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
    # 4. Dilate once to make the contours stand out
    dilation = cv2.dilate(binary, kernal, iterations=1)
    # dilation = np.add(np.int0(np.full(dilation.shape, 255)), -1 * np.int0(dilation))
    # dilation = np.uint8(dilation)
    # cv2.namedWindow("dilation", 0)
    # cv2.resizeWindow("dilation", 1000, 800)
    # cv2.imshow("dilation", dilation)
    # cv2.waitKey(0)
    # cv2.imwrite("error.jpg", dilation)

    # Predefined cut positions
    slice_time = height // width
    slice_index_list = []
    for i in range(slice_time):
        if i < slice_time-1:
            slice_index = width + i * width
        else:
            slice_index = height
        slice_index_list.append(slice_index)

    # Around each predefined cut, look for a suitable actual cut position
    max_distance = int(width / 4)
    real_slice_index_list = []
    for i in range(len(slice_index_list)):
        slice_index = slice_index_list[i]
        if i == len(slice_index_list) - 1:
            real_slice_index_list.append(int(slice_index))
            continue
        sub_dilation = dilation[slice_index-max_distance:slice_index+max_distance, :]
        # Average per row
        width_avg = np.average(np.float32(sub_dilation), axis=1)
        # Take the row with the smallest average (the emptiest row)
        width_min_avg_index = np.argsort(width_avg, axis=0)[0]
        # width_min_avg = width_avg[width_min_avg_index] + slice_index - max_distance
        width_min_avg = width_min_avg_index + slice_index - max_distance
        real_slice_index_list.append(int(width_min_avg))

    # Cut
    image_list = []
    last_slice_index = 0
    print('real_slice_index_list', real_slice_index_list)
    for slice_index in real_slice_index_list:
        image_list.append(image_origin[last_slice_index:slice_index, :, :])
        last_slice_index = slice_index
    # i = 0
    # for im in image_list:
    #     # print(im.shape)
    #     # cv2.imwrite("error" + str(i) + ".jpg", im)
    #     # i += 1
    #     cv2.namedWindow("im", 0)
    #     cv2.resizeWindow("im", 1000, 800)
    #     cv2.imshow("im", im)
    #     cv2.waitKey(0)
    log("image_slice into %d parts" % (len(image_list)))
    return image_list
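
# Worked example for image_slice_new (illustrative numbers): a 5000x1000 image gives
# slice_time = 5000 // 1000 = 5, predefined cuts at rows 1000, 2000, 3000, 4000 and 5000,
# and every non-final cut is moved to the emptiest row within +/- 250 rows of it
# (max_distance = 1000 / 4) before the original image is sliced at those rows.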


def need_image_slice(image_np):
    h, w = image_np.shape[:2]
    # if h > 3000 and w < 2000:
    #     return True
    if 3. <= h / w and w >= 100:
        return True
    return False
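
# Example: a 3500x1000 image has h / w = 3.5 >= 3 and w >= 100, so it is sliced;
# a 2000x1000 image (ratio 2.0) is processed as a single image.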


def remove_black_border(img_np):
    try:
        # Threshold
        threshold = 100
        # Convert to grayscale
        gray = cv2.cvtColor(img_np, cv2.COLOR_RGB2GRAY)
        # Get the image size
        h, w = gray.shape[:2]
        # Cannot handle the case where the black area covers more than half of the image
        rowc = gray[:, int(1/2*w)]
        colc = gray[int(1/2*h), :]
        rowflag = np.argwhere(rowc > threshold)
        colflag = np.argwhere(colc > threshold)
        left, bottom, right, top = rowflag[0, 0], colflag[-1, 0], rowflag[-1, 0], colflag[0, 0]
        if left == right or top == bottom:
            raise ValueError('failed to locate the content area')
        # cv2.imshow('remove_black_border', img_np[left:right, top:bottom, :])
        # cv2.waitKey()
        log('remove_black_border success')
        return img_np[left:right, top:bottom, :]
    except:
        log('remove_black_border failed')
        traceback.print_exc()
        return img_np


class ImageConvert:
    def __init__(self, path, unique_type_dir):
        from format_convert.convert_tree import _Document
        self._doc = _Document(path)
        self.path = path
        self.unique_type_dir = unique_type_dir

    def init_package(self):
        # Initialize the underlying resources
        try:
            with open(self.path, "rb") as f:
                self.image = f.read()
        except:
            log("cannot open image!")
            traceback.print_exc()
            self._doc.error_code = [-3]

    def convert(self):
        from format_convert.convert_tree import _Page, _Image
        self.init_package()
        if self._doc.error_code is not None:
            return
        _page = _Page(None, 0)
        _image = _Image(self.image, self.path)
        _page.add_child(_image)
        self._doc.add_child(_page)

    def get_html(self):
        try:
            self.convert()
        except:
            traceback.print_exc()
            self._doc.error_code = [-1]
        if self._doc.error_code is not None:
            return self._doc.error_code
        return self._doc.get_html()
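
# Usage sketch for ImageConvert (both arguments below are hypothetical paths):
#   converter = ImageConvert("/path/to/image.png", "/tmp/convert_scratch/")
#   html_or_error = converter.get_html()  # document html on success, else an error code list such as [-3]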


def image_process_old(image_np, image_path, is_from_pdf=False, is_from_docx=False, use_ocr=True):
    from format_convert.convert_tree import _Table, _Sentence

    def get_cluster(t_list, b_list, axis):
        zip_list = list(zip(t_list, b_list))
        if len(zip_list) == 0:
            return t_list, b_list
        if len(zip_list[0]) > 0:
            zip_list.sort(key=lambda x: x[1][axis][1])
        cluster_list = []
        margin = 5
        for text, bbox in zip_list:
            _find = 0
            for cluster in cluster_list:
                if abs(cluster[1] - bbox[axis][1]) <= margin:
                    cluster[0].append([text, bbox])
                    cluster[1] = bbox[axis][1]
                    _find = 1
                    break
            if not _find:
                cluster_list.append([[[text, bbox]], bbox[axis][1]])
        new_text_list = []
        new_bbox_list = []
        for cluster in cluster_list:
            # print("=============convert_image")
            # print("cluster_list", cluster)
            center_y = 0
            for text, bbox in cluster[0]:
                center_y += bbox[axis][1]
            center_y = int(center_y / len(cluster[0]))
            for text, bbox in cluster[0]:
                bbox[axis][1] = center_y
                new_text_list.append(text)
                new_bbox_list.append(bbox)
            # print("cluster_list", cluster)
        return new_text_list, new_bbox_list

    def merge_textbox(textbox_list, in_objs):
        delete_obj = []
        threshold = 5
        textbox_list.sort(key=lambda x: x.bbox[0])
        for k in range(len(textbox_list)):
            tb1 = textbox_list[k]
            if tb1 not in in_objs and tb1 not in delete_obj:
                for m in range(k+1, len(textbox_list)):
                    tb2 = textbox_list[m]
                    if tb2 in in_objs:
                        continue
                    if abs(tb1.bbox[1]-tb2.bbox[1]) <= threshold \
                            and abs(tb1.bbox[3]-tb2.bbox[3]) <= threshold:
                        if tb1.bbox[0] <= tb2.bbox[0]:
                            tb1.text = tb1.text + tb2.text
                        else:
                            tb1.text = tb2.text + tb1.text
                        tb1.bbox[0] = min(tb1.bbox[0], tb2.bbox[0])
                        tb1.bbox[2] = max(tb1.bbox[2], tb2.bbox[2])
                        delete_obj.append(tb2)
        for _obj in delete_obj:
            if _obj in textbox_list:
                textbox_list.remove(_obj)
        return textbox_list

    log("into image_preprocess")
    try:
        if image_np is None:
            return []
        # Overall resolution cap
        if image_np.shape[0] > 2000 or image_np.shape[1] > 2000:
            h, w = get_best_predict_size2(image_np, threshold=2000)
            log("global image resize " + str(image_np.shape[:2]) + " -> " + str(h) + "," + str(w))
            image_np = pil_resize(image_np, h, w)

        # Skew correction, written back to the original image path
        # print("image_process", image_path)
        g_r_i = get_rotated_image(image_np, image_path)
        if judge_error_code(g_r_i):
            if is_from_docx:
                return []
            else:
                return g_r_i
        image_np = cv2.imread(image_path)
        image_np_copy = copy.deepcopy(image_np)
        if image_np is None:
            return []

        # if image_np is None:
        #     return []
        #
        # # Skew correction with the idc model
        # image_resize = pil_resize(image_np, 640, 640)
        # image_resize_path = image_path.split(".")[0] + "_resize_idc." + image_path.split(".")[-1]
        # cv2.imwrite(image_resize_path, image_resize)
        #
        # with open(image_resize_path, "rb") as f:
        #     image_bytes = f.read()
        # angle = from_idc_interface(image_bytes)
        # if judge_error_code(angle):
        #     if is_from_docx:
        #         return []
        #     else:
        #         return angle
        # # Rotate by the detected angle
        # image_pil = Image.fromarray(image_np)
        # image_np = np.array(image_pil.rotate(angle, expand=1))
        # # Write back to disk
        # idc_path = image_path.split(".")[0] + "_idc." + image_path.split(".")[-1]
        # cv2.imwrite(idc_path, image_np)

        # Remove seals with the isr model
        _isr_time = time.time()
        if count_red_pixel(image_np):
            # Only run the model when there are enough red pixels
            with open(image_path, "rb") as f:
                image_bytes = f.read()
            image_np = from_isr_interface(image_bytes)
            if judge_error_code(image_np):
                if is_from_docx:
                    return []
                else:
                    return image_np
            # [1] means no seal was detected; fall back to the original image
            if isinstance(image_np, list) and image_np == [1]:
                log("no seals detected!")
                image_np = image_np_copy
            else:
                isr_path = image_path.split(".")[0] + "_isr." + image_path.split(".")[-1]
                cv2.imwrite(isr_path, image_np)
        log("isr total time " + str(time.time()-_isr_time))

        # The otr model detects table lines; resize the image to the model input size and write it to another path
        best_h, best_w = get_best_predict_size(image_np)
        # image_resize = cv2.resize(image_np, (best_w, best_h), interpolation=cv2.INTER_AREA)
        image_resize = pil_resize(image_np, best_h, best_w)
        image_resize_path = image_path.split(".")[0] + "_resize_otr." + image_path.split(".")[-1]
        cv2.imwrite(image_resize_path, image_resize)
        # Call the otr model interface
        with open(image_resize_path, "rb") as f:
            image_bytes = f.read()
        list_line = from_otr_interface(image_bytes, is_from_pdf)
        if judge_error_code(list_line):
            return list_line

        # # Preprocessing
        # if is_from_pdf:
        #     prob = 0.2
        # else:
        #     prob = 0.5
        # with open(image_resize_path, "rb") as f:
        #     image_bytes = f.read()
        # img_new, inputs = table_preprocess(image_bytes, prob)
        # if type(img_new) is list and judge_error_code(img_new):
        #     return img_new
        # log("img_new.shape " + str(img_new.shape))
        #
        # # Call the model inference interface
        # _dict = {"inputs": inputs, "md5": _global.get("md5")}
        # result = from_gpu_interface(_dict, model_type="otr", predictor_type="")
        # if judge_error_code(result):
        #     logging.error("from_gpu_interface failed! " + str(result))
        #     raise requests.exceptions.RequestException
        #
        # pred = result.get("preds")
        # gpu_time = result.get("gpu_time")
        # log("otr model predict time " + str(gpu_time))
        #
        # # # Decompress the numpy array
        # # decompressed_array = io.BytesIO()
        # # decompressed_array.write(pred)
        # # decompressed_array.seek(0)
        # # pred = np.load(decompressed_array, allow_pickle=True)['arr_0']
        # # log("inputs.shape" + str(pred.shape))
        #
        # Use GPU shared-memory processing
        # _dict = {"inputs": inputs, "md5": _global.get("md5")}
        # result = from_gpu_share_memory(_dict, model_type="otr", predictor_type="")
        # if judge_error_code(result):
        #     logging.error("from_gpu_interface failed! " + str(result))
        #     raise requests.exceptions.RequestException
        #
        # pred = result.get("preds")
        # gpu_time = result.get("gpu_time")
        # log("otr model predict time " + str(gpu_time))
        #
        # # Postprocessing
        # list_line = table_postprocess(img_new, pred, prob)
        # log("len(list_line) " + str(len(list_line)))
        # if judge_error_code(list_line):
        #     return list_line

        # Scale the bboxes from the otr resize back to the original size
        start_time = time.time()
        ratio = (image_np.shape[0]/best_h, image_np.shape[1]/best_w)
        for i in range(len(list_line)):
            point = list_line[i]
            list_line[i] = [int(point[0]*ratio[1]), int(point[1]*ratio[0]),
                            int(point[2]*ratio[1]), int(point[3]*ratio[0])]
        log("otr resize bbox recover " + str(time.time()-start_time))

        # OCR runs out of memory on oversized images, so resize them first
        start_time = time.time()
        threshold = 3000
        ocr_resize_flag = 0
        if image_np.shape[0] >= threshold or image_np.shape[1] >= threshold:
            ocr_resize_flag = 1
            best_h, best_w = get_best_predict_size2(image_np, threshold)
            # image_resize = cv2.resize(image_np, (best_w, best_h), interpolation=cv2.INTER_AREA)
            image_resize = pil_resize(image_np, best_h, best_w)
            log("ocr_process image resize " + str(image_resize.shape))
            image_resize_path = image_path.split(".")[0] + "_resize_ocr." + image_path.split(".")[-1]
            cv2.imwrite(image_resize_path, image_resize)
        log("ocr resize before " + str(time.time()-start_time))

        # Call the OCR model interface
        with open(image_resize_path, "rb") as f:
            image_bytes = f.read()
        text_list, bbox_list = from_ocr_interface(image_bytes, is_table=1)
        if judge_error_code(text_list):
            return text_list

        # # PaddleOCR bundles preprocessing, model inference and postprocessing
        # paddle_ocr = PaddleOCR(use_angle_cls=True, lang="ch")
        # results = paddle_ocr.ocr(image_resize, det=True, rec=True, cls=True)
        # # Loop over the recognition result of each image
        # text_list = []
        # bbox_list = []
        # for line in results:
        #     # print("ocr_interface line", line)
        #     text_list.append(line[-1][0])
        #     bbox_list.append(line[0])
        # if len(text_list) == 0:
        #     return []

        # Restore the bboxes from the OCR resize
        if ocr_resize_flag:
            ratio = (image_np.shape[0]/best_h, image_np.shape[1]/best_w)
        else:
            ratio = (1, 1)
        for i in range(len(bbox_list)):
            point = bbox_list[i]
            bbox_list[i] = [[int(point[0][0]*ratio[1]), int(point[0][1]*ratio[0])],
                            [int(point[1][0]*ratio[1]), int(point[1][1]*ratio[0])],
                            [int(point[2][0]*ratio[1]), int(point[2][1]*ratio[0])],
                            [int(point[3][0]*ratio[1]), int(point[3][1]*ratio[0])]]

        # Build tables with the existing method
        try:
            from format_convert.convert_tree import TableLine
            list_lines = []
            for line in list_line:
                list_lines.append(LTLine(1, (line[0], line[1]), (line[2], line[3])))
            from format_convert.convert_tree import TextBox
            list_text_boxes = []
            for i in range(len(bbox_list)):
                bbox = bbox_list[i]
                b_text = text_list[i]
                list_text_boxes.append(TextBox([bbox[0][0], bbox[0][1],
                                                bbox[2][0], bbox[2][1]], b_text))
            # for _textbox in list_text_boxes:
            #     print("==", _textbox.get_text())
            lt = LineTable()
            tables, obj_in_table, _ = lt.recognize_table(list_text_boxes, list_lines, False)
            # Merge textboxes on the same row
            list_text_boxes = merge_textbox(list_text_boxes, obj_in_table)
            obj_list = []
            for table in tables:
                obj_list.append(_Table(table["table"], table["bbox"]))
            for text_box in list_text_boxes:
                if text_box not in obj_in_table:
                    obj_list.append(_Sentence(text_box.get_text(), text_box.bbox))
            return obj_list
        except:
            traceback.print_exc()
            return [-8]
    except Exception as e:
        log("image_preprocess error")
        traceback.print_exc()
        return [-1]


if __name__ == "__main__":
    image_slice_new(cv2.imread("C:/Users/Administrator/Desktop/test_image/error28.jpg"))