- # encoding=utf8
- import inspect
- import io
- import logging
- import os
- import sys
- import time
- import requests
- import numpy as np
- from PIL import Image
- sys.path.append(os.path.dirname(__file__) + "/../")
- from pdfminer.layout import LTLine
- import traceback
- import cv2
- from isr.pre_process import count_red_pixel
- from format_convert.utils import judge_error_code, add_div, LineTable, get_table_html, get_logger, log, \
- memory_decorator, pil_resize
- from format_convert.convert_need_interface import from_otr_interface, from_ocr_interface, from_gpu_interface_redis, \
- from_idc_interface, from_isr_interface
- from format_convert.table_correct import get_rotated_image
def image_process(image_np, image_path, is_from_pdf=False, is_from_docx=False, use_ocr=True):
    """Full image-recognition pipeline: deskew -> seal removal (isr) ->
    table-line detection (otr) -> OCR -> table/sentence assembly.

    :param image_np: image as a numpy array (as read by cv2).
    :param image_path: image path on disk; intermediate images are written
        beside it with suffixes "_isr", "_resize_otr", "_resize_ocr".
    :param is_from_pdf: forwarded to the otr interface.
    :param is_from_docx: when True, recoverable failures return [] instead
        of the error-code list.
    :param use_ocr: not referenced in this body — presumably a switch to
        skip OCR; TODO confirm against callers.
    :return: list of _Table/_Sentence objects on success, or [] / an
        error-code list such as [-8] (table assembly failed) or
        [-1] (unexpected error).
    """
    from format_convert.convert_tree import _Table, _Sentence

    def get_cluster(t_list, b_list, axis):
        # Group texts whose bbox coordinate on `axis` lies within `margin`
        # pixels of an existing group, then snap each group member to the
        # group's average coordinate so boxes on the same visual line align.
        # NOTE(review): not referenced anywhere in this function body.
        zip_list = list(zip(t_list, b_list))
        if len(zip_list) == 0:
            return t_list, b_list
        if len(zip_list[0]) > 0:
            zip_list.sort(key=lambda x: x[1][axis][1])
        cluster_list = []
        margin = 5
        for text, bbox in zip_list:
            _find = 0
            for cluster in cluster_list:
                if abs(cluster[1] - bbox[axis][1]) <= margin:
                    cluster[0].append([text, bbox])
                    cluster[1] = bbox[axis][1]
                    _find = 1
                    break
            if not _find:
                cluster_list.append([[[text, bbox]], bbox[axis][1]])
        new_text_list = []
        new_bbox_list = []
        for cluster in cluster_list:
            # snap every member to the cluster's average coordinate
            center_y = 0
            for text, bbox in cluster[0]:
                center_y += bbox[axis][1]
            center_y = int(center_y / len(cluster[0]))
            for text, bbox in cluster[0]:
                bbox[axis][1] = center_y
                new_text_list.append(text)
                new_bbox_list.append(bbox)
        return new_text_list, new_bbox_list

    def merge_textbox(textbox_list, in_objs):
        # Merge textboxes sitting on the same horizontal band (top and
        # bottom edges within `threshold` px) into one box, concatenating
        # text left-to-right; boxes already absorbed into tables (in_objs)
        # are skipped.
        delete_obj = []
        threshold = 5
        textbox_list.sort(key=lambda x: x.bbox[0])
        for k in range(len(textbox_list)):
            tb1 = textbox_list[k]
            if tb1 not in in_objs and tb1 not in delete_obj:
                for m in range(k+1, len(textbox_list)):
                    tb2 = textbox_list[m]
                    if tb2 in in_objs:
                        continue
                    if abs(tb1.bbox[1]-tb2.bbox[1]) <= threshold \
                            and abs(tb1.bbox[3]-tb2.bbox[3]) <= threshold:
                        if tb1.bbox[0] <= tb2.bbox[0]:
                            tb1.text = tb1.text + tb2.text
                        else:
                            tb1.text = tb2.text + tb1.text
                        tb1.bbox[0] = min(tb1.bbox[0], tb2.bbox[0])
                        tb1.bbox[2] = max(tb1.bbox[2], tb2.bbox[2])
                        delete_obj.append(tb2)
        for _obj in delete_obj:
            if _obj in textbox_list:
                textbox_list.remove(_obj)
        return textbox_list

    log("into image_preprocess")
    try:
        # Deskew the image; the corrected image is written back to image_path.
        g_r_i = get_rotated_image(image_np, image_path)
        if judge_error_code(g_r_i):
            if is_from_docx:
                return []
            else:
                return g_r_i
        image_np = cv2.imread(image_path)
        if image_np is None:
            return []

        # (dead code: idc-model deskew variant, kept commented out)
        # if image_np is None:
        #     return []
        #
        # # idc model performs image deskew
        # image_resize = pil_resize(image_np, 640, 640)
        # image_resize_path = image_path.split(".")[0] + "_resize_idc." + image_path.split(".")[-1]
        # cv2.imwrite(image_resize_path, image_resize)
        #
        # with open(image_resize_path, "rb") as f:
        #     image_bytes = f.read()
        # angle = from_idc_interface(image_bytes)
        # if judge_error_code(angle):
        #     if is_from_docx:
        #         return []
        #     else:
        #         return angle
        # # rotate by the predicted angle
        # image_pil = Image.fromarray(image_np)
        # image_np = np.array(image_pil.rotate(angle, expand=1))
        # # write to disk
        # idc_path = image_path.split(".")[0] + "_idc." + image_path.split(".")[-1]
        # cv2.imwrite(idc_path, image_np)

        # isr model removes red seals/stamps
        image_np_source = image_np
        _isr_time = time.time()
        if count_red_pixel(image_np):
            # only run the model once enough red pixels are present
            with open(image_path, "rb") as f:
                image_bytes = f.read()
            image_np = from_isr_interface(image_bytes)
            if judge_error_code(image_np):
                if is_from_docx:
                    return []
                else:
                    return image_np
            # [1] means no seal detected; fall back to the source image
            if isinstance(image_np, list) and image_np == [1]:
                log("no seals detected!")
                image_np = image_np_source
            else:
                isr_path = image_path.split(".")[0] + "_isr." + image_path.split(".")[-1]
                cv2.imwrite(isr_path, image_np)
            log("isr total time"+str(time.time()-_isr_time))

        # otr model detects table lines; resize to the model input size and
        # write to a separate path
        best_h, best_w = get_best_predict_size(image_np)
        # image_resize = cv2.resize(image_np, (best_w, best_h), interpolation=cv2.INTER_AREA)
        image_resize = pil_resize(image_np, best_h, best_w)
        image_resize_path = image_path.split(".")[0] + "_resize_otr." + image_path.split(".")[-1]
        cv2.imwrite(image_resize_path, image_resize)

        # call the otr model interface
        with open(image_resize_path, "rb") as f:
            image_bytes = f.read()
        list_line = from_otr_interface(image_bytes, is_from_pdf)
        if judge_error_code(list_line):
            return list_line

        # (dead code: in-process otr pre/post-processing via gpu interface,
        # kept commented out)
        # # preprocess
        # if is_from_pdf:
        #     prob = 0.2
        # else:
        #     prob = 0.5
        # with open(image_resize_path, "rb") as f:
        #     image_bytes = f.read()
        # img_new, inputs = table_preprocess(image_bytes, prob)
        # if type(img_new) is list and judge_error_code(img_new):
        #     return img_new
        # log("img_new.shape " + str(img_new.shape))
        #
        # # call the model run interface
        # _dict = {"inputs": inputs, "md5": _global.get("md5")}
        # result = from_gpu_interface(_dict, model_type="otr", predictor_type="")
        # if judge_error_code(result):
        #     logging.error("from_gpu_interface failed! " + str(result))
        #     raise requests.exceptions.RequestException
        #
        # pred = result.get("preds")
        # gpu_time = result.get("gpu_time")
        # log("otr model predict time " + str(gpu_time))
        #
        # # # decompress numpy
        # # decompressed_array = io.BytesIO()
        # # decompressed_array.write(pred)
        # # decompressed_array.seek(0)
        # # pred = np.load(decompressed_array, allow_pickle=True)['arr_0']
        # # log("inputs.shape" + str(pred.shape))
        #
        # call gpu shared-memory processing
        # _dict = {"inputs": inputs, "md5": _global.get("md5")}
        # result = from_gpu_share_memory(_dict, model_type="otr", predictor_type="")
        # if judge_error_code(result):
        #     logging.error("from_gpu_interface failed! " + str(result))
        #     raise requests.exceptions.RequestException
        #
        # pred = result.get("preds")
        # gpu_time = result.get("gpu_time")
        # log("otr model predict time " + str(gpu_time))
        #
        # # postprocess
        # list_line = table_postprocess(img_new, pred, prob)
        # log("len(list_line) " + str(len(list_line)))
        # if judge_error_code(list_line):
        #     return list_line

        # map otr bboxes (detected at resized scale) back to original scale
        start_time = time.time()
        ratio = (image_np.shape[0]/best_h, image_np.shape[1]/best_w)
        for i in range(len(list_line)):
            point = list_line[i]
            list_line[i] = [int(point[0]*ratio[1]), int(point[1]*ratio[0]),
                            int(point[2]*ratio[1]), int(point[3]*ratio[0])]
        log("otr resize bbox recover " + str(time.time()-start_time))

        # overly large images overflow memory in ocr; resize first
        start_time = time.time()
        threshold = 3000
        if image_np.shape[0] >= threshold or image_np.shape[1] >= threshold:
            best_h, best_w = get_best_predict_size2(image_np, threshold)
            # image_resize = cv2.resize(image_np, (best_w, best_h), interpolation=cv2.INTER_AREA)
            image_resize = pil_resize(image_np, best_h, best_w)
            image_resize_path = image_path.split(".")[0] + "_resize_ocr." + image_path.split(".")[-1]
            cv2.imwrite(image_resize_path, image_resize)
        log("ocr resize before " + str(time.time()-start_time))

        # call the ocr model interface
        # NOTE(review): when no ocr-resize happened above, this re-reads the
        # otr-resized image and best_h/best_w still hold the otr sizes.
        with open(image_resize_path, "rb") as f:
            image_bytes = f.read()
        text_list, bbox_list = from_ocr_interface(image_bytes, is_table=True)
        if judge_error_code(text_list):
            return text_list

        # (dead code: in-process PaddleOCR variant, kept commented out)
        # # PaddleOCR bundles preprocess, model run and postprocess
        # paddle_ocr = PaddleOCR(use_angle_cls=True, lang="ch")
        # results = paddle_ocr.ocr(image_resize, det=True, rec=True, cls=True)
        # # loop over each image's recognition results
        # text_list = []
        # bbox_list = []
        # for line in results:
        #     # print("ocr_interface line", line)
        #     text_list.append(line[-1][0])
        #     bbox_list.append(line[0])
        # if len(text_list) == 0:
        #     return []

        # map ocr bboxes back to original scale
        ratio = (image_np.shape[0]/best_h, image_np.shape[1]/best_w)
        for i in range(len(bbox_list)):
            point = bbox_list[i]
            bbox_list[i] = [[int(point[0][0]*ratio[1]), int(point[0][1]*ratio[0])],
                            [int(point[1][0]*ratio[1]), int(point[1][1]*ratio[0])],
                            [int(point[2][0]*ratio[1]), int(point[2][1]*ratio[0])],
                            [int(point[3][0]*ratio[1]), int(point[3][1]*ratio[0])]]

        # assemble tables from detected lines + ocr textboxes using the
        # existing LineTable utility
        try:
            from format_convert.convert_tree import TableLine
            list_lines = []
            for line in list_line:
                list_lines.append(LTLine(1, (line[0], line[1]), (line[2], line[3])))
            from format_convert.convert_tree import TextBox
            list_text_boxes = []
            for i in range(len(bbox_list)):
                bbox = bbox_list[i]
                b_text = text_list[i]
                list_text_boxes.append(TextBox([bbox[0][0], bbox[0][1],
                                                bbox[2][0], bbox[2][1]], b_text))
            lt = LineTable()
            tables, obj_in_table, _ = lt.recognize_table(list_text_boxes, list_lines, False)
            # merge textboxes that belong to the same row
            list_text_boxes = merge_textbox(list_text_boxes, obj_in_table)
            obj_list = []
            for table in tables:
                obj_list.append(_Table(table["table"], table["bbox"]))
            for text_box in list_text_boxes:
                if text_box not in obj_in_table:
                    obj_list.append(_Sentence(text_box.get_text(), text_box.bbox))
            return obj_list
        except:
            traceback.print_exc()
            return [-8]
    except Exception as e:
        log("image_preprocess error")
        traceback.print_exc()
        return [-1]
@memory_decorator
def picture2text(path, html=False):
    """Extract text/tables from an image file on disk.

    :param path: path to the image file.
    :param html: when True, wrap the extracted content in div markup.
    :return: [text] on success; [-3] when the image cannot be read;
        [-1] on unexpected errors; other error-code lists are passed
        through from image_process.
    """
    log("into picture2text")
    try:
        # unreadable/corrupt image -> error code [-3]
        img = cv2.imread(path)
        if img is None:
            return [-3]
        text = image_process(img, path)
        if judge_error_code(text):
            return text
        if html:
            text = add_div(text)
        return [text]
    except Exception:
        log("picture2text error!")
        # BUG FIX: the original did print("picture2text", traceback.print_exc()),
        # which prints the traceback and then "picture2text None", because
        # print_exc() returns None. Print the traceback directly instead.
        traceback.print_exc()
        return [-1]
def get_best_predict_size(image_np, times=64, max_size=1300):
    """Pick the multiple-of-`times` edge lengths closest to the image's shape.

    Used to resize an image to the otr model's expected input granularity.

    :param image_np: image array; only .shape[0] (height) and .shape[1]
        (width) are read.
    :param times: candidate edge lengths are multiples of this value.
    :param max_size: upper bound for candidate edge lengths (generalizes the
        previous hard-coded 1300; default preserves old behavior).
    :return: (best_height, best_width), each a multiple of `times`.
    """
    # Candidates sorted largest-first so that min() resolves distance ties to
    # the LARGER candidate — the same tie-breaking as the original strict-<
    # search over a descending list. (Original also used a pointless
    # sort(key=lambda x: x) and two copy-pasted search loops.)
    sizes = sorted((i * times for i in range(1, 100) if i * times <= max_size),
                   reverse=True)

    def _closest(edge):
        # first (largest) candidate at minimal absolute distance
        return min(sizes, key=lambda s: abs(edge - s))

    return _closest(image_np.shape[0]), _closest(image_np.shape[1])
def get_best_predict_size2(image_np, threshold=3000):
    """Shrink/grow (height, width) so the longer edge equals `threshold`,
    preserving the aspect ratio.

    :param image_np: image array; only the first two shape dims are read.
    :param threshold: target length for the longer edge.
    :return: (new_height, new_width) as ints (truncated, not rounded).
    """
    height, width = image_np.shape[:2]
    factor = threshold / max(height, width)
    return int(height * factor), int(width * factor)
class ImageConvert:
    """Converts a single image file into the project's document tree:
    one _Document containing one _Page holding one _Image node."""

    def __init__(self, path, unique_type_dir):
        from format_convert.convert_tree import _Document
        self._doc = _Document(path)
        self.path = path
        self.unique_type_dir = unique_type_dir

    def init_package(self):
        """Read the raw image bytes; on failure flag error code [-3]."""
        try:
            with open(self.path, "rb") as image_file:
                self.image = image_file.read()
        except:
            log("cannot open image!")
            traceback.print_exc()
            self._doc.error_code = [-3]

    def convert(self):
        """Build the document tree unless init_package already failed."""
        from format_convert.convert_tree import _Page, _Image
        self.init_package()
        if self._doc.error_code is not None:
            return
        page_node = _Page(None, 0)
        image_node = _Image(self.image, self.path)
        page_node.add_child(image_node)
        self._doc.add_child(page_node)

    def get_html(self):
        """Run the conversion and return the HTML, or an error-code list."""
        try:
            self.convert()
        except:
            traceback.print_exc()
            self._doc.error_code = [-1]
        if self._doc.error_code is not None:
            return self._doc.error_code
        return self._doc.get_html()
|