# otr_interface.py

import base64
import json
import multiprocessing as mp
import os
import sys
import traceback
# os.environ['TF_XLA_FLAGS'] = '--tf_xla_cpu_global_jit'
# os.environ['CUDA_VISIBLE_DEVICES'] = "0"
sys.path.append(os.path.dirname(os.path.abspath(__file__)) + "/../")
from format_convert.max_compute_config import max_compute
import tensorflow as tf

MAX_COMPUTE = max_compute

if not MAX_COMPUTE:
    # TensorFlow memory settings
    try:
        gpus = tf.config.list_physical_devices('GPU')
        if len(gpus) > 0:
            tf.config.experimental.set_virtual_device_configuration(
                gpus[0],
                [tf.config.experimental.VirtualDeviceConfiguration(memory_limit=2048)])
    except:
        traceback.print_exc()
        # pass
        # gpus = tf.config.list_physical_devices('GPU')
        # for gpu in gpus:  # when using multiple GPUs
        #     tf.config.experimental.set_memory_growth(gpu, True)

    os.environ['CUDA_CACHE_MAXSIZE'] = str(2147483648)
    os.environ['CUDA_CACHE_DISABLE'] = str(0)
    # use the v1-compat session API so the memory fraction limit also works under TF 2.x
    gpu_options = tf.compat.v1.GPUOptions(per_process_gpu_memory_fraction=0.6)
    sess = tf.compat.v1.Session(config=tf.compat.v1.ConfigProto(gpu_options=gpu_options))

import time
import logging
import cv2
import numpy as np
from flask import Flask, request
from format_convert.utils import request_post, judge_error_code, get_intranet_ip, log, get_md5_from_bytes, get_platform
from otr.table_line import get_points, get_split_line, get_points_row, \
    get_points_col, \
    delete_close_points, fix_outline, get_bbox, get_outline_point, delete_contain_bbox, points_to_line, \
    fix_inner, merge_line, fix_corner, delete_outline, table_net, table_line
from format_convert import _global

# API configuration
app = Flask(__name__)


@app.route('/otr', methods=['POST'])
def _otr():
    _global._init()
    _global.update({"port": globals().get("port")})
    start_time = time.time()
    log("into otr_interface _otr")
    try:
        if not request.form:
            log("otr no data!")
            return json.dumps({"list_line": str([-9])})
        otr_model = globals().get("global_otr_model")
        if otr_model is None:
            otr_model = OtrModels().get_model()
            globals().update({"global_otr_model": otr_model})
        data = request.form.get("data")
        is_from_pdf = request.form.get("is_from_pdf")
        _md5 = request.form.get("md5")
        _global.update({"md5": _md5})
        list_lines = otr(data, otr_model, is_from_pdf)
        # if is_from_pdf:
        #     list_lines = line_detect(img_data, otr_model, prob=0.2)
        # else:
        #     list_lines = line_detect(img_data, otr_model, prob=0.5)
        return json.dumps(list_lines)
    except TimeoutError:
        return json.dumps({"list_line": str([-5])})
    except:
        traceback.print_exc()
        return json.dumps({"list_line": str([-1])})
    finally:
        log("otr interface finish time " + str(time.time() - start_time))


def otr(data, otr_model, is_from_pdf):
    log("into otr_interface otr")
    try:
        img_data = base64.b64decode(data)
        # points_and_lines = pool.apply(table_detect, (img_data,))
        if is_from_pdf:
            list_lines = line_detect(img_data, otr_model, prob=0.2)
        else:
            list_lines = line_detect(img_data, otr_model, prob=0.5)
        return list_lines
    except TimeoutError:
        raise TimeoutError


def table_detect2(img_data, otr_model):
    log("into otr_interface table_detect")
    start_time = time.time()
    try:
        start_time1 = time.time()
        # Decode the binary stream into an np.ndarray [np.uint8: 8-bit pixels]
        img = cv2.imdecode(np.frombuffer(img_data, np.uint8), cv2.IMREAD_COLOR)
        # log("into otr_interface table_detect 1")
        # cv2.imwrite("111111.jpg", img)
        # Convert BGR to RGB
        image_np = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
        # log("into otr_interface table_detect 2")
        # Pick the prediction size closest to the image resolution to avoid distortion
        # best_h, best_w = get_best_predict_size(img)
        print("image_np.shape", image_np.shape)
        best_h, best_w, _ = image_np.shape
        log("otr preprocess time: " + str(round(float(time.time()-start_time1), 4)) + "s")

        # Run the model
        # rows, cols = table_line(image_np, otr_model)
        rows, cols, image_np = table_line(image_np, otr_model, size=(best_w, best_h), hprob=0.5, vprob=0.5)
        start_time1 = time.time()
        if not rows or not cols:
            print("points", 0, "split_lines", 0, "bboxes", 0)
            return {"points": str([]), "split_lines": str([]),
                    "bboxes": str([]), "outline_points": str([]),
                    "lines": str([])}

        # Debug: check that rows/cols are output correctly
        # for line in rows+cols:
        #     cv2.line(img, (int(line[0]), int(line[1])), (int(line[2]), int(line[3])),
        #              (255, 0, 0), 2)
        # cv2.imshow("rows-cols1", img)
        # cv2.waitKey(0)

        # Post-process the results
        # Merge staggered lines
        rows = merge_line(rows, axis=0)
        cols = merge_line(cols, axis=1)

        # Compute intersections and split lines
        points = get_points(rows, cols, (image_np.shape[0], image_np.shape[1]))
        # log("into otr_interface table_detect 5")
        if not points:
            print("points", 0, "split_lines", 0, "bboxes", 0)
            return {"points": str([]), "split_lines": str([]),
                    "bboxes": str([]), "outline_points": str([]),
                    "lines": str([])}

        # Remove useless lines outside the tables
        rows, cols = delete_outline(rows, cols, points)

        split_lines, split_y = get_split_line(points, cols, image_np)
        # log("into otr_interface table_detect 6")

        # Group intersections into rows/columns, then drop points that are too close
        row_point_list = get_points_row(points, split_y, 5)
        col_point_list = get_points_col(points, split_y, 5)
        # log("into otr_interface table_detect 7")
        points = delete_close_points(points, row_point_list, col_point_list)
        # log("into otr_interface table_detect 8")

        # Debug: check that points are output correctly
        # for p in points:
        #     cv2.circle(img, (p[0], p[1]), 3, (0, 0, 255))
        # cv2.imshow("points", img)
        # cv2.waitKey(0)

        # Debug: check that rows/cols are output correctly
        # for line in rows+cols:
        #     cv2.line(img, (int(line[0]), int(line[1])), (int(line[2]), int(line[3])),
        #              (0, 255, 0), 2)
        # cv2.imshow("rows-cols0", img)
        # cv2.waitKey(0)

        # Fix the table outline
        new_rows, new_cols, long_rows, long_cols = fix_outline(image_np, rows, cols, points,
                                                               split_y)
        # print(new_cols, new_rows)
        if new_rows or new_cols:
            # Connect to the extensions of the patched lines
            if long_rows:
                rows = long_rows
            if long_cols:
                cols = long_cols
            # Newly patched lines
            if new_rows:
                rows += new_rows
            if new_cols:
                cols += new_cols

            # Recompute intersections and split lines after fixing the outline
            points = get_points(rows, cols, (image_np.shape[0], image_np.shape[1]))
            # log("into otr_interface table_detect 10")
            split_lines, split_y = get_split_line(points, cols, image_np)

            # Group intersections into rows/columns, then drop points that are too close
            row_point_list = get_points_row(points, split_y, 0)
            col_point_list = get_points_col(points, split_y, 0)
            # log("into otr_interface table_detect 11")
            points = delete_close_points(points, row_point_list, col_point_list)
            # row_point_list = get_points_row(points, split_y)
            # col_point_list = get_points_col(points, split_y)
            # log("into otr_interface table_detect 12")

        # Debug: check that rows/cols are output correctly
        # for line in rows+cols:
        #     cv2.line(img, (int(line[0]), int(line[1])), (int(line[2]), int(line[3])),
        #              (255, 0, 0), 2)
        # cv2.imshow("rows-cols1", img)
        # cv2.waitKey(0)

        # Fix the four corners of each table
        rows, cols = fix_corner(rows, cols, split_y)
        points = get_points(rows, cols, (image_np.shape[0], image_np.shape[1]))
        # row_point_list = get_points_row(points, split_y, 5)
        # col_point_list = get_points_col(points, split_y, 5)
        # print("row_point_list", row_point_list)
        # print("col_point_list", col_point_list)

        # Fix missing inner lines
        points = fix_inner(rows, cols, points, split_y)
        if not points:
            print("points", 0, "split_lines", 0, "bboxes", 0)
            return {"points": str([]), "split_lines": str([]),
                    "bboxes": str([]), "outline_points": str([]),
                    "lines": str([])}
        row_point_list = get_points_row(points, split_y, 5)
        col_point_list = get_points_col(points, split_y, 5)

        # Debug: check that points are output correctly
        # for p in points:
        #     cv2.circle(img, (p[0], p[1]), 1, (0, 255, 0), 3)
        # cv2.imshow("points fix", img)
        # cv2.waitKey(0)

        # Debug: check that rows/cols are output correctly
        # for line in rows+cols:
        #     cv2.line(img, (int(line[0]), int(line[1])), (int(line[2]), int(line[3])),
        #              (255, 0, 0), 2)
        # cv2.imshow("rows-cols2", img)
        # cv2.waitKey(0)

        # Rebuild rows/cols from the grouped points so over-extended lines do not break
        # the later bbox generation
        # rows = points_to_line(row_point_list, axis=0)
        # cols = points_to_line(col_point_list, axis=1)
        # points = get_points(rows, cols, (image_np.shape[0], image_np.shape[1]))
        # row_point_list = get_points_row(points, split_y, 0)
        # col_point_list = get_points_col(points, split_y, 0)

        # Get cell bboxes
        bboxes = get_bbox(image_np, row_point_list, col_point_list, split_y, rows, cols)
        # log("into otr_interface table_detect 13")

        # Remove bboxes that contain other bboxes
        if bboxes:
            bboxes = delete_contain_bbox(bboxes)

        # Debug: check that bboxes are output correctly
        # for box in bboxes:
        #     cv2.rectangle(img, box[0], box[1], (0, 0, 255), 3)
        # cv2.imshow("bbox", img)
        # cv2.waitKey(0)

        # Add continuation boxes
        # if bboxes:
        #     bboxes = add_continue_bbox(bboxes)
        #
        #     # Remove bboxes that contain other bboxes
        #     bboxes = delete_contain_bbox(bboxes)

        # Debug: check that bboxes are output correctly
        # cv2.namedWindow('bbox', 0)
        # for box in bboxes:
        #     cv2.rectangle(img, box[0], box[1], (0, 255, 0), 3)
        # cv2.imshow("bbox", img)
        # cv2.waitKey(0)

        # Debug: check that points are output correctly
        # cv2.namedWindow('points', 0)
        # for p in points:
        #     cv2.circle(img, (p[0], p[1]), 3, (0, 0, 255))
        # cv2.imshow("points", img)
        # cv2.waitKey(0)

        # Debug: check that the split lines are output correctly
        # cv2.namedWindow('split_lines', 0)
        # for line in split_lines:
        #     cv2.line(img, line[0], line[1], (0, 0, 255), 2)
        # cv2.imshow("split_lines", img)
        # cv2.waitKey(0)

        # Get the top-left and bottom-right point of each table
        outline_points = get_outline_point(points, split_y)
        # log("into otr_interface table_detect 14")

        if bboxes:
            print("bboxes number", len(bboxes))
            # print("bboxes", bboxes)
        else:
            print("bboxes number", "None")
        log("otr postprocess time: " + str(round(float(time.time()-start_time1), 4)) + "s")
        log("otr finish: " + str(round(float(time.time()-start_time), 4)) + "s")
        return {"points": str(points), "split_lines": str(split_lines),
                "bboxes": str(bboxes), "outline_points": str(outline_points),
                "lines": str(rows+cols)}

    except TimeoutError:
        raise TimeoutError
    except Exception as e:
        log("otr_interface cannot detect table!")
        traceback.print_exc()
        print("points", 0, "split_lines", 0, "bboxes", 0)
        log("otr postprocess time: " + str(round(float(time.time()-start_time1), 4)) + "s")
        return {"points": str([]), "split_lines": str([]), "bboxes": str([]),
                "outline_points": str([]), "lines": str([])}


def line_detect(img_data, otr_model, prob=0.2):
    log("into otr_interface line_detect")
    start_time = time.time()
    try:
        start_time1 = time.time()
        # Decode the binary stream into an np.ndarray [np.uint8: 8-bit pixels]
        img = cv2.imdecode(np.frombuffer(img_data, np.uint8), cv2.IMREAD_COLOR)
        # log("into otr_interface table_detect 1")
        # cv2.imwrite("111111.jpg", img)
        # Convert BGR to RGB
        image_np = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
        # log("into otr_interface table_detect 2")
        # Pick the prediction size closest to the image resolution to avoid distortion
        # best_h, best_w = get_best_predict_size(img)
        log("image_np.shape" + str(image_np.shape))
        best_h, best_w, _ = image_np.shape
        log("otr preprocess time: " + str(round(float(time.time()-start_time1), 4)) + "s")

        # Run the model
        # rows, cols = table_line(image_np, otr_model)
        start_time1 = time.time()
        list_line = table_line(image_np, otr_model, size=(best_w, best_h), prob=prob)
        log("otr finish " + str(round(float(time.time()-start_time1), 4)) + "s")
        return {"list_line": str(list_line)}

    except TimeoutError:
        raise TimeoutError
    except Exception as e:
        log("otr_interface cannot detect table!")
        traceback.print_exc()
        log("otr postprocess time: " + str(round(float(time.time()-start_time1), 4)) + "s")
        return {"list_line": str([])}
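

# Illustrative helper (an assumption, not part of the service): the values in the
# dicts returned above are packed with str(), so a caller that receives the JSON
# response has to turn them back into Python objects, e.g. with ast.literal_eval
# (assuming the detected lines serialize as plain numeric literals).
def _example_parse_list_line(response_text):
    import ast
    result = json.loads(response_text)            # e.g. {"list_line": "[-9]"} on error
    return ast.literal_eval(result["list_line"])  # back to a list of line coordinates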


class OtrModels:
    def __init__(self):
        # Directory containing this Python file
        _dir = os.path.abspath(os.path.dirname(os.path.abspath(__file__)))
        model_path = _dir + "/models/table-line.h5"
        self.otr_model = table_net((None, None, 3), 2)
        self.otr_model.load_weights(model_path)

    def get_model(self):
        return self.otr_model
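

# Minimal local-usage sketch (illustrative, not called anywhere in this module): load
# the weights once via OtrModels and run line detection on an image file without going
# through the Flask endpoint. Presumes models/table-line.h5 exists next to this file.
def _example_local_line_detect(image_path, prob=0.5):
    model = OtrModels().get_model()
    with open(image_path, "rb") as f:
        return line_detect(f.read(), model, prob=prob)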


def test_otr_model(from_remote=True):
    _global._init()
    from format_convert.convert_image import get_best_predict_size, image_process

    if get_platform() == "Windows":
        file_path = "C:/Users/Administrator/Desktop/error2.png"
        file_path = "C:/Users/Administrator/Downloads/1652672734044.jpg"
    else:
        file_path = "1.jpg"
    image_np = cv2.imread(file_path)
    best_h, best_w = get_best_predict_size(image_np)
    image_resize = cv2.resize(image_np, (best_w, best_h), interpolation=cv2.INTER_AREA)
    cv2.imwrite(file_path, image_resize)

    with open(file_path, "rb") as f:
        file_bytes = f.read()
    file_base64 = base64.b64encode(file_bytes)
    _md5 = get_md5_from_bytes(file_bytes)[0]
    _global.update({"port": 15010, "md5": _md5})

    if from_remote:
        file_json = {"data": file_base64, "is_from_pdf": False, "md5": _md5}
        # _url = "http://192.168.2.104:18000/otr"
        _url = "http://127.0.0.1:18000/otr"
        r = json.loads(request_post(_url, file_json))
    else:
        # otr_model = OtrModels().get_model()
        # r = otr(file_base64, otr_model, is_from_pdf=False)
        r = image_process(image_resize, file_path)
    print(r)
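

# Illustrative CLI usage (based on the argv handling in the __main__ block below;
# the port and GPU index values are only examples):
#     python otr_interface.py              # defaults to port 18000, GPU index 0
#     python otr_interface.py 18000        # custom port
#     python otr_interface.py 18000 0      # custom port and GPU index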


if __name__ == '__main__':
    if len(sys.argv) == 2:
        port = int(sys.argv[1])
    elif len(sys.argv) == 3:
        port = int(sys.argv[1])
        using_gpu_index = int(sys.argv[2])
    else:
        port = 18000
        using_gpu_index = 0

    _global._init()
    _global.update({"port": str(port)})
    globals().update({"port": str(port)})

    # Logging format settings
    # ip = get_intranet_ip()
    # logging.basicConfig(level=logging.INFO,
    #                     format='%(asctime)s - %(name)s - %(levelname)s - '
    #                            + ip + ' - ' + str(port) + ' - %(message)s')
    logging.info(get_platform())

    # Limit TensorFlow GPU memory
    # os.environ['CUDA_VISIBLE_DEVICES'] = str(using_gpu_index)
    # import tensorflow as tf
    # if get_platform() != "Windows":
    #     _version = tf.__version__
    #     logging.info(str(_version))
    #     memory_limit_scale = 0.3
    #     # tensorflow 1.x
    #     if str(_version)[0] == "1":
    #         logging.info("1.x " + str(_version))
    #         os.environ['CUDA_CACHE_MAXSIZE'] = str(2147483648)
    #         os.environ['CUDA_CACHE_DISABLE'] = str(0)
    #         gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=memory_limit_scale)
    #         sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))
    #
    #     # tensorflow 2.x
    #     elif str(_version)[0] == "2":
    #         logging.info("2.x " + str(_version))
    #         config = tf.compat.v1.ConfigProto()
    #         config.gpu_options.per_process_gpu_memory_fraction = memory_limit_scale
    #         config.gpu_options.allow_growth = True
    #         sess = tf.compat.v1.Session(config=config)

    log("OTR running " + str(port))
    # app.run(host='0.0.0.0', port=port, processes=1, threaded=False, debug=False)
    app.run(host='0.0.0.0', port=port)

    # test_otr_model(False)
    # print(json.dumps([-2]))
    # otr_model = OtrModels().get_model()
    # otr("11", otr_model)