# otr_interface.py

import base64
import json
import multiprocessing as mp
import os
import traceback
# os.environ['TF_XLA_FLAGS'] = '--tf_xla_cpu_global_jit'
# os.environ['CUDA_VISIBLE_DEVICES'] = "0"
import tensorflow as tf

MAX_COMPUTE = False
if not MAX_COMPUTE:
    # tensorflow memory settings
    try:
        gpus = tf.config.list_physical_devices('GPU')
        if len(gpus) > 0:
            tf.config.experimental.set_virtual_device_configuration(
                gpus[0],
                [tf.config.experimental.VirtualDeviceConfiguration(memory_limit=2048)])
    except:
        traceback.print_exc()
        # pass
        # gpus = tf.config.list_physical_devices('GPU')
        # for gpu in gpus:  # when using multiple GPUs
        #     tf.config.experimental.set_memory_growth(gpu, True)
        # tensorflow 1.x fallback: limit GPU memory via the session config
        os.environ['CUDA_CACHE_MAXSIZE'] = str(2147483648)
        os.environ['CUDA_CACHE_DISABLE'] = str(0)
        gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.6)
        sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))

import sys
sys.path.append(os.path.dirname(os.path.abspath(__file__)) + "/../")
import time
import logging
# from table_line import *
import cv2
import numpy as np
from flask import Flask, request
from format_convert.utils import request_post, judge_error_code, get_intranet_ip, log, get_md5_from_bytes, get_platform
from otr.table_line import get_points, get_split_line, get_points_row, \
    get_points_col, \
    delete_close_points, fix_outline, get_bbox, get_outline_point, delete_contain_bbox, points_to_line, \
    fix_inner, merge_line, fix_corner, add_continue_bbox, delete_outline, table_net, table_line
from format_convert import _global

# interface configuration
app = Flask(__name__)


@app.route('/otr', methods=['POST'])
def _otr():
    _global._init()
    _global.update({"port": globals().get("port")})
    start_time = time.time()
    log("into otr_interface _otr")
    try:
        if not request.form:
            log("otr no data!")
            return json.dumps({"list_line": str([-9])})
        otr_model = globals().get("global_otr_model")
        if otr_model is None:
            otr_model = OtrModels().get_model()
            globals().update({"global_otr_model": otr_model})
        data = request.form.get("data")
        is_from_pdf = request.form.get("is_from_pdf")
        img_data = base64.b64decode(data)
        # _md5 = get_md5_from_bytes(img_data)[0]
        _md5 = request.form.get("md5")
        _global.update({"md5": _md5})
        if is_from_pdf:
            list_lines = line_detect(img_data, otr_model, prob=0.2)
        else:
            list_lines = line_detect(img_data, otr_model, prob=0.5)
        return json.dumps(list_lines)
    except TimeoutError:
        return json.dumps({"list_line": str([-5])})
    except:
        traceback.print_exc()
        return json.dumps({"list_line": str([-1])})
    finally:
        log("otr interface finish time " + str(time.time()-start_time))
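
# Example client call for the /otr route, as a sketch only: the URL and port are
# assumptions taken from test_otr_model below, not fixed by this module.
#     img_base64 = base64.b64encode(img_bytes)
#     file_json = {"data": img_base64, "is_from_pdf": False, "md5": _md5}
#     result = json.loads(request_post("http://127.0.0.1:18000/otr", file_json))
#     list_line = result["list_line"]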


def otr(data, otr_model, is_from_pdf):
    log("into otr_interface otr")
    try:
        img_data = base64.b64decode(data)
        # points_and_lines = pool.apply(table_detect, (img_data,))
        if is_from_pdf:
            list_lines = line_detect(img_data, otr_model, prob=0.2)
        else:
            list_lines = line_detect(img_data, otr_model, prob=0.5)
        return list_lines
    except TimeoutError:
        raise TimeoutError


flag = 0
# model_path = "models/table-line.h5"


def table_detect2(img_data, otr_model):
    log("into otr_interface table_detect")
    start_time = time.time()
    try:
        start_time1 = time.time()
        # decode the binary stream into an np.ndarray [np.uint8: 8-bit pixels]
        img = cv2.imdecode(np.frombuffer(img_data, np.uint8), cv2.IMREAD_COLOR)
        # log("into otr_interface table_detect 1")
        # cv2.imwrite("111111.jpg", img)
        # swap BGR to RGB
        image_np = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
        # log("into otr_interface table_detect 2")
        # pick the predict size closest to the image resolution to avoid distortion
        # best_h, best_w = get_best_predict_size(img)
        print("image_np.shape", image_np.shape)
        best_h, best_w, _ = image_np.shape
        log("otr preprocess time: " + str(round(float(time.time()-start_time1), 4)) + "s")

        # run the model
        # rows, cols = table_line(image_np, otr_model)
        rows, cols, image_np = table_line(image_np, otr_model, size=(best_w, best_h), hprob=0.5, vprob=0.5)
        start_time1 = time.time()
        if not rows or not cols:
            print("points", 0, "split_lines", 0, "bboxes", 0)
            return {"points": str([]), "split_lines": str([]),
                    "bboxes": str([]), "outline_points": str([]),
                    "lines": str([])}
        # check that rows/cols are produced correctly
        # for line in rows+cols:
        #     cv2.line(img, (int(line[0]), int(line[1])), (int(line[2]), int(line[3])),
        #              (255, 0, 0), 2)
        # cv2.imshow("rows-cols1", img)
        # cv2.waitKey(0)

        # post-process the result
        # merge staggered lines
        rows = merge_line(rows, axis=0)
        cols = merge_line(cols, axis=1)

        # compute intersections and split lines
        points = get_points(rows, cols, (image_np.shape[0], image_np.shape[1]))
        # log("into otr_interface table_detect 5")
        if not points:
            print("points", 0, "split_lines", 0, "bboxes", 0)
            return {"points": str([]), "split_lines": str([]),
                    "bboxes": str([]), "outline_points": str([]),
                    "lines": str([])}

        # drop useless lines outside the table area
        rows, cols = delete_outline(rows, cols, points)

        split_lines, split_y = get_split_line(points, cols, image_np)
        # log("into otr_interface table_detect 6")

        # group intersections by row/column, then drop points that are too close together
        row_point_list = get_points_row(points, split_y, 5)
        col_point_list = get_points_col(points, split_y, 5)
        # log("into otr_interface table_detect 7")
        points = delete_close_points(points, row_point_list, col_point_list)
        # log("into otr_interface table_detect 8")

        # check that points are produced correctly
        # for p in points:
        #     cv2.circle(img, (p[0], p[1]), 3, (0, 0, 255))
        # cv2.imshow("points", img)
        # cv2.waitKey(0)

        # check that rows/cols are produced correctly
        # for line in rows+cols:
        #     cv2.line(img, (int(line[0]), int(line[1])), (int(line[2]), int(line[3])),
        #              (0, 255, 0), 2)
        # cv2.imshow("rows-cols0", img)
        # cv2.waitKey(0)
        # fix the table outline
        new_rows, new_cols, long_rows, long_cols = fix_outline(image_np, rows, cols, points,
                                                               split_y)
        # print(new_cols, new_rows)
        if new_rows or new_cols:
            # connect to the extensions of the patched lines
            if long_rows:
                rows = long_rows
            if long_cols:
                cols = long_cols
            # newly patched lines
            if new_rows:
                rows += new_rows
            if new_cols:
                cols += new_cols

            # recompute intersections and split lines after fixing the outline
            points = get_points(rows, cols, (image_np.shape[0], image_np.shape[1]))
            # log("into otr_interface table_detect 10")
            split_lines, split_y = get_split_line(points, cols, image_np)

            # group intersections by row/column, then drop points that are too close together
            row_point_list = get_points_row(points, split_y, 0)
            col_point_list = get_points_col(points, split_y, 0)
            # log("into otr_interface table_detect 11")
            points = delete_close_points(points, row_point_list, col_point_list)
            # row_point_list = get_points_row(points, split_y)
            # col_point_list = get_points_col(points, split_y)
            # log("into otr_interface table_detect 12")

        # check that rows/cols are produced correctly
        # for line in rows+cols:
        #     cv2.line(img, (int(line[0]), int(line[1])), (int(line[2]), int(line[3])),
        #              (255, 0, 0), 2)
        # cv2.imshow("rows-cols1", img)
        # cv2.waitKey(0)

        # fix the four corners of the table
        rows, cols = fix_corner(rows, cols, split_y)
        points = get_points(rows, cols, (image_np.shape[0], image_np.shape[1]))
        # row_point_list = get_points_row(points, split_y, 5)
        # col_point_list = get_points_col(points, split_y, 5)
        # print("row_point_list", row_point_list)
        # print("col_point_list", col_point_list)

        # fix missing inner lines
        points = fix_inner(rows, cols, points, split_y)
        if not points:
            print("points", 0, "split_lines", 0, "bboxes", 0)
            return {"points": str([]), "split_lines": str([]),
                    "bboxes": str([]), "outline_points": str([]),
                    "lines": str([])}
        row_point_list = get_points_row(points, split_y, 5)
        col_point_list = get_points_col(points, split_y, 5)

        # check that points are produced correctly
        # for p in points:
        #     cv2.circle(img, (p[0], p[1]), 1, (0, 255, 0), 3)
        # cv2.imshow("points fix", img)
        # cv2.waitKey(0)

        # check that rows/cols are produced correctly
        # for line in rows+cols:
        #     cv2.line(img, (int(line[0]), int(line[1])), (int(line[2]), int(line[3])),
        #              (255, 0, 0), 2)
        # cv2.imshow("rows-cols2", img)
        # cv2.waitKey(0)

        # rebuild rows/cols from the row/column groupings, so extended lines
        # do not break the later bbox generation
        # rows = points_to_line(row_point_list, axis=0)
        # cols = points_to_line(col_point_list, axis=1)
        # points = get_points(rows, cols, (image_np.shape[0], image_np.shape[1]))
        # row_point_list = get_points_row(points, split_y, 0)
        # col_point_list = get_points_col(points, split_y, 0)

        # get cell bboxes
        bboxes = get_bbox(image_np, row_point_list, col_point_list, split_y, rows, cols)
        # log("into otr_interface table_detect 13")

        # delete nested bboxes
        if bboxes:
            bboxes = delete_contain_bbox(bboxes)

        # check that the boxes are output correctly
        # for box in bboxes:
        #     cv2.rectangle(img, box[0], box[1], (0, 0, 255), 3)
        # cv2.imshow("bbox", img)
        # cv2.waitKey(0)

        # add continuation bboxes
        # if bboxes:
        #     bboxes = add_continue_bbox(bboxes)
        #
        #     # delete nested bboxes
        #     bboxes = delete_contain_bbox(bboxes)

        # check that the boxes are output correctly
        # cv2.namedWindow('bbox', 0)
        # for box in bboxes:
        #     cv2.rectangle(img, box[0], box[1], (0, 255, 0), 3)
        # cv2.imshow("bbox", img)
        # cv2.waitKey(0)

        # check that points are produced correctly
        # cv2.namedWindow('points', 0)
        # for p in points:
        #     cv2.circle(img, (p[0], p[1]), 3, (0, 0, 255))
        # cv2.imshow("points", img)
        # cv2.waitKey(0)

        # check that the region split lines are output correctly
        # cv2.namedWindow('split_lines', 0)
        # for line in split_lines:
        #     cv2.line(img, line[0], line[1], (0, 0, 255), 2)
        # cv2.imshow("split_lines", img)
        # cv2.waitKey(0)
        # get the top-left and bottom-right points of each table
        outline_points = get_outline_point(points, split_y)
        # log("into otr_interface table_detect 14")

        if bboxes:
            print("bboxes number", len(bboxes))
            # print("bboxes", bboxes)
        else:
            print("bboxes number", "None")
        log("otr postprocess time: " + str(round(float(time.time()-start_time1), 4)) + "s")
        log("otr finish: " + str(round(float(time.time()-start_time), 4)) + "s")
        return {"points": str(points), "split_lines": str(split_lines),
                "bboxes": str(bboxes), "outline_points": str(outline_points),
                "lines": str(rows+cols)}
    except TimeoutError:
        raise TimeoutError
    except Exception as e:
        log("otr_interface cannot detect table!")
        print("otr_interface cannot detect table!")
        traceback.print_exc()
        print("points", 0, "split_lines", 0, "bboxes", 0)
        log("otr postprocess time: " + str(round(float(time.time()-start_time1), 4)) + "s")
        return {"points": str([]), "split_lines": str([]), "bboxes": str([]),
                "outline_points": str([]), "lines": str([])}


def line_detect(img_data, otr_model, prob=0.2):
    log("into otr_interface line_detect")
    start_time = time.time()
    try:
        start_time1 = time.time()
        # decode the binary stream into an np.ndarray [np.uint8: 8-bit pixels]
        img = cv2.imdecode(np.frombuffer(img_data, np.uint8), cv2.IMREAD_COLOR)
        # log("into otr_interface table_detect 1")
        # cv2.imwrite("111111.jpg", img)
        # swap BGR to RGB
        image_np = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
        # log("into otr_interface table_detect 2")
        # pick the predict size closest to the image resolution to avoid distortion
        # best_h, best_w = get_best_predict_size(img)
        log("image_np.shape" + str(image_np.shape))
        best_h, best_w, _ = image_np.shape
        log("otr preprocess time: " + str(round(float(time.time()-start_time1), 4)) + "s")

        # run the model
        # rows, cols = table_line(image_np, otr_model)
        start_time1 = time.time()
        list_line = table_line(image_np, otr_model, size=(best_w, best_h), prob=prob)
        log("otr finish " + str(round(float(time.time()-start_time1), 4)) + "s")
        return {"list_line": str(list_line)}
    except TimeoutError:
        raise TimeoutError
    except Exception as e:
        log("otr_interface cannot detect table!")
        print("otr_interface cannot detect table!")
        traceback.print_exc()
        log("otr postprocess time: " + str(round(float(time.time()-start_time1), 4)) + "s")
        return {"list_line": str([])}


class OtrModels:
    def __init__(self):
        # directory containing this python file
        _dir = os.path.abspath(os.path.dirname(os.path.abspath(__file__)))
        model_path = _dir + "/models/table-line.h5"
        self.otr_model = table_net((None, None, 3), 2)
        self.otr_model.load_weights(model_path)

    def get_model(self):
        return self.otr_model
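
# Minimal in-process usage sketch (assumes ./models/table-line.h5 and a local
# image file exist; "1.jpg" is an illustrative path, not fixed by this module):
#     otr_model = OtrModels().get_model()
#     with open("1.jpg", "rb") as f:
#         result = line_detect(f.read(), otr_model, prob=0.5)
#     print(result["list_line"])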


def test_otr_model(from_remote=True):
    _global._init()
    from format_convert.convert_image import get_best_predict_size, image_process
    if get_platform() == "Windows":
        file_path = "C:/Users/Administrator/Desktop/error2.png"
        file_path = "C:/Users/Administrator/Downloads/1652672734044.jpg"
    else:
        file_path = "1.jpg"
    image_np = cv2.imread(file_path)
    best_h, best_w = get_best_predict_size(image_np)
    image_resize = cv2.resize(image_np, (best_w, best_h), interpolation=cv2.INTER_AREA)
    cv2.imwrite(file_path, image_resize)

    with open(file_path, "rb") as f:
        file_bytes = f.read()
    file_base64 = base64.b64encode(file_bytes)
    _md5 = get_md5_from_bytes(file_bytes)[0]
    _global.update({"port": 15010, "md5": _md5})

    if from_remote:
        file_json = {"data": file_base64, "is_from_pdf": False, "md5": _md5}
        # _url = "http://192.168.2.104:18000/otr"
        _url = "http://127.0.0.1:18000/otr"
        r = json.loads(request_post(_url, file_json))
    else:
        # otr_model = OtrModels().get_model()
        # r = otr(file_base64, otr_model, is_from_pdf=False)
        r = image_process(image_resize, file_path)
    print(r)


# otr_model = table_net((None, None, 3), 2)
# otr_model.load_weights(model_path)


if __name__ == '__main__':
    if len(sys.argv) == 2:
        port = int(sys.argv[1])
    elif len(sys.argv) == 3:
        port = int(sys.argv[1])
        using_gpu_index = int(sys.argv[2])
    else:
        port = 18000
        using_gpu_index = 0

    _global._init()
    _global.update({"port": str(port)})
    globals().update({"port": str(port)})

    # logging format setup
    # ip = get_intranet_ip()
    # logging.basicConfig(level=logging.INFO,
    #                     format='%(asctime)s - %(name)s - %(levelname)s - '
    #                            + ip + ' - ' + str(port) + ' - %(message)s')
    logging.info(get_platform())

    # limit tensorflow GPU memory
    # os.environ['CUDA_VISIBLE_DEVICES'] = str(using_gpu_index)
    # import tensorflow as tf
    # if get_platform() != "Windows":
    #     _version = tf.__version__
    #     logging.info(str(_version))
    #     memory_limit_scale = 0.3
    #     # tensorflow 1.x
    #     if str(_version)[0] == "1":
    #         logging.info("1.x " + str(_version))
    #         os.environ['CUDA_CACHE_MAXSIZE'] = str(2147483648)
    #         os.environ['CUDA_CACHE_DISABLE'] = str(0)
    #         gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=memory_limit_scale)
    #         sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))
    #
    #     # tensorflow 2.x
    #     elif str(_version)[0] == "2":
    #         logging.info("2.x " + str(_version))
    #         config = tf.compat.v1.ConfigProto()
    #         config.gpu_options.per_process_gpu_memory_fraction = memory_limit_scale
    #         config.gpu_options.allow_growth = True
    #         sess = tf.compat.v1.Session(config=config)
    log("OTR running " + str(port))
    # app.run(host='0.0.0.0', port=port, processes=1, threaded=False, debug=False)
    app.run(host='0.0.0.0', port=port)

    # test_otr_model(False)
    # print(json.dumps([-2]))
    # otr_model = OtrModels().get_model()
    # otr("11", otr_model)
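
# Launch sketch (an assumption based on the argv parsing above, not a documented CLI):
#     python otr_interface.py 18000        # port only
#     python otr_interface.py 18000 0      # port and GPU index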