import base64
import json
import os
import sys
import time
import traceback

import cv2
import torch
from flask import Flask, request

# Project imports require the repository root on sys.path
sys.path.append(os.path.dirname(os.path.abspath(__file__)) + "/../../")
from botr.yolov8.model import Predictor
from botr.yolov8.predict import detect
from format_convert import _global
from format_convert.max_compute_config import max_compute
from format_convert.utils import request_post, log, get_md5_from_bytes, get_platform, bytes2np

MAX_COMPUTE = max_compute

ROOT = os.path.abspath(os.path.dirname(__file__)) + '/../../'
model_path = ROOT + 'botr/yolov8/weights.pt'
# Interface configuration
app = Flask(__name__)
@app.route('/yolo', methods=['POST'])
def _yolo():
    _global._init()
    _global.update({"port": globals().get("port")})
    start_time = time.time()
    log("into yolo_interface _yolo")
    try:
        if not request.form:
            log("yolo no data!")
            return json.dumps({"b_table_list": str([-9])})

        # Lazily create the predictor on first request and cache it in module globals
        yolo_predictor = globals().get("global_yolo_predictor")
        if yolo_predictor is None:
            image_size = 640
            device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
            # device = 'cpu'
            yolo_predictor = Predictor(image_size, device, model_path)
            globals().update({"global_yolo_predictor": yolo_predictor})

        data = request.form.get("data")
        _md5 = request.form.get("md5")
        _global.update({"md5": _md5})
        b_table_list = yolo(data, yolo_predictor).get('b_table_list')
        return json.dumps({"b_table_list": b_table_list})
    except TimeoutError:
        return json.dumps({"b_table_list": str([-5])})
    except Exception:
        traceback.print_exc()
        return json.dumps({"b_table_list": str([-1])})
    finally:
        log("yolo interface finish time " + str(time.time() - start_time))
def yolo(data, predictor):
    # Decode the base64 image, convert to a numpy array, and run detection
    log("into yolo_interface yolo")
    try:
        img_data = base64.b64decode(data)
        img = bytes2np(img_data)
        b_table_list = detect(img, predictor)
        return {"b_table_list": b_table_list}
    except TimeoutError:
        raise
def test_yolo_model(from_remote=True):
    _global._init()
    from format_convert.convert_image import get_best_predict_size, image_process
    if get_platform() == "Windows":
        file_path = "C:/Users/Administrator/Desktop/error2.png"
        file_path = "C:/Users/Administrator/Downloads/1652672734044.jpg"
    else:
        file_path = "1.jpg"
    image_np = cv2.imread(file_path)
    best_h, best_w = get_best_predict_size(image_np)
    image_resize = cv2.resize(image_np, (best_w, best_h), interpolation=cv2.INTER_AREA)
    cv2.imwrite(file_path, image_resize)
    with open(file_path, "rb") as f:
        file_bytes = f.read()
    file_base64 = base64.b64encode(file_bytes)
    _md5 = get_md5_from_bytes(file_bytes)[0]
    _global.update({"port": 15010, "md5": _md5})
    if from_remote:
        file_json = {"data": file_base64, "is_from_pdf": False, "md5": _md5}
        # _url = "http://192.168.2.104:18000/otr"
        _url = "http://127.0.0.1:18000/otr"
        r = json.loads(request_post(_url, file_json))
    else:
        # otr_model = OtrModels().get_model()
        # r = otr(file_base64, otr_model, is_from_pdf=False)
        r = image_process(image_resize, file_path)
    print(r)
if __name__ == '__main__':
    if len(sys.argv) == 2:
        port = int(sys.argv[1])
        using_gpu_index = 0
    elif len(sys.argv) == 3:
        port = int(sys.argv[1])
        using_gpu_index = int(sys.argv[2])
    else:
        port = 18080
        using_gpu_index = 0

    log("YOLO running " + str(port))
    # app.run(host='0.0.0.0', port=port, processes=1, threaded=False, debug=False)
    app.run(host='0.0.0.0', port=port)

    # test_yolo_model(False)
    # print(json.dumps([-2]))
    # otr_model = OtrModels().get_model()
    # otr("11", otr_model)