# -*- coding: utf-8 -*-
import sys
import os
sys.path.append(os.path.dirname(os.path.abspath(__file__)) + "/../")
from format_convert.convert_doc import doc2text, DocConvert
from format_convert.convert_docx import docx2text, DocxConvert
from format_convert.convert_image import picture2text, ImageConvert
from format_convert.convert_pdf import pdf2text, PDFConvert
from format_convert.convert_rar import rar2text, RarConvert
from format_convert.convert_swf import swf2text, SwfConvert
from format_convert.convert_txt import txt2text, TxtConvert
from format_convert.convert_xls import xls2text, XlsConvert
from format_convert.convert_xlsx import xlsx2text, XlsxConvert
from format_convert.convert_zip import zip2text, ZipConvert
import hashlib
from format_convert import get_memory_info
from ocr import ocr_interface
from otr import otr_interface
import re
import shutil
import base64
import time
import uuid
import logging
from bs4 import BeautifulSoup
logging.getLogger("pdfminer").setLevel(logging.WARNING)
from format_convert.table_correct import *
from format_convert import timeout_decorator
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')

# Supported formats: txt doc docx xls xlsx pdf zip rar swf jpg jpeg png
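
# A minimal sketch (hypothetical; not the module's real entry point) of how the
# per-format converters imported above can be dispatched by file extension.
# Only signatures visible in this file are used; everything else is assumed.
def _example_get_text(_type, path, unique_type_dir):
    if _type in ("jpg", "jpeg", "png"):
        return picture2text(path)
    handlers = {
        "doc": lambda: doc2text(path, unique_type_dir),
        "docx": lambda: docx2text(path, unique_type_dir),
        "pdf": lambda: pdf2text(path, unique_type_dir),
        "zip": lambda: zip2text(path, unique_type_dir),
        "rar": lambda: rar2text(path, unique_type_dir),
        "txt": lambda: txt2text(path),
    }
    handler = handlers.get(_type)
    # Unknown format: mirror the [-3] "format error" convention used below
    return handler() if handler else [-3]
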
# def judge_error_code(_list, code=[-1, -2, -3, -4, -5, -7]):
#     for c in code:
#         if _list == [c]:
#             return True
#     return False
#
#
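
# The converters in this file share a return convention: a list of text on
# success, or a single-element list holding a negative error code (seen in the
# log messages below: -1 generic error, -3 format error, -4 timeout,
# -7 password-protected). An illustrative consumer of that convention:
def _example_handle_result(result):
    if isinstance(result, list) and len(result) == 1 \
            and result[0] in (-1, -2, -3, -4, -5, -7):
        return None  # conversion failed with code result[0]
    return "".join(result)
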
# def set_timeout(signum, frame):
#     print("=======================set_timeout")
#
#     raise TimeoutError
#
#
# def log_traceback(func_name):
#     logging.info(func_name)
#     etype, value, tb = sys.exc_info()
#     for line in traceback.TracebackException(
#             type(value), value, tb, limit=None).format(chain=True):
#         logging.info(line)
#
#
# def judge_format(path):
#     guess1 = mimetypes.guess_type(path)
#     _type = None
#     if guess1[0]:
#         _type = guess1[0]
#     else:
#         guess2 = filetype.guess(path)
#         if guess2:
#             _type = guess2.mime
#
#     if _type == "application/pdf":
#         return "pdf"
#     if _type == "application/vnd.openxmlformats-officedocument.wordprocessingml.document":
#         return "docx"
#     if _type == "application/x-zip-compressed" or _type == "application/zip":
#         return "zip"
#     if _type == "application/x-rar-compressed" or _type == "application/rar":
#         return "rar"
#     if _type == "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet":
#         return "xlsx"
#     if _type == "application/msword":
#         return "doc"
#     if _type == "image/png":
#         return "png"
#     if _type == "image/jpeg":
#         return "jpg"
#
#     # Cannot guess the type, return None
#     return None
#
#
# @get_memory_info.memory_decorator
# def txt2text(path):
#     logging.info("into txt2text")
#     try:
#         # Detect the character encoding
#         with open(path, "rb") as ff:
#             data = ff.read()
#         encode = chardet.detect(data).get("encoding")
#         print("txt2text judge code is", encode)
#
#         try:
#             if encode is None:
#                 logging.info("txt2text cannot judge file code!")
#                 return [-3]
#             with open(path, "r", encoding=encode) as ff:
#                 txt_text = ff.read()
#             return [txt_text]
#         except:
#             logging.info("txt2text cannot open file with code " + encode)
#             return [-3]
#     except Exception as e:
#         print("txt2text", traceback.print_exc())
#         logging.info("txt2text error!")
#         return [-1]
#
#
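
# Quick illustration of the chardet-based detection used by txt2text above; a
# UTF-8 byte-order mark is a case chardet can identify reliably.
def _example_detect_encoding(data=b"\xef\xbb\xbfhello"):
    import chardet
    return chardet.detect(data).get("encoding")  # typically 'UTF-8-SIG'
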
# @get_memory_info.memory_decorator
# def doc2text(path, unique_type_dir):
#     logging.info("into doc2text")
#     try:
#         # Call the Office format conversion service
#         file_path = from_office_interface(path, unique_type_dir, 'docx')
#         # if file_path == [-3]:
#         #     return [-3]
#         if judge_error_code(file_path):
#             return file_path
#
#         text = docx2text(file_path, unique_type_dir)
#         return text
#     except Exception as e:
#         logging.info("doc2text error!")
#         print("doc2text", traceback.print_exc())
#         # log_traceback("doc2text")
#         return [-1]
#
#
# @get_memory_info.memory_decorator
# def read_xml_order(path, save_path):
#     logging.info("into read_xml_order")
#     try:
#         try:
#             f = zipfile.ZipFile(path)
#             for file in f.namelist():
#                 if "word/document.xml" == str(file):
#                     f.extract(file, save_path)
#             f.close()
#         except Exception as e:
#             # print("docx format error!", e)
#             logging.info("docx format error!")
#             return [-3]
#
#         # DOMTree = xml.dom.minidom.parse(save_path + "word/document.xml")
#         # collection = DOMTree.documentElement
#
#         try:
#             collection = xml_analyze(save_path + "word/document.xml")
#         except TimeoutError:
#             logging.info("read_xml_order timeout")
#             return [-4]
#
#         body = collection.getElementsByTagName("w:body")[0]
#         order_list = []
#         for line in body.childNodes:
#             # print(str(line))
#             if "w:p" in str(line):
#                 text = line.getElementsByTagName("w:t")
#                 picture = line.getElementsByTagName("wp:docPr")
#                 if text:
#                     order_list.append("w:t")
#                 if picture:
#                     order_list.append("wp:docPr")
#
#                 for line1 in line.childNodes:
#                     if "w:r" in str(line1):
#                         # print("read_xml_order", "w:r")
#                         picture1 = line1.getElementsByTagName("w:pict")
#                         if picture1:
#                             order_list.append("wp:docPr")
#
#             if "w:tbl" in str(line):
#                 order_list.append("w:tbl")
#                 read_xml_table(path, save_path)
#         return order_list
#     except Exception as e:
#         logging.info("read_xml_order error!")
#         print("read_xml_order", traceback.print_exc())
#         # log_traceback("read_xml_order")
#         return [-1]
#
#
# @get_memory_info.memory_decorator
# def read_xml_table(path, save_path):
#     logging.info("into read_xml_table")
#     try:
#         # print("into read_xml_table")
#         try:
#             f = zipfile.ZipFile(path)
#             for file in f.namelist():
#                 if "word/document.xml" == str(file):
#                     f.extract(file, save_path)
#             f.close()
#         except Exception as e:
#             # print("docx format error!", e)
#             logging.info("docx format error!")
#             return [-3]
#
#         # DOMTree = xml.dom.minidom.parse(save_path + "word/document.xml")
#         # collection = DOMTree.documentElement
#
#         try:
#             collection = xml_analyze(save_path + "word/document.xml")
#         except TimeoutError:
#             logging.info("read_xml_table timeout")
#             return [-4]
#
#         body = collection.getElementsByTagName("w:body")[0]
#         table_text_list = []
#         # print("body.childNodes", body.childNodes)
#         for line in body.childNodes:
#             if "w:tbl" in str(line):
#                 # print("str(line)", str(line))
#                 table_text = '<table border="1">' + "\n"
#                 tr_list = line.getElementsByTagName("w:tr")
#                 # print("line.childNodes", line.childNodes)
#                 tr_index = 0
#                 tr_text_list = []
#                 tr_text_list_colspan = []
#                 for tr in tr_list:
#                     table_text = table_text + "<tr rowspan=1>" + "\n"
#                     tc_list = tr.getElementsByTagName("w:tc")
#                     tc_index = 0
#                     tc_text_list = []
#                     for tc in tc_list:
#                         tc_text = ""
#
#                         # How many columns this cell spans
#                         col_span = tc.getElementsByTagName("w:gridSpan")
#                         if col_span:
#                             col_span = int(col_span[0].getAttribute("w:val"))
#                         else:
#                             col_span = 1
#
#                         # Whether this is the next empty cell of a vertically
#                         # merged cell
#                         is_merge = tc.getElementsByTagName("w:vMerge")
#                         if is_merge:
#                             is_merge = is_merge[0].getAttribute("w:val")
#                             if is_merge == "continue":
#                                 col_span_index = 0
#                                 real_tc_index = 0
#
#                                 # if get_platform() == "Windows":
#                                 #     print("read_xml_table tr_text_list", tr_text_list)
#                                 #     print("read_xml_table tr_index", tr_index)
#
#                                 if 0 <= tr_index - 1 < len(tr_text_list):
#                                     for tc_colspan in tr_text_list[tr_index - 1]:
#                                         if col_span_index < tc_index:
#                                             col_span_index += tc_colspan[1]
#                                             real_tc_index += 1
#
#                                     # print("tr_index-1, real_tc_index", tr_index-1, real_tc_index)
#                                     # print(tr_text_list[tr_index-1])
#                                     if real_tc_index < len(tr_text_list[tr_index - 1]):
#                                         tc_text = tr_text_list[tr_index - 1][real_tc_index][0]
#
#                         table_text = table_text + "<td colspan=" + str(col_span) + ">" + "\n"
#                         p_list = tc.getElementsByTagName("w:p")
#
#                         for p in p_list:
#                             t = p.getElementsByTagName("w:t")
#                             if t:
#                                 for tt in t:
#                                     # print("tt", tt.childNodes)
#                                     if len(tt.childNodes) > 0:
#                                         tc_text += tt.childNodes[0].nodeValue
#                                 tc_text += "\n"
#
#                         table_text = table_text + tc_text + "</td>" + "\n"
#                         tc_index += 1
#                         tc_text_list.append([tc_text, col_span])
#                     table_text += "</tr>" + "\n"
#                     tr_index += 1
#                     tr_text_list.append(tc_text_list)
#                 table_text += "</table>" + "\n"
#                 table_text_list.append(table_text)
#         return table_text_list
#
#     except Exception as e:
#         logging.info("read_xml_table error")
#         print("read_xml_table", traceback.print_exc())
#         # log_traceback("read_xml_table")
#         return [-1]
#
#
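
# A self-contained illustration (not from the original module) of the
# w:gridSpan -> HTML colspan mapping that read_xml_table above performs,
# run on a minimal WordprocessingML fragment via xml.dom.minidom.
import xml.dom.minidom

def _example_grid_span_to_colspan():
    xml_str = (
        '<w:tr xmlns:w="http://schemas.openxmlformats.org/wordprocessingml/2006/main">'
        '<w:tc><w:tcPr><w:gridSpan w:val="2"/></w:tcPr>'
        '<w:p><w:r><w:t>merged cell</w:t></w:r></w:p></w:tc>'
        '</w:tr>'
    )
    tr = xml.dom.minidom.parseString(xml_str).documentElement
    html = "<tr>"
    for tc in tr.getElementsByTagName("w:tc"):
        span = tc.getElementsByTagName("w:gridSpan")
        col_span = int(span[0].getAttribute("w:val")) if span else 1
        text = "".join(t.childNodes[0].nodeValue
                       for t in tc.getElementsByTagName("w:t") if t.childNodes)
        html += "<td colspan=" + str(col_span) + ">" + text + "</td>"
    return html + "</tr>"  # '<tr><td colspan=2>merged cell</td></tr>'
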
# @get_memory_info.memory_decorator
# @timeout_decorator.timeout(300, timeout_exception=TimeoutError)
# def xml_analyze(path):
#     # Parse the xml
#     DOMTree = xml.dom.minidom.parse(path)
#     collection = DOMTree.documentElement
#     return collection
#
#
# def read_docx_table(document):
#     table_text_list = []
#     for table in document.tables:
#         table_text = "<table>\n"
#         print("==================")
#         for row in table.rows:
#             table_text += "<tr>\n"
#             for cell in row.cells:
#                 table_text += "<td>" + cell.text + "</td>\n"
#             table_text += "</tr>\n"
#         table_text += "</table>\n"
#         print(table_text)
#         table_text_list.append(table_text)
#     return table_text_list
#
#
# @get_memory_info.memory_decorator
# def docx2text(path, unique_type_dir):
#     logging.info("into docx2text")
#     try:
#         try:
#             doc = docx.Document(path)
#         except Exception as e:
#             print("docx format error!", e)
#             print(traceback.print_exc())
#             logging.info("docx format error!")
#             return [-3]
#
#         # Iterate over the paragraphs
#         # print("docx2text extract paragraph")
#         paragraph_text_list = []
#         for paragraph in doc.paragraphs:
#             if paragraph.text != "":
#                 paragraph_text_list.append("<div>" + paragraph.text + "</div>" + "\n")
#                 # print("paragraph_text", paragraph.text)
#
#         # Iterate over the tables
#         try:
#             table_text_list = read_xml_table(path, unique_type_dir)
#         except TimeoutError:
#             return [-4]
#
#         if judge_error_code(table_text_list):
#             return table_text_list
#
#         # Iterate over the images in order
#         # print("docx2text extract image")
#         image_text_list = []
#         temp_image_path = unique_type_dir + "temp_image.png"
#         pattern = re.compile('rId\d+')
#         for graph in doc.paragraphs:
#             for run in graph.runs:
#                 if run.text == '':
#                     try:
#                         if not pattern.search(run.element.xml):
#                             continue
#                         content_id = pattern.search(run.element.xml).group(0)
#                         content_type = doc.part.related_parts[content_id].content_type
#                     except Exception as e:
#                         print("docx no image!", e)
#                         continue
#                     if not content_type.startswith('image'):
#                         continue
#
#                     # Write to a temporary file
#                     img_data = doc.part.related_parts[content_id].blob
#                     with open(temp_image_path, 'wb') as f:
#                         f.write(img_data)
#
#                     # if get_platform() == "Windows":
#                     #     print("img_data", img_data)
#
#                     if img_data is None:
#                         continue
#
#                     # OCR the image text
#                     image_text = picture2text(temp_image_path)
#                     if image_text == [-2]:
#                         return [-2]
#                     if image_text == [-1]:
#                         return [-1]
#                     if image_text == [-3]:
#                         continue
#
#                     image_text = image_text[0]
#                     image_text_list.append(add_div(image_text))
#
#         # Parse document.xml to get the order of the text elements
#         # print("docx2text extract order")
#         order_list = read_xml_order(path, unique_type_dir)
#         if order_list == [-2]:
#             return [-2]
#         if order_list == [-1]:
#             return [-1]
#
#         text = ""
#         print("len(order_list)", len(order_list))
#         print("len(paragraph_text_list)", len(paragraph_text_list))
#         print("len(image_text_list)", len(image_text_list))
#         print("len(table_text_list)", len(table_text_list))
#
#         # log("docx2text output in order")
#         for tag in order_list:
#             if tag == "w:t":
#                 if len(paragraph_text_list) > 0:
#                     text += paragraph_text_list.pop(0)
#             if tag == "wp:docPr":
#                 if len(image_text_list) > 0:
#                     text += image_text_list.pop(0)
#             if tag == "w:tbl":
#                 if len(table_text_list) > 0:
#                     text += table_text_list.pop(0)
#         return [text]
#     except Exception as e:
#         # print("docx2text", e, global_type)
#         logging.info("docx2text error!")
#         print("docx2text", traceback.print_exc())
#         # log_traceback("docx2text")
#         return [-1]
#
#
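
# Sketch of the ordering merge docx2text performs above: tags recorded while
# walking document.xml decide which of the three extracted queues supplies the
# next chunk of output.
def _example_merge_in_order(order_list, paragraphs, images, tables):
    queues = {"w:t": paragraphs, "wp:docPr": images, "w:tbl": tables}
    text = ""
    for tag in order_list:
        queue = queues.get(tag)
        if queue:
            text += queue.pop(0)
    return text
    # e.g. _example_merge_in_order(["w:t", "w:tbl", "w:t"],
    #                              ["p1", "p2"], [], ["<table>...</table>"])
    # -> "p1<table>...</table>p2"
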
# def add_div(text):
#     if text == "" or text is None:
#         return text
#
#     if get_platform() == "Windows":
#         print("add_div", text)
#     if re.findall("<div>", text):
#         return text
#
#     text = "<div>" + text + "\n"
#     text = re.sub("\n", "</div>\n<div>", text)
#     # text += "</div>"
#     if text[-5:] == "<div>":
#         print("add_div has cut", text[-30:])
#         text = text[:-5]
#     return text
#
#
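
# The core transform of add_div above, as a one-liner sketch: each
# newline-separated line ends up wrapped in its own <div>...</div> pair.
def _example_add_div(text="line1\nline2"):
    return "<div>" + text.replace("\n", "</div>\n<div>") + "</div>"
    # -> '<div>line1</div>\n<div>line2</div>'
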
# @get_memory_info.memory_decorator
# def pdf2Image(path, save_dir):
#     logging.info("into pdf2Image")
#     try:
#         try:
#             doc = fitz.open(path)
#         except Exception as e:
#             logging.info("pdf format error!")
#             # print("pdf format error!", e)
#             return [-3]
#
#         # output_image_list = []
#         output_image_dict = {}
#         page_count = doc.page_count
#         for page_no in range(page_count):
#             # Limit the pdf page count: keep only the first 10 and last 10 pages
#             if page_count > 20:
#                 if 10 <= page_no < page_count-10:
#                     # logging.info("pdf2Image: pdf pages count " + str(doc.page_count)
#                     #              + ", only get 70 pages")
#                     continue
#
#             try:
#                 page = doc.loadPage(page_no)
#                 output = save_dir + "_page" + str(page_no) + ".png"
#                 rotate = int(0)
#                 # A zoom factor of 1.3 per axis would give an image with about
#                 # 2.6x the resolution. Without any setting the default image
#                 # size is 792x612 at dpi=96.
#                 # (1.33333333 --> 1056x816) (2 --> 1584x1224)
#                 # (1.183, 2.28 --> 1920x1080)
#                 zoom_x = 3.
#                 zoom_y = 3.
#                 mat = fitz.Matrix(zoom_x, zoom_y).preRotate(rotate)
#                 pix = page.getPixmap(matrix=mat, alpha=False)
#                 pix.writePNG(output)
#                 pdf_image = cv2.imread(output)
#                 print("pdf_image", page_no, pdf_image.shape)
#                 # output_image_list.append([page_no, output])
#                 output_image_dict[int(page_no)] = output
#             except ValueError as e:
#                 traceback.print_exc()
#                 if str(e) == "page not in document":
#                     logging.info("pdf2Image page not in document! continue..." + str(page_no))
#                     continue
#                 elif "encrypted" in str(e):
#                     logging.info("pdf2Image document need password " + str(page_no))
#                     return [-7]
#             except RuntimeError as e:
#                 if "cannot find page" in str(e):
#                     logging.info("pdf2Image page {} not in document! continue... ".format(str(page_no)) + str(e))
#                     continue
#                 else:
#                     traceback.print_exc()
#                     return [-3]
#         return [output_image_dict]
#
#     except Exception as e:
#         logging.info("pdf2Image error!")
#         print("pdf2Image", traceback.print_exc())
#         return [-1]
#
#
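
# Worked example of the zoom -> pixel mapping noted in pdf2Image above: with
# the default 792x612 render at 96 dpi, a 3x zoom matrix gives 2376x1836.
def _example_render_size(width=792, height=612, zoom=3.0):
    return int(width * zoom), int(height * zoom)  # (2376, 1836)
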
# ocr_result_flag = 0
# def image_preprocess(image_np, image_path, use_ocr=True):
#     logging.info("into image_preprocess")
#     try:
#         # height, width
#         # resize_size = (1024, 768)
#         # Limit the image size
#         # resize_image(image_path, resize_size)
#
#         # Deskew the image and write it back to the original path
#         g_r_i = get_rotated_image(image_np, image_path)
#         if g_r_i == [-1]:
#             return [-1], [], [], 0
#
#         # OTR needs the image resized; write it to a separate path
#         image_np = cv2.imread(image_path)
#         best_h, best_w = get_best_predict_size(image_np)
#         image_resize = cv2.resize(image_np, (best_w, best_h), interpolation=cv2.INTER_AREA)
#         image_resize_path = image_path[:-4] + "_resize" + image_path[-4:]
#         cv2.imwrite(image_resize_path, image_resize)
#
#         # Call the OTR model interface
#         with open(image_resize_path, "rb") as f:
#             image_bytes = f.read()
#         points, split_lines, bboxes, outline_points = from_otr_interface(image_bytes)
#         if judge_error_code(points):
#             return points, [], [], 0
#
#         # Scale the bboxes from the resized image back to the original size
#         ratio = (image_np.shape[0]/best_h, image_np.shape[1]/best_w)
#         for i in range(len(bboxes)):
#             bbox = bboxes[i]
#             bboxes[i] = [(int(bbox[0][0]*ratio[1]), int(bbox[0][1]*ratio[0])),
#                          (int(bbox[1][0]*ratio[1]), int(bbox[1][1]*ratio[0]))]
#         for i in range(len(split_lines)):
#             line = split_lines[i]
#             split_lines[i] = [(int(line[0][0]*ratio[1]), int(line[0][1]*ratio[0])),
#                               (int(line[1][0]*ratio[1]), int(line[1][1]*ratio[0]))]
#         for i in range(len(points)):
#             point = points[i]
#             points[i] = (int(point[0]*ratio[1]), int(point[1]*ratio[0]))
#
#         for i in range(len(outline_points)):
#             point = outline_points[i]
#             outline_points[i] = [(int(point[0][0]*ratio[1]), int(point[0][1]*ratio[0])),
#                                  (int(point[1][0]*ratio[1]), int(point[1][1]*ratio[0]))]
#
#         # Check whether the boxes come out right
#         for box in bboxes:
#             cv2.rectangle(image_np, box[0], box[1], (0, 255, 0), 2)
#         # cv2.namedWindow('bbox', 0)
#         # cv2.imshow("bbox", image_np)
#         # cv2.waitKey(0)
#
#         # Call the OCR model interface
#         with open(image_path, "rb") as f:
#             image_bytes = f.read()
#         # With table(s)
#         if len(bboxes) >= 2:
#             text_list, bbox_list = from_ocr_interface(image_bytes, True)
#             if judge_error_code(text_list):
#                 return text_list, [], [], 0
#
#             # for i in range(len(text_list)):
#             #     print(text_list[i], bbox_list[i])
#             # Check whether the boxes come out right
#
#             # for box in bbox_list:
#             #     cv2.rectangle(image_np, (int(box[0][0]), int(box[0][1])),
#             #                   (int(box[2][0]), int(box[2][1])), (255, 0, 0), 1)
#             # cv2.namedWindow('bbox', 0)
#             # cv2.imshow("bbox", image_np)
#             # cv2.waitKey(0)
#
#             text, column_list = get_formatted_table(text_list, bbox_list, bboxes, split_lines)
#             if judge_error_code(text):
#                 return text, [], [], 0
#             is_table = 1
#             return text, column_list, outline_points, is_table
#
#         # Without tables
#         else:
#             if use_ocr:
#                 text = from_ocr_interface(image_bytes)
#                 if judge_error_code(text):
#                     return text, [], [], 0
#
#                 is_table = 0
#                 return text, [], [], is_table
#             else:
#                 is_table = 0
#                 return None, [], [], is_table
#
#     except Exception as e:
#         logging.info("image_preprocess error")
#         print("image_preprocess", traceback.print_exc())
#         return [-1], [], [], 0
#
#
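
# Sketch of the bbox rescale step in image_preprocess above: boxes predicted on
# the resized image are mapped back with per-axis ratios (original / resized).
def _example_rescale_bbox(bbox, orig_hw, resized_hw):
    ratio_y = orig_hw[0] / resized_hw[0]
    ratio_x = orig_hw[1] / resized_hw[1]
    (x0, y0), (x1, y1) = bbox
    return ((int(x0 * ratio_x), int(y0 * ratio_y)),
            (int(x1 * ratio_x), int(y1 * ratio_y)))
    # e.g. _example_rescale_bbox(((10, 20), (30, 40)), (2000, 1000), (1000, 500))
    # -> ((20, 40), (60, 80))
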
# def get_best_predict_size2(image_np):
#     sizes = [1280, 1152, 1024, 896, 768, 640, 512, 384, 256, 128]
#
#     min_len = 10000
#     best_height = sizes[0]
#     for height in sizes:
#         if abs(image_np.shape[0] - height) < min_len:
#             min_len = abs(image_np.shape[0] - height)
#             best_height = height
#
#     min_len = 10000
#     best_width = sizes[0]
#     for width in sizes:
#         if abs(image_np.shape[1] - width) < min_len:
#             min_len = abs(image_np.shape[1] - width)
#             best_width = width
#
#     return best_height, best_width
#
#
# def get_best_predict_size(image_np, times=64):
#     sizes = []
#     for i in range(1, 100):
#         if i*times <= 3000:
#             sizes.append(i*times)
#     sizes.sort(key=lambda x: x, reverse=True)
#
#     min_len = 10000
#     best_height = sizes[0]
#     for height in sizes:
#         if abs(image_np.shape[0] - height) < min_len:
#             min_len = abs(image_np.shape[0] - height)
#             best_height = height
#
#     min_len = 10000
#     best_width = sizes[0]
#     for width in sizes:
#         if abs(image_np.shape[1] - width) < min_len:
#             min_len = abs(image_np.shape[1] - width)
#             best_width = width
#
#     return best_height, best_width
#
#
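
# Compact sketch of get_best_predict_size above: snap each dimension to the
# nearest multiple of `times` that is at most 3000.
def _example_best_size(h, w, times=64):
    sizes = [i * times for i in range(1, 100) if i * times <= 3000]
    return (min(sizes, key=lambda s: abs(h - s)),
            min(sizes, key=lambda s: abs(w - s)))
    # e.g. _example_best_size(1080, 1920) -> (1088, 1920)
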
# @get_memory_info.memory_decorator
# def pdf2text(path, unique_type_dir):
#     logging.info("into pdf2text")
#     try:
#         # pymupdf pdf to image
#         save_dir = path.split(".")[-2] + "_" + path.split(".")[-1]
#         output_image_dict = pdf2Image(path, save_dir)
#         if judge_error_code(output_image_dict):
#             return output_image_dict
#         output_image_dict = output_image_dict[0]
#         output_image_no_list = list(output_image_dict.keys())
#         output_image_no_list.sort(key=lambda x: x)
#
#         # For each pdf page: extracted text, table column counts, outline
#         # points, whether it contains a table, and the page number/size
#         # page_info_list = []
#         page_info_dict = {}
#         has_table_dict = {}
#         no_table_dict = {}
#         for page_no in output_image_no_list:
#             img_path = output_image_dict.get(page_no)
#             print("pdf page", page_no, "in total", output_image_no_list[-1])
#             # Skip pages whose image cannot be read
#             try:
#                 img = cv2.imread(img_path)
#                 img_size = img.shape
#             except:
#                 logging.info("pdf2text read image in page fail! continue...")
#                 continue
#
#             # Process each page image
#             text, column_list, outline_points, is_table = image_preprocess(img, img_path,
#                                                                            use_ocr=False)
#             if judge_error_code(text):
#                 return text
#
#             # page_info_list.append([text, column_list, outline_points, is_table,
#             #                        page_no, img_size])
#             page_info = [text, column_list, outline_points, is_table, img_size]
#             page_info_dict[int(page_no)] = page_info
#             # Split into pages with tables and pages without
#             if is_table:
#                 has_table_dict[int(page_no)] = page_info
#             else:
#                 no_table_dict[int(page_no)] = page_info
#
#         has_table_no_list = list(has_table_dict.keys())
#         has_table_no_list.sort(key=lambda x: x)
#         page_no_list = list(page_info_dict.keys())
#         page_no_list.sort(key=lambda x: x)
#
#         # Connect tables that span adjacent pages
#         table_connect_list, connect_text_list = page_table_connect(has_table_dict)
#         if judge_error_code(table_connect_list):
#             return table_connect_list
#
#         # Page numbers involved in a connection
#         table_connect_page_no_list = []
#         for area in connect_text_list:
#             table_connect_page_no_list.append(area[1])
#         print("pdf2text table_connect_list", table_connect_list)
#         print("connect_text_list", connect_text_list)
#
#         # pdfminer pass
#         try:
#             fp = open(path, 'rb')
#             # Create a PDF parser from the file object
#             parser = PDFParser(fp)
#             # Create a PDF document
#             doc = PDFDocument(parser)
#             # Connect the resource manager, aggregator device and interpreter
#             rsrcmgr = PDFResourceManager()
#             device = PDFPageAggregator(rsrcmgr, laparams=LAParams())
#             interpreter = PDFPageInterpreter(rsrcmgr, device)
#
#             # Check whether the pdf can be read at all
#             for page in PDFPage.create_pages(doc):
#                 break
#         except pdfminer.psparser.PSEOF as e:
#             # pdfminer cannot read blank-page objects; OCR the images produced
#             # by pymupdf instead
#             logging.info("pdf2text " + str(e) + " use ocr read pdf!")
#             text_list = []
#             for page_no in page_no_list:
#                 logging.info("pdf2text ocr page_no " + str(page_no))
#                 page_info = page_info_dict.get(page_no)
#                 # Page with a table
#                 if page_info[3]:
#                     # Check whether the table connects across pages
#                     area_no = 0
#                     jump_page = 0
#                     for area in table_connect_list:
#                         if page_no in area:
#                             # Record the text only once
#                             if page_no == area[0]:
#                                 image_text = connect_text_list[area_no][0]
#                                 text_list.append([image_text, page_no, 0])
#                             jump_page = 1
#                         area_no += 1
#
#                     # Connected pages skip the remaining steps
#                     if jump_page:
#                         continue
#
#                     # Take the text directly
#                     image_text = page_info_dict.get(page_no)[0]
#                     text_list.append([image_text, page_no, 0])
#                 # Page without a table
#                 else:
#                     with open(output_image_dict.get(page_no), "rb") as ff:
#                         image_stream = ff.read()
#                     image_text = from_ocr_interface(image_stream)
#                     text_list.append([image_text, page_no, 0])
#
#             text_list.sort(key=lambda z: z[1])
#             text = ""
#             for t in text_list:
#                 text += t[0]
#             return [text]
#         except Exception as e:
#             logging.info("pdf format error!")
#             traceback.print_exc()
#             return [-3]
#
#         text_list = []
#         page_no = 0
#         pages = PDFPage.create_pages(doc)
#         pages = list(pages)
#         page_count = len(pages)
#         for page in pages:
#             logging.info("pdf2text pymupdf page_no " + str(page_no))
#             # Limit the pdf page count: keep only the first 10 and last 10 pages
#             # if page_no >= 70:
#             #     logging.info("pdf2text: pdf pages only get 70 pages")
#             #     break
#             if page_count > 20:
#                 if 10 <= page_no < page_count-10:
#                     page_no += 1
#                     continue
#
#             # If the page is known to contain a table, reuse the generated text
#             if page_no in has_table_no_list:
#                 # Check whether the table connects across pages
#                 area_no = 0
#                 jump_page = 0
#                 for area in table_connect_list:
#                     if page_no in area:
#                         # Record the text only once
#                         if page_no == area[0]:
#                             image_text = connect_text_list[area_no][0]
#                             text_list.append([image_text, page_no, 0])
#                         jump_page = 1
#                     area_no += 1
#
#                 # Connected pages skip the remaining steps
#                 if jump_page:
#                     page_no += 1
#                     continue
#
#                 # Take the text directly
#                 image_text = has_table_dict.get(page_no)[0]
#                 text_list.append([image_text, page_no, 0])
#                 page_no += 1
#                 continue
#
#             # Pages without tables are parsed with pdfminer
#             else:
#                 if get_platform() == "Windows":
#                     try:
#                         interpreter.process_page(page)
#                         layout = device.get_result()
#                     except Exception:
#                         logging.info("pdf2text pdfminer read pdf page error! continue...")
#                         continue
#
#                 else:
#                     # With a timeout
#                     try:
#                         # Parse the table-free page
#                         if get_platform() == "Windows":
#                             origin_pdf_analyze = pdf_analyze.__wrapped__
#                             layout = origin_pdf_analyze(interpreter, page, device)
#                         else:
#                             layout = pdf_analyze(interpreter, page, device)
#                     except TimeoutError as e:
#                         logging.info("pdf2text pdfminer read pdf page time out!")
#                         return [-4]
#                     except Exception:
#                         logging.info("pdf2text pdfminer read pdf page error! continue...")
#                         continue
#
#                 # Check whether the page has any text objects; if not it may
#                 # only contain a watermark
#                 only_image = 1
#                 image_count = 0
#                 for x in layout:
#                     if isinstance(x, LTTextBoxHorizontal):
#                         only_image = 0
#                     if isinstance(x, LTFigure):
#                         image_count += 1
#
#                 # Too many images on the page: OCR the whole page directly
#                 logging.info("pdf2text image_count " + str(image_count))
#                 if image_count >= 3:
#                     image_text = page_info_dict.get(page_no)[0]
#                     if image_text is None:
#                         with open(output_image_dict.get(page_no), "rb") as ff:
#                             image_stream = ff.read()
#                         image_text = from_ocr_interface(image_stream)
#                         if judge_error_code(image_text):
#                             return image_text
#                         page_info_dict[page_no][0] = image_text
#
#                     text_list.append([image_text, page_no, 0])
#                     page_no += 1
#                     continue
#
#                 order_list = []
#                 for x in layout:
#                     # Whether this object was read via OCR
#                     ocr_flag = 0
#
#                     if get_platform() == "Windows":
#                         # print("x", page_no, x)
#                         print()
#
#                     if isinstance(x, LTTextBoxHorizontal):
#                         image_text = x.get_text()
#
#                         # Undecodable encoding: use OCR
#                         if re.search('[(]cid:[0-9]+[)]', image_text):
#                             print(re.search('[(]cid:[0-9]+[)]', image_text))
#                             image_text = page_info_dict.get(page_no)[0]
#                             if image_text is None:
#                                 with open(output_image_dict.get(page_no), "rb") as ff:
#                                     image_stream = ff.read()
#                                 image_text = from_ocr_interface(image_stream)
#                                 if judge_error_code(image_text):
#                                     return image_text
#                                 page_info_dict[page_no][0] = image_text
#                             image_text = add_div(image_text)
#                             # order_list.append([image_text, page_no, x.bbox[1]])
#                             order_list = [[image_text, page_no, x.bbox[1]]]
#                             break
#                         else:
#                             image_text = add_div(image_text)
#                             order_list.append([image_text, page_no, x.bbox[1]])
#                         continue
#
#                     if isinstance(x, LTFigure):
#                         for image in x:
#                             if isinstance(image, LTImage):
#                                 try:
#                                     print("pdf2text LTImage size", page_no, image.width, image.height)
#                                     image_stream = image.stream.get_data()
#
#                                     # Ignore small images
#                                     if image.width <= 300 and image.height <= 300:
#                                         continue
#
#                                     # Some watermarks break pdf splitting/reading
#                                     # if image.width <= 200 and image.height <= 200:
#                                     #     continue
#
#                                     # img_test = Image.open(io.BytesIO(image_stream))
#                                     # img_test.save('temp/LTImage.jpg')
#
#                                     # Check the extracted image size; if too large,
#                                     # raise and OCR the pymupdf page image instead
#                                     img_test = Image.open(io.BytesIO(image_stream))
#                                     if img_test.size[1] > 2000 or img_test.size[0] > 1500:
#                                         print("pdf2text LTImage stream output size", img_test.size)
#                                         raise Exception
#                                     # Smaller images are saved and OCR'd directly
#                                     else:
#                                         img_test.save('temp/LTImage.jpg')
#                                         with open('temp/LTImage.jpg', "rb") as ff:
#                                             image_stream = ff.read()
#                                         image_text = from_ocr_interface(image_stream)
#                                         if judge_error_code(image_text):
#                                             return image_text
#                                 # except pdfminer.pdftypes.PDFNotImplementedError:
#                                 #     with open(output_image_list[page_no], "rb") as ff:
#                                 #         image_stream = ff.read()
#                                 except Exception:
#                                     logging.info("pdf2text pdfminer read image in page " + str(page_no) +
#                                                  " fail! use pymupdf read image...")
#                                     print(traceback.print_exc())
#                                     image_text = page_info_dict.get(page_no)[0]
#                                     if image_text is None:
#                                         with open(output_image_dict.get(page_no), "rb") as ff:
#                                             image_stream = ff.read()
#                                         image_text = from_ocr_interface(image_stream)
#                                         if judge_error_code(image_text):
#                                             return image_text
#                                         page_info_dict[page_no][0] = image_text
#                                     ocr_flag = 1
#
#                                 # Detect a watermark-only page: no text output
#                                 # and only image objects
#                                 if image_text == "" and only_image:
#                                     # Split this page out of the pdf
#                                     try:
#                                         logging.info("pdf2text guess pdf has watermark")
#                                         split_path = get_single_pdf(path, page_no)
#                                     except:
#                                         # If splitting raises, it is probably not
#                                         # a watermark; OCR the image instead
#                                         logging.info("pdf2text guess pdf has no watermark")
#                                         image_text = page_info_dict.get(page_no)[0]
#                                         if image_text is None:
#                                             with open(output_image_dict.get(page_no), "rb") as ff:
#                                                 image_stream = ff.read()
#                                             image_text = from_ocr_interface(image_stream)
#                                             order_list.append([image_text, page_no, -1])
#                                             page_info_dict[page_no][0] = image_text
#                                             ocr_flag = 1
#                                         continue
#                                     if judge_error_code(split_path):
#                                         return split_path
#
#                                     # Call the Office format conversion service
#                                     file_path = from_office_interface(split_path, unique_type_dir, 'html', 3)
#                                     # if file_path == [-3]:
#                                     #     return [-3]
#                                     if judge_error_code(file_path):
#                                         return file_path
#
#                                     # Extract the html text
#                                     image_text = get_html_p(file_path)
#                                     if judge_error_code(image_text):
#                                         return image_text
#
#                                 if get_platform() == "Windows":
#                                     print("image_text", page_no, x.bbox[1], image_text)
#                                     with open("temp" + str(x.bbox[0]) + ".jpg", "wb") as ff:
#                                         ff.write(image_stream)
#                                 image_text = add_div(image_text)
#                                 if ocr_flag:
#                                     order_list.append([image_text, page_no, -1])
#                                 else:
#                                     order_list.append([image_text, page_no, x.bbox[1]])
#
#                 order_list.sort(key=lambda z: z[2], reverse=True)
#
#                 # OCR took part in recognition
#                 if order_list[-1][2] == -1:
#                     ocr_order_list = [order_list[-1]]
#                     not_ocr_order_list = []
#                     not_ocr_text = ""
#                     # Deduplicate text fetched twice because of read failures
#                     for order in order_list:
#                         if order[2] != -1:
#                             not_ocr_order_list.append(order)
#                             not_ocr_text += order[0]
#                     if string_similarity(ocr_order_list[0][0], not_ocr_text) >= 0.85:
#                         order_list = not_ocr_order_list
#                     else:
#                         order_list = ocr_order_list
#
#                 for order in order_list:
#                     text_list.append(order)
#                 page_no += 1
#
#         text = ""
#         for t in text_list:
#             # text += add_div(t[0])
#             if t[0] is not None:
#                 text += t[0]
#         return [text]
#     except UnicodeDecodeError as e:
#         logging.info("pdf2text pdfminer create pages failed! " + str(e))
#         return [-3]
#     except Exception as e:
#         logging.info("pdf2text error!")
#         print("pdf2text", traceback.print_exc())
#         return [-1]
#
#
# def string_similarity(str1, str2):
#     # Strip <div> tags and newlines
#     str1 = re.sub("<div>", "", str1)
#     str1 = re.sub("</div>", "", str1)
#     str1 = re.sub("\n", "", str1)
#     str2 = re.sub("<div>", "", str2)
#     str2 = re.sub("</div>", "", str2)
#     str2 = re.sub("\n", "", str2)
#     # print("********************************")
#     # print("str1", str1)
#     # print("********************************")
#     # print("str2", str2)
#     # print("********************************")
#     score = difflib.SequenceMatcher(None, str1, str2).ratio()
#     print("string_similarity", score)
#     return score
#
#
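
# Quick illustration of the difflib ratio computed by string_similarity above;
# pdf2text treats a score >= 0.85 as "same content, prefer the non-OCR text".
def _example_similarity():
    import difflib
    a = "Notice of tender 2021"
    b = "Notice of tender 2O21"  # OCR confused '0' with 'O'
    return difflib.SequenceMatcher(None, a, b).ratio()  # ~0.95
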
# @get_memory_info.memory_decorator
# @timeout_decorator.timeout(300, timeout_exception=TimeoutError)
# def pdf_analyze(interpreter, page, device):
#     logging.info("into pdf_analyze")
#     # Parse a pdf page that contains no tables
#     pdf_time = time.time()
#     print("pdf_analyze interpreter process...")
#     interpreter.process_page(page)
#     print("pdf_analyze device get_result...")
#     layout = device.get_result()
#     logging.info("pdf2text read time " + str(time.time()-pdf_time))
#     return layout
#
#
# def get_html_p(html_path):
#     logging.info("into get_html_p")
#     try:
#         with open(html_path, "r") as ff:
#             html_str = ff.read()
#
#         soup = BeautifulSoup(html_str, 'lxml')
#         text = ""
#         for p in soup.find_all("p"):
#             p_text = p.text
#             p_text = p_text.strip()
#             if p.string != "":
#                 text += p_text
#                 text += "\n"
#         return text
#     except Exception as e:
#         logging.info("get_html_p error!")
#         print("get_html_p", traceback.print_exc())
#         return [-1]
#
#
# def get_single_pdf(path, page_no):
#     logging.info("into get_single_pdf")
#     try:
#         # print("path, ", path)
#         pdf_origin = PdfFileReader(path, strict=False)
#
#         pdf_new = PdfFileWriter()
#         pdf_new.addPage(pdf_origin.getPage(page_no))
#
#         path_new = path.split(".")[0] + "_split.pdf"
#         with open(path_new, "wb") as ff:
#             pdf_new.write(ff)
#         return path_new
#     except PyPDF2.utils.PdfReadError as e:
#         raise e
#     except Exception as e:
#         logging.info("get_single_pdf error! page " + str(page_no))
#         print("get_single_pdf", traceback.print_exc())
#         raise e
#
#
# def page_table_connect2(has_table_list, page_info_list):
#     logging.info("into page_table_connect")
#     try:
#         # Check whether tables on adjacent pages are connected
#         table_connect_list = []
#         temp_list = []
#         # Distance to the top or bottom of the image: 1/7 of the page height
#         threshold = 7
#
#         for i in range(1, len(has_table_list)):
#             page_info = has_table_list[i]
#             last_page_info = has_table_list[i - 1]
#
#             # Page numbers must be consecutive
#             if page_info[4] - last_page_info[4] == 1:
#
#                 # The column counts of the last area on the previous page and
#                 # the first area on the next page are both 0, and equal
#                 if not last_page_info[1][-1] and not page_info[1][0] and \
#                         last_page_info[1][-1] == page_info[1][0]:
#
#                     # The previous page's outline must be near the bottom and
#                     # the next page's outline near the top
#                     if last_page_info[5][0] - last_page_info[2][-1][1][1] \
#                             <= int(last_page_info[5][0]/threshold) \
#                             and page_info[2][0][0][1] - 0 \
#                             <= int(page_info[5][0]/threshold):
#                         temp_list.append(last_page_info[4])
#                         temp_list.append(page_info[4])
#                         continue
#
#             # Conditions not met: store the connected pages collected so far
#             if len(temp_list) > 1:
#                 temp_list = list(set(temp_list))
#                 temp_list.sort(key=lambda x: x)
#                 table_connect_list.append(temp_list)
#                 temp_list = []
#         if len(temp_list) > 1:
#             temp_list = list(set(temp_list))
#             temp_list.sort(key=lambda x: x)
#             table_connect_list.append(temp_list)
#             temp_list = []
#
#         # Join the contents of the connected pages
#         connect_text_list = []
#         for area in table_connect_list:
#             first_page_no = area[0]
#             for page in page_info_list:
#                 if page[4] == first_page_no:
#                     area_page_text = str(page[0])
#                     break
#             for i in range(1, len(area)):
#                 current_page_no = area[i]
#                 for page in page_info_list:
#                     if page[4] == current_page_no:
#                         current_page_text = str(page[0])
#                         break
#
#                 # Join the two tables
#                 table_prefix = re.finditer('<table border="1">', current_page_text)
#                 index_list = []
#                 for t in table_prefix:
#                     index_list.append(t.span())
#
#                 delete_index = index_list[0]
#                 current_page_text = current_page_text[:delete_index[0]] \
#                     + current_page_text[delete_index[1]:]
#
#                 table_suffix = re.finditer('</table>', area_page_text)
#                 index_list = []
#                 for t in table_suffix:
#                     index_list.append(t.span())
#
#                 delete_index = index_list[-1]
#                 area_page_text = area_page_text[:delete_index[0]] \
#                     + area_page_text[delete_index[1]:]
#                 area_page_text = area_page_text + current_page_text
#             connect_text_list.append([area_page_text, area])
#
#         return table_connect_list, connect_text_list
#     except Exception as e:
#         # print("page_table_connect", e)
#         logging.info("page_table_connect error!")
#         print("page_table_connect", traceback.print_exc())
#         return [-1], [-1]
#
#
# def page_table_connect(has_table_dict):
#     logging.info("into page_table_connect")
#     if not has_table_dict:
#         return [], []
#
#     try:
#         # Check whether tables on adjacent pages are connected
#         table_connect_list = []
#         temp_list = []
#         # Distance to the top or bottom of the image: 1/7 of the page height
#         threshold = 7
#         page_no_list = list(has_table_dict.keys())
#         page_no_list.sort(key=lambda x: x)
#         for i in range(1, len(page_no_list)):
#             page_info = has_table_dict.get(page_no_list[i])
#             last_page_info = has_table_dict.get(page_no_list[i-1])
#             # Page numbers must be consecutive
#             if page_no_list[i] - page_no_list[i-1] == 1:
#                 # The column counts of the last area on the previous page and
#                 # the first area on the next page are both 0, and equal
#                 if not last_page_info[1][-1] and not page_info[1][0] and \
#                         last_page_info[1][-1] == page_info[1][0]:
#
#                     # The previous page's outline must be near the bottom and
#                     # the next page's outline near the top
#                     if last_page_info[4][0] - last_page_info[2][-1][1][1] \
#                             <= int(last_page_info[4][0]/threshold) \
#                             and page_info[2][0][0][1] - 0 \
#                             <= int(page_info[4][0]/threshold):
#                         temp_list.append(page_no_list[i-1])
#                         temp_list.append(page_no_list[i])
#                         continue
#
#             # Conditions not met: store the connected pages collected so far
#             if len(temp_list) > 1:
#                 temp_list = list(set(temp_list))
#                 temp_list.sort(key=lambda x: x)
#                 table_connect_list.append(temp_list)
#                 temp_list = []
#         if len(temp_list) > 1:
#             temp_list = list(set(temp_list))
#             temp_list.sort(key=lambda x: x)
#             table_connect_list.append(temp_list)
#             temp_list = []
#
#         # Join the contents of the connected pages
#         connect_text_list = []
#         for area in table_connect_list:
#             first_page_no = area[0]
#             area_page_text = str(has_table_dict.get(first_page_no)[0])
#             for i in range(1, len(area)):
#                 current_page_no = area[i]
#                 current_page_text = str(has_table_dict.get(current_page_no)[0])
#
#                 # Join the two tables
#                 table_prefix = re.finditer('<table border="1">', current_page_text)
#                 index_list = []
#                 for t in table_prefix:
#                     index_list.append(t.span())
#
#                 delete_index = index_list[0]
#                 current_page_text = current_page_text[:delete_index[0]] \
#                     + current_page_text[delete_index[1]:]
#
#                 table_suffix = re.finditer('</table>', area_page_text)
#                 index_list = []
#                 for t in table_suffix:
#                     index_list.append(t.span())
#
#                 delete_index = index_list[-1]
#                 area_page_text = area_page_text[:delete_index[0]] \
#                     + area_page_text[delete_index[1]:]
#                 area_page_text = area_page_text + current_page_text
#             connect_text_list.append([area_page_text, area])
#
#         return table_connect_list, connect_text_list
#     except Exception as e:
#         # print("page_table_connect", e)
#         logging.info("page_table_connect error!")
#         print("page_table_connect", traceback.print_exc())
#         return [-1], [-1]
#
#
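
# Sketch of the cross-page stitch page_table_connect performs above: drop the
# trailing </table> of the earlier page and the leading <table border="1"> of
# the later page, then concatenate the two fragments.
def _example_connect_tables(prev_page_text, next_page_text):
    m = list(re.finditer("</table>", prev_page_text))[-1]
    prev_page_text = prev_page_text[:m.start()] + prev_page_text[m.end():]
    m = list(re.finditer('<table border="1">', next_page_text))[0]
    next_page_text = next_page_text[:m.start()] + next_page_text[m.end():]
    return prev_page_text + next_page_text
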
# @get_memory_info.memory_decorator
# def zip2text(path, unique_type_dir):
#     logging.info("into zip2text")
#     try:
#         zip_path = unique_type_dir
#
#         try:
#             zip_file = zipfile.ZipFile(path)
#             zip_list = zip_file.namelist()
#             # print("zip list namelist", zip_list)
#
#             if get_platform() == "Windows":
#                 if os.path.exists(zip_list[0]):
#                     print("zip2text exists")
#
#             # Extract every file into the target directory
#             file_list = []
#             for f in zip_list:
#                 file_list.append(zip_file.extract(f, path=zip_path))
#             # zip_file.extractall(path=zip_path)
#             zip_file.close()
#
#             # Collect the file names
#             # file_list = []
#             # for root, dirs, files in os.walk(zip_path, topdown=False):
#             #     for name in dirs:
#             #         file_list.append(os.path.join(root, name) + os.sep)
#             #     for name in files:
#             #         file_list.append(os.path.join(root, name))
#             #
#             # # if get_platform() == "Windows":
#             # #     print("file_list", file_list)
#             #
#             # # Filter out doc cache files
#             # temp_list = []
#             # for f in file_list:
#             #     if re.search("~\$", f):
#             #         continue
#             #     else:
#             #         temp_list.append(f)
#             # file_list = temp_list
#
#         except Exception as e:
#             logging.info("zip format error!")
#             print("zip format error!", traceback.print_exc())
#             return [-3]
#
#         # Rename the extracted inner files
#         # file_list = inner_file_rename(file_list)
#         file_list = rename_inner_files(zip_path)
#         if judge_error_code(file_list):
#             return file_list
#
#         if get_platform() == "Windows":
#             print("============= zip file list")
#             # print(file_list)
#
#         text = []
#         for file in file_list:
#             if os.path.isdir(file):
#                 continue
#
#             # No file extension: guess the format
#             if len(file.split(".")) <= 1:
#                 logging.info(str(file) + " has no type! Guess type...")
#                 _type = judge_format(file)
#                 if _type is None:
#                     logging.info(str(file) + " cannot guess type!")
#                     sub_text = [""]
#                 else:
#                     logging.info(str(file) + " guess type: " + _type)
#                     new_file = str(file) + "." + _type
#                     os.rename(file, new_file)
#                     file = new_file
#                     sub_text = getText(_type, file)
#             # Has a file extension: take it from the name
#             else:
#                 _type = file.split(".")[-1]
#                 sub_text = getText(_type, file)
#
#             if judge_error_code(sub_text, code=[-3]):
#                 continue
#             if judge_error_code(sub_text):
#                 return sub_text
#
#             text = text + sub_text
#         return text
#     except Exception as e:
#         logging.info("zip2text error!")
#         print("zip2text", traceback.print_exc())
#         return [-1]
#
#
# @get_memory_info.memory_decorator
# def rar2text(path, unique_type_dir):
#     logging.info("into rar2text")
#     try:
#         rar_path = unique_type_dir
#
#         try:
#             # Shell out to unrar for extraction
#             _signal = os.system("unrar x " + path + " " + rar_path)
#             print("rar2text _signal", _signal)
#             # An exit code of 0 means the extraction succeeded
#             if _signal != 0:
#                 raise Exception
#         except Exception as e:
#             logging.info("rar format error!")
#             print("rar format error!", e)
#             return [-3]
#
#         # Collect the file names
#         # file_list = []
#         # for root, dirs, files in os.walk(rar_path, topdown=False):
#         #     for name in dirs:
#         #         file_list.append(os.path.join(root, name) + os.sep)
#         #     for name in files:
#         #         file_list.append(os.path.join(root, name))
#
#         if get_platform() == "Windows":
#             print("============= rar file list")
#
#         # Rename the files inside the archive
#         # file_list = inner_file_rename(file_list)
#         file_list = rename_inner_files(rar_path)
#         if judge_error_code(file_list):
#             return file_list
#
#         text = []
#         for file in file_list:
#             if os.path.isdir(file):
#                 continue
#
#             # No file extension: guess the format
#             if len(file.split(".")) <= 1:
#                 logging.info(str(file) + " has no type! Guess type...")
#                 _type = judge_format(file)
#                 if _type is None:
#                     logging.info(str(file) + " cannot guess type!")
#                     sub_text = [""]
#                 else:
#                     logging.info(str(file) + " guess type: " + _type)
#                     new_file = str(file) + "." + _type
#                     os.rename(file, new_file)
#                     file = new_file
#                     sub_text = getText(_type, file)
#             # Has an extension: take the type from the name
#             else:
#                 _type = file.split(".")[-1]
#                 sub_text = getText(_type, file)
#
#             if judge_error_code(sub_text, code=[-3]):
#                 continue
#             if judge_error_code(sub_text):
#                 return sub_text
#
#             # print("sub text", sub_text, file, _type)
#             text = text + sub_text
#         return text
#     except Exception as e:
#         logging.info("rar2text error!")
#         print("rar2text", traceback.print_exc())
#         return [-1]
#
#
# def inner_file_rename(path_list):
#     logging.info("into inner_file_rename")
#     try:
#         # First filter '.' characters out of directory names
#         path_list.sort(key=lambda x: len(x), reverse=True)
#         for i in range(len(path_list)):
#             old_path = path_list[i]
#             # For directories, check whether the last component needs filtering, then rename
#             if os.path.isdir(old_path):
#                 ps = old_path.split(os.sep)
#                 old_p = ps[-2]
#                 if '.' in old_p:
#                     new_p = re.sub("\\.", "", old_p)
#                     new_path = ""
#                     for p in ps[:-2]:
#                         new_path += p + os.sep
#                     new_path += new_p + os.sep
#
#                     # Rename and update the cached paths
#                     # print("has .", path_list[i], new_path)
#                     os.rename(old_path, new_path)
#                     for j in range(len(path_list)):
#                         if old_path in path_list[j]:
#                             path_list[j] = re.sub(old_p, new_p, path_list[j]) + os.sep
#
#         # Split each path and rank by number of components
#         path_len_list = []
#         for p in path_list:
#             p_ss = p.split(os.sep)
#             temp_p_ss = []
#             for pp in p_ss:
#                 if pp == "":
#                     continue
#                 temp_p_ss.append(pp)
#             p_ss = temp_p_ss
#             path_len_list.append([p, p_ss, len(p_ss)])
#
#         # Rename starting from the paths with the fewest components, i.e. from the root down
#         path_len_list.sort(key=lambda x: x[2])
#
#         # for p in path_len_list:
#         #     print("---", p[1])
#
#         # Work out up to which level the directories stay unchanged
#         no_change_level = 0
#         loop = 0
#         for p_s in path_len_list[0][1]:
#             if p_s[-4:] == "_rar" or p_s[-4:] == "_zip":
#                 no_change_level += loop
#                 loop = 0
#             loop += 1
#         no_change_level += 1
#
#         # For each path
#         new_path_list = []
#         for path_len in path_len_list:
#             # The first n components are the fixed prefix
#             new_path = ""
#             for i in range(no_change_level):
#                 new_path += path_len[1][i] + os.sep
#             old_path = new_path
#
#             if not get_platform() == "Windows":
#                 old_path = os.sep + old_path
#                 new_path = os.sep + new_path
#             # print("path_len[1][3:]", path_len[1][3:])
#
#             count = 0
#             for p in path_len[1][no_change_level:]:
#                 # Every component of the new path becomes its hash
#                 new_path += str(hash(p))
#
#                 # No os.sep after the last component, and the old path keeps
#                 # its last component unhashed
#                 if count < len(path_len[1][no_change_level:]) - 1:
#                     old_path += str(hash(p)) + os.sep
#                     new_path += os.sep
#                 else:
#                     old_path += p
#                 count += 1
#
#             # Append os.sep when the path is a directory
#             if os.path.isdir(path_len[0]):
#                 new_path += os.sep
#                 old_path += os.sep
#             # Append the file suffix when the path is a file
#             else:
#                 p_ss = path_len[1][-1].split(".")
#                 if len(p_ss) > 1:
#                     path_suffix = "." + p_ss[-1]
#                     new_path += path_suffix
#
#             print("inner_file_rename", old_path, "to", new_path)
#             os.rename(old_path, new_path)
#             new_path_list.append(new_path)
#
#         return new_path_list
#     except Exception as e:
#         logging.info("inner_file_rename error!")
#         print("inner_file_rename", traceback.print_exc())
#         return [-1]
#
#
# def rename_inner_files(root_path):
#     try:
#         logging.info("into rename_inner_files")
#         # Collect every file and directory under the extraction folder,
#         # without the root prefix
#         path_list = []
#         for root, dirs, files in os.walk(root_path, topdown=False):
#             for name in dirs:
#                 p = os.path.join(root, name) + os.sep
#                 p = re.sub(root_path, "", p)
#                 path_list.append(p)
#             for name in files:
#                 p = os.path.join(root, name)
#                 p = re.sub(root_path, "", p)
#                 path_list.append(p)
#
#         # Sort by path length
#         path_list.sort(key=lambda x: len(x), reverse=True)
#
#         # Rename one by one
#         for old_path in path_list:
#             # Split on the path separator
#             ss = old_path.split(os.sep)
#             # Check whether it is a directory
#             is_dir = 0
#             file_type = ""
#             if os.path.isdir(root_path + old_path):
#                 ss = ss[:-1]
#                 is_dir = 1
#             else:
#                 if "." in old_path:
#                     file_type = "." + old_path.split(".")[-1]
#                 else:
#                     file_type = ""
#
#             # The last component is renamed to its hash
#             new_path = ""
#             # new_path = re.sub(ss[-1], str(hash(ss[-1])), old_path) + file_type
#             current_level = 0
#             for s in ss:
#                 # Rebuild the path
#                 if current_level < len(ss) - 1:
#                     new_path += s + os.sep
#                 else:
#                     new_path += str(hash(s)) + file_type
#                 current_level += 1
#
#             new_ab_path = root_path + new_path
#             old_ab_path = root_path + old_path
#             os.rename(old_ab_path, new_ab_path)
#
#         # Re-collect all files and directories under the extraction folder
#         new_path_list = []
#         for root, dirs, files in os.walk(root_path, topdown=False):
#             for name in dirs:
#                 new_path_list.append(os.path.join(root, name) + os.sep)
#             for name in files:
#                 new_path_list.append(os.path.join(root, name))
#         # print("new_path_list", new_path_list)
#         return new_path_list
#     except:
#         traceback.print_exc()
#         return [-1]
#
#
# @get_memory_info.memory_decorator
# def xls2text(path, unique_type_dir):
#     logging.info("into xls2text")
#     try:
#         # Convert the format via LibreOffice
#         file_path = from_office_interface(path, unique_type_dir, 'xlsx')
#         # if file_path == [-3]:
#         #     return [-3]
#         if judge_error_code(file_path):
#             return file_path
#
#         text = xlsx2text(file_path, unique_type_dir)
#         # if text == [-1]:
#         #     return [-1]
#         # if text == [-3]:
#         #     return [-3]
#         if judge_error_code(text):
#             return text
#
#         return text
#     except Exception as e:
#         logging.info("xls2text error!")
#         print("xls2text", traceback.print_exc())
#         return [-1]
#
#
# @get_memory_info.memory_decorator
# def xlsx2text(path, unique_type_dir):
#     logging.info("into xlsx2text")
#     try:
#         try:
#             # sheet_name=None fetches every sheet, returned as a dict
#             df_dict = pandas.read_excel(path, header=None, keep_default_na=False, sheet_name=None)
#         except Exception as e:
#             logging.info("xlsx format error!")
#             # print("xlsx format error!", e)
#             return [-3]
#
#         df_list = [sheet for sheet in df_dict.values()]
#         sheet_text = ""
#         for df in df_list:
#             text = '<table border="1">' + "\n"
#             for index, row in df.iterrows():
#                 text = text + "<tr>"
#                 for r in row:
#                     text = text + "<td>" + str(r) + "</td>" + "\n"
#                     # print(text)
#                 text = text + "</tr>" + "\n"
#             text = text + "</table>" + "\n"
#             sheet_text += text
#
#         return [sheet_text]
#     except Exception as e:
#         logging.info("xlsx2text error!")
#         print("xlsx2text", traceback.print_exc())
#         return [-1]
#
#
# @get_memory_info.memory_decorator
# def swf2text(path, unique_type_dir):
#     logging.info("into swf2text")
#     try:
#         try:
#             with open(path, 'rb') as f:
#                 swf_file = SWF(f)
#                 svg_exporter = SVGExporter()
#                 svg = swf_file.export(svg_exporter)
#                 # with open('swf_export.jpg', 'wb') as f:
#                 #     f.write(svg.read())
#                 swf_str = str(svg.getvalue(), encoding='utf-8')
#         except Exception as e:
#             logging.info("swf format error!")
#             traceback.print_exc()
#             return [-3]
#
#         # Regex-match the positions of the embedded image descriptions
#         result0 = re.finditer('<image id=(.[^>]*)', swf_str)
#         image_bytes_list = []
#         i = 0
#         image_path_prefix = path.split(".")[-2] + "_" + path.split(".")[-1]
#         image_path_list = []
#         for r in result0:
#             # Slice out the image description
#             swf_str0 = swf_str[r.span()[0]:r.span()[1] + 1]
#
#             # Regex out the image's base64 payload
#             result1 = re.search('xlink:href="data:(.[^>]*)', swf_str0)
#             swf_str1 = swf_str0[result1.span()[0]:result1.span()[1]]
#             reg1_prefix = 'b\''
#             result1 = re.search(reg1_prefix + '(.[^\']*)', swf_str1)
#             swf_str1 = swf_str1[result1.span()[0] + len(reg1_prefix):result1.span()[1]]
#
#             # base64_str -> base64_bytes -> no "\\" base64_bytes -> bytes -> image
#             base64_bytes_with_double = bytes(swf_str1, "utf-8")
#             base64_bytes = codecs.escape_decode(base64_bytes_with_double, "hex-escape")[0]
#             image_bytes = base64.b64decode(base64_bytes)
#             image_bytes_list.append(image_bytes)
#             image_path = image_path_prefix + "_page_" + str(i) + ".png"
#             with open(image_path, 'wb') as f:
#                 f.write(image_bytes)
#
#             image_path_list.append(image_path)
#             # Regex out the image width/height
#             # reg2_prefix = 'width="'
#             # result2 = re.search(reg2_prefix + '(\d+)', swf_str0)
#             # swf_str2 = swf_str0[result2.span()[0]+len(reg2_prefix):result2.span()[1]]
#             # width = swf_str2
#             # reg2_prefix = 'height="'
#             # result2 = re.search(reg2_prefix + '(\d+)', swf_str0)
#             # swf_str2 = swf_str0[result2.span()[0]+len(reg2_prefix):result2.span()[1]]
#             # height = swf_str2
#             i += 1
#
#         text_list = []
#         # print("image_path_list", image_path_list)
#         for image_path in image_path_list:
#             text = picture2text(image_path)
#             # print("text", text)
#
#             if judge_error_code(text, code=[-3]):
#                 continue
#             if judge_error_code(text):
#                 return text
#
#             text = text[0]
#             text_list.append(text)
#
#         text = ""
#         for t in text_list:
#             text += t
#
#         return [text]
#     except Exception as e:
#         logging.info("swf2text error!")
#         print("swf2text", traceback.print_exc())
#         return [-1]
#
#
# @get_memory_info.memory_decorator
# def picture2text(path, html=False):
#     logging.info("into picture2text")
#     try:
#         # Detect tables in the image
#         img = cv2.imread(path)
#         if img is None:
#             return [-3]
#
#         # if get_platform() == "Windows":
#         #     print("picture2text img", img)
#
#         text, column_list, outline_points, is_table = image_preprocess(img, path)
#         if judge_error_code(text):
#             return text
#         # if text == [-5]:
#         #     return [-5]
#         # if text == [-2]:
#         #     return [-2]
#         # if text == [-1]:
#         #     return [-1]
#
#         if html:
#             text = add_div(text)
#         return [text]
#     except Exception as e:
#         logging.info("picture2text error!")
#         print("picture2text", traceback.print_exc())
#         return [-1]
#
#
# @get_memory_info.memory_decorator
# def from_ocr_interface(image_stream, is_table=False):
#     logging.info("into from_ocr_interface")
#     try:
#         base64_stream = base64.b64encode(image_stream)
#
#         # Call the interface
#         try:
#             r = ocr(data=base64_stream, ocr_model=globals().get("global_ocr_model"))
#         except TimeoutError:
#             if is_table:
#                 return [-5], [-5]
#             else:
#                 return [-5]
#         except requests.exceptions.ConnectionError as e:
#             if is_table:
#                 return [-2], [-2]
#             else:
#                 return [-2]
#
#         _dict = r
#         text_list = eval(_dict.get("text"))
#         bbox_list = eval(_dict.get("bbox"))
#         if text_list is None:
#             text_list = []
#         if bbox_list is None:
#             bbox_list = []
#
#         if is_table:
#             return text_list, bbox_list
#         else:
#             if text_list and bbox_list:
#                 text = get_sequential_data(text_list, bbox_list, html=True)
#                 if judge_error_code(text):
#                     return text
#                 # if text == [-1]:
#                 #     return [-1]
#             else:
#                 text = ""
#             return text
#     except Exception as e:
#         logging.info("from_ocr_interface error!")
#         # print("from_ocr_interface", e, global_type)
#         if is_table:
#             return [-1], [-1]
#         else:
#             return [-1]
#
#
# @get_memory_info.memory_decorator
# def from_otr_interface(image_stream):
#     logging.info("into from_otr_interface")
#     try:
#         base64_stream = base64.b64encode(image_stream)
#
#         # Call the interface
#         try:
#             r = otr(data=base64_stream, otr_model=globals().get("global_otr_model"))
#         except TimeoutError:
#             return [-5], [-5], [-5], [-5]
#         except requests.exceptions.ConnectionError as e:
#             logging.info("from_otr_interface")
#             print("from_otr_interface", traceback.print_exc())
#             return [-2], [-2], [-2], [-2]
#
#         # Process the result
#         _dict = r
#         points = eval(_dict.get("points"))
#         split_lines = eval(_dict.get("split_lines"))
#         bboxes = eval(_dict.get("bboxes"))
#         outline_points = eval(_dict.get("outline_points"))
#         # print("from_otr_interface len(bboxes)", len(bboxes))
#         if points is None:
#             points = []
#         if split_lines is None:
#             split_lines = []
#         if bboxes is None:
#             bboxes = []
#         if outline_points is None:
#             outline_points = []
#         return points, split_lines, bboxes, outline_points
#     except Exception as e:
#         logging.info("from_otr_interface error!")
#         print("from_otr_interface", traceback.print_exc())
#         return [-1], [-1], [-1], [-1]
#
#
# def from_office_interface(src_path, dest_path, target_format, retry_times=1):
#     try:
#         # On Windows (Win10), bypass the timeout decorator
#         if get_platform() == "Windows":
#             # origin_office_convert = office_convert.__wrapped__
#             # file_path = origin_office_convert(src_path, dest_path, target_format, retry_times)
#             file_path = office_convert(src_path, dest_path, target_format, retry_times)
#         else:
#             # Wrap the decorator in a class, otherwise multiprocessing pickling
#             # fails with "it's not the same object as xxx"
#             # timeout_decorator_obj = my_timeout_decorator.TimeoutClass(office_convert, 180, TimeoutError)
#             # file_path = timeout_decorator_obj.run(src_path, dest_path, target_format, retry_times)
#
#             file_path = office_convert(src_path, dest_path, target_format, retry_times)
#
#         if judge_error_code(file_path):
#             return file_path
#         return file_path
#     except TimeoutError:
#         logging.info("from_office_interface timeout error!")
#         return [-5]
#     except:
#         logging.info("from_office_interface error!")
#         print("from_office_interface", traceback.print_exc())
#         return [-1]
#
#
# def get_sequential_data(text_list, bbox_list, html=False):
#     logging.info("into get_sequential_data")
#     try:
#         text = ""
#         order_list = []
#         for i in range(len(text_list)):
#             length_start = bbox_list[i][0][0]
#             length_end = bbox_list[i][1][0]
#             height_start = bbox_list[i][0][1]
#             height_end = bbox_list[i][-1][1]
#             # print([length_start, length_end, height_start, height_end])
#             order_list.append([text_list[i], length_start, length_end, height_start, height_end])
#             # text = text + infomation['text'] + "\n"
#
#         if get_platform() == "Windows":
#             print("get_sequential_data", order_list)
#         if not order_list:
#             if get_platform() == "Windows":
#                 print("get_sequential_data", "no order list")
#             return ""
#
#         # Sort the output by bbox coordinates
#         order_list.sort(key=lambda x: (x[3], x[1]))
#
#         # Split into rows and columns by bbox
#         # col_list = []
#         # height_end = int((order_list[0][4] + order_list[0][3]) / 2)
#         # for i in range(len(order_list)):
#         #     if height_end - threshold <= order_list[i][3] <= height_end + threshold:
#         #         col_list.append(order_list[i])
#         #     else:
#         #         row_list.append(col_list)
#         #         col_list = []
#         #         height_end = int((order_list[i][4] + order_list[i][3]) / 2)
#         #         col_list.append(order_list[i])
#         #     if i == len(order_list) - 1:
#         #         row_list.append(col_list)
#
#         row_list = []
#         used_box = []
#         threshold = 5
#         for box in order_list:
#             if box in used_box:
#                 continue
#
#             height_center = (box[4] + box[3]) / 2
#             row = []
#             for box2 in order_list:
#                 if box2 in used_box:
#                     continue
#                 height_center2 = (box2[4] + box2[3]) / 2
#                 if height_center - threshold <= height_center2 <= height_center + threshold:
#                     if box2 not in row:
#                         row.append(box2)
#                         used_box.append(box2)
#             row.sort(key=lambda x: x[0])
#             row_list.append(row)
#
#         for row in row_list:
#             if not row:
#                 continue
#             if len(row) <= 1:
#                 text = text + row[0][0] + "\n"
#             else:
#                 sub_text = ""
#                 row.sort(key=lambda x: x[1])
#                 for col in row:
#                     sub_text = sub_text + col[0] + " "
#                 sub_text = sub_text + "\n"
#                 text += sub_text
#
#         if html:
#             text = "<div>" + text
#             text = re.sub("\n", "</div>\n<div>", text)
#             text += "</div>"
#             # if text[-5:] == "<div>":
#             #     text = text[:-5]
#         return text
#
#     except Exception as e:
#         logging.info("get_sequential_data error!")
#         print("get_sequential_data", traceback.print_exc())
#         return [-1]
#
#
# def get_formatted_table(text_list, text_bbox_list, table_bbox_list, split_line):
#     logging.info("into get_formatted_table")
#     try:
#         # Redefine text_bbox_list as [point, point, text]
#         text_bbox_list = [[text_bbox_list[i][0], text_bbox_list[i][2], text_list[i]] for i in
#                           range(len(text_bbox_list))]
#         # Sort by the y coordinate
#         text_bbox_list.sort(key=lambda x: (x[0][1], x[0][0]))
#         table_bbox_list.sort(key=lambda x: (x[0][1], x[0][0]))
#
#         # print("text_bbox_list", text_bbox_list)
#         # print("table_bbox_list", table_bbox_list)
#
#         # bbox position threshold
#         threshold = 5
#
#         # Partition by split_line; one area may contain several tables [(), ()]
#         area_text_bbox_list = []
#         area_table_bbox_list = []
#         # print("get_formatted_table, split_line", split_line)
#         for j in range(1, len(split_line)):
#             last_y = split_line[j - 1][0][1]
#             current_y = split_line[j][0][1]
#             temp_text_bbox_list = []
#             temp_table_bbox_list = []
#
#             # Find the text bboxes in this area
#             for text_bbox in text_bbox_list:
#                 # Compute the centre of the text bbox
#                 text_bbox_center = ((text_bbox[1][0] + text_bbox[0][0]) / 2,
#                                     (text_bbox[1][1] + text_bbox[0][1]) / 2)
#                 if last_y - threshold <= text_bbox_center[1] <= current_y + threshold:
#                     temp_text_bbox_list.append(text_bbox)
#             area_text_bbox_list.append(temp_text_bbox_list)
#
#             # Find the table bboxes in this area
#             for table_bbox in table_bbox_list:
#                 # Compute the centre of the table bbox
#                 table_bbox_center = ((table_bbox[1][0] + table_bbox[0][0]) / 2,
#                                     (table_bbox[1][1] + table_bbox[0][1]) / 2)
#                 if last_y < table_bbox_center[1] < current_y:
#                     temp_table_bbox_list.append(table_bbox)
#             area_table_bbox_list.append(temp_table_bbox_list)
#
#         # for j in range(len(area_text_bbox_list)):
#         #     print("area_text_bbox_list", j, area_text_bbox_list[j])
#
#         # Match the two bbox sets in each area and build its table
#         area_text_list = []
#         area_column_list = []
#         for j in range(len(area_text_bbox_list)):
#             # The table bboxes and text bboxes of this area
#             temp_table_bbox_list = area_table_bbox_list[j]
#             temp_text_bbox_list = area_text_bbox_list[j]
#
#             # Check whether the area has any table bbox;
#             # if not, just join the area's text together
#             if not temp_table_bbox_list:
#                 # Gather all text bboxes of this area
#                 only_text_list = []
#                 only_bbox_list = []
#                 for text_bbox in temp_text_bbox_list:
#                     only_text_list.append(text_bbox[2])
#                     only_bbox_list.append([text_bbox[0], text_bbox[1]])
#                 only_text = get_sequential_data(only_text_list, only_bbox_list, True)
#                 if only_text == [-1]:
#                     return [-1], [-1]
#                 area_text_list.append(only_text)
#                 area_column_list.append(0)
#                 continue
#
#             # There are tables: map each text to its table cell
#             text_in_table = {}
#             for i in range(len(temp_text_bbox_list)):
#                 text_bbox = temp_text_bbox_list[i]
#
#                 # Compute the centre of the text bbox
#                 text_bbox_center = ((text_bbox[1][0] + text_bbox[0][0]) / 2,
#                                     (text_bbox[1][1] + text_bbox[0][1]) / 2)
#
#                 # Determine which table bbox contains the centre
#                 for table_bbox in temp_table_bbox_list:
#                     # The centre lies inside the table bbox: record the text
#                     if table_bbox[0][0] <= text_bbox_center[0] <= table_bbox[1][0] and \
#                             table_bbox[0][1] <= text_bbox_center[1] <= table_bbox[1][1]:
#                         if str(table_bbox) in text_in_table.keys():
#                             text_in_table[str(table_bbox)] = text_in_table.get(str(table_bbox)) + text_bbox[2]
#                         else:
#                             text_in_table[str(table_bbox)] = text_bbox[2]
#                         break
#
#                     # If no table bbox matched the text bbox, retry with a larger threshold
#                     # elif (table_bbox[0][0] <= text_bbox_center[0]+threshold <= table_bbox[1][0] and
#                     #       table_bbox[0][1] <= text_bbox_center[1]+threshold <= table_bbox[1][1]) or \
#                     #         (table_bbox[0][0] <= text_bbox_center[0]-threshold <= table_bbox[1][0] and
#                     #          table_bbox[0][1] <= text_bbox_center[1]-threshold <= table_bbox[1][1]) or \
#                     #         (table_bbox[0][0] <= text_bbox_center[0]+threshold <= table_bbox[1][0] and
#                     #          table_bbox[0][1] <= text_bbox_center[1]-threshold <= table_bbox[1][1]) or \
#                     #         (table_bbox[0][0] <= text_bbox_center[0]-threshold <= table_bbox[1][0] and
#                     #          table_bbox[0][1] <= text_bbox_center[1]+threshold <= table_bbox[1][1]):
#                     #     if str(table_bbox) in text_in_table.keys():
#                     #         text_in_table[str(table_bbox)] = text_in_table.get(str(table_bbox)) + text_bbox[2]
#                     #     else:
#                     #         text_in_table[str(table_bbox)] = text_bbox[2]
#                     #     break
#
#             # Split the cells into rows/columns and count the total sub-columns;
#             # collect the coordinates
#             all_col_list = []
#             all_row_list = []
#             for i in range(len(temp_table_bbox_list)):
#                 table_bbox = temp_table_bbox_list[i]
#
#                 # Collect every x coordinate
#                 if table_bbox[0][0] not in all_col_list:
#                     all_col_list.append(table_bbox[0][0])
#                 if table_bbox[1][0] not in all_col_list:
#                     all_col_list.append(table_bbox[1][0])
#
#                 # Collect every y coordinate
#                 if table_bbox[0][1] not in all_row_list:
#                     all_row_list.append(table_bbox[0][1])
#                 if table_bbox[1][1] not in all_row_list:
#                     all_row_list.append(table_bbox[1][1])
#             all_col_list.sort(key=lambda x: x)
#             all_row_list.sort(key=lambda x: x)
#
#             # Split into rows
#             row_list = []
#             rows = []
#             temp_table_bbox_list.sort(key=lambda x: (x[0][1], x[0][0], x[1][1], x[1][0]))
#             y_row = temp_table_bbox_list[0][0][1]
#             for i in range(len(temp_table_bbox_list)):
#                 table_bbox = temp_table_bbox_list[i]
#
#                 if y_row - threshold <= table_bbox[0][1] <= y_row + threshold:
#                     rows.append(table_bbox)
#                 else:
#                     y_row = table_bbox[0][1]
#                     if rows:
#                         rows.sort(key=lambda x: x[0][0])
#                         row_list.append(rows)
#                         rows = []
#                     rows.append(table_bbox)
#                 # print("*" * 30)
#                 # print(row_list)
#
#                 if i == len(temp_table_bbox_list) - 1:
#                     if rows:
#                         rows.sort(key=lambda x: x[0][0])
#                         row_list.append(rows)
#
#             # Build the table, including the text and the cell spans
#             area_column = []
#             text = '<table border="1">' + "\n"
#             for row in row_list:
#                 text += "<tr>" + "\n"
#                 for col in row:
#                     # Count other y coordinates strictly between the bbox's
#                     # y coordinates; +1 gives the row span
#                     row_span = 1
#                     for y in all_row_list:
#                         if col[0][1] < y < col[1][1]:
#                             if y - col[0][1] >= 2 and col[1][1] - y >= 2:
#                                 row_span += 1
#
#                     # Count other x coordinates strictly between the bbox's
#                     # x coordinates; +1 gives the column span
#                     col_span = 1
#                     for x in all_col_list:
#                         if col[0][0] < x < col[1][0]:
#                             if x - col[0][0] >= 2 and col[1][0] - x >= 2:
#                                 col_span += 1
#
#                     text += "<td colspan=" + str(col_span) + " rowspan=" + str(row_span) + ">"
#
#                     if str(col) in text_in_table.keys():
#                         text += text_in_table.get(str(col))
#                     else:
#                         text += ""
#                     text += "</td>" + "\n"
#                 text += "</tr>" + "\n"
#             text += "</table>" + "\n"
#
#             # Compute the maximum column count
#             max_col_num = 0
#             for row in row_list:
#                 col_num = 0
#                 for col in row:
#                     col_num += 1
#                 if max_col_num < col_num:
#                     max_col_num = col_num
#
#             area_text_list.append(text)
#             area_column_list.append(max_col_num)
#
#         text = ""
#         if get_platform() == "Windows":
#             print("get_formatted_table area_text_list", area_text_list)
#         for area_text in area_text_list:
#             text += area_text
#         return text, area_column_list
#     except Exception as e:
#         logging.info("get_formatted_table error!")
#         print("get_formatted_table", traceback.print_exc())
#         return [-1], [-1]


port_num = [0]


def choose_port():
    process_num = 4
    if port_num[0] % process_num == 0:
        _url = local_url + ":15011"
    elif port_num[0] % process_num == 1:
        _url = local_url + ":15012"
    elif port_num[0] % process_num == 2:
        _url = local_url + ":15013"
    elif port_num[0] % process_num == 3:
        _url = local_url + ":15014"
    port_num[0] = port_num[0] + 1
    return _url


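# Minimal usage sketch, not called anywhere in this module: successive
# choose_port() calls rotate through the four local worker ports
# 15011-15014 and then wrap around.
def _demo_choose_port():
    for _ in range(5):
        print(choose_port())

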
def getText(_type, path_or_stream):
    print("file type - " + _type)
    logging.info("file type - " + _type)
    try:
        ss = path_or_stream.split(".")
        unique_type_dir = ss[-2] + "_" + ss[-1] + os.sep
    except:
        unique_type_dir = path_or_stream + "_" + _type + os.sep

    if _type == "pdf":
        # return pdf2text(path_or_stream, unique_type_dir)
        return PDFConvert(path_or_stream, unique_type_dir).get_html()
    if _type == "docx":
        # return docx2text(path_or_stream, unique_type_dir)
        return DocxConvert(path_or_stream, unique_type_dir).get_html()
    if _type == "zip":
        # return zip2text(path_or_stream, unique_type_dir)
        return ZipConvert(path_or_stream, unique_type_dir).get_html()
    if _type == "rar":
        # return rar2text(path_or_stream, unique_type_dir)
        return RarConvert(path_or_stream, unique_type_dir).get_html()
    if _type == "xlsx":
        # return xlsx2text(path_or_stream, unique_type_dir)
        return XlsxConvert(path_or_stream, unique_type_dir).get_html()
    if _type == "xls":
        # return xls2text(path_or_stream, unique_type_dir)
        return XlsConvert(path_or_stream, unique_type_dir).get_html()
    if _type == "doc":
        # return doc2text(path_or_stream, unique_type_dir)
        return DocConvert(path_or_stream, unique_type_dir).get_html()
    if _type == "jpg" or _type == "png" or _type == "jpeg":
        # return picture2text(path_or_stream)
        return ImageConvert(path_or_stream, unique_type_dir).get_html()
    if _type == "swf":
        # return swf2text(path_or_stream, unique_type_dir)
        return SwfConvert(path_or_stream, unique_type_dir).get_html()
    if _type == "txt":
        # return txt2text(path_or_stream)
        return TxtConvert(path_or_stream, unique_type_dir).get_html()
    return [""]


def to_html(path, text):
    with open(path, 'w', encoding="utf8") as f:
        f.write("<!DOCTYPE HTML>")
        f.write('<head><meta charset="UTF-8"></head>')
        f.write("<body>")
        f.write(text)
        f.write("</body>")


def resize_image(image_path, size):
    try:
        image_np = cv2.imread(image_path)
        # print(image_np.shape)
        width = image_np.shape[1]
        height = image_np.shape[0]
        h_w_rate = height / width
        # width_standard = 900
        # height_standard = 1400
        width_standard = size[1]
        height_standard = size[0]
        width_new = int(height_standard / h_w_rate)
        height_new = int(width_standard * h_w_rate)
        if width > width_standard:
            image_np = cv2.resize(image_np, (width_standard, height_new))
        elif height > height_standard:
            image_np = cv2.resize(image_np, (width_new, height_standard))
        cv2.imwrite(image_path, image_np)
        # print("resize_image", image_np.shape)
        return
    except Exception as e:
        logging.info("resize_image")
        print("resize_image", e, global_type)
        return


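# Minimal usage sketch: shrink an over-sized scan in place so it fits within
# 1400x900. Note that size is (height_standard, width_standard), and
# "scan.png" is an assumed path.
def _demo_resize_image():
    resize_image("scan.png", (1400, 900))

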
def remove_red_seal(image_np):
    """
    Remove red seal stamps from an image.
    """
    # Split out the red channel
    blue_c, green_c, red_c = cv2.split(image_np)
    # Passing cv2.THRESH_OTSU with thresh=0 lets OpenCV pick the optimal threshold
    thresh, ret = cv2.threshold(red_c, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    # print("remove_red_seal thresh", thresh)
    # Empirically, scaling the Otsu threshold down slightly works better
    filter_condition = int(thresh * 0.98)
    thresh1, red_thresh = cv2.threshold(red_c, filter_condition, 255, cv2.THRESH_BINARY)
    # Expand the single channel back to 3 channels
    image_and = np.expand_dims(red_thresh, axis=2)
    image_and = np.concatenate((image_and, image_and, image_and), axis=-1)
    # print(image_and.shape)
    # Erode to thicken the remaining strokes
    gray = cv2.cvtColor(image_and, cv2.COLOR_RGB2GRAY)
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (2, 2))
    erode = cv2.erode(gray, kernel)
    cv2.imshow("erode", erode)
    cv2.waitKey(0)

    image_and = np.bitwise_and(cv2.bitwise_not(blue_c), cv2.bitwise_not(erode))
    result_img = cv2.bitwise_not(image_and)
    cv2.imshow("remove_red_seal", result_img)
    cv2.waitKey(0)
    return result_img


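# Minimal usage sketch ("sealed.png" is an assumed path). Note that
# remove_red_seal() itself pops up debug windows via cv2.imshow.
def _demo_remove_red_seal():
    img = cv2.imread("sealed.png")
    cleaned = remove_red_seal(img)
    cv2.imwrite("sealed_clean.png", cleaned)

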
def remove_underline(image_np):
    """
    Remove underlines beneath text.
    """
    # Grayscale
    gray = cv2.cvtColor(image_np, cv2.COLOR_BGR2GRAY)
    # Binarize
    binary = cv2.adaptiveThreshold(~gray, 255,
                                   cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY,
                                   15, 10)
    # Sobel kernels
    kernel_row = np.array([[-1, -2, -1], [0, 0, 0], [1, 2, 1]], np.float32)
    kernel_col = np.array([[-1, 0, 1], [-2, 0, 2], [-1, 0, 1]], np.float32)
    # binary = cv2.filter2D(binary, -1, kernel=kernel)
    binary_row = cv2.filter2D(binary, -1, kernel=kernel_row)
    binary_col = cv2.filter2D(binary, -1, kernel=kernel_col)
    cv2.imshow("custom_blur_demo", binary)
    cv2.waitKey(0)

    rows, cols = binary.shape
    # Detect horizontal lines
    scale = 5
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (cols // scale, 1))
    erodedcol = cv2.erode(binary_row, kernel, iterations=1)
    cv2.imshow("Eroded Image", erodedcol)
    cv2.waitKey(0)
    dilatedcol = cv2.dilate(erodedcol, kernel, iterations=1)
    cv2.imshow("dilate Image", dilatedcol)
    cv2.waitKey(0)
    return


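# Minimal usage sketch ("underlined.png" is an assumed path):
# remove_underline() only displays its intermediate steps via cv2.imshow
# and returns None.
def _demo_remove_underline():
    img = cv2.imread("underlined.png")
    remove_underline(img)

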
def getMDFFromFile(path):
    _length = 0
    try:
        _md5 = hashlib.md5()
        with open(path, "rb") as ff:
            while True:
                data = ff.read(4096)
                if not data:
                    break
                _length += len(data)
                _md5.update(data)
        return _md5.hexdigest(), _length
    except Exception as e:
        traceback.print_exc()
        return None, _length


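# Minimal usage sketch: getMDFFromFile() streams the file in 4 KB chunks and
# returns (md5_hexdigest, byte_length). "1.doc" mirrors the sample file used
# in the __main__ block at the bottom of this module.
def _demo_getMDFFromFile():
    file_md5, file_length = getMDFFromFile("1.doc")
    print(file_md5, file_length)

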
def add_html_format(text_list):
    new_text_list = []
    for t in text_list:
        html_t = "<!DOCTYPE HTML>\n"
        html_t += '<head><meta charset="UTF-8"></head>\n'
        html_t += "<body>\n"
        html_t += t
        html_t += "\n</body>\n"
        new_text_list.append(html_t)
    return new_text_list


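# Minimal usage sketch: each fragment is wrapped in its own full HTML shell.
def _demo_add_html_format():
    wrapped = add_html_format(["<div>hello</div>"])
    print(wrapped[0])

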
@timeout_decorator.timeout(1200, timeout_exception=TimeoutError)
def unique_temp_file_process(stream, _type):
    logging.info("into unique_temp_file_process")
    try:
        # Each call gets its own unique workspace under temp
        uid1 = uuid.uuid1().hex
        unique_space_path = _path + os.sep + "temp" + os.sep + uid1 + os.sep
        # unique_space_path = "/mnt/fangjiasheng/" + "temp/" + uid1 + "/"
        # Guard against collisions
        if not os.path.exists(unique_space_path):
            if not os.path.exists(_path + os.sep + "temp"):
                os.mkdir(_path + os.sep + "temp" + os.sep)
            os.mkdir(unique_space_path)
        else:
            uid2 = uuid.uuid1().hex
            if not os.path.exists(_path + os.sep + "temp"):
                os.mkdir(_path + os.sep + "temp" + os.sep)
            os.mkdir(_path + os.sep + "temp" + os.sep + uid2 + os.sep)
            # os.mkdir("/mnt/" + "temp/" + uid2 + "/")

        # Inside the workspace, save the incoming file under a unique name too
        uid3 = uuid.uuid1().hex
        file_path = unique_space_path + uid3 + "." + _type
        with open(file_path, "wb") as ff:
            ff.write(stream)

        # Skip some known-bad files by md5
        # (getMDFFromFile returns (md5, length), so take index 0)
        pass_md5 = getMDFFromFile(file_path)[0]
        print("getMDFFromFile", pass_md5)
        if pass_md5 == '84dba5a65339f338d3ebdf9f33fae13e' \
                or pass_md5 == '3d9f9f4354582d85b21b060ebd5786db' \
                or pass_md5 == 'b52da40f24c6b29dfc2ebeaefe4e41f1' \
                or pass_md5 == 'eefb925b7ccec1467be20b462fde2a09':
            raise Exception

        text = getText(_type, file_path)
        return text
    except Exception as e:
        # print("Convert error! Delete temp file. ", e, global_type)
        logging.info("unique_temp_file_process")
        print("unique_temp_file_process:", traceback.print_exc())
        return [-1]
    finally:
        print("======================================")
        print("File md5:", getMDFFromFile(file_path))
        try:
            if get_platform() == "Linux":
                # Delete everything under the unique workspace
                if os.path.exists(unique_space_path):
                    shutil.rmtree(unique_space_path)
                print()
        except Exception as e:
            logging.info("Delete Files Failed!")
            # print("Delete Files Failed!")
            return [-1]
        print("Finally")
        # to_html(_path + "6.html", text[0])
        # to_html(unique_space_path + "result.html", text[0])
        # return text


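# Minimal usage sketch (assumes a local "1.doc", as in the __main__ block):
# the raw bytes are written into a unique temp workspace and dispatched
# through getText() by extension; on success a list of HTML strings comes
# back, otherwise an error code such as [-1].
def _demo_unique_temp_file_process():
    with open("1.doc", "rb") as f:
        stream = f.read()
    print(unique_temp_file_process(stream, "doc"))

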
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)


def log(msg):
    """
    @summary: print a log message
    """
    logger.info(msg)


def cut_str(text_list, only_text_list, max_bytes_length=2000000):
    logging.info("into cut_str")
    try:
        # Total byte count of the formatted text
        bytes_length = 0
        for text in text_list:
            bytes_length += len(bytes(text, encoding='utf-8'))
        print("text_list", bytes_length)

        # Under the limit: return unchanged
        if bytes_length < max_bytes_length:
            print("return text_list no cut")
            return text_list

        # Join everything and recount the byte length of the plain text
        all_text = ""
        bytes_length = 0
        for text in only_text_list:
            bytes_length += len(bytes(text, encoding='utf-8'))
            all_text += text
        # print("only_text_list", bytes_length)

        # Under the limit: return unchanged
        if bytes_length < max_bytes_length:
            print("return only_text_list no cut")
            return only_text_list

        # Truncate by characters (a UTF-8 character here is at most 3 bytes,
        # so max_bytes_length / 3 characters stays under the byte limit)
        all_text = all_text[:int(max_bytes_length / 3)]
        print("text bytes ", len(bytes(all_text, encoding='utf-8')))
        print("return only_text_list has cut")
        return [all_text]
    except Exception as e:
        logging.info("cut_str " + str(e))
        return ["-1"]


@get_memory_info.memory_decorator
def convert(data, ocr_model, otr_model):
    """
    Interface return values:
        {[str], 1}: success
        {[-1], 0}: internal logic error
        {[-2], 0}: interface call error
        {[-3], 1}: file format error, cannot open
        {[-4], 0}: a third-party reader timed out on the file
        {[-5], 0}: the whole conversion process timed out
        {[-6], 0}: Aliyun UDF queue timed out
        {[-7], 1}: file requires a password, cannot open
    :return: {"result_html": [str], "result_text": [str], "is_success": int}
    """
    # Limit memory usage
    # soft, hard = resource.getrlimit(resource.RLIMIT_AS)
    # resource.setrlimit(resource.RLIMIT_AS, (15 * 1024 ** 3, hard))
    logging.info("into convert")
    start_time = time.time()
    try:
        # Put the models into module globals
        globals().update({"global_ocr_model": ocr_model})
        globals().update({"global_otr_model": otr_model})
        stream = base64.b64decode(data.get("file"))
        _type = data.get("type")

        if get_platform() == "Windows":
            # Bypass the timeout decorator and call the wrapped function directly
            origin_unique_temp_file_process = unique_temp_file_process.__wrapped__
            text = origin_unique_temp_file_process(stream, _type)
        else:
            # On Linux the decorator enforces the overall conversion timeout
            try:
                text = unique_temp_file_process(stream, _type)
            except TimeoutError:
                logging.info("convert time out! 1200 sec")
                text = [-5]

        # if text == [-1]:
        #     print({"failed result": [-1], "is_success": 0}, time.time() - start_time)
        #     return {"result_html": ["-1"], "result_text": ["-1"], "is_success": 0}
        # if text == [-2]:
        #     print({"failed result": [-2], "is_success": 0}, time.time() - start_time)
        #     return {"result_html": ["-2"], "result_text": ["-2"], "is_success": 0}
        # if text == [-3]:
        #     print({"failed result": [-3], "is_success": 1}, time.time() - start_time)
        #     return {"result_html": ["-3"], "result_text": ["-3"], "is_success": 1}
        # if text == [-4]:
        #     print({"failed result": [-4], "is_success": 0}, time.time() - start_time)
        #     return {"result_html": ["-4"], "result_text": ["-4"], "is_success": 0}
        # if text == [-5]:
        #     print({"failed result": [-5], "is_success": 0}, time.time() - start_time)
        #     return {"result_html": ["-5"], "result_text": ["-5"], "is_success": 0}
        # if text == [-7]:
        #     print({"failed result": [-7], "is_success": 1}, time.time() - start_time)
        #     return {"result_html": ["-7"], "result_text": ["-7"], "is_success": 1}
        # if text == [-8]:
        #     print({"failed result": [-8], "is_success": 0}, time.time() - start_time)
        #     return {"result_html": ["-8"], "result_text": ["-8"], "is_success": 1}

        error_code = [[-x] for x in range(1, 9)]
        still_success_code = [[-3], [-7]]
        if text in error_code:
            if text in still_success_code:
                print({"failed result": text, "is_success": 1}, time.time() - start_time)
                return {"result_html": [str(text[0])], "result_text": [str(text[0])],
                        "is_success": 1}
            else:
                print({"failed result": text, "is_success": 0}, time.time() - start_time)
                return {"result_html": [str(text[0])], "result_text": [str(text[0])],
                        "is_success": 0}

        # Save the result as result.html
        # if get_platform() == "Windows":
        text_str = ""
        for t in text:
            text_str += t
        to_html("../result.html", text_str)

        # Extract the plain text
        only_text = []
        for t in text:
            new_t = BeautifulSoup(t, "lxml").get_text()
            new_t = re.sub("\n", "", new_t)
            only_text.append(new_t)

        # Truncate when too long
        text = cut_str(text, only_text)
        only_text = cut_str(only_text, only_text)
        if len(only_text) == 0:
            only_text = [""]

        if only_text[0] == '' and len(only_text) <= 1:
            print({"finished result": ["", 0], "is_success": 1}, time.time() - start_time)
        else:
            print({"finished result": [str(only_text)[:20], len(str(text))],
                   "is_success": 1}, time.time() - start_time)
        return {"result_html": text, "result_text": only_text, "is_success": 1}
    except Exception as e:
        print({"failed result": [-1], "is_success": 0}, time.time() - start_time)
        print("convert", traceback.print_exc())
        return {"result_html": ["-1"], "result_text": ["-1"], "is_success": 0}


global_type = ""
local_url = "http://127.0.0.1"
if get_platform() == "Windows":
    _path = os.path.abspath(os.path.dirname(__file__))
else:
    _path = "/home/admin"
if not os.path.exists(_path):
    _path = os.path.dirname(os.path.abspath(__file__))


if __name__ == '__main__':
    if get_platform() == "Windows":
        file_path = "C:/Users/Administrator/Desktop/error8.pdf"
        # file_path = "D:/BIDI_DOC/比地_文档/2022/Test_Interface/1622529434414.rar"
        # file_path = "D:/BIDI_DOC/比地_文档/2022/Test_ODPS/1625369915229.zip"
    else:
        file_path = "1.doc"
    with open(file_path, "rb") as f:
        file_bytes = f.read()
    file_base64 = base64.b64encode(file_bytes)
    data = {"file": file_base64, "type": file_path.split(".")[-1], "filemd5": 100}
    ocr_model = ocr_interface.OcrModels().get_model()
    otr_model = otr_interface.OtrModels().get_model()
    result = convert(data, ocr_model, otr_model)