# convert_docx.py

import os
import sys
sys.path.append(os.path.dirname(__file__) + "/../")
from format_convert.convert_tree import _Document, _Sentence, _Page, _Image, _Table
import re
import traceback
import xml.dom.minidom          # used by parse_xml
import xml.etree.ElementTree    # used by parse_xml2
import zipfile
import docx
from bs4 import BeautifulSoup
from format_convert.utils import judge_error_code, add_div, get_logger, log, memory_decorator, get_garble_code
from format_convert.wrapt_timeout_decorator import timeout
from format_convert.convert_image import ImageConvert
from format_convert.convert_need_interface import from_tika_interface


def docx2text():
    return


def read_rel_image(document_xml_rels):
    if not document_xml_rels:
        return {}
    # Collect the Id -> Target relationships from the mapping (rels) file
    image_rel_dict = {}
    for rel in document_xml_rels:
        if 'Relationship' in str(rel):
            _id = rel.get("Id")
            _target = rel.get("Target")
            _type = rel.get("Type")
            if 'image' in _type:
                image_rel_dict[_id] = _target
    return image_rel_dict


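# Illustrative note (example values, not from any real document): entries in
# word/_rels/document.xml.rels look roughly like
#   <Relationship Id="rId5" Type=".../image" Target="media/image1.png"/>
# so read_rel_image returns a dict such as {'rId5': 'media/image1.png'}; the
# Target is later joined with unique_type_dir + 'word/' to locate the extracted file.

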
def read_no_start(numbering_xml):
    """
    Read the start value of each numbering group.
    :return:
    """
    if not numbering_xml:
        return {}, {}
    # Get the mapping between abstract (virtual) ids and real ids
    w_num_list = numbering_xml.getElementsByTagName("w:num")
    abstract_real_id_dict = {}
    for w_num in w_num_list:
        w_num_id = w_num.getAttribute("w:numId")
        w_abstract_num_id = w_num.getElementsByTagName('w:abstractNumId')[0].getAttribute("w:val")
        abstract_real_id_dict[w_abstract_num_id] = w_num_id
    # Get the start number for each abstract id
    w_abstract_num_list = numbering_xml.getElementsByTagName("w:abstractNum")
    abstract_id_level_dict = {}
    abstract_id_level_text_dict = {}
    for w_abstract_num in w_abstract_num_list:
        w_abstract_num_id = w_abstract_num.getAttribute("w:abstractNumId")
        w_lvl_list = w_abstract_num.getElementsByTagName("w:lvl")
        level_start_dict = {}
        level_text_dict = {}
        for w_lvl in w_lvl_list:
            w_ilvl_value = w_lvl.getAttribute('w:ilvl')
            if w_lvl.getElementsByTagName("w:start"):
                w_ilvl_start_num = w_lvl.getElementsByTagName("w:start")[0].getAttribute("w:val")
                level_start_dict[int(w_ilvl_value)] = int(w_ilvl_start_num)
            if w_lvl.getElementsByTagName("w:lvlText") and w_lvl.getElementsByTagName("w:numFmt"):
                w_lvl_text = w_lvl.getElementsByTagName("w:lvlText")[0].getAttribute("w:val")
                w_lvl_format = w_lvl.getElementsByTagName("w:numFmt")[0].getAttribute("w:val")
                if w_lvl_format == 'upperLetter':
                    w_lvl_text = re.sub(r'%\d', '%A', w_lvl_text)
                elif w_lvl_format == 'lowerLetter':
                    w_lvl_text = re.sub(r'%\d', '%a', w_lvl_text)
                level_text_dict[int(w_ilvl_value)] = w_lvl_text
        abstract_id_level_dict[w_abstract_num_id] = level_start_dict
        abstract_id_level_text_dict[w_abstract_num_id] = level_text_dict
    # Map back to the real ids
    real_id_level_start_dict = {}
    for abstract_id in abstract_real_id_dict.keys():
        real_id = abstract_real_id_dict.get(abstract_id)
        level_start_dict = abstract_id_level_dict.get(abstract_id)
        if level_start_dict:
            real_id_level_start_dict[int(real_id)] = level_start_dict
    real_id_level_text_dict = {}
    for abstract_id in abstract_real_id_dict.keys():
        real_id = abstract_real_id_dict.get(abstract_id)
        level_text_dict = abstract_id_level_text_dict.get(abstract_id)
        if level_text_dict:
            real_id_level_text_dict[int(real_id)] = level_text_dict
    return real_id_level_start_dict, real_id_level_text_dict


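# Illustrative note (example values): for a numbering.xml along the lines of
#   <w:abstractNum w:abstractNumId="0">
#     <w:lvl w:ilvl="0"><w:start w:val="1"/><w:numFmt w:val="decimal"/><w:lvlText w:val="%1."/></w:lvl>
#     <w:lvl w:ilvl="1"><w:start w:val="1"/><w:numFmt w:val="lowerLetter"/><w:lvlText w:val="%2)"/></w:lvl>
#   </w:abstractNum>
#   <w:num w:numId="1"><w:abstractNumId w:val="0"/></w:num>
# read_no_start would return roughly
#   ({1: {0: 1, 1: 1}}, {1: {0: '%1.', 1: '%a)'}})
# i.e. real numId -> {ilvl: start value} and real numId -> {ilvl: display pattern},
# with letter formats rewritten to '%a'/'%A' so read_p_text can substitute letters.

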
def read_p_text(unique_type_dir, p_node, _last_node_level, _num_pr_dict, numbering_xml, document_xml_rels,
                is_sdt=False):
    """
    Read the text under a w:p node, including its numbering.
    :param unique_type_dir:
    :param p_node:
    :param _last_node_level:
    :param _num_pr_dict:
    :param numbering_xml:
    :param document_xml_rels:
    :param is_sdt:
    :return:
    """
    _text_list = []
    _order_list = []
    # The paragraph's numbering text (if it has any)
    text_no = ''
    # Get the start value and the display pattern of every numbering group
    id_level_start_dict, id_level_text_dict = read_no_start(numbering_xml)
    # print('_num_pr_dict', _num_pr_dict)
    # Extract the numbering: group - level - sequence number
    num_pr = p_node.getElementsByTagName("w:numPr")
    if num_pr:
        num_pr = num_pr[0]
        if num_pr.getElementsByTagName("w:numId"):
            group_id = int(num_pr.getElementsByTagName("w:numId")[0].getAttribute("w:val"))
            if group_id >= 1:
                node_level = num_pr.getElementsByTagName("w:ilvl")
                if node_level:
                    node_level = int(node_level[0].getAttribute("w:val"))
                    # print('group_id', group_id, 'node_level', node_level, 'last_node_level', _last_node_level)
                    if group_id in _num_pr_dict.keys():
                        # if node_level == 0 and node_level not in _num_pr_dict[group_id].keys():
                        if node_level == 0 and _num_pr_dict.get(group_id) and node_level not in _num_pr_dict.get(group_id).keys():
                            _num_pr_dict[group_id][node_level] = 1
                        if _last_node_level != 0 and node_level < _last_node_level:
                            # print('reset', 'group_id', group_id, 'last_node_level', last_node_level)
                            # Reset every level between node_level and last_node_level
                            for l in range(node_level+1, _last_node_level+1):
                                _num_pr_dict[group_id][l] = 0
                            if _num_pr_dict[group_id].get(node_level):
                                _num_pr_dict[group_id][node_level] += 1
                            else:
                                pass
                            # print('group_id, node_level', group_id, node_level)
                        # elif node_level in _num_pr_dict[group_id].keys():
                        elif node_level in _num_pr_dict.get(group_id).keys():
                            _num_pr_dict[group_id][node_level] += 1
                        else:
                            _num_pr_dict[group_id][node_level] = 1
                    else:
                        _num_pr_dict[group_id] = {node_level: 1}
                    # print(num_pr_dict[group_id])
                    for level in range(node_level+1):
                        # How many nodes exist at the current level
                        # if level not in _num_pr_dict[group_id]:
                        if level not in _num_pr_dict.get(group_id):
                            # if level not in id_level_start_dict[group_id]:
                            if not id_level_start_dict.get(group_id) or level not in id_level_start_dict.get(group_id):
                                continue
                            else:
                                level_node_cnt = id_level_start_dict[group_id][level]
                        else:
                            level_node_cnt = _num_pr_dict[group_id][level]
                        if id_level_start_dict.get(group_id) and _num_pr_dict.get(group_id) and id_level_start_dict.get(group_id).get(level) and _num_pr_dict.get(group_id).get(level):
                            start_no = id_level_start_dict.get(group_id).get(level)
                            level_node_cnt += start_no - 1
                        level_text = None
                        if id_level_text_dict.get(group_id) and id_level_text_dict.get(group_id).get(level) and _num_pr_dict.get(group_id).get(level):
                            level_text = id_level_text_dict.get(group_id).get(level)
                        # print('level_node_cnt', level_node_cnt)
                        if level_text:
                            if re.search('a', level_text):
                                level_node_cnt = chr(ord('a') + level_node_cnt - 1)
                                text_no += re.sub('%a', str(level_node_cnt), level_text)
                            elif re.search('A', level_text):
                                level_node_cnt = chr(ord('A') + level_node_cnt - 1)
                                text_no += re.sub('%A', str(level_node_cnt), level_text)
                            else:
                                text_no += re.sub(r'%\d', str(level_node_cnt), level_text)
                        else:
                            text_no += str(level_node_cnt) + '.'
                    # print('text_no', text_no)
                    _last_node_level = node_level

    # text = p_node.getElementsByTagName("w:t")
    # picture = p_node.getElementsByTagName("wp:docPr")
    # if text:
    #     _order_list.append("w:t")
    #     temp_text = ""
    #     if is_sdt and len(text) == 2:
    #         if len(text[0].childNodes) > 0 and len(text[1].childNodes) > 0:
    #             temp_text += text[0].childNodes[0].nodeValue + '.'*20 + text[1].childNodes[0].nodeValue
    #     else:
    #         for t in text:
    #             if len(t.childNodes) > 0:
    #                 temp_text += t.childNodes[0].nodeValue
    #             else:
    #                 continue
    #     if text_no:
    #         temp_text = text_no + ' ' + temp_text
    #     _text_list.append(temp_text)
    # # Numbering only
    # elif len(text_no) >= 2:
    #     _text_list.append(text_no[:-1])
    #
    # if picture:
    #     _order_list.append("wp:docPr")
    #
    # for line1 in p_node.childNodes:
    #     if "w:r" in str(line1):
    #         picture1 = line1.getElementsByTagName("w:pict")
    #         if picture1:
    #             _order_list.append("wp:docPr")

    p_node_text = ''
    has_html = False
    # Prepend the numbering first
    if text_no:
        p_node_text += text_no
    text = p_node.getElementsByTagName("w:t")
    # Table-of-contents (sdt) entries are built specially
    if is_sdt and len(text) == 2:
        p_node_text += text[0].childNodes[0].nodeValue + '.'*20 + text[1].childNodes[0].nodeValue
    # Normal paragraphs
    else:
        image_rel_dict = read_rel_image(document_xml_rels)
        p_node_all = p_node.getElementsByTagName("*")
        for node in p_node_all:
            # Text runs
            if "w:t" in str(node).split(' '):
                if node.childNodes:
                    p_node_text += node.childNodes[0].nodeValue
            # Images: convert them here instead of adding an Image object to the Page
            elif "a:blip" in str(node).split(' '):
                _id = node.getAttribute("r:embed")
                image_path = image_rel_dict.get(_id)
                if image_path:
                    image_path = unique_type_dir + 'word/' + image_path
                    image_convert = ImageConvert(image_path, '')
                    image_html = image_convert.get_html()[0]
                    if isinstance(image_html, int):
                        image_html = ''
                    p_node_text += image_html
                    has_html = True
    # Numbering only, no text
    if len(p_node_text) > 0 and p_node_text == text_no:
        p_node_text = p_node_text[:-1]
    _text_list.append(p_node_text)
    if has_html:
        _order_list.append('w:t html')
    else:
        _order_list.append('w:t')
    return _text_list, _order_list, _num_pr_dict, _last_node_level


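# Illustrative note (example values): for the third top-level paragraph of numbering
# group 1 whose lvlText is '%1.', read_p_text would return something like
#   (['3.Some paragraph text'], ['w:t'], {1: {0: 3}}, 0)
# the numbering text is prepended to the run text, and the order tag becomes
# 'w:t html' when an inline image was converted to html.

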
@timeout(50, timeout_exception=TimeoutError)
def read_xml_order(unique_type_dir, document_xml, numbering_xml, document_xml_rels):
    log("into read_xml_order")
    try:
        body = document_xml.getElementsByTagName("w:body")[0]
        order_list = []
        text_list = []
        # Bookkeeping for the numbering groups
        num_pr_dict = {}
        last_node_level = 0
        for line in body.childNodes:
            # Plain paragraphs
            if "w:p" in str(line):
                t_list, o_list, num_pr_dict, last_node_level = read_p_text(unique_type_dir,
                                                                           line,
                                                                           last_node_level,
                                                                           num_pr_dict,
                                                                           numbering_xml,
                                                                           document_xml_rels)
                text_list += t_list
                order_list += o_list
            # Table-of-contents index
            elif "w:sdt" in str(line):
                sdt = line
                for sdt_child in sdt.childNodes:
                    if "w:sdtContent" in str(sdt_child):
                        sdt_content = sdt_child
                        for sdt_content_child in sdt_content.childNodes:
                            if 'w:p' in str(sdt_content_child):
                                t_list, o_list, num_pr_dict, last_node_level = read_p_text(unique_type_dir,
                                                                                           sdt_content_child,
                                                                                           last_node_level,
                                                                                           num_pr_dict,
                                                                                           numbering_xml,
                                                                                           document_xml_rels,
                                                                                           is_sdt=True)
                                text_list += t_list
                                order_list += o_list
            elif "w:tbl" in str(line):
                order_list.append("w:tbl")
                # read_xml_table(path, save_path)
        return [order_list, text_list]
    except Exception as e:
        log("read_xml_order error!")
        traceback.print_exc()
        return [-1]


@timeout(50, timeout_exception=TimeoutError)
def read_xml_table(unique_type_dir, document_xml, numbering_xml, document_xml_rels):
    def recursion_read_table(table):
        table_text = '<table border="1">'
        tr_index = 0
        tr_text_list = []
        last_node_level = 0
        num_pr_dict = {}
        # "child" means a direct child node, "all" means every descendant node
        for table_child in table.childNodes:
            if 'w:tr' in str(table_child):
                table_text += "<tr>"
                tr = table_child
                tr_child_nodes = tr.childNodes
                tc_index = 0
                tc_text_list = []
                for tr_child in tr_child_nodes:
                    if 'w:tc' in str(tr_child).split(' '):
                        tc_text = ""
                        tc = tr_child
                        # How many columns this cell spans, i.e. its colspan
                        col_span = tc.getElementsByTagName("w:gridSpan")
                        if col_span:
                            col_span = int(col_span[0].getAttribute("w:val"))
                        else:
                            col_span = 1
                        # Whether this is the empty cell continuing a vertical merge, i.e. a rowspan
                        is_merge = tc.getElementsByTagName("w:vMerge")
                        if is_merge:
                            is_merge = is_merge[0].getAttribute("w:val")
                            if is_merge == "continue":
                                col_span_index = 0
                                real_tc_index = 0
                                if 0 <= tr_index - 1 < len(tr_text_list):
                                    for tc_colspan in tr_text_list[tr_index - 1]:
                                        if col_span_index < tc_index:
                                            col_span_index += tc_colspan[1]
                                            real_tc_index += 1
                                    if real_tc_index < len(tr_text_list[tr_index - 1]):
                                        tc_text = tr_text_list[tr_index - 1][real_tc_index][0]
                        # Set the colspan
                        table_text = table_text + "<td colspan=" + str(col_span) + ">"
                        # Fill in the cell text
                        tc_child_nodes = tc.childNodes
                        for tc_child in tc_child_nodes:
                            if 'w:tbl' in str(tc_child).split(' '):
                                # A table nested inside this tc
                                tc_text += recursion_read_table(tc_child)
                            if 'w:p' in str(tc_child).split(' '):
                                tc_p_all_nodes = tc_child.getElementsByTagName("*")
                                _t_list, _, num_pr_dict, last_node_level = read_p_text(unique_type_dir,
                                                                                       tc_child,
                                                                                       last_node_level,
                                                                                       num_pr_dict,
                                                                                       numbering_xml,
                                                                                       document_xml_rels)
                                # print('_t_list', _t_list)
                                tc_text += ''.join(_t_list)
                                # for tc_p_all in tc_p_all_nodes:
                                #     if 'w:t' in str(tc_p_all).split(' '):
                                #         # w:t needs childNodes[0] to read its text
                                #         tc_text += tc_p_all.childNodes[0].nodeValue
                        # Close this tc
                        table_text = table_text + tc_text + "</td>"
                        tc_index += 1
                        tc_text_list.append([tc_text, col_span])
                # Close this tr
                table_text += "</tr>"
                tr_index += 1
                tr_text_list.append(tc_text_list)
        # Close this table
        table_text += "</table>"
        return table_text

    log("into read_xml_table")
    try:
        body = document_xml.getElementsByTagName("w:body")[0]
        table_text_list = []
        body_nodes = body.childNodes
        for node in body_nodes:
            if 'w:tbl' in str(node).split(' '):
                _table = node
                _table_text = recursion_read_table(_table)
                table_text_list.append(_table_text)
        return table_text_list
    except Exception as e:
        log("read_xml_table error")
        print("read_xml_table", traceback.print_exc())
        return [-1]


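# Illustrative note (example values): a 2x2 table whose first column is vertically
# merged comes back as a single html string, roughly
#   <table border="1"><tr><td colspan=1>A</td><td colspan=1>B</td></tr>
#   <tr><td colspan=1>A</td><td colspan=1>C</td></tr></table>
# because the "continue" cell of a vertical merge copies the text of the cell above
# it instead of emitting a rowspan attribute.

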
@timeout(25, timeout_exception=TimeoutError)
def parse_xml(path):
    # Parse the xml with minidom
    DOMTree = xml.dom.minidom.parse(path)
    collection = DOMTree.documentElement
    return collection


@timeout(25, timeout_exception=TimeoutError)
def parse_xml2(path):
    # Parse the xml with ElementTree
    tree = xml.etree.ElementTree.parse(path)
    root = tree.getroot()
    return root


class DocxConvert:
    def __init__(self, path, unique_type_dir):
        self._doc = _Document(path)
        self.path = path
        self.unique_type_dir = unique_type_dir
        # Unzip the docx
        try:
            f = zipfile.ZipFile(path)
            for file in f.namelist():
                if "word/" in str(file):
                    f.extract(file, self.unique_type_dir)
            f.close()
        except Exception as e:
            log("docx format error!")
            self._doc.error_code = [-3]
        # Read the contents
        try:
            self.document_xml = parse_xml(self.unique_type_dir + "word/document.xml")
            if os.path.exists(self.unique_type_dir + "word/numbering.xml"):
                self.numbering_xml = parse_xml(self.unique_type_dir + "word/numbering.xml")
            else:
                self.numbering_xml = []
            if os.path.exists(self.unique_type_dir + "word/_rels/document.xml.rels"):
                self.document_xml_rels = parse_xml2(self.unique_type_dir + "word/_rels/document.xml.rels")
            else:
                self.document_xml_rels = []
        except FileNotFoundError:
            # The extracted file is missing; read the file as html instead
            log('FileNotFoundError')
            self._doc.error_code = None
        except TimeoutError:
            log("parse_xml timeout")
            self._doc.error_code = [-4]

    @memory_decorator
    def init_package(self):
        # Initialize the third-party packages
        try:
            self.docx = docx.Document(self.path)
            self.zip = zipfile.ZipFile(self.path)
        except:
            log("cannot open docx!")
            traceback.print_exc()
            self._doc.error_code = [-3]

    def convert(self):
        self._page = _Page(None, 0)
        # Some special doc files are actually html text; check for that first
        is_html_doc = False
        try:
            with open(self.path, 'r') as f:
                html_str = f.read()
                if re.search('<div|<html|<body|<head|<tr|<br|<table|<td', html_str):
                    soup = BeautifulSoup(html_str, 'lxml')
                    text = soup.text
                    is_html_doc = True
        except:
            pass
        if is_html_doc:
            _sen = _Sentence(text, (0, 0, 0, 0))
            self._page.add_child(_sen)
            self._doc.add_child(self._page)
            return

        self.init_package()
        if self._doc.error_code is not None:
            return
        order_and_text_list = self.get_orders()
        if judge_error_code(order_and_text_list):
            self._doc.error_code = order_and_text_list
            return
        order_list, text_list = order_and_text_list
        # Garbled text means the file format is wrong
        match1 = re.findall(get_garble_code(), ''.join(text_list))
        if len(match1) > 10:
            log("doc/docx garbled code!")
            self._doc.error_code = [-3]
            # _sen = _Sentence('文件乱码!', (0, 0, 0, 0))
            # self._page.add_child(_sen)
            self._doc.add_child(self._page)
            return

        # test
        # for i in range(len(text_list)):
        #     print(order_list[i], text_list[i])

        table_list = self.get_tables()
        if judge_error_code(table_list):
            self._doc.error_code = table_list
            return

        # paragraph_list = self.get_paragraphs()
        image_list = self.get_images()

        order_y = 0
        doc_pr_cnt = 0
        for tag in order_list:
            bbox = (0, order_y, 0, 0)
            if tag == "w:t html":
                if len(text_list) > 0:
                    _para = text_list.pop(0)
                    _sen = _Sentence(_para, bbox)
                    _sen.combine = False
                    _sen.is_html = True
                    self._page.add_child(_sen)
            if tag == "w:t":
                if len(text_list) > 0:
                    _para = text_list.pop(0)
                    _sen = _Sentence(_para, bbox)
                    _sen.combine = False
                    self._page.add_child(_sen)
            if tag == "wp:docPr":
                if len(image_list) > 0:
                    temp_image_path = self.unique_type_dir + "docpr" + str(doc_pr_cnt) + ".png"
                    _image = image_list.pop(0)
                    with open(temp_image_path, "wb") as f:
                        f.write(_image)
                    _img = _Image(_image, temp_image_path, bbox)
                    _img.is_from_docx = True
                    self._page.add_child(_img)
                    doc_pr_cnt += 1
            if tag == "w:tbl":
                if len(table_list) > 0:
                    _table = table_list.pop(0)
                    _table = _Table(_table, bbox)
                    _table.is_html = True
                    self._page.add_child(_table)
            order_y += 1

        if self._doc.error_code is None and self._page.error_code is not None:
            self._doc.error_code = self._page.error_code
        self._doc.add_child(self._page)

    @memory_decorator
    def get_tables(self):
        # Walk the tables
        table_list = read_xml_table(self.unique_type_dir, self.document_xml, self.numbering_xml, self.document_xml_rels)
        return table_list

    def get_images(self):
        # Walk the images in document order
        image_list = []
        pattern = re.compile(r'rId\d+')
        for graph in self.docx.paragraphs:
            for run in graph.runs:
                if run.text == '':
                    try:
                        if not pattern.search(run.element.xml):
                            continue
                        content_id = pattern.search(run.element.xml).group(0)
                        content_type = self.docx.part.related_parts[content_id].content_type
                    except Exception as e:
                        print("docx no image!", e)
                        continue
                    if not content_type.startswith('image'):
                        continue
                    img_data = self.docx.part.related_parts[content_id].blob
                    if img_data is not None:
                        image_list.append(img_data)
        return image_list

    @memory_decorator
    def get_orders(self):
        # Parse document.xml to get the order of the text
        order_and_text_list = read_xml_order(self.unique_type_dir, self.document_xml, self.numbering_xml, self.document_xml_rels)
        return order_and_text_list

    def get_doc_object(self):
        return self._doc

    def get_html(self):
        if self._doc.error_code is not None:
            return self._doc.error_code
        try:
            self.convert()
        except:
            traceback.print_exc()
            self._doc.error_code = [-1]
        # log('docx error code ' + str(self._doc.error_code))
        if self._doc.error_code is not None:
            # Fall back to extraction with tika
            html = from_tika_interface(self.path)
            if judge_error_code(html):
                self._doc.error_code = html
                return self._doc.error_code
            else:
                return [html]
        return self._doc.get_html()


if __name__ == '__main__':
    c = DocxConvert("C:/Users/Administrator/Downloads/1631944542835.docx", "C:/Users/Administrator/Downloads/1/")
    print(c.get_html())