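# convert_docx.py: unpack a .docx archive, read word/document.xml (text order,
# list numbering, inline images) plus the numbering and rels parts, and build the
# _Document/_Page/_Sentence/_Image/_Table tree that get_html() finally renders.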
import os
import sys
sys.path.append(os.path.dirname(__file__) + "/../")
from format_convert.convert_tree import _Document, _Sentence, _Page, _Image, _Table
import re
import traceback
import xml.dom.minidom
import xml.etree.ElementTree
import zipfile
import docx
from bs4 import BeautifulSoup
from format_convert.utils import judge_error_code, add_div, get_logger, log, memory_decorator, get_garble_code
from format_convert.wrapt_timeout_decorator import timeout
from format_convert.convert_image import ImageConvert


def docx2text():
    return


def read_rel_image(document_xml_rels):
    if not document_xml_rels:
        return {}
    # Collect the Id -> Target relations from the rels mapping file
    image_rel_dict = {}
    for rel in document_xml_rels:
        if 'Relationship' in str(rel):
            _id = rel.get("Id")
            _target = rel.get("Target")
            _type = rel.get("Type")
            if 'image' in _type:
                image_rel_dict[_id] = _target
    return image_rel_dict
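
# For example (hypothetical ids), a rels entry relating "rId5" to "media/image1.png"
# yields {"rId5": "media/image1.png"}, which read_p_text later joins onto
# unique_type_dir + 'word/' to locate the extracted image file.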


def read_no_start(numbering_xml):
    """
    Read the start value of each numbering group.
    :return:
    """
    if not numbering_xml:
        return {}
    # Get the abstract-to-real numbering id mapping
    w_num_list = numbering_xml.getElementsByTagName("w:num")
    abstract_real_id_dict = {}
    for w_num in w_num_list:
        w_num_id = w_num.getAttribute("w:numId")
        w_abstract_num_id = w_num.getElementsByTagName('w:abstractNumId')[0].getAttribute("w:val")
        abstract_real_id_dict[w_abstract_num_id] = w_num_id

    # Get the start number of each level for every abstract id
    w_abstract_num_list = numbering_xml.getElementsByTagName("w:abstractNum")
    abstract_id_level_dict = {}
    for w_abstract_num in w_abstract_num_list:
        w_abstract_num_id = w_abstract_num.getAttribute("w:abstractNumId")
        w_lvl_list = w_abstract_num.getElementsByTagName("w:lvl")
        level_start_dict = {}
        for w_lvl in w_lvl_list:
            w_ilvl_value = w_lvl.getAttribute('w:ilvl')
            if w_lvl.getElementsByTagName("w:start"):
                w_ilvl_start_num = w_lvl.getElementsByTagName("w:start")[0].getAttribute("w:val")
                level_start_dict[int(w_ilvl_value)] = int(w_ilvl_start_num)
        abstract_id_level_dict[w_abstract_num_id] = level_start_dict

    # Map the start values back onto the real ids
    real_id_level_start_dict = {}
    for abstract_id in abstract_real_id_dict.keys():
        real_id = abstract_real_id_dict.get(abstract_id)
        level_start_dict = abstract_id_level_dict.get(abstract_id)
        if level_start_dict:
            real_id_level_start_dict[int(real_id)] = level_start_dict
    return real_id_level_start_dict
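
# Illustrative (hypothetical) shape of the result: if numId "1" points at
# abstractNumId "0", whose level 0 starts at 3 and level 1 starts at 1,
# read_no_start returns {1: {0: 3, 1: 1}}, i.e. real numId -> {ilvl -> start value}.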


def read_p_text(unique_type_dir, p_node, _last_node_level, _num_pr_dict, numbering_xml, document_xml_rels,
                is_sdt=False):
    """
    Read the text under a w:p node, including its numbering.
    :param unique_type_dir:
    :param p_node:
    :param _last_node_level:
    :param _num_pr_dict:
    :param numbering_xml:
    :param document_xml_rels:
    :param is_sdt:
    :return:
    """
    _text_list = []
    _order_list = []
    # Numbering prefix of the text (if the paragraph is numbered)
    text_no = ''
    # Get the start value of each numbering group
    id_level_start_dict = read_no_start(numbering_xml)
    # Extract the numbering: group - level - index
    num_pr = p_node.getElementsByTagName("w:numPr")
    if num_pr:
        num_pr = num_pr[0]
        if num_pr.getElementsByTagName("w:numId"):
            group_id = int(num_pr.getElementsByTagName("w:numId")[0].getAttribute("w:val"))
            if group_id >= 1:
                node_level = num_pr.getElementsByTagName("w:ilvl")
                if node_level:
                    node_level = int(node_level[0].getAttribute("w:val"))
                    if group_id in _num_pr_dict.keys():
                        if node_level == 0 and node_level not in _num_pr_dict[group_id].keys():
                            _num_pr_dict[group_id][node_level] = 1
                        if _last_node_level != 0 and node_level < _last_node_level:
                            # Reset every level between node_level and last_node_level
                            for l in range(node_level+1, _last_node_level+1):
                                _num_pr_dict[group_id][l] = 0
                            if _num_pr_dict[group_id].get(node_level):
                                _num_pr_dict[group_id][node_level] += 1
                            else:
                                pass
                        elif node_level in _num_pr_dict[group_id].keys():
                            _num_pr_dict[group_id][node_level] += 1
                        else:
                            _num_pr_dict[group_id][node_level] = 1
                    else:
                        _num_pr_dict[group_id] = {node_level: 1}
                    for level in range(node_level+1):
                        # How many nodes the current level already holds
                        if level not in _num_pr_dict[group_id]:
                            if level not in id_level_start_dict[group_id]:
                                continue
                            else:
                                level_node_cnt = id_level_start_dict[group_id][level]
                        else:
                            level_node_cnt = _num_pr_dict[group_id][level]
                        if id_level_start_dict.get(group_id) and id_level_start_dict.get(group_id).get(level) and _num_pr_dict.get(group_id).get(level):
                            start_no = id_level_start_dict.get(group_id).get(level)
                            level_node_cnt += start_no - 1
                        text_no += str(level_node_cnt) + '.'
                    _last_node_level = node_level
    # text = p_node.getElementsByTagName("w:t")
    # picture = p_node.getElementsByTagName("wp:docPr")
    # if text:
    #     _order_list.append("w:t")
    #     temp_text = ""
    #     if is_sdt and len(text) == 2:
    #         if len(text[0].childNodes) > 0 and len(text[1].childNodes) > 0:
    #             temp_text += text[0].childNodes[0].nodeValue + '.'*20 + text[1].childNodes[0].nodeValue
    #     else:
    #         for t in text:
    #             if len(t.childNodes) > 0:
    #                 temp_text += t.childNodes[0].nodeValue
    #             else:
    #                 continue
    #     if text_no:
    #         temp_text = text_no + ' ' + temp_text
    #     _text_list.append(temp_text)
    # # Only a numbering, no text
    # elif len(text_no) >= 2:
    #     _text_list.append(text_no[:-1])
    #
    # if picture:
    #     _order_list.append("wp:docPr")
    #
    # for line1 in p_node.childNodes:
    #     if "w:r" in str(line1):
    #         picture1 = line1.getElementsByTagName("w:pict")
    #         if picture1:
    #             _order_list.append("wp:docPr")
    p_node_text = ''
    has_html = False
    # Prepend the numbering first
    if text_no:
        p_node_text += text_no
    text = p_node.getElementsByTagName("w:t")
    # Table-of-contents (sdt) entries are generated specially
    if is_sdt and len(text) == 2:
        p_node_text += text[0].childNodes[0].nodeValue + '.'*20 + text[1].childNodes[0].nodeValue
    # Normal paragraphs
    else:
        image_rel_dict = read_rel_image(document_xml_rels)
        p_node_all = p_node.getElementsByTagName("*")
        for node in p_node_all:
            # Text
            if "w:t" in str(node).split(' '):
                if node.childNodes:
                    p_node_text += node.childNodes[0].nodeValue
            # Inline images: convert them here instead of adding Image objects to the Page
            elif "a:blip" in str(node).split(' '):
                _id = node.getAttribute("r:embed")
                image_path = image_rel_dict.get(_id)
                if image_path:
                    image_path = unique_type_dir + 'word/' + image_path
                    image_convert = ImageConvert(image_path, '')
                    image_html = image_convert.get_html()[0]
                    if isinstance(image_html, int):
                        image_html = ''
                    p_node_text += image_html
                    has_html = True

    # Paragraph contains only the numbering: drop the trailing dot
    if len(p_node_text) > 0 and p_node_text == text_no:
        p_node_text = p_node_text[:-1]
    _text_list.append(p_node_text)
    if has_html:
        _order_list.append('w:t html')
    else:
        _order_list.append('w:t')
    return _text_list, _order_list, _num_pr_dict, _last_node_level
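
# Note: read_p_text returns (text_list, order_list, num_pr_dict, last_node_level);
# callers feed num_pr_dict and last_node_level back in on the next w:p so multi-level
# numbering (e.g. "1.", "1.1.", "1.2.") keeps counting across consecutive paragraphs.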


@timeout(50, timeout_exception=TimeoutError)
def read_xml_order(unique_type_dir, document_xml, numbering_xml, document_xml_rels):
    log("into read_xml_order")
    try:
        body = document_xml.getElementsByTagName("w:body")[0]
        order_list = []
        text_list = []
        # Numbering group state, threaded through read_p_text
        num_pr_dict = {}
        last_node_level = 0
        for line in body.childNodes:
            # Plain paragraph
            if "w:p" in str(line):
                t_list, o_list, num_pr_dict, last_node_level = read_p_text(unique_type_dir,
                                                                           line,
                                                                           last_node_level,
                                                                           num_pr_dict,
                                                                           numbering_xml,
                                                                           document_xml_rels)
                text_list += t_list
                order_list += o_list
            # Table-of-contents index
            elif "w:sdt" in str(line):
                sdt = line
                for sdt_child in sdt.childNodes:
                    if "w:sdtContent" in str(sdt_child):
                        sdt_content = sdt_child
                        for sdt_content_child in sdt_content.childNodes:
                            if 'w:p' in str(sdt_content_child):
                                t_list, o_list, num_pr_dict, last_node_level = read_p_text(unique_type_dir,
                                                                                           sdt_content_child,
                                                                                           last_node_level,
                                                                                           num_pr_dict,
                                                                                           numbering_xml,
                                                                                           document_xml_rels,
                                                                                           is_sdt=True)
                                text_list += t_list
                                order_list += o_list
            elif "w:tbl" in str(line):
                order_list.append("w:tbl")
        return [order_list, text_list]
    except Exception as e:
        log("read_xml_order error!")
        traceback.print_exc()
        return [-1]


@timeout(50, timeout_exception=TimeoutError)
def read_xml_table(unique_type_dir, document_xml, numbering_xml, document_xml_rels):
    def recursion_read_table(table):
        table_text = '<table border="1">'
        tr_index = 0
        tr_text_list = []
        last_node_level = 0
        num_pr_dict = {}
        # Direct children are named "child"; all descendants are named "all"
        for table_child in table.childNodes:
            if 'w:tr' in str(table_child):
                tr = table_child
                tr_child_nodes = tr.childNodes
                tc_index = 0
                tc_text_list = []
                # Open this tr
                table_text += "<tr>"
                for tr_child in tr_child_nodes:
                    if 'w:tc' in str(tr_child).split(' '):
                        tc_text = ""
                        tc = tr_child
                        # How many columns this cell spans, i.e. colspan
                        col_span = tc.getElementsByTagName("w:gridSpan")
                        if col_span:
                            col_span = int(col_span[0].getAttribute("w:val"))
                        else:
                            col_span = 1
                        # Whether this is the empty continuation cell of a merged cell, i.e. rowspan
                        is_merge = tc.getElementsByTagName("w:vMerge")
                        if is_merge:
                            is_merge = is_merge[0].getAttribute("w:val")
                            if is_merge == "continue":
                                col_span_index = 0
                                real_tc_index = 0
                                if 0 <= tr_index - 1 < len(tr_text_list):
                                    for tc_colspan in tr_text_list[tr_index - 1]:
                                        if col_span_index < tc_index:
                                            col_span_index += tc_colspan[1]
                                            real_tc_index += 1
                                    if real_tc_index < len(tr_text_list[tr_index - 1]):
                                        tc_text = tr_text_list[tr_index - 1][real_tc_index][0]
                        # Set colspan
                        table_text = table_text + "<td colspan=" + str(col_span) + ">"
                        # Fill in the cell text
                        tc_child_nodes = tc.childNodes
                        for tc_child in tc_child_nodes:
                            if 'w:tbl' in str(tc_child).split(' '):
                                # A table nested inside this tc
                                tc_text += recursion_read_table(tc_child)
                            if 'w:p' in str(tc_child).split(' '):
                                tc_p_all_nodes = tc_child.getElementsByTagName("*")
                                _t_list, _, num_pr_dict, last_node_level = read_p_text(unique_type_dir,
                                                                                       tc_child,
                                                                                       last_node_level,
                                                                                       num_pr_dict,
                                                                                       numbering_xml,
                                                                                       document_xml_rels)
                                tc_text += ''.join(_t_list)
                                # for tc_p_all in tc_p_all_nodes:
                                #     if 'w:t' in str(tc_p_all).split(' '):
                                #         # w:t needs childNodes[0] to read its text
                                #         tc_text += tc_p_all.childNodes[0].nodeValue
                        # Close this tc
                        table_text = table_text + tc_text + "</td>"
                        tc_index += 1
                        tc_text_list.append([tc_text, col_span])
                # Close this tr
                table_text += "</tr>"
                tr_index += 1
                tr_text_list.append(tc_text_list)
        # Close the table
        table_text += "</table>"
        return table_text

    log("into read_xml_table")
    try:
        body = document_xml.getElementsByTagName("w:body")[0]
        table_text_list = []
        body_nodes = body.childNodes
        for node in body_nodes:
            if 'w:tbl' in str(node).split(' '):
                _table = node
                _table_text = recursion_read_table(_table)
                table_text_list.append(_table_text)
        return table_text_list
    except Exception as e:
        log("read_xml_table error")
        traceback.print_exc()
        return [-1]
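
# The generated table HTML carries colspan from w:gridSpan; a w:vMerge value of
# "continue" copies the text of the cell directly above instead of emitting rowspan,
# so vertically merged cells repeat their content rather than spanning rows.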


@timeout(25, timeout_exception=TimeoutError)
def parse_xml(path):
    # Parse the xml with minidom
    DOMTree = xml.dom.minidom.parse(path)
    collection = DOMTree.documentElement
    return collection


@timeout(25, timeout_exception=TimeoutError)
def parse_xml2(path):
    # Parse the xml with ElementTree
    tree = xml.etree.ElementTree.parse(path)
    root = tree.getroot()
    return root
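
# parse_xml returns a minidom document element (getElementsByTagName/getAttribute),
# which is what the w:* readers above expect; parse_xml2 returns an ElementTree root,
# which read_rel_image iterates and queries with .get().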


class DocxConvert:
    def __init__(self, path, unique_type_dir):
        self._doc = _Document(path)
        self.path = path
        self.unique_type_dir = unique_type_dir

        # Unzip the docx
        try:
            f = zipfile.ZipFile(path)
            for file in f.namelist():
                if "word/" in str(file):
                    f.extract(file, self.unique_type_dir)
            f.close()
        except Exception as e:
            log("docx format error!")
            self._doc.error_code = [-3]

        # Read the contents
        try:
            self.document_xml = parse_xml(self.unique_type_dir + "word/document.xml")
            if os.path.exists(self.unique_type_dir + "word/numbering.xml"):
                self.numbering_xml = parse_xml(self.unique_type_dir + "word/numbering.xml")
            else:
                self.numbering_xml = []
            if os.path.exists(self.unique_type_dir + "word/_rels/document.xml.rels"):
                self.document_xml_rels = parse_xml2(self.unique_type_dir + "word/_rels/document.xml.rels")
            else:
                self.document_xml_rels = []
        except FileNotFoundError:
            # The extracted files are missing, so fall back to reading the file as html
            log('FileNotFoundError')
            self._doc.error_code = None
        except TimeoutError:
            log("parse_xml timeout")
            self._doc.error_code = [-4]

    @memory_decorator
    def init_package(self):
        # Initialize the third-party packages
        try:
            self.docx = docx.Document(self.path)
            self.zip = zipfile.ZipFile(self.path)
        except:
            log("cannot open docx!")
            traceback.print_exc()
            self._doc.error_code = [-3]

    def convert(self):
        self._page = _Page(None, 0)

        # Check for special doc files first: they may actually be html text
        is_html_doc = False
        try:
            with open(self.path, 'r') as f:
                html_str = f.read()
            if re.search('<div|<html|<body|<head|<tr|<br|<table|<td', html_str):
                soup = BeautifulSoup(html_str, 'lxml')
                text = soup.text
                is_html_doc = True
        except:
            pass
        if is_html_doc:
            _sen = _Sentence(text, (0, 0, 0, 0))
            self._page.add_child(_sen)
            self._doc.add_child(self._page)
            return

        self.init_package()
        if self._doc.error_code is not None:
            return

        order_and_text_list = self.get_orders()
        if judge_error_code(order_and_text_list):
            self._doc.error_code = order_and_text_list
            return
        order_list, text_list = order_and_text_list

        # Garbled text is treated as a file format error
        match1 = re.findall(get_garble_code(), ''.join(text_list))
        if len(match1) > 10:
            log("doc/docx garbled code!")
            # self._doc.error_code = [-3]
            _sen = _Sentence('文件乱码!', (0, 0, 0, 0))  # "File is garbled!"
            self._page.add_child(_sen)
            self._doc.add_child(self._page)
            return
        table_list = self.get_tables()
        if judge_error_code(table_list):
            self._doc.error_code = table_list
            return

        # paragraph_list = self.get_paragraphs()
        image_list = self.get_images()

        order_y = 0
        doc_pr_cnt = 0
        for tag in order_list:
            bbox = (0, order_y, 0, 0)
            if tag == "w:t html":
                if len(text_list) > 0:
                    _para = text_list.pop(0)
                    _sen = _Sentence(_para, bbox)
                    _sen.combine = False
                    _sen.is_html = True
                    self._page.add_child(_sen)
            if tag == "w:t":
                if len(text_list) > 0:
                    _para = text_list.pop(0)
                    _sen = _Sentence(_para, bbox)
                    _sen.combine = False
                    self._page.add_child(_sen)
            if tag == "wp:docPr":
                if len(image_list) > 0:
                    temp_image_path = self.unique_type_dir + "docpr" + str(doc_pr_cnt) + ".png"
                    _image = image_list.pop(0)
                    with open(temp_image_path, "wb") as f:
                        f.write(_image)
                    _img = _Image(_image, temp_image_path, bbox)
                    _img.is_from_docx = True
                    self._page.add_child(_img)
                    doc_pr_cnt += 1
            if tag == "w:tbl":
                if len(table_list) > 0:
                    _table = table_list.pop(0)
                    _table = _Table(_table, bbox)
                    _table.is_html = True
                    self._page.add_child(_table)
            order_y += 1

        if self._doc.error_code is None and self._page.error_code is not None:
            self._doc.error_code = self._page.error_code
        self._doc.add_child(self._page)
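
    # convert() consumes order_list tag by tag: "w:t"/"w:t html" pop from text_list,
    # "w:tbl" pops from table_list, and "wp:docPr" writes the next raw image blob from
    # get_images() to disk; order_y only records the original top-to-bottom order in bbox.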

    @memory_decorator
    def get_tables(self):
        # Walk all tables
        table_list = read_xml_table(self.unique_type_dir, self.document_xml, self.numbering_xml, self.document_xml_rels)
        return table_list

    def get_images(self):
        # Walk the images in document order
        image_list = []
        pattern = re.compile(r'rId\d+')
        for graph in self.docx.paragraphs:
            for run in graph.runs:
                if run.text == '':
                    try:
                        if not pattern.search(run.element.xml):
                            continue
                        content_id = pattern.search(run.element.xml).group(0)
                        content_type = self.docx.part.related_parts[content_id].content_type
                    except Exception as e:
                        print("docx no image!", e)
                        continue
                    if not content_type.startswith('image'):
                        continue
                    img_data = self.docx.part.related_parts[content_id].blob
                    if img_data is not None:
                        image_list.append(img_data)
        return image_list
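
    # get_images() leans on python-docx: a run with empty text that still references an
    # rId is looked up in docx.part.related_parts, and parts whose content_type starts
    # with "image" contribute their raw blob bytes in paragraph order.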

    @memory_decorator
    def get_orders(self):
        # Parse document.xml to get the text in reading order
        order_and_text_list = read_xml_order(self.unique_type_dir, self.document_xml, self.numbering_xml, self.document_xml_rels)
        return order_and_text_list

    def get_doc_object(self):
        return self._doc

    def get_html(self):
        if self._doc.error_code is not None:
            return self._doc.error_code
        try:
            self.convert()
        except:
            traceback.print_exc()
            self._doc.error_code = [-1]
        if self._doc.error_code is not None:
            return self._doc.error_code
        return self._doc.get_html()


if __name__ == '__main__':
    c = DocxConvert("C:/Users/Administrator/Downloads/1631944542835.docx", "C:/Users/Administrator/Downloads/1/")
    print(c.get_html())