import copy
import io
import os
import re
import sys
from bs4 import BeautifulSoup
sys.path.append(os.path.dirname(__file__) + "/../")
from pdfplumber import PDF
from pdfplumber.table import TableFinder
from pdfplumber.page import Page as pdfPage
from format_convert.convert_tree import _Document, _Page, _Image, _Sentence, _Table, TextBox
import time
from PIL import Image
import traceback
import cv2
import PyPDF2
from PyPDF2 import PdfFileReader, PdfFileWriter
from pdfminer.pdfparser import PDFParser
from pdfminer.pdfdocument import PDFDocument
from pdfminer.pdfpage import PDFPage
from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter
from pdfminer.converter import PDFPageAggregator
from pdfminer.layout import LTTextBoxHorizontal, LAParams, LTFigure, LTImage, LTCurve, LTText, LTChar, LTRect, \
    LTTextBoxVertical, LTLine, LTTextContainer, LTTextLine
from format_convert.utils import judge_error_code, get_platform, LineTable, log, \
    memory_decorator, get_garble_code, get_md5_from_bytes, bytes2np, bbox_iou, get_garble_code2, \
    get_traditional_chinese
import fitz
from format_convert.wrapt_timeout_decorator import timeout
from otr.table_line_pdf import table_line_pdf
# import jieba


@memory_decorator
def pdf2text(path, unique_type_dir):
    return


@timeout(10, timeout_exception=TimeoutError)
def pdf_analyze(interpreter, page, device, page_no):
    pdf_time = time.time()
    interpreter.process_page(page)
    layout = device.get_result()
    log("page_no: " + str(page_no) + " pdf_analyze cost: " + str(time.time() - pdf_time))
    return layout


@timeout(25, timeout_exception=TimeoutError)
def read_pdfminer(path, laparams):
    fp = open(path, 'rb')
    parser = PDFParser(fp)
    doc_pdfminer = PDFDocument(parser)
    rsrcmgr = PDFResourceManager()
    device = PDFPageAggregator(rsrcmgr, laparams=laparams)
    interpreter = PDFPageInterpreter(rsrcmgr, device)
    return doc_pdfminer, device, interpreter


@timeout(15, timeout_exception=TimeoutError)
def read_pymupdf(path):
    return fitz.open(path)


@timeout(15, timeout_exception=TimeoutError)
def read_pypdf2(path):
    doc_pypdf2 = PdfFileReader(path, strict=False)
    doc_pypdf2_new = PdfFileWriter()
    return doc_pypdf2, doc_pypdf2_new


@timeout(25, timeout_exception=TimeoutError, use_signals=False)
def read_pdfplumber(path, laparams):
    fp = open(path, 'rb')
    lt = LineTable()
    doc_top = 0
    doc_pdfplumber = PDF(fp, laparams=laparams.__dict__)
    return lt, doc_top, doc_pdfplumber
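
# A minimal sketch (not called anywhere in the pipeline) of the contract the
# timeout-guarded readers above expose: each call either returns the parsed
# objects or raises TimeoutError, which init_package() below maps to
# error_code [-3]. The default path argument is hypothetical.
def _demo_reader_timeout(path="sample.pdf"):
    try:
        doc_pdfminer, device, interpreter = read_pdfminer(path, LAParams())
        return doc_pdfminer is not None
    except TimeoutError:
        log("read_pdfminer timed out for " + path)
        return False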

class PDFConvert:
    def __init__(self, path, unique_type_dir, need_page_no):
        self._doc = _Document(path)
        self.path = path
        self.unique_type_dir = unique_type_dir
        if not os.path.exists(self.unique_type_dir):
            os.mkdir(self.unique_type_dir)

        # the page range to extract
        self.need_page_no = need_page_no
        self.start_page_no = None
        self.end_page_no = None
        # by default limit_page_cnt rules: first 10 pages plus last 10 pages
        if self.need_page_no is None:
            self.limit_page_cnt = 20
        else:
            # otherwise a "start,end" range rules, e.g. "2,5"
            ss = self.need_page_no.split(',')
            if len(ss) != 2:
                self._doc.error_code = [-14]
            else:
                self.start_page_no = int(ss[0])
                self.end_page_no = int(ss[-1])
                if self.end_page_no == -1:
                    self.end_page_no = 1000000
                self.start_page_no -= 1
                self.end_page_no -= 1
                if self.end_page_no <= self.start_page_no or self.start_page_no < 0 or self.end_page_no < -1:
                    self._doc.error_code = [-14]

        self.packages = ["pdfminer", "PyMuPDF", "PyPDF2", "pdfplumber"]
        self.has_init_pdf = [0] * len(self.packages)

        # md5 of every image object, used to drop heavily duplicated images
        self.md5_image_obj_list = []
        # whether each page is pure text
        self.only_text_list = []
        # whether specific pages are re-extracted
        self.convert_specific_page = 1

    @memory_decorator
    def init_package(self, package_name):
        # initialize each pdf package on demand
        try:
            laparams = LAParams(line_overlap=0.01,
                                char_margin=0.3,
                                line_margin=0.01,
                                word_margin=0.01,
                                boxes_flow=0.1, )
            if package_name == self.packages[0]:
                self.doc_pdfminer, self.device, self.interpreter = read_pdfminer(self.path, laparams)
                self.has_init_pdf[0] = 1
            elif package_name == self.packages[1]:
                self.doc_pymupdf = read_pymupdf(self.path)
                self.has_init_pdf[1] = 1
            elif package_name == self.packages[2]:
                self.doc_pypdf2, self.doc_pypdf2_new = read_pypdf2(self.path)
                self.has_init_pdf[2] = 1
            elif package_name == self.packages[3]:
                self.lt, self.doc_top, self.doc_pdfplumber = read_pdfplumber(self.path, laparams)
                self.has_init_pdf[3] = 1
            else:
                log("Only Support Packages " + str(self.packages))
                raise Exception
        except Exception as e:
            log(package_name + " cannot open pdf!")
            traceback.print_exc()
            self._doc.error_code = [-3]

    @memory_decorator
    def convert(self, limit_page_cnt=20):
        if self.has_init_pdf[0] == 0:
            self.init_package("pdfminer")
        if self._doc.error_code is not None:
            self._doc.error_code = None
            # pdfminer cannot read it at all: fall back to page images + ocr
            self.get_all_page_image()
            return

        # check whether the pdf is readable
        try:
            pages = PDFPage.create_pages(self.doc_pdfminer)
            for page in pages:
                break
            pages = list(pages)
        # except pdfminer.psparser.PSEOF as e:
        except:
            # pdfminer cannot read blank-page objects: ocr the images rendered by pymupdf instead
            log("pdfminer read failed! read by pymupdf!")
            traceback.print_exc()
            try:
                self.get_all_page_image()
                return
            except:
                traceback.print_exc()
                log("use pymupdf read failed!")
                self._doc.error_code = [-3]
                return

        # process page by page
        pages = PDFPage.create_pages(self.doc_pdfminer)
        pages = list(pages)
        page_count = len(pages)
        self.only_text_list = [-1] * len(pages)
        page_no = 0
        for page in pages:
            # an explicit page range was given
            if self.start_page_no is not None and self.end_page_no is not None:
                if page_count < self.end_page_no:
                    self.end_page_no = page_count
                if page_no < self.start_page_no or page_no >= self.end_page_no:
                    page_no += 1
                    continue
            # otherwise limit the page count: only the first 10 and last 10 pages
            else:
                if page_count > limit_page_cnt and int(limit_page_cnt/2) <= page_no < page_count - int(limit_page_cnt/2):
                    page_no += 1
                    continue

            # parse a single page
            start_time = time.time()
            self._page = _Page(page, page_no)
            self.convert_page(page, page_no)
            log('convert_page page_no: ' + str(page_no) + ' cost: ' + str(time.time()-start_time))
            if self._doc.error_code is None and self._page.error_code is not None:
                if self._page.error_code[0] in [-4, -3, 0]:
                    page_no += 1
                    continue
                else:
                    self._doc.error_code = self._page.error_code
                    break
            self._doc.add_child(self._page)
            page_no += 1

        self._doc.children, delete_header_footer_list = self.delete_header_footer(self._doc.children)

        if self.convert_specific_page and self.need_page_no is None:
            # re-extract specific pages from the skipped middle
            # print('self.only_text_list', self.only_text_list)
            if self.only_text_list.count(0) == 0:
                ratio = 0
            else:
                ratio = self.only_text_list.count(0) / (page_count-self.only_text_list.count(-1))
            # print('ratio', ratio)
            if page_count > limit_page_cnt and ratio <= 0.2:
                page_no = 0
                find_flag = 0
                add_page_list = []
                for page in pages:
                    if not int(limit_page_cnt/2) <= page_no < page_count - int(limit_page_cnt/2):
                        page_no += 1
                        continue
                    # parse a single page
                    start_time = time.time()
                    self._page = _Page(page, page_no)
                    self.convert_page(page, page_no, skip_image=1)
                    log('convert_page add page_no: ' + str(page_no) + ' cost: ' + str(time.time()-start_time))
                    # remove headers and footers (tmp_pages avoids shadowing the loop's pages)
                    tmp_pages, _ = self.delete_header_footer([self._page], delete_header_footer_list)
                    self._page = tmp_pages[0]
                    # extract the special part
                    re_str = '采购清单'
                    # the coordinates are upside down; sort them back
                    self._page.children.sort(key=lambda x: x.y, reverse=True)
                    # print('find_flag', find_flag, type(self._page.children[-1]))
                    if find_flag and type(self._page.children[0]) == _Table:
                        add_page_list.append(self._page)
                        if len(self._page.children) - 1 > 3:
                            find_flag = 0
                    for index in range(len(self._page.children)):
                        obj = self._page.children[index]
                        next_obj = None
                        if index+1 < len(self._page.children):
                            next_obj = self._page.children[index+1]
                        # print('采购清单', type(obj) == _Sentence, re.search(re_str, str(obj.content)), str(obj.content)[:20])
                        if type(obj) == _Sentence and re.search(re_str, obj.content) \
                                and next_obj and type(next_obj) == _Table:
                            add_page_list.append(self._page)
                            # print('add_page_list', page_no)
                            if len(self._page.children) - index - 1 > 3:
                                find_flag = 0
                            else:
                                find_flag = 1
                    page_no += 1
                # print('add_page_list', add_page_list)
                if add_page_list:
                    self._doc.children = self._doc.children[:int(limit_page_cnt/2)] + add_page_list \
                                         + self._doc.children[int(limit_page_cnt/2):]

        self.delete_same_image()
        # self.delete_bold_text_duplicate()
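    # Worked example of the default page window in convert(): with
    # limit_page_cnt=20 and page_count=50, pages 10..39 are skipped and only
    # pages 0-9 and 40-49 are parsed; the re-extraction pass above then scans
    # the skipped middle for a sentence matching re_str followed by a table.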
    def delete_same_image(self, show=0):
        # drop images that repeat many times (logos, watermarks)
        md5_dict = {}
        for _md5, image_obj in self.md5_image_obj_list:
            if _md5 in md5_dict.keys():
                md5_dict[_md5] += [image_obj]
            else:
                md5_dict[_md5] = [image_obj]
        cnt_threshold = 10
        delete_obj_list = []
        for _md5 in md5_dict.keys():
            img_list = md5_dict.get(_md5)
            # print('len(md5_dict.get(_md5))', _md5, len(img_list))
            if len(img_list) >= cnt_threshold:
                if show:
                    img_np = bytes2np(img_list[0].content)
                    cv2.namedWindow('delete same img_np', cv2.WINDOW_NORMAL)
                    cv2.imshow('delete same img_np', img_np)
                    cv2.waitKey(0)
                delete_obj_list += img_list
        for page in self._doc.children:
            for obj in delete_obj_list:
                if obj in page.children:
                    page.children.remove(obj)
        if show:
            for page in self._doc.children:
                for obj in page.children:
                    if isinstance(obj, _Image):
                        img_np = bytes2np(obj.content)
                        cv2.imshow('page img_np', img_np)
                        cv2.waitKey(0)

    def delete_header_footer(self, pages, delete_list=[]):
        sen_dict = {}
        for page in pages:
            for obj in page.children:
                if isinstance(obj, _Sentence):
                    key = str(obj.content) + ' ' + str(int(obj.y))
                    # print('key', key)
                    if key in sen_dict.keys():
                        sen_dict[key] += [obj]
                    else:
                        sen_dict[key] = [obj]
        # force-add the keys that are known to need deletion
        # print('delete_list', delete_list)
        for key in delete_list:
            if key in sen_dict:
                sen_dict[key] = sen_dict.get(key) * 10
        # print('sen_dict', sen_dict)
        delete_footer_header_list = []
        for key in sen_dict.keys():
            l = sen_dict.get(key)
            if len(l) >= 1/3 * max(10, len(pages)):
                delete_footer_header_list.append(key)
                for page in pages:
                    new_children = []
                    for obj in page.children:
                        if isinstance(obj, _Sentence):
                            if obj not in l:
                                new_children.append(obj)
                        else:
                            new_children.append(obj)
                    page.children = new_children
                # print('len(l)', len(l), len(pages))
                # print('delete_header_footer l[0]', l[0].content, l[0].y)
        return pages, delete_footer_header_list
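    # Worked example of the header/footer rule in delete_header_footer(): the
    # key is "<sentence text> <int(y)>", so the same string at the same height
    # on 10 of 30 pages reaches the threshold len(l) >= 1/3 * max(10, 30) == 10
    # and all of those sentence objects are dropped from every page.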
    def delete_bold_text_duplicate(self, lt_text_box_list):
        # collect every LTChar
        lt_char_list = []
        for lt_text_box in lt_text_box_list:
            for lt_text_line in lt_text_box:
                for lt_char in lt_text_line:
                    if isinstance(lt_char, LTChar):
                        lt_char_list.append(lt_char)
        # find the chars to drop: bold text is often rendered twice at almost the same bbox
        lt_char_list.sort(key=lambda x: (int(x.bbox[1]), x.bbox[0]))
        delete_list = []
        for i in range(len(lt_char_list)):
            lt_char1 = lt_char_list[i]
            bbox1 = lt_char1.bbox
            if lt_char1 in delete_list:
                continue
            for j in range(i+1, len(lt_char_list)):
                lt_char2 = lt_char_list[j]
                bbox2 = lt_char2.bbox
                if lt_char2 in delete_list:
                    continue
                if lt_char1.get_text() == lt_char2.get_text() and bbox_iou(bbox1, bbox2) >= 0.3 \
                        and re.search('[\u4e00-\u9fff():、,。]', lt_char1.get_text()):
                    delete_list.append(lt_char2)
        # reassemble the text boxes without the duplicates
        new_lt_text_box_list = []
        for lt_text_box in lt_text_box_list:
            new_lt_text_box = LTTextBoxHorizontal()
            for lt_text_line in lt_text_box:
                new_lt_text_line = LTTextLine(0.01)
                for lt_char in lt_text_line:
                    if lt_char in delete_list:
                        continue
                    if isinstance(lt_char, LTChar):
                        new_lt_text_line.add(lt_char)
                new_lt_text_box.add(new_lt_text_line)
            new_lt_text_box_list.append(new_lt_text_box)
        return new_lt_text_box_list

    def clean_text(self, _text):
        return re.sub("\s", "", _text)

    def get_text_lines(self, page, page_no):
        lt_line_list = []
        page_plumber = pdfPage(self.doc_pdfplumber, page, page_number=page_no, initial_doctop=self.doc_top)
        self.doc_top += page_plumber.height
        table_finder = TableFinder(page_plumber)
        all_width_zero = True
        for _edge in table_finder.get_edges():
            if _edge.get('linewidth') and _edge.get('linewidth') > 0:
                all_width_zero = False
                break
        for _edge in table_finder.get_edges():
            # print(_edge)
            if _edge.get('linewidth', 0.1) > 0 or all_width_zero:
                lt_line_list.append(LTLine(1, (float(_edge["x0"]), float(_edge["y0"])),
                                           (float(_edge["x1"]), float(_edge["y1"]))))
        log("pdf page_no %s has %s lines" % (str(page_no), str(len(lt_line_list))))
        return lt_line_list

    @memory_decorator
    def get_page_lines(self, layout, page_no, show=0):
        lt_line_list = table_line_pdf(layout, page_no, show)
        return lt_line_list

    @memory_decorator
    def recognize_text(self, layout, page_no, lt_text_list, lt_line_list):
        list_tables, filter_objs, _, connect_textbox_list = self.lt.recognize_table(
            lt_text_list, lt_line_list, from_pdf=True, is_reverse=False)
        self._page.in_table_objs = filter_objs
        # print("=======text_len:%d:filter_len:%d" % (len(lt_text_list), len(filter_objs)))
        for table in list_tables:
            _table = _Table(table["table"], table["bbox"])
            # self._page.children.append(_table)
            self._page.add_child(_table)
        list_sentences = ParseUtils.recognize_sentences(lt_text_list, filter_objs, layout.bbox, page_no)
        for sentence in list_sentences:
            # print('sentence.text', sentence.text)
            _sen = _Sentence(sentence.text, sentence.bbox)
            self._page.add_child(_sen)
        # pdf objects need reversed ordering
        # self._page.is_reverse = True
        return list_tables

    def is_text_legal(self, lt_text_list, page_no):
        # the pdf's character encoding cannot be decoded: ocr the whole page instead
        text_temp = ""
        for _t in lt_text_list:
            text_temp += _t.get_text()
        if re.search('[(]cid:[0-9]+[)]', text_temp):
            log("page_no: " + str(page_no) + " text has cid! try pymupdf...")
            page_image = self.get_page_image(page_no)
            if judge_error_code(page_image):
                self._page.error_code = page_image
            else:
                _image = _Image(page_image[1], page_image[0])
                self._page.add_child(_image)
            return False
        match1 = re.findall(get_garble_code(), text_temp)
        # match2 = re.search('[\u4e00-\u9fa5]', text_temp)
        if len(match1) > 8 and len(text_temp) > 10:
            log("page_no: " + str(page_no) + " garbled code! try pymupdf... " + text_temp[:20])
            page_image = self.get_page_image(page_no)
            if judge_error_code(page_image):
                self._page.error_code = page_image
            else:
                _image = _Image(page_image[1], page_image[0])
                self._page.add_child(_image)
            return False
        return True
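    # Example of the garbled text is_text_legal() guards against: some pdfs
    # expose undecodable glyphs as "(cid:8)(cid:44)(cid:45)" instead of real
    # characters; a single such match sends the whole page to ocr via
    # get_page_image().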
" + text_temp[:20]) page_image = self.get_page_image(page_no) if judge_error_code(page_image): self._page.error_code = page_image else: _image = _Image(page_image[1], page_image[0]) self._page.add_child(_image) return False return True def judge_b_table(self, lt_text_list, table_list, page_no): table_h_list = [] for table in table_list: table_h_list.append([table.get('bbox')[1], table.get('bbox')[3]]) # 先分行 lt_text_list.sort(key=lambda x: (x.bbox[1], x.bbox[0])) lt_text_row_list = [] current_h = lt_text_list[0].bbox[1] row = [] threshold = 2 for lt_text in lt_text_list: bbox = lt_text.bbox if current_h - threshold <= bbox[1] <= current_h + threshold: row.append(lt_text) else: if row: lt_text_row_list.append(row) row = [lt_text] current_h = lt_text.bbox[1] if row: lt_text_row_list.append(row) # 判断文本中间是否是空格,或一行文本中间有多个 is_b_table_cnt = 3 tolerate_cnt = 2 t_cnt = 0 row_cnt = 0 b_table_row_list = [] all_b_table = [] for row in lt_text_row_list: # 水印行跳过 if len(row) == 1 and len(row[0].get_text()[:-1]) == 1: continue # 目录行跳过 continue_flag = False for r in row: if re.search('[.·]{7,}', r.get_text()): continue_flag = True break if continue_flag: continue if len(row) == 1: text = row[0].get_text() bbox = row[0].bbox match = re.search('[ ]{3,}', text) if match and re.search('[\u4e00-\u9fff]{2,}', text[:match.span()[0]]) \ and re.search('[\u4e00-\u9fff]{2,}', text[match.span()[1]:]): row_cnt += 1 t_cnt = 0 b_table_row_list += row else: # 容忍 if t_cnt < tolerate_cnt: t_cnt += 1 continue if b_table_row_list and row_cnt >= is_b_table_cnt: all_b_table.append(b_table_row_list) row_cnt = 0 b_table_row_list = [] else: row_cnt += 1 t_cnt = 0 b_table_row_list += row if b_table_row_list and row_cnt >= is_b_table_cnt: all_b_table.append(b_table_row_list) # 对每个可能的b_table判断是否与table相交 is_b_table_flag = False for b_table in all_b_table: # 判断在不在有边框表格的范围 in_flag = False for table_h in table_h_list: for b in b_table: if min(table_h) <= b.bbox[1] <= max(table_h) or min(table_h) <= b.bbox[3] <= max(table_h): in_flag = True break if in_flag: break if in_flag: is_b_table_flag = False else: is_b_table_flag = True # print('is_b_table_flag True ', [[x.get_text(), x.bbox] for x in b_table]) # print('table_h_list', table_h_list) break log("page_no: " + str(page_no) + ' is_b_table_flag ' + str(is_b_table_flag)) return is_b_table_flag @memory_decorator def convert_page(self, page, page_no, skip_image=0): layout = self.get_layout(page, page_no) if self._doc.error_code is not None: return if judge_error_code(layout): self._page.error_code = layout return # 翻转pdf中所有对象的y坐标 max_y, min_y = 0, 10000 for x in layout: min_y = min(min_y, x.y0, x.y1) max_y = max(max_y, x.y0, x.y1) if max_y == 0: return for x in layout: # 外层obj的bbox设置 x.set_bbox((x.x0, round(max_y - max(x.y0, x.y1), 1), x.x1, round(max_y - min(x.y0, x.y1), 1))) # 内层单个字符的bbox设置 if isinstance(x, (LTTextBoxHorizontal, LTTextBoxVertical)): for lt_text_line in x: for lt_char in lt_text_line: if isinstance(lt_char, LTChar): lt_char.set_bbox((lt_char.x0, round(max_y - max(lt_char.y0, lt_char.y1), 1), lt_char.x1, round(max_y - min(lt_char.y0, lt_char.y1), 1))) # 判断该页的对象类型,并存储 lt_text_list = [] lt_image_list = [] for x in layout: if isinstance(x, (LTTextBoxHorizontal, LTTextBoxVertical)): lt_text_list.append(x) if isinstance(x, LTFigure): for y in x: if isinstance(y, LTImage): # 小的图忽略 if y.width <= 300 and y.height <= 300: continue # 图的width超过layout width,很大可能是水印 if y.width > layout.width + 20: continue lt_image_list.append(y) # 判断纯文本 if len(lt_image_list) == 0 and len(lt_text_list) 
    @memory_decorator
    def convert_page(self, page, page_no, skip_image=0):
        layout = self.get_layout(page, page_no)
        if self._doc.error_code is not None:
            return
        if judge_error_code(layout):
            self._page.error_code = layout
            return

        # flip the y coordinate of every object on the page
        max_y, min_y = 0, 10000
        for x in layout:
            min_y = min(min_y, x.y0, x.y1)
            max_y = max(max_y, x.y0, x.y1)
        if max_y == 0:
            return
        for x in layout:
            # set the outer object's bbox
            x.set_bbox((x.x0, round(max_y - max(x.y0, x.y1), 1),
                        x.x1, round(max_y - min(x.y0, x.y1), 1)))
            # set every single char's bbox
            if isinstance(x, (LTTextBoxHorizontal, LTTextBoxVertical)):
                for lt_text_line in x:
                    for lt_char in lt_text_line:
                        if isinstance(lt_char, LTChar):
                            lt_char.set_bbox((lt_char.x0, round(max_y - max(lt_char.y0, lt_char.y1), 1),
                                              lt_char.x1, round(max_y - min(lt_char.y0, lt_char.y1), 1)))

        # sort the page's objects by type and store them
        lt_text_list = []
        lt_image_list = []
        for x in layout:
            if isinstance(x, (LTTextBoxHorizontal, LTTextBoxVertical)):
                lt_text_list.append(x)
            if isinstance(x, LTFigure):
                for y in x:
                    if isinstance(y, LTImage):
                        # ignore small images
                        if y.width <= 300 and y.height <= 300:
                            continue
                        # an image wider than the layout is most likely a watermark
                        if y.width > layout.width + 20:
                            continue
                        lt_image_list.append(y)

        # record whether the page is pure text
        if len(lt_image_list) == 0 and len(lt_text_list) == 0:
            self.only_text_list[page_no] = 0
        elif len(lt_image_list) == 0:
            self.only_text_list[page_no] = 1
        else:
            self.only_text_list[page_no] = 0

        # skip images if asked to
        if skip_image:
            lt_image_list = []

        # the embedded text is garbled but there are images: ocr those directly
        all_text = ''.join([x.get_text() for x in lt_text_list])
        all_text = re.sub('[\s\d]', '', all_text)
        if len(re.findall(get_garble_code2(), all_text)) >= 3 and len(lt_image_list) >= 1:
            log('embedded text is garbled (1): ' + str(all_text[:10]))
            lt_text_list = []
        # print('11111', re.findall(get_traditional_chinese(), all_text))
        if 3 <= len(re.findall(get_traditional_chinese(), all_text)) <= len(all_text) / 2 and len(lt_image_list) >= 1:
            log('embedded text is garbled (2): ' + str(all_text[:10]))
            lt_text_list = []

        # fix the duplicated-bold-char problem
        lt_text_list = self.delete_bold_text_duplicate(lt_text_list)

        # drop watermark chars
        lt_text_list = self.delete_water_mark(lt_text_list, layout.bbox, 15)
        log("page_no: " + str(page_no) + " len(lt_image_list), len(lt_text_list) "
            + str(len(lt_image_list)) + " " + str(len(lt_text_list)))

        # too many images, or no text at all: ocr the whole rendered page
        if len(lt_image_list) > 4 or len(lt_text_list) == 0:
            page_image = self.get_page_image(page_no)
            if judge_error_code(page_image):
                self._page.error_code = page_image
            else:
                _image = _Image(page_image[1], page_image[0])
                _image.is_from_pdf = True
                _image.is_reverse = False
                self._page.add_child(_image)
        # otherwise read the page's objects normally
        else:
            # image objects
            for image in lt_image_list:
                try:
                    # print("pdf2text LTImage size", page_no, image.width, image.height)
                    image_stream = image.stream.get_data()
                    # ignore small images
                    if image.width <= 300 and image.height <= 300:
                        continue
                    # a very large embedded image: ocr the rendered page instead
                    img_test = Image.open(io.BytesIO(image_stream))
                    if image.height >= 1000 and image.width >= 1000:
                        page_image = self.get_page_image(page_no)
                        if judge_error_code(page_image):
                            self._page.error_code = page_image
                        else:
                            _image = _Image(page_image[1], page_image[0])
                            _image.is_from_pdf = True
                            _image.is_reverse = False
                            self._page.add_child(_image)
                            image_md5 = get_md5_from_bytes(page_image[1])
                            self.md5_image_obj_list.append([image_md5, _image])
                        return
                    # smaller images are saved to disk and ocr-ed directly
                    else:
                        temp_path = self.unique_type_dir + 'page' + str(page_no) \
                                    + '_lt' + str(lt_image_list.index(image)) + '.jpg'
                        img_test.save(temp_path)
                        with open(temp_path, "rb") as ff:
                            image_stream = ff.read()
                        _image = _Image(image_stream, temp_path, image.bbox)
                        self._page.add_child(_image)
                        image_md5 = get_md5_from_bytes(image_stream)
                        self.md5_image_obj_list.append([image_md5, _image])
                except Exception:
                    log("page_no: " + str(page_no) + " pdfminer read image fail! use pymupdf read image...")
                    traceback.print_exc()

            # pdf objects need reversed ordering
            # self._page.is_reverse = True
            if self.has_init_pdf[3] == 0:
                self.init_package("pdfplumber")
            if not self.is_text_legal(lt_text_list, page_no):
                return
            try:
                lt_line_list = self.get_page_lines(layout, page_no)
            except:
                traceback.print_exc()
                lt_line_list = []
                self._page.error_code = [-13]
            table_list = self.recognize_text(layout, page_no, lt_text_list, lt_line_list)

            # judging from the text layout, the page may hold a borderless table
            if self.judge_b_table(lt_text_list, table_list, page_no):
                page_image = self.get_page_image(page_no)
                if judge_error_code(page_image):
                    self._page.error_code = page_image
                else:
                    _image = _Image(page_image[1], page_image[0])
                    _image.is_from_pdf = True
                    # _image.is_reverse = True
                    _image.b_table_from_text = True
                    _image.b_table_text_obj_list = lt_text_list
                    _image.b_table_layout_size = (layout.width, layout.height)
                    self._page.add_child(_image)
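    # Worked example of the y-flip at the top of convert_page(): with
    # max_y == 800, a char with (y0, y1) == (750, 760) becomes
    # (800 - 760, 800 - 750) == (40, 50), turning pdfminer's bottom-left
    # origin into the top-left origin the table logic expects.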
    def get_layout(self, page, page_no):
        if self.has_init_pdf[0] == 0:
            self.init_package("pdfminer")
        if self._doc.error_code is not None:
            return
        # get the page's layout (the call is identical on every platform)
        start_time = time.time()
        try:
            layout = pdf_analyze(self.interpreter, page, self.device, page_no)
        except TimeoutError as e:
            log("page_no: " + str(page_no) + " pdfminer read page time out! " + str(time.time() - start_time))
            layout = [-4]
        except Exception:
            traceback.print_exc()
            log("page_no: " + str(page_no) + " pdfminer read page error! continue...")
            layout = [-3]
        log("page_no: " + str(page_no) + " get_layout cost: " + str(time.time()-start_time))
        return layout

    def get_page_image(self, page_no):
        start_time = time.time()
        try:
            if self.has_init_pdf[1] == 0:
                self.init_package("PyMuPDF")
            if self._doc.error_code is not None:
                return
            # save_dir = self.path.split(".")[-2] + "_" + self.path.split(".")[-1]
            output = self.unique_type_dir + "page" + str(page_no) + ".png"
            page = self.doc_pymupdf.loadPage(page_no)
            rotate = int(0)
            zoom_x = 2.
            zoom_y = 2.
            mat = fitz.Matrix(zoom_x, zoom_y).preRotate(rotate)
            pix = page.getPixmap(matrix=mat, alpha=False)
            pix.writePNG(output)
            # resize the rendered image
            self.resize_image(output)
            with open(output, "rb") as f:
                pdf_image = f.read()
            log("page_no: " + str(page_no) + ' get_page_image cost: ' + str(time.time()-start_time))
            return [output, pdf_image]
        except ValueError as e:
            traceback.print_exc()
            if str(e) == "page not in document":
                log("page_no: " + str(page_no) + " page not in document! continue...")
                return [0]
            elif "encrypted" in str(e):
                log("page_no: " + str(page_no) + " document need password")
                return [-7]
        except RuntimeError as e:
            if "cannot find page" in str(e):
                log("page_no: " + str(page_no) + " page cannot find in document! continue...")
                return [0]
            else:
                traceback.print_exc()
                return [-3]
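    # Note on the render scale in get_page_image(): PyMuPDF renders at a 72 dpi
    # base, so fitz.Matrix(2., 2.) yields roughly 144 dpi page images;
    # resize_image() then caps the longer side at 2000 px before ocr.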
    def get_all_page_image(self):
        start_time = time.time()
        if self.has_init_pdf[1] == 0:
            self.init_package("PyMuPDF")
        if self._doc.error_code is not None:
            return
        page_count = self.doc_pymupdf.page_count
        for page_no in range(page_count):
            # limit the page count: only the first 10 and last 10 pages
            if page_count > 20:
                if 10 <= page_no < page_count - 10:
                    continue
            self._page = _Page(None, page_no)
            page_image = self.get_page_image(page_no)
            if judge_error_code(page_image):
                self._page.error_code = page_image
            else:
                _image = _Image(page_image[1], page_image[0])
                self._page.add_child(_image)
            # on error keep reading the following pages
            if self._doc.error_code is None and self._page.error_code is not None:
                continue
            self._doc.add_child(self._page)
        log('get_all_page_image cost: ' + str(time.time()-start_time))

    @memory_decorator
    def connect_table(self, html_list, show=0):
        # (the '<table'/'</table>'/'<div>' string literals below were lost to
        # tag-stripping and are reconstructed from the surrounding logic)
        if not html_list:
            return html_list

        # initial conditions:
        # 0: A is the last table of one page, B is the first table of the next page
        # 1.1: no text after A (except a page number) and no text before B (except a page number)
        # 1.2: text before B (probably a header, < 60 chars), the leading cells of B's first row
        #      are empty, and some non-empty cell of that row holds a lot of text
        # 1.3: text before B (probably a header, < 60 chars), the first cell of B's first row is
        #      empty, and at most half of the cells hold text
        # 1.4: text before B (probably a header, < 60 chars), and the first cell of B's first row
        #      is a pure-digit serial
        # 1.5: text after A (a header besides the page number), with only one line after A holding
        #      no more than 15 Chinese characters
        connect_flag_list = []
        soup_list = []
        connect_rule_dict = {}
        for i, h in enumerate(html_list):
            soup = BeautifulSoup(h, 'lxml')
            soup_list.append(soup)

            # find the last table
            last_table_start, last_table_end = None, None
            match = re.finditer('<table', h)
            for m in match:
                last_table_start = m.span()[0]
            match = re.finditer('</table>', h[last_table_start:])
            for m in match:
                last_table_end = m.span()[1] + last_table_start
            # extra rule: keep the table itself
            rule_a = [0, h[last_table_start:last_table_end]]

            # anything but a page number after the last table?
            connect_flag1 = False
            if last_table_end is not None:
                match = re.findall('[^-/第页0-9,,]', re.sub('<div>|</div>', '', h[last_table_end:]))
                # print('match', match.group())
                # if not match or match.group() == '':
                if len(match) == 0:
                    connect_flag1 = True
            # there is a footer
            if not connect_flag1:
                if len(re.findall('<div>', h[last_table_end:])) <= 1 \
                        and len(re.findall('[\u4e00-\u9fff]', h[last_table_end:])) <= 60:
                    connect_flag1 = True

            # find the first table
            first_table_start, first_table_end = None, None
            match = re.finditer('<table', h)
            for m in match:
                first_table_start = m.span()[0]
                break
            match = re.finditer('</table>', h[first_table_start:])
            for m in match:
                first_table_end = m.span()[1] + first_table_start
                break
            # extra rule: keep the table itself
            rule_b = [0, h[first_table_start:first_table_end]]

            # anything before the first table?
            connect_flag2 = False
            if first_table_start is not None and first_table_start == 0:
                connect_flag2 = True
            # there is content, but it is a header
            if not connect_flag2:
                tables = soup.findAll('table')
                if tables:
                    first_table = tables[0]
                    rows = first_table.findAll('tr')
                    if rows:
                        first_row = rows[0]
                        col_text_len_list = [len(x.text) for x in first_row]
                        col_text_list = [x.text for x in first_row]
                        # at most 60 chars before the table, first cell empty, some cell holds long text
                        if not connect_flag2 and len(h[:first_table_start]) <= 60 \
                                and col_text_len_list[0] == 0 and max(col_text_len_list) >= 30:
                            connect_flag2 = True
                            rule_b[0] = 1
                        # at most half of the cells hold text and the first cell is empty
                        if not connect_flag2 and col_text_len_list.count(0) >= len(col_text_len_list) / 2 \
                                and col_text_len_list[0] == 0:
                            connect_flag2 = True
                        # at most one line before the table and the first cell is pure digits
                        if not connect_flag2 and len(col_text_list) > 0 and \
                                len(re.findall('<div>', h[:first_table_start])) <= 0 and \
                                len(re.findall('\d', col_text_list[0])) == len(col_text_list[0]):
                            connect_flag2 = True
                        # if not connect_flag2 and len(re.findall('<div>', h[:first_table_start])) <= 0 \
                        #         and len(re.findall('[\u4e00-\u9fff]', h[:first_table_start])) <= 25:
                        #     connect_flag2 = True
            connect_flag_list.append([i, connect_flag2, connect_flag1])
            connect_rule_dict[i] = [rule_b, rule_a]
        if show:
            print('connect_flag_list', connect_flag_list)
            print('connect_rule_dict', connect_rule_dict)

        # merge the page numbers that satisfy condition 1 into groups
        connect_pages_list = []
        if connect_flag_list:
            temp_list = [connect_flag_list[0]]
            for i in range(1, len(connect_flag_list)):
                c = connect_flag_list[i]
                if c[1] and temp_list[-1][2]:
                    temp_list.append(c)
                else:
                    if temp_list:
                        connect_pages_list.append(temp_list)
                    temp_list = [c]
                    # connect_pages_list.append([c])
            if temp_list:
                connect_pages_list.append(temp_list)
        if show:
            print('connect_pages_list', connect_pages_list)

        # further condition: the column counts inside a group must match
        connect_pages_list2 = []
        for c_list in connect_pages_list:
            if len(c_list) == 1:
                connect_pages_list2.append(c_list)
            else:
                col_cnt_list = []
                # cells may have been duplicated; merging identical neighbours into one
                # column is currently disabled
                merge_col_cnt_list = []
                for c in c_list:
                    soup = soup_list[c[0]]
                    table1 = soup.findAll('table')[-1]
                    table2 = soup.findAll('table')[0]
                    tr1 = table1.findAll('tr')
                    tr2 = table2.findAll('tr')
                    td1 = tr1[-1].findAll('td')
                    td2 = tr2[0].findAll('td')
                    col_cnt_list.append([len(td2), len(td1)])
                # judge
                new_c_list = [c_list[0]]
                # print('col_cnt_list', col_cnt_list)
                for i in range(len(col_cnt_list) - 1):
                    if col_cnt_list[i][1] != col_cnt_list[i + 1][0]:
                        # and merge_col_cnt_list[i][1] != merge_col_cnt_list[i + 1][0]:
                        connect_pages_list2.append(new_c_list)
                        new_c_list = [c_list[i + 1]]
                    else:
                        new_c_list.append(c_list[i + 1])
                if new_c_list:
                    connect_pages_list2.append(new_c_list)
        if show:
            print('connect_pages_list2', connect_pages_list2)

        # check whether cells must be filled in when two tables are connected
        for c_list in connect_pages_list2:
            for i in range(len(c_list)-1):
                page_index1 = c_list[i][0]
                page_index2 = c_list[i+1][0]
                html2 = html_list[page_index2]
                soup2 = soup_list[page_index2]
                rule1 = connect_rule_dict.get(page_index1)[1]
                rule2 = connect_rule_dict.get(page_index2)[0]
                # print('rule1', rule1)
                # if rule2[0]:
                table1 = BeautifulSoup(rule1[1], 'lxml').findAll('table')[0]
                table2 = BeautifulSoup(rule2[1], 'lxml').findAll('table')[0]
                add_td_value = []
                # grab the last row's td values
                for tr in table1.findAll('tr')[::-1]:
                    temp_list = []
                    for td in tr.findAll('td'):
                        temp_list.append(td.get_text())
                    add_td_value = temp_list
                    break
                # print('add_td_value', add_td_value)
                tr_index = 0
                for tr in table2.findAll('tr'):
                    temp_list = []
                    for td in tr.findAll('td'):
                        if len(td.get_text()) < 1:
                            temp_list.append(0)
                        else:
                            temp_list.append(1)
                    # print('temp_list', temp_list)
                    if temp_list and add_td_value and len(temp_list) == len(add_td_value) \
                            and 1 in temp_list and temp_list[0] != 1 \
                            and 1 not in temp_list[:temp_list.index(1)]:
                        for j in range(len(temp_list)):
                            if temp_list[j] == 0:
                                tr.findAll('td')[j].string = add_td_value[j]
                    # else:
                    #     # first row only, more than 3 columns, exactly one filled: merge the texts
                    #     if tr_index == 0 and len(temp_list) >= 3 and temp_list.count(1) == 1:
                    #         tr.findAll('td')[j].string += add_td_value[j]
                    # print('tr.findAll(td)[0]', tr.findAll('td')[0])
                    tr_index += 1
                soup2.findAll('table')[0].replace_with(table2)
                html_list[page_index2] = str(soup2)

        # concatenate the tables of the pages that satisfy the conditions
        new_html_list = []
        for c_list in connect_pages_list2:
            if len(c_list) == 1:
                new_html_list.append(html_list[c_list[0][0]])
                continue
            new_html = ''
            for c in c_list:
                match = re.finditer('</table>', new_html)
                last_table_index = None
                for m in match:
                    last_table_index = m.span()[0]
                new_html += html_list[c[0]]
                # print('html_list[c[0]]', html_list[c[0]])
                if last_table_index is None:
                    continue
                match = re.finditer('<table.*?>', new_html[last_table_index:])
                first_table_index = None
                for m in match:
                    first_table_index = last_table_index + m.span()[1]
                    break
                if first_table_index is None:
                    continue
                # print('re', re.findall('</table>.*?<table.*?>', new_html[last_table_index:first_table_index]))
                # non-greedy match: cut everything between the two tables
                new_html_sub = re.sub('</table>.*?<table.*?>', '', new_html[last_table_index:first_table_index])
                new_html = new_html[:last_table_index] + new_html_sub + new_html[first_table_index:]
                # print('new_html', new_html)
            # new_html = new_html[:-5]
            # ([-/第页0-9]|<div>|</div>)*
            # non-greedy match
            # match = re.finditer('<tr><td>#@#@#</td></tr>.*?', new_html)
            # for m in match:
            #     if '#@#@#' in m.group():
            #         new_html = re.sub('<tr>.*#@#@#.*?</tr>', '', new_html)
            # print('new_html', new_html)
            soup = BeautifulSoup(new_html, 'lxml')
            trs = soup.findAll('tr')
            decompose_trs = []
            for i in range(len(trs)):
                if trs[i].get_text() == '#@#@#':
                    td1 = trs[i - 1].findAll('td')
                    td2 = trs[i + 1].findAll('td')
                    if td2[0].get_text() == '':
                        # when one table row spans several pages, keep merging into its first
                        # ("father") row instead of dropping the continuation rows
                        find_father = False
                        for father, son in decompose_trs:
                            # print('son', son)
                            if father != '' and son == trs[i - 1]:
                                td_father = father.findAll('td')
                                for j in range(len(td_father)):
                                    td_father[j].string = td_father[j].get_text() + td2[j].get_text()
                                find_father = True
                                decompose_trs.append([father, trs[i + 1]])
                                break
                        if not find_father:
                            for j in range(len(td1)):
                                td1[j].string = td1[j].get_text() + td2[j].get_text()
                            decompose_trs.append([trs[i - 1], trs[i + 1]])
                        # trs[i + 1].decompose()
                    # trs[i].decompose()
                    decompose_trs.append(['', trs[i]])
            # print('decompose_trs', decompose_trs)
            for father, son in decompose_trs:
                for tr in trs:
                    if tr == son:
                        tr.decompose()
                        break
            new_html = str(soup)
            new_html_list.append(new_html)

        html_str = ''
        for h in new_html_list:
            html_str += h
        return [html_str]
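    # Example of the '#@#@#' sentinel rows handled at the end of connect_table():
    # <tr><td>电脑</td><td>10</td></tr><tr><td>#@#@#</td></tr><tr><td></td><td>0台</td></tr>
    # merges cell-by-cell into <tr><td>电脑</td><td>100台</td></tr> (the half-row's
    # first cell is empty), after which the sentinel row and the absorbed
    # half-row are decomposed.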
    def get_html(self):
        if self._doc.error_code is not None:
            return self._doc.error_code
        self.convert()
        if self._doc.error_code is not None:
            return self._doc.error_code
        html = self._doc.get_html(return_list=True)
        # connect tables across pages
        try:
            html = self.connect_table(html)
        except:
            traceback.print_exc()
            return [-12]
        return html

    def delete_water_mark(self, lt_text_list, page_bbox, times=5):
        # text repeated too often and spread across most of the page is a watermark
        duplicate_dict = {}
        for _obj in lt_text_list:
            t = _obj.get_text()
            if t in duplicate_dict.keys():
                duplicate_dict[t][0] += 1
                duplicate_dict[t][1].append(_obj)
            else:
                duplicate_dict[t] = [1, [_obj]]
        delete_text = []
        for t in duplicate_dict.keys():
            if duplicate_dict[t][0] >= times:
                obj_list = duplicate_dict[t][1]
                obj_list.sort(key=lambda x: x.bbox[3])
                obj_distance_h = abs(obj_list[-1].bbox[3] - obj_list[0].bbox[1])
                obj_list.sort(key=lambda x: x.bbox[2])
                obj_distance_w = abs(obj_list[-1].bbox[2] - obj_list[0].bbox[0])
                if obj_distance_h >= abs(page_bbox[1] - page_bbox[3]) * 0.7 \
                        and obj_distance_w >= abs(page_bbox[0] - page_bbox[2]) * 0.7:
                    delete_text.append(t)
        temp_text_list = []
        for _obj in lt_text_list:
            t = _obj.get_text()
            if t not in delete_text:
                temp_text_list.append(_obj)
        return temp_text_list

    def resize_image(self, img_path, max_size=2000):
        _img = cv2.imread(img_path)
        if _img.shape[0] <= max_size or _img.shape[1] <= max_size:
            return
        else:
            resize_axis = 0 if _img.shape[0] >= _img.shape[1] else 1
            ratio = max_size / _img.shape[resize_axis]
            new_shape = [0, 0]
            new_shape[resize_axis] = max_size
            new_shape[1 - resize_axis] = int(_img.shape[1 - resize_axis] * ratio)
            _img = cv2.resize(_img, (new_shape[1], new_shape[0]))
            cv2.imwrite(img_path, _img)

    def get_single_pdf(self, path, page_no):
        start_time = time.time()
        try:
            pdf_origin = copy.deepcopy(self.doc_pypdf2)
            pdf_new = copy.deepcopy(self.doc_pypdf2_new)
            pdf_new.addPage(pdf_origin.getPage(page_no))
            path_new = path.split(".")[0] + "_split.pdf"
            with open(path_new, "wb") as ff:
                pdf_new.write(ff)
            log("page_no: " + str(page_no) + " get_single_pdf cost: " + str(time.time()-start_time))
            return path_new
        except PyPDF2.utils.PdfReadError as e:
            return [-3]
        except Exception as e:
            log("page_no: " + str(page_no) + " get_single_pdf error!")
            return [-3]
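
# A minimal standalone sketch of the junction cut connect_table() performs,
# using the '</table>.*?<table.*?>' pattern as reconstructed above (the two
# sample page strings are hypothetical):
def _demo_table_join():
    page1 = '<table border="1"><tr><td>a</td></tr></table><div>-1/2-</div>'
    page2 = '<div>-2/2-</div><table border="1"><tr><td>b</td></tr></table>'
    joined = page1 + page2
    # non-greedy removal of everything between the two tables merges them:
    # '<table border="1"><tr><td>a</td></tr><tr><td>b</td></tr></table>'
    return re.sub('</table>.*?<table.*?>', '', joined)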

def get_text_font():
    def flags_decomposer(flags):
        """Make font flags human readable."""
        l = []
        if flags & 2 ** 0:
            l.append("superscript")
        if flags & 2 ** 1:
            l.append("italic")
        if flags & 2 ** 2:
            l.append("serifed")
        else:
            l.append("sans")
        if flags & 2 ** 3:
            l.append("monospaced")
        else:
            l.append("proportional")
        if flags & 2 ** 4:
            l.append("bold")
        return ", ".join(l)

    def get_underlined_textLines(page):
        """
        Collect all underlined text on one pdf page.
        :param page: a fitz page
        :return: list of tuples, one per complete underlined span:
                 [(underlined text, blk_no, line_no), ...]
        """
        paths = page.get_drawings()  # get drawings on the current page
        # collect every bbox with a very small height, since underlines are
        # mostly such flat rectangles
        # subselect things we may regard as lines
        lines = []
        for p in paths:
            for item in p["items"]:
                if item[0] == "l":  # an actual line
                    p1, p2 = item[1:]
                    if p1.y == p2.y:
                        lines.append((p1, p2))
                elif item[0] == "re":  # a rectangle: check if height is small
                    r = item[1]
                    if r.width > r.height and r.height <= 2:
                        lines.append((r.tl, r.tr))  # take top left / right points

        # the page's max_lineheight, used as the distance threshold below
        blocks = page.get_text("dict", flags=11)["blocks"]
        max_lineheight = 0
        for b in blocks:
            for l in b["lines"]:
                bbox = fitz.Rect(l["bbox"])
                if bbox.height > max_lineheight:
                    max_lineheight = bbox.height

        underlined_res = []
        # start querying the underlined content
        # make a list of words
        words = page.get_text("words")
        # if underlined, the bottom left / right of a word
        # should not be too far away from left / right end of some line:
        for wdx, w in enumerate(words):  # w[4] is the actual word string
            r = fitz.Rect(w[:4])  # first 4 items are the word bbox
            for p1, p2 in lines:  # check distances for start / end points
                if abs(r.bl - p1) <= max_lineheight:  # the word's bottom left fits the underline's left end
                    if abs(r.br - p2) <= max_lineheight:
                        # its bottom right fits the right end too (a single word, no spaces)
                        print(f"Word '{w[4]}' is underlined! Its block-line number is {w[-3], w[-2]}")
                        underlined_res.append((w[4], w[-3], w[-2]))  # (word, blk_no, line_no)
                        break  # don't check more lines
                    else:
                        # keep searching to the right on the same line: one underline may
                        # cover several space-separated words
                        curr_line_num = w[-2]  # line number
                        for right_wdx in range(wdx + 1, len(words), 1):
                            _next_w = words[right_wdx]
                            if _next_w[-2] != curr_line_num:
                                # the word to the right is no longer on this line (no crossing lines)
                                break
                            _r_right = fitz.Rect(_next_w[:4])  # bbox of the word to the right
                            if abs(_r_right.br - p2) <= max_lineheight:
                                # its bottom-right point is close enough to the underline's right end
                                print(f"Word '{' '.join([_one_word[4] for _one_word in words[wdx:right_wdx + 1]])}' is underlined! "
                                      + f"Its block-line number is {w[-3], w[-2]}")
                                underlined_res.append(
                                    (' '.join([_one_word[4] for _one_word in words[wdx:right_wdx + 1]]),
                                     w[-3], w[-2]))  # (underlined text, blk_no, line_no)
                                break  # don't check more lines
        return underlined_res

    _p = r'C:\Users\Administrator\Desktop\test_pdf\error2-2.pdf'
    doc_pymupdf = read_pymupdf(_p)
    page = doc_pymupdf[0]

    blocks = page.get_text("dict", flags=11)["blocks"]
    for b in blocks:  # iterate through the text blocks
        for l in b["lines"]:  # iterate through the text lines
            for s in l["spans"]:  # iterate through the text spans
                print("")
                font_properties = "Font: '%s' (%s), size %g, color #%06x" % (
                    s["font"],  # font name
                    flags_decomposer(s["flags"]),  # readable font flags
                    s["size"],  # font size
                    s["color"],  # font color
                )
                print(s)
                print("Text: '%s'" % s["text"])  # simple print of text
                print(font_properties)

    get_underlined_textLines(page)
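
# Example of the font-flag bit layout decoded by flags_decomposer() above
# (PyMuPDF span "flags"): flags = 2**1 + 2**4 = 18 reads as
# "italic, sans, proportional, bold", since bits 2 and 3 are unset.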
" + f"Its block-line number is {w[-3], w[-2]}") underlined_res.append( (' '.join([_one_word[4] for _one_word in words[wdx:right_wdx + 1]]), w[-3], w[-2]) ) # 分别是(下划线词,所在blk_no,所在line_no) break # don't check more lines return underlined_res _p = r'C:\Users\Administrator\Desktop\test_pdf\error2-2.pdf' doc_pymupdf = read_pymupdf(_p) page = doc_pymupdf[0] blocks = page.get_text("dict", flags=11)["blocks"] for b in blocks: # iterate through the text blocks for l in b["lines"]: # iterate through the text lines for s in l["spans"]: # iterate through the text spans print("") font_properties = "Font: '%s' (%s), size %g, color #%06x" % ( s["font"], # font name flags_decomposer(s["flags"]), # readable font flags s["size"], # font size s["color"], # font color ) print(s) print("Text: '%s'" % s["text"]) # simple print of text print(font_properties) get_underlined_textLines(page) # 以下为现成pdf单页解析接口 class ParseSentence: def __init__(self, bbox, fontname, fontsize, _text, _title, title_text, _pattern, title_degree, is_outline, outline_location, page_no): (x0, y0, x1, y1) = bbox self.x0 = x0 self.y0 = y0 self.x1 = x1 self.y1 = y1 self.bbox = bbox self.fontname = fontname self.fontsize = fontsize self.text = _text self.title = _title self.title_text = title_text self.groups = _pattern self.title_degree = title_degree self.is_outline = is_outline self.outline_location = outline_location self.page_no = page_no def __repr__(self): return "%s,%s,%s,%d,%s" % (self.text, self.title, self.is_outline, self.outline_location, str(self.bbox)) class ParseUtils: @staticmethod def getFontinfo(_page): for _obj in _page._objs: if isinstance(_obj, (LTTextBoxHorizontal, LTTextBoxVertical)): for textline in _obj._objs: done = False for lchar in textline._objs: if isinstance(lchar, (LTChar)): _obj.fontname = lchar.fontname _obj.fontsize = lchar.size done = True break if done: break @staticmethod def recognize_sentences(list_textbox, filter_objs, page_bbox, page_no, remove_space=True, sourceP_LB=True): list_textbox.sort(key=lambda x: x.bbox[0]) list_textbox.sort(key=lambda x: x.bbox[3], reverse=sourceP_LB) cluster_textbox = [] for _textbox in list_textbox: if _textbox in filter_objs: continue _find = False for _ct in cluster_textbox: if abs(_ct["y"] - _textbox.bbox[1]) < 5: _find = True _ct["textbox"].append(_textbox) if not _find: cluster_textbox.append({"y": _textbox.bbox[1], "textbox": [_textbox]}) cluster_textbox.sort(key=lambda x: x["y"], reverse=sourceP_LB) list_sentences = [] for _line in cluster_textbox: _textboxs = _line["textbox"] _textboxs.sort(key=lambda x: x.bbox[0]) _linetext = _textboxs[0].get_text() for _i in range(1, len(_textboxs)): if abs(_textboxs[_i].bbox[0] - _textboxs[_i - 1].bbox[2]) > 60: if _linetext and _linetext[-1] not in (",", ",", "。", ".", "、", ";"): _linetext += "=,=" _linetext += _textboxs[_i].get_text() _linetext = re.sub("[\s\r\n]", "", _linetext) _bbox = (_textboxs[0].bbox[0], _textboxs[0].bbox[1], _textboxs[-1].bbox[2], _textboxs[-1].bbox[3]) _title = None _pattern_groups = None title_text = "" if not _title: _groups = ParseUtils.find_title_by_pattern(_textboxs[0].get_text()) if _groups: _title = _groups[0][0] title_text = _groups[0][1] _pattern_groups = _groups if not _title: _groups = ParseUtils.find_title_by_pattern(_linetext) if _groups: _title = _groups[0][0] title_text = _groups[0][1] _pattern_groups = _groups if not _title: _title = ParseUtils.rec_incenter(_bbox, page_bbox) title_degree = 2 if not _title: _linetext = _linetext.replace("=,=", ",") else: _linetext = 

class ParseUtils:
    @staticmethod
    def getFontinfo(_page):
        for _obj in _page._objs:
            if isinstance(_obj, (LTTextBoxHorizontal, LTTextBoxVertical)):
                for textline in _obj._objs:
                    done = False
                    for lchar in textline._objs:
                        if isinstance(lchar, (LTChar)):
                            _obj.fontname = lchar.fontname
                            _obj.fontsize = lchar.size
                            done = True
                            break
                    if done:
                        break

    @staticmethod
    def recognize_sentences(list_textbox, filter_objs, page_bbox, page_no,
                            remove_space=True, sourceP_LB=True):
        list_textbox.sort(key=lambda x: x.bbox[0])
        list_textbox.sort(key=lambda x: x.bbox[3], reverse=sourceP_LB)
        cluster_textbox = []
        for _textbox in list_textbox:
            if _textbox in filter_objs:
                continue
            _find = False
            for _ct in cluster_textbox:
                if abs(_ct["y"] - _textbox.bbox[1]) < 5:
                    _find = True
                    _ct["textbox"].append(_textbox)
            if not _find:
                cluster_textbox.append({"y": _textbox.bbox[1], "textbox": [_textbox]})
        cluster_textbox.sort(key=lambda x: x["y"], reverse=sourceP_LB)
        list_sentences = []
        for _line in cluster_textbox:
            _textboxs = _line["textbox"]
            _textboxs.sort(key=lambda x: x.bbox[0])
            _linetext = _textboxs[0].get_text()
            for _i in range(1, len(_textboxs)):
                if abs(_textboxs[_i].bbox[0] - _textboxs[_i - 1].bbox[2]) > 60:
                    if _linetext and _linetext[-1] not in (",", ",", "。", ".", "、", ";"):
                        _linetext += "=,="
                _linetext += _textboxs[_i].get_text()
            _linetext = re.sub("[\s\r\n]", "", _linetext)
            _bbox = (_textboxs[0].bbox[0], _textboxs[0].bbox[1],
                     _textboxs[-1].bbox[2], _textboxs[-1].bbox[3])
            _title = None
            _pattern_groups = None
            title_text = ""
            if not _title:
                _groups = ParseUtils.find_title_by_pattern(_textboxs[0].get_text())
                if _groups:
                    _title = _groups[0][0]
                    title_text = _groups[0][1]
                    _pattern_groups = _groups
            if not _title:
                _groups = ParseUtils.find_title_by_pattern(_linetext)
                if _groups:
                    _title = _groups[0][0]
                    title_text = _groups[0][1]
                    _pattern_groups = _groups
            if not _title:
                _title = ParseUtils.rec_incenter(_bbox, page_bbox)
                title_degree = 2
            if not _title:
                _linetext = _linetext.replace("=,=", ",")
            else:
                _linetext = _linetext.replace("=,=", "")
                title_degree = int(_title.split("_")[1])

            # page number
            if ParseUtils.rec_incenter(_bbox, page_bbox) and re.search("^\d+$", _linetext) is not None:
                continue
            if _linetext == "" or re.search("^,+$", _linetext) is not None:
                continue

            # outline (table-of-contents) line
            is_outline = False
            outline_location = -1
            _search = re.search("(?P<text>.+?)\.{5,}(?P<nums>\d+)$", _linetext)
            if _search is not None:
                is_outline = True
                _linetext = _search.group("text")
                outline_location = int(_search.group("nums"))
            list_sentences.append(ParseSentence(_bbox, _textboxs[-1].__dict__.get("fontname"),
                                                _textboxs[-1].__dict__.get("fontsize"), _linetext,
                                                _title, title_text, _pattern_groups, title_degree,
                                                is_outline, outline_location, page_no))
        # for _sen in list_sentences:
        #     print(_sen.__dict__)
        return list_sentences

    @staticmethod
    def find_title_by_pattern(_text,
                              _pattern="(?P<title_1>(?P<title_1_index_0_0>^第?)(?P<title_1_index_0_1>[一二三四五六七八九十ⅠⅡⅢⅣⅤⅥⅦⅧⅨⅩⅪⅫ]+)(?P<title_1_index_0_2>[、章]))|" \
                                       "(?P<title_3>^(?P<title_3_index_0_0>[ⅠⅡⅢⅣⅤⅥⅦⅧⅨⅩⅪⅫ]+))|" \
                                       "(?P<title_4>^(?P<title_4_index_0_0>第?)(?P<title_4_index_0_1>[一二三四五六七八九十]+)(?P<title_4_index_0_2>[节]))|" \
                                       "(?P<title_5>^(?P<title_5_index_0_0>\d{1,2}[\..、\s\-]\d{1,2}[\..、\s\-]\d{1,2}[\..、\s\-])(?P<title_5_index_0_1>\d{1,2})(?P<title_5_index_0_2>[\..、\s\-]))|" \
                                       "(?P<title_6>^(?P<title_6_index_0_0>\d{1,2}[\..、\s\-]\d{1,2}[\..、\s\-])(?P<title_6_index_0_1>\d{1,2})(?P<title_6_index_0_2>[\..、\s\-]))|" \
                                       "(?P<title_7>^(?P<title_7_index_0_0>\d{1,2}[\..、\s\-])(?P<title_7_index_0_1>\d{1,2})(?P<title_7_index_0_2>[\..、\s\-]))|" \
                                       "(?P<title_8>^(?P<title_8_index_0_0>\d{1,2})(?P<title_8_index_0_1>[\..、\s\-]))|" \
                                       "(?P<title_11>^(?P<title_11_index_0_0>[(（])(?P<title_11_index_0_1>\d{1,2})(?P<title_11_index_0_2>[)）]))|" \
                                       "(?P<title_12>^(?P<title_12_index_0_0>[(（])(?P<title_12_index_0_1>[a-zA-Z]+)(?P<title_12_index_0_2>[)）]))|" \
                                       "(?P<title_13>^(?P<title_13_index_0_0>[(（])(?P<title_13_index_0_1>[一二三四五六七八九十ⅠⅡⅢⅣⅤⅥⅦⅧⅨⅩⅪⅫ]+)(?P<title_13_index_0_2>[)）]))"):
        # note: the named groups were lost to tag-stripping; the
        # title_<n>[_index_*] scheme here is a reconstruction, and only the
        # 'title_<n>' prefix is consumed downstream (title_degree parsing)
        _se = re.search(_pattern, _text)
        groups = []
        if _se is not None:
            _gd = _se.groupdict()
            for k, v in _gd.items():
                if v is not None:
                    groups.append((k, v))
            if len(groups):
                groups.sort(key=lambda x: x[0])
                return groups
        return None

    @staticmethod
    def rec_incenter(o_bbox, p_bbox):
        p_width = p_bbox[2] - p_bbox[0]
        l_space = (o_bbox[0] - p_bbox[0]) / p_width
        r_space = (p_bbox[2] - o_bbox[2]) / p_width
        if abs((l_space - r_space)) < 0.1 and l_space > 0.2:
            return "title_2"

    @staticmethod
    def is_first_title(_title):
        if _title is None:
            return False
        if re.search("^\d+$", _title) is not None:
            if int(_title) == 1:
                return True
            return False
        if re.search("^[一二三四五六七八九十百]+$", _title) is not None:
            if _title == "一":
                return True
            return False
        if re.search("^[a-z]+$", _title) is not None:
            if _title == "a":
                return True
            return False
        if re.search("^[A-Z]+$", _title) is not None:
            if _title == "A":
                return True
            return False
        if re.search("^[ⅠⅡⅢⅣⅤⅥⅦⅧⅨⅩⅪⅫ]$", _title) is not None:
            if _title == "Ⅰ":
                return True
            return False
        return False

    @staticmethod
    def get_next_title(_title):
        if re.search("^\d+$", _title) is not None:
            return str(int(_title) + 1)
        if re.search("^[一二三四五六七八九十百]+$", _title) is not None:
            _next_title = ParseUtils.make_increase(['一', '二', '三', '四', '五', '六', '七', '八', '九', '十'],
                                                   re.sub("[十百]", '', _title))
            _next_title = list(_next_title)
            _next_title.reverse()
            if _next_title[-1] != "十":
                if len(_next_title) >= 2:
                    _next_title.insert(-1, '十')
                if len(_next_title) >= 4:
                    _next_title.insert(-3, '百')
            if _title[0] == "十":
                if _next_title == "十":
                    _next_title = ["二", "十"]
                _next_title.insert(0, "十")
            _next_title = "".join(_next_title)
            return _next_title
        if re.search("^[a-z]+$", _title) is not None:
            _next_title = ParseUtils.make_increase([chr(i + ord('a')) for i in range(26)], _title)
            _next_title = list(_next_title)
            _next_title.reverse()
            return "".join(_next_title)
        if re.search("^[A-Z]+$", _title) is not None:
            _next_title = ParseUtils.make_increase([chr(i + ord('A')) for i in range(26)], _title)
            _next_title = list(_next_title)
            _next_title.reverse()
            return "".join(_next_title)
        if re.search("^[ⅠⅡⅢⅣⅤⅥⅦⅧⅨⅩⅪⅫ]$", _title) is not None:
            _sort = ["Ⅰ", "Ⅱ", "Ⅲ", "Ⅳ", "Ⅴ", "Ⅵ", "Ⅶ", "Ⅷ", "Ⅸ", "Ⅹ", "Ⅺ", "Ⅻ"]
            _index = _sort.index(_title)
            if _index < len(_sort) - 1:
                return _sort[_index + 1]
        return None

    @staticmethod
    def make_increase(_sort, _title, _add=1):
        if len(_title) == 0 and _add == 0:
            return ""
        if len(_title) == 0 and _add == 1:
            return _sort[0]
        _index = _sort.index(_title[-1])
        next_index = (_index + _add) % len(_sort)
        next_chr = _sort[next_index]
        if _index == len(_sort) - 1:
            _add = 1
        else:
            _add = 0
        return next_chr + ParseUtils.make_increase(_sort, _title[:-1], _add)
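    # Worked example of make_increase(): with _sort = ['一', ..., '十'],
    # make_increase(_sort, '九') -> '十' (advance, no carry) and
    # make_increase(_sort, '十') -> '一一' (wrap with carry); callers reverse
    # the result, since digits are produced lowest-order first.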

    @staticmethod
    def rec_serial(_text, o_bbox, p_bbox, fontname,
                   _pattern="(?P<serial_1>^[一二三四五六七八九十]+[、])|" \
                            "(?P<serial_2>^\d+[\.、\s])|" \
                            "(?P<serial_3>^\d+\.\d+[\.、\s])|" \
                            "(?P<serial_4>^\d+\.\d+\.\d+[\.、\s])|" \
                            "(?P<serial_5>^\d+\.\d+\.\d+\.\d+[\.、\s])"):
        # todo: recognize the serial of the sentence
        # (the group names were lost to tag-stripping and are a reconstruction;
        # the matched group's name is what gets returned)
        _se = re.search(_pattern, _text)
        if _se is not None:
            _gd = _se.groupdict()
            for k, v in _gd.items():
                if v is not None:
                    return k
        return None
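
# A small sketch of the title helpers above (the group names in
# find_title_by_pattern are a reconstruction, so the exact key is an assumption):
def _demo_title_helpers():
    groups = ParseUtils.find_title_by_pattern('第三章 总则')
    first = groups[0] if groups else None  # e.g. ('title_1', '第三章')
    return first, ParseUtils.get_next_title('三')  # second value: '四'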

if __name__ == '__main__':
    # need_page_no=None selects the default first/last-10-pages window
    PDFConvert(r"C:/Users/Administrator/Downloads/1651896704621.pdf",
               "C:/Users/Administrator/Downloads/1", None).get_html()