
Fix the Keras multi-process deployment issue; deploy table-header recognition

fangjiasheng 2 years ago
parent commit 3040d86818

+ 12 - 6
BiddingKG/dl/interface/Preprocessing.py

@@ -8,7 +8,7 @@ import time
 import codecs
 
 from BiddingKG.dl.ratio.re_ratio import extract_ratio
-# from BiddingKG.dl.table_head.predict import predict
+from BiddingKG.dl.table_head.predict import predict
 
 sys.setrecursionlimit(1000000)
 sys.path.append(os.path.abspath("../.."))
@@ -117,7 +117,9 @@ def tableToText(soup):
             if len(tds)==0:
                 tr_line.append([re.sub('\xa0','',segment(tr,final=False)),0]) # 2021/12/21 fix data loss caused by some table rows having no td
             for td in tds:
+                # print("td", td)
                 tr_line.append([re.sub('\xa0','',segment(td,final=False)),0])
+                # print("segment td", segment(td,final=False))
                 #tr_line.append([td.get_text(),0])
             inner_table.append(tr_line)
         return inner_table                          
@@ -422,7 +424,11 @@ def tableToText(soup):
     def set_head_model(inner_table):
         for i in range(len(inner_table)):
             for j in range(len(inner_table[i])):
-                inner_table[i][j] = inner_table[i][j][0]
+                # strip symbols at the start/end of each cell so they do not affect head prediction
+                col = inner_table[i][j][0]
+                col = re.sub("^[^\u4e00-\u9fa5a-zA-Z0-9]+", "", col)
+                col = re.sub("[^\u4e00-\u9fa5a-zA-Z0-9]+$", "", col)
+                inner_table[i][j] = col
 
         # predict table headers with the model
         predict_list = predict(inner_table)
@@ -990,7 +996,7 @@ def tableToText(soup):
                     inner_table[h][w][0] = ""
     
     def trunTable(tbody,in_attachment):
-        # print(tbody.find('tbody'))
+        # print("tbody", tbody.find('tbody'))
         # for tables inside attachments, exclude abnormally garbled tables
         if in_attachment:
             if tbody.name=='table':
@@ -1011,10 +1017,10 @@ def tableToText(soup):
         if len(inner_table)>0 and len(inner_table[0])>0:
             #inner_table,head_list = setHead_withRule(inner_table,pat_head,pat_value,3)
             #inner_table,head_list = setHead_inline(inner_table)
-            inner_table, head_list = setHead_initem(inner_table,pat_head)
-            # inner_table, head_list = set_head_model(inner_table)
+            # inner_table, head_list = setHead_initem(inner_table,pat_head)
+            inner_table, head_list = set_head_model(inner_table)
             # inner_table,head_list = setHead_incontext(inner_table,pat_head)
-            # print(inner_table)
+            # print("table_head", inner_table)
             # for begin in range(len(head_list[:-1])):
             #     for item in inner_table[head_list[begin]:head_list[begin+1]]:
             #         print(item)

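The `set_head_model` hunk above strips leading and trailing characters that are neither CJK, Latin letters nor digits from every cell before handing the table to the head classifier. A minimal, self-contained sketch of that trimming step (the sample cells below are hypothetical):

```python
import re

def clean_cell(text):
    # Mirror of the two regexes added in set_head_model: strip characters that
    # are neither CJK, Latin letters nor digits from the start and end of a cell.
    text = re.sub(r"^[^\u4e00-\u9fa5a-zA-Z0-9]+", "", text)
    text = re.sub(r"[^\u4e00-\u9fa5a-zA-Z0-9]+$", "", text)
    return text

cells = ["【项目名称】:", "★中标金额(元)", "123,456.00元;"]
print([clean_cell(c) for c in cells])
# -> ['项目名称', '中标金额(元', '123,456.00元']
```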
+ 2 - 3
BiddingKG/dl/table_head/models/model.py

@@ -1,10 +1,9 @@
 import sys
 import os
 import numpy as np
+sys.path.append(os.path.abspath(os.path.dirname(__file__)))
 from keras.layers import Lambda, Dense, Reshape, Bidirectional, LSTM, Conv2D, BatchNormalization, LeakyReLU, Masking
 from keras.preprocessing.sequence import pad_sequences
-sys.path.append(os.path.dirname(__file__))
-
 from models.layer_utils import BatchReshape1, BatchReshape2, MyPadding, MySplit, BatchReshape3, \
     BatchReshape4, BatchReshape5, BatchReshape6
 from keras import layers, models, Sequential
@@ -70,7 +69,7 @@ def model_1(input_shape, output_shape):
     model = models.Model(inputs=[input_1, input_2, input_3, input_4, input_5, input_6],
                          outputs=output)
 
-    model.summary()
+    # model.summary()
     return model
 
 

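The model.py change moves the `sys.path` tweak above the Keras imports and switches to an absolute path. A minimal sketch of that import-bootstrap pattern, assuming the package layout shown in the diff (`MODULE_DIR` is an illustrative name):

```python
import os
import sys

# Resolve this file's directory to an absolute path *before* any local imports.
# A relative entry (e.g. when __file__ is relative to the launch directory, or
# after a worker process calls os.chdir) would silently stop resolving imports.
MODULE_DIR = os.path.abspath(os.path.dirname(__file__))
if MODULE_DIR not in sys.path:
    sys.path.append(MODULE_DIR)

# Local imports follow the path tweak (names taken from the diff above):
# from models.layer_utils import BatchReshape1, BatchReshape2  # noqa: E402
```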
+ 3 - 2
BiddingKG/dl/table_head/post_process.py

@@ -1,7 +1,8 @@
 
 
-def table_post_process(table_text_list, predict_result, threshold=0.5):
-    predict_result = predict_result.tolist()
+def table_post_process(table_text_list, predict_result, threshold=0.5, is_list=False):
+    if not is_list:
+        predict_result = predict_result.tolist()
     predict_list = []
     for i in range(0, len(predict_result)):
         predict = predict_result[i][0]

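`table_post_process` now accepts either a NumPy array or a plain Python list via the new `is_list` flag, which is useful when predictions have already been serialised, for example handed back from a worker process. The `post_process` function below is a simplified stand-in showing the calling convention, not the repository function:

```python
import numpy as np

def post_process(predict_result, threshold=0.5, is_list=False):
    # Same convention as the new signature: skip .tolist() when the caller
    # already passes a plain Python list instead of a NumPy array.
    if not is_list:
        predict_result = predict_result.tolist()
    return [int(row[0] > threshold) for row in predict_result]

scores = np.array([[0.92], [0.08]])
print(post_process(scores))                         # NumPy input -> [1, 0]
print(post_process(scores.tolist(), is_list=True))  # list input  -> [1, 0]
```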
+ 15 - 17
BiddingKG/dl/table_head/pre_process.py

@@ -440,24 +440,22 @@ def my_data_loader(data_list, data_label_list, batch_size, is_train=True):
                   {'output': Y}
 
     else:
-        while True:
-            new_data_list = []
-            for j in range(batch_size):
-                if i >= data_num:
-                    i = 0
-
-                # map Chinese characters to embeddings
-                data = data_list[i]
-                data = embedding_word(data, output_shape)
-                if data.shape == output_shape:
-                    new_data_list.append(data)
-                i += 1
+        new_data_list = []
+        for j in range(batch_size):
+            if i >= data_num:
+                i = 0
+            # map Chinese characters to embeddings
+            data = data_list[i]
+            data = embedding_word(data, output_shape)
+            if data.shape == output_shape:
+                new_data_list.append(data)
+            i += 1
 
-            new_data_list = np.array(new_data_list)
-            X = new_data_list
-            X = np.transpose(X, (1, 0, 2, 3))
-            yield {'input_1': X[0], 'input_2': X[1], 'input_3': X[2],
-                   'input_4': X[3], 'input_5': X[4], 'input_6': X[5], }
+        new_data_list = np.array(new_data_list)
+        X = new_data_list
+        X = np.transpose(X, (1, 0, 2, 3))
+        yield {'input_1': X[0], 'input_2': X[1], 'input_3': X[2],
+               'input_4': X[3], 'input_5': X[4], 'input_6': X[5], }
 
 
 def my_data_loader_2(table_list, table_label_list, batch_size, is_train=True):

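In the inference branch of `my_data_loader`, the `while True` wrapper is gone, so the generator now yields a single batch and then stops, which keeps one-shot prediction calls from spinning on an endless generator. A simplified sketch of that one-shot pattern (names and shapes below are illustrative, not the repository's):

```python
import numpy as np

def one_shot_loader(data_list, batch_size, shape=(3, 4)):
    # Build exactly one batch, wrapping around the data if needed, then stop.
    batch, i = [], 0
    for _ in range(batch_size):
        if i >= len(data_list):
            i = 0
        batch.append(np.asarray(data_list[i], dtype="float32").reshape(shape))
        i += 1
    yield {"input_1": np.stack(batch)}

loader = one_shot_loader([np.zeros((3, 4)), np.ones((3, 4))], batch_size=2)
batch = next(loader)   # the single inference batch
# A second next(loader) raises StopIteration, so a call such as
# model.predict_generator(loader, steps=1) terminates cleanly.
```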
File diff suppressed because it is too large
+ 231 - 10
BiddingKG/dl/table_head/predict.py
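The bulk of the commit (+231 −10 in predict.py) is suppressed here, but per the commit message it addresses Keras's known problems when a model is shared across processes. A common fix, shown only as a sketch under the assumption of a TF1-era standalone Keras setup like the imports above suggest (function names and the weights path are hypothetical, not the actual code in this commit), is to load the model lazily inside each worker and pin the graph/session used for inference:

```python
# Per-process, lazily-initialised globals: each worker loads its own model
# instead of inheriting a half-initialised Keras session from the parent.
_model = None
_graph = None
_session = None

def get_model(weights_path):
    global _model, _graph, _session
    if _model is None:
        import tensorflow as tf
        from keras.models import load_model
        _graph = tf.Graph()
        _session = tf.Session(graph=_graph)
        with _graph.as_default(), _session.as_default():
            _model = load_model(weights_path)
    return _model

def predict_in_worker(weights_path, batch):
    model = get_model(weights_path)
    # Inference must run under the same graph/session the model was built in,
    # otherwise TF1-era Keras raises "Tensor is not an element of this graph".
    with _graph.as_default(), _session.as_default():
        return model.predict(batch)
```

If the saved model uses the custom layers from layer_utils, `load_model` would also need them passed through its `custom_objects` argument; that detail is omitted from the sketch.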


Some files were not shown because too many files changed in this diff