Selaa lähdekoodia

cost_time格式化

luojiehua 3 vuotta sitten
vanhempi
commit
1bbdf1cc6f
2 muutettua tiedostoa joissa 14 lisäystä ja 14 poistoa
  1. 2 2
      BiddingKG/dl/interface/Preprocessing.py
  2. 12 12
      BiddingKG/dl/interface/extract.py

+ 2 - 2
BiddingKG/dl/interface/Preprocessing.py

@@ -1506,7 +1506,7 @@ def get_preprocessed_article(articles,cost_time = dict(),useselffool=True):
 
         if key_preprocess not in cost_time:
             cost_time[key_preprocess] = 0
-        cost_time[key_preprocess] += time.time()-start_time
+        cost_time[key_preprocess] += round(time.time()-start_time,2)
 
         #article_processed = article[1]
         _article = Article(doc_id,article_processed,sourceContent,_send_doc_id,_title,
@@ -1633,7 +1633,7 @@ def get_preprocessed_entitys(list_sentences,useselffool=True,cost_time=dict()):
         ner_entitys_all = getNers(sentences,useselffool=useselffool)
         if key_nerToken not in cost_time:
             cost_time[key_nerToken] = 0
-        cost_time[key_nerToken] += time.time()-start_time
+        cost_time[key_nerToken] += round(time.time()-start_time,2)
 
 
         for sentence_index in range(len(list_sentence)):

+ 12 - 12
BiddingKG/dl/interface/extract.py

@@ -41,7 +41,7 @@ class MyEncoder(json.JSONEncoder):
             return obj
         return json.JSONEncoder.default(self, obj)
 
-def predict(doc_id,text,title="",page_time=""):
+def predict(doc_id,text,title="",page_time="",**kwargs):
 
     cost_time = dict()
 
@@ -49,58 +49,58 @@ def predict(doc_id,text,title="",page_time=""):
     log("start process doc %s"%(str(doc_id)))
     list_articles,list_sentences,list_entitys,_cost_time = Preprocessing.get_preprocessed([[doc_id,text,"","",title,page_time]],useselffool=True)
     log("get preprocessed done of doc_id%s"%(doc_id))
-    cost_time["preprocess"] = time.time()-start_time
+    cost_time["preprocess"] = round(time.time()-start_time,2)
     cost_time.update(_cost_time)
 
     start_time = time.time()
     codeName = predictor.getPredictor("codeName").predict(list_sentences,MAX_AREA=5000,list_entitys=list_entitys)
     log("get codename done of doc_id%s"%(doc_id))
-    cost_time["codename"] = time.time()-start_time
+    cost_time["codename"] = round(time.time()-start_time,2)
 
     start_time = time.time()
     predictor.getPredictor("prem").predict(list_sentences,list_entitys)
     log("get prem done of doc_id%s"%(doc_id))
-    cost_time["prem"] = time.time()-start_time
+    cost_time["prem"] = round(time.time()-start_time,2)
 
     start_time = time.time()
     predictor.getPredictor("product").predict(list_sentences,list_entitys)
     log("get product done of doc_id%s"%(doc_id))
-    cost_time["product"] = time.time()-start_time
+    cost_time["product"] = round(time.time()-start_time,2)
 
     start_time = time.time()
     product_attrs = predictor.getPredictor("product_attrs").predict(doc_id, text)
     log("get product attributes done of doc_id%s"%(doc_id))
-    cost_time["product_attrs"] = time.time()-start_time
+    cost_time["product_attrs"] = round(time.time()-start_time,2)
 
     start_time = time.time()
     predictor.getPredictor("roleRule").predict(list_articles,list_sentences, list_entitys,codeName)
-    cost_time["rule"] = time.time()-start_time
+    cost_time["rule"] = round(time.time()-start_time,2)
 
     start_time = time.time()
     predictor.getPredictor("epc").predict(list_sentences,list_entitys)
     log("get epc done of doc_id%s"%(doc_id))
-    cost_time["person"] = time.time()-start_time
+    cost_time["person"] = round(time.time()-start_time,2)
 
     start_time = time.time()
     predictor.getPredictor("time").predict(list_sentences, list_entitys)
     log("get time done of doc_id%s"%(doc_id))
-    cost_time["time"] = time.time()-start_time
+    cost_time["time"] = round(time.time()-start_time,2)
 
     #依赖句子顺序
     start_time = time.time()
     entityLink.link_entitys(list_entitys)
     prem = getAttributes.getPREMs(list_sentences,list_entitys,list_articles)
     log("get attributes done of doc_id%s"%(doc_id))
-    cost_time["attrs"] = time.time()-start_time
+    cost_time["attrs"] = round(time.time()-start_time,2)
 
     #依赖句子顺序
     start_time = time.time()
     list_channel_dic = predictor.getPredictor("channel").predict(title=title, content=list_sentences[0])
-    cost_time["channel"] = time.time()-start_time
+    cost_time["channel"] = round(time.time()-start_time,2)
 
     start_time = time.time()
     list_punish_dic = predictor.getPredictor("punish").get_punish_extracts(list_articles,list_sentences, list_entitys)
-    cost_time["punish"] = time.time()-start_time
+    cost_time["punish"] = round(time.time()-start_time,2)
 
     #print(prem)
     # data_res = Preprocessing.union_result(Preprocessing.union_result(codeName, prem),list_punish_dic)[0]