@@ -52,6 +52,12 @@ def predict(doc_id,text,title="",page_time="",**kwargs):
     cost_time["preprocess"] = round(time.time()-start_time,2)
     cost_time.update(_cost_time)
 
+    for list_entity in list_entitys:
+        for _entity in list_entity:
+            log("type:%s,text:%s,label:%s,values:%s,sentence:%s,begin_index:%s,end_index:%s"%
+                (str(_entity.entity_type),str(_entity.entity_text),str(_entity.label),str(_entity.values),str(_entity.sentence_index),
+                 str(_entity.begin_index),str(_entity.end_index)))
+
     # depends on sentence order
     start_time = time.time()  # announcement type / lifecycle extraction
     list_channel_dic = predictor.getPredictor("channel").predict(title=title, content=list_sentences[0])
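
The block added above is a straight per-entity debug dump. Below is a minimal runnable sketch of it, assuming a stub Entity dataclass and a print-based log() in place of the project's real ones; only the attribute names come from the diff.

# Standalone sketch of the entity dump; Entity and log() are
# stand-ins (assumptions), not the project's implementations.
from dataclasses import dataclass, field

def log(msg):
    print(msg)  # stand-in for the project's log()

@dataclass
class Entity:  # hypothetical stub mirroring the attributes the dump reads
    entity_type: str
    entity_text: str
    label: int
    values: list = field(default_factory=list)
    sentence_index: int = 0
    begin_index: int = 0
    end_index: int = 0

list_entitys = [[Entity("org", "ACME Corp", 1, [0.97], 0, 3, 5)]]

for list_entity in list_entitys:
    for _entity in list_entity:
        log("type:%s,text:%s,label:%s,values:%s,sentence:%s,begin_index:%s,end_index:%s" %
            (str(_entity.entity_type), str(_entity.entity_text), str(_entity.label),
             str(_entity.values), str(_entity.sentence_index),
             str(_entity.begin_index), str(_entity.end_index)))

Each entity prints as one comma-separated "key:value" line, which keeps the dump greppable by field name.
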
@@ -142,14 +148,14 @@ def predict(doc_id,text,title="",page_time="",**kwargs):
     data_res["cost_time"] = cost_time
     data_res["success"] = True
 
-    # for _article in list_articles:
-    #     log(_article.content)
-    #
-    # for list_entity in list_entitys:
-    #     for _entity in list_entity:
-    #         log("type:%s,text:%s,label:%s,values:%s,sentence:%s,begin_index:%s,end_index:%s"%
-    #             (str(_entity.entity_type),str(_entity.entity_text),str(_entity.label),str(_entity.values),str(_entity.sentence_index),
-    #             str(_entity.begin_index),str(_entity.end_index)))
+    for _article in list_articles:
+        log(_article.content)
+
+    for list_entity in list_entitys:
+        for _entity in list_entity:
+            log("type:%s,text:%s,label:%s,values:%s,sentence:%s,begin_index:%s,end_index:%s"%
+                (str(_entity.entity_type),str(_entity.entity_text),str(_entity.label),str(_entity.values),str(_entity.sentence_index),
+                 str(_entity.begin_index),str(_entity.end_index)))
 
     return json.dumps(data_res,cls=MyEncoder,sort_keys=True,indent=4,ensure_ascii=False)
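
The response is serialized with a custom encoder. Below is a minimal sketch of how a MyEncoder-style json.JSONEncoder subclass plugs into json.dumps; the fall-back-to-str() behavior is an assumption, not the project's actual implementation. Note that ensure_ascii=False keeps non-ASCII text (e.g. Chinese announcement content) readable in the output.

# Sketch of a MyEncoder-style custom encoder; the str() fallback
# is an assumption about the real class, not its actual code.
import json

class MyEncoder(json.JSONEncoder):
    def default(self, obj):
        try:
            # Defer to the base encoder first.
            return super().default(obj)
        except TypeError:
            # Fall back to str() for values json cannot serialize
            # natively (e.g. numpy scalars, datetimes).
            return str(obj)

data_res = {"success": True, "cost_time": {"preprocess": 0.12}}
print(json.dumps(data_res, cls=MyEncoder, sort_keys=True, indent=4, ensure_ascii=False))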