# coding: UTF8
# Module-level imports: getSet/getDiffIndex use re and featurnCount uses math,
# so the module-level helpers must not rely on a UDAF __init__ having injected
# these names via `global` first.
import logging
import json
import math
import re
from odps.udf import annotate
from odps.udf import BaseUDAF
from odps.udf import BaseUDTF
def getSet(list_dict, key):
    """Collect the distinct non-empty values of `key`; numeric strings are
    normalized through float() so that e.g. "12" and "12.0" compare equal."""
    _set = set()
    for item in list_dict:
        if key in item:
            if item[key] is not None and item[key] != '':
                if re.search(r"^[\d\.]+$", item[key]) is not None:
                    _set.add(str(float(item[key])))
                else:
                    _set.add(str(item[key]))
    return _set
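# Worked example: getSet([{"a": "12"}, {"a": "12.0"}, {"a": "x"}], "a")
# returns {"12.0", "x"}; "12" and "12.0" collapse to one value via float().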
def split_with_time(list_dict, sort_key, timedelta=86400*120):
    """Sort by `sort_key` and split into groups wherever two consecutive
    records are more than `timedelta` seconds apart; groups with fewer than
    two records are discarded."""
    if len(list_dict) > 0:
        if sort_key in list_dict[0]:
            list_dict.sort(key=lambda x: x[sort_key])
            list_group = []
            _begin = 0
            for i in range(len(list_dict) - 1):
                if abs(list_dict[i][sort_key] - list_dict[i+1][sort_key]) < timedelta:
                    continue
                else:
                    _group = []
                    for j in range(_begin, i + 1):
                        _group.append(list_dict[j])
                    if len(_group) > 1:
                        list_group.append(_group)
                    _begin = i + 1
            if len(list_dict) > 1:
                _group = []
                for j in range(_begin, len(list_dict)):
                    _group.append(list_dict[j])
                if len(_group) > 1:
                    list_group.append(_group)
            return list_group
    return [list_dict]
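# Worked example with the default 120-day window:
#   split_with_time([{"t": 0}, {"t": 1000}, {"t": 86400 * 300}], "t")
#   -> [[{"t": 0}, {"t": 1000}]]
# The far-away third record would form a singleton group, which is dropped.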
@annotate('bigint,bigint,string,string,string,string,string,string,bigint->string')
class f_merge_rule_limit_num_contain_greater(BaseUDAF):
    '''
    Merge rule: project code and winning bidder must agree (len(project code) > 7,
    winning bidder <> ""), a merged group keeps fewer than 2 distinct non-empty
    tenderees, and non-empty amounts of the same announcement type must match.
    '''
    def __init__(self):
        import logging
        import json,re
        global json,logging,re
        logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    def new_buffer(self):
        return [list()]
    def iterate(self, buffer, docid, page_time_stamp, set_limit_column1, set_limit_column2, set_limit_column3, set_limit_column4, contain_column, greater_column, MAX_NUM):
        buffer[0].append({"docid":docid, "page_time_stamp":page_time_stamp, "set_limit_column1":set_limit_column1,
                          "set_limit_column2":set_limit_column2, "set_limit_column3":set_limit_column3, "set_limit_column4":set_limit_column4,
                          "contain_column":contain_column, "greater_column":greater_column, "MAX_NUM":MAX_NUM})
    def merge(self, buffer, pbuffer):
        buffer[0].extend(pbuffer[0])
    def terminate(self, buffer):
        MAX_NUM = 5
        if len(buffer[0]) > 0:
            MAX_NUM = buffer[0][0]["MAX_NUM"]
        list_split = split_with_time(buffer[0], "page_time_stamp")
        list_group = []
        for _split in list_split:
            flag = True
            keys = ["set_limit_column1", "set_limit_column2", "set_limit_column3", "set_limit_column4"]
            if len(_split) > MAX_NUM:
                flag = False
            else:
                for _key in keys:
                    logging.info(_key + str(getSet(_split, _key)))
                    if len(getSet(_split, _key)) > 1:
                        flag = False
                        break
            MAX_CONTAIN_COLUMN = None
            # check whether each announcement in the group is contained in the longest one
            if flag:
                for _d in _split:
                    contain_column = _d["contain_column"]
                    if contain_column is not None and contain_column != "":
                        if MAX_CONTAIN_COLUMN is None:
                            MAX_CONTAIN_COLUMN = contain_column
                        else:
                            if len(MAX_CONTAIN_COLUMN) < len(contain_column):
                                if contain_column.find(MAX_CONTAIN_COLUMN) == -1:
                                    flag = False
                                    break
                                MAX_CONTAIN_COLUMN = contain_column
                            else:
                                if MAX_CONTAIN_COLUMN.find(contain_column) == -1:
                                    flag = False
                                    break
                    if len(getSet(_split, "greater_column")) == 1:
                        flag = False
                        break
            if flag:
                _set_docid = set()
                for item in _split:
                    _set_docid.add(item["docid"])
                if len(_set_docid) > 1:
                    list_group.append(list(_set_docid))
        return json.dumps(list_group)
def getDiffIndex(list_dict, key):
    """Return the index of the first record at which `key` takes a second
    distinct non-empty value, or len(list_dict) if the values never diverge."""
    _set = set()
    for _i in range(len(list_dict)):
        item = list_dict[_i]
        if key in item:
            if item[key] is not None and item[key] != '':
                if re.search(r"^\d[\d\.]*$", item[key]) is not None:
                    _set.add(str(float(item[key])))
                else:
                    _set.add(str(item[key]))
        if len(_set) > 1:
            return _i
    return len(list_dict)
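# Worked example: getDiffIndex([{"v": "5"}, {"v": "5.0"}, {"v": "6"}], "v")
# returns 2: the first two rows normalize to "5.0", and index 2 introduces a
# second distinct value, so rows [0:2] form the largest consistent prefix.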
@annotate('bigint,bigint,string,string,string,string,string,string,string,bigint->string')
class f_remege_limit_num_contain(BaseUDAF):
    '''
    Re-merge rule: project code and winning bidder must agree (len(project code) > 7,
    winning bidder <> ""), a merged group keeps fewer than 2 distinct non-empty
    tenderees, and non-empty amounts of the same announcement type must match.
    '''
    def __init__(self):
        import logging
        import json,re
        global json,logging,re
        logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    def new_buffer(self):
        return [list()]
    def iterate(self, buffer, docid, page_time_stamp, set_limit_column1, set_limit_column2, set_limit_column3, set_limit_column4, contain_column1, contain_column2, notLike_column, confidence):
        buffer[0].append({"docid":docid, "page_time_stamp":page_time_stamp, "set_limit_column1":set_limit_column1,
                          "set_limit_column2":set_limit_column2, "set_limit_column3":set_limit_column3, "set_limit_column4":set_limit_column4,
                          "contain_column1":contain_column1, "contain_column2":contain_column2, "notLike_column":notLike_column, "confidence":confidence})
    def merge(self, buffer, pbuffer):
        buffer[0].extend(pbuffer[0])
    def getNotLikeSet(self, _dict, column_name):
        # build the set of character bigrams of the column value
        column_value = _dict.get(column_name, None)
        _set = set()
        if column_value is not None:
            for _i in range(1, len(column_value)):
                _set.add(column_value[_i-1:_i+1])
        _dict["notLike_set"] = _set
    def getSimilarity(self, _set1, _set2):
        # bigram overlap over the smaller set; float() avoids integer division on Python 2
        _sum = max([1, min([len(_set1), len(_set2)])])
        return len(_set1 & _set2) / float(_sum)
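    # Worked example: for "ABC" vs "ABD" the bigram sets are {"AB", "BC"} and
    # {"AB", "BD"}, so the similarity is 1/2 = 0.5.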
    def terminate(self, buffer):
        list_group = []
        the_group = buffer[0]
        SIM_PROB = 0.6
        for _d in the_group:
            self.getNotLikeSet(_d, "notLike_column")
        # check whether any limit column carries more than one distinct value
        keys = ["set_limit_column1", "set_limit_column2", "set_limit_column3", "set_limit_column4"]
        re_merge = False
        for _key in keys:
            if len(getSet(the_group, _key)) > 1:
                re_merge = True
                break
        # check for values that are similar but not identical
        re_merge_sim = False
        for _i1 in range(0, len(the_group)):
            for _j1 in range(_i1 + 1, len(the_group)):
                _set1 = the_group[_i1]["notLike_set"]
                _set2 = the_group[_j1]["notLike_set"]
                _sim = self.getSimilarity(_set1, _set2)
                if _sim > SIM_PROB and _sim < 1:
                    re_merge_sim = True
                    break
        contain_keys = ["contain_column1", "contain_column2"]
        logging.info(the_group)
        if re_merge or re_merge_sim:
            the_group.sort(key=lambda x: x["confidence"], reverse=True)
            the_group.sort(key=lambda x: x["page_time_stamp"])
            # regroup
            dict_docid_doc = {}
            for _doc in the_group:
                dict_docid_doc[_doc["docid"]] = _doc
            for _doc in the_group:
                merge_flag = False
                for _index in range(len(list_group)):
                    _g = list_group[_index]
                    hit_count = 0
                    dict_temp = dict()
                    # anomaly: a limit column has multiple distinct values
                    if re_merge:
                        for _c_key in contain_keys:
                            dict_temp[_c_key] = _g[_c_key]
                            if _g[_c_key] is not None and _doc[_c_key] is not None:
                                if len(_g[_c_key]) > len(_doc[_c_key]):
                                    if str(_g[_c_key]).find(str(_doc[_c_key])) >= 0:
                                        dict_temp[_c_key] = _g[_c_key]
                                        hit_count += 1
                                else:
                                    if str(_doc[_c_key]).find(str(_g[_c_key])) >= 0:
                                        dict_temp[_c_key] = _doc[_c_key]
                                        _g[_c_key] = _doc[_c_key]
                                        hit_count += 1
                    else:
                        hit_count = 1
                    # if hit_count==len(contain_keys):
                    if hit_count > 0:
                        _flag_sim = False
                        # anomaly: values are similar but not identical
                        if re_merge_sim:
                            for _docid in _g["docid"]:
                                tmp_d = dict_docid_doc[_docid]
                                _sim = self.getSimilarity(tmp_d["notLike_set"], _doc["notLike_set"])
                                if _sim > SIM_PROB and _sim < 1:
                                    _flag_sim = True
                        if not _flag_sim:
                            for _c_key in dict_temp.keys():
                                _g[_c_key] = dict_temp[_c_key]
                            _g["docid"].append(_doc["docid"])
                            merge_flag = True
                            break
                if not merge_flag:
                    _dict = dict()
                    _dict["docid"] = [_doc["docid"]]
                    for _c_key in contain_keys:
                        _dict[_c_key] = _doc[_c_key]
                    list_group.append(_dict)
            final_group = []
            # check that every limit column converges to a single value
            for _group in list_group:
                _split = []
                for _docid in _group["docid"]:
                    _split.append(dict_docid_doc[_docid])
                # sort by confidence so that as much of the group as possible is kept
                _split.sort(key=lambda x: x["confidence"], reverse=True)
                list_key_index = []
                for _k in keys:
                    list_key_index.append(getDiffIndex(_split, _k))
                _index = min(list_key_index)
                final_group.append([_c["docid"] for _c in _split[:_index]])
                for _c in _split[_index:]:
                    final_group.append([_c["docid"]])
                # Previous strategy: if any key had more than one distinct value,
                # every docid became its own group; otherwise one group was kept.
                # _flag = True
                # for _key in keys:
                #     if len(getSet(_split,_key))>1:
                #         _flag = False
                #         break
                # if not _flag:
                #     for _docid in _group["docid"]:
                #         final_group.append([_docid])
                # else:
                #     final_group.append(list(set(_group["docid"])))
        else:
            final_group = [list(set([item["docid"] for item in the_group]))]
        return json.dumps(final_group)
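# The JSON returned above is a list of docid groups, e.g. [[101, 102], [103]]
# (docids illustrative): records that survive the contain/similarity checks
# stay grouped, and the rest fall out as singleton groups.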
@annotate('string -> string')
class f_get_remerge_group(BaseUDTF):
    '''
    Unpack multiple groups into one record per group
    '''
    def __init__(self):
        import logging
        import json
        global json,logging
        logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    def process(self, json_remerge):
        if json_remerge is not None:
            list_group = json.loads(json_remerge)
            for _group in list_group:
                l_g = list(set(_group))
                l_g.sort()
                list_docid = [str(_docid) for _docid in l_g]
                self.forward(",".join(list_docid))
@annotate('bigint,bigint,string->string')
class f_merge_probability(BaseUDAF):
    '''
    Merge a group into a single record
    '''
    def __init__(self):
        import json
        global json
    def new_buffer(self):
        return [[]]
    def iterate(self, buffer, docid, page_time_stamp, _type):
        buffer[0].append({"docid":docid, "page_time_stamp":page_time_stamp, "type":_type})
    def merge(self, buffer, pbuffer):
        buffer[0].extend(pbuffer[0])
    def terminate(self, buffer):
        list_dict = buffer[0]
        list_dict = list_dict[:10000]  # cap the buffer to bound terminate() cost
        list_group = split_with_time(list_dict, sort_key="page_time_stamp", timedelta=86400*120)
        return json.dumps(list_group)
@annotate('string -> bigint,bigint,bigint,bigint,string')
class f_split_merge_probability(BaseUDTF):
    def __init__(self):
        import logging
        import json
        global logging,json
        logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    def process(self, list_group_str):
        logging.info(list_group_str)
        if list_group_str is not None:
            try:
                list_group = json.loads(list_group_str)
                for _group in list_group:
                    if len(_group) > 0:
                        _type = _group[0].get("type", "")
                        logging.info("groups:%d" % len(list_group))
                        # _group.sort(key=lambda x:x["page_time_stamp"])
                        _len = min(100, len(_group))
                        for _index_i in range(_len):
                            _count = 0
                            for _index_j in range(_index_i + 1, _len):
                                if abs(_group[_index_j]["page_time_stamp"] - _group[_index_i]["page_time_stamp"]) > 86400*120:
                                    break
                                _count += 1
                                _docid1 = _group[_index_i]["docid"]
                                _docid2 = _group[_index_j]["docid"]
                                if _docid1 < _docid2:
                                    self.forward(_docid1, _docid2, 1, _len, _type)
                                else:
                                    self.forward(_docid2, _docid1, 1, _len, _type)
            except Exception as e:
                logging.error(str(e))
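# Illustrative run (hypothetical docids): for one group
# [{"docid": 1, "page_time_stamp": 0, "type": "t"}, {"docid": 2, "page_time_stamp": 100, "type": "t"}]
# the UDTF forwards (1, 2, 1, 2, "t"): one ordered candidate pair per pair of
# records whose timestamps lie within the 120-day window.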
@annotate('bigint,bigint,string->string')
class f_merge_groupPairs(BaseUDAF):
    '''
    Merge the pair rows of a group into a single JSON record
    '''
    def __init__(self):
        import json
        global json
    def new_buffer(self):
        return [[]]
    def iterate(self, buffer, is_exists, counts, _type):
        buffer[0].append({"is_exists":is_exists, "counts":counts, "_type":_type})
    def merge(self, buffer, pbuffer):
        buffer[0].extend(pbuffer[0])
    def terminate(self, buffer):
        list_dict = buffer[0]
        list_dict = list_dict[:10000]  # cap the buffer size
        return json.dumps(list_dict)
- @annotate("string -> bigint,bigint,bigint")
- class f_merge_getLabel(BaseUDTF):
- def __init__(self):
- import logging
- import json
- global logging,json
- def process(self,str_docids):
- if str_docids is not None:
- list_docids = [int(i) for i in str_docids.split(",")]
- list_docids.sort(key=lambda x:x)
- _len = min(100,len(list_docids))
- for index_i in range(_len):
- docid_less = list_docids[index_i]
- for index_j in range(index_i+1,_len):
- docid_greater = list_docids[index_j]
- self.forward(docid_less,docid_greater,1)
def getSimilarityOfString(str1, str2):
    """Character-bigram overlap ratio of two strings (0 when either has no bigrams)."""
    _set1 = set()
    _set2 = set()
    if str1 is not None:
        for i in range(1, len(str1)):
            _set1.add(str1[i-1:i+1])
    if str2 is not None:
        for i in range(1, len(str2)):
            _set2.add(str2[i-1:i+1])
    _len = max(1, min(len(_set1), len(_set2)))
    # float() avoids integer division on Python 2
    return len(_set1 & _set2) / float(_len)
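# Worked example: "ABCD" vs "ABCE" gives bigram sets {AB, BC, CD} and
# {AB, BC, CE}; the overlap is 2 and the smaller set has 3 elements, so the
# similarity is 2/3.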
def check_columns(tenderee_less, tenderee_greater,
                  agency_less, agency_greater, project_code_less, project_code_greater, project_name_less, project_name_greater,
                  win_tenderer_less, win_tenderer_greater, win_bid_price_less, win_bid_price_greater,
                  bidding_budget_less, bidding_budget_greater, doctitle_refine_less, doctitle_refine_greater):
    """Pairwise consistency gate: reject when tenderee, winning bidder, winning
    price or budget conflict, or when the project codes are similar but not
    identical."""
    _set_tenderee = set()
    if tenderee_less is not None and tenderee_less != "":
        _set_tenderee.add(tenderee_less)
    if tenderee_greater is not None and tenderee_greater != "":
        _set_tenderee.add(tenderee_greater)
    if len(_set_tenderee) > 1:
        return False
    code_sim = getSimilarityOfString(project_code_less, project_code_greater)
    if code_sim > 0.6 and code_sim < 1:
        return False
    _set_win_tenderer = set()
    if win_tenderer_less is not None and win_tenderer_less != "":
        _set_win_tenderer.add(win_tenderer_less)
    if win_tenderer_greater is not None and win_tenderer_greater != "":
        _set_win_tenderer.add(win_tenderer_greater)
    if len(_set_win_tenderer) > 1:
        return False
    _set_win_bid_price = set()
    if win_bid_price_less is not None and win_bid_price_less != "":
        _set_win_bid_price.add(win_bid_price_less)
    if win_bid_price_greater is not None and win_bid_price_greater != "":
        _set_win_bid_price.add(win_bid_price_greater)
    if len(_set_win_bid_price) > 1:
        return False
    _set_bidding_budget = set()
    if bidding_budget_less is not None and bidding_budget_less != "":
        _set_bidding_budget.add(bidding_budget_less)
    if bidding_budget_greater is not None and bidding_budget_greater != "":
        _set_bidding_budget.add(bidding_budget_greater)
    if len(_set_bidding_budget) > 1:
        return False
    return True
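# Illustrative behavior: a pair whose project codes score 0.8 on
# getSimilarityOfString (similar but not identical) is rejected, while a pair
# that differs only by an empty tenderee on one side passes the gate.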
def getSimLevel(str1, str2):
    str1_null = False
    str2_null = False
    _v = 0
    if str1 is None or str1 == "":
        str1_null = True
    if str2 is None or str2 == "":
        str2_null = True
    if str1_null and str2_null:
        _v = 2
    elif str1_null and not str2_null:
        _v = 4
    elif not str1_null and str2_null:
        _v = 6
    elif not str1_null and not str2_null:
        if str1 == str2:
            _v = 10
        else:
            _v = 0
    return _v
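# Level encoding: both empty -> 2, only str1 empty -> 4, only str2 empty -> 6,
# equal non-empty -> 10, different non-empty -> 0; callers divide by 10 to get
# a feature in [0, 1].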
def featurnCount(_count, max_count=100):
    # 0 when _count == 0, 1 when _count == 1, then decays as 1/sqrt(_count-1);
    # max_count is currently unused
    return max(0, min(1, _count)) * (1 / math.sqrt(max(1, _count - 1)))
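# Worked values: featurnCount(0) = 0, featurnCount(1) = 1,
# featurnCount(5) = 1/sqrt(4) = 0.5 - heavier co-occurrence counts are damped
# rather than growing linearly.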
- @annotate("string,string,string,string,string,string,string,string,string,string,string,string,string,string,string,string,string->string")
- class f_merge_featureMatrix(BaseUDTF):
- def __init__(self):
- import logging
- import json
- global logging,json
- def process(self,json_context,tenderee_less,tenderee_greater,
- agency_less,agency_greater,project_code_less,project_code_greater,project_name_less,project_name_greater,
- win_tenderer_less,win_tenderer_greater,win_bid_price_less,win_bid_price_greater,
- bidding_budget_less,bidding_budget_greater,doctitle_refine_less,doctitle_refine_greater):
- # if not check_columns(tenderee_less,tenderee_greater,
- # agency_less,agency_greater,project_code_less,project_code_greater,project_name_less,project_name_greater,
- # win_tenderer_less,win_tenderer_greater,win_bid_price_less,win_bid_price_greater,
- # bidding_budget_less,bidding_budget_greater,doctitle_refine_less,doctitle_refine_greater)
- # return
- _context = json.loads(json_context)
- dict_context = {}
- for item in _context:
- dict_context[item["_type"]] = [item["is_exists"],item["counts"]]
- context_key = ["tenderee","agency","project_code","project_name","win_tenderer","win_bid_price","bidding_budget","doctitle_refine"]
- list_matrix = []
- for index_i in range(len(context_key)):
- for index_j in range(index_i+1,len(context_key)):
- _key = "%s&%s"%(context_key[index_i],context_key[index_j])
- _v = featurnCount(dict_context.get(_key,[0,0])[1])
- list_matrix.append(_v)
- context3_key = ["tenderee","agency","win_tenderer","win_bid_price","bidding_budget"]
- for index_i in range(len(context3_key)):
- for index_j in range(index_i+1,len(context3_key)):
- for index_k in range(index_j+1,len(context3_key)):
- _key = "%s&%s&%s"%(context3_key[index_i],context3_key[index_j],context3_key[index_k])
- _v = featurnCount(dict_context.get(_key,[0,0])[1])
- list_matrix.append(_v)
- list_matrix.append(getSimLevel(tenderee_less,tenderee_greater)/10)
- list_matrix.append(getSimLevel(agency_less,agency_greater)/10)
- list_matrix.append(getSimilarityOfString(project_code_less,project_code_greater))
- list_matrix.append(getSimilarityOfString(project_name_less,project_name_greater))
- list_matrix.append(getSimLevel(win_tenderer_less,win_tenderer_greater)/10)
- list_matrix.append(getSimLevel(win_bid_price_less,win_bid_price_greater)/10)
- list_matrix.append(getSimLevel(bidding_budget_less,bidding_budget_greater)/10)
- list_matrix.append(getSimilarityOfString(doctitle_refine_less,doctitle_refine_greater))
- self.forward(json.dumps(list_matrix))
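# Length of the emitted feature vector: C(8,2) = 28 pairwise counts
# + C(5,3) = 10 triple counts + 8 direct similarity features = 46 entries.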
@annotate('string -> bigint,bigint')
class f_check_remerge(BaseUDTF):
    '''
    Unpack groups into one (group id, docid) record per member
    '''
    def __init__(self):
        import logging
        import json
        global json,logging
        logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    def process(self, json_remerge):
        if json_remerge is not None:
            list_group = json.loads(json_remerge)
            for _group in list_group:
                for _docid in _group:
                    self.forward(_group[-1], _docid)
def getConfidence(rule_id):
    if rule_id >= 1 and rule_id <= 20:
        return 30
    elif rule_id >= 31 and rule_id <= 50:
        return 20
    else:
        return 10
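# Worked mapping: rule_id 1-20 -> 30, 31-50 -> 20, everything else (including
# the unassigned 21-30 band) -> 10.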
@annotate('string,bigint -> bigint,bigint,bigint')
class f_arrange_group_single(BaseUDTF):
    '''
    Unpack groups into one record per ordered docid pair
    '''
    def __init__(self):
        import logging
        import json
        global json,logging
        logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    def process(self, json_set_docid, rule_id):
        if json_set_docid is not None:
            list_group = json.loads(json_set_docid)
            for _group in list_group:
                for index_i in range(len(_group)):
                    for index_j in range(len(_group)):
                        # if index_i!=index_j and _group[index_i]!=_group[index_j]:
                        if index_i != index_j:
                            self.forward(_group[index_i], _group[index_j], getConfidence(rule_id))
@annotate('bigint,bigint->string')
class f_get_merge_docids(BaseUDAF):
    '''
    Merge docid pairs into a single comma-separated record
    '''
    def __init__(self):
        import json
        global json
    def new_buffer(self):
        return [set()]
    def iterate(self, buffer, docid1, docid2):
        buffer[0].add(docid1)
        buffer[0].add(docid2)
    def merge(self, buffer, pbuffer):
        buffer[0] |= pbuffer[0]
    def terminate(self, buffer):
        list_docid = list(buffer[0])
        list_docid.sort()
        return ",".join([str(_docid) for _docid in list_docid])