#coding:UTF8
import sys
import os
sys.path.append("../")
import pandas as pd
from dataSource.source import *
import json
from utils.multiThread import MultiThreadHandler
import queue
from utils.Utils import *
from dataSource.pool import ConnectorPool
import re
from tablestore import *
import traceback

data_path = "../data/"

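# Ad-hoc export scripts: pull company data out of Elasticsearch, MongoDB,
# Neo4j, Tablestore (OTS) and MySQL, and dump the results to Excel/CSV/text
# files for manual follow-up.
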
def getCompanys():
    """Query Elasticsearch for companies whose names contain any of the keywords."""
    list_company = []
    keywords = ["环境","生态","再生","回收","环保"]
    provinces = ["广东"]
    for _name in keywords:
        for _prov in provinces:
            # province and zhongBiaoNumber filters were disabled here, so _prov is
            # currently unused; note that a "size" this large also requires the
            # index's max_result_window to be raised beyond the Elasticsearch default.
            data = make_elasticSearch({
                "query": {
                    "bool": {
                        "must": [
                            {
                                "wildcard": {
                                    "name.keyword": "*%s*"%_name
                                }
                            }
                        ],
                        "must_not": [],
                        "should": []
                    }
                },
                "from": 0,
                "size": 1000000,
                "sort": [],
                "aggs": {}
            })
            print("--",data["hits"]["total"])
            for item in data["hits"]["hits"]:
                _company = {"enterprise_name":"","regCapital":"","legal_person":"","phone":"","industry":"","province":""}
                _company["enterprise_name"] = item["_source"].get("name","")
                _company["regCapital"] = item["_source"].get("regCapital","")
                _company["zhongBiaoNumber"] = item["_source"].get("zhongBiaoNumber","0")
                list_company.append(_company)
    print(len(list_company))
    return list_company

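# Usage sketch (assuming the dataSource connectors are configured):
#   companies = getCompanys()
#   # -> [{"enterprise_name": "...", "regCapital": "...", "zhongBiaoNumber": "0", ...}, ...]
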
def exportFactory():
    """Enrich companies from an Excel sheet with MongoDB profile fields and a Neo4j bid count, then export."""
    def _handle(item,result_queue,pool_mongo,pool_neo4j):
        company_name = item["enterprise_name"]
        mongo = pool_mongo.getConnector()
        coll_zb = mongo.enterprise_profile
        rows = coll_zb.find({"enterprise_name":item["enterprise_name"]},
                            {"enterprise_name":1,"actualCapital":1,"estiblishTime":1,"legal_person":1,"phone":1})
        _flag = False
        for row in rows:
            actualCapital = row.get("actualCapital","0")
            estiblishTime = row.get("estiblishTime","2020-01-01")
            _capital = re.match(r"\d+[亿万]+",actualCapital)
            # filters on capital (>5000万) and estiblishTime (<=2015-10-09) were disabled here
            item["legal_person"] = row.get("legal_person","")
            item["phone"] = row.get("phone","")
            item["actualCapital"] = actualCapital
            item["estiblishTime"] = row.get("estiblishTime","")
            _flag = True
            break
        # count the company's winning bids in Neo4j before queueing the result
        cql = "MATCH (n:Organization)-[r:ZhongBiaoRelation]->(p:Project) where n.name='%s' RETURN count(p) as _c "%(company_name)
        graph = pool_neo4j.getConnector()
        finded = graph.run(cql)
        data = json.loads(json.dumps(finded.data()))
        item["count"] = data[0]["_c"]
        if _flag:
            result_queue.put(item)
        pool_mongo.putConnector(mongo)
        pool_neo4j.putConnector(graph)

    # list_company = getCompanys()
    list_company = []
    filename = "../data/天眼查1(1).xlsx"
    df1 = pd.read_excel(filename)
    for item in df1["公司名称"]:
        list_company.append({"enterprise_name":item,"regCapital":"","legal_person":"","phone":"","industry":"","province":""})
    task_queue = queue.Queue()
    result_queue = queue.Queue()
    for item in list_company:
        task_queue.put(item)
    pool_mongo = ConnectorPool(init_num=10,max_num=50,method_init=getConnect_mongodb)
    pool_neo4j = ConnectorPool(init_num=10,max_num=50,method_init=getConnect_neo4j)
    _mult = MultiThreadHandler(task_queue=task_queue,task_handler=_handle,result_queue=result_queue,thread_count=70,pool_mongo=pool_mongo,pool_neo4j=pool_neo4j)
    _mult.run()
    list_name = []
    list_actualCapital = []
    list_estiblishTime = []
    list_legal_person = []
    list_phone = []
    list_zb = []
    while True:
        try:
            item = result_queue.get(False)
            list_name.append(item["enterprise_name"])
            list_actualCapital.append(item["actualCapital"])
            list_estiblishTime.append(item["estiblishTime"])
            list_legal_person.append(item["legal_person"])
            list_phone.append(item["phone"])
            list_zb.append(item["count"])
        except queue.Empty:
            break
    df = pd.DataFrame({"公司":list_name,"实缴":list_actualCapital,
                       "注册时间":list_estiblishTime,"联系人":list_legal_person,"联系电话":list_phone,
                       "中标次数":list_zb})
    df.to_excel("%s_export.xlsx"%filename,columns=["公司","实缴","注册时间","联系人","联系电话","中标次数"])

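# Pattern used throughout this module: fill a queue.Queue with task dicts, let
# MultiThreadHandler fan them out to the _handle callback across a thread pool
# (connector pools ride along as keyword arguments), then drain result_queue
# into parallel lists and write them out with pandas.
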
def deal():
    """Attach up to three post-2019 winning-bid projects from Neo4j to each company and re-export."""
    def _handle(item,result_queue):
        graph = getConnect_neo4j()
        company_name = item["enterprise_name"]
        cql = "MATCH (n:Organization)-[r:ZhongBiaoRelation]->(p:Project) where n.name='%s' RETURN p.zhong_biao_page_time as zhong_biao_page_time,p.project_name as project_name order by p.zhong_biao_page_time desc limit 3"%(company_name)
        finded = graph.run(cql)
        data = json.loads(json.dumps(finded.data()))
        _count = 1
        list_project = []
        for _data in data:
            if _count<=3:
                if "zhong_biao_page_time" in _data and _data["zhong_biao_page_time"]>"2019-01-01":
                    list_project.append(_data["project_name"])
                    _count += 1
        item["project"] = str(list_project)
        result_queue.put(item)

    file = "../data/北京行业_export.xls"
    df = pd.read_excel(file)
    list_company = []
    for _company,rep,industry,project,count,person,phone in zip(df["公司名字"],df["注册资金"],df["行业"],df["中标项目"],df["中标次数"],df["联系人"],df["联系电话"]):
        list_company.append({"enterprise_name":_company,"regCapital":rep,"legal_person":person,"phone":phone,"industry":industry,"province":"","count":count})
    task_queue = queue.Queue()
    result_queue = queue.Queue()
    for item in list_company:
        task_queue.put(item)
    _mult = MultiThreadHandler(task_queue=task_queue,task_handler=_handle,result_queue=result_queue,thread_count=30)
    _mult.run()
    list_name = []
    list_regCapital = []
    list_industry = []
    list_count = []
    list_person = []
    list_phone = []
    list_project = []
    while True:
        try:
            _result = result_queue.get(False)
            list_name.append(_result["enterprise_name"])
            list_regCapital.append(_result["regCapital"])
            list_industry.append(_result["industry"])
            list_count.append(_result["count"])
            list_person.append(_result["legal_person"])
            list_phone.append(_result["phone"])
            list_project.append(_result["project"])
        except queue.Empty:
            break
    df1 = pd.DataFrame({"公司名字":list_name,"注册资金":list_regCapital,"行业":list_industry,"中标项目":list_project,"中标次数":list_count,"联系人":list_person,"联系电话":list_phone})
    df1.to_excel("%s_export1.xls"%("北京行业"),columns=["公司名字","注册资金","行业","中标项目","中标次数","联系人","联系电话"])

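# The CQL above interpolates company names straight into the query string, so a
# name containing a quote will break it. If this project's Neo4j wrapper accepts
# parameters (a py2neo-style graph.run(cql, name=company_name) is a sketch, not
# verified against the wrapper), "where n.name=$name" would be the safer form.
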
def deal1():
    """Count post-2019 winning bids, sample up to three projects, and total the bid prices per company."""
    def _handle(item,result_queue):
        graph = getConnect_neo4j()
        company_name = item["enterprise_name"]
        cql = "MATCH (n:Organization)-[r:ZhongBiaoRelation]->(p:Project) where n.name='%s' RETURN p.zhong_biao_page_time as zhong_biao_page_time,p.project_name as project_name order by p.zhong_biao_page_time desc "%(company_name)
        finded = graph.run(cql)
        data = json.loads(json.dumps(finded.data()))
        _count = 0
        list_project = []
        for _data in data:
            if _count<=2:
                if "zhong_biao_page_time" in _data and _data["zhong_biao_page_time"]>"2019-01-01":
                    list_project.append(_data["project_name"])
                    _count += 1
        item["count"] = _count
        item["project"] = str(list_project)
        # total the prices across all of the company's winning bids
        cql = "MATCH (n:Organization)-[r:ZhongBiaoRelation]->(p:Project) where n.name='%s' RETURN r.price"%(company_name)
        print(cql)
        finded = graph.run(cql)
        finded_money = json.loads(json.dumps(finded.data()))
        whole_money = 0
        for _item in finded_money:
            if _item["r.price"] is not None:
                whole_money += getUnifyMoney(_item["r.price"])
        item["whole_money"] = str(whole_money)
        result_queue.put(item)

    # filename = "数据导出需求9.11(1)(1).xlsx"
    filename = "../data/新建 XLSX 工作表(1).xlsx"
    df = pd.read_excel(filename)
    list_company = []
    for _key in df.keys():
        print(_key,len(df[_key]))
    for _company in df["公司名称"]:
        list_company.append({"enterprise_name":_company,"regCapital":"","legal_person":"","phone":"","industry":"","province":"","count":0})
    task_queue = queue.Queue()
    result_queue = queue.Queue()
    for item in list_company:
        task_queue.put(item)
    _mult = MultiThreadHandler(task_queue=task_queue,task_handler=_handle,result_queue=result_queue,thread_count=30)
    _mult.run()
    _dict_item = {}
    while True:
        try:
            item = result_queue.get(False)
            if item["enterprise_name"]!="":
                _dict_item[item["enterprise_name"]] = item
        except queue.Empty:
            break
    list_count = []
    list_project = []
    list_money = []
    list_zb = []
    for _company in df["公司名称"]:
        if _company in _dict_item:
            list_count.append(_dict_item[_company]["count"])
            list_project.append(_dict_item[_company]["project"])
            list_money.append(_dict_item[_company]["whole_money"])
            list_zb.append("是" if _dict_item[_company]["count"]>0 else "否")
        else:
            print(_company)
            list_count.append(0)
            list_project.append("")
            list_money.append("0")
            list_zb.append("否")
    print(len(list_count),len(list_project),len(list_money),len(list_zb))
    df2 = pd.DataFrame({"公司名称":df["公司名称"],"次数":list_count})
    df2.to_excel("%s_export.xls"%filename)
    # fuller export kept for reference (the original had a stray ".xls" after df["客户搜索词"]):
    # df1 = pd.DataFrame({"月份":df["月份"],"电话":df["电话"],"公司名字":df["公司名字"],"开通时间":df["开通时间"],
    #                     "到期时间":df["到期时间"],"客户公司注册时间":df["客户公司注册时间"],"客户公司注册资金":df["客户公司注册资金"],
    #                     "实际缴费资金":df["实际缴费资金"],"天眼查行业分类":df["天眼查行业分类"],"是否中标":list_zb,
    #                     "中标次数":list_count,"中标项目|3个":list_project,"中标金额":list_money,"客户设置关键词":df["客户设置关键词"],"客户搜索词":df["客户搜索词"]})
    # df1.to_excel("%s_补充.xls"%filename,columns=["月份","电话","公司名字","开通时间","到期时间","客户公司注册时间","客户公司注册资金","实际缴费资金","天眼查行业分类","是否中标","中标次数","中标项目|3个","中标金额","客户设置关键词","客户搜索词"])

def deal3():
    """Count companies in the factory export whose paid-in capital exceeds 5000万."""
    filename = "../data/导出工厂.xlsx"
    df = pd.read_excel(filename)   # was pd.DataFrame(filename), which cannot read a file path
    count = 0
    for item in df["实缴"]:
        if getUnifyMoney(item)>getUnifyMoney("5000万"):
            count += 1
    print(count)

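# getUnifyMoney (from utils.Utils) appears to normalize money strings such as
# "5000万" into comparable numeric amounts; the comparison above and the price
# summation in deal1 both rely on that behavior.
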
def exportEnterpriseByName():
    """Look up each company from the CSV in Tablestore and export its profile columns."""
    df = pd.read_csv("../data/中标家具公司.csv",encoding="GBK")

    def _handle(item,result_queue,pool_ots):
        # results are collected by mutating the dicts in list_dict, not via result_queue
        ots_client = pool_ots.getConnector()
        primary_key = [('name',str(item["name"]))]
        columns_to_get = ["reg_capital","actual_capital","contacts","industry","estiblish_time","social_staff_num","business_scope","zhong_biao_number"]
        consumed, return_row, next_token = ots_client.get_row("enterprise",primary_key,columns_to_get,None,1)
        print(return_row)
        if return_row is not None:   # guard: the row may not exist for this name
            for _item in return_row.attribute_columns:
                if _item[0]=="contacts":
                    # keep the contact whose number matches the phone we already have
                    a = json.loads(_item[1])
                    for i in a:
                        if i.get("mobile_no","")==item["phone"] or i.get("phone_no","")==item["phone"]:
                            item["contact_person"] = i.get("contact_person","")
                else:
                    item[_item[0]] = _item[1]
        pool_ots.putConnector(ots_client)   # was missing, leaking pool connectors

    list_dict = []
    for name,phone in zip(df["name"],df["phone"]):
        list_dict.append({"name":name,"phone":phone})
    task_queue = queue.Queue()
    for item in list_dict:
        task_queue.put(item)
    result_queue = queue.Queue()
    pool_ots = ConnectorPool(init_num=10,max_num=30,method_init=getConnect_ots)
    mt = MultiThreadHandler(task_queue=task_queue,task_handler=_handle,result_queue=result_queue,thread_count=70,pool_ots=pool_ots)
    mt.run()
    columns = ["name","contact_person","phone","reg_capital","actual_capital","industry","estiblish_time","social_staff_num","business_scope","zhong_biao_number"]
    df_data = {}
    for _c in columns:
        df_data[_c] = []
    for item in list_dict:
        for _key in columns:
            df_data[_key].append(item.get(_key,""))
    df1 = pd.DataFrame(df_data)
    df1.to_csv("中标家具公司1.csv")

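# Tablestore's get_row(table, primary_key, columns_to_get, column_filter,
# max_version) returns (consumed, row, next_token); the row comes back empty
# when the primary key does not exist, hence the guard in _handle above.
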
def getCompanysFromMysql():
    """Pull Guangzhou member companies from MySQL (renamed from getCompanys to avoid shadowing the Elasticsearch version above)."""
    conn = getConnection_mysql()
    cursor = conn.cursor()
    sql = '''select C.login_id as 登陆名,B.company,B.contactname as 联系人,B.phone as 联系电话,
                    (select MLEVELNAME from sys_memberlevel where id=A.memberlevelid) as 会员等级,
                    (select name from b2c_mall_staff_basic_info where userid=B.aftermarket) as 售后客服
             from bxkc.bxkc_member_term A,bxkc.b2c_mall_staff_basic_info B,bxkc.b2c_user_login_info C
             where A.USERID=B.USERID and B.USERID=C.USERID and B.innerOrg like '广州%'
                   and A.memberlevelid!=81 and A.status='01' and str_to_date('2020-11-20','%Y-%m-%d') between A.stime and A.etiem;
          '''
    cursor.execute(sql)
    vol = cursor.description
    list_company = []
    rows = cursor.fetchall()
    for row in rows:
        _company = {}
        for _vol,_value in zip(vol,row):
            _name = _vol[0]
            _company[_name] = _value
        list_company.append(_company)
    return list_company

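# cursor.description yields one (column_name, ...) tuple per result column, so
# zipping it with each row turns the result set into dicts keyed by the SQL
# aliases above.
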
def exportEnterprise_byindustry(page_time,
                                columns=["name","address","business_scope","province","city","district","reg_capital","phone","estiblish_time"],
                                keywords=["钢材","水泥","五金","水电","暖通","暖气","电缆"]):
    """Export enterprises whose industry or nicknames match any keyword and that were established before 2017."""
    list_should_q = []
    for _key in keywords:
        list_should_q.append(WildcardQuery("industry","*%s*"%_key))
        list_should_q.append(WildcardQuery("nicknames","*%s*"%_key))
    key_query = BoolQuery(should_queries=list_should_q)
    ots_client = getConnect_ots()
    bool_query = BoolQuery(must_queries=[RangeQuery("bidi_id",0,include_lower=True),
                                         key_query,
                                         RangeQuery("estiblish_time",range_to="2017-01-01")])
    rows, next_token, total_count, is_all_succeed = ots_client.search("enterprise", "enterprise_index",
                                                                      SearchQuery(bool_query, limit=100, get_total_count=True),
                                                                      ColumnsToGet(columns,return_type=ColumnReturnType.SPECIFIED))
    all_rows = 0
    df_data = {}
    for key in columns:
        df_data[key] = []

    def _collect(rows):
        # flatten the (primary_key, attribute_columns) pairs into one dict per row;
        # a reg_capital threshold filter was previously applied here but is disabled
        for row in rows:
            _dict = dict()
            for part in row:
                for item in part:
                    _dict[item[0]] = item[1]
            for key in columns:
                df_data[key].append(_dict.get(key,""))

    _collect(rows)
    all_rows += len(rows)
    while next_token:
        rows, next_token, total_count, is_all_succeed = ots_client.search("enterprise", "enterprise_index",
                                                                          SearchQuery(bool_query, next_token=next_token,limit=100, get_total_count=True),
                                                                          ColumnsToGet(columns,return_type=ColumnReturnType.SPECIFIED))
        _collect(rows)
        all_rows += len(rows)
    print(all_rows,total_count,len(df_data[columns[0]]))
    df = pd.DataFrame(df_data)
    df.to_csv("../data/enterprise_2017_a.csv",columns=columns)

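# Tablestore search() pages with next_token: keep passing the token back in a
# new SearchQuery until it comes back empty, as the loops above and below do.
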
def getTyc_company():
    """Import Tianyancha spreadsheet exports into the test MySQL Enterprise table."""
    root_path = ["G:/文档/tyc国企","G:/文档/tyc机构"]
    list_files = []
    for _path in root_path:
        for file in os.listdir(_path):
            list_files.append(os.path.join(_path,file))
    # narrowed to a single spreadsheet for this run:
    list_files = ["G:/文档/tyc机构\\高级搜索导出数据结果—自定义条件—天眼查(W20011656561610789770227).xlsx"]
    pool_mysql = ConnectorPool(method_init=getConnection_testmysql,init_num=10,max_num=30)
    task_queue = queue.Queue()
    result_queue = queue.Queue()
    for _file in list_files:
        task_queue.put(_file)

    def _handle(_file,result_queue,pool_mysql):   # second argument renamed from task_queue: it receives the result queue
        print("handle",_file)
        conn = pool_mysql.getConnector()
        cursor = conn.cursor()
        df = pd.read_excel(_file,header=2)
        for name,social_credit,identification,regist_num,organization_code in zip(df["公司名称"],df["统一社会信用代码"],df["纳税人识别号"],df["注册号"],df["组织机构代码"]):
            try:
                sql = " insert into Enterprise(name,social_credit,identification,regist_num,organization_code) values ('%s','%s','%s','%s','%s')"%(name,social_credit,identification,regist_num,organization_code)
                cursor.execute(sql)
            except Exception as e:
                print("error",e)
        conn.commit()
        pool_mysql.putConnector(conn)

    mt = MultiThreadHandler(task_queue,_handle,result_queue,20,pool_mysql=pool_mysql)
    mt.run()

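# The INSERT above builds SQL by string formatting, which breaks on names
# containing quotes. With a DB-API cursor the parameterized equivalent is:
#   cursor.execute("insert into Enterprise(name,social_credit,identification,regist_num,organization_code) values (%s,%s,%s,%s,%s)",
#                  (name,social_credit,identification,regist_num,organization_code))
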
def exportEnterprise_by_bidNum():
    """Export names of enterprises that have a tyc_id and at least 4 winning bids."""
    ots_client = getConnect_ots()
    bool_query = BoolQuery(must_queries=[RangeQuery("tyc_id",1,include_lower=True),
                                         RangeQuery("bid_number",4,include_lower=True)])
    columns = ["name"]
    rows, next_token, total_count, is_all_succeed = ots_client.search("enterprise", "enterprise_index",
                                                                      SearchQuery(bool_query,sort=Sort(sorters=[FieldSort("tyc_id",SortOrder.ASC)]), limit=100, get_total_count=True),
                                                                      ColumnsToGet(columns,return_type=ColumnReturnType.SPECIFIED))
    df_data = {}
    for _key in columns:
        df_data[_key] = []

    def getData(df_data,rows):
        list_dict = getRow_ots(rows)
        for _dict in list_dict:
            for _key in columns:
                _v = _dict.get(_key,"")
                if len(_v)>4:   # keep only names longer than 4 characters
                    df_data[_key].append(_v)

    getData(df_data,rows)
    _count = len(rows)
    while next_token:
        print("%d/%d"%(_count,total_count))
        rows, next_token, total_count, is_all_succeed = ots_client.search("enterprise", "enterprise_index",
                                                                          SearchQuery(bool_query, next_token=next_token,limit=100, get_total_count=True),
                                                                          ColumnsToGet(columns,return_type=ColumnReturnType.SPECIFIED))
        getData(df_data,rows)
        _count += len(rows)
    df = pd.DataFrame(df_data)
    df.to_csv("../data/enterprise_bidinum.csv",columns=columns)

def make_Legal_enterprise():
    """Build a deduplicated list of plausible enterprise names and write it to LEGAL_ENTERPRISE.txt."""
    import codecs

    def format(_e):
        # normalize an enterprise name; return None if it should be discarded
        if _e is None:
            return None
        if not isinstance(_e,str):
            return None
        if re.search("^[a-zA-Z0-9]+$",_e) is not None:
            return None
        # the original character class "[<《]>-。\-\.\?]" had unbalanced brackets;
        # reconstructed here as one class of disallowed punctuation
        if re.search(r"[<《》>。\-\.\?]",_e) is not None:
            return None
        _e1 = re.sub(r"\s+","",_e.replace("(","(").replace(")",")"))
        if re.search("[省市区县乡镇]$",_e) is not None:
            return None
        if len(_e1)>=4:
            return _e1
        return None

    set_enterprise = set()
    df = pd.read_csv("../data/enterprise_bidinum.csv",encoding="GBK")
    _count = 0
    for _e in df["name"]:
        _count += 1
        if _count%10000==0:
            print(_count)
        _e1 = format(_e)
        if _e1 is not None:
            set_enterprise.add(_e1)
    conn = getConnection_testmysql()
    cursor = conn.cursor()
    sql = " select name from Enterprise "
    cursor.execute(sql)
    rows = cursor.fetchmany(10000)
    while rows:
        for row in rows:
            _count += 1
            if _count%10000==0:
                print(_count)
            _e = row[0]
            _e1 = format(_e)
            if _e1 is not None:
                set_enterprise.add(_e1)
        rows = cursor.fetchmany(10000)
    with codecs.open("../data/LEGAL_ENTERPRISE.txt","w",encoding="UTF8") as f:
        for _e in list(set_enterprise):
            f.write(_e+"\n")

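# fetchmany(10000) streams the result set in fixed-size batches, so the whole
# Enterprise table never has to be held in memory at once.
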
def getDictEnterprise(list_enterprise,columns_to_get=["reg_capital","actual_capital","contacts","industry","estiblish_time","social_staff_num","business_scope","zhong_biao_number"]):
    """Batch-fetch enterprise rows from Tablestore and return them as a dict keyed by name."""
    task_queue = queue.Queue()
    result_queue = queue.Queue()
    for _enterprise in list_enterprise:
        task_queue.put(_enterprise)

    def _handle(item,result_queue,pool_ots):
        ots_client = pool_ots.getConnector()
        primary_key = [("name",item)]
        consumed,return_row,next_token = ots_client.get_row("enterprise",primary_key,columns_to_get,None,1)
        dict_data = getRow_ots_primary(return_row)
        if dict_data is not None:
            result_queue.put({item:dict_data})
        pool_ots.putConnector(ots_client)

    pool_ots = ConnectorPool(init_num=10,max_num=30,method_init=getConnect_ots)
    mt = MultiThreadHandler(task_queue=task_queue,task_handler=_handle,result_queue=result_queue,thread_count=30,pool_ots=pool_ots)
    mt.run()
    dict_enterprise = {}
    while True:
        try:
            _dict = result_queue.get(False)
            for k,v in _dict.items():
                dict_enterprise[k] = v
        except queue.Empty:
            break
    return dict_enterprise

def getOneContact(contacts,tojson=True,mobile_first=True):
    """Pick one (person, number) pair from a contacts list, preferring mobile numbers when mobile_first is set."""
    if tojson:
        list_contacts = json.loads(contacts)
    else:
        list_contacts = contacts
    mobile_person = ""
    mobile_no = ""
    phone_person = ""
    phone_no = ""
    for _contact in list_contacts:
        if _contact.get("mobile_no","")!="":
            mobile_person = _contact.get("contact_person","")
            mobile_no = _contact.get("mobile_no","")
        if _contact.get("phone_no","")!="":
            phone_person = _contact.get("contact_person","")   # was get("phone_no"), a copy-paste slip
            phone_no = _contact.get("phone_no","")
    if mobile_first:
        return mobile_person,mobile_no
    return phone_person,phone_no

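# Usage sketch (hypothetical data, matching the contacts JSON shape used above):
#   person, number = getOneContact('[{"contact_person":"张三","mobile_no":"13800000000","phone_no":""}]')
#   # -> ("张三", "13800000000")
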
if __name__=="__main__":
    # getTyc_company()
    exportEnterprise_by_bidNum()
    make_Legal_enterprise()