# document.py
  1. from BaseDataMaintenance.model.ots.BaseModel import BaseModel
  2. from tablestore import *
  3. from BaseDataMaintenance.common.Utils import *
  4. from bs4 import BeautifulSoup
  5. from BaseDataMaintenance.common.Utils import article_limit
# ---------------------------------------------------------------------------
# Column-name constants for the OTS (Tablestore) "document" table.
# Using module-level constants instead of bare string literals keeps the
# many call sites below typo-safe and greppable.
# ---------------------------------------------------------------------------

# Primary key columns.
document_partitionkey = "partitionkey"
document_docid = "docid"

# Main content columns.
document_dochtmlcon = "dochtmlcon"
document_doctextcon = "doctextcon"
document_doctitle = "doctitle"
document_attachmenttextcon = "attachmenttextcon"

# Attachment list column and the keys used inside its JSON entries.
document_attachment_path = "page_attachments"
document_attachment_path_filemd5 = "fileMd5"
document_attachment_path_fileTitle = "fileTitle"
document_attachment_path_fileLink = "fileLink"

# Workflow / bookkeeping columns.
document_crtime = "crtime"
document_status = "status"
document_page_time = "page_time"
document_attachment_extract_status = "attachment_extract_status"
document_web_source_no = "web_source_no"
document_fingerprint = "fingerprint"
document_opertime = "opertime"

# Channel / classification columns.
document_docchannel = "docchannel"
document_original_docchannel = "original_docchannel"
document_life_docchannel = "life_docchannel"

# Location columns.
document_area = "area"
document_province = "province"
document_city = "city"
document_district = "district"

# Extraction-result columns.
document_extract_json = "extract_json"
document_bidway = "bidway"
document_industry = "industry"
document_info_type = "info_type"
document_qcodes = "qcodes"
document_project_name = "project_name"
document_project_code = "project_code"
document_project_codes = "project_codes"

# Parties involved in the tender.
document_tenderee = "tenderee"
document_tenderee_addr = "tenderee_addr"
document_tenderee_phone = "tenderee_phone"
document_tenderee_contact = "tenderee_contact"
document_agency = "agency"
document_agency_phone = "agency_phone"
document_agency_contact = "agency_contact"

# Miscellaneous business columns.
document_product = "product"
document_moneysource = "moneysource"
document_service_time = "service_time"

# Milestone/time columns of the tendering process.
document_time_bidclose = "time_bidclose"
document_time_bidopen = "time_bidopen"
document_time_bidstart = "time_bidstart"
document_time_commencement = "time_commencement"
document_time_completion = "time_completion"
document_time_earnest_money_start = "time_earnest_money_start"
document_time_earnest_money_end = "time_earnest_money_end"
document_time_get_file_end = "time_get_file_end"
document_time_get_file_start = "time_get_file_start"
document_time_publicity_end = "time_publicity_end"
document_time_publicity_start = "time_publicity_start"
document_time_registration_end = "time_registration_end"
document_time_registration_start = "time_registration_start"
document_time_release = "time_release"

# Source / NLP enrichment columns.
document_info_source = "info_source"
document_nlp_enterprise = "nlp_enterprise"
document_nlp_enterprise_attachment = "nlp_enterprise_attachment"
document_total_tenderee_money = "total_tenderee_money"
document_update_document = "update_document"
class Document(BaseModel):
    """OTS model for one row of the "document" table.

    Wraps a plain dict of column values (see the document_* constants above)
    and adds helpers for patching the stored HTML content and for pulling
    attachment metadata back out of it.
    """

    def __init__(self, _dict):
        BaseModel.__init__(self)
        # Copy every provided column into the model's property map.
        for k, v in _dict.items():
            self.setValue(k, v, True)
        self.table_name = "document"
        # URL prefixes with special legality handling in isLegalUrl().
        self.prefixs = ["www.bidizhaobiao.com", "bxkc.oss-cn-shanghai.aliyuncs.com"]

    def getPrimary_keys(self):
        # Composite primary key of the OTS "document" table.
        return ["partitionkey", "docid"]

    def getAttribute_turple(self):
        """Return non-empty attributes as (key, value) tuples for writing.

        Skips the bookkeeping key "all_columns" and serializes list values
        to JSON strings, since OTS columns cannot hold Python lists.
        """
        _list = []
        for _key in self.getAttribute_keys():
            if _key == "all_columns":
                continue
            _v = self.getProperties().get(_key)
            if _v is not None and _v != "":
                if isinstance(_v, list):
                    _v = json.dumps(_v)
                _list.append((_key, _v))
        return _list

    # def delete_row(self,ots_client):
    #     raise NotImplementedError()

    def isLegalUrl(self, _url, _type):
        """Check a URL against self.prefixs.

        _type==0: URL is legal only if it DOES contain one of the prefixes;
        any other _type inverts the test (legal only if it does NOT).
        """
        _flag = False
        for _prefix in self.prefixs:
            if _url.find(_prefix) >= 0:
                _flag = True
        if _type == 0:
            if _flag:
                return True
            else:
                return False
        else:
            if _flag:
                return False
            else:
                return True

    # The from* helpers stamp a random status inside the range that marks a
    # processing stage (ranges per the queries in the scripts below).
    def fromInitialed(self):
        self.setValue(document_status, random.randint(1, 50), True)

    def fromEas2Maxcompute(self):
        self.setValue(document_status, random.randint(151, 170), True)

    def fromEasFailed(self):
        self.setValue(document_status, random.randint(51, 60), True)

    def fromEas2Extract(self):
        self.setValue(document_status, random.randint(61, 70), True)

    def updateSWFImages(self, swf_urls):
        """Append <img> tags for converted SWF pages to dochtmlcon.

        Only appends when the first URL is not already present in the HTML,
        so repeated calls do not duplicate the image block.
        """
        if len(swf_urls) > 0:
            _dochtmlcon = self.getProperties().get(document_dochtmlcon)
            _soup = BeautifulSoup(_dochtmlcon, "lxml")
            if _soup.find("img", {"src": swf_urls[0]}) is None:
                _div = '<div class="swf_urls">'
                for _url in swf_urls:
                    _div += '<p><img src="%s"/></p>' % (_url)
                _div += "</div>"
                _dochtmlcon += _div
                self.setValue(document_dochtmlcon, _dochtmlcon, True)

    def getRichTextFetch(self, list_html):
        """Concatenate attachment HTML fragments into one blob.

        list_html items may be plain HTML strings or dicts with "filemd5"
        and "html" keys; dict entries are tagged with their filemd5 so the
        source attachment can be identified later.  Output longer than
        50000 chars is truncated via article_limit().
        """
        _text = ""
        for _ht in list_html:
            if isinstance(_ht, str):
                _text += "<div>%s</div>" % (_ht)
            elif isinstance(_ht, dict):
                _filemd5 = _ht.get("filemd5", "")
                _html = _ht.get("html", "")
                _text += '<div filemd5="%s">%s</div>' % (_filemd5, _html)
        if len(_text) > 50000:
            _soup = BeautifulSoup(_text, "lxml")
            _soup = article_limit(_soup, 50000)
            # Strip the wrapper tags lxml adds when parsing a fragment.
            _text = re.sub("<html>|</html>|<body>|</body>", "", str(_soup))
        return _text

    def updateAttachment(self, list_html):
        """Replace the hidden attachment container inside dochtmlcon.

        Any existing <div class="richTextFetch"> is removed before the new
        one built from list_html is appended, keeping exactly one container.
        """
        if len(list_html) > 0:
            _dochtmlcon = self.getProperties().get(document_dochtmlcon, "")
            _dochtmlcon = re.sub("<html>|</html>|<body>|</body>", "", _dochtmlcon)
            _dochtmlcon_len = len(bytes(_dochtmlcon, encoding="utf8"))
            # NOTE(review): fix_len is computed (COLUMN_MAX_SIZE presumably
            # comes from BaseModel) but never used below — confirm intent.
            fix_len = self.COLUMN_MAX_SIZE - _dochtmlcon_len - 100
            # _text = '\n<div style="display:none;" class="richTextFetch">%s</div>'%("\n".join(list_html))
            _text = '<div style="display:none;" class="richTextFetch">%s</div>' % (self.getRichTextFetch(list_html))
            if _dochtmlcon is not None:
                _soup = BeautifulSoup(_dochtmlcon, "lxml")
                _node = _soup.find("div", attrs={"class": "richTextFetch"})
                if _node is not None:
                    _node.decompose()
                self.setValue(document_dochtmlcon, str(_soup) + _text, True)

    def getTitleFromHtml(self, filemd5, _html):
        """Return the link text of the <a data=filemd5> anchor, or ""."""
        _soup = BeautifulSoup(_html, "lxml")
        _find = _soup.find("a", attrs={"data": filemd5})
        _title = ""
        if _find is not None:
            _title = _find.get_text()
        return _title

    def getSourceLinkFromHtml(self, filemd5, _html):
        """Return the source link of the attachment tagged with filemd5.

        Prefers the href of an <a filelink=filemd5> anchor, falling back to
        the src of an <img filelink=filemd5>; "" when neither is present.
        """
        _soup = BeautifulSoup(_html, "lxml")
        _find = _soup.find("a", attrs={"filelink": filemd5})
        filelink = ""
        if _find is None:
            _find = _soup.find("img", attrs={"filelink": filemd5})
            if _find is not None:
                filelink = _find.attrs.get("src", "")
        else:
            filelink = _find.attrs.get("href", "")
        return filelink
  169. import random
def turn_extract_status():
    """One-off maintenance task: de-duplicate documents stuck in extract.

    Scans documents with status 61-70 (the "sent to extract" range, see
    fromEas2Extract), groups them by fingerprint, keeps the row with the
    smallest docid in each group, and marks every other duplicate with a
    status in 401-450 so it drops out of the pipeline.
    """
    from BaseDataMaintenance.dataSource.source import getConnect_ots
    from BaseDataMaintenance.common.multiThread import MultiThreadHandler
    import queue
    from threading import Thread
    import json
    task_queue = queue.Queue()
    from BaseDataMaintenance.model.ots.attachment import attachment_filemd5, attachment_file_title, attachment_file_link
    ots_client = getConnect_ots()

    def producer(task_queue, ots_client):
        # Page through every document whose status is in [61, 70],
        # fetching only the fingerprint column.
        bool_query = BoolQuery(must_queries=[
            # WildcardQuery(document_web_source_no,"00295*"),
            # RangeQuery(document_crtime,"2021-07-26 00:00:00"),
            RangeQuery(document_status, 61, 70, True, True),
            # TermQuery(document_docid,171146519),
            ]
        )
        rows, next_token, total_count, is_all_succeed = ots_client.search("document", "document_index",
                                                                          SearchQuery(bool_query, sort=Sort(sorters=[FieldSort("docid", SortOrder.DESC)]), limit=100, get_total_count=True),
                                                                          columns_to_get=ColumnsToGet([document_fingerprint], return_type=ColumnReturnType.SPECIFIED))
        list_data = getRow_ots(rows)
        print(total_count)
        _count = len(list_data)
        for _data in list_data:
            _document = Document(_data)
            task_queue.put(_document)
        # Follow the pagination token until the result set is exhausted.
        while next_token:
            rows, next_token, total_count, is_all_succeed = ots_client.search("document", "document_index",
                                                                              SearchQuery(bool_query, next_token=next_token, limit=100, get_total_count=True),
                                                                              columns_to_get=ColumnsToGet([document_fingerprint], return_type=ColumnReturnType.SPECIFIED))
            list_data = getRow_ots(rows)
            _count += len(list_data)
            print("%d/%d" % (_count, total_count))
            for _data in list_data:
                _document = Document(_data)
                task_queue.put(_document)

    def _handle(item, result_queue, ots_client):
        # change attach value
        # list_attachment = json.loads(item.getProperties().get(document_attachment_path))
        # print("docid",item.getProperties().get(document_docid))
        # for attach in list_attachment:
        #
        #     filemd5 = attach.get(document_attachment_path_filemd5,"")
        #     _document_html = item.getProperties().get(document_dochtmlcon,"")
        #
        #     _file_title = item.getTitleFromHtml(filemd5,_document_html)
        #     filelink = item.getSourceLinkFromHtml(filemd5,_document_html)
        #     attach[document_attachment_path_fileTitle] = _file_title
        #     attach[document_attachment_path_fileLink] = filelink
        #
        # item.setValue(document_attachment_path,json.dumps(list_attachment,ensure_ascii=False),True)
        # item.all_columns.remove(document_dochtmlcon)
        # change status: note the randint(1,50) here is the "initialed" range,
        # but the duplicates pushed below were already stamped 401-450 before
        # queueing, so update_row writes that value — TODO confirm.
        item.setValue(document_status, random.randint(1, 50), True)
        item.update_row(ots_client)

    # Run the producer to completion before consuming, so the dedup pass
    # below sees the full result set.
    t_producer = Thread(target=producer, kwargs={"task_queue": task_queue, "ots_client": ots_client})
    t_producer.start()
    t_producer.join()
    # mt = MultiThreadHandler(task_queue,_handle,None,30,ots_client=ots_client)
    # mt.run()

    # Drain the queue and bucket documents by fingerprint; the 2-second
    # timeout (queue.Empty) is what terminates the loop.
    dict_fingerprint = {}
    while True:
        try:
            item = task_queue.get(timeout=2)
            fingerprint = item.getProperties().get(document_fingerprint)
            if fingerprint is not None:
                if fingerprint not in dict_fingerprint:
                    dict_fingerprint[fingerprint] = []
                dict_fingerprint[fingerprint].append(item)
        except Exception as e:
            print(e)
            break
    print(len(dict_fingerprint.keys()))

    # Keep the earliest docid per fingerprint; flag the rest (401-450) and
    # push them through _handle for the actual row update.
    status_queue = queue.Queue()
    for k, v in dict_fingerprint.items():
        print("key", k, len(v))
        v.sort(key=lambda x: x.docid)
        for _d in v[1:]:
            _d.setValue(document_status, random.randint(401, 450), True)
            status_queue.put(_d)
    mt = MultiThreadHandler(status_queue, _handle, None, 30, ots_client=ots_client)
    mt.run()
def turn_document_status():
    """One-off maintenance task: reset status=1 for a hand-picked docid list.

    The docids are read from a local Excel file (hard-coded path); each row
    is rewritten with status 1 so the pipeline re-processes it.  The large
    commented block in producer() preserves earlier ad-hoc query variants.
    """
    from BaseDataMaintenance.dataSource.source import getConnect_ots
    from BaseDataMaintenance.common.multiThread import MultiThreadHandler
    import queue
    from threading import Thread
    import json
    task_queue = queue.Queue()
    from BaseDataMaintenance.model.ots.attachment import attachment_filemd5, attachment_file_title, attachment_file_link
    ots_client = getConnect_ots()

    def producer(task_queue, ots_client):
        # bool_query = BoolQuery(
        #     must_queries=[
        #         MatchPhraseQuery("doctitle","珠海城市职业技术学院2022年05月至2022年06月政府采购意向"),
        #         # BoolQuery(should_queries=[
        #         # # TermQuery("tenderee","山西利民工业有限责任公司"),
        #         # # MatchPhraseQuery("doctitle","中国电信"),
        #         # # MatchPhraseQuery("doctextcon","中国电信"),
        #         # # MatchPhraseQuery("attachmenttextcon","中国电信")]),
        #         # # RangeQuery(document_status,88,120,True,True),
        #         # RangeQuery("page_time","2022-03-24","2022-03-25",True,False),
        #         # ExistsQuery
        #         # #,TermQuery(document_docid,171146519)
        #         # ]
        #         # )
        #     ],
        #     # must_not_queries=[WildcardQuery("DX004354*")]
        # )
        #
        # rows,next_token,total_count,is_all_succeed = ots_client.search("document","document_index",
        #     SearchQuery(bool_query,sort=Sort(sorters=[FieldSort("docid",SortOrder.DESC)]),limit=100,get_total_count=True),
        #     columns_to_get=ColumnsToGet([document_area],return_type=ColumnReturnType.SPECIFIED))
        # list_data = getRow_ots(rows)
        # print(total_count)
        # _count = len(list_data)
        # for _data in list_data:
        #     _document = Document(_data)
        #     task_queue.put(_document)
        # while next_token:
        #     rows,next_token,total_count,is_all_succeed = ots_client.search("document","document_index",
        #         SearchQuery(bool_query,next_token=next_token,limit=100,get_total_count=True),
        #         columns_to_get=ColumnsToGet([document_area],return_type=ColumnReturnType.SPECIFIED))
        #     list_data = getRow_ots(rows)
        #     _count += len(list_data)
        #     print("%d/%d"%(_count,total_count))
        #     for _data in list_data:
        #         _document = Document(_data)
        #         task_queue.put(_document)
        # docids = [223820830,224445409]
        # for docid in docids:
        #     _dict = {document_docid:int(docid),
        #              document_partitionkey:int(docid)%500+1,
        #              }
        #     task_queue.put(Document(_dict))
        import pandas as pd
        # Hard-coded local path of the docid list to repair.
        df = pd.read_excel("G:\\20221212error.xlsx")
        for docid in df["docid"]:
            # partitionkey is derived from docid: docid % 500 + 1.
            _dict = {document_docid: int(docid),
                     document_partitionkey: int(docid) % 500 + 1,
                     }
            task_queue.put(Document(_dict))
        log("task_queue size:%d" % (task_queue.qsize()))

    def _handle(item, result_queue, ots_client):
        # change attach value
        # list_attachment = json.loads(item.getProperties().get(document_attachment_path))
        # print("docid",item.getProperties().get(document_docid))
        # for attach in list_attachment:
        #
        #     filemd5 = attach.get(document_attachment_path_filemd5,"")
        #     _document_html = item.getProperties().get(document_dochtmlcon,"")
        #
        #     _file_title = item.getTitleFromHtml(filemd5,_document_html)
        #     filelink = item.getSourceLinkFromHtml(filemd5,_document_html)
        #     attach[document_attachment_path_fileTitle] = _file_title
        #     attach[document_attachment_path_fileLink] = filelink
        #
        # item.setValue(document_attachment_path,json.dumps(list_attachment,ensure_ascii=False),True)
        # item.all_columns.remove(document_dochtmlcon)
        # change status
        # item.setValue(document_docchannel,item.getProperties().get(document_original_docchannel),True)
        # item.setValue(document_status,random.randint(151,171),True)
        # item.setValue(document_area,"华南",True)
        # item.setValue(document_province,"广东",True)
        # item.setValue(document_city,"珠海",True)
        # item.setValue(document_district,"金湾区",True)
        item.setValue(document_status, 1, True)
        # print(item.getProperties())
        item.update_row(ots_client)
        # log("update %d status done"%(item.getProperties().get(document_docid)))
        pass

    t_producer = Thread(target=producer, kwargs={"task_queue": task_queue, "ots_client": ots_client})
    t_producer.start()
    t_producer.join()
    mt = MultiThreadHandler(task_queue, _handle, None, 30, ots_client=ots_client)
    mt.run()
def drop_extract2():
    """One-off maintenance task: flush rows out of document_extract2.

    For every row in the document_extract2 table (status 1-1000), resets the
    corresponding document-table row to status=1 (forcing re-processing) and
    then deletes the document_extract2 row.  Destructive: the delete_row at
    the end is not guarded, so only run deliberately.
    """
    from BaseDataMaintenance.dataSource.source import getConnect_ots
    from BaseDataMaintenance.common.multiThread import MultiThreadHandler
    import queue
    from threading import Thread
    import json
    task_queue = queue.Queue()
    from BaseDataMaintenance.model.ots.attachment import attachment_filemd5, attachment_file_title, attachment_file_link
    ots_client = getConnect_ots()
    from BaseDataMaintenance.model.ots.document_extract2 import Document_extract2

    def producer(task_queue, ots_client):
        # Page through document_extract2 rows with status in [1, 1000],
        # fetching only the status column.
        bool_query = BoolQuery(must_queries=[
            BoolQuery(should_queries=[
                # TermQuery("tenderee","山西利民工业有限责任公司"),
                # MatchPhraseQuery("doctitle","中国电信"),
                # MatchPhraseQuery("doctextcon","中国电信"),
                # MatchPhraseQuery("attachmenttextcon","中国电信")]),
                RangeQuery("status", 1, 1000, True, True),
                # RangeQuery("page_time","2021-12-20","2022-01-05",True,False),
                # ,TermQuery(document_docid,171146519)
                ]
            ),
            # TermQuery("docid",228359000)
            ],
            # must_not_queries=[NestedQuery("sub_docs_json",WildcardQuery("sub_docs_json.win_tenderer","*"))]
        )
        rows, next_token, total_count, is_all_succeed = ots_client.search("document_extract2", "document_extract2_index",
                                                                          SearchQuery(bool_query, sort=Sort(sorters=[FieldSort("docid", SortOrder.DESC)]), limit=100, get_total_count=True),
                                                                          columns_to_get=ColumnsToGet(["status"], return_type=ColumnReturnType.SPECIFIED))
        list_data = getRow_ots(rows)
        print(total_count)
        _count = len(list_data)
        for _data in list_data:
            task_queue.put(_data)
        while next_token:
            rows, next_token, total_count, is_all_succeed = ots_client.search("document_extract2", "document_extract2_index",
                                                                              SearchQuery(bool_query, next_token=next_token, limit=100, get_total_count=True),
                                                                              columns_to_get=ColumnsToGet(["status"], return_type=ColumnReturnType.SPECIFIED))
            list_data = getRow_ots(rows)
            _count += len(list_data)
            print("%d/%d" % (_count, total_count))
            for _data in list_data:
                task_queue.put(_data)
        # docids = [223820830,224445409]
        # for docid in docids:
        #     _dict = {document_docid:int(docid),
        #              document_partitionkey:int(docid)%500+1,
        #              }
        #     task_queue.put(Document(_dict))
        # import pandas as pd
        # df = pd.read_excel("2022-01-19_214304_export11.xlsx")
        # for docid,tenderee,win in zip(df["docid"],df["招标单位"],df["中标单位"]):
        #     if not isinstance(tenderee,(str)) or not isinstance(win,(str)) or win=="" or tenderee=="":
        #         # print(docid)
        #         _dict = {document_docid:int(docid),
        #                  document_partitionkey:int(docid)%500+1,
        #                  }
        #         task_queue.put(Document(_dict))
        log("task_queue size:%d" % (task_queue.qsize()))

    def _handle(item, result_queue, ots_client):
        # change attach value
        # list_attachment = json.loads(item.getProperties().get(document_attachment_path))
        # print("docid",item.getProperties().get(document_docid))
        # for attach in list_attachment:
        #
        #     filemd5 = attach.get(document_attachment_path_filemd5,"")
        #     _document_html = item.getProperties().get(document_dochtmlcon,"")
        #
        #     _file_title = item.getTitleFromHtml(filemd5,_document_html)
        #     filelink = item.getSourceLinkFromHtml(filemd5,_document_html)
        #     attach[document_attachment_path_fileTitle] = _file_title
        #     attach[document_attachment_path_fileLink] = filelink
        #
        # item.setValue(document_attachment_path,json.dumps(list_attachment,ensure_ascii=False),True)
        # item.all_columns.remove(document_dochtmlcon)
        # change status
        # item.setValue(document_docchannel,item.getProperties().get(document_original_docchannel),True)
        # item.setValue(document_status,random.randint(151,170),True)
        # item.update_row(ots_client)
        # log("update %d status done"%(item.getProperties().get(document_docid)))
        # Rebuild the row dict with status forced to 1, push it back to the
        # document table, then drop the extract2 row.
        _dict = {}
        _dict.update(item)
        _dict.pop("status")
        _dict["status"] = 1
        print(_dict)
        _document = Document(_dict)
        _document.update_row(ots_client)
        _d_extract = Document_extract2(_dict)
        _d_extract.delete_row(ots_client)
        pass

    t_producer = Thread(target=producer, kwargs={"task_queue": task_queue, "ots_client": ots_client})
    t_producer.start()
    t_producer.join()
    mt = MultiThreadHandler(task_queue, _handle, None, 30, ots_client=ots_client)
    mt.run()
  441. def fixDocumentHtml():
  442. from BaseDataMaintenance.dataSource.source import getConnect_ots,getConnect_ots_capacity
  443. from queue import Queue
  444. ots_client = getConnect_ots()
  445. from BaseDataMaintenance.common.multiThread import MultiThreadHandler
  446. from BaseDataMaintenance.model.ots.document_html import Document_html
  447. capacity_client = getConnect_ots_capacity()
  448. list_data = []
  449. bool_query = BoolQuery(must_queries=[
  450. MatchPhraseQuery("doctextcon","信友-城市之光"),
  451. MatchPhraseQuery("doctextcon","Copyright"),
  452. # TermQuery("docid",254249505)
  453. ])
  454. rows,next_token,total_count,is_all_succeed = ots_client.search("document","document_index",
  455. SearchQuery(bool_query,sort=Sort(sorters=[FieldSort("docid")]),get_total_count=True,limit=100),
  456. columns_to_get=ColumnsToGet(["doctextcon"],return_type=ColumnReturnType.SPECIFIED))
  457. print("total_count",total_count)
  458. list_data.extend(getRow_ots(rows))
  459. while next_token:
  460. rows,next_token,total_count,is_all_succeed = ots_client.search("document","document_index",
  461. SearchQuery(bool_query,next_token=next_token,get_total_count=True,limit=100),
  462. columns_to_get=ColumnsToGet(["doctextcon"],return_type=ColumnReturnType.SPECIFIED))
  463. list_data.extend(getRow_ots(rows))
  464. task_queue = Queue()
  465. for _data in list_data:
  466. task_queue.put(_data)
  467. _pattern = "(?P<_find>城市之光.*Ltd.)"
  468. _pattern1 = "(?P<_find>Evaluation.*Ltd.)"
  469. def _handle(item,result_queue):
  470. _doctextcon = item.get("doctextcon")
  471. _search = re.search(_pattern,_doctextcon)
  472. print(_search.groupdict().get("_find"))
  473. item["doctextcon"] = re.sub(_pattern,"",_doctextcon)
  474. _d = Document(item)
  475. _d.update_row(ots_client)
  476. _d1 = {"partitionkey":item.get("partitionkey"),
  477. "docid":item.get("docid")}
  478. _dh = Document(_d1)
  479. _dh.fix_columns(capacity_client,["dochtmlcon"],True)
  480. _dochtmlcon = _dh.getProperties().get("dochtmlcon")
  481. _dochtmlcon = re.sub("\n","",_dochtmlcon)
  482. _search = re.search(_pattern1,_dochtmlcon)
  483. _dochtmlcon = re.sub(_pattern1,"",_dochtmlcon)
  484. _d1["dochtmlcon"] = _dochtmlcon
  485. _dh = Document(_d1)
  486. _dh.update_row(capacity_client)
  487. # print(re.sub(_pattern,"</div><p><span>",_dochtmlcon))
  488. mt = MultiThreadHandler(task_queue,_handle,None,2)
  489. mt.run()
def delete_documents():
    """One-off maintenance task: delete documents listed in an Excel export.

    Reads docids from the hard-coded xlsx file and deletes each row from
    both the main OTS instance and the capacity instance.  Destructive and
    unguarded — only run deliberately with the correct file in place.
    """
    from BaseDataMaintenance.dataSource.source import getConnect_ots
    from BaseDataMaintenance.dataSource.source import getConnect_ots_capacity
    ots_client = getConnect_ots()
    ots_capacity = getConnect_ots_capacity()
    import pandas as pd
    df = pd.read_excel("2022-10-14_190838_数据导出.xlsx")
    _count = 0
    for _docid in df["docid"]:
        # Same partitionkey derivation used elsewhere: docid % 500 + 1.
        partitionkey = int(_docid) % 500 + 1
        _d = {document_partitionkey: partitionkey,
              document_docid: int(_docid)}
        _doc = Document(_d)
        _doc.delete_row(ots_client)
        _doc.delete_row(ots_capacity)
        _count += 1
        print(_docid)
    print("delete count:%d" % _count)
if __name__ == "__main__":
    # Manual maintenance entry point: enable exactly one task per run.
    # turn_extract_status()
    turn_document_status()
    # drop_extract2()
    # fixDocumentHtml()