'''
predictor.py
Created on 2018-12-26
@author: User
'''
import os
import sys
from BiddingKG.dl.common.nerUtils import *
sys.path.append(os.path.abspath("../.."))
# from keras.engine import topology
# from keras import models
# from keras import layers
# from keras_contrib.layers.crf import CRF
# from keras.preprocessing.sequence import pad_sequences
# from keras import optimizers,losses,metrics
from BiddingKG.dl.common.Utils import *
from BiddingKG.dl.interface.modelFactory import *
import tensorflow as tf
import pandas as pd
from BiddingKG.dl.product.data_util import decode, process_data
from BiddingKG.dl.interface.Entitys import Entity
from BiddingKG.dl.complaint.punish_predictor import Punish_Extract
from BiddingKG.dl.money.re_money_total_unit import extract_total_money, extract_unit_money
from bs4 import BeautifulSoup
import copy
import calendar
import datetime
from BiddingKG.dl.entityLink.entityLink import get_business_data
from BiddingKG.dl.proposed_building.pb_extract import PBPredictor
from BiddingKG.dl.interface.getAttributes import turnMoneySource, extract_serviceTime
from BiddingKG.dl.time.re_servicetime import extract_servicetime
# import fool  # use selffool throughout; only the selffool package is available on Aliyun
cpu_num = int(os.environ.get("CPU_NUM", 0))
sess_config = tf.ConfigProto(
    inter_op_parallelism_threads=cpu_num,
    intra_op_parallelism_threads=cpu_num,
    log_device_placement=True)
sess_config = None  # the explicit ConfigProto above is currently overridden; the default session config is used

file = os.path.dirname(__file__) + '/agency_set.pkl'
with open(file, 'rb') as f:
    agency_set = pickle.load(f)
def is_agency(entity_text):
    if re.search('(招投?标|采购|代理|咨询|管理|物资|事务所?|顾问|监理|拍卖)[()\w]{,4}(有限)?(责任)?公司|(采购|招投?标|交易|代理|咨询)[()\w]{,4}(中心|服务所)|法院$',
                 entity_text) or entity_text in agency_set:
        return True
    return False
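# Usage sketch for is_agency (illustrative values only):
#   is_agency("某某招标代理有限公司")   # True, matched by the agency-name regex
#   is_agency("某某建筑工程有限公司")   # False, unless the name appears in agency_set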
from threading import RLock

dict_predictor = {"codeName":{"predictor":None,"Lock":RLock()},
                  "prem":{"predictor":None,"Lock":RLock()},
                  "epc":{"predictor":None,"Lock":RLock()},
                  "roleRule":{"predictor":None,"Lock":RLock()},
                  "roleRuleFinal":{"predictor":None,"Lock":RLock()},
                  "tendereeRuleRecall":{"predictor":None,"Lock":RLock()},
                  "form":{"predictor":None,"Lock":RLock()},
                  "time":{"predictor":None,"Lock":RLock()},
                  "punish":{"predictor":None,"Lock":RLock()},
                  "product":{"predictor":None,"Lock":RLock()},
                  "product_attrs":{"predictor":None,"Lock":RLock()},
                  "channel": {"predictor": None, "Lock": RLock()},
                  "deposit_payment_way": {"predictor": None, "Lock": RLock()},
                  "total_unit_money": {"predictor": None, "Lock": RLock()},
                  "industry": {"predictor": None, "Lock": RLock()},
                  "rolegrade": {"predictor": None, "Lock": RLock()},
                  "moneygrade": {"predictor": None, "Lock": RLock()},
                  "district": {"predictor": None, "Lock": RLock()},
                  'tableprem': {"predictor": None, "Lock": RLock()},
                  'candidate': {"predictor": None, "Lock": RLock()},
                  'websource_tenderee': {"predictor": None, "Lock": RLock()},
                  'project_label': {"predictor": None, "Lock": RLock()},
                  'pb_extract': {"predictor": None, "Lock": RLock()},
                  'property_label': {"predictor": None, "Lock": RLock()},
                  'approval': {"predictor": None, "Lock": RLock()}  # approval-project prediction
                  }
def getPredictor(_type):
    if _type in dict_predictor:
        with dict_predictor[_type]["Lock"]:
            if dict_predictor[_type]["predictor"] is None:
                if _type == "codeName":
                    dict_predictor[_type]["predictor"] = CodeNamePredict(config=sess_config)
                if _type == "prem":
                    dict_predictor[_type]["predictor"] = PREMPredict(config=sess_config)
                if _type == "epc":
                    dict_predictor[_type]["predictor"] = EPCPredict(config=sess_config)
                if _type == "roleRule":
                    dict_predictor[_type]["predictor"] = RoleRulePredictor()
                if _type == "roleRuleFinal":
                    dict_predictor[_type]["predictor"] = RoleRuleFinalAdd()
                if _type == "tendereeRuleRecall":
                    dict_predictor[_type]["predictor"] = TendereeRuleRecall()
                if _type == "form":
                    dict_predictor[_type]["predictor"] = FormPredictor(config=sess_config)
                if _type == "time":
                    dict_predictor[_type]["predictor"] = TimePredictor(config=sess_config)
                if _type == "punish":
                    dict_predictor[_type]["predictor"] = Punish_Extract()
                if _type == "product":
                    dict_predictor[_type]["predictor"] = ProductPredictor(config=sess_config)
                if _type == "product_attrs":
                    dict_predictor[_type]["predictor"] = ProductAttributesPredictor()
                if _type == "channel":
                    dict_predictor[_type]["predictor"] = DocChannel(config=sess_config)
                if _type == 'deposit_payment_way':
                    dict_predictor[_type]["predictor"] = DepositPaymentWay()
                if _type == 'total_unit_money':
                    dict_predictor[_type]["predictor"] = TotalUnitMoney()
                if _type == 'industry':
                    dict_predictor[_type]["predictor"] = IndustryPredictor()
                if _type == 'rolegrade':
                    dict_predictor[_type]["predictor"] = RoleGrade()
                if _type == 'moneygrade':
                    dict_predictor[_type]["predictor"] = MoneyGrade()
                if _type == 'district':
                    dict_predictor[_type]["predictor"] = DistrictPredictor()
                if _type == 'tableprem':
                    dict_predictor[_type]["predictor"] = TablePremExtractor()
                if _type == 'candidate':
                    dict_predictor[_type]["predictor"] = CandidateExtractor()
                if _type == 'websource_tenderee':
                    dict_predictor[_type]['predictor'] = WebsourceTenderee()
                if _type == 'project_label':
                    dict_predictor[_type]['predictor'] = ProjectLabel()
                if _type == 'pb_extract':
                    dict_predictor[_type]['predictor'] = PBPredictor()
                if _type == 'property_label':
                    dict_predictor[_type]['predictor'] = PropertyLabel()
                if _type == 'approval':
                    dict_predictor[_type]['predictor'] = ApprovalPredictor()
            return dict_predictor[_type]["predictor"]
    raise NameError("no this type of predictor")
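# Usage sketch for getPredictor (illustrative; assumes the corresponding model files are present):
#   codename_predictor = getPredictor("codeName")    # first call builds CodeNamePredict and caches it
#   codename_predictor is getPredictor("codeName")   # True: later calls return the cached instance
#   getPredictor("unknown")                          # raises NameError("no this type of predictor")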
# project code & name model
class CodeNamePredict():

    def __init__(self, EMBED_DIM=None, BiRNN_UNITS=None, lazyLoad=getLazyLoad(), config=None):
        self.model = None
        self.MAX_LEN = None
        self.model_code = None
        if EMBED_DIM is None:
            self.EMBED_DIM = 60
        else:
            self.EMBED_DIM = EMBED_DIM
        if BiRNN_UNITS is None:
            self.BiRNN_UNITS = 200
        else:
            self.BiRNN_UNITS = BiRNN_UNITS
        self.filepath = os.path.dirname(__file__)+"/../projectCode/models/model_project_"+str(self.EMBED_DIM)+"_"+str(self.BiRNN_UNITS)+".hdf5"
        #self.filepath = "../projectCode/models/model_project_60_200_200ep017-loss6.456-val_loss7.852-val_acc0.969.hdf5"
        self.filepath_code = os.path.dirname(__file__)+"/../projectCode/models/model_code.hdf5"
        vocabpath = os.path.dirname(__file__)+"/codename_vocab.pk"
        classlabelspath = os.path.dirname(__file__)+"/codename_classlabels.pk"
        self.vocab = load(vocabpath)
        self.class_labels = load(classlabelspath)
        # build the regexes used to extract project-code and project-name label sequences
        id_PC_B = self.class_labels.index("PC_B")
        id_PC_M = self.class_labels.index("PC_M")
        id_PC_E = self.class_labels.index("PC_E")
        id_PN_B = self.class_labels.index("PN_B")
        id_PN_M = self.class_labels.index("PN_M")
        id_PN_E = self.class_labels.index("PN_E")
        self.PC_pattern = re.compile(str(id_PC_B)+str(id_PC_M)+"*"+str(id_PC_E))
        self.PN_pattern = re.compile(str(id_PN_B)+str(id_PN_M)+"*"+str(id_PN_E))
        # print("pc",self.PC_pattern)
        # print("pn",self.PN_pattern)
        self.word2index = dict((w, i) for i, w in enumerate(np.array(self.vocab)))
        self.inputs = None
        self.outputs = None
        self.sess_codename = tf.Session(graph=tf.Graph(), config=config)
        self.sess_codesplit = tf.Session(graph=tf.Graph(), config=config)
        self.inputs_code = None
        self.outputs_code = None
        if not lazyLoad:
            self.getModel()
            self.getModel_code()
    def getModel(self):
        '''
        @summary: load the project code/name extraction model
        '''
        if self.inputs is None:
            log("get model of codename")
            with self.sess_codename.as_default():
                with self.sess_codename.graph.as_default():
                    meta_graph_def = tf.saved_model.loader.load(self.sess_codename, ["serve"], export_dir=os.path.dirname(__file__)+"/codename_savedmodel_tf")
                    signature_key = tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY
                    signature_def = meta_graph_def.signature_def
                    self.inputs = self.sess_codename.graph.get_tensor_by_name(signature_def[signature_key].inputs["inputs"].name)
                    self.inputs_length = self.sess_codename.graph.get_tensor_by_name(signature_def[signature_key].inputs["inputs_length"].name)
                    self.keepprob = self.sess_codename.graph.get_tensor_by_name(signature_def[signature_key].inputs["keepprob"].name)
                    self.logits = self.sess_codename.graph.get_tensor_by_name(signature_def[signature_key].outputs["logits"].name)
                    self.trans = self.sess_codename.graph.get_tensor_by_name(signature_def[signature_key].outputs["trans"].name)
                    return self.inputs, self.inputs_length, self.keepprob, self.logits, self.trans
        else:
            return self.inputs, self.inputs_length, self.keepprob, self.logits, self.trans
        '''
        if self.model is None:
            self.model = self.getBiLSTMCRFModel(self.MAX_LEN, self.vocab, self.EMBED_DIM, self.BiRNN_UNITS, self.class_labels, weights=None)
            self.model.load_weights(self.filepath)
        return self.model
        '''
    def getModel_code(self):
        if self.inputs_code is None:
            log("get model of code")
            with self.sess_codesplit.as_default():
                with self.sess_codesplit.graph.as_default():
                    meta_graph_def = tf.saved_model.loader.load(self.sess_codesplit, ["serve"], export_dir=os.path.dirname(__file__)+"/codesplit_savedmodel")
                    signature_key = tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY
                    signature_def = meta_graph_def.signature_def
                    self.inputs_code = []
                    self.inputs_code.append(self.sess_codesplit.graph.get_tensor_by_name(signature_def[signature_key].inputs["input0"].name))
                    self.inputs_code.append(self.sess_codesplit.graph.get_tensor_by_name(signature_def[signature_key].inputs["input1"].name))
                    self.inputs_code.append(self.sess_codesplit.graph.get_tensor_by_name(signature_def[signature_key].inputs["input2"].name))
                    self.outputs_code = self.sess_codesplit.graph.get_tensor_by_name(signature_def[signature_key].outputs["outputs"].name)
                    self.sess_codesplit.graph.finalize()
                    return self.inputs_code, self.outputs_code
        else:
            return self.inputs_code, self.outputs_code
        '''
        if self.model_code is None:
            log("get model of model_code")
            with self.sess_codesplit.as_default():
                with self.sess_codesplit.graph.as_default():
                    self.model_code = models.load_model(self.filepath_code, custom_objects={'precision':precision,'recall':recall,'f1_score':f1_score})
        return self.model_code
        '''
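    # Both loaders above follow the same TF 1.x SavedModel pattern: load the "serve" tag into a
    # dedicated session, look up the default serving signature, then resolve each input/output
    # tensor by name.  Minimal sketch (export_dir and tensor keys here are assumptions):
    #   sess = tf.Session(graph=tf.Graph())
    #   with sess.graph.as_default():
    #       meta = tf.saved_model.loader.load(sess, ["serve"], export_dir="some_savedmodel_dir")
    #       sig = meta.signature_def[tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY]
    #       input_tensor = sess.graph.get_tensor_by_name(sig.inputs["inputs"].name)
    #       output_tensor = sess.graph.get_tensor_by_name(sig.outputs["logits"].name)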
    def getBiLSTMCRFModel(self, MAX_LEN, vocab, EMBED_DIM, BiRNN_UNITS, chunk_tags, weights):
        # legacy Keras builder; it relies on the keras/keras_contrib imports commented out at the top of this file
        '''
        model = models.Sequential()
        model.add(layers.Embedding(len(vocab), EMBED_DIM, mask_zero=True))  # Random embedding
        model.add(layers.Bidirectional(layers.LSTM(BiRNN_UNITS // 2, return_sequences=True)))
        crf = CRF(len(chunk_tags), sparse_target=True)
        model.add(crf)
        model.summary()
        model.compile('adam', loss=crf.loss_function, metrics=[crf.accuracy])
        return model
        '''
        input = layers.Input(shape=(None,))
        if weights is not None:
            embedding = layers.embeddings.Embedding(len(vocab), EMBED_DIM, mask_zero=True, weights=[weights], trainable=True)(input)
        else:
            embedding = layers.embeddings.Embedding(len(vocab), EMBED_DIM, mask_zero=True)(input)
        bilstm = layers.Bidirectional(layers.LSTM(BiRNN_UNITS//2, return_sequences=True))(embedding)
        bilstm_dense = layers.TimeDistributed(layers.Dense(len(chunk_tags)))(bilstm)
        crf = CRF(len(chunk_tags), sparse_target=True)
        crf_out = crf(bilstm_dense)
        model = models.Model(input=[input], output=[crf_out])
        model.summary()
        model.compile(optimizer='adam', loss=crf.loss_function, metrics=[crf.accuracy])
        return model
    # complete the bracket on either side of a code or name according to pairing rules
    def fitDataByRule(self, data):
        symbol_dict = {"(":")",
                       "(":")",
                       "[":"]",
                       "【":"】",
                       ")":"(",
                       ")":"(",
                       "]":"[",
                       "】":"【"}
        leftSymbol_pattern = re.compile("[\((\[【]")
        rightSymbol_pattern = re.compile("[\))\]】]")
        leftfinds = re.findall(leftSymbol_pattern, data)
        rightfinds = re.findall(rightSymbol_pattern, data)
        result = data
        if len(leftfinds)+len(rightfinds)==0:
            return data
        elif len(leftfinds)==len(rightfinds):
            return data
        elif abs(len(leftfinds)-len(rightfinds))==1:
            if len(leftfinds)>len(rightfinds):
                if symbol_dict.get(data[0]) is not None:
                    result = data[1:]
                else:
                    #print(symbol_dict.get(leftfinds[0]))
                    result = data+symbol_dict.get(leftfinds[0])
            else:
                if symbol_dict.get(data[-1]) is not None:
                    result = data[:-1]
                else:
                    result = symbol_dict.get(rightfinds[0])+data
        return result
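    # Behaviour sketch for fitDataByRule (illustrative strings, assuming `p` is a CodeNamePredict
    # instance): with exactly one unmatched bracket, the method either strips a leading/trailing
    # bracket or appends the missing counterpart.
    #   p.fitDataByRule("第一标段(土建")   # -> "第一标段(土建)"  (missing right bracket appended)
    #   p.fitDataByRule("(第一标段")       # -> "第一标段"        (leading bracket stripped)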
    def decode(self, logits, trans, sequence_lengths, tag_num):
        viterbi_sequences = []
        for logit, length in zip(logits, sequence_lengths):
            score = logit[:length]
            viterbi_seq, viterbi_score = viterbi_decode(score, trans)
            viterbi_sequences.append(viterbi_seq)
        return viterbi_sequences
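    # decode() applies CRF Viterbi decoding per sentence: `logits` is [batch, max_len, tag_num],
    # `trans` is the [tag_num, tag_num] transition matrix, and each score matrix is truncated to
    # the sentence's real length before viterbi_decode (assumed to come from the wildcard-imported
    # utils / tf.contrib.crf) produces the tag-id sequence.
    #   predict_y = self.decode(_logits, _trans, x_len, 7)   # list of per-sentence tag-id lists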
    def predict(self, list_sentences, list_entitys=None, MAX_AREA=5000):
        # @summary: extract the project code(s) and project name for each document
        # pattern_score = re.compile("工程|服务|采购|施工|项目|系统|招标|中标|公告|学校|[大中小]学校?|医院|公司|分公司|研究院|政府采购中心|学院|中心校?|办公室|政府|财[政务]局|办事处|委员会|[部总支]队|警卫局|幼儿园|党委|党校|银行|分行|解放军|发电厂|供电局|管理所|供电公司|卷烟厂|机务段|研究[院所]|油厂|调查局|调查中心|出版社|电视台|监狱|水厂|服务站|信用合作联社|信用社|交易所|交易中心|交易中心党校|科学院|测绘所|运输厅|管理处|局|中心|机关|部门?|处|科|厂|集团|图书馆|馆|所|厅|楼|区|酒店|场|基地|矿|餐厅|酒店")
        pattern_score = re.compile('建设项目|服务项目|工程项目|工程施工|建设工程|服务中心|基础设施|物业管理|工程设计|妇幼保健|咨询服务|管理系统|管理中心|改建工程|配套工程|公安局|幼儿园|管理局|使用权|办公楼|教育局|管理处|图书馆|经营权|项目|采购|工程|改造|服务|设备|中心|医院|系统|建设|监理|施工|维修|学院|安装|设计|关于|标段|招标|技术|询价|管理|学校|小学|中学|平台|提升|设施|检测|整治|社区|装修|政府|绿化|物资|租赁|地块|医疗|编制|公开|规划|监控|教育|维护|校区|治理|升级|安置|竞价|购置|评估|勘察|承包|实验|大学|材料|生产|耗材|招租|硬化|维保|用地|消防|审计|拍卖|物业|入围|养护|机关|企业|用房|出让|资产|分局|验收|宣传|处置|校园|研究|咨询|修缮|更换|装饰|劳务|保养|物流|出租|局|院')
        result = []
        index_unk = self.word2index.get("<unk>")
        # index_pad = self.word2index.get("<pad>")
        if list_entitys is None:
            list_entitys = [[] for _ in range(len(list_sentences))]
        for list_sentence, list_entity in zip(list_sentences, list_entitys):
            if len(list_sentence)==0:
                result.append([{"code":[], "name":""}])
                continue
            doc_id = list_sentence[0].doc_id
            # sentences = []
            # for sentence in list_sentence:
            #     if len(sentence.sentence_text)>MAX_AREA:
            #         for _sentence_comma in re.split("[;;,\n]",sentence):
            #             _comma_index = 0
            #             while(_comma_index<len(_sentence_comma)):
            #                 sentences.append(_sentence_comma[_comma_index:_comma_index+MAX_AREA])
            #                 _comma_index += MAX_AREA
            #     else:
            #         sentences.append(sentence+"。")
            list_sentence.sort(key=lambda x: len(x.sentence_text), reverse=True)
            _begin_index = 0
            item = {"code":[], "name":""}
            code_set = set()
            dict_name_freq_score = dict()
            while True:
                MAX_LEN = len(list_sentence[_begin_index].sentence_text)
                if MAX_LEN > MAX_AREA:
                    MAX_LEN = MAX_AREA
                _LEN = MAX_AREA//MAX_LEN
                # predict
                x = [[self.word2index.get(word, index_unk) for word in sentence.sentence_text[:MAX_AREA]] for sentence in list_sentence[_begin_index:_begin_index+_LEN]]
                # x = [[getIndexOfWord(word) for word in sentence.sentence_text[:MAX_AREA]] for sentence in list_sentence[_begin_index:_begin_index+_LEN]]
                x_len = [len(_x) if len(_x) < MAX_LEN else MAX_LEN for _x in x]
                x = pad_sequences(x, maxlen=MAX_LEN, padding="post", truncating="post")
                if USE_API:
                    requests_result = requests.post(API_URL + "/predict_codeName", json={"inouts": x.tolist(), "inouts_len": x_len}, verify=True)
                    predict_y = json.loads(requests_result.text)['result']
                    # print("cost_time:", json.loads(requests_result.text)['cost_time'])
                    # print(MAX_LEN,_LEN,_begin_index)
                else:
                    with self.sess_codename.as_default():
                        t_input, t_input_length, t_keepprob, t_logits, t_trans = self.getModel()
                        _logits, _trans = self.sess_codename.run([t_logits, t_trans], feed_dict={t_input: x,
                                                                                                 t_input_length: x_len,
                                                                                                 t_keepprob: 1.0})
                        predict_y = self.decode(_logits, _trans, x_len, 7)
                        # print('==========',_logits)
                '''
                for item11 in np.argmax(predict_y,-1):
                    print(item11)
                print(predict_y)
                '''
                # print(predict_y)
                for sentence, predict in zip(list_sentence[_begin_index:_begin_index+_LEN], np.array(predict_y)):
                    pad_sentence = sentence.sentence_text[:MAX_LEN]
                    join_predict = "".join([str(s) for s in predict])
                    # print(pad_sentence)
                    # print(join_predict)
                    code_x = []
                    code_text = []
                    pre_text = []
                    temp_entitys = []
                    for iter in re.finditer(self.PC_pattern, join_predict):
                        get_len = 40
                        if iter.span()[0] < get_len:
                            begin = 0
                        else:
                            begin = iter.span()[0]-get_len
                        end = iter.span()[1]+get_len
                        code_x.append(embedding_word([pad_sentence[begin:iter.span()[0]], pad_sentence[iter.span()[0]:iter.span()[1]].replace(",",""), pad_sentence[iter.span()[1]:end]], shape=(3, get_len, 60)))
                        code_text.append(pad_sentence[iter.span()[0]:iter.span()[1]].replace(",", ""))
                        pre_text.append(pad_sentence[begin:iter.span()[0]])
                        _entity = Entity(doc_id=sentence.doc_id, entity_id="%s_%s_%s_%s"%(sentence.doc_id, sentence.sentence_index, iter.span()[0], iter.span()[1]), entity_text=pad_sentence[iter.span()[0]:iter.span()[1]].replace(",",""), entity_type="code", sentence_index=sentence.sentence_index, begin_index=0, end_index=0, wordOffset_begin=iter.span()[0], wordOffset_end=iter.span()[1], in_attachment=sentence.in_attachment)
                        temp_entitys.append(_entity)
                    #print("code",code_text)
                    if len(code_x) > 0:
                        code_x = np.transpose(np.array(code_x, dtype=np.float32), (1, 0, 2, 3))
                        if USE_PAI_EAS:
                            request = tf_predict_pb2.PredictRequest()
                            request.inputs["input0"].dtype = tf_predict_pb2.DT_FLOAT
                            request.inputs["input0"].array_shape.dim.extend(np.shape(code_x[0]))
                            request.inputs["input0"].float_val.extend(np.array(code_x[0], dtype=np.float64).reshape(-1))
                            request.inputs["input1"].dtype = tf_predict_pb2.DT_FLOAT
                            request.inputs["input1"].array_shape.dim.extend(np.shape(code_x[1]))
                            request.inputs["input1"].float_val.extend(np.array(code_x[1], dtype=np.float64).reshape(-1))
                            request.inputs["input2"].dtype = tf_predict_pb2.DT_FLOAT
                            request.inputs["input2"].array_shape.dim.extend(np.shape(code_x[2]))
                            request.inputs["input2"].float_val.extend(np.array(code_x[2], dtype=np.float64).reshape(-1))
                            request_data = request.SerializeToString()
                            list_outputs = ["outputs"]
                            _result = vpc_requests(codeclasses_url, codeclasses_authorization, request_data, list_outputs)
                            if _result is not None:
                                predict_code = _result["outputs"]
                            else:
                                with self.sess_codesplit.as_default():
                                    with self.sess_codesplit.graph.as_default():
                                        predict_code = self.getModel_code().predict([code_x[0], code_x[1], code_x[2]])
                        else:
                            with self.sess_codesplit.as_default():
                                with self.sess_codesplit.graph.as_default():
                                    inputs_code, outputs_code = self.getModel_code()
                                    predict_code = limitRun(self.sess_codesplit, [outputs_code], feed_dict={inputs_code[0]: code_x[0], inputs_code[1]: code_x[1], inputs_code[2]: code_x[2]})[0]
                                    #predict_code = self.sess_codesplit.run(outputs_code,feed_dict={inputs_code[0]:code_x[0],inputs_code[1]:code_x[1],inputs_code[2]:code_x[2]})
                                    #predict_code = self.getModel_code().predict([code_x[0],code_x[1],code_x[2]])
                        for h in range(len(predict_code)):
                            if predict_code[h][0] > 0.5:
                                the_code = self.fitDataByRule(code_text[h])
                                # print(the_code)
                                # add code to entitys
                                list_entity.append(temp_entitys[h])
                                if re.search(',|/|;|、|,', the_code) and len(the_code) > 25:
                                    for it in re.split(',|/|;|、|,', the_code):
                                        if len(it) > 8:
                                            if it not in code_set:
                                                code_set.add(it)
                                                # item['code'].append(it)
                                                if re.search("(项目编号|招标编号):?$", pre_text[h]):
                                                    item['code'].append((it, 0, sentence.sentence_index))
                                                elif re.search('采购(计划)?编号:?$', pre_text[h]):
                                                    item['code'].append((it, 1, sentence.sentence_index))
                                                elif re.search('(询价|合同)编号:?$', pre_text[h]):
                                                    item['code'].append((it, 2, sentence.sentence_index))
                                                else:
                                                    item['code'].append((it, 3, sentence.sentence_index))
                                            elif len(item['code']) > 0:
                                                new_it = item['code'][-1][0] + re.search(',|/|;|、|,', the_code).group(0) + it
                                                if new_it not in code_set:
                                                    code_set.add(new_it)
                                                    # item['code'][-1] = new_it
                                                    if re.search("(项目编号|招标编号):?$", pre_text[h]):
                                                        item['code'][-1] = (new_it, 0, sentence.sentence_index)
                                                    elif re.search('采购(计划)?编号:?$', pre_text[h]):
                                                        item['code'][-1] = (new_it, 1, sentence.sentence_index)
                                                    elif re.search('(询价|合同)编号:?$', pre_text[h]):
                                                        item['code'][-1] = (new_it, 2, sentence.sentence_index)
                                                    else:
                                                        item['code'][-1] = (new_it, 3, sentence.sentence_index)
                                        else:
                                            if the_code not in code_set:
                                                code_set.add(the_code)
                                                # item['code'].append(the_code)
                                                if re.search("(项目编号|招标编号):?$", pre_text[h]):
                                                    item['code'].append((the_code, 0, sentence.sentence_index))
                                                elif re.search('采购(计划)?编号:?$', pre_text[h]):
                                                    item['code'].append((the_code, 1, sentence.sentence_index))
                                                elif re.search('(询价|合同)编号:?$', pre_text[h]):
                                                    item['code'].append((the_code, 2, sentence.sentence_index))
                                                else:
                                                    item['code'].append((the_code, 3, sentence.sentence_index))
                                            break
                                elif the_code not in code_set:
                                    if len(the_code) < 5:  # avoid cases like doc 510545935, where "招标项目编号:2024年第二期" would yield only "2024"
                                        continue
                                    code_set.add(the_code)
                                    # item['code'].append(the_code)
                                    if re.search("(项目编号|招标编号):?$", pre_text[h]):
                                        item['code'].append((the_code, 0, sentence.sentence_index))
                                    elif re.search('采购(计划)?编号:?$', pre_text[h]):
                                        item['code'].append((the_code, 1, sentence.sentence_index))
                                    elif re.search('(询价|合同)编号:?$', pre_text[h]):
                                        item['code'].append((the_code, 2, sentence.sentence_index))
                                    else:
                                        item['code'].append((the_code, 3, sentence.sentence_index))
                                # if the_code not in code_set:
                                #     code_set.add(the_code)
                                # item['code'] = list(code_set)
                    for iter in re.finditer(self.PN_pattern, join_predict):
                        _name = self.fitDataByRule(pad_sentence[iter.span()[0]:iter.span()[1]])
                        if len(_name) > 200:  # avoid very long, highly repetitive model outputs being taken as the project name (e.g. doc 202750503)
                            continue
                        elif '公司:你单位在' in _name:  # avoid names like doc 339900030, which would turn the winning role into the tendering role
                            continue
                        # add name to entitys
                        _entity = Entity(doc_id=sentence.doc_id, entity_id="%s_%s_%s_%s"%(sentence.doc_id, sentence.sentence_index, iter.span()[0], iter.span()[1]), entity_text=_name, entity_type="name", sentence_index=sentence.sentence_index, begin_index=0, end_index=0, wordOffset_begin=iter.span()[0], wordOffset_end=iter.span()[1], in_attachment=sentence.in_attachment)
                        list_entity.append(_entity)
                        # w = 1 if re.search('(项目|工程|招标|合同|标项|标的|计划|询价|询价单|询价通知书|申购)(名称|标题|主题)[::\s]', pad_sentence[iter.span()[0]-10:iter.span()[0]])!=None else 0.5
                        w = 1 if re.search('(项目|工程|招标|采购|合同|标项|标的|计划|询价|询价单|询价通知书|申购)(名称|标题|主题|项目)[::\s]', pad_sentence[iter.span()[0]-10:iter.span()[0]])!=None else 0.5
                        if _name not in dict_name_freq_score:
                            # dict_name_freq_score[_name] = [1,len(re.findall(pattern_score,_name))+len(_name)*0.1]
                            len_name = len(_name) if len(_name) < 50 else 100-len(_name)  # 2023/03/02: names longer than 50 characters are scored progressively lower
                            dict_name_freq_score[_name] = [1, (len(re.findall(pattern_score, _name)) + len_name * 0.05)*w+(5-sentence.sentence_index)*0.2]
                        else:
                            dict_name_freq_score[_name][0] += 1
                    '''
                    for iter in re.finditer(self.PN_pattern,join_predict):
                        print("name-",self.fitDataByRule(pad_sentence[iter.span()[0]:iter.span()[1]]))
                    if item[1]['name']=="":
                        for iter in re.finditer(self.PN_pattern,join_predict):
                            #item[1]['name']=item[1]['name']+";"+self.fitDataByRule(pad_sentence[iter.span()[0]:iter.span()[1]])
                            item[1]['name']=self.fitDataByRule(pad_sentence[iter.span()[0]:iter.span()[1]])
                            break
                    '''
                if _begin_index+_LEN >= len(list_sentence):
                    break
                _begin_index += _LEN
  484. list_name_freq_score = []
  485. # print('模型预测项目名称:', dict_name_freq_score)
  486. # 2020/11/23 大网站规则调整
  487. if len(dict_name_freq_score) == 0:
  488. # name_re1 = '(项目|工程|招标|合同|标项|标的|计划|询价|询价单|询价通知书|申购)(名称|标题|主题)[::\s]+([^,。:;]{2,60})[,。]'
  489. name_re1 = '(项目|工程|招标|采购(条目)?|合同|标项|标的|计划|询价|询价单|询价通知书|申购单|申购)(名称|标名|标题|主题)[::\s]+(?P<name>[^,。:;]{2,60})[,。]'
  490. for sentence in list_sentence:
  491. # pad_sentence = sentence.sentence_text
  492. othername = re.search(name_re1, sentence.sentence_text)
  493. if othername != None:
  494. project_name = othername.group('name')
  495. if re.search('[\u4e00-\u9fa5]+', project_name) == None: # 没有中文的项目名称去除
  496. # log('没有中文的项目名称去除')
  497. continue
  498. beg = find_index([project_name], sentence.sentence_text)[0]
  499. end = beg + len(project_name)
  500. _name = self.fitDataByRule(sentence.sentence_text[beg:end])
  501. # print('规则召回项目名称:', _name)
  502. # add name to entitys
  503. _entity = Entity(doc_id=sentence.doc_id, entity_id="%s_%s_%s_%s" % (
  504. sentence.doc_id, sentence.sentence_index, beg, end), entity_text=_name,
  505. entity_type="name", sentence_index=sentence.sentence_index, begin_index=0,
  506. end_index=0, wordOffset_begin=beg, wordOffset_end=end,in_attachment=sentence.in_attachment)
  507. list_entity.append(_entity)
  508. w = 1
  509. if _name not in dict_name_freq_score:
  510. # dict_name_freq_score[_name] = [1,len(re.findall(pattern_score,_name))+len(_name)*0.1]
  511. dict_name_freq_score[_name] = [1, (len(re.findall(pattern_score, _name)) + len(_name) * 0.05) * w+(5-sentence.sentence_index)*0.2]
  512. else:
  513. dict_name_freq_score[_name][0] += 1
  514. # othername = re.search(name_re1, sentence.sentence_text)
  515. # if othername != None:
  516. # _name = othername.group(3)
  517. # if _name not in dict_name_freq_score:
  518. # dict_name_freq_score[_name] = [1, len(re.findall(pattern_score, _name)) + len(_name) * 0.1]
  519. # else:
  520. # dict_name_freq_score[_name][0] += 1
  521. for _name in dict_name_freq_score.keys():
  522. list_name_freq_score.append([_name,dict_name_freq_score[_name]])
  523. # print(list_name_freq_score)
  524. if len(list_name_freq_score)>0:
  525. list_name_freq_score.sort(key=lambda x:x[1][0]*x[1][1],reverse=True)
  526. item['name'] = list_name_freq_score[0][0]
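# Selection sketch with made-up candidates: [["A项目", [3, 2.0]], ["B项目", [1, 5.5]]]
# sorts "A项目" first because 3*2.0=6.0 > 1*5.5=5.5, i.e. a name repeated across
# sentences can outrank a single higher-scoring hit.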
  527. # for it in list_name_freq_score:
  528. # print('项目名称及分值:',it[0],it[1], it[1][0]*it[1][1])
  529. # if list_name_freq_score[0][1][0]>1:
  530. # item[1]['name'] = list_name_freq_score[0][0]
  531. # else:
  532. # list_name_freq_score.sort(key=lambda x:x[1][1],reverse=True)
  533. # item[1]["name"] = list_name_freq_score[0][0]
  534. #下面代码加上去用正则添加某些识别不到的项目编号
  535. if item['code'] == []:
  536. for sentence in list_sentence:
  537. # othercode = re.search('(采购计划编号|询价编号)[\))]?[::]?([\[\]a-zA-Z0-9\-]{5,30})', sentence.sentence_text)
  538. # if othercode != None:
  539. # item[1]['code'].append(othercode.group(2))
  540. # 2020/11/23 大网站规则调整
  541. othercode = re.search('(项目|采购|招标|品目|询价|竞价|询价[单书]|磋商|订单|账单|交易|文件|计划|场次|标的|标段|标包|分包|标段\(包\)|招标文件|合同|通知书|公告|工程|寻源|标书|包件|谈判|申购)(单据?号|编号|标号|编码|代码|备案号|号)[::\s]+(?P<code>[^,。;:、]{6,30}[a-zA-Z0-9\号期])[\),。\u4e00-\u9fa5]', sentence.sentence_text)
  542. if othercode != None:
  543. # item['code'].append(othercode.group('code'))
  544. if re.search("(项目编号|招标编号):?$", othercode.group(0)):
  545. item['code'].append((othercode.group('code'), 0, sentence.sentence_index))
  546. elif re.search('采购(计划)?编号:?$', othercode.group(0)):
  547. item['code'].append((othercode.group('code'), 1, sentence.sentence_index))
  548. elif re.search('(询价|合同)编号:?$', othercode.group(0)):
  549. item['code'].append((othercode.group('code'), 2, sentence.sentence_index))
  550. else:
  551. item['code'].append((othercode.group('code'), 3, sentence.sentence_index))
  552. # print('规则召回项目编号:', othercode.group('code'))
  553. # item['code'] = [code for code in item['code'] if len(code)<500]
  554. # item['code'].sort(key=lambda x:len(x),reverse=True)
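# Each recalled code above is a (code, priority, sentence_index) tuple, where a
# smaller priority marks a more specific label (0 项目/招标编号, 1 采购编号,
# 2 询价/合同编号, 3 everything else); after dropping over-long codes the list
# is ordered by (priority, sentence_index) and reduced back to plain strings.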
  555. item['code'] = [code for code in item['code'] if len(code[0]) < 500]
  556. item['code'].sort(key=lambda x: [x[1],x[2]])
  557. item['code'] = [it[0] for it in item['code']]
  558. result.append(item)
  559. list_sentence.sort(key=lambda x: x.sentence_index,reverse=False)
  560. return result
  561. '''
  562. #当数据量过大时会报错
  563. def predict(self,articles,MAX_LEN = None):
  564. sentences = []
  565. for article in articles:
  566. for sentence in article.content.split("。"):
  567. sentences.append([sentence,article.id])
  568. if MAX_LEN is None:
  569. sent_len = [len(sentence[0]) for sentence in sentences]
  570. MAX_LEN = max(sent_len)
  571. #print(MAX_LEN)
  572. #若为空,则直接返回空
  573. result = []
  574. if MAX_LEN==0:
  575. for article in articles:
  576. result.append([article.id,{"code":[],"name":""}])
  577. return result
  578. index_unk = self.word2index.get("<unk>")
  579. index_pad = self.word2index.get("<pad>")
  580. x = [[self.word2index.get(word,index_unk)for word in sentence[0]]for sentence in sentences]
  581. x = pad_sequences(x,maxlen=MAX_LEN,padding="post",truncating="post")
  582. predict_y = self.getModel().predict(x)
  583. last_doc_id = ""
  584. item = []
  585. for sentence,predict in zip(sentences,np.argmax(predict_y,-1)):
  586. pad_sentence = sentence[0][:MAX_LEN]
  587. doc_id = sentence[1]
  588. join_predict = "".join([str(s) for s in predict])
  589. if doc_id!=last_doc_id:
  590. if last_doc_id!="":
  591. result.append(item)
  592. item = [doc_id,{"code":[],"name":""}]
  593. code_set = set()
  594. code_x = []
  595. code_text = []
  596. for iter in re.finditer(self.PC_pattern,join_predict):
  597. get_len = 40
  598. if iter.span()[0]<get_len:
  599. begin = 0
  600. else:
  601. begin = iter.span()[0]-get_len
  602. end = iter.span()[1]+get_len
  603. code_x.append(embedding_word([pad_sentence[begin:iter.span()[0]],pad_sentence[iter.span()[0]:iter.span()[1]],pad_sentence[iter.span()[1]:end]],shape=(3,get_len,60)))
  604. code_text.append(pad_sentence[iter.span()[0]:iter.span()[1]])
  605. if len(code_x)>0:
  606. code_x = np.transpose(np.array(code_x),(1,0,2,3))
  607. predict_code = self.getModel_code().predict([code_x[0],code_x[1],code_x[2]])
  608. for h in range(len(predict_code)):
  609. if predict_code[h][0]>0.5:
  610. the_code = self.fitDataByRule(code_text[h])
  611. if the_code not in code_set:
  612. code_set.add(the_code)
  613. item[1]['code'] = list(code_set)
  614. if item[1]['name']=="":
  615. for iter in re.finditer(self.PN_pattern,join_predict):
  616. #item[1]['name']=item[1]['name']+";"+self.fitDataByRule(pad_sentence[iter.span()[0]:iter.span()[1]])
  617. item[1]['name']=self.fitDataByRule(pad_sentence[iter.span()[0]:iter.span()[1]])
  618. break
  619. last_doc_id = doc_id
  620. result.append(item)
  621. return result
  622. '''
  623. #角色金额模型
  624. class PREMPredict():
  625. def __init__(self,config=None):
  626. #self.model_role_file = os.path.abspath("../role/models/model_role.model.hdf5")
  627. # self.model_role_file = os.path.dirname(__file__)+"/../role/log/new_biLSTM-ep012-loss0.028-val_loss0.040-f10.954.h5"
  628. self.model_role = Model_role_classify_word(config=config)
  629. self.model_money = Model_money_classify(config=config)
  630. # self.role_file = open('/data/python/lsm/role_model_predict.txt', 'a', encoding='utf-8')
  631. # self.money_file = open('/data/python/lsm/money_model_predict.txt', 'a', encoding='utf-8')
  632. return
  633. def search_role_data(self,list_sentences,list_entitys):
'''
@summary: build the input data of the role model from the sentence list and entity list
@param:
list_sentences: sentences of the document
list_entitys: entities of the document
@return: input data of the role model
'''
  641. text_list = []
  642. data_x = []
  643. points_entitys = []
  644. for list_entity,list_sentence in zip(list_entitys,list_sentences):
  645. list_entity.sort(key=lambda x:x.sentence_index)
  646. list_sentence.sort(key=lambda x:x.sentence_index)
  647. p_entitys = 0
  648. p_sentences = 0
  649. while(p_entitys<len(list_entity)):
  650. entity = list_entity[p_entitys]
  651. if entity.entity_type in ['org','company']:
  652. while(p_sentences<len(list_sentence)):
  653. sentence = list_sentence[p_sentences]
  654. if entity.doc_id==sentence.doc_id and entity.sentence_index==sentence.sentence_index:
  655. # text_list.append(sentence.sentence_text[max(0, entity.wordOffset_begin-13):entity.wordOffset_end+10])
  656. text_sen = sentence.sentence_text
  657. b = entity.wordOffset_begin
  658. e = entity.wordOffset_end
  659. text_list.append((text_sen[max(0, b-13):b], text_sen[b:e], text_sen[e:e+15]))
  660. # item_x = embedding(spanWindow(tokens=sentence.tokens,begin_index=entity.begin_index,end_index=entity.end_index,size=settings.MODEL_ROLE_INPUT_SHAPE[1]),shape=settings.MODEL_ROLE_INPUT_SHAPE)
  661. # item_x = self.model_role.encode(tokens=sentence.tokens,begin_index=entity.begin_index,end_index=entity.end_index,entity_text=entity.entity_text)
  662. item_x = self.model_role.encode_word(sentence_text=text_sen, begin_index=entity.wordOffset_begin, end_index=entity.wordOffset_end, size=30)
  663. data_x.append(item_x)
  664. points_entitys.append(entity)
  665. break
  666. p_sentences += 1
  667. p_entitys += 1
  668. if len(points_entitys)==0:
  669. return None
  670. return [data_x,points_entitys, text_list]
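# Returns [data_x, points_entitys, text_list]; every text_list entry is a
# (front, middle, behind) character window around the entity (about 13 chars
# of left context and 15 of right context), which predict_role later unpacks
# for its rule-based corrections.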
  671. def search_money_data(self,list_sentences,list_entitys):
'''
@summary: build the input data of the money model from the sentence list and entity list
@param:
list_sentences: sentences of the document
list_entitys: entities of the document
@return: input data of the money model
'''
  679. text_list = []
  680. data_x = []
  681. points_entitys = []
  682. for list_entity,list_sentence in zip(list_entitys,list_sentences):
  683. list_entity.sort(key=lambda x:x.sentence_index)
  684. list_sentence.sort(key=lambda x:x.sentence_index)
  685. p_entitys = 0
  686. while(p_entitys<len(list_entity)):
  687. entity = list_entity[p_entitys]
  688. if entity.entity_type=="money":
  689. p_sentences = 0
  690. while(p_sentences<len(list_sentence)):
  691. sentence = list_sentence[p_sentences]
  692. if entity.doc_id==sentence.doc_id and entity.sentence_index==sentence.sentence_index:
  693. # text_list.append(sentence.sentence_text[max(0, entity.wordOffset_begin - 13):entity.wordOffset_begin])
  694. text_sen = sentence.sentence_text
  695. b = entity.wordOffset_begin
  696. e = entity.wordOffset_end
  697. text_list.append((text_sen[max(0, b - 13):b], text_sen[b:e], text_sen[e:e + 10]))
  698. #item_x = embedding(spanWindow(tokens=sentence.tokens,begin_index=entity.begin_index,end_index=entity.end_index,size=settings.MODEL_MONEY_INPUT_SHAPE[1]),shape=settings.MODEL_MONEY_INPUT_SHAPE)
  699. #item_x = embedding_word(spanWindow(tokens=sentence.tokens, begin_index=entity.begin_index, end_index=entity.end_index, size=10, center_include=True, word_flag=True),shape=settings.MODEL_MONEY_INPUT_SHAPE)
  700. item_x = self.model_money.encode(tokens=sentence.tokens,begin_index=entity.begin_index,end_index=entity.end_index)
  701. data_x.append(item_x)
  702. points_entitys.append(entity)
  703. break
  704. p_sentences += 1
  705. p_entitys += 1
  706. if len(points_entitys)==0:
  707. return None
  708. return [data_x,points_entitys, text_list]
  709. def predict_role(self,list_sentences, list_entitys):
  710. datas = self.search_role_data(list_sentences, list_entitys)
  711. if datas is None:
  712. return
  713. points_entitys = datas[1]
  714. text_list = datas[2]
  715. if USE_PAI_EAS:
  716. _data = datas[0]
  717. _data = np.transpose(np.array(_data),(1,0,2))
  718. request = tf_predict_pb2.PredictRequest()
  719. request.inputs["input0"].dtype = tf_predict_pb2.DT_FLOAT
  720. request.inputs["input0"].array_shape.dim.extend(np.shape(_data[0]))
  721. request.inputs["input0"].float_val.extend(np.array(_data[0],dtype=np.float64).reshape(-1))
  722. request.inputs["input1"].dtype = tf_predict_pb2.DT_FLOAT
  723. request.inputs["input1"].array_shape.dim.extend(np.shape(_data[1]))
  724. request.inputs["input1"].float_val.extend(np.array(_data[1],dtype=np.float64).reshape(-1))
  725. request.inputs["input2"].dtype = tf_predict_pb2.DT_FLOAT
  726. request.inputs["input2"].array_shape.dim.extend(np.shape(_data[2]))
  727. request.inputs["input2"].float_val.extend(np.array(_data[2],dtype=np.float64).reshape(-1))
  728. request_data = request.SerializeToString()
  729. list_outputs = ["outputs"]
  730. _result = vpc_requests(role_url, role_authorization, request_data, list_outputs)
  731. if _result is not None:
  732. predict_y = _result["outputs"]
  733. else:
  734. predict_y = self.model_role.predict(datas[0])
  735. else:
  736. predict_y = self.model_role.predict(np.array(datas[0],dtype=np.float64))
  737. for i in range(len(predict_y)):
  738. entity = points_entitys[i]
  739. label = np.argmax(predict_y[i])
  740. values = predict_y[i]
  741. # text = text_list[i]
  742. text_tup = text_list[i]
  743. front, middle, behind = text_tup
  744. whole = "".join(text_tup)
  745. # print('模型预测角色:', front, entity.entity_text, behind,label, values)
  746. # if label in [0, 1, 2, 3, 4]:
  747. # self.role_file.write("{0}#split#{1}#split#{2}#split#{3}#split#{4}\n".format(front, entity.entity_text, behind,label, entity.doc_id))
  748. if re.search('^以\d+[\d,.]+万?元中标', behind) and label != 2: # 优化244261884预测错误 大连长之琳科技发展有限公司以7.63277万元中标
  749. label = 2
  750. values[label] = 0.8
  751. if label in [0, 1, 2, 3, 4] and values[label] < 0.5: # 小于阈值的设为其他,让后面的规则召回重新判断
  752. # print(' # 小于阈值的设为其他,让后面的规则召回重新判断', values[label])
  753. label = 5
  754. elif label in [2,3,4] and re.search('序号:\d+,\w{,2}候选', front):
  755. label = 5
  756. elif label == 0:
  757. if re.search('拟邀请$|受邀谈判方', front):
  758. label = 2
  759. values[label] = 0.501
  760. elif re.search('(发布(人|方|单位|机构|组织|用户|业主|主体|部门|公司|企业)|组织(单位|人|方|机构)?|(采购|招标|发布)机构)(名称)?[是为:]+', front) and is_agency(entity.entity_text):
  761. label = 1
  762. values[label] = 0.501
  763. elif re.search('受托人((盖章))?:$', front):
  764. label = 1
  765. values[label] = 0.501
  766. elif re.search('采用$|异议受理部门|本次招标有:$|直购企业:$', front): # 368177736 因本项目招标采用广西壮族自治区公共资源交易平台系统- 标公告,本次招标有:内黄县汇融钢材有限公司、安阳正元建筑工程有限公司、内黄县鸿业贸易有限责任公司三家合格供应商进行报名投标。 438880541 直购企业可能为多个,其中一个中标
  767. label = 5
  768. elif re.search(',单位名称:$', front) and re.search('^,(中标|中选)价格', behind):
  769. label = 2
  770. values[label] = 0.501
  771. elif label == 2:
  772. if re.search('中标单位和.{,25}签订合同', whole):
  773. label = 0
  774. values[label] = 0.501
  775. elif re.search('尊敬的供应商:.{,25}我公司', whole):
  776. label = 0
  777. values[label] = 0.801
  778. elif re.search('尊敬的供应商:$', front):
  779. label = 0
  780. values[label] = 0.501
  781. elif re.search('第[4-9四五六]中标候选人|(提交单位|竞投单位):$', front): #修复第4以上的预测错为中标人
  782. label = 5
  783. values[label] = 0.5
  784. elif re.search('(排名|排序|名次):([4-9]|\d{2,}),', front) or re.search('序号:\d+,(供应商|投标|候选)', front): # 293225236 附件中 排名预测错误
  785. values[2] = 0.5
  786. label = 5
  787. elif re.search('税费', front) and re.search('^承担', behind):
  788. label = 5
  789. elif re.search('第一候补|第一后备|备选', front):
  790. label = 3
  791. values[label] = 0.6
  792. elif re.search('^放弃中标资格|是否中标:否|^(中标|成交)(公示|公告)', behind):
  793. values[2] = 0.5
  794. label = 5
  795. elif re.search('^,?(投标报价|(资格性审查:|符合性审查:)?(不通过|不符合))', behind) and re.search('中标|成交|中选|排名|排序|名次|第[一1]名', front)==None:
  796. values[2] = 0.5
  797. label = 5
  798. elif re.search('(承包权人|帐户名称|债务人|推荐预审合格投标人名单):$|确定为标的的受让方,$|[主次出]入口?,?$|确定(项目|\w{,2})成交供应商,$', front): # 234501112 民币元,序号:1,债务人: 东营市海宁工贸有限责任公司 ,债权本金: 262414286 八、中标后签约单位,合同签约单位:
  799. label = 5
  800. elif re.search(',来源:$', front) and re.search('^,', behind): # 修复 472062585 项目采购-关于定制手机询比价采购中标公告,来源:深圳市网联安瑞网络科技有限公司 预测为中标
  801. label = 0
  802. values[label] = 0.5
  803. elif re.search('合同供方:?$|合同签约单位', front):
  804. label = 0
  805. values[label] = 0.5
  806. elif re.search('现由$', front) and re.search('^作为\d个单位的牵头(单位|公司)?', behind): # 修复 469369884 站源批量预测错误 现由第七合同段保利长大工程有限公司作为6个单位的牵头单位,
  807. label = 5
  808. elif re.search('是否中标:是,供应商', front) and label == 5:
  809. label = 2
  810. values[label] = 0.9
  811. elif label == 1:
  812. if re.search('委托(单位|人|方)[是为:]+',front) and re.search('受委托(单位|人|方)[是为:]+', front)==None:
  813. label = 0
  814. values[label] = 0.501
  815. elif re.search('([,。:]|^)(第一)?(服务|中选|中标)(中介服务|代理)?(公司|机构)(名称)?', front):
  816. label = 2
  817. values[label] = 0.501
  818. elif re.search('在中介超市委托$', front) and re.search('^负责', behind):
  819. label = 2
  820. values[label] = 0.501
  821. elif re.search('^:受', behind): # 354009560 附件格式问题 ,中选中介服务机构通知书,编号:HZ2305120541,中汕项目管理有限公司:受惠东县人民政府大岭街道办事处委托
  822. label = 5
  823. elif re.search('发布机构', front) and not is_agency(entity.entity_text):
  824. label = 0
  825. values[label] = 0.501
  826. elif re.search('开户银行:$|环境影响评价机构|环评机构|评价机构', front): # 368214232 法定代表人:委托代理人:开户银行:鸡东建行
  827. label = 5
  828. elif re.search('委托$', front) and re.search('^(抽样|送检|看样)', behind):
  829. label = 5
  830. elif re.search('推荐入围的招标代理单位:$', front): # 20240709 修复302505502预测错为代理
  831. label = 2
  832. values[label] = 0.501
  833. elif label in [3,4]:
  834. if re.search('第[二三]分(公司|店),中标(人|供应商|单位|公司):$', front):
  835. label = 2
  836. values[label] = 0.7
  837. elif re.search('决定选择第[二三]名', front) and re.search('^作为(中标|成交)(人|供应商|单位|公司)', behind):
  838. label = 2
  839. values[label] = 0.8
  840. elif re.search('\d+\.\d+,供应商名称:', front): # 341385226 30.2,供应商名称: 预测为第二名
  841. label = 2
  842. values[label] = 0.501
  843. elif re.search('\d+\.\d+[,、]?(中标|成交)候选人|[;,][23]、(中标|中选|成交)候选人:', front):
  844. label = 5
  845. values[label] = 0.501
  846. elif re.search('第一名:$', front):
  847. label = 2
  848. values[label] = 0.7
  849. elif re.search('(中标|成交)通知书[,:]$', front) and re.search('^:', behind) and label != 2:
  850. label = 2
  851. values[label] = 0.8
  852. elif label==5 and re.search('^拟(招标|采购)一批|^须购置一批', front):
  853. label = 0
  854. values[label] = 0.7
  855. entity.set_Role(label, values)
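# Label convention assumed here, mirroring the mapping used in RoleRulePredictor
# below: 0 tenderee, 1 agency, 2 winning bidder, 3/4 second/third candidate,
# 5 other. Threshold sketch with made-up scores: predict_y[i] =
# [0.1, 0.1, 0.45, 0.15, 0.1, 0.1] has argmax 2, but values[2] < 0.5, so the
# entity is demoted to label 5 and left for the rule-based recall to decide.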
  856. def predict_money(self,list_sentences,list_entitys):
  857. datas = self.search_money_data(list_sentences, list_entitys)
  858. if datas is None:
  859. return
  860. points_entitys = datas[1]
  861. _data = datas[0]
  862. text_list = datas[2]
  863. if USE_PAI_EAS:
  864. _data = np.transpose(np.array(_data),(1,0,2,3))
  865. request = tf_predict_pb2.PredictRequest()
  866. request.inputs["input0"].dtype = tf_predict_pb2.DT_FLOAT
  867. request.inputs["input0"].array_shape.dim.extend(np.shape(_data[0]))
  868. request.inputs["input0"].float_val.extend(np.array(_data[0],dtype=np.float64).reshape(-1))
  869. request.inputs["input1"].dtype = tf_predict_pb2.DT_FLOAT
  870. request.inputs["input1"].array_shape.dim.extend(np.shape(_data[1]))
  871. request.inputs["input1"].float_val.extend(np.array(_data[1],dtype=np.float64).reshape(-1))
  872. request.inputs["input2"].dtype = tf_predict_pb2.DT_FLOAT
  873. request.inputs["input2"].array_shape.dim.extend(np.shape(_data[2]))
  874. request.inputs["input2"].float_val.extend(np.array(_data[2],dtype=np.float64).reshape(-1))
  875. request_data = request.SerializeToString()
  876. list_outputs = ["outputs"]
  877. _result = vpc_requests(money_url, money_authorization, request_data, list_outputs)
  878. if _result is not None:
  879. predict_y = _result["outputs"]
  880. else:
  881. predict_y = self.model_money.predict(_data)
  882. else:
  883. predict_y = self.model_money.predict(_data)
  884. for i in range(len(predict_y)):
  885. entity = points_entitys[i]
  886. label = np.argmax(predict_y[i])
  887. values = predict_y[i]
  888. # text = text_list[i]
  889. text_tup = text_list[i]
  890. front, middle, behind = text_tup
  891. whole = "".join(text_tup)
  892. # print('金额: ', entity.entity_text, label, values, front, middle, behind)
  893. # if label in [0, 1]:
  894. # self.money_file.write("{0} {1} {2} {3}\n".format(front, entity.entity_text, behind, label))
  895. if label in [0, 1] and values[label] < 0.5: # 小于阈值的设为其他金额,让后面的规则召回重新判断
  896. # print('模型预测金额: ', entity.entity_text, label, values, front, middle, behind)
  897. label = 2
  898. elif label == 1: # 错误中标金额处理
  899. if re.search('[::,。](总金额|总价|单价|合价)((万?元))?:?$', front) and re.search('(中标|投标|成交|中价)', front)==None:
  900. values[label] = 0.5
elif re.search('[\+=][((]?(中标|成交)(金?额|价格?)|[若如]果?(中标|成交)(金?额|价格?)为?', front): # 处理例如 241561780 如中标金额为 500-1000万元,则代理服务费=100 万元×0.5%+400万元×0.35%+(中标金额-500)万元
  902. values[label] = 0.49
  903. elif re.search('^(以[上下])?按[\d.%]+收取|^及?以[上下]|^[()]?[+×*-][\d.%]+', behind):
  904. values[label] = 0.49
  905. elif re.search('(含|在|包括|[大小等高低]于|达到)$|[\d.%]+[+×*-]$', front):
  906. values[label] = 0.49
  907. elif entity.notes == '单价' and float(entity.entity_text)<5000:
  908. label = 2
  909. elif label ==0: # 错误招标金额处理
  910. if entity.notes in ["投资", "总投资","工程造价"] or re.search('最低限价:?$|注册资本', front) or re.search('服务内容:([\d,.]+万?亿?元?-?)$', front):
  911. values[label] = 0.49
  912. elif re.search('^(以[上下])?按[\d.%]+收取|^及?以[上下]|^[()]?[+×*-][\d.%]+|(含)', behind):
  913. values[label] = 0.49
  914. elif re.search('(含|在|包括|[大小等高低]于|如预算金额为)$|[\d.%]+((含))?[+×*-]$', front):
  915. values[label] = 0.49
  916. elif entity.notes == '单价' and float(entity.entity_text)<5000:
  917. label = 2
  918. elif re.search('报价:预估不?含税总价[为:]$', front) and (label != 1 or values[label]<0.5):
  919. label = 1
  920. values[label] = 0.8
  921. entity.set_Money(label, values)
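# Money labels assumed from the comments above: 0 tender/budget amount,
# 1 winning-bid amount, 2 other. Confidence-gate sketch with made-up scores:
# predict_y[i] = [0.2, 0.48, 0.32] has argmax 1, but values[1] < 0.5, so the
# amount is reclassified as 2 and left to the later regex recall (see
# pattern_money_tenderee / pattern_money_tenderer in RoleRulePredictor).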
  922. def correct_money_by_rule(self, title, list_entitys, list_articles):
  923. if (len(re.findall('监理|施工|设计|勘察', title)) == 1 and re.search('施工|总承包|epc|EPC', title) == None) or re.search('服务金额', list_articles[0].content):
  924. # keyword = re.search('监理|设计|勘察', title).group(0)
  925. for list_entity in list_entitys:
  926. for _entity in list_entity:
  927. # print('keyword:',keyword, '_entity.notes :',_entity.notes)
  928. # if _entity.entity_type == "money" and _entity.notes == keyword and _entity.label == 2:
  929. if _entity.entity_type == "money" and _entity.notes == '招标或中标金额' and _entity.label == 2:
  930. # if channel_dic['docchannel'] == "招标公告":
  931. if re.search('中标|成交|中选|中价|中租|结果|入围', title + list_articles[0].content[:100]) == None:
  932. _entity.values[0] = 0.51
  933. _entity.set_Money(0, _entity.values) # 2021/11/18 根据公告类别把费用改为招标或中投标金额
  934. else:
  935. _entity.values[1] = 0.51
  936. _entity.set_Money(1, _entity.values)
  937. def predict(self,list_sentences,list_entitys):
  938. self.predict_role(list_sentences,list_entitys)
  939. self.predict_money(list_sentences,list_entitys)
  940. #联系人模型
  941. class EPCPredict():
  942. def __init__(self,config=None):
  943. self.model_person = Model_person_classify(config=config)
  944. def search_person_data(self,list_sentences,list_entitys):
'''
@summary: build the input data of the contact-person model from the sentence list and entity list
@param:
list_sentences: sentences of the document
list_entitys: entities of the document
@return: input data of the contact-person model
'''
  952. data_x = []
  953. points_entitys = []
  954. pre_texts = []
  955. for list_entity,list_sentence in zip(list_entitys,list_sentences):
  956. p_entitys = 0
  957. dict_index_sentence = {}
  958. for _sentence in list_sentence:
  959. dict_index_sentence[_sentence.sentence_index] = _sentence
  960. _list_entity = [entity for entity in list_entity if entity.entity_type=="person"]
  961. while(p_entitys<len(_list_entity)):
  962. entity = _list_entity[p_entitys]
  963. if entity.entity_type=="person":
  964. sentence = dict_index_sentence[entity.sentence_index]
  965. item_x = self.model_person.encode(tokens=sentence.tokens,begin_index=entity.begin_index,end_index=entity.end_index)
  966. data_x.append(item_x)
  967. points_entitys.append(entity)
  968. pre_texts.append(spanWindow(tokens=sentence.tokens,begin_index=entity.begin_index,end_index=entity.end_index,size=20))
  969. p_entitys += 1
  970. if len(points_entitys)==0:
  971. return None
  972. # return [data_x,points_entitys,dianhua]
  973. return [data_x,points_entitys, pre_texts]
  974. def predict_person(self,list_sentences, list_entitys):
  975. datas = self.search_person_data(list_sentences, list_entitys)
  976. if datas is None:
  977. return
  978. points_entitys = datas[1]
  979. pre_texts = datas[2]
  980. # phone = datas[2]
  981. if USE_PAI_EAS:
  982. _data = datas[0]
  983. _data = np.transpose(np.array(_data),(1,0,2,3))
  984. request = tf_predict_pb2.PredictRequest()
  985. request.inputs["input0"].dtype = tf_predict_pb2.DT_FLOAT
  986. request.inputs["input0"].array_shape.dim.extend(np.shape(_data[0]))
  987. request.inputs["input0"].float_val.extend(np.array(_data[0],dtype=np.float64).reshape(-1))
  988. request.inputs["input1"].dtype = tf_predict_pb2.DT_FLOAT
  989. request.inputs["input1"].array_shape.dim.extend(np.shape(_data[1]))
  990. request.inputs["input1"].float_val.extend(np.array(_data[1],dtype=np.float64).reshape(-1))
  991. request_data = request.SerializeToString()
  992. list_outputs = ["outputs"]
  993. _result = vpc_requests(person_url, person_authorization, request_data, list_outputs)
  994. if _result is not None:
  995. predict_y = _result["outputs"]
  996. else:
  997. predict_y = self.model_person.predict(datas[0])
  998. else:
  999. predict_y = self.model_person.predict(datas[0])
  1000. # assert len(predict_y)==len(points_entitys)==len(phone)
  1001. assert len(predict_y)==len(points_entitys)
  1002. for i in range(len(predict_y)):
  1003. entity = points_entitys[i]
  1004. label = np.argmax(predict_y[i])
  1005. pre_text = ''.join(pre_texts[i][0])
  1006. # print('pre_text', pre_text)
  1007. if label==0 and re.search('(谈判|磋商|询价|资格审查|评审专家|(评选|议标|评标|评审)委员会?|专家|评委)(小?组|小?组成员)?(成员|名单)[:,](\w{2,4}((组长)|(成员))?[、,,])*$', pre_text):
  1008. # print(entity.entity_text, re.search('(谈判|磋商|询价|资格审查|评审专家|(评选|议标|评标|评审)委员会?|专家|评委)(小?组|小?组成员)?(成员|名单)[:,](\w{2,4}((组长)|(成员))?[、,,])*$', pre_text).group(0))
  1009. label = 4
  1010. values = []
  1011. for item in predict_y[i]:
  1012. values.append(item)
  1013. # phone_number = phone[i]
  1014. # entity.set_Person(label,values,phone_number)
  1015. entity.set_Person(label,values,[])
  1016. # 为联系人匹配电话
  1017. # self.person_search_phone(list_sentences, list_entitys)
  1018. def person_search_phone(self,list_sentences, list_entitys):
  1019. def phoneFromList(phones):
  1020. # for phone in phones:
  1021. # if len(phone)==11:
  1022. # return re.sub('电话[:|:]|联系方式[:|:]','',phone)
  1023. return re.sub('电话[:|:]|联系方式[:|:]', '', phones[0])
  1024. for list_entity, list_sentence in zip(list_entitys, list_sentences):
  1025. # p_entitys = 0
  1026. # p_sentences = 0
  1027. #
  1028. # key_word = re.compile('电话[:|:].{0,4}\d{7,12}|联系方式[:|:].{0,4}\d{7,12}')
  1029. # # phone = re.compile('1[3|4|5|7|8][0-9][-—-]?\d{4}[-—-]?\d{4}|\d{3,4}[-—-]\d{7,8}/\d{3,8}|\d{3,4}[-—-]\d{7,8}转\d{1,4}|\d{3,4}[-—-]\d{7,8}|[\(|\(]0\d{2,3}[\)|\)]-?\d{7,8}-?\d{,4}') # 联系电话
  1030. # # 2020/11/25 增加发现的号码段
  1031. # phone = re.compile('1[3|4|5|6|7|8|9][0-9][-—-]?\d{4}[-—-]?\d{4}|'
  1032. # '\d{3,4}[-—-][1-9]\d{6,7}/\d{3,8}|'
  1033. # '\d{3,4}[-—-]\d{7,8}转\d{1,4}|'
  1034. # '\d{3,4}[-—-]?[1-9]\d{6,7}|'
  1035. # '[\(|\(]0\d{2,3}[\)|\)]-?\d{7,8}-?\d{,4}|'
  1036. # '[1-9]\d{6,7}') # 联系电话
  1037. # dict_index_sentence = {}
  1038. # for _sentence in list_sentence:
  1039. # dict_index_sentence[_sentence.sentence_index] = _sentence
  1040. #
  1041. # dict_context_itemx = {}
  1042. # last_person = "####****++++$$^"
  1043. # last_person_phone = "####****++++$^"
  1044. # _list_entity = [entity for entity in list_entity if entity.entity_type == "person"]
  1045. # while (p_entitys < len(_list_entity)):
  1046. # entity = _list_entity[p_entitys]
  1047. # if entity.entity_type == "person" and entity.label in [1,2,3]:
  1048. # sentence = dict_index_sentence[entity.sentence_index]
  1049. # # item_x = embedding(spanWindow(tokens=sentence.tokens,begin_index=entity.begin_index,end_index=entity.end_index,size=settings.MODEL_PERSON_INPUT_SHAPE[1]),shape=settings.MODEL_PERSON_INPUT_SHAPE)
  1050. #
  1051. # # s = spanWindow(tokens=sentence.tokens,begin_index=entity.begin_index,end_index=entity.end_index,size=20)
  1052. #
  1053. # # 2021/5/8 取上下文的句子,解决表格处理的分句问题
  1054. # left_sentence = dict_index_sentence.get(entity.sentence_index - 1)
  1055. # left_sentence_tokens = left_sentence.tokens if left_sentence else []
  1056. # right_sentence = dict_index_sentence.get(entity.sentence_index + 1)
  1057. # right_sentence_tokens = right_sentence.tokens if right_sentence else []
  1058. # entity_beginIndex = entity.begin_index + len(left_sentence_tokens)
  1059. # entity_endIndex = entity.end_index + len(left_sentence_tokens)
  1060. # context_sentences_tokens = left_sentence_tokens + sentence.tokens + right_sentence_tokens
  1061. # s = spanWindow(tokens=context_sentences_tokens, begin_index=entity_beginIndex,
  1062. # end_index=entity_endIndex, size=20)
  1063. #
  1064. # _key = "".join(["".join(x) for x in s])
  1065. # if _key in dict_context_itemx:
  1066. # _dianhua = dict_context_itemx[_key][0]
  1067. # else:
  1068. # s1 = ''.join(s[1])
  1069. # # s1 = re.sub(',)', '-', s1)
  1070. # s1 = re.sub('\s', '', s1)
  1071. # have_key = re.findall(key_word, s1)
  1072. # have_phone = re.findall(phone, s1)
  1073. # s0 = ''.join(s[0])
  1074. # # s0 = re.sub(',)', '-', s0)
  1075. # s0 = re.sub('\s', '', s0)
  1076. # have_key2 = re.findall(key_word, s0)
  1077. # have_phone2 = re.findall(phone, s0)
  1078. #
  1079. # s3 = ''.join(s[1])
  1080. # # s0 = re.sub(',)', '-', s0)
  1081. # s3 = re.sub(',|,|\s', '', s3)
  1082. # have_key3 = re.findall(key_word, s3)
  1083. # have_phone3 = re.findall(phone, s3)
  1084. #
  1085. # s4 = ''.join(s[0])
  1086. # # s0 = re.sub(',)', '-', s0)
  1087. # s4 = re.sub(',|,|\s', '', s0)
  1088. # have_key4 = re.findall(key_word, s4)
  1089. # have_phone4 = re.findall(phone, s4)
  1090. #
  1091. # _dianhua = ""
  1092. # if have_phone:
  1093. # if entity.entity_text != last_person and s0.find(last_person) != -1 and s1.find(
  1094. # last_person_phone) != -1:
  1095. # if len(have_phone) > 1:
  1096. # _dianhua = phoneFromList(have_phone[1:])
  1097. # else:
  1098. # _dianhua = phoneFromList(have_phone)
  1099. # elif have_key:
  1100. # if entity.entity_text != last_person and s0.find(last_person) != -1 and s1.find(
  1101. # last_person_phone) != -1:
  1102. # if len(have_key) > 1:
  1103. # _dianhua = phoneFromList(have_key[1:])
  1104. # else:
  1105. # _dianhua = phoneFromList(have_key)
  1106. # elif have_phone2:
  1107. # if entity.entity_text != last_person and s0.find(last_person) != -1 and s0.find(
  1108. # last_person_phone) != -1:
  1109. # if len(have_phone2) > 1:
  1110. # _dianhua = phoneFromList(have_phone2[1:])
  1111. # else:
  1112. # _dianhua = phoneFromList(have_phone2)
  1113. # elif have_key2:
  1114. # if entity.entity_text != last_person and s0.find(last_person) != -1 and s0.find(
  1115. # last_person_phone) != -1:
  1116. # if len(have_key2) > 1:
  1117. # _dianhua = phoneFromList(have_key2[1:])
  1118. # else:
  1119. # _dianhua = phoneFromList(have_key2)
  1120. # elif have_phone3:
  1121. # if entity.entity_text != last_person and s4.find(last_person) != -1 and s3.find(
  1122. # last_person_phone) != -1:
  1123. # if len(have_phone3) > 1:
  1124. # _dianhua = phoneFromList(have_phone3[1:])
  1125. # else:
  1126. # _dianhua = phoneFromList(have_phone3)
  1127. # elif have_key3:
  1128. # if entity.entity_text != last_person and s4.find(last_person) != -1 and s3.find(
  1129. # last_person_phone) != -1:
  1130. # if len(have_key3) > 1:
  1131. # _dianhua = phoneFromList(have_key3[1:])
  1132. # else:
  1133. # _dianhua = phoneFromList(have_key3)
  1134. # elif have_phone4:
  1135. # if entity.entity_text != last_person and s4.find(last_person) != -1 and s4.find(
  1136. # last_person_phone) != -1:
  1137. # if len(have_phone4) > 1:
  1138. # _dianhua = phoneFromList(have_phone4)
  1139. # else:
  1140. # _dianhua = phoneFromList(have_phone4)
  1141. # elif have_key4:
  1142. # if entity.entity_text != last_person and s4.find(last_person) != -1 and s4.find(
  1143. # last_person_phone) != -1:
  1144. # if len(have_key4) > 1:
  1145. # _dianhua = phoneFromList(have_key4)
  1146. # else:
  1147. # _dianhua = phoneFromList(have_key4)
  1148. # else:
  1149. # _dianhua = ""
  1150. # # dict_context_itemx[_key] = [item_x, _dianhua]
  1151. # dict_context_itemx[_key] = [_dianhua]
  1152. # # points_entitys.append(entity)
  1153. # # dianhua.append(_dianhua)
  1154. # last_person = entity.entity_text
  1155. # if _dianhua:
  1156. # # 更新联系人entity联系方式(person_phone)
  1157. # entity.person_phone = _dianhua
  1158. # last_person_phone = _dianhua
  1159. # else:
  1160. # last_person_phone = "####****++++$^"
  1161. # p_entitys += 1
  1162. from scipy.optimize import linear_sum_assignment
  1163. from BiddingKG.dl.interface.Entitys import Match
  1164. def dispatch(match_list):
  1165. main_roles = list(set([match.main_role for match in match_list]))
  1166. attributes = list(set([match.attribute for match in match_list]))
  1167. label = np.zeros(shape=(len(main_roles), len(attributes)))
  1168. for match in match_list:
  1169. main_role = match.main_role
  1170. attribute = match.attribute
  1171. value = match.value
  1172. label[main_roles.index(main_role), attributes.index(attribute)] = value + 10000
  1173. # print(label)
graph = -label
# KM (Hungarian) assignment on the negated score matrix
row, col = linear_sum_assignment(graph)
max_dispatch = [(i, j) for i, j, value in zip(row, col, graph[row, col]) if value]
  1178. return [Match(main_roles[row], attributes[col]) for row, col in max_dispatch]
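# Usage sketch for dispatch() with hypothetical entities:
#   match_list = [Match(personA, phone1, -0.005), Match(personA, phone2, -50),
#                 Match(personB, phone2, -0.005)]
# The assignment maximises the summed (value + 10000) weights, so personA is
# paired with phone1 and personB with phone2; linear_sum_assignment returns a
# one-to-one matching, so each person and each phone is used at most once.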
  1179. # km算法
  1180. key_word = re.compile('((?:电话|联系方式|联系人).{0,4}?)(\d{7,12})')
  1181. phone = re.compile('1[3|4|5|6|7|8|9][0-9][-—-―]?\d{4}[-—-―]?\d{4}|'
  1182. '\+86.?1[3|4|5|6|7|8|9]\d{9}|'
  1183. '0\d{2,3}[-—-―][1-9]\d{6,7}/[1-9]\d{6,10}|'
  1184. '0\d{2,3}[-—-―]\d{7,8}转\d{1,4}|'
  1185. '0\d{2,3}[-—-―]?[1-9]\d{6,7}|'
  1186. '[\(|\(]0\d{2,3}[\)|\)]-?\d{7,8}-?\d{,4}|'
  1187. '[1-9]\d{6,7}')
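# Formats the phone pattern above is meant to cover (illustrative strings):
#   13812345678 or 138-1234-5678      mobile numbers with optional separators
#   +86 13812345678                   mobile with country code
#   010-12345678转803                  landline with extension
#   0755-1234567 or (0755)1234567     landline with area code
#   12345678                          bare 7-8 digit local number
# key_word additionally recalls digit runs appearing within a few characters
# after 电话/联系方式/联系人.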
  1188. phone_entitys = []
  1189. for _sentence in list_sentence:
  1190. sentence_text = _sentence.sentence_text
  1191. res_set = set()
  1192. for i in re.finditer(phone,sentence_text):
  1193. res_set.add((i.group(),i.start(),i.end()))
  1194. for i in re.finditer(key_word,sentence_text):
  1195. res_set.add((i.group(2),i.start()+len(i.group(1)),i.end()))
  1196. for item in list(res_set):
  1197. phone_left = sentence_text[max(0,item[1]-10):item[1]]
  1198. phone_right = sentence_text[item[2]:item[2]+8]
  1199. # 排除传真号 和 其它错误项
  1200. if re.search("传,?真|信,?箱|邮,?箱",phone_left):
  1201. if not re.search("电,?话",phone_left):
  1202. continue
  1203. if re.search("帐,?号|编,?号|报,?价|证,?号|价,?格|[\((]万?元[\))]",phone_left):
  1204. continue
  1205. if re.search("[.,]\d{2,}",phone_right):
  1206. continue
  1207. _entity = Entity(_sentence.doc_id, None, item[0], "phone", _sentence.sentence_index, None, None,item[1], item[2],in_attachment=_sentence.in_attachment)
  1208. phone_entitys.append(_entity)
  1209. person_entitys = []
  1210. for entity in list_entity:
  1211. if entity.entity_type == "person":
  1212. entity.person_phone = ""
  1213. person_entitys.append(entity)
  1214. _list_entity = phone_entitys + person_entitys
  1215. _list_entity = sorted(_list_entity,key=lambda x:(x.sentence_index,x.wordOffset_begin))
  1216. words_num_dict = dict()
  1217. last_words_num = 0
  1218. list_sentence = sorted(list_sentence, key=lambda x: x.sentence_index)
  1219. for sentence in list_sentence:
  1220. _index = sentence.sentence_index
  1221. if _index == 0:
  1222. words_num_dict[_index] = 0
  1223. else:
  1224. words_num_dict[_index] = words_num_dict[_index - 1] + last_words_num
  1225. last_words_num = len(sentence.sentence_text)
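# words_num_dict maps each sentence_index to the cumulative character offset of
# that sentence's start, so offsets from different sentences become comparable.
# Sketch (hypothetical lengths): a three-sentence document whose first two
# sentences have lengths 40 and 25 yields words_num_dict = {0: 0, 1: 40, 2: 65},
# and a global position is words_num_dict[sentence_index] + the in-sentence offset.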
  1226. match_list = []
  1227. for index in range(len(_list_entity)):
  1228. entity = _list_entity[index]
  1229. if entity.entity_type=="person" and entity.label in [1,2,3]:
  1230. match_nums = 0
  1231. for after_index in range(index + 1, min(len(_list_entity), index + 5)):
  1232. after_entity = _list_entity[after_index]
  1233. if after_entity.entity_type=="phone":
  1234. sentence_distance = after_entity.sentence_index - entity.sentence_index
  1235. distance = (words_num_dict[after_entity.sentence_index] + after_entity.wordOffset_begin) - (
  1236. words_num_dict[entity.sentence_index] + entity.wordOffset_end)
  1237. if sentence_distance < 2 and distance < 50:
  1238. value = (-1 / 2 * (distance ** 2)) / 10000
  1239. match_list.append(Match(entity, after_entity, value))
  1240. match_nums += 1
  1241. else:
  1242. break
  1243. if after_entity.entity_type=="person":
  1244. if after_entity.label not in [1,2,3]:
  1245. break
  1246. if not match_nums:
  1247. for previous_index in range(index-1, max(0,index-5), -1):
  1248. previous_entity = _list_entity[previous_index]
  1249. if previous_entity.entity_type == "phone":
  1250. sentence_distance = entity.sentence_index - previous_entity.sentence_index
  1251. distance = (words_num_dict[entity.sentence_index] + entity.wordOffset_begin) - (
  1252. words_num_dict[previous_entity.sentence_index] + previous_entity.wordOffset_end)
  1253. if sentence_distance < 1 and distance<30:
  1254. # 前向 没有 /10000
  1255. value = (-1 / 2 * (distance ** 2))
  1256. match_list.append(Match(entity, previous_entity, value))
  1257. else:
  1258. break
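# Scoring note, derived from the two formulas above: a phone found after the
# person gets value = -distance**2/2/10000 while one found before gets
# -distance**2/2, so at the same distance of 10 characters the forward candidate
# scores -0.005 versus -50 and wins in dispatch(); the backward scan is also
# only attempted when no forward match was found.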
  1259. result = dispatch(match_list)
  1260. for match in result:
  1261. entity = match.main_role
  1262. # 更新 list_entity
  1263. entity_index = list_entity.index(entity)
  1264. list_entity[entity_index].person_phone = match.attribute.entity_text
  1265. def predict(self,list_sentences,list_entitys):
  1266. self.predict_person(list_sentences,list_entitys)
  1267. #表格预测
  1268. class FormPredictor():
  1269. def __init__(self,lazyLoad=getLazyLoad(),config=None):
  1270. self.model_file_line = os.path.dirname(__file__)+"/../form/model/model_form.model_line.hdf5"
  1271. self.model_file_item = os.path.dirname(__file__)+"/../form/model/model_form.model_item.hdf5"
  1272. self.model_form_item = Model_form_item(config=config)
  1273. self.model_dict = {"line":[None,self.model_file_line]}
  1274. self.model_form_context = Model_form_context(config=config)
  1275. def getModel(self,type):
  1276. if type=="item":
  1277. return self.model_form_item
  1278. elif type=="context":
  1279. return self.model_form_context
else:
raise ValueError("unknown form model type: %s" % type)
  1282. def encode(self,data,**kwargs):
  1283. return encodeInput([data], word_len=50, word_flag=True,userFool=False)[0]
# return encodeInput_form(data)
  1285. def predict(self,form_datas,type):
  1286. if type=="item":
  1287. return self.model_form_item.predict(form_datas)
  1288. elif type=="context":
  1289. return self.model_form_context.predict(form_datas)
  1290. else:
  1291. return self.getModel(type).predict(form_datas)
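# Usage sketch (hypothetical caller, names not defined in this file): assuming
# `form_datas` is a batch already encoded via encode(), a call such as
#   probs = form_predictor.predict(form_datas, type="item")
# dispatches to Model_form_item, while type="context" uses Model_form_context.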
  1292. #角色规则
  1293. #依据正则给所有无角色的实体赋予角色,给予等于阈值的最低概率
  1294. class RoleRulePredictor():
  1295. def __init__(self):
  1296. # (?P<tenderee_left_w1> 正则组名 后面的 w1 为概率权重关键词
  1297. self.pattern_tenderee_left_55 = "(?P<tenderee_left_55>((遴选|寻源|采购|招标|竞价|议价|比选|委托|询比?价|比价|评选|谈判|邀标|邀请|洽谈|约谈|选取|抽取|抽选|项目|需求|甲方?|转让|招租|议标|合同主体|挂牌|出租|出让|出售|标卖|处置|发包|最终|建设|业主|竞卖|申购|公选)" \
  1298. "(人|方|单位|组织|用户|业主|主体|部门|公司|企业|工厂)|需求?方|买方|业主|权属人|甲方当事人|询价书企业|比选发起人|采购(执行|实施)单位)"\
  1299. "[))]?(信息|联系方式|概况)?[,,::]?([((](1|2|1.1|1.2)[))])?((公司|单位)?名称)?([((](全称|盖章)[))])?(是|为|:|:|\s*)+$)"
  1300. self.pattern_tenderee_left_60 = "(?P<tenderee_left_60>(,|。|^)(项目)?((遴选|寻源|采购|招标|竞价|议价|比选|委托|询比?价|比价|评选|谈判|邀标|邀请|洽谈|约谈|选取|抽取|抽选|项目|需求|甲|转让|招租|议标|合同主体|挂牌|出租|出让|出售|标卖|处置|发包)" \
  1301. "(人|方|单位|组织|用户|业主|主体|部门|公司|企业|工厂))"\
  1302. "[))]?(信息|联系方式|概况)?[,,。::]?([((]?(1|2|1.1|1.2)[))]?)?((公司|单位)?名称)?([((](全称|盖章)[))])?(是|为|:|:|,|\s*)+$)" # 367784094 隆道-大企业采购平台 采购商:C5石油树脂-中国建材集团有限公司-四川省/成都市/市辖区
  1303. self.pattern_tenderee_left_50 = "(?P<tenderee_left_50>((所需|需[用求]|购货|征集|发布|交易发起|开户|申报|填报|开票|收货)" \
  1304. "(人|方|单位|组织|用户|业主|主体|部门|公司|企业|工厂)|[转流]出方|文章来源|委托机构|产权所有人|承包权人|结算单位|收货地址)" \
  1305. "[))]?(信息|联系方式|概况)?[,,::]?([((](1|2|1.1|1.2)[))])?((公司|单位)?名称)?([((](全称|盖章)[))])?(是|为|:|:|\s*)+$|(采购商|招标人):(\w{2,10}-)?$)"
  1306. self.pattern_tenderee_center = "(?P<tenderee_center>(受.{5,20}的?委托|现将[\w()()]{5,20}[\d年月季度至()]+采购意向|尊敬的供应商(伙伴)?:\w{5,20}(以下简称“\w{2,5}”)))"
  1307. self.pattern_tenderee_right = "(?P<tenderee_right>^(机关)?([((](以下简称)?[,\"“]*((招标|采购)(人|单位|机构)|(服务)?购买方)[,\"”]*[))]|^委托|^将于[\d年月日,::]+进行|^现委托|^的\w{2,10}正在进行|[\d年月季度至]+采购意向|^)?的招标工作已圆满结束)|^([拟须需]|计划)(采购|招标|购置|购买)|^须购[买置]一批|作为(采购|招标)(人|单位)|^关于)" #|(^[^.。,,::](采购|竞价|招标|施工|监理|中标|物资)(公告|公示|项目|结果|招标))|的.*正在进行询比价)
  1308. self.pattern_tendereeORagency_right = "(?P<tendereeORagency_right>(^拟对|^现?就|^现对))"
  1309. self.pattern_agency_left = "(?P<agency_left>((代理|拍卖)(?:人|机构|公司|企业|单位|组织)|专业采购机构|集中采购机构|招标组织机构|交易机构|集采机构|[招议))]+标机构|(采购|招标)代理)(名称|.{,4}名,?称|全称)?(是|为|:|:|[,,]?\s*)$|(受.{5,20}委托,?$))"
  1310. self.pattern_agency_right = "(?P<agency_right>^([((](以下简称)?[,\"“]*(代理)(人|单位|机构)[,\"”]*[))])|^受.{5,20}委托|^受委?托,)" # |^受托 会与 受托生产等冲突,代理表达一般会在后面有逗号
  1311. # 2020//11/24 大网站规则 中标关键词添加 选定单位|指定的中介服务机构
  1312. self.pattern_winTenderer_left_50 = "(?P<winTenderer_left_51>" \
  1313. "(乙|竞得|受让|买受|签约|施工|供货|供应?|合作|承做|承包|承建|承销|承保|承接|承制|承担|承修|承租((包))?|入围|入选|竞买)(候选|投标)?(人|单位|机构|供应商|方|公司|企业|厂商|商|社会资本方?)(:?单位名称|:?名称|盖章)?[::是为]+$" \
  1314. "|(选定单位|指定的中介服务机构|实施主体|中标银行|中标通知书,致|征集结果|选择中介|选择结果|成交对象|勘察人|(,|审计|处置|勘察|设计)服务单位|受托[人方])[::是为]+$" \
  1315. "|((评审结果|名次|排名|中标结果)[::]*第?[一1]名?)[::是为]+$|成交供应商信息[,:]?(序号1)?:?|供应商名称$" \
  1316. "|单一来源(采购)?(供应商|供货商|服务商|方式向)$|((中标|成交)(结果|信息))[::是为]+$|(中标|成交)供应商、(中标|成交)(金额|价格),$" \
  1317. "|现(公布|宣布|公示)中标单位如下:$|现将中标单位(公布|公示)如下:$|现宣布以下(企业|单位|公司)中标:$|经讨论,决定采用$)" # 承办单位:不作为中标 83914772
  1318. self.pattern_winTenderer_left_60 = "(?P<winTenderer_left_60>" \
  1319. "(,|。|:|^)((中标(投标)?|[拟预]中标|中选|中价|中签|成交)(人|单位|机构|中介(服务)?机构|供应商|客户|方|公司|企业|厂商|商家?|社会资本方?)|(中标候选人)?第?[一1]名|第[一1](中标|中选|成交)?候选人|服务机构)" \
  1320. "(:?单位名称|:?名称|盖章)?[,,]?([((]按综合排名排序[))]|:择优选取)?[::,,]$|选取(情况|说明):中选,中介机构名称:$|排名如下:1、$)" # 解决表头识别不到加逗号情况,需前面为,。空 20240621补充 中选 云南省投资审批中介超市 补充排名如下 南阳师范学院
  1321. self.pattern_winTenderer_left_55 = "(?P<winTenderer_left_55>(中标(投标)?|[拟预]中标|中选|中价|中签|成交|入选)(人|单位|机构|中介(服务)?机构|供应商|客户|方|公司|企业|厂商|商家?|社会资本方?)" \
  1322. "(:?单位名称|:?名称|盖章)?([((]按综合排名排序[))]|:择优选取)?[::是为]+$" \
  1323. "|结果公示如下:摇出球号:\d+号,中介机构:$)" # 取消逗号 并拒绝执行改进计划的供应商,华新水泥将可能终止与其合作关系 # 中标候选人不能作为中标 # |直购企业:$不能作为中标人,看到有些公告会又多个公司,然后还会发布中选结果的公告,其中一个公司中标
  1324. self.pattern_winTenderer_right = "(?P<winTenderer_right>(^[是为](首选)?((采购|中标|成交)(供应商|供货商|服务商)|(第[一1]|预)?(拟?(中标|中选|中价|成交)(候选|排序)?(人|单位|机构|供应商|公司|企业|厂商)))|" \
  1325. "^((报价|价格)最低,|以\w{5,10})?(确定|成|作)?为[\w“”()]{3,25}((成交|中选|中标|服务)(人|单位|供应商|企业|公司)|供货单位|供应商|第一中标候选人)[,。]" \
  1326. "|^:贵公司参与|^:?你方于|^(胜出)?中标。|^取得中标(单位)?资格|^以\d+[\d,.]+万?元(中标|成交|中选)" \
  1327. "|^通过(挂牌|拍卖)方式(以[\d.,]+万?元)?竞得|^[((](中标|成交|承包)人名?称?[))]))" # 去掉 |\w{,20} 修复 460216955 网上公布的与本次采购项目有关的信息视为已送达各响应供应商。 作为中标
  1328. self.pattern_winTenderer_whole = "(?P<winTenderer_center>(贵公司|由).{,15}以\w{,15}中标|确定[\w()]{5,20}为[^,。;]{5,50}的?中标单位" \
  1329. "|选定报价最低的[“”\w()]{5,25}为[^,。;]{5,50}的?(服务|中标|成交)单位" \
  1330. "|拟邀请[\w()]{5,20}(进行)?单一来源谈判|(承办单位|报价人|投标人|中介机构)(名称)?:[\w()]{5,20},(中标|承办|中选)(价格|金额)" \
  1331. "|(谈判结果:|结果|最终|确定|决定)[以由为][^,。;]{5,25}(向我单位)?(供货|承担|承接|中标|竞买成功)|中标通知书.{,15}你方|单一来源方?式?[从向][()\w]{5,20}采购|供应商名称:[()\w]{5,20},独家采购原因)" # 2020//11/24 大网站规则 中标关键词添加 谈判结果:由.{5,20}供货
  1332. self.pattern_secondTenderer_left = "(?P<secondTenderer_left>((第[二2]名?(名|((中标|中选|中价|成交|候选)(候选)?(人|单位|机构|供应商|公司))))(名称)?[::是为]+$)|((评审结果|名次|排名|排序)[::]第?[二2]名?,?(投标(供应)?商|供应商)(名称)?[::]+$))"
  1333. self.pattern_secondTenderer_right = "(?P<secondTenderer_right>^[是为\(]第[二2](名|(中标|中选|中价|成交)(候选)?(人|单位|机构|供应商|公司)))"
  1334. self.pattern_thirdTenderer_left = "(?P<thirdTenderer_left>(第[三3]名?(名|((中标|中选|中价|成交|候选)(候选)?(人|单位|机构|供应商|公司))))(名称)?[::是为]+$|((评审结果|名次|排名|排序)[::]第?[三3]名?,?(投标(供应)?商|供应商)(名称)?[::]+$))"
  1335. self.pattern_thirdTenderer_right = "(?P<thirdTenderer_right>^[是为\(]第[三3](名|(中标|中选|中价|成交)(候选)?(人|单位|机构|供应商|公司)))"
  1336. self.condadate_left = "(?P<candidate_left>(((中标|成交|入围|入选)候选|投标)(人|单位|机构|中介(服务)?机构|供应商|客户|方|公司|厂商|商家?|社会资本方?)|服务单位)(:?单位名称|:?名称|全称|(?盖\w{,5}章)?|如下|:?牵头人)?[::是为]+$)"
  1337. self.pattern_left = [
  1338. self.pattern_tenderee_left_60,
  1339. self.pattern_tenderee_left_55,
  1340. self.pattern_tenderee_left_50,
  1341. self.pattern_agency_left,
  1342. self.pattern_secondTenderer_left,
  1343. self.pattern_thirdTenderer_left,
  1344. self.pattern_winTenderer_left_60,
  1345. self.pattern_winTenderer_left_55,
  1346. self.pattern_winTenderer_left_50,
  1347. ]
  1348. self.pattern_whole = [
  1349. self.pattern_winTenderer_whole,
  1350. self.pattern_tenderee_center,
  1351. ]
  1352. self.pattern_right = [
  1353. self.pattern_thirdTenderer_right,
  1354. self.pattern_secondTenderer_right,
  1355. self.pattern_agency_right,
  1356. self.pattern_tendereeORagency_right,
  1357. self.pattern_tenderee_right,
  1358. self.pattern_winTenderer_right,
  1359. ]
  1360. self.SET_NOT_TENDERER = set(["人民政府","人民法院","中华人民共和国","人民检察院","评标委员会","中国政府","中国海关","中华人民共和国政府"])
  1361. self.pattern_money_tenderee = re.compile("投?标?最高限价|采购计划金额|项目预算|招标金额|采购金额|项目金额|投资估算|采购(单位|人)委托价|招标限价|拦标价|预算金额|标底|总计|限额|资金来源,?[为:]+\w{2,4}资金|采购成本价|总费用约?为") # |建安费用 不作为招标金额
  1362. self.pattern_money_tenderer = re.compile("((合同|成交|中标|应付款|交易|投标|验收|订单)[)\)]?(综合)?(总?金额|结果|[单报总]?价))|标的基本情况|承包价|报酬(含税):|经评审的价格") # 单写 总价 不能作为中标金额,很多表格有单价、总价
  1363. self.pattern_money_tenderer_whole = re.compile("(以金额.*中标)|中标供应商.*单价|以.*元(报价)?(中标|中选|成交)")
  1364. self.pattern_money_other = re.compile("代理费|服务费")
  1365. self.pattern_pack = "(([^承](包|标[段号的包]|分?包|包组)编?号?|项目)[::]?[\((]?[0-9A-Za-z一二三四五六七八九十]{1,4})[^至]?|(第?[0-9A-Za-z一二三四五六七八九十]{1,4}(包号|标[段号的包]|分?包))|[0-9]个(包|标[段号的包]|分?包|包组)"
  1366. # self.role_file = open('/data/python/lsm/role_rule_predict.txt', 'a', encoding='utf-8')
  1367. def _check_input(self,text, ignore=False):
  1368. if not text:
  1369. return []
  1370. if not isinstance(text, list):
  1371. text = [text]
  1372. null_index = [i for i, t in enumerate(text) if not t]
  1373. if null_index and not ignore:
  1374. raise Exception("null text in input ")
  1375. return text
  1376. def ser_role(self, pattern_list, text, entity_text):
  1377. for _pattern in pattern_list:
  1378. for _iter in re.finditer(_pattern, text):
  1379. for _group, _v_group in _iter.groupdict().items():
  1380. if _v_group is not None and _v_group != "":
  1381. _role = _group.split("_")[0]
  1382. if _role == "tendereeORagency": # 2022/3/9 新增不确定招标代理判断逻辑
  1383. # print('p_entity_sentenceindex:', p_entity.sentence_index)
  1384. # if re.search('医院|学校|大学|中学|小学|幼儿园|政府|部|委员会|署|行|局|厅|处|室|科|股|站', entity_text) \
  1385. # or re.search('(采购|招标|投标|交易|代理|拍卖|咨询|顾问|管理)', entity_text) == None:
  1386. if is_agency(entity_text):
  1387. _role = 'tenderee'
  1388. else:
  1389. _role = "agency"
  1390. _direct = _group.split("_")[1]
  1391. # _weight = _group.split("_")[2] if len(_group.split("_")) == 3 else ""
  1392. prob = int(_group.split("_")[2])/100 if len(_group.split("_")) == 3 else 0.55
  1393. # print('实体召回概率:', prob)
  1394. _label = {"tenderee": 0, "agency": 1, "winTenderer": 2,
  1395. "secondTenderer": 3, "thirdTenderer": 4}.get(_role)
  1396. return (_label, prob, _iter.group(0))
  1397. return (5, 0.5, '')
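# Parsing sketch: a hit on the group named "tenderee_left_60" is split on "_"
# into role "tenderee", direction "left" and weight "60", yielding
# (_label, prob) = (0, 0.60); a two-part name such as "winTenderer_right" falls
# back to prob 0.55, and "tendereeORagency" is resolved to tenderee or agency
# via is_agency() before the label lookup. No hit returns (5, 0.5, '').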
  1398. def rule_predict(self, before, center, after, entity_text):
  1399. # before = before if isinstance(before, str) else ""
  1400. # center = center if isinstance(center, str) else ""
  1401. # after = after if isinstance(after, str) else ""
  1402. _label, _prob, keyword = self.ser_role(self.pattern_left, before, entity_text) # 前文匹配
  1403. keyword = "left_" + keyword if keyword!="" else keyword
  1404. if _label == 2 and re.search(
  1405. '各.{,5}供应商|尊敬的供应商|[^\w]候选供应商|业绩|拟招|(交易|采购|招标|建设)服务(单位|机构)|第[四五六七4567]|是否中标:否|序号:\d+,\w{,2}候选|(排名|排序|名次):([4-9]|\d{2,})|未(中[标选]|入围)|不得确定为|(响应|参[加与]报价|通过资格审查)的?供应商',
  1406. # 135463002 拟招一家供应商为宜宾市第三人民医院、李庄同济医院院区提供消防维保服务
  1407. before) != None:
  1408. _label = 5
  1409. elif _label == 2 and re.search('为$', before) and re.match('\w', after): # 排除错误 前文为结尾,后文不是标点符号结尾的,如 353824459 供应商为社会团体的, 供应商为玉田县中医医院提供安保服务
  1410. _label = 5
  1411. elif _label == 2 and re.search('评委|未中标', after[:5]): # 397194341 过滤掉错误召回中标人
  1412. _label = 5
  1413. elif _label == 2 and re.search('^,?(投标报价|(资格性审查:|符合性审查:)?(不通过|不符合))', after) and re.search('中标|成交|中选|排名|排序|名次|第[一1]名', before[-10:])==None: #20240705 处理类似 493939047 错误
  1414. _label = 5
  1415. if _label == 5:
  1416. _label, _prob, keyword = self.ser_role(self.pattern_whole, before + center + after, entity_text) # 前后文匹配
  1417. keyword = 'whole_'+ keyword[:keyword.find(entity_text)] if keyword!="" else keyword
  1418. if _label == 2 and re.search('以[^,。;]{10,30}为准', before + center + after)!=None:
  1419. _label = 5
if _label != 5 and (self.ser_role(self.pattern_whole, before, entity_text)[0] != 5 or
self.ser_role(self.pattern_whole, after, entity_text)[0] != 5):
  1422. _label = 5
  1423. if _label == 5:
  1424. _label, _prob, keyword = self.ser_role(self.pattern_right, after, entity_text) # 后文匹配
  1425. keyword = "right_" + keyword if keyword!="" else keyword
if _label==5 and re.search('(中标|中选|成交)?[))]?(结果)?(公告|公示|通知书?),', before) and re.match(':', after):
  1427. _label = 2
  1428. _prob = 0.5
  1429. _flag = False if _label==5 else True
  1430. return (_label, _prob, _flag, keyword)
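# Cascade summary for rule_predict: try the left-context patterns first, then
# (if the label is still 5) the whole-span patterns over before+center+after,
# then the right-context patterns, with ad-hoc guards in between that drop
# likely false winner matches. Sketch with hypothetical spans: before="中标人:",
# center="XX公司", after=",中标金额" hits pattern_winTenderer_left_60 on the
# first pass and returns (2, 0.60, True, "left_中标人:").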
  1431. def predict(self, list_articles, list_sentences, list_entitys, list_codenames, on_value=0.5):
  1432. for article, list_entity, list_sentence, list_codename in zip(list_articles, list_entitys, list_sentences,
  1433. list_codenames):
  1434. list_sentence.sort(key=lambda x: x.sentence_index) # 2022/1/5 按句子顺序排序
  1435. # list_name = list_codename["name"]
  1436. list_name = [] # 2022/1/5 改为实体列表内所有项目名称
  1437. name_entitys = [] # 2023/6/30 保存项目名称实体,直接通过位置判断角色是否在项目名称里面
  1438. candidates = [] # 保存不能确定为第几的候选人 2023/04/14
  1439. notfound_tenderer = True # 未找到前三候选人
  1440. for entity in list_entity:
  1441. if entity.entity_type == 'name':
  1442. list_name.append(entity.entity_text)
  1443. name_entitys.append(entity)
  1444. list_name = self._check_input(list_name) + [article.title]
  1445. for p_entity in list_entity:
  1446. if p_entity.entity_type in ["org", "company"]:
  1447. # 只解析角色为无的或者概率低于阈值的
  1448. if p_entity.label is None:
  1449. continue
  1450. # 将上下文包含标题的实体概率置为0.6,因为标题中的实体不一定是招标人
  1451. if str(p_entity.label) == "0":
  1452. find_flag = False
  1453. for _sentence in list_sentence:
  1454. if _sentence.sentence_index == p_entity.sentence_index:
  1455. _span = spanWindow(tokens=_sentence.tokens, begin_index=p_entity.begin_index,
  1456. end_index=p_entity.end_index, size=20, center_include=True,
  1457. word_flag=True, use_text=True,
  1458. text=re.sub(")", ")", re.sub("(", "(", p_entity.entity_text)))
  1459. if re.search(self.pattern_tenderee_left_50, _span[0]) or re.search(self.pattern_tenderee_left_55, _span[0]): # 前面有关键词的实体不判断是否在项目名称中出现
  1460. find_flag = True
  1461. break
  1462. if re.search('(项目|工程|招标|采购(条目)?|合同|标项|标的|计划|询价|询价单|询价通知书|申购单|申购)(名称|标名|标题|主题):$', _span[0]):
  1463. find_flag = True
  1464. if re.search('(局|院|府|学|处|站|会|所|校|馆|队|厅|室|司|心|园|厂)$', p_entity.entity_text):
  1465. p_entity.values[0] = 0.6 if p_entity.values[0]>0.6 else 0.55
  1466. else:
  1467. p_entity.values[0] = on_value # 项目名称里面实体修改为最低概率
  1468. break
  1469. for _name in name_entitys:
  1470. if _name.sentence_index == p_entity.sentence_index and p_entity.wordOffset_begin >=_name.wordOffset_begin and p_entity.wordOffset_end < _name.wordOffset_end:
  1471. find_flag = True
  1472. if re.search('(局|院|府|学|处|站|会|所|校|馆|队|厅|室|司|心|园|厂)$', p_entity.entity_text):
  1473. p_entity.values[0] = 0.6 if p_entity.values[0] > 0.6 else 0.55
  1474. else:
  1475. p_entity.values[0] = on_value # 项目名称里面实体修改为最低概率
  1476. break
  1477. # if p_entity.values[0] > on_value:
  1478. # p_entity.values[0] = 0.5 + (p_entity.values[0] - 0.5) / 10
  1479. # else:
  1480. # p_entity.values[0] = on_value # 2022/03/08 修正类似 223985179 公司在文章开头的项目名称概率又没达到0.5的情况
  1481. # for _name in list_name:
  1482. # if _name != "" and str(_span[0][-10:]+_span[1] + _span[2][:len(str(_name))]).find(_name) >= 0: #加上前面一些信息,修复公司不在项目名称开头的,检测不到
  1483. # find_flag = True
  1484. # if p_entity.values[0] > on_value:
  1485. # p_entity.values[0] = 0.5 + (p_entity.values[0] - 0.5) / 10
  1486. # else:
  1487. # p_entity.values[0] = on_value # 2022/03/08 修正类似 223985179 公司在文章开头的项目名称概率又没达到0.5的情况
  1488. if find_flag:
  1489. continue
  1490. # 正则从概率低于阈值或其他类别中召回角色
  1491. role_prob = float(p_entity.values[int(p_entity.label)])
  1492. if role_prob < on_value or str(p_entity.label) == "5":
  1493. # 将标题中的实体置为招标人
  1494. _list_name = self._check_input(list_name, ignore=True)
  1495. find_flag = False
  1496. for _name in _list_name: # 2022/1/5修正只要项目名称出现过的角色,所有位置都标注为招标人
  1497. if str(_name).find(p_entity.entity_text) >= 0 and p_entity.sentence_index < 4:
  1498. for _sentence in list_sentence:
  1499. if _sentence.sentence_index == p_entity.sentence_index:
  1500. _span = spanWindow(tokens=_sentence.tokens, begin_index=p_entity.begin_index,
  1501. end_index=p_entity.end_index, size=20, center_include=True,
  1502. word_flag=True, use_text=True, text=p_entity.entity_text)
  1503. if _span[2].startswith(":"): # 实体后面为冒号的不作为招标人,避免项目名称出错中标变招标 368122675 陇西兴恒建建筑有限责任公司:线路安全保护区内环境治理专项整改(第二标段)项目
  1504. break
  1505. if str(_span[0][-len(str(_name)):]+_span[1] + _span[2][:len(str(_name))]).find(
  1506. _name) >= 0 or str(_name).startswith(p_entity.entity_text): # 20240621 补充公司开头的项目名称召回,避免name太长召回失败 例 367033697
  1507. # if p_entity.entity_text in agency_set or re.search('(代理|管理|咨询|招投?标|采购)\w{,6}公司', p_entity.entity_text): # 在代理人集合的作为代理人
  1508. if is_agency(p_entity.entity_text): # 2024/3/29 统一方法判断是否为代理
  1509. find_flag = True
  1510. _label = 1
  1511. p_entity.label = _label
  1512. p_entity.values[int(_label)] = on_value
  1513. break
  1514. else:
  1515. find_flag = True
  1516. _label = 0
  1517. p_entity.label = _label
  1518. p_entity.values[int(_label)] = on_value + p_entity.values[int(_label)] / 10
  1519. if 6<len(p_entity.entity_text) < 20 and p_entity.entity_type == 'org': # 标题中角色长度在一定范围内的加分 优化类似367720967 标题中两个实体选择错误问题
  1520. p_entity.values[int(_label)] += 0.005
  1521. break
  1522. if p_entity.sentence_index >= 4:
  1523. break
  1524. if find_flag:
  1525. break
  1526. # 若是实体在标题中,默认为招标人,不进行以下的规则匹配
  1527. if find_flag:
  1528. continue
  1529. for s_index in range(len(list_sentence)):
  1530. if p_entity.doc_id == list_sentence[s_index].doc_id and p_entity.sentence_index == \
  1531. list_sentence[s_index].sentence_index:
  1532. tokens = list_sentence[s_index].tokens
  1533. begin_index = p_entity.begin_index
  1534. end_index = p_entity.end_index
  1535. size = 40 #15
  1536. spans = spanWindow(tokens, begin_index, end_index, size, center_include=True,
  1537. word_flag=True, use_text=False)
  1538. # _flag = False
  1539. # 添加中标通知书类型特殊处理
  1540. try:
  1541. if s_index == 0 and re.search('中标通知书.{,30}[,:]%s:'%p_entity.entity_text.replace('(', '').replace(')', ''),
  1542. list_sentence[s_index].sentence_text.replace('(', '').replace(')', '')[:100]):
  1543. p_entity.label = 2
  1544. p_entity.values[2] = 0.5
  1545. notfound_tenderer = False
  1546. # log('正则召回实体: %s, %s, %s, %d, %.4f, %s'%(_group, _v_group, p_entity.entity_text, p_entity.label, p_entity.values[p_entity.label], list_spans[_i_span]))
  1547. break
  1548. except Exception as e:
  1549. print('正则报错:', e)
  1550. before, center, after = spans[0], spans[1], spans[2]
  1551. entity_text = p_entity.entity_text
  1552. _label, _prob, _flag, kw = self.rule_predict(before, center, after, entity_text)
  1553. # if _label in [0, 1, 2, 3, 4]:
  1554. # self.role_file.write("{0}#split#{1}#split#{2}#split#{3}#split#{4}\n".format(before,
  1555. # entity.entity_text,
  1556. # after,
  1557. # _label,
  1558. # entity.doc_id))
  1559. # 得到结果
  1560. if _flag:
  1561. if _label in [2, 3, 4]:
  1562. notfound_tenderer = False
  1563. p_entity.label = _label
  1564. # p_entity.values[int(_label)] = on_value + p_entity.values[
  1565. # int(_label)] / 10
  1566. p_entity.values[_label] = _prob + p_entity.values[int(_label)] / 10
  1567. # log('正则召回实体: %s, %s, %d, %.4f, %s'%(kw, p_entity.entity_text, p_entity.label, p_entity.values[p_entity.label], before+" "+after))
  1568. break
  1569. if re.search(self.condadate_left, before) and re.search('尊敬的|各', before[-10:])==None:
  1570. candidates.append(p_entity)
  1571. # # 使用正则+距离解决冲突
  1572. # # 2021/6/11update center: spans[1] --> spans[0][-30:]+spans[1]
  1573. # list_spans = [spans[0][-30:], spans[0][-10:] + spans[1] + spans[2][:25], spans[2]] # 实体左、中、右 信息
  1574. # for _i_span in range(len(list_spans)):
  1575. # _flag = False
  1576. # _prob_weight = 1
  1577. #
  1578. # # print(list_spans[_i_span],p_entity.entity_text)
  1579. # for _pattern in self.pattern_whole:
  1580. # for _iter in re.finditer(_pattern, list_spans[_i_span]):
  1581. # for _group, _v_group in _iter.groupdict().items():
  1582. # if _v_group is not None and _v_group != "":
  1583. # _role = _group.split("_")[0]
  1584. # if _role == "tendereeORagency": # 2022/3/9 新增不确定招标代理判断逻辑
  1585. # # print('p_entity_sentenceindex:', p_entity.sentence_index)
  1586. # if p_entity.sentence_index>=1: # 只在第一句进行这种模糊匹配
  1587. # continue
  1588. # if re.search('医院|学校|大学|中学|小学|幼儿园|政府|部|委员会|署|行|局|厅|处|室|科|股|站', p_entity.entity_text)\
  1589. # or re.search('(采购|招标|投标|交易|代理|拍卖|咨询|顾问|管理)', p_entity.entity_text) == None:
  1590. # _role = 'tenderee'
  1591. # else:
  1592. # _role = "agency"
  1593. # _direct = _group.split("_")[1]
  1594. # _weight = _group.split("_")[2] if len(_group.split("_"))==3 else ""
  1595. # # _label = {"tenderee": 0, "agency": 1, "winTenderer": 2,
  1596. # # "secondTenderer": 3, "thirdTenderer": 4}.get(_role)
  1597. # if _i_span == 0 and _direct == "left" and re.search('各.{,5}供应商|尊敬的供应商|业绩|拟招|(交易|采购|招标|建设)服务(单位|机构)|第[四五六七4567]|是否中标:否|序号:\d+,\w{,2}候选|(排名|排序|名次):([4-9]|\d{2,})', #135463002 拟招一家供应商为宜宾市第三人民医院、李庄同济医院院区提供消防维保服务
  1598. # list_spans[0]) == None: # 2021/12/22 修正错误中标召回 例子208668937
  1599. # _flag = True
  1600. # _label = {"tenderee": 0, "agency": 1, "winTenderer": 2,
  1601. # "secondTenderer": 3, "thirdTenderer": 4}.get(_role)
  1602. # _prob_weight = 1.2 if _weight=='w1' else 1
  1603. # # print('_v_group:',_group, _v_group, p_entity.entity_text)
  1604. #
  1605. # if _i_span == 1 and _direct == "center" and _v_group.find(p_entity.entity_text) != -1 and re.search('以[^,。;]{10,30}为准', list_spans[1])==None:
  1606. # _flag = True
  1607. # _label = {"tenderee": 0, "agency": 1, "winTenderer": 2,
  1608. # "secondTenderer": 3, "thirdTenderer": 4}.get(_role)
  1609. # _prob_weight = 1.2 if _weight == 'w1' else 1
  1610. # # print('_v_group:', _group, _v_group, p_entity.entity_text)
  1611. #
  1612. # if _i_span == 2 and _direct == "right":
  1613. # _flag = True
  1614. # _label = {"tenderee": 0, "agency": 1, "winTenderer": 2,
  1615. # "secondTenderer": 3, "thirdTenderer": 4}.get(_role)
  1616. # _prob_weight = 1.2 if _weight == 'w1' else 1
  1617. # # print('_v_group:', _group, _v_group, p_entity.entity_text)
  1618. # # 得到结果
  1619. # if _flag:
  1620. # if _label in [2, 3, 4]:
  1621. # notfound_tenderer = False
  1622. # p_entity.label = _label
  1623. # p_entity.values[int(_label)] = on_value*_prob_weight + p_entity.values[int(_label)] / 10
  1624. # # log('正则召回实体: %s, %s, %s, %d, %.4f, %s'%(_group, _v_group, p_entity.entity_text, p_entity.label, p_entity.values[p_entity.label], list_spans[_i_span]))
  1625. # break
  1626. # if _i_span == 0 and re.search(self.condadate_left, list_spans[_i_span]):
  1627. # candidates.append(p_entity)
  1628. elif str(p_entity.label) in ['2', '3', '4']:
  1629. notfound_tenderer = False
# For the remaining money entities, regexes recall amounts that are likely the tender budget or the bid/winning amount
  1631. if p_entity.entity_type in ["money"]:
  1632. if str(p_entity.label) == "2":
  1633. for _sentence in list_sentence:
  1634. if _sentence.sentence_index == p_entity.sentence_index:
  1635. _span = spanWindow(tokens=_sentence.tokens, begin_index=p_entity.begin_index,
  1636. end_index=p_entity.end_index, size=10, center_include=True,
  1637. word_flag=True, text=p_entity.entity_text)
  1638. if re.search('(含|在|包括)(\d+)?$', _span[0]):
  1639. continue
  1640. if re.search(',\w{2,}', _span[0]):
  1641. _span[0] = _span[0].split(',')[-1] if len(_span[0].split(',')[-1])>4 else _span[0][-8:] #避免多个价格在一起造成误判
  1642. if re.search(self.pattern_money_tenderee, _span[0]) is not None and re.search(
  1643. self.pattern_money_other, _span[0]) is None:
  1644. p_entity.values[0] = 0.8 + p_entity.values[0] / 10
  1645. p_entity.label = 0
  1646. # print('规则召回预算金额:', p_entity.entity_text, _span[0])
  1647. if re.search(self.pattern_money_tenderer, _span[0]) is not None:
  1648. if re.search(self.pattern_money_other, _span[0]) is not None:
  1649. if re.search(self.pattern_money_tenderer, _span[0]).span()[1] > \
  1650. re.search(self.pattern_money_other, _span[0]).span()[1]:
  1651. p_entity.values[1] = 0.8 + p_entity.values[1] / 10
  1652. p_entity.label = 1
  1653. else:
  1654. p_entity.values[1] = 0.8 + p_entity.values[1] / 10
  1655. p_entity.label = 1
  1656. if re.search(self.pattern_money_tenderer_whole,
  1657. "".join(_span)) is not None and re.search(self.pattern_money_other,
  1658. _span[0]) is None:
  1659. p_entity.values[1] = 0.8 + p_entity.values[1] / 10
  1660. p_entity.label = 1
elif re.search('(预算金额|最高(投标)?上?限[价额]?格?|招标控制价)?:?([\d.,]+万?元[,(]其中)?(第?[一二三四五0-9](标[段|包]|[分子]包):?[\d.,]+万?元,)*第?[一二三四五0-9](标[段|包]|[分子]包):?$'
, _sentence.sentence_text[:p_entity.wordOffset_begin]): # handle several adjacent lot/package amounts, e.g. doc 191705231
  1663. p_entity.values[0] = 0.8 + p_entity.values[0] / 10
  1664. p_entity.label = 0
  1665. # print('规则召回预算金额2:', p_entity.entity_text, _sentence.sentence_text[:p_entity.wordOffset_begin])
  1666. if notfound_tenderer and len(set([ent.entity_text for ent in candidates])) == 1 and re.search(
  1667. '(中标|中选|中价|中租|成交|入选|确认)(候选人|人|供应商|记录|结果|变更)?(公告|公示|结果)|(遴选|采购|招标|竞价|议价|比选|询比?价|评选|谈判|邀标|邀请|洽谈|约谈|评标|发包|磋商|交易|评审)\w{,2}结果|单一来源(采购|招标)?的?(中标|成交|结果)|中标通知书',
  1668. article.title+article.content[:100]):
  1669. for p_entity in candidates:
  1670. # print('只有一个候选人的作为中标人', p_entity.entity_text)
  1671. p_entity.label = 2
  1672. p_entity.values[2] = on_value
# Budget-amount extension: if a budget amount is followed by consecutive amounts that all carry lot/package context, relabel those following amounts as budget as well
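# Illustrative walk-through (hypothetical sentence): in "第一包:100万元,第二包:200万元,第三包:300万元",
# if the first amount is already labelled 0 (budget) and its context hits pattern_pack, state moves
# 0 -> 1 and seeds list_p; the following amounts in the same sentence whose context also hits
# pattern_pack (and not pattern_money_other) are appended, and when the run ends every amount after
# the first is relabelled to 0 with values[0] = 0.8 + values[0] / 10.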
  1674. list_p = []
  1675. state = 0
  1676. for p_entity in list_entity:
  1677. for _sentence in list_sentence:
  1678. if _sentence.sentence_index == p_entity.sentence_index:
  1679. _span = spanWindow(tokens=_sentence.tokens, begin_index=p_entity.begin_index,
  1680. end_index=p_entity.end_index, size=20, center_include=True, word_flag=True,
  1681. text=p_entity.entity_text)
  1682. if state == 2:
  1683. for _p in list_p[1:]:
  1684. _p.values[0] = 0.8 + _p.values[0] / 10
  1685. _p.label = 0
  1686. state = 0
  1687. list_p = []
  1688. if state == 0:
  1689. if p_entity.entity_type in ["money"]:
  1690. if str(p_entity.label) == "0" and re.search(self.pattern_pack,
  1691. _span[0] + "-" + _span[2]) is not None:
  1692. state = 1
  1693. list_p.append(p_entity)
  1694. elif state == 1:
  1695. if p_entity.entity_type in ["money"]:
  1696. if str(p_entity.label) in ["0", "2"] and re.search(self.pattern_pack,
  1697. _span[0] + "-" + _span[
  1698. 2]) is not None and re.search(
  1699. self.pattern_money_other,
  1700. _span[0] + "-" + _span[2]) is None and p_entity.sentence_index == list_p[
  1701. 0].sentence_index:
  1702. list_p.append(p_entity)
  1703. else:
  1704. state = 2
  1705. if len(list_p) > 1:
  1706. for _p in list_p[1:]:
  1707. # print("==",_p.entity_text,_p.sentence_index,_p.label)
  1708. _p.values[0] = 0.8 + _p.values[0] / 10
  1709. _p.label = 0
  1710. state = 0
  1711. list_p = []
  1712. for p_entity in list_entity:
# For entities in the cannot-be-winner set, reset the label to none
  1714. if p_entity.entity_text in self.SET_NOT_TENDERER:
  1715. p_entity.label = 5
'''Regex fallback: an entity followed by a date in the final sentence is filled in as tenderee or agency 2021/12/30'''
  1717. class RoleRuleFinalAdd():
  1718. def predict(self, list_articles,list_sentences, list_entitys, list_codenames):
'''
Final rule-based recall of roles (tenderee/agency) from the tail of the announcement.
:param list_articles:
:param list_sentences:
:param list_entitys:
:param list_codenames:
:return:
'''
  1727. # text_end = list_articles[0].content.split('##attachment##')[0][-40:]
  1728. main_sentences = [sentence for sentence in list_sentences[0] if not sentence.in_attachment]
  1729. if len(list_sentences[0])>0 and list_sentences[0][-1].in_attachment:
  1730. main_sentences = list_sentences[0][-1:] + main_sentences[-2:]
  1731. if len(main_sentences)==0:
  1732. return 0
  1733. # end_tokens = []
  1734. for sentence in main_sentences[-5:][::-1]: # 402073799 最后五句由后往前,匹配文末角色,日期
  1735. # end_tokens.extend(sentence.tokens)
  1736. # text_end = "".join(end_tokens[-30:])
  1737. # text_end = "".join(end_tokens)
  1738. text_end = "".join(sentence.tokens)
  1739. text_end = re.sub(r"http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\(\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+", '', text_end) # 去除网址
  1740. text_end = re.sub(',?(招标办|招投标管理中心|国有资产管理处|采办共享中心|采购与招标管理办公室|附件\d*:[^附件,。]{5,100}\.(docx|doc|rar|xlsx|xls|jpg|pdf)|附件\d*:.{,100})', '', text_end)[-200:] # 处理 类似 285264698 传真:0512-62690315,苏州卫生职业技术学院,国有资产管理处,2022年11月24日, 这种情况
  1741. # sear_ent = re.search('[,。]([\u4e00-\u9fa5()()]{5,20}),?\s*[.]{2,4}年.{1,2}月.{1,2}日', text_end)
  1742. sear_ent = re.search('([,。;]|^)(?P<entity>[\u4e00-\u9fa5()()]{5,20}(,?[\u4e00-\u9fa5]{,8})?),?\s*(公告日期:)?[0-9零一二三四五六七八九十〇]{2,4}[年\-/][0-9零一二三四五六七八九十]{1,2}[月\-/][0-9零一二三四五六七八九十]{1,2}日?', text_end)
  1743. if sear_ent:
  1744. b, e = sear_ent.span()
  1745. if re.search('报价记录|竞价成交', text_end[max(b-10, 0):b] + text_end[e:]):
  1746. sear_ent = None
  1747. break
  1748. if sear_ent == None:
  1749. text_end = list_articles[0].content[-100:]
  1750. sear_ent = re.search(
  1751. '([,。;]|^)(?P<entity>[\u4e00-\u9fa5()()]{5,20}(,?[\u4e00-\u9fa5]{,8})?),?\s*(公告日期:)?[0-9零一二三四五六七八九十〇]{2,4}[年\-/][0-9零一二三四五六七八九十]{1,2}[月\-/][0-9零一二三四五六七八九十]{1,2}日?',
  1752. text_end)
  1753. if sear_ent:
  1754. b, e = sear_ent.span()
  1755. if re.search('报价记录|竞价成交', text_end[max(b-10, 0):b] + text_end[e:]):
  1756. sear_ent = None
  1757. sear_ent1 = re.search('((招标|采购)联系人)[,::][A-Za-z0-9_]*(?P<entity>[\u4e00-\u9fa5()()]{4,20})', list_articles[0].content[:5000])
  1758. sear_ent2 = re.search('[,:](户名|开户名称|发票抬头|单位名称|名称)[::](?P<entity>[\u4e00-\u9fa5()()]{5,20})[,。]', list_articles[0].content[:5000])
  1759. if sear_ent2 and sear_ent2.group(1) in ['单位名称','名称'] and re.search('投标报价|(中标|成交|结果|候选人|评标|开标)(公告|公示)', list_articles[0].content[:5000]): # 排除 341354479 这种作为招标人
  1760. sear_ent2 = None
  1761. sear_ent3 = re.search('(买家信息|所有权人|土地权属单位|报名咨询|[收送交]货地点)[,:](?P<entity>[\u4e00-\u9fa5()()]{5,20})[0-9\-]*[,。]', list_articles[0].content[:5000])
  1762. sear_ent4 = re.search('(发布(?:人|单位|机构|企业)|项目业主|所属公司|寻源单位)[,::][A-Za-z0-9_]*(?P<entity>[\u4e00-\u9fa5()()]{4,20})[,。]', list_articles[0].content[:5000])
  1763. sear_list = [sear_ent4 , sear_ent3 , sear_ent2 ,sear_ent1, sear_ent]
  1764. tenderee_notfound = True
  1765. agency_notfound = True
  1766. tenderee_list = []
  1767. agency_list = []
  1768. ents = []
  1769. for ent in list_entitys[0]:
  1770. if ent.entity_type in ['org', 'company']:
  1771. if ent.label == 0 and ent.values[ent.label]>0.55:
  1772. if '公共资源交易中心' in ent.entity_text: # 公共资源交易中心不算招标或代理,只算平台
  1773. # ent.label = 5
  1774. ent.values[ent.label] = 0.6 if ent.values[ent.label]>0.6 else 0.5 # 改为降低概率,不改类别,防止 382573066 明显招标人表达不提取
  1775. continue
  1776. tenderee_list.append(ent.entity_text)
  1777. tenderee_notfound = False
  1778. elif ent.label == 1 and ent.values[ent.label]>0.55:
  1779. agency_list.append(ent.entity_text)
  1780. agency_notfound = False
  1781. elif ent.label == 5:
  1782. if '公共资源交易中心' in ent.entity_text:
  1783. continue
  1784. ents.append(ent)
  1785. if sear_ent or sear_ent1 or sear_ent2 or sear_ent3 or sear_ent4:
  1786. for _sear_ent in [_sear for _sear in sear_list if _sear]:
  1787. ent_re = _sear_ent.group('entity')
  1788. ent_re = ent_re.replace(',', '').replace("(","(").replace(")",")")
  1789. if tenderee_notfound or agency_notfound:
  1790. n = 0
  1791. for i in range(len(ents) - 1, -1, -1):
  1792. if not ents[i].in_attachment:
  1793. n += 1
  1794. if n > 3 and _sear_ent==sear_ent: # 文章末尾角色加日期这种只找后三个实体
  1795. break
  1796. elif _sear_ent==sear_ent and ents[i].label != 5: # 后面有角色的实体的停止继续往前
  1797. break
  1798. if ents[i].entity_text == ent_re or (ents[i].entity_text in ent_re and re.search('(大学|中学|小学|幼儿园|医院)$', ents[i].entity_text)) or (ents[i].entity_text in ent_re and len(ents[i].entity_text)/len(ent_re)>0.6):
  1799. if agency_notfound and is_agency(ents[i].entity_text) and ents[i].entity_text not in tenderee_list:
  1800. ents[i].label = 1
  1801. ents[i].values[1] = 0.51 # 修改为比标题概率略高
  1802. agency_notfound = False
  1803. elif tenderee_notfound and not is_agency(ents[i].entity_text) and ents[i].entity_text not in agency_list:
  1804. ents[i].label = 0
  1805. ents[i].values[0] = 0.51 # 修改为比标题概率略高
  1806. tenderee_notfound = False
  1807. # log('正则最后补充实体: %s'%(ent_re))
  1808. break
  1809. if not tenderee_notfound:
  1810. break
# Tenderee role recall rules
  1812. class TendereeRuleRecall():
  1813. def __init__(self):
  1814. # self.tenderee_left = re.compile("(发布(人|单位|机构)|需求方(信息[,:])?(单位|公司)?名称|购买主体|收货单位|项目申请单位|发起组织|联系单位|"
  1815. # "询价(机构|企业)|联系(人|方式),?(单位|公司)(名称)?|联系(人|方式),名称)[::是为][^。;,]{,5}$")
  1816. # self.tenderee_left_1 = re.compile("采购商公司|询价单位|项目法人单位|项目法人|项目业主名称|申购单位|预算单位|预算单位名称|预算单位单位名称|买方单位|需求公司|寻源单位|项目业主|采购商|业主单位咨询电话|需用单位|采购工厂|征集单位")
  1817. self.tenderee_left_1 = re.compile("((?:采购商|项目法人|项目业主)(名称)?|(?:采购商|询价|项目法人|项目业主|申购|预算|买方|需求|寻源|需用|征集)(单位|公司)((?:单位|公司)?名称)?|询价企业|"
  1818. "业主单位咨询电话|购买主体|采购工厂|需求方(信息[,:])?(单位|公司)?名称|采购单位[\((].{1,6}[\))])[::是为][^。;,]{,2}$")
  1819. self.tenderee_left_2 = re.compile("(招标承办单位|交易人(?:名称)?|招标人代表|(采购|招标)联系人|交易单位|发起(单位|组织)|收货单位|使用方|买家信息)[::是为][^。;,]{,2}$")
  1820. self.tenderee_left_3 = re.compile("[本我](?:公司|单位)[\(\[(【]?$")
  1821. # self.tenderee_left_4 = re.compile("(采购机构|组织机构|组织方|执行单位|采购组织单位|招标组织单位|招标组织部门|采购执行方|采购执行单位|询价执行组织|组织单位|联系单位|联系部门)[::是为][^。;,]{,2}$")
  1822. self.tenderee_left_4 = re.compile("(采购机构|(?:采购|招标|询价)?(组织|执行)(机构|方|单位|部门|组织)|联系(单位|部门)|联系(人|方式),?(单位|公司)(名称)?|联系(人|方式),名称)[::是为][^。;,]{,2}$")
  1823. self.tenderee_left_5 = re.compile("(撰写单位|发布(?:人|单位|机构|公司|部门|企业))[^。;,]{,2}$")
  1824. self.tenderee_right = re.compile("^[^。;::]{,5}[((](以?下简?称)?,?[,\"“]*[我本][\u4e00-\u9fa5]{1,2}[,\"”]*[))]|"
  1825. "^[\((][^。;::\))]{,5}称(?:招标|采购)(?:人|单位)|"
  1826. "^[^。;::]{,10}[对就][^。;,]+,?[^。;,]{,20}进行[^。;,]*(采购|询比?价|遴选|招投?标|征集)|"
  1827. "^[^。;::]{,10}关于[^。;,]+,?[^。;,]{,20}的[^。;,]{,20}公告|"
  1828. "^[^。;,::]{,10}的[^。;,]+,?[^。;,]{,20}正在[^。;,]{,5}进行|"
  1829. "^[^。;,::]{,10}的[^。;,]+,?[^。,;]{,20}已?[^。;,]{,20}批准|"
  1830. "^[^。;,::]{,15}(选定|选取|征集|遴选)[^。;,]{,20}(供应商|(代理|咨询|设计)[^。;,]{,5}机构|代理人)")
  1831. self.tenderee_right2 = re.compile("^[^。;,::]{,10}(招标办|采购部|办事处|采购小?组)")
  1832. self.tenderee_right3 = re.compile("^[^。;,::]{,10}(对|就|关于|的)(?P<project>[^。;,?!::]{4,40})")
  1833. # 公告主语判断规则
  1834. self.subject = re.compile("[我本][院校局]")
  1835. # 未识别实体召回正则
  1836. self.unrecognized1 = re.compile("(?P<tenderee_left>((遴选|采购|招标|竞价|议价|比选|委托|询比?价|评选|谈判|邀标|邀请|洽谈|约谈)" \
  1837. "(人|商|公司|单位|组织|用户|业主|主体|方|部门))" \
  1838. "(信息[,:]?)?((公司|单位)?名称)?([((](全称|盖章)[))])?(是|:|:)+)(?P<unrecognized>[^,。::;]+)[,。;::]")
  1839. self.unrecognized2 = re.compile("(?P<tenderee_left>((项目|需求|最终|建设|业主|转让|招租|甲|议标|合同主体|挂牌|出租|出让|选取|抽取|抽选|出售|标卖|比价|处置)" \
  1840. "(人|公司|单位|组织|用户|业主|主体|方|部门)|文章来源|委托机构|产权所有人|需求?方|买方|业主|(业主|采购人|招标人)联系方式[,:]公司名称:|权属人|甲方当事人|询价书企业|比选发起人|项目单位[,:]单位名称|结算单位)"\
  1841. "[))]?(信息[,:])?((公司|单位)?名称)?([((](全称|盖章)[))])?(是|:|:)+)(?P<unrecognized>[^,。::;]+)[,。;::]")
  1842. # 未识别实体尾部判断
  1843. # self.unrecognized_end1 = re.compile(
  1844. # "^[\u4e00-\u9fa5]{2,}?(?:公司|医院|学校|学院|大学|中学|小学|幼儿园|政府|指挥部|办公室|项目部|业主大会|监狱|教育局|委员会|研究所|招标办|采购部|办事处|水利局|公墓|中心|联合社|合作社)")
  1845. # self.unrecognized_end2 = re.compile("^[\u4e00-\u9fa5]{4,}(?:署|局|厅|处|室|科|部|站|所|股|行|园)")
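# Illustrative use of the patterns above (hypothetical context): for an unlabeled org/company entity
# whose left token window ends with ",预算单位名称:", tenderee_left_1 matches in
# entity_context_rule() below, so the entity is promoted to tenderee:
#   re.search(self.tenderee_left_1, ",预算单位名称:")  ->  match
#   ent.label = 0;  ent.values[0] = 0.5 + ent.values[0] / 10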
  1846. def predict(self, list_articles,list_sentences, list_entitys, list_codenames):
  1847. self.get_tenderee = False
  1848. ents = []
  1849. list_name = []
  1850. agency_set = set()
  1851. for ent in list_entitys[0]:
  1852. if ent.entity_type == 'name':
  1853. list_name.append(ent.entity_text)
  1854. if ent.entity_type in ['org', 'company']:
  1855. if ent.label == 0 and ent.values[ent.label]>=0.5:
  1856. self.get_tenderee = True
  1857. break
  1858. elif ent.label == 1:
  1859. if ent.values[ent.label]>0.5:
  1860. agency_set.add(ent.entity_text)
  1861. elif ent.label == 5:
  1862. if len(ent.entity_text)>=4:
  1863. ents.append(ent)
  1864. if not self.get_tenderee:
  1865. self.entity_context_rule(ents,list_name,list_sentences,list(agency_set))
  1866. if not self.get_tenderee:
  1867. self.subject_rule(ents,list_articles,list_sentences)
  1868. # if not self.get_tenderee:
  1869. # self.unrecognized_entity_rule(self.unrecognized1,list_sentences,list_entitys,0.55)
  1870. # if not self.get_tenderee:
  1871. # self.unrecognized_entity_rule(self.unrecognized2,list_sentences,list_entitys,0.5)
  1872. #entity上下文正则判断
  1873. def entity_context_rule(self,entitys,list_name,list_sentences,list_agency):
  1874. list_sentences[0].sort(key=lambda x:x.sentence_index)
  1875. entity_data = []
  1876. for ent in entitys:
  1877. _sentence = list_sentences[0][ent.sentence_index]
  1878. _span = spanWindow(tokens=_sentence.tokens, begin_index=ent.begin_index,
  1879. end_index=ent.end_index, size=40, center_include=True,
  1880. word_flag=True, use_text=True,
  1881. text=re.sub(")", ")", re.sub("(", "(", ent.entity_text)))
  1882. entity_data.append((ent,_span))
  1883. if not self.get_tenderee:
  1884. for _data in entity_data:
  1885. ent = _data[0]
  1886. _span = _data[1]
  1887. if re.search(self.tenderee_left_1,_span[0]):
  1888. ent.label = 0
  1889. ent.values[0] = 0.5 + ent.values[0] / 10
  1890. self.get_tenderee = True
  1891. if not self.get_tenderee:
  1892. for _data in entity_data:
  1893. ent = _data[0]
  1894. _span = _data[1]
  1895. if re.search(self.tenderee_left_2,_span[0]):
  1896. ent.label = 0
  1897. ent.values[0] = 0.5 + ent.values[0] / 10
  1898. self.get_tenderee = True
  1899. if not self.get_tenderee:
  1900. for _data in entity_data:
  1901. ent = _data[0]
  1902. _span = _data[1]
  1903. if re.search(self.tenderee_left_3,_span[0]):
  1904. ent.label = 0
  1905. ent.values[0] = 0.5 + ent.values[0] / 10
  1906. self.get_tenderee = True
  1907. if not self.get_tenderee:
  1908. for _data in entity_data:
  1909. ent = _data[0]
  1910. _span = _data[1]
  1911. if re.search(self.tenderee_left_4,_span[0]):
  1912. if len(list_agency)>0:
  1913. _same = False
  1914. for agency in list_agency:
  1915. if ent.entity_text in agency or agency in ent.entity_text:
  1916. _same = True
  1917. break
  1918. if not _same:
  1919. ent.label = 0
  1920. ent.values[0] = 0.5 + ent.values[0] / 10
  1921. self.get_tenderee = True
  1922. else:
  1923. if re.search('医院|学校|大学|中学|小学|幼儿园|政府|部|委员会|署|行|局|厅|处|室|科|股|站', ent.entity_text
  1924. ) or not re.search('(采购|招标|投标|交易|代理|拍卖|咨询|顾问|管理)', ent.entity_text) or re.search("自行.?采购",list_sentences[0][ent.sentence_index].sentence_text):
  1925. ent.label = 0
  1926. ent.values[0] = 0.5 + ent.values[0] / 10
  1927. self.get_tenderee = True
  1928. if not self.get_tenderee:
  1929. for _data in entity_data:
  1930. ent = _data[0]
  1931. _span = _data[1]
  1932. if re.search(self.tenderee_left_5,_span[0]):
  1933. if len(list_agency)>0:
  1934. _same = False
  1935. for agency in list_agency:
  1936. if ent.entity_text in agency or agency in ent.entity_text:
  1937. _same = True
  1938. break
  1939. if not _same:
  1940. ent.label = 0
  1941. ent.values[0] = 0.5 + ent.values[0] / 10
  1942. self.get_tenderee = True
  1943. else:
  1944. if re.search('医院|学校|大学|中学|小学|幼儿园|政府|部|委员会|署|行|局|厅|处|室|科|股|站', ent.entity_text
  1945. ) or not re.search('(采购|招标|投标|交易|代理|拍卖|咨询|顾问|管理)', ent.entity_text):
  1946. ent.label = 0
  1947. ent.values[0] = 0.5 + ent.values[0] / 10
  1948. self.get_tenderee = True
  1949. if not self.get_tenderee:
  1950. for _data in entity_data:
  1951. ent = _data[0]
  1952. _span = _data[1]
  1953. if re.search(self.tenderee_right, _span[2]):
  1954. ent.label = 0
  1955. ent.values[0] = 0.5 + ent.values[0] / 10
  1956. self.get_tenderee = True
  1957. if not self.get_tenderee:
  1958. for _data in entity_data:
  1959. ent = _data[0]
  1960. _span = _data[1]
  1961. if re.search(self.tenderee_right2, _span[2]):
  1962. ent.label = 0
  1963. ent.values[0] = 0.5 + ent.values[0] / 10
  1964. self.get_tenderee = True
  1965. if not self.get_tenderee:
  1966. if list_name:
  1967. for _data in entity_data:
  1968. ent = _data[0]
  1969. _span = _data[1]
  1970. pj_name = re.search(self.tenderee_right3, _span[2])
  1971. if pj_name:
  1972. pj_name = pj_name.groupdict()["project"]
  1973. for _name in list_name:
  1974. if _name in pj_name:
  1975. ent.label = 0
  1976. ent.values[0] = 0.5
  1977. self.get_tenderee = True
  1978. break
  1979. # for _data in entity_data:
  1980. # ent = _data[0]
  1981. # _span = _data[1]
  1982. # if re.search(self.tenderee_left,_span[0]):
  1983. # ent.label = 0
  1984. # ent.values[0] = 0.5 + ent.values[0] / 10
  1985. # self.get_tenderee = True
  1986. # elif re.search(self.tenderee_right,_span[2]):
  1987. # ent.label = 0
  1988. # ent.values[0] = 0.5 + ent.values[0] / 10
  1989. # self.get_tenderee = True
  1990. # elif re.search(self.tenderee_right2, _span[2]):
  1991. # ent.label = 0
  1992. # ent.values[0] = 0.5 + ent.values[0] / 10
  1993. # self.get_tenderee = True
  1994. # elif list_name:
  1995. # pj_name = re.search(self.tenderee_right3, _span[2])
  1996. # if pj_name:
  1997. # pj_name = pj_name.groupdict()["project"]
  1998. # for _name in list_name:
  1999. # if _name in pj_name:
  2000. # ent.label = 0
  2001. # ent.values[0] = 0.5
  2002. # self.get_tenderee = True
  2003. # break
  2004. # 公告主语判断
  2005. def subject_rule(self, entitys,list_articles,list_sentences):
  2006. content = list_articles[0].content.split('##attachment##')[0]
  2007. if re.search(self.subject,content):
  2008. _subject = re.search(self.subject,content).group()
  2009. for ent in entitys:
  2010. if re.search("院",_subject) and re.search("医院|学院",ent.entity_text):
  2011. ent.label = 0
  2012. ent.values[0] = 0.5 + ent.values[0] / 10
  2013. self.get_tenderee = True
  2014. elif re.search("校",_subject) and re.search("学校|学院|大学|高中|初中|中学|小学",ent.entity_text):
  2015. ent.label = 0
  2016. ent.values[0] = 0.5 + ent.values[0] / 10
  2017. self.get_tenderee = True
  2018. elif re.search("局", _subject) and re.search("局", ent.entity_text):
  2019. _sentence = list_sentences[0][ent.sentence_index]
  2020. _span = spanWindow(tokens=_sentence.tokens, begin_index=ent.begin_index,
  2021. end_index=ent.end_index, size=20, center_include=True,
  2022. word_flag=True, use_text=True,
  2023. text=re.sub(")", ")", re.sub("(", "(", ent.entity_text)))
  2024. if not re.search("监督|投诉",_span[0][-10:]):
  2025. ent.label = 0
  2026. ent.values[0] = 0.5 + ent.values[0] / 10
  2027. self.get_tenderee = True
  2028. # 正则召回未识别实体
  2029. # def unrecognized_entity_rule(self,pattern,list_sentences,list_entitys,on_value=0.5):
  2030. # list_sentence = list_sentences[0]
  2031. # for in_attachment in [False,True]:
  2032. # for sentence in [sentence for sentence in list_sentence if sentence.in_attachment==in_attachment]:
  2033. # sentence_text = sentence.sentence_text
  2034. # tokens = sentence.tokens
  2035. # doc_id = sentence.doc_id
  2036. # in_attachment = sentence.in_attachment
  2037. # list_tokenbegin = []
  2038. # begin = 0
  2039. # for i in range(0, len(tokens)):
  2040. # list_tokenbegin.append(begin)
  2041. # begin += len(str(tokens[i]))
  2042. # list_tokenbegin.append(begin + 1)
  2043. # for _match in re.finditer(pattern,sentence_text):
  2044. # _groupdict = _match.groupdict()
  2045. # _match_text = _match.group()
  2046. # _unrecognized_text = _groupdict["unrecognized"]
  2047. # _unrecognized = re.search(self.unrecognized_end1,_unrecognized_text)
  2048. # if not _unrecognized:
  2049. # _unrecognized = re.search(self.unrecognized_end2, _unrecognized_text)
  2050. # if _unrecognized:
  2051. # _unrecognized = _unrecognized.group()
  2052. # else:
  2053. # continue
  2054. # # print(_unrecognized)
  2055. # if re.search("某|乙方|代理",_unrecognized) or len(_unrecognized)>15:
  2056. # continue
  2057. # begin_index_temp = _match.start()+len(_groupdict['tenderee_left'])
  2058. # for j in range(len(list_tokenbegin)):
  2059. # if list_tokenbegin[j] == begin_index_temp:
  2060. # begin_index = j
  2061. # break
  2062. # elif list_tokenbegin[j] > begin_index_temp:
  2063. # begin_index = j - 1
  2064. # break
  2065. # index = begin_index_temp + len(_unrecognized)
  2066. # end_index_temp = index
  2067. # for j in range(begin_index, len(list_tokenbegin)):
  2068. # if list_tokenbegin[j] >= index:
  2069. # end_index = j - 1
  2070. # break
  2071. # entity_id = "%s_%d_%d_%d" % (doc_id, sentence.sentence_index, begin_index, end_index)
  2072. # entity_text = _unrecognized
  2073. # new_entity = Entity(doc_id, entity_id, entity_text, 'company', sentence.sentence_index, begin_index, end_index,
  2074. # begin_index_temp, end_index_temp, in_attachment=in_attachment)
  2075. # new_entity.label = 0
  2076. # new_entity.values = [on_value,0,0,0,0,0]
  2077. # list_entitys[0].append(new_entity)
  2078. # self.get_tenderee = True
  2079. # if self.get_tenderee:
  2080. # list_entitys[0] = sorted(list_entitys[0], key=lambda x: (x.sentence_index, x.begin_index))
  2081. # break
  2082. class RoleGrade():
  2083. def __init__(self):
  2084. self.tenderee_left_9 = "(?P<tenderee_left_9>(招标|采购|遴选|寻源|竞价|议价|比选|询比?价|比价|评选|谈判|邀标|邀请|洽谈|约谈|选取|抽取|抽选)(人|方|单位))"
  2085. self.tenderee_center_8 = "(?P<tenderee_center_8>受.{5,20}委托)"
  2086. self.tenderee_left_8 = "(?P<tenderee_left_8>(尊敬的供应商|项目法人|(需求|最终|发包|征集|甲|转让|出租|处置)(人|方|单位|组织|用户|业主|主体|部门|公司)))"
  2087. self.tenderee_left_6 = "(?P<tenderee_left_6>(业主|建设|委托)(人|方|单位|组织|用户|业主|主体|部门|公司|企业)|业主|买方)"
  2088. self.tenderee_left_5 = "(?P<tenderee_left_5>(发布)(人|方|单位|组织|用户|业主|主体|部门|公司|企业)|买方|发布机构)"
  2089. self.agency_left_9 = "(?P<agency_left_9>代理)"
  2090. self.winTenderer_left_9 = "(?P<winTenderer_left_9>(中标|中选|中价|成交|竞得)|第[1一]名|排[名序]:1|名次:1)"
  2091. self.winTenderer_left_8 = "(?P<winTenderer_left_8>(入选供应商|供货商|乙方|最[终后]选[择取]))" # 229435497 最后选择西平,县中原彩印有限公司,作为此项目中标供应商,
  2092. self.winTenderer_left_6 = "(?P<winTenderer_left_6>(入围|承[接建包修做制担租销]))"
  2093. self.winTenderer_right_9 = "(?P<winTenderer_right_9>^(为(中标|成交|中选)(人|单位|供应商|公司)|以\d+[\d.,]+万?元中标))"
  2094. self.secondTenderer_left_9 = "(?P<secondTenderer_left_9>(第[二2](中标|中选|中价|成交)?候选(人|单位|供应商|公司)|第[二2]名|排[名序]:2|名次:2))"
  2095. self.thirdTenderer_left_9 = "(?P<thirdTenderer_left_9>(第[三3](中标|中选|中价|成交)?候选(人|单位|供应商|公司)|第[三3]名|排[名序]:3|名次:3))"
  2096. self.pattern_list = [self.tenderee_left_9,self.tenderee_center_8, self.tenderee_left_8,self.tenderee_left_6,self.tenderee_left_5,self.agency_left_9,
  2097. self.winTenderer_left_6, self.winTenderer_left_9,self.winTenderer_left_8, self.winTenderer_right_9, self.secondTenderer_left_9, self.thirdTenderer_left_9]
  2098. def predict(self, list_sentences, list_entitys, original_docchannel, span=15, min_prob=0.7):
'''
Assign graded probabilities to roles by rule; three tiers: 0.9-1, 0.8-0.9, 0.7-0.8 (0.7-0.8, 0.6-0.7, 0.5-0.6 inside attachments).
Tenderees with probability below 0.6 that appear in the big-data agency set are changed to agency.
:param list_sentences:
:param list_entitys:
:param original_docchannel:
:param span: number of characters of context taken on each side of the entity
:param min_prob: minimum model probability for an entity to be re-graded
:return:
'''
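# Worked example (illustrative, hypothetical entity): a label-2 entity whose left context hits
# winTenderer_left_9 resolves to role "winTenderer", direction "left", base probability 9 -> 0.9.
# If the sentence is in an attachment the base drops by 0.1; assuming the model score is not below
# that, a name shorter than 6 characters (not containing 大学/医院) and a context ending in
# "地址:"/"联系方式:" each subtract a further 0.05, and the result is blended with the model score:
#   entity.values[2] = (0.9 - 0.1 - 0.05) + entity.values[2] / 20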
  2108. sentences = sorted(list_sentences[0], key=lambda x:x.sentence_index)
  2109. role2id = {"tenderee": 0, "agency": 1, "winTenderer": 2, "secondTenderer": 3, "thirdTenderer": 4}
  2110. org_winner = []
  2111. company_winner = []
  2112. org_tenderee = []
  2113. agency_l = []
  2114. agency_like_tenderee = [] # 类似招标人的代理人实体列表
  2115. low_prob_agency = []
  2116. low_prob_tenderee = []
  2117. low_prob_winner = []
  2118. all_tenderee_agency = []
  2119. for entity in list_entitys[0]:
  2120. if entity.entity_type in ['org', 'company'] and entity.label in [0, 1, 2, 3, 4] and entity.values[entity.label]> min_prob:
  2121. text = sentences[entity.sentence_index].sentence_text
  2122. in_att = sentences[entity.sentence_index].in_attachment
  2123. pre_prob = entity.values[entity.label] # 模型预测角色概率
  2124. b = entity.wordOffset_begin
  2125. e = entity.wordOffset_end
  2126. not_found = 1
  2127. if re.search('(乙方:甲方:|甲方:乙方:)$', text[max(0, b-span):b]):
  2128. entity.label = 0 if entity.entity_type == 'org' else 5 # 修复 290777022 乙方:甲方: 重庆机场集团有限公司 错分为中标
  2129. entity.values[entity.label] = 0.55
  2130. continue
elif re.search('(采购|招标)人(?:或其?(采购|招标)?代理机构)?', text[max(0, b-span-2):b]): # fix for doc 275206588: 招标人或其招标代理机构:(盖章)
  2132. entity.label = 1 if is_agency(entity.entity_text) else 0
  2133. entity.values[entity.label] = 0.8
  2134. continue
  2135. elif re.search('(采购|招标|询比?价|遴选|寻源|比选)机构[是为:]+', text[max(0, b-span):b]) and not is_agency(entity.entity_text):
  2136. agency_like_tenderee.append(entity)
  2137. for pattern in self.pattern_list:
  2138. if 'left' in pattern:
  2139. context = text[max(0, b-span):b]
  2140. elif 'right' in pattern:
  2141. context = text[e:e+span]
  2142. elif 'center' in pattern:
  2143. context = text[max(0, b-span):e+span]
  2144. else:
  2145. print('规则错误', pattern)
  2146. ser = re.search(pattern, context)
  2147. if ser:
  2148. groupdict = pattern.split('>')[0].replace('(?P<', '')
  2149. _role, _direct, _prob = groupdict.split('_')
  2150. _label = role2id.get(_role)
  2151. if _label != entity.label:
  2152. continue
  2153. _prob = int(_prob)*0.1
  2154. # print('规则修改角色概率前:', entity.entity_text, entity.label, entity.values)
  2155. if in_att:
  2156. _prob = _prob - 0.1 # 0.2
  2157. if pre_prob < _prob: # 如果模型预测概率小于关键词概率
  2158. _prob = 0.65
  2159. if len(entity.entity_text) < 6 and re.search('大学|医院', entity.entity_text)==None: # 如果实体名称小于6个字,概率再降0.05
  2160. _prob -= 0.05
  2161. if re.search('(地址|联系方式):$', context): # 地址结尾的概率 概率降低
  2162. _prob -= 0.05
  2163. entity.values[_label] = _prob + entity.values[_label] / 20
  2164. not_found = 0
  2165. # print('规则修改角色概率后:', entity.entity_text, entity.label, entity.values)
  2166. break
  2167. if not_found and entity.values[entity.label]> min_prob:
  2168. _prob = min_prob - 0.1 if in_att else min_prob
  2169. entity.values[entity.label] = _prob + entity.values[entity.label] / 20
  2170. # print('找不到规则修改角色概率:', entity.entity_text, entity.label, entity.values)
  2171. if entity.label == 2 and entity.values[entity.label]> min_prob:
  2172. if entity.entity_type == 'org':
  2173. org_winner.append(entity)
  2174. elif entity.entity_type == 'company':
  2175. company_winner.append(entity) # 保存中标人实体
  2176. if entity.label == 0 and entity.values[entity.label]> min_prob:
  2177. org_tenderee.append(entity.entity_text) # 保存所有招标人名称
  2178. elif entity.label == 1 and entity.values[entity.label]> min_prob:
  2179. agency_l.append(entity.entity_text)
  2180. # if entity.entity_type in ['org', 'company'] and entity.label == 0 and entity.entity_text in agency_set and entity.values[entity.label]<0.6: # 修改概率小于0.6的且在大数据代理集合里面的招标人为代理人
  2181. # # log('修改概率小于0.6的且在大数据代理集合里面的招标人为代理人%s:'%entity.entity_text)
  2182. # entity.label = 1
  2183. # entity.values[entity.label] = 0.5
  2184. elif entity.entity_type in ['org', 'company'] and entity.label in [1, 0] and 0.5<=entity.values[entity.label]<0.6:
  2185. if entity.label == 1:
  2186. low_prob_agency.append(entity)
  2187. else:
  2188. low_prob_tenderee.append(entity)
  2189. elif entity.entity_type in ['org', 'company'] and entity.label == 2 and 0.5<=entity.values[entity.label]<0.6:
  2190. low_prob_winner.append(entity)
  2191. if entity.entity_type in ['org', 'company'] and entity.label in [1, 0] and 0.6<entity.values[entity.label]: # 由0.5调为0.6,避免367217504 同时为低概率招标、中标被改
  2192. all_tenderee_agency.append(entity.entity_text)
  2193. if org_tenderee == [] and agency_like_tenderee:
  2194. for entity in agency_like_tenderee:
  2195. entity.label = 0
  2196. entity.values[entity.label] = 0.6
  2197. for entity in low_prob_agency: # 如果低概率代理在招标人列表,改为招标人
  2198. if entity.entity_text in org_tenderee:
  2199. entity.label = 0
  2200. entity.values[entity.label] = 0.6
  2201. for entity in low_prob_tenderee:
  2202. if entity.entity_text in agency_l:
  2203. entity.label = 1
  2204. entity.values[entity.label] = 0.6
  2205. for entity in low_prob_winner: # 如果低概率中标人在招标或代理列表,改为非角色
  2206. if entity.entity_text in all_tenderee_agency:
  2207. entity.label = 5
  2208. if org_winner != []:
  2209. flag = 0
  2210. if org_tenderee != []:
  2211. for ent in org_winner:
  2212. if ent.entity_text in org_tenderee:
  2213. # log('如果org中标人同时为招标人角色,降低中标概率:%s, %s' % (ent.entity_text, ent.label))
  2214. ent.values[2] = 0.6
  2215. flag = 1
  2216. # if flag == 0 and company_winner != []: # 2024/04/18 注释掉 避免提取不到 273351465 供应商(乙方:湖南省第二测绘院
  2217. # for ent in org_winner:
  2218. # if ent.label == 2 and ent.values[2] > 0.6:
  2219. # # log('如果同时包含org和company中标人,降低org中标人概率为0.6:%s, %s' % (ent.entity_text, ent.values[2]))
  2220. # ent.values[2] = 0.6
  2221. class MoneyGrade():
  2222. def __init__(self):
  2223. self.tenderee_money_left_9 = "(?P<tenderee_left_9>最高(投标)?限价)|控制价|拦标价"
  2224. self.tenderee_money_left_8 = "(?P<tenderee_left_8>预算|限价|起始|起拍|底价|标底)"
  2225. self.tenderer_money_left_9 = "(?P<tenderer_left_9>(中标|成交|合同|总报价))"
  2226. self.tenderer_money_left_8 = "(?P<tenderer_left_8>(投标|总价))"
  2227. self.pattern_list = [self.tenderee_money_left_8, self.tenderer_money_left_8, self.tenderee_money_left_9, self.tenderer_money_left_9]
  2228. def predict(self, list_sentences, list_entitys, span=10, min_prob=0.7):
  2229. sentences = sorted(list_sentences[0], key=lambda x:x.sentence_index)
  2230. role2id = {"tenderee": 0, "tenderer": 1}
  2231. for entity in list_entitys[0]:
  2232. if entity.entity_type in ['money'] and entity.label in [0, 1] and entity.values[entity.label]> 0.6:
  2233. text = sentences[entity.sentence_index].sentence_text
  2234. in_att = sentences[entity.sentence_index].in_attachment
  2235. b = entity.wordOffset_begin
  2236. e = entity.wordOffset_end
  2237. context = text[max(0, b - span):b]
  2238. not_found = 1
  2239. for pattern in self.pattern_list:
  2240. ser = re.search(pattern, context)
  2241. if ser:
  2242. groupdict = pattern.split('>')[0].replace('(?P<', '')
  2243. _role, _direct, _prob = groupdict.split('_')
  2244. if re.search('单价', context[-4:]) or re.search('(最低|风险)控制价', context) or float(entity.entity_text)<100:
  2245. _prob = 6
  2246. _label = role2id.get(_role)
  2247. if _label != entity.label:
  2248. continue
  2249. _prob = int(_prob) * 0.1
  2250. # print('规则修改金额概率前:', entity.entity_text, entity.label, entity.values)
  2251. if in_att:
  2252. _prob = max(0.5, _prob - 0.2)
  2253. entity.values[_label] = _prob + entity.values[_label] / 20
  2254. not_found = 0
  2255. # print('规则修改金额概率后:', entity.entity_text, entity.label, entity.values)
  2256. break
  2257. if not_found and entity.values[entity.label] > min_prob:
  2258. if re.search('单价', context[-4:]) or re.search('(最低|风险)控制价', context) or float(entity.entity_text)<100:
  2259. _prob = 0.6
  2260. elif in_att:
  2261. _prob = max(0.5, min_prob - 0.1)
  2262. else:
  2263. _prob = min_prob
  2264. # _prob = min_prob - 0.1 if in_att else min_prob
  2265. entity.values[entity.label] = _prob + entity.values[entity.label] / 20
  2266. # print('找不到规则修改金额概率:', entity.entity_text, entity.label, entity.values)
# Time category classifier
  2268. class TimePredictor():
  2269. def __init__(self,config=None):
  2270. self.sess = tf.Session(graph=tf.Graph(),config=config)
  2271. self.inputs_code = None
  2272. self.outputs_code = None
  2273. self.input_shape = (2,40,128)
  2274. self.load_model()
  2275. def load_model(self):
  2276. model_path = os.path.dirname(__file__)+'/timesplit_model'
  2277. if self.inputs_code is None:
  2278. log("get model of time")
  2279. with self.sess.as_default():
  2280. with self.sess.graph.as_default():
  2281. meta_graph_def = tf.saved_model.loader.load(self.sess, tags=["serve"], export_dir=model_path)
  2282. signature_key = tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY
  2283. signature_def = meta_graph_def.signature_def
  2284. self.inputs_code = []
  2285. self.inputs_code.append(
  2286. self.sess.graph.get_tensor_by_name(signature_def[signature_key].inputs["input0"].name))
  2287. self.inputs_code.append(
  2288. self.sess.graph.get_tensor_by_name(signature_def[signature_key].inputs["input1"].name))
  2289. self.outputs_code = self.sess.graph.get_tensor_by_name(signature_def[signature_key].outputs["outputs"].name)
  2290. return self.inputs_code, self.outputs_code
  2291. else:
  2292. return self.inputs_code, self.outputs_code
  2293. def search_time_data(self,list_sentences,list_entitys):
  2294. data_x = []
  2295. points_entitys = []
  2296. for list_sentence, list_entity in zip(list_sentences, list_entitys):
  2297. p_entitys = 0
  2298. p_sentences = 0
  2299. list_sentence.sort(key=lambda x: x.sentence_index)
  2300. while(p_entitys<len(list_entity)):
  2301. entity = list_entity[p_entitys]
  2302. if entity.entity_type in ['time']:
  2303. while(p_sentences<len(list_sentence)):
  2304. sentence = list_sentence[p_sentences]
  2305. if entity.doc_id == sentence.doc_id and entity.sentence_index == sentence.sentence_index:
  2306. # left = sentence.sentence_text[max(0,entity.wordOffset_begin-self.input_shape[1]):entity.wordOffset_begin]
  2307. # right = sentence.sentence_text[entity.wordOffset_end:entity.wordOffset_end+self.input_shape[1]]
  2308. s = spanWindow(tokens=sentence.tokens,begin_index=entity.begin_index,end_index=entity.end_index,size=self.input_shape[1])
  2309. left = s[0]
  2310. right = s[1]
  2311. context = [left, right]
  2312. x = self.embedding_words(context, shape=self.input_shape)
  2313. data_x.append(x)
  2314. points_entitys.append(entity)
  2315. break
  2316. p_sentences += 1
  2317. p_entitys += 1
  2318. if len(points_entitys)==0:
  2319. return None
  2320. data_x = np.transpose(np.array(data_x), (1, 0, 2, 3))
  2321. return [data_x, points_entitys]
  2322. def embedding_words(self, datas, shape):
'''
@summary: look up the word vectors for the given tokens
@param:
datas: list of token lists (left context, right context)
shape: shape of the result array
@return: array of word embeddings with the given shape
'''
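# Shape sketch (illustrative): with input_shape = (2, 40, 128), datas is [left_tokens, right_tokens];
# embed[0] holds up to 40 word2vec vectors (128-dim) for the left context and embed[1] for the right,
# with out-of-vocabulary tokens falling back to the 'unk' vector:
#   x = self.embedding_words([left_tokens, right_tokens], shape=self.input_shape)
#   x.shape  ->  (2, 40, 128)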
  2330. model_w2v = getModel_w2v()
  2331. embed = np.zeros(shape)
  2332. length = shape[1]
  2333. out_index = 0
  2334. for data in datas:
  2335. index = 0
  2336. for item in data:
  2337. item_not_space = re.sub("\s*", "", item)
  2338. if index >= length:
  2339. break
  2340. if item_not_space in model_w2v.vocab:
  2341. embed[out_index][index] = model_w2v[item_not_space]
  2342. index += 1
  2343. else:
  2344. embed[out_index][index] = model_w2v['unk']
  2345. index += 1
  2346. out_index += 1
  2347. return embed
  2348. def predict(self, list_sentences,list_entitys):
  2349. datas = self.search_time_data(list_sentences, list_entitys)
  2350. if datas is None:
  2351. return
  2352. points_entitys = datas[1]
  2353. with self.sess.as_default():
  2354. predict_y = limitRun(self.sess,[self.outputs_code], feed_dict={self.inputs_code[0]:datas[0][0]
  2355. ,self.inputs_code[1]:datas[0][1]})[0]
  2356. for i in range(len(predict_y)):
  2357. entity = points_entitys[i]
  2358. label = np.argmax(predict_y[i])
  2359. values = []
  2360. for item in predict_y[i]:
  2361. values.append(item)
  2362. if label != 0:
  2363. if not timeFormat(entity.entity_text):
  2364. label = 0
  2365. values[0] = 0.5
  2366. entity.set_Role(label, values)
# Product field extraction
  2368. class ProductPredictor():
  2369. def __init__(self,config=None):
  2370. vocabpath = os.path.dirname(__file__) + "/codename_vocab.pk"
  2371. self.vocab = load(vocabpath)
  2372. self.word2index = dict((w, i) for i, w in enumerate(np.array(self.vocab)))
  2373. self.sess = tf.Session(graph=tf.Graph(),config=config)
  2374. self.load_model()
  2375. def load_model(self):
  2376. # model_path = os.path.dirname(__file__)+'/product_savedmodel/product.pb'
  2377. model_path = os.path.dirname(__file__)+'/product_savedmodel/productAndfailreason.pb'
  2378. with self.sess.as_default():
  2379. with self.sess.graph.as_default():
  2380. output_graph_def = tf.GraphDef()
  2381. with open(model_path, 'rb') as f:
  2382. output_graph_def.ParseFromString(f.read())
  2383. tf.import_graph_def(output_graph_def, name='')
  2384. self.sess.run(tf.global_variables_initializer())
  2385. self.char_input = self.sess.graph.get_tensor_by_name('CharInputs:0')
  2386. self.length = self.sess.graph.get_tensor_by_name("Sum:0")
  2387. self.dropout = self.sess.graph.get_tensor_by_name("Dropout:0")
  2388. self.logit = self.sess.graph.get_tensor_by_name("logits/Reshape:0")
  2389. self.tran = self.sess.graph.get_tensor_by_name("crf_loss/transitions:0")
  2390. def decode(self,logits, lengths, matrix):
  2391. paths = []
  2392. small = -1000.0
  2393. # start = np.asarray([[small] * 4 + [0]])
  2394. start = np.asarray([[small]*7+[0]])
  2395. for score, length in zip(logits, lengths):
  2396. score = score[:length]
  2397. pad = small * np.ones([length, 1])
  2398. logits = np.concatenate([score, pad], axis=1)
  2399. logits = np.concatenate([start, logits], axis=0)
  2400. path, _ = viterbi_decode(logits, matrix)
  2401. paths.append(path[1:])
  2402. return paths
  2403. def predict(self, list_sentences,list_entitys=None,list_articles=[], fail=False, MAX_AREA=5000):
'''
Predict product entities (and, in fail mode, the failure reason); each sentence is truncated to at most MAX_AREA characters.
:param list_sentences: sentence lists for multiple announcements, [[sentences of one announcement], ...]
:param list_entitys: entity lists for multiple announcements
:param list_articles: article list (used when fail=True)
:param fail: if True, also extract the failure reason from the article text
:param MAX_AREA: maximum number of characters taken per sentence
:return: predicted entities are appended to the entity lists; returns a fail_reason dict and the product list
'''
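# Decoding sketch (illustrative, hypothetical tag path): the CRF path is flattened into a digit
# string and entity spans are read off with regexes:
#   path = [0, 1, 2, 2, 3, 0, 4, 5, 6]  ->  tags = "012230456"
#   re.finditer("12*3", tags)  ->  span (1, 5)  ->  product text[1:5]
#   re.finditer("45*6", tags)  ->  span (6, 9)  ->  failure reason text[6:9]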
  2411. with self.sess.as_default() as sess:
  2412. with self.sess.graph.as_default():
  2413. result = []
  2414. product_list = []
  2415. if fail and list_articles!=[]:
  2416. text_list = [list_articles[0].content[:MAX_AREA]]
  2417. chars = [[self.word2index.get(it, self.word2index.get('<unk>')) for it in text] for text in text_list]
  2418. if USE_API:
  2419. requests_result = requests.post(API_URL + "/predict_product",
  2420. json={"inputs": chars}, verify=True)
  2421. batch_paths = json.loads(requests_result.text)['result']
  2422. lengths = json.loads(requests_result.text)['lengths']
  2423. else:
  2424. lengths, scores, tran_ = sess.run([self.length, self.logit, self.tran],
  2425. feed_dict={
  2426. self.char_input: np.asarray(chars),
  2427. self.dropout: 1.0
  2428. })
  2429. batch_paths = self.decode(scores, lengths, tran_)
  2430. for text, path, length in zip(text_list, batch_paths, lengths):
  2431. tags = ''.join([str(it) for it in path[:length]])
# extract products
  2433. for it in re.finditer("12*3", tags):
  2434. start = it.start()
  2435. end = it.end()
  2436. _entity = Entity(doc_id=list_articles[0].id, entity_id="%s_%s_%s_%s" % (
  2437. list_articles[0].doc_id, 0, start, end),
  2438. entity_text=text[start:end],
  2439. entity_type="product", sentence_index=0,
  2440. begin_index=0, end_index=0, wordOffset_begin=start,
  2441. wordOffset_end=end)
  2442. list_entitys[0].append(_entity)
  2443. product_list.append(text[start:end])
# extract failure reasons
  2445. for it in re.finditer("45*6", tags):
  2446. start = it.start()
  2447. end = it.end()
  2448. result.append(text[start:end].replace('?', '').strip())
  2449. reasons = []
  2450. for it in result:
  2451. if "(√)" in it or "(√)" in it:
  2452. reasons = [it]
  2453. break
  2454. if reasons != [] and (it not in reasons[-1] and it not in reasons):
  2455. reasons.append(it)
  2456. elif reasons == []:
  2457. reasons.append(it)
  2458. if reasons == []: # 如果模型识别不到失败原因 就用规则补充
  2459. for text in text_list:
  2460. ser1 = re.search('\w{,4}(理由|原因):\s*((第\d+包|标项\d+|原因类型)?[::]?[\s*\w,]{2,30}((不满?足|少于|未达)((法定)?[123一二三两]家|(规定)?要求)|(项目|采购)(终止|废标)),?)+',text)
  2461. ser2 = re.search(
  2462. '\w{,4}(理由|原因):\s*(第\d+包|标项\d+|原因类型)?[::]?[\s*\w]{4,30},', text)
  2463. if ser1:
  2464. reasons.append(ser1.group(0))
  2465. break
  2466. elif ser2:
  2467. reasons.append(ser2.group(0))
  2468. break
  2469. return {'fail_reason':';'.join(reasons)}, product_list
  2470. if list_entitys is None:
  2471. list_entitys = [[] for _ in range(len(list_sentences))]
  2472. for list_sentence, list_entity in zip(list_sentences,list_entitys):
  2473. if len(list_sentence)==0:
  2474. result.append({"product":[]})
  2475. continue
  2476. list_sentence.sort(key=lambda x:len(x.sentence_text), reverse=True)
  2477. _begin_index = 0
  2478. item = {"product":[]}
  2479. temp_list = []
  2480. while True:
  2481. MAX_LEN = len(list_sentence[_begin_index].sentence_text)
  2482. if MAX_LEN > MAX_AREA:
  2483. MAX_LEN = MAX_AREA
  2484. _LEN = MAX_AREA//MAX_LEN
  2485. chars = [sentence.sentence_text[:MAX_LEN] for sentence in list_sentence[_begin_index:_begin_index+_LEN]]
  2486. chars = [[self.word2index.get(it, self.word2index.get('<unk>')) for it in l] for l in chars]
  2487. chars = pad_sequences(chars, maxlen=MAX_LEN, padding="post", truncating="post")
  2488. if USE_API:
  2489. requests_result = requests.post(API_URL + "/predict_product",
  2490. json={"inputs": chars.tolist()}, verify=True)
  2491. batch_paths = json.loads(requests_result.text)['result']
  2492. lengths = json.loads(requests_result.text)['lengths']
  2493. else:
  2494. lengths, scores, tran_ = sess.run([self.length, self.logit, self.tran],
  2495. feed_dict={
  2496. self.char_input: np.asarray(chars),
  2497. self.dropout: 1.0
  2498. })
  2499. batch_paths = self.decode(scores, lengths, tran_)
  2500. for sentence, path, length in zip(list_sentence[_begin_index:_begin_index+_LEN],batch_paths, lengths):
  2501. tags = ''.join([str(it) for it in path[:length]])
  2502. for it in re.finditer("12*3", tags):
  2503. start = it.start()
  2504. end = it.end()
  2505. _entity = Entity(doc_id=sentence.doc_id, entity_id="%s_%s_%s_%s" % (
  2506. sentence.doc_id, sentence.sentence_index, start, end),
  2507. entity_text=sentence.sentence_text[start:end],
  2508. entity_type="product", sentence_index=sentence.sentence_index,
  2509. begin_index=0, end_index=0, wordOffset_begin=start,
  2510. wordOffset_end=end,in_attachment=sentence.in_attachment)
  2511. list_entity.append(_entity)
  2512. temp_list.append(sentence.sentence_text[start:end])
  2513. product_list.append(sentence.sentence_text[start:end])
  2514. # item["product"] = list(set(temp_list))
  2515. # result.append(item)
  2516. if _begin_index+_LEN >= len(list_sentence):
  2517. break
  2518. _begin_index += _LEN
  2519. item["product"] = list(set(temp_list))
  2520. result.append(item) # 修正bug
  2521. return {'fail_reason': ""},product_list
# Extract product quantity, unit price, brand and specs # 2021/11/10 also extract project, demand, budget and time fields from tables
  2523. class ProductAttributesPredictor():
  2524. def __init__(self,):
  2525. self.p0 = '(类别|类型|物类|目录|类目|分类)(名称|$)|^品名|^品类|^品目|(标项|分项|项目|计划|包组|标段|[分子]?包|子目|服务|招标|中标|成交|工程|招标内容)(名称|内容|描述)'
  2526. self.p1 = '(标的|维修|系统|报价构成|商品|产品|物料|物资|货物|设备|采购品|采购条目|物品|材料|印刷品?|采购|物装|配件|资产|耗材|清单|器材|仪器|器械|备件|拍卖物|标的物|物件|药品|药材|药械|货品|食品|食材|品目|^品名|气体)[\))的]?([、\w]{,4}名称|内容|描述)'
  2527. self.p2 = '标的|标项|项目$|商品|产品|物料|物资|货物|设备|采购品|采购条目|物品|材料|印刷品|物装|配件|资产|招标内容|耗材|清单|器材|仪器|器械|备件|拍卖物|标的物|物件|药品|药材|药械|货品|食品|食材|菜名|^品目$|^品名$|^名称|^内容$|(标项|分项|项目|计划|包组|标段|[分子]?包|子目|服务|招标|中标|成交|工程|招标内容)(名称|内容|描述)'
  2528. # self.p1 = '(设备|货物|商品|产品|物品|货品|材料|物资|物料|物件|耗材|备件|食材|食品|品目|标的|标的物|标项|资产|拍卖物|仪器|器材|器械|药械|药品|药材|采购品?|项目|招标|工程|服务)[\))]?(名称|内容|描述)'
  2529. # self.p2 = '设备|货物|商品|产品|物品|货品|材料|物资|物料|物件|耗材|备件|食材|食品|品目|标的|标的物|资产|拍卖物|仪器|器材|器械|药械|药品|药材|采购品|项目|品名|菜名|内容|名称'
  2530. with open(os.path.dirname(__file__)+'/header_set.pkl', 'rb') as f:
  2531. self.header_set = pickle.load(f)
  2532. self.tb = TableTag2List()
  2533. def isTrueTable(self, table):
'''Rules for telling real tables from layout tables:
1. A table containing <caption> or <th> tags is real.
2. A table containing many links, forms, images or nested tables is fake.
3. A table that is too small is fake.
4. When an outer <table> nests an inner <table>, the inner one is usually real and the outer one fake.'''
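# Illustrative check (hypothetical markup; assumes a BeautifulSoup tag and the lxml parser):
#   soup = BeautifulSoup("<table><tr><th>名称</th><th>数量</th></tr><tr><td>A</td><td>1</td></tr></table>", "lxml")
#   self.isTrueTable(soup.table)  ->  True    # has <th> header cells
# whereas a single-row table, one stuffed with more than five <a>/<img>/<form> tags, or an outer
# table wrapping another <table> would return False.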
  2539. if table.find_all(['caption', 'th']) != []:
  2540. return True
  2541. elif len(table.find_all(['form', 'a', 'img'])) > 5:
  2542. # print('过滤表格:包含链接图片等大于5的为假表格')
  2543. return False
  2544. elif len(table.find_all(['tr'])) < 2:
  2545. # print('过滤表格:行数小于2的为假表格')
  2546. return False
  2547. elif len(table.find_all(['table'])) >= 1:
  2548. # print('过滤表格:包含多个表格的为假表格')
  2549. return False
  2550. else:
  2551. return True
  2552. def getTrs(self, tbody):
  2553. # 获取所有的tr
  2554. trs = []
  2555. objs = tbody.find_all(recursive=False)
  2556. for obj in objs:
  2557. if obj.name == "tr":
  2558. trs.append(obj)
  2559. if obj.name == "tbody":
  2560. for tr in obj.find_all("tr", recursive=False):
  2561. trs.append(tr)
  2562. return trs
  2563. def getTable(self, tbody):
  2564. trs = self.getTrs(tbody)
  2565. inner_table = []
  2566. if len(trs) < 2:
  2567. return inner_table
  2568. for tr in trs:
  2569. tr_line = []
  2570. tds = tr.findChildren(['td', 'th'], recursive=False)
  2571. if len(tds) < 2:
  2572. continue
  2573. for td in tds:
  2574. # td_text = re.sub('\s+|…', ' ', td.get_text()).strip()
  2575. td_text = re.sub('…', '', td.get_text()).strip()
  2576. td_text = td_text.replace("\x06", "").replace("\x05", "").replace("\x07", "").replace('\\', '/').replace('"', '') # 修复272144312 # 产品单价数量提取结果有特殊符号\ 气动执行装置备件\密封组件\NBR+PT
  2577. td_text = td_text.replace("(", "(").replace(")", ")").replace(':', ':')
  2578. tr_line.append(td_text)
  2579. inner_table.append(tr_line)
  2580. return inner_table
  2581. def fixSpan(self, tbody):
# Expand colspan/rowspan attributes so every row has a full set of cells
  2583. trs = self.getTrs(tbody)
  2584. ths_len = 0
  2585. ths = list()
  2586. trs_set = set()
  2587. # 修改为先进行列补全再进行行补全,否则可能会出现表格解析混乱
  2588. # 遍历每一个tr
  2589. for indtr, tr in enumerate(trs):
  2590. ths_tmp = tr.findChildren('th', recursive=False)
  2591. # 不补全含有表格的tr
  2592. if len(tr.findChildren('table')) > 0:
  2593. continue
  2594. if len(ths_tmp) > 0:
  2595. ths_len = ths_len + len(ths_tmp)
  2596. for th in ths_tmp:
  2597. ths.append(th)
  2598. trs_set.add(tr)
  2599. # 遍历每行中的element
  2600. tds = tr.findChildren(recursive=False)
  2601. if len(tds) < 3:
  2602. continue # 列数太少的不补全
  2603. for indtd, td in enumerate(tds):
  2604. # 若有colspan 则补全同一行下一个位置
  2605. if 'colspan' in td.attrs and str(re.sub("[^0-9]", "", str(td['colspan']))) != "":
  2606. col = int(re.sub("[^0-9]", "", str(td['colspan'])))
  2607. if col < 10 and len(td.get_text()) < 500:
  2608. td['colspan'] = 1
  2609. for i in range(1, col, 1):
  2610. td.insert_after(copy.copy(td))
  2611. for indtr, tr in enumerate(trs):
  2612. ths_tmp = tr.findChildren('th', recursive=False)
  2613. # 不补全含有表格的tr
  2614. if len(tr.findChildren('table')) > 0:
  2615. continue
  2616. if len(ths_tmp) > 0:
  2617. ths_len = ths_len + len(ths_tmp)
  2618. for th in ths_tmp:
  2619. ths.append(th)
  2620. trs_set.add(tr)
  2621. # 遍历每行中的element
  2622. tds = tr.findChildren(recursive=False)
  2623. same_span = 0
  2624. if len(tds) > 1 and 'rowspan' in tds[0].attrs:
  2625. span0 = tds[0].attrs['rowspan']
  2626. for td in tds:
  2627. if 'rowspan' in td.attrs and td.attrs['rowspan'] == span0:
  2628. same_span += 1
  2629. if same_span == len(tds):
  2630. continue
  2631. for indtd, td in enumerate(tds):
  2632. # 若有rowspan 则补全下一行同样位置
  2633. if 'rowspan' in td.attrs and str(re.sub("[^0-9]", "", str(td['rowspan']))) != "":
  2634. row = int(re.sub("[^0-9]", "", str(td['rowspan'])))
  2635. td['rowspan'] = 1
  2636. for i in range(1, row, 1):
  2637. # 获取下一行的所有td, 在对应的位置插入
  2638. if indtr + i < len(trs):
  2639. tds1 = trs[indtr + i].findChildren(['td', 'th'], recursive=False)
  2640. if len(tds1) >= (indtd) and len(tds1) > 0:
  2641. if indtd > 0:
  2642. tds1[indtd - 1].insert_after(copy.copy(td))
  2643. else:
  2644. tds1[0].insert_before(copy.copy(td))
  2645. elif len(tds1) > 0 and len(tds1) == indtd - 1:
  2646. tds1[indtd - 2].insert_after(copy.copy(td))
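# Illustrative effect (hypothetical markup): a cell like <td colspan="3">物资</td> is rewritten to
# colspan="1" with two copies inserted after it in the same row, and a <td rowspan="2"> cell is
# copied into the same column position of the next row, so getTable() later sees a rectangular grid;
# rows where every cell carries the same rowspan are left as they are.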
  2647. def get_monthlen(self, year, month):
'''Given a year and a month (int-like), return the number of days in that month as a string'''
  2649. try:
  2650. weekday, num = calendar.monthrange(int(year), int(month))
  2651. except:
  2652. num = 30
  2653. return str(num)
  2654. def fix_time(self, text, html, page_time):
'''Normalize a raw date string into an (order_begin, order_end) pair formatted as YYYY-MM-DD'''
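# Examples (illustrative; html/page_time are only consulted for bare "N月" inputs):
#   self.fix_time("2021年5月", html, page_time)      ->  ("2021-05-01", "2021-05-31")
#   self.fix_time("2021.5.10", html, page_time)      ->  ("2021-05-10", "2021-05-10")
#   self.fix_time("202105", html, page_time)         ->  ("2021-05-01", "2021-05-31")
#   self.fix_time("2021年5月至7月", html, page_time)   ->  ("2021-05-01", "2021-07-31")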
  2656. for it in [('十二', '12'),('十一', '11'),('十','10'),('九','9'),('八','8'),('七','7'),
  2657. ('六','6'),('五','5'),('四','4'),('三','3'),('二','2'),('一','1')]:
  2658. if it[0] in text:
  2659. text = text.replace(it[0], it[1])
  2660. if re.search('^\d{1,2}月$', text):
  2661. m = re.search('^(\d{1,2})月$', text).group(1)
  2662. if len(m) < 2:
  2663. m = '0' + m
  2664. year = re.search('(\d{4})年(.{,12}采购意向)?', html)
  2665. if year:
  2666. y = year.group(1)
  2667. num = self.get_monthlen(y, m)
  2668. if len(num) < 2:
  2669. num = '0' + num
  2670. order_begin = "%s-%s-01" % (y, m)
  2671. order_end = "%s-%s-%s" % (y, m, num)
  2672. elif page_time != "":
  2673. year = re.search('\d{4}', page_time)
  2674. if year:
  2675. y = year.group(0)
  2676. num = self.get_monthlen(y, m)
  2677. if len(num) < 2:
  2678. num = '0' + num
  2679. order_begin = "%s-%s-01" % (y, m)
  2680. order_end = "%s-%s-%s" % (y, m, num)
  2681. else:
  2682. y = str(datetime.datetime.now().year)
  2683. num = self.get_monthlen(y, m)
  2684. if len(num) < 2:
  2685. num = '0' + num
  2686. order_begin = "%s-%s-01" % (y, m)
  2687. order_end = "%s-%s-%s" % (y, m, num)
  2688. else:
  2689. y = str(datetime.datetime.now().year)
  2690. num = self.get_monthlen(y, m)
  2691. if len(num) < 2:
  2692. num = '0' + num
  2693. order_begin = "%s-%s-01" % (y, m)
  2694. order_end = "%s-%s-%s" % (y, m, num)
  2695. return order_begin, order_end
  2696. t1 = re.search('^(\d{4})(年|/|\.|-)(\d{1,2})月?$', text)
  2697. if t1:
  2698. year = t1.group(1)
  2699. month = t1.group(3)
  2700. num = self.get_monthlen(year, month)
  2701. if len(month)<2:
  2702. month = '0'+month
  2703. if len(num) < 2:
  2704. num = '0'+num
  2705. order_begin = "%s-%s-01" % (year, month)
  2706. order_end = "%s-%s-%s" % (year, month, num)
  2707. return order_begin, order_end
  2708. t2 = re.search('^(\d{4})(年|/|\.|-)(\d{1,2})(月|/|\.|-)(\d{1,2})日?$', text)
  2709. if t2:
  2710. y = t2.group(1)
  2711. m = t2.group(3)
  2712. d = t2.group(5)
  2713. m = '0'+ m if len(m)<2 else m
  2714. d = '0'+d if len(d)<2 else d
  2715. order_begin = order_end = "%s-%s-%s"%(y,m,d)
  2716. return order_begin, order_end
  2717. # 时间样式:"202105"
  2718. t3 = re.search("^(20\d{2})(\d{1,2})$",text)
  2719. if t3:
  2720. year = t3.group(1)
  2721. month = t3.group(2)
  2722. if int(month)>0 and int(month)<=12:
  2723. num = self.get_monthlen(year, month)
  2724. if len(month) < 2:
  2725. month = '0' + month
  2726. if len(num) < 2:
  2727. num = '0' + num
  2728. order_begin = "%s-%s-01" % (year, month)
  2729. order_end = "%s-%s-%s" % (year, month, num)
  2730. return order_begin, order_end
  2731. # 时间样式:"20210510"
  2732. t4 = re.search("^(20\d{2})(\d{2})(\d{2})$", text)
  2733. if t4:
  2734. year = t4.group(1)
  2735. month = t4.group(2)
  2736. day = t4.group(3)
  2737. if int(month) > 0 and int(month) <= 12 and int(day)>0 and int(day)<=31:
  2738. order_begin = order_end = "%s-%s-%s"%(year,month,day)
  2739. return order_begin, order_end
  2740. all_match = re.finditer('^(?P<y1>\d{4})(年|/|\.)(?P<m1>\d{1,2})(?:(月|/|\.)(?:(?P<d1>\d{1,2})日)?)?'
  2741. '(到|至|-)(?:(?P<y2>\d{4})(年|/|\.))?(?P<m2>\d{1,2})(?:(月|/|\.)'
  2742. '(?:(?P<d2>\d{1,2})日)?)?$', text)
  2743. y1 = m1 = d1 = y2 = m2 = d2 = ""
  2744. found_math = False
  2745. for _match in all_match:
  2746. if len(_match.group()) > 0:
  2747. found_math = True
  2748. for k, v in _match.groupdict().items():
  2749. if v!="" and v is not None:
  2750. if k == 'y1':
  2751. y1 = v
  2752. elif k == 'm1':
  2753. m1 = v
  2754. elif k == 'd1':
  2755. d1 = v
  2756. elif k == 'y2':
  2757. y2 = v
  2758. elif k == 'm2':
  2759. m2 = v
  2760. elif k == 'd2':
  2761. d2 = v
  2762. if not found_math:
  2763. return "", ""
  2764. y2 = y1 if y2 == "" else y2
  2765. d1 = '1' if d1 == "" else d1
  2766. d2 = self.get_monthlen(y2, m2) if d2 == "" else d2
  2767. m1 = '0' + m1 if len(m1) < 2 else m1
  2768. m2 = '0' + m2 if len(m2) < 2 else m2
  2769. d1 = '0' + d1 if len(d1) < 2 else d1
  2770. d2 = '0' + d2 if len(d2) < 2 else d2
  2771. order_begin = "%s-%s-%s"%(y1,m1,d1)
  2772. order_end = "%s-%s-%s"%(y2,m2,d2)
  2773. return order_begin, order_end
  2774. def fix_quantity(self, quantity_text, header_quan_unit):
'''
Normalize the product quantity into a plain numeric string.
:param quantity_text: raw quantity string
:param header_quan_unit: quantity unit taken from the table header
:return: (quantity, quantity_unit)
'''
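# Examples (illustrative, hypothetical inputs):
#   self.fix_quantity("约2,000", "台")  ->  ("2000", "台")   # unit falls back to the header unit
#   self.fix_quantity("15㎡", "")       ->  ("15", "㎡")
#   self.fix_quantity("若干", "件")      ->  ("", "")          # nothing numeric to normalize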
  2781. quantity = quantity_text
  2782. quantity = re.sub('[一壹]', '1', quantity)
quantity = re.sub('[,,约]|[((]\d+[))]', '', quantity)  # strip separators/approx markers and bracketed counts, keeping the number itself
ser = re.search('^(\d+\.?\d*)([㎡\w/]{,5})', quantity)
  2785. if ser:
  2786. quantity = str(ser.group(1))
  2787. quantity_unit = ser.group(2)
  2788. if quantity_unit == "" and header_quan_unit != "":
  2789. quantity_unit = header_quan_unit
  2790. else:
  2791. quantity = ""
  2792. quantity_unit = ""
  2793. return quantity, quantity_unit
  2794. def find_header(self, items,p0, p1, p2):
'''
Check each inner_table row against the header regexes; if the row is a header, return the column index of each recognized field.
:param items: list of td texts for one row
:param p0: category-name header regex
:param p1: primary product-name header regex
:param p2: fallback product-name header regex
:return: header column dict, header flag, header texts
'''
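# Example (illustrative, hypothetical header row):
#   items = ['序号', '货物名称', '规格型号', '数量', '单位', '单价(元)', '总价(元)']
#   header_dic, flag, _, _ = self.find_header(items, self.p0, self.p1, self.p2)
#   flag  ->  True
#   header_dic  ->  {'名称': 1, '规格': 2, '数量': 3, '单位': 4, '单价': 5, '总价': 6, ...('' for the rest)}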
  2802. items = [re.sub('\s', '', it) for it in items]
  2803. flag = False
  2804. header_dic = {'名称': '', '数量': '', '单位': '', '单价': '', '品牌': '', '规格': '', '需求': '', '预算': '', '时间': '', '总价': '', '品目': '', '参数': '', '采购人':'', '备注':'','发布日期':''}
  2805. product = "" # 产品
  2806. quantity = "" # 数量
  2807. quantity_unit = "" # 数量单位
  2808. unitPrice = "" # 单价
  2809. brand = "" # 品牌
  2810. specs = "" # 规格
  2811. demand = "" # 采购需求
  2812. budget = "" # 预算金额
  2813. order_time = "" # 采购时间
  2814. total_price = "" # 总价
  2815. category = "" # 品目
  2816. parameter = "" # 参数
  2817. tenderee = "" # 采购人
  2818. notes = "" # 备注 2024/3/27 达仁 需求
  2819. issue_date = "" # 发布日期 2024/3/27 达仁 需求
  2820. # for i in range(min(6, len(items))):
  2821. for i in range(len(items)):
  2822. it = items[i]
  2823. if len(it) < 15 and re.search(p0, it) != None:
  2824. flag = True
  2825. if category != "" and category != it:
  2826. continue
  2827. category = it
  2828. header_dic['品目'] = i
  2829. elif len(it) < 15 and re.search(p1, it) != None:
  2830. flag = True
  2831. if product !='' and product != it:
  2832. break
  2833. product = it
  2834. header_dic['名称'] = i
  2835. # break
  2836. # if not flag:
  2837. if product == "":
  2838. # for i in range(min(4, len(items))):
  2839. for i in range(len(items)):
  2840. it = items[i]
  2841. if len(it) < 15 and it != category and re.search(p2, it) and (re.search('^名称|^品名|^品目', it) or re.search(
  2842. '编号|编码|号|情况|报名|单位|位置|地址|数量|单价|价格|金额|品牌|规格类型|型号|公司|中标人|企业|供应商|候选人', it) == None):
  2843. flag = True
  2844. product = it
  2845. header_dic['名称'] = i
  2846. break
  2847. if flag == False and len(items)>3 and re.search('^第[一二三四五六七八九十](包|标段)$', items[0]):
  2848. product = items[0]
  2849. header_dic['名称'] = 0
  2850. flag = True
  2851. if flag:
  2852. # for j in range(i + 1, len(items)):
  2853. for j in range(len(items)):
  2854. if items[j] in [product, category]:
  2855. continue
  2856. if len(items[j]) > 20 and len(re.sub('[\((].*[)\)]|[^\u4e00-\u9fa5]', '', items[j])) > 10:
  2857. continue
  2858. if header_dic['数量']=="" and re.search('数量|采购量', items[j]) and re.search('单价|用途|要求|规格|型号|运输|承运', items[j])==None:
  2859. header_dic['数量'] = j
  2860. quantity = items[j]
  2861. elif header_dic['单位']=="" and re.search('^(数量单位|计量单位|单位)$', items[j]):
  2862. header_dic['单位'] = j
  2863. quantity_unit = items[j]
  2864. elif re.search('单价', items[j]) and re.search('数量|规格|型号|品牌|供应商', items[j])==None:
  2865. header_dic['单价'] = j
  2866. unitPrice = items[j]
  2867. elif re.search('品牌', items[j]):
  2868. header_dic['品牌'] = j
  2869. brand = items[j]
  2870. elif re.search('规格|型号', items[j]):
  2871. header_dic['规格'] = j
  2872. specs = items[j]
  2873. elif re.search('参数', items[j]):
  2874. header_dic['参数'] = j
  2875. parameter = items[j]
  2876. elif re.search('预算单位|(采购|招标|购买)(单位|人|方|主体)|项目业主|采购商|申购单位|需求单位|业主单位',items[j]) and len(items[j])<=8:
  2877. header_dic['采购人'] = j
  2878. tenderee = items[j]
  2879. elif re.search('需求|服务要求|服务标准', items[j]):
  2880. header_dic['需求'] = j
  2881. demand = items[j]
  2882. elif re.search('预算|控制金额', items[j]) and not re.search('预算单位',items[j]):
  2883. header_dic['预算'] = j
  2884. budget = items[j]
  2885. elif re.search('时间|采购实施月份|采购月份|采购日期', items[j]):
  2886. header_dic['时间'] = j
  2887. order_time = items[j]
2888. elif re.search('总价|(成交|中标|验收|合同|预算|控制|总|合计)?([金总]额|价格?)|最高限价|价格|金额', items[j]) and re.search('数量|规格|型号|品牌|供应商', items[j])==None:
  2889. header_dic['总价'] = j
  2890. total_price = items[j]
  2891. elif re.search('^备\s*注$|资质要求|预留面向中小企业|是否适宜中小企业采购预算预留|公开征集信息', items[j]):
  2892. header_dic['备注'] = j
  2893. notes = items[j]
  2894. elif re.search('^\w{,4}发布(时间|日期)$', items[j]):
  2895. header_dic['发布日期'] = j
  2896. issue_date = items[j]
  2897. if header_dic.get('名称', "") != "" or header_dic.get('品目', "") != "":
  2898. # num = 0
  2899. # for it in (quantity, unitPrice, brand, specs, product, demand, budget, order_time, total_price):
  2900. # if it != "":
  2901. # num += 1
  2902. # if num >=2:
  2903. # return header_dic, flag, (product, quantity, quantity_unit, unitPrice, brand, specs, total_price, category, parameter), (product, demand, budget, order_time)
  2904. if set([quantity, brand, specs, unitPrice, total_price])!=set([""]) or set([demand, budget])!=set([""]):
  2905. return header_dic, flag, (product, quantity, quantity_unit, unitPrice, brand, specs, total_price, category, parameter), (product, demand, budget, order_time,tenderee, notes,issue_date)
  2906. flag = False
  2907. return header_dic, flag, (product, quantity, quantity_unit, unitPrice, brand, specs, total_price, category, parameter), (product, demand, budget, order_time,tenderee,notes,issue_date)
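# Illustrative example for find_header (hypothetical row; assumes self.p1 matches a product-name
# header such as '产品名称' and that p0/p2 do not match the other cells):
#   items = ['序号', '产品名称', '规格型号', '数量', '单价(元)', '总价(元)']
#   -> header_dic contains {'名称': 1, '规格': 2, '数量': 3, '单价': 4, '总价': 5, ...}, flag=True,
#      plus the matched header texts, so later data rows can be read by column index.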
  2908. def predict(self, docid='', html='', page_time=""):
  2909. '''
  2910. 正则寻找table表格内 产品相关信息
  2911. :param html:公告HTML原文
  2912. :return:公告表格内 产品、数量、单价、品牌、规格 ,表头,表头列等信息
  2913. '''
  2914. html = html.replace('<br>', '\n').replace('<br/>', '\n')
  2915. html = re.sub("<html>|</html>|<body>|</body>","",html)
  2916. html = re.sub("##attachment##","",html)
  2917. soup = BeautifulSoup(html, 'lxml')
  2918. # flag_yx = True if re.search('采购意向', html) else False
  2919. flag_yx = True if re.search('采购意向|招标意向|选取意向|意向公告|意向公示|意向公开', html) else False
  2920. tables = soup.find_all(['table'])
  2921. headers = []
  2922. headers_demand = []
  2923. header_col = []
  2924. product_link = []
  2925. demand_link = []
  2926. product_set = set()
  2927. total_product_money = 0
  2928. unit_price_list = [] # 单价列表,用于判断是否重复单价,避免多个表格重复提取造成合计产品价格错误。
  2929. total_price_list = [] # 总价列表,拥有判断是否为几行产品合计总价
  2930. # print('表格数:', len(tables))
  2931. for i in range(len(tables)): # (len(tables)-1, -1, -1) 由从最后到前改为 前到后
  2932. table = tables[i]
  2933. if table.parent.name == 'td' and len(table.find_all('td')) <= 3:
  2934. table.string = table.get_text()
  2935. table.name = 'turntable'
  2936. # print('过滤表格:表格父节点为td,且表格td数量小于等于3')
  2937. continue
  2938. if not self.isTrueTable(table):
  2939. continue
  2940. # self.fixSpan(table)
  2941. # inner_table = self.getTable(table)
  2942. inner_table = self.tb.table2list(table)
  2943. table.extract()
  2944. # print(inner_table)
  2945. i = 0
  2946. found_header = False
  2947. header_quan_unit = "" # 数量表头 包含单位
  2948. header_colnum = 0
  2949. if flag_yx:
  2950. # print('意向公告, 提取意向信息')
  2951. col0_l = []
  2952. col1_l = []
  2953. for tds in inner_table:
  2954. if len(tds) == 2:
  2955. col0_l.append(re.sub('[::]', '', tds[0])) # 处理只有两列的情况
  2956. col1_l.append(tds[1])
  2957. elif len(tds)>=4 and len(inner_table)==2: # 处理只有两行的情况
  2958. col0_l = inner_table[0]
  2959. col1_l = inner_table[1]
  2960. break
  2961. # print(set(col0_l))
  2962. # print('head: ',set(col0_l) & self.header_set)
  2963. if len(set(col0_l) & self.header_set) > len(col0_l) * 0.2 and len(col0_l)==len(col1_l): # 保证两个列数一致
  2964. header_list2 = []
  2965. product = demand = budget = order_begin = order_end = ""
  2966. tenderee = ""
  2967. notes = ''
  2968. issue_date = ''
  2969. for i in range(len(col0_l)):
  2970. if re.search('项目名称', col0_l[i]):
  2971. header_list2.append(col0_l[i])
  2972. product = col1_l[i]
  2973. elif re.search('采购需求|需求概况', col0_l[i]):
  2974. header_list2.append(col0_l[i])
  2975. demand = col1_l[i]
  2976. elif re.search('采购预算|预算金额|控制金额', col0_l[i]):
  2977. header_list2.append(col0_l[i])
  2978. _budget = col1_l[i]
  2979. re_price = re.findall("[零壹贰叁肆伍陆柒捌玖拾佰仟萬億圆十百千万亿元角分]{3,}|\d[\d,]*(?:\.\d+)?万?", _budget)
  2980. if re_price:
  2981. # _budget = re_price[0]
  2982. # if '万元' in col0_l[i] and '万' not in _budget:
  2983. # _budget += '万元'
  2984. # budget = str(getUnifyMoney(_budget))
  2985. _budget, _money_unit = money_process(_budget, col0_l[i])
  2986. budget = str(_budget)
  2987. if '.' in budget:
  2988. budget = budget.rstrip('0').rstrip('.')
  2989. if float(budget)>= 500*100000000:
  2990. budget = ""
  2991. elif re.search('预算单位|(采购|招标|购买)(单位|人|方|主体)|项目业主|采购商|申购单位|需求单位|业主单位', col0_l[i]):
  2992. header_list2.append(col0_l[i])
  2993. tenderee = re.sub("\s","",col1_l[i])
  2994. if len(tenderee) > 20:
  2995. tenderee = ""
  2996. elif re.search('采购时间|采购实施月份|采购月份|采购日期', col0_l[i]):
  2997. header_list2.append(col0_l[i])
  2998. order_time = col1_l[i].strip()
  2999. order_begin, order_end = self.fix_time(order_time, html, page_time)
  3000. elif re.search('^备\s*注$|资质要求|预留面向中小企业|是否适宜中小企业采购预算预留|公开征集信息', col0_l[i]):
  3001. header_list2.append(col0_l[i])
  3002. notes = col1_l[i].strip()
  3003. elif re.search('^\w{,4}发布(时间|日期)$', col0_l[i]):
  3004. header_list2.append(col0_l[i])
  3005. issue_date = self.fix_time(col1_l[i].strip(), '', '')[0]
  3006. if order_begin != "" and order_end!="":
  3007. order_begin_year = int(order_begin.split("-")[0])
  3008. order_end_year = int(order_end.split("-")[0])
  3009. # 限制附件错误识别时间
  3010. if order_begin_year>=2050 or order_end_year>=2050:
  3011. order_begin = order_end = ""
  3012. # print(product,demand,budget,order_begin)
  3013. if product!= "" and demand != "" and budget!="" and order_begin != "":
  3014. link = {'project_name': product, 'product': [], 'demand': demand, 'budget': budget,
  3015. 'order_begin': order_begin, 'order_end': order_end ,'tenderee':tenderee, 'notes':notes, 'issue_date':issue_date}
  3016. if link not in demand_link:
  3017. demand_link.append(link)
  3018. headers_demand.append('_'.join(header_list2))
  3019. continue
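# Illustrative example for the procurement-intention branch above (hypothetical table; assumes
# money_process('120万元', '预算金额') returns 1200000 and that fix_time expands a bare
# year-month to the first/last day of that month, as in the month branch of fix_time):
#   inner_table = [['项目名称', 'XX设备采购'], ['采购需求', '采购一批XX设备'],
#                  ['预算金额', '120万元'], ['采购时间', '2023年10月']]
#   -> demand_link gains {'project_name': 'XX设备采购', 'demand': '采购一批XX设备',
#      'budget': '1200000', 'order_begin': '2023-10-01', 'order_end': '2023-10-31', ...}.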
  3020. if len(inner_table)>3 and len(inner_table[0])==2 and len(inner_table[1])==2: # 只有两列且第一列为表头的,行列切换
  3021. col0_l = []
  3022. col1_l = []
  3023. for tds in inner_table:
  3024. if len(tds) == 2:
  3025. col0_l.append(re.sub('[::]', '', tds[0])) # 处理只有两列的情况
  3026. col1_l.append(tds[1])
  3027. else:
  3028. break
  3029. if len(set(col0_l) & self.header_set) > len(col0_l) * 0.5 and len(col0_l) == len(col1_l):
  3030. inner_table = [col0_l, col1_l]
3031. elif len(inner_table)>2 and len(inner_table[0])==4 and len(inner_table[1])==4 and len(set(inner_table[0]) & self.header_set)==2: # 四列(每行两组键值对,奇数列为表头)的情况,行列切换
  3032. col0_l = []
  3033. col1_l = []
  3034. col2_l = []
  3035. col3_l = []
  3036. for tds in inner_table:
  3037. if len(tds) == 4 and len(set(tds))>2:
  3038. col0_l.append(re.sub('[::]', '', tds[0])) # 处理只有两列的情况
  3039. col1_l.append(tds[1])
  3040. col2_l.append(re.sub('[::]', '', tds[2])) # 处理只有两列的情况
  3041. col3_l.append(tds[3])
  3042. else:
  3043. break
  3044. if len(set(col0_l) & self.header_set) > len(col0_l) * 0.5 and len(set(col2_l) & self.header_set) > len(col2_l) * 0.5:
  3045. inner_table = [col0_l+col2_l, col1_l+col3_l]
  3046. while i < (len(inner_table)):
  3047. tds = inner_table[i]
  3048. not_empty = [it for it in tds if re.sub('\s', '', it) != ""]
  3049. if len(set(not_empty))<2 or len(set(tds))<2 or (len(set(tds))==2 and re.search('总计|合计|汇总', tds[0])): # 非空列或者不重复内容小于两列的 继续
  3050. i += 1
  3051. # print('表格产品提取:非空列或者不重复内容小于两列的 继续', i, tds)
  3052. continue
  3053. product = "" # 产品
  3054. quantity = "" # 数量
  3055. quantity_unit = "" # 数量单位
  3056. unitPrice = "" # 单价
  3057. brand = "" # 品牌
  3058. specs = "" # 规格
  3059. demand = "" # 采购需求
  3060. budget = "" # 预算金额
  3061. order_time = "" # 采购时间
  3062. order_begin = ""
  3063. order_end = ""
  3064. total_price = "" # 总金额
  3065. parameter = "" # 参数
  3066. tenderee = "" # 采购人
  3067. notes = '' # 备注
  3068. issue_date = '' # 发布日期
  3069. if len(set([re.sub('[::\s]','',td) for td in tds]) & self.header_set) > len(tds) * 0.4:
  3070. # if len(set(tds) & self.header_set) > len(tds) * 0.2:
  3071. header_dic, found_header, header_list, header_list2 = self.find_header(tds, self.p0, self.p1, self.p2)
  3072. if found_header:
  3073. header_colnum = len(tds) # 保存表头所在行列数
  3074. # print('发现表头:', header_colnum, header_dic)
  3075. if found_header and isinstance(header_list, tuple) and len(header_list) > 2: # 获取表头中的 数量单位
  3076. quantity_header = header_list[1].replace('单位:', '')
  3077. if re.search('(([\w/]{,5}))', quantity_header):
  3078. header_quan_unit = re.search('(([\w/]{,5}))', quantity_header).group(1)
  3079. else:
  3080. header_quan_unit = ""
  3081. if found_header and ('_'.join(header_list) not in headers or '_'.join(header_list2) not in headers_demand):# and len(headers)<1: # 只保留出现的第一个表头
  3082. headers.append('_'.join(header_list))
  3083. headers_demand.append('_'.join(header_list2))
  3084. header_col.append('_'.join(tds))
  3085. i += 1
  3086. # print('表头数量占行列数0.4倍不做内容匹配', set([re.sub('[::]','',td) for td in tds]) & self.header_set, tds)
  3087. continue
  3088. elif found_header:
  3089. if len(tds) > header_colnum or len(tds)-1<max([it for it in header_dic.values() if it!=""]): # 表头、属性列数不一致跳过
  3090. i += 1
  3091. # print('表头、属性列数不一致跳过', len(tds), header_colnum, tds)
  3092. continue
  3093. id0 = header_dic.get('品目', "")
  3094. id1 = header_dic.get('名称', "")
  3095. id2 = header_dic.get('数量', "")
  3096. id2_2 = header_dic.get('单位', "")
  3097. id3 = header_dic.get('单价', "")
  3098. id4 = header_dic.get('品牌', "")
  3099. id5 = header_dic.get('规格', "")
  3100. id6 = header_dic.get('需求', "")
  3101. id7 = header_dic.get('预算', "")
  3102. id8 = header_dic.get('时间', "")
  3103. id9 = header_dic.get("总价", "")
  3104. id10 = header_dic.get('参数', "")
  3105. id11 = header_dic.get('采购人', "")
  3106. id12 = header_dic.get('备注', "")
  3107. id13 = header_dic.get('发布日期', "")
  3108. not_attr = 0
  3109. for k, v in header_dic.items():
  3110. if isinstance(v, int):
  3111. if v >= len(tds) or tds[v] in self.header_set:
  3112. # print('内容属性在表头集合里面', tds[v], v >= len(tds))
3113. not_attr += 1  # 累计表头样内容的列数
  3114. # break
  3115. if not_attr>=2: # 只要属性里面有两项为表头,停止匹配
  3116. i += 1
  3117. found_header = False
  3118. # print('只要属性里面有两项为表头,停止匹配')
  3119. continue
  3120. if id1!="" and re.search('[a-zA-Z\u4e00-\u9fa5]', tds[id1]) and tds[id1] not in self.header_set and \
  3121. re.search('备注|汇总|合计|总价|价格|金额|^详见|无$|xxx', tds[id1]) == None:
  3122. product = tds[id1]
  3123. if id0!="" and re.search('[a-zA-Z\u4e00-\u9fa5]', tds[id0]) and tds[id0] not in self.header_set and \
  3124. re.search('备注|汇总|合计|总价|价格|金额|^详见|无$|xxx', tds[id0]) == None:
  3125. category = tds[id0]
  3126. product = "%s_%s"%(category, product) if product!="" and product!=category else category
  3127. if product != "" and product not in ['工程类', '服务类', '货物类', '工程', '服务', '货物']:
  3128. # print('匹配产品内容: ', product)
  3129. if id2 != "":
  3130. if re.search('\d+|[壹贰叁肆伍陆柒捌玖拾一二三四五六七八九十]', tds[id2]):
  3131. # if re.search('(^\d{,3}(,?\d{3}){2,}(\.\d{2,7},?)$)|万?元', tds[id2]): # 254816100 这篇数量很大,貌似正常
  3132. # i += 1
  3133. # print('过滤:数量包含金额单位或值很大类似金额', tds[id2])
  3134. # continue
  3135. quantity = tds[id2]
  3136. elif re.search('\w{5,}', tds[id2]) and re.search('^详见|^详情', tds[id2])==None:
  3137. i += 1
  3138. # print('过滤:数量包含五个字符以上且不包含^详见|^详情等字符', tds[id2])
  3139. continue
  3140. if id2_2 != "":
  3141. if re.search('^\w{1,4}$', tds[id2_2]) and re.search('元', tds[id2_2])==None:
  3142. quantity_unit = tds[id2_2]
  3143. if id3 != "":
  3144. if re.search('[零壹贰叁肆伍陆柒捌玖拾佰仟萬億十百千万亿元角分]{3,}', tds[id3]):
  3145. unitPrice = tds[id3]
  3146. elif re.search('^[\d,.亿万元人民币欧美日金额:()();;、,\n]+$|¥|¥|RMB|USD|EUR|JPY|CNY|元$', tds[id3].strip()):
  3147. unitPrice = tds[id3]
  3148. elif len(re.sub('[金额万元()()::零壹贰叁肆伍陆柒捌玖拾佰仟萬億圆十百千万亿元角分¥整\d,.]', '', tds[id3])) > 5 and re.search('^详见|^详情', tds[id3])==None:
  3149. i += 1
  3150. # print('过滤:产品单价包含金额外的字符数大于5个', tds[id3])
  3151. continue
  3152. if id4 != "":
  3153. if re.search('\w', tds[id4]):
  3154. brand = tds[id4]
  3155. if re.match('^详见|^详情', brand.strip()):
  3156. brand = ""
  3157. else:
  3158. brand = ""
  3159. if id5 != "":
  3160. if re.search('\w', tds[id5]):
  3161. specs = tds[id5][:500] # 限制最多500字
  3162. if re.match('^详见|^详情', specs.strip()):
  3163. specs = ""
  3164. else:
  3165. specs = ""
  3166. if id6 != "":
  3167. if re.search('\w', tds[id6]):
  3168. demand = tds[id6]
  3169. else:
  3170. demand = ""
  3171. if id7 != "":
  3172. if re.search('\d+|[零壹贰叁肆伍陆柒捌玖拾佰仟萬億十百千万亿元角分]{3,}', tds[id7]):
  3173. budget = tds[id7]
  3174. if id8 != "":
  3175. if re.search('\w', tds[id8]):
  3176. order_time = tds[id8].strip()
  3177. order_begin, order_end = self.fix_time(order_time, html, page_time)
  3178. if id9 != "":
  3179. if re.search('[零壹贰叁肆伍陆柒捌玖拾佰仟萬億十百千万亿元角分]{3,}', tds[id9]):
  3180. total_price = tds[id9]
  3181. elif re.search('^[\d,.亿万元人民币欧美日金额:()();;、,\n]+$|¥|¥|RMB|USD|EUR|JPY|CNY|元$', tds[id9].strip()):
  3182. total_price = tds[id9]
  3183. elif len(re.sub('[金额万元()()::零壹贰叁肆伍陆柒捌玖拾佰仟萬億圆十百千万亿元角分¥整\d,.]', '', tds[id9])) > 5 and re.search('^详见|^详情', tds[id9])==None:
  3184. i += 1
  3185. # print('过滤:产品总价包含金额外的字符数大于5个', tds[id9])
  3186. continue
  3187. if id10 != "":
  3188. parameter = tds[id10][:500]
  3189. if re.match('^详见|^详情', parameter.strip()):
  3190. parameter = ""
  3191. if id11 != "":
  3192. tenderee = re.sub("\s","",tds[id11])
  3193. if len(tenderee) > 30:
  3194. tenderee = ""
  3195. if id12 != "":
  3196. notes = tds[id12].strip()
  3197. if id13 != "":
  3198. issue_date = self.fix_time(tds[id13].strip(), '', '')[0]
  3199. # print('数量:{0}, 单价:{1}, 品牌:{2}, 规格:{3},总价:{4}'.format(quantity ,unitPrice, brand, specs, total_price))
3200. if quantity != "" or unitPrice != "" or brand != "" or specs != "" or total_price or header_dic.get('单价', "") != "" or header_dic.get('总价', "") != "":  # 行内有属性值,或表头含单价/总价列时继续
  3201. if id1!="" and id2 != "" and id3 != "" and len(re.split('[;;、,\n]+', tds[id2])) > 1 and len(re.split('[;;、,\n]+', tds[id1])) == len(re.split('[;;、,\n]+', tds[id2])): # 处理一个空格包含多个产品,逗号或空格分割情况 例子 292846806 292650743
  3202. products = re.split('[;;、,\n]+', tds[id1])
  3203. quantitys = re.split('[;;、,\n]+', tds[id2])
  3204. unitPrices = re.split('[;;、,\n]+', tds[id3])
  3205. total_prices = re.split('[;;、,\n]+', total_price)
  3206. brands = re.split('[;;、,\n]+', brand) if re.search('等$', brand)==None else [brand]
  3207. specses = re.split('[;;、,\n]+', specs) if re.search('等$', specs)==None else [specs]
  3208. parameters = re.split('[;;、,\n]+', parameter) if re.search('等$', parameter)==None else [parameter]
  3209. unitPrices = [""]*len(products) if len(unitPrices)==1 else unitPrices
  3210. total_prices = [""]*len(products) if len(total_prices)==1 else total_prices
  3211. brands = brands*len(products) if len(brands)==1 else brands
  3212. specses = specses*len(products) if len(specses)==1 else specses
  3213. brands = [brand]*len(products) if len(brands) < len(products) else brands
  3214. specses = [specs] * len(products) if len(specses) < len(products) else specses
  3215. parameters = parameters*len(products) if len(parameters)==1 else parameters
  3216. # print('产品拆分:', len(products),len(quantitys) , len(unitPrices),len(brands),len(specses))
  3217. if len(products) == len(quantitys) == len(unitPrices) == len(brands) == len(specses):
  3218. for product, quantity, unitPrice, brand, specs, total_price, parameter in zip(products,quantitys,unitPrices, brands, specses, total_prices, parameters):
  3219. if quantity != "":
  3220. quantity, quantity_unit_ = self.fix_quantity(quantity, header_quan_unit)
  3221. quantity_unit = quantity_unit_ if quantity_unit_ != "" else quantity_unit
  3222. if unitPrice != "":
  3223. unitPrice, _money_unit = money_process(unitPrice, header_list[3])
  3224. unitPrice = str(unitPrice) if unitPrice != 0 and unitPrice<100000000 else ""
  3225. if budget != "":
  3226. budget, _money_unit = money_process(budget, header_list2[2])
  3227. budget = str(budget) if budget != 0 and budget<50000000000 else ''
  3228. if total_price != "":
  3229. total_price, _money_unit = money_process(total_price, header_list[6])
  3230. total_price_list.append(total_price)
  3231. total_price = str(total_price) if total_price != 0 and total_price<50000000000 else ""
  3232. link = {'product': product, 'quantity': quantity,
  3233. 'quantity_unit': quantity_unit, 'unitPrice': unitPrice,
  3234. 'brand': brand[:50], 'specs': specs, 'total_price': total_price, 'parameter': parameter}
  3235. # if link not in product_link:
  3236. # product_link.append(link)
  3237. # mat = re.match('([0-9.,]+)[((]?\w{,3}[))]?$', link['quantity'])
  3238. # if link['unitPrice'] != "" and mat:
  3239. # try:
  3240. # total_product_money += float(link['unitPrice']) * float(
  3241. # mat.group(1).replace(',', '')) if float(
  3242. # mat.group(1).replace(',', '')) < 50000 else 0
  3243. # except:
  3244. # log('产品属性单价数量相乘出错, 单价: %s, 数量: %s' % (
  3245. # link['unitPrice'], link['quantity']))
  3246. if (product, specs, unitPrice, quantity) not in product_set:
  3247. product_set.add((product, specs, unitPrice, quantity))
  3248. product_link.append(link)
  3249. if link['unitPrice'] != "" and link['quantity'] != '':
  3250. try:
  3251. total_product_money += float(link['unitPrice']) * float(
  3252. link['quantity']) if float(link['quantity']) < 50000 else 0
  3253. except:
  3254. log('产品属性单价数量相乘出错, 单价: %s, 数量: %s' % (
  3255. link['unitPrice'], link['quantity']))
  3256. elif len(product)>100: # 产品名称长于100字
  3257. i += 1
  3258. # print('过滤: 产品名称长于100字',)
  3259. continue
  3260. else:
  3261. if quantity != "":
  3262. quantity, quantity_unit_ = self.fix_quantity(quantity, header_quan_unit)
  3263. quantity_unit = quantity_unit_ if quantity_unit_ != "" else quantity_unit
  3264. if unitPrice != "":
  3265. unitPrice, _money_unit = money_process(unitPrice, header_list[3])
  3266. unitPrice = str(unitPrice) if unitPrice != 0 and unitPrice<100000000 else ""
  3267. if budget != "":
  3268. budget, _money_unit = money_process(budget, header_list2[2])
  3269. budget = str(budget) if budget != 0 and budget<50000000000 else ''
  3270. if total_price != "":
  3271. total_price, _money_unit = money_process(total_price, header_list[6])
  3272. total_price_list.append(total_price)
  3273. total_price = str(total_price) if total_price != 0 and total_price<50000000000 else ""
  3274. link = {'product': product, 'quantity': quantity, 'quantity_unit': quantity_unit, 'unitPrice': unitPrice,
  3275. 'brand': brand[:50], 'specs':specs, 'total_price': total_price, 'parameter': parameter}
  3276. # if link not in product_link:
  3277. # product_link.append(link)
  3278. # mat = re.match('([0-9.,]+)[((]?\w{,3}[))]?$', link['quantity'])
  3279. # if link['unitPrice'] != "" and mat:
  3280. # try:
  3281. # total_product_money += float(link['unitPrice'])*float(mat.group(1).replace(',', '')) if float(mat.group(1).replace(',', ''))<50000 else 0
  3282. # except:
  3283. # log('产品属性单价数量相乘出错, 单价: %s, 数量: %s'%(link['unitPrice'], link['quantity']))
  3284. # if (product, unitPrice, quantity) not in product_set:
  3285. # product_set.add((product, unitPrice, quantity))
  3286. if (product, unitPrice,) not in product_set: # 2023/09/22 改为只判断产品/单价,只要两个一样就不作为新产品 避免多个表格重复表达有些没数量造成重复提取 353858683
  3287. product_set.add((product, unitPrice))
  3288. product_link.append(link)
  3289. if link['unitPrice']:
  3290. unit_price_list.append(link['unitPrice'])
  3291. if link['unitPrice'] != "" and link['quantity'] != '':
  3292. try:
  3293. total_product_money += float(link['unitPrice'])*float(link['quantity']) if float(link['quantity'])<50000 else 0
  3294. except:
  3295. log('产品属性单价数量相乘出错, 单价: %s, 数量: %s'%(link['unitPrice'], link['quantity']))
  3296. if order_begin != "" and order_end != "":
  3297. order_begin_year = int(order_begin.split("-")[0])
  3298. order_end_year = int(order_end.split("-")[0])
  3299. # 限制附件错误识别时间
  3300. if order_begin_year >= 2050 or order_end_year >= 2050:
  3301. order_begin = order_end = ""
  3302. # print(budget,order_time)
  3303. if budget != "" and order_time != "":
  3304. link = {'project_name': product, 'product':[], 'demand': demand, 'budget': budget, 'order_begin':order_begin, 'order_end':order_end, 'tenderee':tenderee,'notes':notes,'issue_date':issue_date}
  3305. if link not in demand_link:
  3306. demand_link.append(link)
  3307. i += 1
  3308. else:
  3309. i += 1
  3310. if len(total_price_list)>1 and len(set(total_price_list))/len(total_price_list)<=0.5: # 2023/7/27 总价一半以上重复的为多行一个总价,需去掉
  3311. # print('总价一半以上重复的为多行一个总价,需去掉', total_price_list)
  3312. for link in product_link: # 预防最后一列总价为所有产品总价,列补全后所有产品总价一样情况
  3313. if 'total_price' in link:
  3314. link['total_price'] = ""
  3315. if len(unit_price_list)>0 and len(unit_price_list)==len(product_link) and len(set(unit_price_list))/len(unit_price_list)<=0.5: # 2023/7/18 如果单价重复率高不算总产品价避免错误
  3316. # print('如果单价重复率高不算总产品价避免错误')
  3317. total_product_money = 0
  3318. # for link in product_link:
  3319. # if 'unitPrice' in link:
  3320. # link['unitPrice'] = ""
  3321. if len(product_link)>0:
  3322. attr_dic = {'product_attrs':{'data':product_link, 'header':headers, 'header_col':header_col}}
  3323. else:
  3324. attr_dic = {'product_attrs': {'data': [], 'header': [], 'header_col': []}}
  3325. if len(demand_link)>0:
  3326. demand_dic = {'demand_info':{'data':demand_link, 'header':headers_demand, 'header_col':header_col}}
  3327. else:
  3328. demand_dic = {'demand_info':{'data':[], 'header':[], 'header_col':[]}}
  3329. # print('表格产品属性提取:', attr_dic)
  3330. return [attr_dic, demand_dic], total_product_money
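# Illustrative shape of the value returned by predict() above (field names taken from the code,
# concrete values hypothetical):
#   ([{'product_attrs': {'data': [{'product': 'XX设备', 'quantity': '2', 'quantity_unit': '台',
#                                  'unitPrice': '15000', 'brand': '', 'specs': 'XY-1',
#                                  'total_price': '30000', 'parameter': ''}],
#                        'header': [...], 'header_col': [...]}},
#     {'demand_info': {'data': [...], 'header': [...], 'header_col': [...]}}],
#    30000.0)   # second element is total_product_money, the sum of unitPrice * quantity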
  3331. def predict_without_table(self,product_attrs,list_sentences,list_entitys,codeName,prem, html='', page_time=""):
  3332. if len(prem[0]['prem'])==1:
  3333. list_sentences[0].sort(key=lambda x:x.sentence_index)
  3334. list_sentence = list_sentences[0]
  3335. list_entity = list_entitys[0]
  3336. _data = product_attrs[1]['demand_info']['data']
  3337. re_bidding_time = re.compile("(采购|采购实施|预计招标)(时间|月份|日期)[::,].{0,2}$")
  3338. order_times = []
  3339. for entity in list_entity:
  3340. if entity.entity_type=='time':
  3341. sentence = list_sentence[entity.sentence_index]
  3342. s = spanWindow(tokens=sentence.tokens, begin_index=entity.begin_index,
  3343. end_index=entity.end_index,size=20)
  3344. entity_left = "".join(s[0])
  3345. if re.search(re_bidding_time,entity_left):
  3346. time_text = entity.entity_text.strip()
  3347. standard_time = re.compile("((?P<year>\d{4}|\d{2})\s*[-\/年\.]\s*(?P<month>\d{1,2})\s*[-\/月\.]\s*((?P<day>\d{1,2})日?)?)")
  3348. time_match = re.search(standard_time,time_text)
  3349. # print(time_text, time_match)
  3350. if time_match:
  3351. time_text = time_match.group()
  3352. order_times.append(time_text)
  3353. # print(order_times)
  3354. order_times = [tuple(self.fix_time(order_time, html, page_time)) for order_time in order_times]
  3355. order_times = [order_time for order_time in order_times if order_time[0]!=""]
  3356. if len(set(order_times))==1:
  3357. order_begin,order_end = order_times[0]
  3358. project_name = codeName[0]['name']
  3359. pack_info = [pack for pack in prem[0]['prem'].values()]
  3360. budget = pack_info[0].get('tendereeMoney',0)
  3361. product = prem[0]['product']
  3362. link = {'project_name': project_name, 'product': product, 'demand': project_name, 'budget': budget,
  3363. 'order_begin': order_begin, 'order_end': order_end}
  3364. _data.append(link)
  3365. product_attrs[1]['demand_info']['data'] = _data
  3366. # print('predict_without_table: ', product_attrs)
  3367. return product_attrs
  3368. def predict_by_text(self,product_attrs,html,list_outlines,product_list,page_time=""):
  3369. product_entity_list = list(set(product_list))
  3370. list_outline = list_outlines[0]
  3371. get_product_attrs = False
  3372. for _outline in list_outline:
  3373. if re.search("信息|情况|清单|概况",_outline.outline_summary):
  3374. outline_text = _outline.outline_text
  3375. outline_text = outline_text.replace(_outline.outline_summary,"")
  3376. key_value_list = [_split for _split in re.split("[,。;]",outline_text) if re.search("[::]",_split)]
  3377. if not key_value_list:
  3378. continue
  3379. head_list = []
  3380. head_value_list = []
  3381. for key_value in key_value_list:
  3382. key_value = re.sub("^[一二三四五六七八九十]{1,3}[、.]|^[\d]{1,2}[、.]\d{,2}|^[\((]?[一二三四五六七八九十]{1,3}[\))][、]?","",key_value)
  3383. temp = re.split("[::]",key_value)
  3384. if len(temp)>2:
  3385. if temp[0] in head_list:
  3386. key = temp[0]
  3387. value = "".join(temp[1:])
  3388. else:
  3389. key = temp[-2]
  3390. value = temp[-1]
  3391. else:
  3392. key = temp[0]
  3393. value = temp[1]
  3394. key = re.sub("^[一二三四五六七八九十]{1,3}[、.]|^[\d]{1,2}[、.]\d{,2}|^[\((]?[一二三四五六七八九十]{1,3}[\))][、]?","",key)
  3395. head_list.append(key)
  3396. head_value_list.append(value)
  3397. head_set = set(head_list)
  3398. # print('head_set',head_set)
  3399. if len(head_set & self.header_set) > len(head_set)*0.2:
  3400. loop_list = []
  3401. begin_list = [0]
  3402. for index,head in enumerate(head_list):
  3403. if head not in loop_list:
  3404. if re.search('第[一二三四五六七八九十](包|标段)', head) and re.search('第[一二三四五六七八九十](包|标段)', '|'.join(loop_list)):
  3405. begin_list.append(index)
  3406. loop_list = []
  3407. loop_list.append(head)
  3408. else:
  3409. loop_list.append(head)
  3410. else:
  3411. begin_list.append(index)
  3412. loop_list = []
  3413. loop_list.append(head)
  3414. headers = []
  3415. headers_demand = []
  3416. header_col = []
  3417. product_link = []
  3418. demand_link = []
  3419. product_set = set()
  3420. for idx in range(len(begin_list)):
  3421. if idx==len(begin_list)-1:
  3422. deal_list = head_value_list[begin_list[idx]:]
  3423. tmp_head_list = head_list[begin_list[idx]:]
  3424. else:
  3425. deal_list = head_value_list[begin_list[idx]:begin_list[idx+1]]
  3426. tmp_head_list = head_list[begin_list[idx]:begin_list[idx+1]]
  3427. product = "" # 产品
  3428. quantity = "" # 数量
  3429. quantity_unit = "" # 单位
  3430. unitPrice = "" # 单价
  3431. brand = "" # 品牌
  3432. specs = "" # 规格
  3433. demand = "" # 采购需求
  3434. budget = "" # 预算金额
  3435. order_time = "" # 采购时间
  3436. order_begin = ""
  3437. order_end = ""
  3438. total_price = "" # 总金额
  3439. parameter = "" # 参数
  3440. header_dic, found_header, header_list, header_list2 = self.find_header(tmp_head_list, self.p0, self.p1,self.p2)
  3441. if found_header:
  3442. headers.append('_'.join(header_list))
  3443. headers_demand.append('_'.join(header_list2))
  3444. header_col.append('_'.join(tmp_head_list))
  3445. # print('header_dic: ',header_dic)
  3446. id0 = header_dic.get('品目', "")
  3447. id1 = header_dic.get('名称', "")
  3448. id2 = header_dic.get('数量', "")
  3449. id2_2 = header_dic.get('单位', "")
  3450. id3 = header_dic.get('单价', "")
  3451. id4 = header_dic.get('品牌', "")
  3452. id5 = header_dic.get('规格', "")
  3453. id6 = header_dic.get('需求', "")
  3454. id7 = header_dic.get('预算', "")
  3455. id8 = header_dic.get('时间', "")
  3456. id9 = header_dic.get("总价", "")
  3457. id10 = header_dic.get('参数', "")
  3458. if id1!='' and re.search('[a-zA-Z\u4e00-\u9fa5]', deal_list[id1]) and deal_list[id1] not in self.header_set and \
  3459. re.search('备注|汇总|合计|总价|价格|金额|公司|附件|详见|无$|xxx', deal_list[id1]) == None:
  3460. product = deal_list[id1]
  3461. if id0 != "" and re.search('[a-zA-Z\u4e00-\u9fa5]', deal_list[id0]) and deal_list[id0] not in self.header_set and \
  3462. re.search('备注|汇总|合计|总价|价格|金额|公司|附件|详见|无$|xxx', deal_list[id0]) == None:
  3463. category = deal_list[id0]
  3464. product = "%s_%s" % (category, product) if product != "" else category
  3465. if product == "":
  3466. # print(deal_list[id4],deal_list[id5],tmp_head_list,deal_list)
  3467. if (id4 != "" and deal_list[id4] != "") or (id5 != "" and deal_list[id5] != ""):
  3468. for head,value in zip(tmp_head_list,deal_list):
  3469. if value and value in product_entity_list:
  3470. product = value
  3471. break
  3472. if product != "":
  3473. if id2 != "":
  3474. if re.search('\d+|[壹贰叁肆伍陆柒捌玖拾一二三四五六七八九十]', deal_list[id2]):
  3475. quantity = deal_list[id2]
  3476. quantity = re.sub('[()(),,约]', '', quantity)
  3477. quantity = re.sub('[一壹]', '1', quantity)
  3478. ser = re.search('^(\d+(?:\.\d+)?)([㎡\w/]{,5})', quantity)
  3479. if ser:
  3480. quantity = str(ser.group(1))
  3481. quantity_unit = ser.group(2)
  3482. if float(quantity)>=10000*10000:
  3483. quantity = ""
  3484. quantity_unit = ""
  3485. else:
  3486. quantity = ""
  3487. quantity_unit = ""
  3488. if id2_2 != "":
  3489. if re.search('^\w{1,4}$', deal_list[id2_2]):
  3490. quantity_unit = deal_list[id2_2]
  3491. else:
  3492. quantity_unit = ""
  3493. # if id2 != "":
  3494. # if re.search('\d+|[壹贰叁肆伍陆柒捌玖拾一二三四五六七八九十]', deal_list[id2]):
  3495. # quantity = deal_list[id2]
  3496. # else:
  3497. # quantity = ""
  3498. if id3 != "":
  3499. if re.search('\d+|[零壹贰叁肆伍陆柒捌玖拾佰仟萬億十百千万亿元角分]{3,}', deal_list[id3]):
  3500. _unitPrice = deal_list[id3]
  3501. re_price = re.findall("[零壹贰叁肆伍陆柒捌玖拾佰仟萬億圆十百千万亿元角分]{3,}|\d[\d,]*(?:\.\d+)?万?",_unitPrice)
  3502. if re_price:
  3503. # _unitPrice = re_price[0]
  3504. # if '万元' in header_list[3] and '万' not in _unitPrice:
  3505. # _unitPrice += '万元'
  3506. # unitPrice = getUnifyMoney(_unitPrice)
  3507. # if unitPrice>=10000*10000:
  3508. # unitPrice = ""
  3509. # unitPrice = str(unitPrice)
  3510. _unitPrice, _money_unit = money_process(_unitPrice, header_list[3])
  3511. if _unitPrice >= 10000 * 10000:
  3512. _unitPrice = ""
  3513. unitPrice = str(_unitPrice)
  3514. if '.' in unitPrice:
  3515. unitPrice = unitPrice.rstrip('0').rstrip('.')
  3516. if id4 != "":
  3517. if re.search('\w', deal_list[id4]):
  3518. brand = deal_list[id4]
  3519. if re.match('^详见|^详情', brand.strip()):
  3520. brand = ""
  3521. else:
  3522. brand = ""
  3523. if id5 != "":
  3524. if re.search('\w', deal_list[id5]):
  3525. specs = deal_list[id5][:500]
  3526. if re.match('^详见|^详情', specs.strip()):
  3527. brand = ""
  3528. else:
  3529. specs = ""
  3530. if id6 != "":
  3531. if re.search('\w', deal_list[id6]):
  3532. demand = deal_list[id6]
  3533. else:
  3534. demand = ""
  3535. if id7 != "":
  3536. if re.search('\d+|[零壹贰叁肆伍陆柒捌玖拾佰仟萬億十百千万亿元角分]{3,}', deal_list[id7]):
  3537. _budget = deal_list[id7]
  3538. re_price = re.findall("[零壹贰叁肆伍陆柒捌玖拾佰仟萬億圆十百千万亿元角分]{3,}|\d[\d,]*(?:\.\d+)?万?",_budget)
  3539. if re_price:
  3540. # _budget = re_price[0]
  3541. # if '万元' in header_list2[2] and '万' not in _budget:
  3542. # _budget += '万元'
  3543. # budget = str(getUnifyMoney(_budget))
  3544. _budget, _money_unit = money_process(_budget, header_list2[2])
  3545. budget = str(_budget)
  3546. if '.' in budget:
  3547. budget = budget.rstrip('0').rstrip('.')
  3548. if float(budget)>= 100000*10000:
  3549. budget = ""
  3550. if id8 != "":
  3551. if re.search('\w', deal_list[id8]) and re.search("(采购|采购实施|预计招标)(时间|月份|日期)",header_list2[3]):
  3552. order_time = deal_list[id8].strip()
  3553. order_begin, order_end = self.fix_time(order_time, html, page_time)
  3554. if id9 != "":
  3555. if re.search('[零壹贰叁肆伍陆柒捌玖拾佰仟萬億十百千万亿元角分]{3,}', deal_list[id9]):
  3556. total_price = deal_list[id9]
  3557. elif re.search('^[\d,.亿万元人民币欧美日金额:()();;、,\n]+$', deal_list[id9].strip()):
  3558. total_price = deal_list[id9]
  3559. if id10 != "":
  3560. parameter = deal_list[id10][:500]
  3561. if re.match('^详见|^详情', parameter.strip()):
  3562. parameter = ""
  3563. if quantity != "" or unitPrice != "" or brand != "" or specs != "" or total_price:
  3564. if id1 != "" and id2 != "" and id3 != "" and len(re.split('[;;、,\n]', deal_list[id2])) > 1 and len(
  3565. re.split('[;;、,\n]', deal_list[id1])) == len(re.split('[;;、,\n]', deal_list[id2])): # 处理一个空格包含多个产品,逗号或空格分割情况 例子 292846806 292650743
  3566. products = re.split('[;;、,\n]', deal_list[id1])
  3567. quantitys = re.split('[;;、,\n]', deal_list[id2])
  3568. unitPrices = re.split('[;;、,\n]', deal_list[id3])
  3569. total_prices = re.split('[;;、,\n]', total_price)
  3570. brands = re.split('[;;、,\n]', brand) if re.search('等$', brand) == None else [brand]
  3571. specses = re.split('[;;、,\n]', specs) if re.search('等$', specs) == None else [specs]
  3572. parameters = re.split('[;;、,\n]', parameter) if re.search('等$', parameter) == None else [parameter]
  3573. unitPrices = [""] * len(products) if len(unitPrices) == 1 else unitPrices
  3574. total_prices = [""] * len(products) if len(total_prices) == 1 else total_prices
  3575. brands = brands * len(products) if len(brands) == 1 else brands
  3576. specses = specses * len(products) if len(specses) == 1 else specses
  3577. parameters = parameters * len(products) if len(parameters) == 1 else parameters
  3578. if len(products) == len(quantitys) == len(unitPrices) == len(brands) == len(
  3579. specses):
  3580. for product, quantity, unitPrice, brand, specs, total_price, parameter in zip(
  3581. products, quantitys, unitPrices, brands, specses, total_prices,
  3582. parameters):
  3583. if quantity != "":
  3584. quantity, quantity_unit_ = self.fix_quantity(quantity,quantity_unit)
  3585. quantity_unit = quantity_unit_ if quantity_unit_ != "" else quantity_unit
  3586. if unitPrice != "":
  3587. unitPrice, _money_unit = money_process(unitPrice, header_list[3])
  3588. unitPrice = str(unitPrice) if unitPrice != 0 and unitPrice<100000000 else ""
  3589. if budget != "":
  3590. budget, _money_unit = money_process(budget, header_list2[2])
  3591. budget = str(budget) if budget != 0 and budget<50000000000 else ''
  3592. if total_price != "":
  3593. total_price, _money_unit = money_process(total_price,
  3594. header_list[6])
  3595. total_price = str(total_price) if total_price != 0 and total_price<50000000000 else ""
  3596. link = {'product': product, 'quantity': quantity,
  3597. 'quantity_unit': quantity_unit, 'unitPrice': unitPrice,
  3598. 'brand': brand[:50], 'specs': specs, 'total_price': total_price,
  3599. 'parameter': parameter}
  3600. if (product, specs, unitPrice, quantity) not in product_set:
  3601. product_set.add((product, specs, unitPrice, quantity))
  3602. product_link.append(link)
  3603. # if link['unitPrice'] != "" and link['quantity'] != '':
  3604. # try:
  3605. # total_product_money += float(link['unitPrice']) * float(
  3606. # link['quantity']) if float(
  3607. # link['quantity']) < 50000 else 0
  3608. # except:
  3609. # log('产品属性单价数量相乘出错, 单价: %s, 数量: %s' % (
  3610. # link['unitPrice'], link['quantity']))
  3611. elif len(unitPrice) > 15 or len(product) > 100: # 单价大于15位数或 产品名称长于100字
  3612. # i += 1
  3613. continue
  3614. else:
  3615. if quantity != "":
  3616. quantity, quantity_unit_ = self.fix_quantity(quantity, quantity_unit)
  3617. quantity_unit = quantity_unit_ if quantity_unit_ != "" else quantity_unit
  3618. if unitPrice != "":
  3619. unitPrice, _money_unit = money_process(unitPrice, header_list[3])
  3620. unitPrice = str(unitPrice) if unitPrice != 0 and unitPrice<100000000 else ""
  3621. if budget != "":
  3622. budget, _money_unit = money_process(budget, header_list2[2])
  3623. budget = str(budget) if budget != 0 and budget<50000000000 else ''
  3624. if total_price != "":
  3625. total_price, _money_unit = money_process(total_price, header_list[6])
  3626. total_price = str(total_price) if total_price != 0 and total_price<50000000000 else ""
  3627. link = {'product': product, 'quantity': quantity,
  3628. 'quantity_unit': quantity_unit, 'unitPrice': unitPrice,
  3629. 'brand': brand[:50], 'specs': specs, 'total_price': total_price,
  3630. 'parameter': parameter}
  3631. if (product, specs, unitPrice, quantity) not in product_set:
  3632. product_set.add((product, specs, unitPrice, quantity))
  3633. product_link.append(link)
  3634. # if link['unitPrice'] != "" and link['quantity'] != '':
  3635. # try:
  3636. # total_product_money += float(link['unitPrice']) * float(
  3637. # link['quantity']) if float(link['quantity']) < 50000 else 0
  3638. # except:
  3639. # log('产品属性单价数量相乘出错, 单价: %s, 数量: %s' % (
  3640. # link['unitPrice'], link['quantity']))
  3641. if order_begin != "" and order_end != "":
  3642. order_begin_year = int(order_begin.split("-")[0])
  3643. order_end_year = int(order_end.split("-")[0])
  3644. # 限制附件错误识别时间
  3645. if order_begin_year >= 2050 or order_begin_year < 2000 or order_end_year >= 2050 or order_end_year < 2000:
  3646. order_begin = order_end = ""
  3647. # print(budget, order_time)
  3648. if budget != "" and order_time != "":
  3649. link = {'project_name': product, 'product': [], 'demand': demand, 'budget': budget,
  3650. 'order_begin': order_begin, 'order_end': order_end}
  3651. if link not in demand_link:
  3652. demand_link.append(link)
  3653. if len(product_link) > 0:
  3654. attr_dic = {'product_attrs': {'data': product_link, 'header': list(set(headers)), 'header_col': list(set(header_col))}}
  3655. get_product_attrs = True
  3656. else:
  3657. attr_dic = {'product_attrs': {'data': [], 'header': [], 'header_col': []}}
  3658. if len(demand_link) > 0:
  3659. demand_dic = {'demand_info': {'data': demand_link, 'header': headers_demand, 'header_col': header_col}}
  3660. else:
  3661. demand_dic = {'demand_info': {'data': [], 'header': [], 'header_col': []}}
  3662. product_attrs[0] = attr_dic
  3663. if len(product_attrs[1]['demand_info']['data']) == 0:
  3664. product_attrs[1] = demand_dic
  3665. if get_product_attrs:
  3666. break
  3667. # print('predict_by_text: ', product_attrs)
  3668. return product_attrs
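# Illustrative example of the key/value split performed by predict_by_text above
# (hypothetical outline text):
#   outline_text = '一、采购信息:项目名称:XX设备,数量:2台,预算金额:30万元'
#   -> head_list       = ['项目名称', '数量', '预算金额']
#   -> head_value_list = ['XX设备', '2台', '30万元']
# If enough of head_list falls inside self.header_set, the values are then mapped through
# find_header()/money_process() into product_attrs much like a table row.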
  3669. def add_product_attrs(self,channel_dic, product_attrs, list_sentences,list_entitys,list_outlines,product_list,codeName,prem,text,page_time):
  3670. # print(1,product_attrs[1]['demand_info']['data'])
  3671. if channel_dic['docchannel']['docchannel']=="采购意向" and len(product_attrs[1]['demand_info']['data']) == 0:
  3672. product_attrs = self.predict_without_table(product_attrs, list_sentences,list_entitys,codeName,prem,text,page_time)
  3673. # print(2,product_attrs[1]['demand_info']['data'])
  3674. if len(product_attrs[0]['product_attrs']['data']) == 0:
  3675. product_attrs = self.predict_by_text(product_attrs,text,list_outlines,product_list,page_time)
  3676. # print(3,product_attrs[1]['demand_info']['data'])
  3677. if len(product_attrs[1]['demand_info']['data'])>0:
  3678. for d in product_attrs[1]['demand_info']['data']:
  3679. for product in set(prem[0]['product']):
  3680. if product in d['project_name'] and product not in d['product']:
  3681. d['product'].append(product) #把产品在项目名称中的添加进需求要素中
  3682. # docchannel类型提取
  3683. class DocChannel():
  3684. def __init__(self, life_model='/channel_savedmodel/channel.pb', type_model='/channel_savedmodel/doctype.pb',config=None):
  3685. self.lift_sess, self.lift_title, self.lift_content, self.lift_prob, self.lift_softmax,\
  3686. self.mask, self.mask_title = self.load_life(life_model,config)
  3687. self.type_sess, self.type_title, self.type_content, self.type_prob, self.type_softmax,\
  3688. self.type_mask, self.type_mask_title = self.load_type(type_model)
  3689. self.sequen_len = 200 # 150 200
  3690. self.title_len = 30
  3691. self.sentence_num = 10
  3692. self.kws = '供货商|候选人|供应商|入选人|项目|选定|预告|中标|成交|补遗|延期|报名|暂缓|结果|意向|出租|补充|合同|限价|比选|指定|工程|废标|取消|中止|流标|资质|资格|地块|招标|采购|货物|租赁|计划|宗地|需求|来源|土地|澄清|失败|探矿|预审|变更|变卖|遴选|撤销|意见|恢复|采矿|更正|终止|废置|报建|流拍|供地|登记|挂牌|答疑|中选|受让|拍卖|竞拍|审查|入围|更改|条件|洽谈|乙方|后审|控制|暂停|用地|询价|预'
  3693. lb_type = ['采招数据', '土地矿产', '拍卖出让', '产权交易', '新闻资讯']
  3694. lb_life = ['采购意向', '招标预告', '招标公告', '招标答疑', '公告变更', '资审结果', '中标信息', '合同公告', '废标公告']
  3695. self.id2type = {k: v for k, v in enumerate(lb_type)}
  3696. self.id2life = {k: v for k, v in enumerate(lb_life)}
  3697. self.load_pattern()
  3698. def load_pattern(self):
  3699. self.type_dic = {
  3700. '土地矿产': '供地结果|(土地|用地|宗地|地块|海域|矿)的?(基本信息|基本情况|概况|信息|详情|来源|用途|性质|编号|位置|坐落|使用年限|出让年限)|(土地|山地|农田)(经营权)?(出让|出租|招租|租赁|承包|流转)|流转土地',
  3701. '拍卖出让': '(拍卖|变卖|流拍|竞拍)的?(公告|活动|信息|结果|成交|主体|标的|资产|财产|方式|类型|流程|程序|规则|价格|保证金|时间)|(公开|进行|密封)(拍卖|变卖|竞拍)|第[一二三]次拍卖|(资产|司法|网络)拍卖|交易方式.{,2}拍卖|拍卖会',
  3702. '产权交易': '(产权|资产|权证)的?(类型|类别|用途|性质|状态|信息|名称|编号|(基本)?情况)|(经营权|承包权|使用权|租赁权|股权|债权|排污权|化学需氧量|储备量)(挂牌|转让|出让)|竞价销售|销售结果|房屋所有权房产|免租期限|交易期限|(受让|转让|承租|出租)(人|方)|(店面|店铺|商铺|铺位?|门面|门市|食堂|饭堂|校舍|车位|停车场|厂?房|仓?库|馆|资产|物业|房产|房屋|场地|农田|鱼?塘)\w{,4}(处置|招租|出租|续租|租赁|转让)|(出租|转让|产权|资产)(项目|中标|成交|流标|废标)|出租(用途|类型)|转让底价|租赁(标的物|情况)|看[样货](时间|地[点址]|方式|仓库|验货)|最小加价|加价[幅梯]度|交易模式[::\s]*延时竞价销售|挂牌(开始|结束)时间',
  3703. '采招数据': '(采购|招标)(条件|范围|文件|内容)|(申请人|投标人|供应商|报价人|参选人)的?资格要求;|采购需求清单|最低价排序|竞争性采购方式|采购进行公开竞价|竞价模式[::\s]*一次报价|预算金额' # |变更|答疑|澄清|中标|成交|合同|废标|流标 |(采购|招标|代理)(人|机构|单位)|
  3704. }
  3705. self.title_type_dic = {
  3706. '土地矿产': '(土地|用地|宗地|荒地|山地|海域|矿)(出让|出租|招租|租赁|承包|流转|使用权|经营权|征收|划拨|中标|成交)|供地结果|矿业权|探矿权|采矿权|(土地|用地|宗地|地块)(使用权)?(终止|中止|网上)?(挂牌|出让|拍卖|招拍|划拨)|征收土地',
  3707. '拍卖出让': '(拍卖|变卖|流拍|竞拍)的?(公告|公示)|拍卖|变卖|流拍|竞拍',
  3708. '产权交易': '经营权|承包权|使用权|租赁权|股权|债权|排污权|化学需氧量|储备量|竞价销售|销售结果|出租|招租|拍租|竞租|续租|挂牌|出让',
  3709. '采招数据': '(采购|招标|询价|议价|比价|比选|遴选|邀请|邀标|磋商|洽谈|约谈|谈判|征询|调研)的?(公告|公示|中标|成交|结果|$)|工程招标|定点服务|竞价采购|(设备|服务)采购|网上超市采购|定点采购',
  3710. # |竞价 采招/产权都有竞价方式 # 意向|需求|预公?告|报建|总承包|工程|施工|设计|勘察|代理|监理 |变更|答疑|澄清|中标|成交|合同|废标|流标
  3711. '新闻资讯': '(考试|面试|笔试)成绩|成绩的?(公告|公示|公布)|公开招聘|招聘(公告|简章|启事|合同制)|疫情防控\s{,5}(通知|情况|提示)|行政审批结果'
  3712. }
  3713. self.life_dic = {
  3714. '采购意向': '采购意向|招标意向|选取意向|意向公告|意向公示',
  3715. '采购意向neg': '发布政府采购意向|采购意向公告已于',
  3716. '招标预告': '(预计|计划)(采购|招标)(时间|日期)|采购(计划编号|需求方案|预告|预案)|(预|需求)公示|需求(方案|信息|论证|公告|公示)',
  3717. '招标公告': '(采购|招标|竞选|报名)条件|报名(时间|流程|方法|要求|\w{,5}材料)[:\s]|[^\w]成交规则|参加竞价采购交易资格|(申请人|投标人|供应商|报价人|参选人)的?资格要求|获取(采购|招标|询价|议价|竞价|比价|比选|遴选|邀请|邀标|磋商|洽谈|约谈|谈判|竞谈|应答)文件|(采购|招标|询价|议价|竞价|比价|比选|遴选|邀请|邀标|磋商|洽谈|约谈|谈判|竞谈|应答)文件的?(获取|领取)|评选方式:?\s*价格最低',
  3718. '资审结果': '资审及业绩公示|资审结果及业绩|资格后审情况报告|资格(后审|预审|审查)结果(公告|公示)|(预审|审查)工作已经?结束|未通过原因', #|资格
  3719. '招标答疑': '现澄清(为|如下)|答疑补遗|澄清内容如下|第[0-9一二三四五]次澄清|答疑澄清|(最高(投标)?限价|控制价|拦标价)公示', # |异议的回复
  3720. '公告变更': '第[\d一二]次变更|(更正|变更)(公告|公示|信息|内容|事项|原因|理由|日期|时间|如下)|原公告((主要)?(信息|内容)|发布时间)|(变更|更正)[前后]内容|现?在?(变更|更正|修改|更改)(内容)?为|(公告|如下|信息|内容|事项|结果|文件|发布|时间|日期)(更正|变更)',
  3721. '公告变更neg': '履约变更内容',
  3722. '候选人公示': '候选人公示|评标结果公示|中标候选人名单公示|现将中标候选人(进行公示|公[示布]如下)|(中标|中选)候选人(信息|情况)[::\s]',
  3723. '候选人公示neg': '中标候选人公示期|中标候选人公示前',
  3724. '中标信息': '供地结果信息|采用单源直接采购的?情况说明|[特现]?将\w{,4}(成交|中标|中选|选定结果|选取结果|入围结果|竞价结果)\w{,4}(进行公示|公[示布]如下)|(询价|竞价|遴选)?(成交|中标|中选)(公告|公示)|(成交|中标|中选|选定|选取|入围|询价)结果(如下|公告|公示)|(中标|中选)(供应商|承包商|候选人|入围单位)如下|拟定供应商的情况|((中标|中选)(人|成交)|成交)\w{,3}(信息|情况)[::\s]',
3725. '中标信息2': '\s(成交|中标|中选)(信息|日期|时间|总?金额|价格)[::\s]|(成交|中标|中选)价格\s*[\d.,]+万?元|(采购|招标|成交|中标|中选|评标)结果|单一来源(采购|招标)?的?(中标|成交|结果)|项目已结束|中标公示 ', # |单一来源采购原因|拟采取单一来源方式采购|单一来源采购公示
  3726. '中标信息3': '(中标|中选|成交|拟定|拟选用|最终选定的?|受让)(供应商|供货商|服务商|机构|企业|公司|单位|候选人|人)(信息[,:]?)?(名称)?[::\s]|[、\s](第一名|(拟定|推荐|入围)?(供应商|供货商)|(中选|中标|供货)单位|中选人)[::\s]|确定[\w()]{6,25}为中标人', # |唯一
  3727. '中标信息neg': '按项目控制价下浮\d%即为成交价|成交原则|不得确定为(中标|成交)|招标人按下列原则选择中标人|评选成交供应商:|拟邀请供应商|除单一来源采购项目外|单一来源除外|(各.{,5}|尊敬的)(供应商|供货商)[:\s]|竞拍起止时间:|询价结果[\s\n::]*不公开|本项目已具备招标条件|现对该项目进行招标公告|发布\w{2}结果后\d天内送达|本次\w{2}结果不对外公示|供应商\s*资格要求|成交情况:\s*[流废]标|中标单位:本次招标拟?中标单位\d家|通知中标单位|影响(成交|中标)结果|确定为成交供应商|(成交|中标|中选)公[告示](发布|\w{,2})后|竞价成交后', # 503076535 按照服务方案的优劣 确定为成交供应商
  3728. # |确定成交供应商[:,\s]
  3729. '合同公告': '合同(公告|公示|信息|内容)|合同(编号|名称|主体|基本情况|完成(日期|时间))|(供应商乙方|乙方供应商):|合同总?金额|履约信息',
  3730. '废标公告': '(终止|中止|废标|流标|流采|失败|作废|异常|撤销)(结果)?(公告|公示|招标|采购|竞价)|(谈判结果为|结果类型):?废标|((本|该)(项目|标段|合同|合同包|采购包|次)\w{,5})((失败|终止|流标|废标)|予以废标|(按|做|作)?(流标|废标|废置)处理)|(采购|招标|询价|议价|竞价|比价|比选|遴选|邀请|邀标|磋商|洽谈|约谈|谈判|竞谈|应答|项目)(终止|中止|废标|流标|失败|作废|异常|撤销)',
  3731. '废标公告2': '(无效|中止|终止|废标|流标|失败|作废|异常|撤销)的?(原因|理由)|本项目因故取消|本(项目|次)(公开)?\w{2}失败|已终止\s*原因:|(人|人数|供应商|单位)(不足|未达\w{,3}数量)|已终止|不足[3三]家|无(废标)|成交情况:\s*[流废]标|现予以废置',
  3732. '废标公告neg': '超过此报价将作为[废流]标处理|否则按[废流]标处理|终止规则:|成交规则:|视为流标|竞价失败的一切其他情形|是否废标:否|若不足三家公司参与|供应商数量:?\s*报名供应商不足三家|有效报价不足三家,\s*系统自动废标' # 503076535 供应商数量: 报名供应商不足三家。
  3733. }
  3734. self.title_life_dic = {
  3735. '采购意向': '采购意向|招标意向|选取意向|意向公告|意向公示|意向公开',
  3736. '招标预告': '预公?告|预公示|报建公告|(批前|标前)公示|(供应|招标)计划表?$|(论证|征求|征集)(供应商)?意见|意见征询|需求评审公告|需求(公告|公示|意见)',
  3737. '公告变更': '第[\d一二]次变更|(变更|更正(事项)?|更改|延期|暂停)(招标|采购)?的?(公告|公示|通知)|变更$|更正$',
  3738. '招标答疑': '质疑|澄清|答疑(文件)?|补遗书?|(最高(投标)?限价|控制价|拦标价)(公示|公告|$)',
  3739. '废标公告': '(终止|中止|废标|废除|废置|流标|失败|作废|异常|撤销|撤回|取消成?交?|流拍)(结果|竞价|项目)?的?(公告|公示|$)|(终止|中止)(采购|招标|询价|议价|竞价|比价|比选|遴选|邀请|邀标|磋商|洽谈|约谈|谈判|拍卖|招租|交易|出让)|关于废置',
  3740. '合同公告': '(合同(成交|变更)?)(公告|公示|信息|公式|公开|签订)|合同备案|合同书|合同$', # |(履约|验收)(结果)?
  3741. '候选人公示': '候选人(变更)?公示|评标(结果)?公示|评审结果', #中标前公示|中标预公示|
  3742. '中标信息': '(中标|中选|中价|中租|成交|入选|确认)(候选人|人|供应商|记录|结果|变更)?(公告|公示|结果)|未?入围(公示|公告)|(遴选|采购|招标|竞价|议价|比选|询比?价|评选|谈判|邀标|邀请|洽谈|约谈|评标|发包|遴选|交易)\w{,2}结果|单一来源(采购|招标)?的?(中标|成交|结果)|中标通知书|中标$|项目中标', # |开标(记录|信息|情况)
  3743. '资审结果': '((资格|资质)(审查|预审|后审|审核)|资审)结果(公告|公示)?|(资质|资格)(预审|后审)公示|资审及业绩公示',
  3744. '招标公告': '(采购|招标|询价|议价|竞价|比价|比选|遴选|邀请|邀标|磋商|洽谈|约谈|谈判|拍卖|招租|交易|出让)的?(公告|公示|$)|公开(采购|招标|招租|拍卖|挂牌|出让)|(资审|预审|后审)公告',
  3745. '开标记录': '开标记录|截标信息|评委名单公示|开标安排|开标数据表|开标信息|开标情况|开标一览表|开标结果|开标会',
  3746. '验收合同': '(验收|履约)(公告|公示)|(验收|履约)(结果|报告|意见|单)(公告|公示)'
  3747. }
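# Illustrative probes of the pattern dictionaries above (hypothetical titles):
#   re.search(self.title_life_dic['中标信息'], 'XX项目中标结果公告')        # matches
#   re.search(self.title_life_dic['废标公告'], 'XX项目终止公告')            # matches
#   re.search(self.title_type_dic['土地矿产'], 'XX地块使用权挂牌出让公告')  # matches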
  3748. def load_life(self,life_model,config):
  3749. with tf.Graph().as_default() as graph:
  3750. output_graph_def = graph.as_graph_def()
  3751. with open(os.path.dirname(__file__)+life_model, 'rb') as f:
  3752. output_graph_def.ParseFromString(f.read())
  3753. tf.import_graph_def(output_graph_def, name='')
  3754. # print("%d ops in the final graph" % len(output_graph_def.node))
  3755. del output_graph_def
  3756. sess = tf.Session(graph=graph,config=config)
  3757. sess.run(tf.global_variables_initializer())
  3758. inputs = sess.graph.get_tensor_by_name('inputs/inputs:0')
  3759. prob = sess.graph.get_tensor_by_name('inputs/dropout:0')
  3760. title = sess.graph.get_tensor_by_name('inputs/title:0')
  3761. mask = sess.graph.get_tensor_by_name('inputs/mask:0')
  3762. mask_title = sess.graph.get_tensor_by_name('inputs/mask_title:0')
  3763. # logit = sess.graph.get_tensor_by_name('output/logit:0')
  3764. softmax = sess.graph.get_tensor_by_name('output/softmax:0')
  3765. return sess, title, inputs, prob, softmax, mask, mask_title
  3766. def load_type(self,type_model):
  3767. with tf.Graph().as_default() as graph:
  3768. output_graph_def = graph.as_graph_def()
  3769. with open(os.path.dirname(__file__)+type_model, 'rb') as f:
  3770. output_graph_def.ParseFromString(f.read())
  3771. tf.import_graph_def(output_graph_def, name='')
  3772. # print("%d ops in the final graph" % len(output_graph_def.node))
  3773. del output_graph_def
  3774. sess = tf.Session(graph=graph)
  3775. sess.run(tf.global_variables_initializer())
  3776. inputs = sess.graph.get_tensor_by_name('inputs/inputs:0')
  3777. prob = sess.graph.get_tensor_by_name('inputs/dropout:0')
  3778. title = sess.graph.get_tensor_by_name('inputs/title:0')
  3779. mask = sess.graph.get_tensor_by_name('inputs/mask:0')
  3780. mask_title = sess.graph.get_tensor_by_name('inputs/mask_title:0')
  3781. # logit = sess.graph.get_tensor_by_name('output/logit:0')
  3782. softmax = sess.graph.get_tensor_by_name('output/softmax:0')
  3783. return sess, title, inputs, prob, softmax, mask, mask_title
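# Note: load_life and load_type above differ only in the model path and the optional session
# config; a minimal sketch of a shared loader (hypothetical helper name, not wired in):
# def _load_frozen_graph(self, model_path, config=None):
#     with tf.Graph().as_default() as graph:
#         gd = graph.as_graph_def()
#         with open(os.path.dirname(__file__) + model_path, 'rb') as f:
#             gd.ParseFromString(f.read())
#         tf.import_graph_def(gd, name='')
#         sess = tf.Session(graph=graph, config=config)
#         sess.run(tf.global_variables_initializer())
#         get = sess.graph.get_tensor_by_name
#         return (sess, get('inputs/title:0'), get('inputs/inputs:0'), get('inputs/dropout:0'),
#                 get('output/softmax:0'), get('inputs/mask:0'), get('inputs/mask_title:0'))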
  3784. def predict_process(self, docid='', doctitle='', dochtmlcon=''):
  3785. # print('准备预处理')
  3786. def get_kw_senten(s, span=10):
  3787. doc_sens = []
  3788. tmp = 0
  3789. num = 0
  3790. end_idx = 0
  3791. for it in re.finditer(self.kws, s): # '|'.join(keywordset)
  3792. left = s[end_idx:it.end()].split()
  3793. right = s[it.end():].split()
  3794. tmp_seg = s[tmp:it.start()].split()
  3795. if len(tmp_seg) > span or tmp == 0:
  3796. doc_sens.append(' '.join(left[-span:] + right[:span]))
  3797. end_idx = it.end() + 1 + len(' '.join(right[:span]))
  3798. tmp = it.end()
  3799. num += 1
  3800. if num >= self.sentence_num:
  3801. break
  3802. if doc_sens == []:
  3803. doc_sens.append(s)
  3804. return doc_sens
  3805. def word2id(wordlist, max_len=self.sequen_len):
  3806. ids = [getIndexOfWords(w) for w in wordlist]
  3807. ids = ids[:max_len] if len(ids) >= max_len else ids + [0] * (max_len - len(ids))
  3808. assert len(ids) == max_len
  3809. return ids
  3810. cost_time = dict()
  3811. datas = []
  3812. datas_title = []
  3813. try:
  3814. segword_title = ' '.join(selffool.cut(doctitle)[0])
  3815. segword_content = dochtmlcon
  3816. except:
  3817. segword_content = ''
  3818. segword_title = ''
  3819. if isinstance(segword_content, float):
  3820. segword_content = ''
  3821. if isinstance(segword_title, float):
  3822. segword_title = ''
  3823. segword_content = segword_content.replace(' 中 选 ', ' 中选 ').replace(' 中 标 ', ' 中标 ').replace(' 补 遗 ', ' 补遗 '). \
  3824. replace(' 更 多', '').replace(' 更多', '').replace(' 中 号 ', ' 中标 ').replace(' 中 选人 ', ' 中选人 '). \
  3825. replace(' 点击 下载 查看', '').replace(' 咨询 报价 请 点击', '').replace('终结', '终止')
  3826. segword_title = re.sub('[^\s\u4e00-\u9fa5]', '', segword_title)
  3827. segword_content = re.sub('[^\s\u4e00-\u9fa5]', '', segword_content)
  3828. doc_word_list = segword_content.split()
  3829. if len(doc_word_list) > self.sequen_len / 2:
  3830. doc_sens = get_kw_senten(' '.join(doc_word_list[100:500]))
  3831. doc_sens = ' '.join(doc_word_list[:100]) + '\n' + '\n'.join(doc_sens)
  3832. else:
  3833. doc_sens = ' '.join(doc_word_list[:self.sequen_len])
  3834. # print('标题:',segword_title)
  3835. # print('正文:',segword_content)
  3836. datas.append(doc_sens.split())
  3837. datas_title.append(segword_title.split())
  3838. # print('完成预处理')
  3839. return datas, datas_title
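# Illustrative example for get_kw_senten above (hypothetical, pre-segmented text):
#   s = '本 公司 拟 对 办公 设备 进行 公开 招标 , 欢迎 合格 供应商 报名 参加'
#   with span=3, the first hit on self.kws ('招标') yields the window
#   '进行 公开 招标 , 欢迎 合格'; later hits open further windows only when they are more
#   than `span` segmented words past the previous hit, up to self.sentence_num windows.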
  3840. def is_houxuan(self, title, content):
  3841. '''
  3842. 通过标题和中文内容判断是否属于候选人公示类别
  3843. :param title: 公告标题
  3844. :param content: 公告正文文本内容
  3845. :return: 1 是候选人公示 ;0 不是
  3846. '''
  3847. if re.search('候选人的?公示|评标结果|评审结果|中标公示', title): # (中标|成交|中选|入围)
  3848. if re.search('变更公告|更正公告|废标|终止|答疑|澄清', title):
  3849. return 0
  3850. return 1
  3851. if re.search('候选人的?公示', content[:100]):
  3852. if re.search('公示(期|活动)?已经?结束|公示期已满|中标结果公告|中标结果公示|变更公告|更正公告|废标|终止|答疑|澄清', content[:100]):
  3853. return 0
  3854. return 1
  3855. else:
  3856. return 0
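# Illustrative checks for is_houxuan (hypothetical titles):
#   is_houxuan('XX项目评标结果公示', '')          -> 1
#   is_houxuan('XX项目评标结果公示变更公告', '')   -> 0   # 变更公告 in the title vetoes it
#   is_houxuan('XX项目招标公告', '')              -> 0   # no candidate-publicity cue at all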
  3857. def predict(self, title='', list_sentence='', web_source_no='', original_docchannel=''):
  3858. not_extract_dic = {
  3859. 104: '招标文件',
  3860. 106: '法律法规',
  3861. 107: '新闻资讯',
  3862. 108: '拟建项目',
  3863. 109: '展会推广',
  3864. 110: '企业名录',
  3865. 111: '企业资质',
  3866. 112: '全国工程人员',
  3867. 113: '业主采购'
  3868. }
  3869. if original_docchannel in not_extract_dic:
  3870. return {'docchannel': {'docchannel':'', 'doctype':not_extract_dic[original_docchannel], "original_docchannel_id": str(original_docchannel)}}
  3871. if web_source_no in ['02104-7']:
  3872. return {'docchannel': {'docchannel':'', 'doctype':'采招数据'}}
  3873. if isinstance(list_sentence, list):
  3874. token_l = [it.tokens for it in list_sentence]
  3875. tokens = [it for l in token_l for it in l]
  3876. content = ' '.join(tokens[:500])
  3877. title = re.sub('[^\u4e00-\u9fa5]', '', title)
  3878. if len(title)>50:
  3879. title = title[:20]+title[-30:]
  3880. data_content, data_title = self.predict_process(docid='', doctitle=title[-50:], dochtmlcon=content) # 标题最多取50字
  3881. text_len = len(data_content[0]) if len(data_content[0])<self.sequen_len else self.sequen_len
  3882. title_len = len(data_title[0]) if len(data_title[0])<self.title_len else self.title_len
  3883. result = {'docchannel': {'docchannel':'', 'doctype':'', "original_docchannel_id": str(original_docchannel)}}
  3884. array_content = embedding(data_content, shape=(len(data_content), self.sequen_len, 128))
  3885. array_title = embedding(data_title, shape=(len(data_title), self.title_len, 128))
  3886. pred = self.type_sess.run(self.type_softmax,
  3887. feed_dict={
  3888. self.type_title: array_title,
  3889. self.type_content: array_content,
  3890. self.type_mask:[[0]*text_len+[1]*(self.sequen_len-text_len)],
  3891. self.type_mask_title:[[0]*title_len+[1]*(self.title_len-title_len)],
  3892. self.type_prob:1}
  3893. )
  3894. id = np.argmax(pred, axis=1)[0]
  3895. prob = pred[0][id]
  3896. result['docchannel']['doctype'] = self.id2type[id]
  3897. # print('公告类别:', self.id2type[id], '概率:',prob)
  3898. # if id == 0:
  3899. if result['docchannel']['doctype'] not in ['', '新闻资讯']:
  3900. pred = self.lift_sess.run(self.lift_softmax,
  3901. feed_dict={
  3902. self.lift_title: array_title,
  3903. self.lift_content: array_content,
  3904. self.mask: [[0] * text_len + [1] * (self.sequen_len - text_len)],
  3905. self.mask_title: [[0] * title_len + [1] * (self.title_len - title_len)],
  3906. self.lift_prob:1}
  3907. )
  3908. id = np.argmax(pred, axis=1)[0]
  3909. prob = pred[0][id]
  3910. result['docchannel']['docchannel'] = self.id2life[id]
  3911. # print('生命周期:纯模型预测',self.id2life[id], '概率:',prob)
  3912. # if id == 6:
  3913. if result['docchannel']['docchannel'] == '中标信息':
  3914. if self.is_houxuan(''.join([it for it in title if it.isalpha()]), ''.join([it for it in content if it.isalpha()])):
  3915. result['docchannel']['docchannel'] = '候选人公示'
  3916. # return '候选人公示', prob
  3917. # return [{'docchannel': '候选人公示'}]
  3918. return result
  3919. # return [{'docchannel':self.id2life[id]}]
  3920. # else:
  3921. # # return self.id2type[id], prob
  3922. # return [{'docchannel':self.id2type[id]}]
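# Illustrative shape of the value returned by predict() above (hypothetical values):
#   {'docchannel': {'docchannel': '中标信息', 'doctype': '采招数据', 'original_docchannel_id': '101'}}
# For sources listed in not_extract_dic, docchannel stays empty and only doctype is filled in.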
  3923. def predict_rule(self, title, content, channel_dic, prem_dic):
  3924. '''2022/2/10加入规则去除某些数据源及内容过短且不包含类别关键词的公告不做预测'''
  3925. hetong = '(合同|验收|履约)(公告|公示)|合同号?$' # 合同标题正则
  3926. zhongbiao_t = '(中标|中选|成交|入选|入围|结果|确认)(公告|公示|结果)|(遴选|采购|招标|竞价|议价|比选|询价|评选|谈判|邀标|邀请|洽谈|约谈|评标|发包|遴选)结果|开标(记录|信息|情况)|单一来源|直接(选取|选定)|中标通知书|中标$'
  3927. zhongbiao_c = '(中标|中选|成交|拟选用|拟邀请|最终选定的?|拟定)(供应商|供货商|服务商|企业|公司|单位|(候选)?人)(名称)?[::]|[,。:.](供应商|供货商|服务商)(名称)?:|指定的中介服务机构:|建设服务单位:'
  3928. zhaobiao_t = '(遴选|采购|招标|竞价|议价|比选|询价|评选|谈判|邀标|邀请|洽谈|约谈)(公告|公示|$)'
  3929. title_cn = re.sub('[^\u4e00-\u9fa5]', '', title)
  3930. if len(re.sub('[^\u4e00-\u9fa5]', "", content))<50 and channel_dic['docchannel']['doctype'] != '新闻资讯':
  3931. if re.search(hetong, title_cn) != None:
  3932. channel_dic['docchannel']['docchannel'] = '合同公告'
  3933. elif re.search(zhongbiao_t, title_cn):
  3934. channel_dic['docchannel']['docchannel'] = '中标信息'
  3935. elif re.search(zhaobiao_t, title_cn):
  3936. channel_dic['docchannel']['docchannel'] = '招标公告'
  3937. else:
  3938. channel_dic['docchannel']['docchannel'] = ''
  3939. elif channel_dic['docchannel'].get('docchannel', '') == '招标公告' and 'win_tenderer' in json.dumps(prem_dic,
  3940. ensure_ascii=False):
  3941. if re.search(hetong, title_cn) != None:
  3942. channel_dic['docchannel']['docchannel'] = '合同公告'
  3943. log('正则把招标公告修改为合同公告')
  3944. elif re.search(zhongbiao_t, title_cn) or re.search(zhongbiao_t, content[:200]) or re.search(zhongbiao_c,
  3945. content):
  3946. channel_dic['docchannel']['docchannel'] = '中标信息'
  3947. log('正则把招标公告修改为中标信息')
  3948. elif channel_dic['docchannel'].get('docchannel', '') == '中标信息' and 'win_tenderer' not in json.dumps(prem_dic,
  3949. ensure_ascii=False):
  3950. if re.search(hetong, title_cn):
  3951. channel_dic['docchannel']['docchannel'] = '合同公告'
  3952. log('正则把中标信息修改为合同公告')
  3953. elif re.search(zhongbiao_t, title_cn) or re.search(zhongbiao_t, content[:200]) or re.search(zhongbiao_c,
  3954. content):
  3955. pass
  3956. elif re.search(zhaobiao_t, title_cn):
  3957. channel_dic['docchannel']['docchannel'] = '招标公告'
  3958. log('正则把中标信息修改为招标公告')
  3959. elif re.search('中标|成交|中选|入选|入围|结果|供应商|供货商|候选人', title_cn+content)==None:
  3960. channel_dic['docchannel']['docchannel'] = ''
  3961. log('正则把中标信息修改为空')
  3962. return channel_dic
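# Added note (editor, illustration only): predict_rule() only overrides the model channel for very
# short notices (fewer than 50 Chinese characters) or when the extracted prem contradicts the
# predicted channel, e.g. a 招标公告 that already contains a win_tenderer, or a 中标信息 without one.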
def predict_merge(self, title, list_sentence, html, bidway, prem, original_docchannel='', web_source_no=''):
'''
正则、模型混合预测,返回公告类型及生命周期
:param title: 公告标题
:param list_sentence: 预处理后返回的句子实体列表
:param html: 公告原文 html 内容
:param bidway: 招标方式
:param prem: 提取的 prem 字典
:return: {'docchannel': {'docchannel':'中标信息', 'doctype':'采招数据'}} 字典格式
'''
  3973. def cut_single_cn_space(text):
  3974. new_text = ""
  3975. for w in text.split():
  3976. if len(w) == 1 or re.search('^[\u4e00-\u9fa5][::]', w):
  3977. new_text += w
  3978. else:
  3979. new_text += ' ' + w
  3980. return new_text
  3981. def html2text(html):
  3982. ser = re.search('<div[^<>]*richTextFetch', html)
  3983. # if ser and len(re.sub('[^\u4e00-\u9fa5]', '', html[:ser.start()]))>500:
  3984. # html = html[:ser.start()]+'##richTextFetch##'
  3985. if ser:
  3986. if len(re.sub('[^\u4e00-\u9fa5]', '', html[:ser.start()])) > 200:
  3987. html = html[:ser.start()] + '##richTextFetch##'
  3988. else:
  3989. html = html[:ser.start() + 500]
  3990. text = re.sub('<[^<]*?>', '', html).replace('&nbsp;', ' ')
  3991. # text = re.sub('http[0-9a-zA-Z-.:/]+|[0-9a-zA-Z-./@]+', '', text)
  3992. text = re.sub('\s+', ' ', text)
  3993. # text = re.sub('[/|[()()]', '', text)
  3994. text = cut_single_cn_space(text)
  3995. return text[:20000]
  3996. def count_diffser(pattern, text):
  3997. num = 0
  3998. kw = []
  3999. for p in pattern.split(';'):
  4000. if re.search(p, text):
  4001. num += 1
  4002. kw.append(re.search(p, text).group(0))
  4003. return num, ';'.join(kw)
  4004. def is_contain_winner(extract_json):
  4005. if re.search('win_tenderer', extract_json):
  4006. return True
  4007. else:
  4008. return False
  4009. def is_single_source(bidway, title):
  4010. if re.search('单一来源|单一性采购', title):
  4011. return True
  4012. elif bidway == '单一来源':
  4013. return True
  4014. else:
  4015. return False
  4016. def get_type(title, text):
  4017. if re.search(self.title_type_dic['土地矿产'], title) or re.search(self.type_dic['土地矿产'],
  4018. text): # and re.search('(土地|用地|宗地|地块)(经营权)?(流转|承包|出租|招租|租赁|确权)', text)==None
  4019. if re.search(self.title_type_dic['采招数据'], text.strip().split(' ')[0] + title):
  4020. return '采招数据', re.search(self.title_type_dic['采招数据'], text.strip().split(' ')[0] + title).group(0)
  4021. return '土地矿产', (re.search(self.title_type_dic['土地矿产'], title) or re.search(self.type_dic['土地矿产'], text)).group(0)
  4022. elif (re.search(self.title_type_dic['拍卖出让'], title) or re.search(self.type_dic['拍卖出让'], text)):
  4023. if re.search(self.title_type_dic['采招数据'], text.strip().split(' ')[0] + title):
  4024. return '采招数据', re.search(self.title_type_dic['采招数据'], text.strip().split(' ')[0] + title).group(0)
  4025. return '拍卖出让', (re.search(self.title_type_dic['拍卖出让'], title) or re.search(self.type_dic['拍卖出让'], text)).group(0)
  4026. elif re.search(self.title_type_dic['产权交易'], title) or re.search(self.type_dic['产权交易'], text):
  4027. if re.search(self.title_type_dic['采招数据'], text.strip().split(' ')[0] + title):
  4028. return '采招数据', re.search(self.title_type_dic['采招数据'], text.strip().split(' ')[0] + title).group(0)
  4029. return '产权交易', (re.search(self.title_type_dic['产权交易'], title) or re.search(self.type_dic['产权交易'], text)).group(0)
  4030. elif re.search(self.title_type_dic['采招数据'], title) or re.search(self.type_dic['采招数据'], title + text):
  4031. return '采招数据', (
  4032. re.search(self.title_type_dic['采招数据'], title) or re.search(self.type_dic['采招数据'], title + text)).group(
  4033. 0)
  4034. elif re.search(self.title_type_dic['新闻资讯'], title):
  4035. if re.search(self.title_type_dic['采招数据'], title +text.strip().split(' ')[0]):
  4036. return '采招数据', re.search(self.title_type_dic['采招数据'], title +text.strip().split(' ')[0]).group(0)
  4037. return '新闻资讯', re.search(self.title_type_dic['新闻资讯'], title).group(0)
  4038. else:
  4039. return '', '没有公告类型关键词,返回空'
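# Added note (editor): get_type checks the doc-type keyword groups in priority order
# (土地矿产 → 拍卖出让 → 产权交易 → 采招数据 → 新闻资讯); for the first three it still falls back
# to 采招数据 when the title or the first token of the text also hits the 采招数据 keywords.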
  4040. def get_life(title, text):
  4041. title = re.sub('[-()()0-9a-z]|第?[二三四]次公?告?', '', title)
  4042. first_line = text.split()[0] if len(text.split()) > 2 else ''
  4043. if title.strip()[-2:] not in ['公告', '公示'] and 5 < len(first_line) < 50 and first_line[-2:] in ['公告', '公示']:
  4044. # print('title: ', title, first_line)
  4045. title += first_line
  4046. def count_score(l):
  4047. return len(l) + len(set(l)) * 2
  4048. life_kw_title = {}
  4049. life_kw_content = {}
  4050. life_score = {}
  4051. # msc = ""
  4052. # 查找标题每个类别关键词
  4053. for k, v in self.title_life_dic.items():
  4054. k2 = re.sub('[\da-z]', '', k)
  4055. if k2 not in life_kw_title:
  4056. life_kw_title[k2] = []
  4057. for it in re.finditer(v, title):
  4058. life_kw_title[k2].append(it.group(0))
  4059. # 查找正文每个类别关键词
  4060. for k, v in self.life_dic.items():
  4061. k2 = re.sub('[\da-z]', '', k)
  4062. if k2 not in life_kw_content:
  4063. life_kw_content[k2] = {'pos': [], 'neg': []}
  4064. for it in re.finditer(v, text):
  4065. if 'neg' not in k:
  4066. life_kw_content[k2]['pos'].append(it.group(0))
  4067. else:
  4068. life_kw_content[k2]['neg'].append(it.group(0))
  4069. for k2 in life_kw_content:
  4070. life_score[k2] = count_score(life_kw_content[k2]['pos']) - count_score(
  4071. life_kw_content[k2]['neg'])
  4072. life_kw_title = {k: v for k, v in life_kw_title.items() if v != []}
  4073. life_kw_content = {k: v for k, v in life_kw_content.items() if life_score[k] > 0}
  4074. msc = [life_kw_title, life_kw_content, life_score]
  4075. msc = json.dumps(msc, ensure_ascii=False)
  4076. max_score = 0
  4077. life_list = []
  4078. for k in life_score.keys():
  4079. if life_score[k] > max_score:
  4080. max_score = life_score[k]
  4081. life_list = [k]
  4082. elif life_score[k] == max_score and life_score[k] > 0:
  4083. life_list.append(k)
  4084. if '采购意向' in life_kw_title or '采购意向' in life_list:
  4085. if '中标信息' in life_kw_title or '中标信息' in life_list:
  4086. return '中标信息', msc
  4087. elif set(['候选人公示', '合同公告']) & set(life_kw_title) != set():
  4088. return '', msc
  4089. return '采购意向', msc
  4090. elif '招标预告' in life_kw_title or '招标预告' in life_list:
  4091. if '中标信息' in life_kw_title or '中标信息' in life_list:
  4092. return '中标信息', msc
  4093. elif set(['候选人公示', '合同公告']) & set(life_kw_title) != set():
  4094. return '', msc
  4095. return '招标预告', msc
  4096. elif '公告变更' in life_kw_title or '公告变更' in life_list:
  4097. if life_score.get('候选人公示', 0) > 3 or '候选人公示' in life_kw_title:
  4098. return '候选人公示', msc
  4099. elif life_score.get('合同公告', 0) > 3 or '合同公告' in life_kw_title:
  4100. return '合同公告', msc
  4101. elif life_score.get('中标信息', 0) > 3 or '中标信息' in life_kw_title:
  4102. return '中标信息', msc
  4103. elif '招标公告' in life_kw_title and re.search('变更|更正', title[-4:])==None and life_score.get('公告变更', 0) < 4:
  4104. return '招标公告', msc
  4105. return '公告变更', msc
  4106. elif '招标答疑' in life_kw_title or '招标答疑' in life_list:
  4107. if '招标公告' in life_kw_title and life_score.get('招标答疑', 0) < 4:
  4108. return '招标公告', msc
  4109. elif life_score.get('招标答疑', 0) < max_score:
  4110. if max_score > 3 and len(life_list) == 1:
  4111. return life_list[0], msc
  4112. return '', msc
  4113. return '招标答疑', msc
  4114. elif '开标记录' in life_kw_title:
  4115. if '开标结果' in title and is_contain_winner(prem_json):
  4116. return '中标信息', msc
  4117. return '开标记录', msc
  4118. elif '验收合同' in life_kw_title:
  4119. return '验收合同', msc
  4120. elif '候选人公示' in life_kw_title or '候选人公示' in life_list:
  4121. if '招标公告' in life_kw_title and '候选人公示' not in life_kw_title: # and life_score.get('招标公告', 0) > 3
  4122. return '招标公告', msc
  4123. elif '废标公告' in life_kw_title or life_score.get('废标公告', 0) > 5:
  4124. return '废标公告', msc
  4125. return '候选人公示', msc
  4126. elif '合同公告' in life_kw_title or '合同公告' in life_list:
  4127. if '招标公告' in life_kw_title and life_score.get('招标公告', 0) > 3:
  4128. return '招标公告', msc
  4129. elif '废标公告' in life_kw_title or life_score.get('废标公告', 0) > 5:
  4130. return '废标公告', msc
  4131. return '合同公告', msc
  4132. elif '中标信息' in life_kw_title or '中标信息' in life_list:
  4133. if '招标公告' in life_kw_title and '中标信息' not in life_kw_title and life_score.get('招标公告',0) >= life_score.get('中标信息',0): # (life_score.get('招标公告', 0)>2 or life_score.get('中标信息', 0)<4) 0.7886409793924245
  4134. return '招标公告', msc
  4135. elif '废标公告' in life_kw_title or life_score.get('废标公告', 0) > 5:
  4136. return '废标公告', msc
  4137. elif life_score.get('候选人公示', 0) > 3:
  4138. return '候选人公示', msc
  4139. elif life_score.get('合同公告', 0) > 5:
  4140. return '合同公告', msc
  4141. return '中标信息', msc
  4142. elif '废标公告' in life_kw_title or '废标公告' in life_list:
  4143. if life_score.get('招标公告', 0) > 3 and '废标公告' not in life_kw_title:
  4144. return '招标公告', msc
  4145. return '废标公告', msc
  4146. elif '资审结果' in life_kw_title or '资审结果' in life_list:
  4147. return '资审结果', msc
  4148. elif '招标公告' in life_kw_title or '招标公告' in life_list:
  4149. return '招标公告', msc
  4150. return '', msc
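# Added note (editor): get_life scores every lifecycle label with
#   count_score(matches) = len(matches) + 2 * len(set(matches)),
# subtracts the score of that label's neg-patterns, and then resolves ties and conflicts in the
# branch chain above, with title keyword hits taking priority over body keyword hits.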
  4151. def get_model_inputs(list_sentence):
  4152. list_sentence = sorted(list_sentence, key=lambda x:x.sentence_index)
  4153. token_l = [it.tokens for it in list_sentence]
  4154. tokens = [it for l in token_l for it in l]
  4155. content = ' '.join(tokens[:500])
  4156. data_content, data_title = self.predict_process(docid='', doctitle=title[-50:],
  4157. dochtmlcon=content) # 标题最多取50字
  4158. text_len = len(data_content[0]) if len(data_content[0]) < self.sequen_len else self.sequen_len
  4159. title_len = len(data_title[0]) if len(data_title[0]) < self.title_len else self.title_len
  4160. array_content = embedding(data_content, shape=(len(data_content), self.sequen_len, 128))
  4161. array_title = embedding(data_title, shape=(len(data_title), self.title_len, 128))
  4162. return array_content, array_title ,text_len, title_len, content
  4163. def type_model_predict():
  4164. pred = self.type_sess.run(self.type_softmax,
  4165. feed_dict={
  4166. self.type_title: array_title,
  4167. self.type_content: array_content,
  4168. self.type_mask: [[0] * text_len + [1] * (self.sequen_len - text_len)],
  4169. self.type_mask_title: [[0] * title_len + [1] * (self.title_len - title_len)],
  4170. self.type_prob: 1}
  4171. )
  4172. id = np.argmax(pred, axis=1)[0]
  4173. prob = pred[0][id]
  4174. return id, prob
  4175. def life_model_predict():
  4176. pred = self.lift_sess.run(self.lift_softmax,
  4177. feed_dict={
  4178. self.lift_title: array_title,
  4179. self.lift_content: array_content,
  4180. self.mask: [[0] * text_len + [1] * (self.sequen_len - text_len)],
  4181. self.mask_title: [[0] * title_len + [1] * (self.title_len - title_len)],
  4182. self.lift_prob: 1}
  4183. )
  4184. id = np.argmax(pred, axis=1)[0]
  4185. prob = pred[0][id]
  4186. return id, prob
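# Added note (editor): both model heads (doc type and lifecycle) consume the same embedded
# title/content arrays; the masks mark real tokens with 0 and padding with 1, up to
# self.sequen_len for the content and self.title_len for the title.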
def final_change(msc):
'''
修改逻辑:
1、中标公告、合同公告无中标人且原始为非中标,返回原类型
2、废标公告有中标人且标题无废标关键词,返回中标信息
3、答疑公告标题无答疑关键词且原始为招标,返回原始类别
4、招标公告有中标人且原始为中标,返回中标信息
5、预测为招标,原始为预告、意向,返回原始类别
6、预测及原始均为变更、答疑,返回原始类别
7、预测为采招数据,原始为产权且有关键词,返回原始类别
8、废标公告原始为招标、预告且标题无废标关键词,返回原始类别
9、若预测为非采招数据且源网为采招数据且有招标关键词,返回采招数据
10、招标公告有中标人,且标题有直购关键词,改为中标信息
11、预测预告,原始为意向、招标且标题无预告关键词,返回原始类别
'''
  4202. if result['docchannel']['docchannel'] in ['中标信息', '合同公告'] and origin_dic.get(
  4203. original_docchannel, '') in ['招标公告', '采购意向', '招标预告', '公告变更'] and is_contain_winner(
  4204. prem_json)==False and re.search(self.title_life_dic['中标信息'], title)==None:
  4205. result['docchannel']['docchannel'] = origin_dic.get(original_docchannel, '')
  4206. msc += '最终规则修改:中标公告、合同公告无中标人且原始为非中标,返回原类型'
  4207. elif result['docchannel']['docchannel'] == '废标公告' and is_contain_winner(prem_json) and re.search(
  4208. self.title_life_dic['废标公告'], title) == None:
  4209. result['docchannel']['docchannel'] = '中标信息'
  4210. msc += '最终规则修改:预测为废标却有中标人且标题无废标关键词改为中标信息;'
  4211. elif result['docchannel']['docchannel'] in ['招标答疑'] and re.search(
  4212. self.title_life_dic['招标答疑'], title) == None and origin_dic.get(
  4213. original_docchannel, '') in ['招标公告', '采购意向', '招标预告']:
  4214. result['docchannel']['docchannel'] = origin_dic.get(original_docchannel, '')
msc += '最终规则修改:答疑公告标题无答疑关键词且原始为招标,返回原始类别;'
  4216. elif result['docchannel']['docchannel'] == '招标公告' and is_contain_winner(prem_json) and origin_dic.get(
  4217. original_docchannel, '') == '中标信息':
  4218. result['docchannel']['docchannel'] = '中标信息'
  4219. msc += '最终规则修改:预测为招标公告却有中标人且原始为中标改为中标信息;'
  4220. elif result['docchannel']['docchannel'] in ['招标公告'] and origin_dic.get(
  4221. original_docchannel, '') in ['采购意向', '招标预告']:
  4222. result['docchannel']['docchannel'] = origin_dic.get(original_docchannel, '')
  4223. msc += '最终规则修改:预测为招标,原始为预告、意向,返回原始类别'
  4224. elif result['docchannel']['docchannel'] in ['招标预告'] and origin_dic.get(
  4225. original_docchannel, '') in ['采购意向', '招标公告'] and re.search(
  4226. self.title_life_dic['招标预告'], title)==None:
  4227. result['docchannel']['docchannel'] = origin_dic.get(original_docchannel, '')
  4228. msc += '最终规则修改:预测预告,原始为意向、招标且标题无预告关键词,返回原始类别'
  4229. elif result['docchannel']['docchannel'] in ['招标答疑', '公告变更'] and origin_dic.get(
  4230. original_docchannel, '') in ['招标答疑', '公告变更']:
  4231. result['docchannel']['docchannel'] = origin_dic.get(original_docchannel, '')
  4232. msc += '最终规则修改:预测及原始均在答疑、变更,返回原始类别'
  4233. elif result['docchannel']['doctype'] == '采招数据' and origin_dic.get(
  4234. original_docchannel, '') in ['产权交易', '土地矿产'] and re.search('产权|转让|受让|招租|出租|承租|竞价|资产', text):
  4235. result['docchannel']['doctype'] = origin_dic.get(original_docchannel, '')
  4236. msc += '最终规则修改:预测为采招数据,原始为产权且有关键词,返回原始类别'
  4237. elif result['docchannel']['docchannel'] == '废标公告' and origin_dic.get(
  4238. original_docchannel, '') in ['招标公告', '采购意向', '招标预告'] and re.search(
  4239. self.title_life_dic['废标公告'], title) == None:
  4240. result['docchannel']['docchannel'] = origin_dic.get(original_docchannel, '')
msc += '最终规则修改:废标公告原始为招标、预告且标题无废标关键词,返回原始类别;'
  4242. elif result['docchannel']['docchannel'] in ['招标公告', '招标预告'] and is_contain_winner(
  4243. prem_json) and re.search('直购', title):
  4244. result['docchannel']['docchannel'] = '中标信息'
  4245. msc += "最终规则修改:预测为招标却有中标人且标题有直购关键词返回中标"
  4246. if result['docchannel']['doctype'] in ['产权交易', '土地矿产', '拍卖出让'] and origin_dic.get(
  4247. original_docchannel, '') not in ['产权交易', '土地矿产', '拍卖出让'] \
  4248. and (re.search(self.title_type_dic['采招数据'], title) or re.search('工程|服务|采购|询价|磋商', title) or re.search('(采购|招投?标|投标)(信息|内容|项目|公告|数量|人|单位|方式)|(建设|工程|服务|施工|监理|勘察|设计)项目|(%s)'%self.type_dic['采招数据'], text)):
  4249. result['docchannel']['doctype'] = '采招数据'
  4250. msc += ' 最终规则修改:预测为非采招数据,原始为采招数据且有招标关键词,返回采招数据'
  4251. elif result['docchannel']['doctype'] in ['土地矿产'] and origin_dic.get(original_docchannel, '') in ['拍卖出让', '产权交易']:
  4252. if origin_dic.get(original_docchannel, '') in ['拍卖出让'] and (re.search(self.title_type_dic['拍卖出让'], title) or re.search(self.type_dic['拍卖出让'], text)):
  4253. result['docchannel']['doctype'] = '拍卖出让'
  4254. msc += "最终规则修改:预测为土地矿产原始为拍卖且有拍卖关键词,返回拍卖"
  4255. elif (re.search(self.title_type_dic['产权交易'], title) or re.search(self.type_dic['产权交易'], text)):
  4256. result['docchannel']['doctype'] = '产权交易'
  4257. msc += "最终规则修改:预测为土地矿产原始为产权交易且有产权交易关键词,返回产权交易"
  4258. '''下面是新格式增加返回字段'''
  4259. if result['docchannel']['docchannel'] != '': # 预测到生命周期的复制到life_docchannel,否则用数据源结果
  4260. result['docchannel']['life_docchannel'] = result['docchannel']['docchannel']
  4261. else:
  4262. result['docchannel']['life_docchannel'] = origin_dic.get(original_docchannel, '原始类别')
  4263. return msc
  4264. not_extract_dic = {
  4265. 104: '招标文件',
  4266. 106: '法律法规',
  4267. 107: '新闻资讯',
  4268. 108: '拟建项目',
  4269. 109: '展会推广',
  4270. 110: '企业名录',
  4271. 111: '企业资质',
  4272. 112: '全国工程人员',
  4273. 113: '业主采购'
  4274. }
  4275. origin_dic = {51: '公告变更',
  4276. 52: '招标公告',
  4277. 101: '中标信息',
  4278. 102: '招标预告',
  4279. 103: '招标答疑',
  4280. 104: '招标文件',
  4281. 105: '资审结果',
  4282. 106: '法律法规',
  4283. 107: '新闻资讯',
  4284. 108: '拟建项目',
  4285. 109: '展会推广',
  4286. 110: '企业名录',
  4287. 111: '企业资质',
  4288. 112: '全国工程',
  4289. 113: '业主采购',
  4290. 114: '采购意向',
  4291. 115: '拍卖出让',
  4292. 116: '土地矿产',
  4293. 117: '产权交易',
  4294. 118: '废标公告',
  4295. 119: '候选人公示',
  4296. 120: '合同公告'}
  4297. if original_docchannel in not_extract_dic:
  4298. return {'docchannel': {'docchannel': '', 'doctype': not_extract_dic[original_docchannel], 'life_docchannel': origin_dic.get(original_docchannel, '原始类别')}}, '公告类别不在提取范围'
  4299. if web_source_no in ['02104-7', '04733', 'DX007628-6']: # 这些数据源无法识别
  4300. return {'docchannel': {'docchannel': '', 'doctype': '采招数据', 'life_docchannel': origin_dic.get(original_docchannel, '原始类别')}}, '此数据源公告分类不明确,返回数据源类别'
  4301. if original_docchannel == 303:
  4302. return {'docchannel': {'docchannel': '处罚公告', 'doctype': '处罚公告', 'life_docchannel': '处罚公告'}}, "源类别为处罚公告"
  4303. title = re.sub('[^\u4e00-\u9fa5]+|出租车', '', title)
  4304. if len(title) > 50:
  4305. title = title[:20] + title[-30:]
  4306. text = html2text(html)
  4307. prem_json = json.dumps(prem, ensure_ascii=False)
  4308. result = {'docchannel': {'docchannel': '', 'doctype': ''}}
  4309. doc_type, type_kw = get_type(title, text)
  4310. # doc_life, life_kw = get_life(title, text, prem_json, bidway, original_docchannel)
  4311. doc_life, life_kw = get_life(title, text)
  4312. if doc_type in self.title_type_dic:
  4313. result['docchannel']['doctype'] = doc_type
  4314. if doc_life in self.title_life_dic:
  4315. result['docchannel']['docchannel'] = doc_life
  4316. # print('channel正则预测结果:', result)
  4317. msc = '正则结果:类型:%s, 关键词:%s, 周期:%s, 关键词:%s'%(doc_type, type_kw,doc_life, life_kw)+'\n'+'模型结果:'
  4318. # print('类型:%s, 关键词:%s, 周期:%s, 关键词:%s'%(doc_type, type_kw,doc_life, life_kw))
  4319. if doc_type == "" or doc_life == "" or (doc_type != '采招数据' and origin_dic.get(original_docchannel, '原始类别') in ['招标公告', '中标信息', '招标预告', '采购意向']):
  4320. array_content, array_title, text_len, title_len, content = get_model_inputs(list_sentence)
  4321. if doc_type =="" or (doc_type != '采招数据' and origin_dic.get(original_docchannel, '原始类别') in ['招标公告', '中标信息', '招标预告', '采购意向']):
  4322. type_id, type_prob = type_model_predict()
  4323. type_model = self.id2type[type_id]
  4324. if type_model == '新闻资讯' and doc_life!='': # 修复bug 78584245 "docchannel": "合同公告", "doctype": "新闻资讯",
  4325. result['docchannel']['doctype'] = '采招数据'
  4326. msc += '模型结果为新闻资讯,生命周期不为空,改为采招数据;'
  4327. else:
  4328. result['docchannel']['doctype'] = type_model
  4329. msc += type_model + ' 概率:%.4f;'%type_prob
  4330. # print('公告类别:', self.id2type[id], '概率:',prob)
  4331. # if id == 0:
  4332. if doc_life=="" and result['docchannel']['doctype'] not in ['', '新闻资讯']:
  4333. if len(text)>150 and re.search(self.kws, content):
  4334. life_id, life_prob = life_model_predict()
  4335. if life_prob>=0.8:
  4336. life_model = self.id2life[life_id]
  4337. result['docchannel']['docchannel'] = life_model
  4338. msc += life_model + ' 概率:%.4f;\n'%life_prob
  4339. msc = final_change(msc)
  4340. # print('channel ', msc)
  4341. return result, msc
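# Added note (editor, illustration only, not called by the pipeline): predict_merge() returns
# (result, msc), where result looks like
#   {'docchannel': {'docchannel': '中标信息', 'doctype': '采招数据', 'life_docchannel': '中标信息'}}
# and msc is a human-readable trace of the regex / model / final-rule decisions.
# The helper below only re-creates the tiny "does prem contain a winner" check used by
# final_change; it assumes `json` is imported at module level (it is already used above).
def _demo_is_contain_winner(prem):
    """Hedged sketch: True if the extracted prem mentions a win_tenderer role."""
    prem_json = json.dumps(prem, ensure_ascii=False)
    return 'win_tenderer' in prem_json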
# 保证金支付方式提取
class DepositPaymentWay():
    def __init__(self):
        self.pt = '(保证金的?(交纳|缴纳|应按下列|入账|支付)方式)[::]*([^,。]{,60})'
        self.pt2 = '保证金(必?须以|必?须?通过|以)(.{,8})方式'
        kws = ['银行转账', '公?对公方?式?转账', '对公转账', '柜台转账', '(线上|网上)自?行?(缴纳|交纳|缴退|收退)',
               '网上银行支付', '现金存入', '直接缴纳', '支票', '汇票', '本票', '电汇', '转账', '汇款', '随机码',
               '入账', '基本账户转出', '基本账户汇入', '诚信库中登记的账户转出',
               '银行保函', '电子保函', '担保函', '保证保险', '合法担保机构出具的担保', '金融机构、担保机构出具的保函']
        self.kws = sorted(kws, key=lambda x: len(x), reverse=True)

    def predict(self, content):
        pay_way = {'deposit_patment_way': ''}
        result = []
        pay = re.search(self.pt, content)
        if pay:
            # print(pay.group(0))
            pay = pay.group(3)
            for it in re.finditer('|'.join(self.kws), pay):
                result.append(it.group(0))
            pay_way['deposit_patment_way'] = ';'.join(result)
            return pay_way
        pay = re.search(self.pt2, content)
        if pay:
            # print(pay.group(0))
            pay = pay.group(2)
            for it in re.finditer('|'.join(self.kws), pay):
                result.append(it.group(0))
            pay_way['deposit_patment_way'] = ';'.join(result)
            return pay_way
        else:
            return pay_way
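# Hedged usage sketch for DepositPaymentWay (editor illustration only; the sentence is made up
# and the function is not called anywhere in this module).
def _demo_deposit_payment_way():
    dpw = DepositPaymentWay()
    text = '投标保证金的缴纳方式:银行转账或电子保函,缴纳时间详见公告。'
    # 预计命中 '银行转账' 与 '电子保函',以分号拼接返回
    return dpw.predict(text)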
  4373. # 项目标签
  4374. class ProjectLabel():
  4375. def __init__(self, ):
  4376. self.keyword_list = self.get_label_keywords()
  4377. self.kongjing_keyword_list = self.get_kongjing_keywords()
  4378. def get_label_keywords(self):
  4379. import csv
  4380. path = os.path.dirname(__file__)+'/project_label_keywords.csv'
  4381. with open(path, 'r',encoding='utf-8') as f:
  4382. reader = csv.reader(f)
  4383. key_word_list = []
  4384. for r in reader:
  4385. if r[0] == '类型':
  4386. continue
  4387. type = r[0]
  4388. key_wrod = r[1]
  4389. key_paichuci = str(r[2])
  4390. key_paichuci = key_paichuci if key_paichuci and key_paichuci != 'nan' else ""
  4391. type_paichuci = str(r[3])
  4392. type_paichuci = type_paichuci if type_paichuci and type_paichuci != 'nan' else ""
  4393. key_word_list.append((type, key_wrod, key_paichuci, type_paichuci))
  4394. return key_word_list
  4395. def get_kongjing_keywords(self):
  4396. import csv
  4397. path = os.path.dirname(__file__)+'/kongjing_label_keywords.csv'
  4398. with open(path, 'r',encoding='utf-8') as f:
  4399. reader = csv.reader(f)
  4400. key_word_list = []
  4401. for r in reader:
  4402. if r[0] == '关键词':
  4403. continue
  4404. key_wrod = r[0]
  4405. key_wrod2 = str(r[1])
  4406. key_wrod2 = key_wrod2 if key_wrod2 and key_wrod2 != 'nan' else ""
  4407. search_type = r[2]
  4408. info_type_list = str(r[3])
  4409. info_type_list = info_type_list if info_type_list and info_type_list != 'nan' else ""
  4410. key_word_list.append((key_wrod, key_wrod2, search_type, info_type_list))
  4411. return key_word_list
  4412. def predict(self, doctitle,product,project_name,prem):
  4413. doctitle = doctitle if doctitle else ""
  4414. product = product if product else ""
  4415. product = ",".join(set(product.split(','))) # 产品词去重
  4416. project_name = project_name if project_name else ""
  4417. tenderee = ""
  4418. agency = ""
  4419. sub_project_names = [] # 标段名称
  4420. try:
  4421. for k,v in prem[0]['prem'].items():
  4422. # sub_project_names.append(k)
  4423. sub_project_names.append(v.get("name",""))
  4424. for link in v['roleList']:
  4425. if link['role_name'] == 'tenderee' and tenderee == "":
  4426. tenderee = link['role_text']
  4427. if link['role_name'] == 'agency' and agency == "":
  4428. agency = link['role_text']
  4429. except Exception as e:
  4430. # print('解析prem 获取招标人、代理人出错')
  4431. pass
  4432. sub_project_names = ";".join(sub_project_names)
  4433. # 核心字段:标题+产品词+项目名称+标段名称
  4434. main_text = ",".join([doctitle, product, project_name, sub_project_names])
  4435. # 剔除 招标单位、代理机构名称
  4436. if tenderee:
  4437. doctitle = doctitle.replace(tenderee, " ")
  4438. main_text = main_text.replace(tenderee, " ")
  4439. if agency:
  4440. doctitle = doctitle.replace(agency, " ")
  4441. main_text = main_text.replace(agency, " ")
  4442. doctitle_dict = dict()
  4443. main_text_dict = dict()
  4444. for item in self.keyword_list:
  4445. _type = item[0]
  4446. key_wrod = item[1]
  4447. # 关键词排除词
  4448. key_paichuci = item[2]
  4449. key_paichuci_s = "|".join(key_paichuci.split('、'))
  4450. # 类型排除词
  4451. type_paichuci = item[3]
  4452. if type_paichuci:
  4453. paichuci_split = type_paichuci.split('、')
  4454. if re.search("|".join(paichuci_split), main_text):
  4455. continue
  4456. if doctitle:
  4457. if key_wrod in doctitle:
  4458. if not key_paichuci_s or (key_paichuci_s and not re.search(key_paichuci_s, doctitle)):
  4459. key_wrod_count1 = doctitle.count(key_wrod)
  4460. if _type not in doctitle_dict:
  4461. # doctitle_dict[_type] = {'关键词': [], '排除词': type_paichuci}
  4462. doctitle_dict[_type] = []
  4463. doctitle_dict[_type].append((key_wrod, key_wrod_count1))
  4464. if main_text:
  4465. if key_wrod in main_text:
  4466. if not key_paichuci_s or (key_paichuci_s and not re.search(key_paichuci_s, main_text)):
  4467. key_wrod_count2 = main_text.count(key_wrod)
  4468. if _type not in main_text_dict:
  4469. # main_text_dict[_type] = {'关键词': [], '排除词': type_paichuci}
  4470. main_text_dict[_type] = []
  4471. main_text_dict[_type].append((key_wrod, key_wrod_count2))
  4472. # 排序 doctitle
  4473. for k, v in doctitle_dict.items():
  4474. doctitle_dict[k].sort(key=lambda x: x[1], reverse=True)
  4475. # 按匹配次数保留前10个标签
  4476. if len(doctitle_dict) > 10:
  4477. doctitle_labels = [(k, sum(w[1] for w in doctitle_dict[k])) for k in doctitle_dict]
  4478. doctitle_labels.sort(key=lambda x: x[1], reverse=True)
  4479. for item in doctitle_labels[10:]:
  4480. doctitle_dict.pop(item[0])
  4481. # main_text
  4482. pop_list = []
  4483. for k, v in main_text_dict.items():
  4484. if sum([j[1] for j in main_text_dict[k]]) == 1:
  4485. # 关键词匹配次数等于1的标签
  4486. pop_list.append(k)
  4487. main_text_dict[k].sort(key=lambda x: x[1], reverse=True)
  4488. # 核心字段标签,若存在同一个标签的关键词匹配次数大于1,则只保留关键词匹配次数大于1的标签,关键词匹配次数等于1的标签不要
  4489. if len(pop_list) < len(main_text_dict):
  4490. for k in pop_list:
  4491. main_text_dict.pop(k)
  4492. # 按匹配次数保留前10个标签
  4493. if len(main_text_dict) > 10:
  4494. main_text_labels = [(k, sum(w[1] for w in main_text_dict[k])) for k in main_text_dict]
  4495. main_text_labels.sort(key=lambda x: x[1], reverse=True)
  4496. for item in main_text_labels[10:]:
  4497. main_text_dict.pop(item[0])
  4498. return {"标题":doctitle_dict,"核心字段":main_text_dict}
  4499. def predict_other(self,project_label,industry,doctitle,project_name,product,list_articles):
  4500. # doctextcon 取正文内容
  4501. doctextcon = list_articles[0].content.split('##attachment##')[0]
  4502. info_type = industry.get('industry',{}).get("class_name","")
  4503. doctitle = doctitle if doctitle else ""
  4504. product = product if product else ""
  4505. product = ",".join(set(product.split(','))) # 产品词去重
  4506. project_name = project_name if project_name else ""
  4507. get_kongjing_label = False
  4508. keywords_list = []
  4509. for item in self.kongjing_keyword_list:
  4510. key_wrod = item[0]
  4511. key_wrod2 = item[1]
  4512. search_type = item[2]
  4513. info_type_list = item[3]
  4514. info_type_list = info_type_list.split("|") if info_type_list else []
  4515. search_text = ""
  4516. if search_type=='正文':
  4517. search_text = ",".join([doctextcon,doctitle,project_name,product])
  4518. elif search_type=='产品':
  4519. search_text = ",".join([doctitle,project_name,product])
  4520. if search_type=='行业':
  4521. # ’行业’类型直接用info_type匹配关键词
  4522. if info_type==key_wrod:
  4523. # 匹配关键词记录
  4524. keywords_list.append(key_wrod)
  4525. get_kongjing_label = True
  4526. # break
  4527. else:
  4528. if key_wrod in search_text:
  4529. if key_wrod2 and key_wrod2 not in search_text:
  4530. continue
  4531. if info_type_list and info_type not in info_type_list:
  4532. continue
  4533. # 匹配关键词记录
  4534. if key_wrod2:
  4535. keywords_list.append(key_wrod+'+'+key_wrod2)
  4536. else:
  4537. keywords_list.append(key_wrod)
  4538. get_kongjing_label = True
  4539. # break
  4540. if get_kongjing_label:
  4541. project_label["核心字段"]["空净通"] = [[word,1] for word in keywords_list][:10]
  4542. return project_label
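# Hedged sketch (editor illustration only): the core of ProjectLabel.predict is counting how often
# each label's keywords appear in the title / core text and keeping the most frequent labels.
# The helper below re-creates that counting step for a simple (type, keyword) list; the real table
# comes from project_label_keywords.csv and also carries 排除词 columns, which this sketch ignores.
def _demo_count_label_keywords(text, keyword_list, top_n=10):
    """Count keyword hits per label and keep at most top_n labels, ranked by total hits."""
    label_dict = {}
    for _type, key_word in keyword_list:
        if key_word in text:
            label_dict.setdefault(_type, []).append((key_word, text.count(key_word)))
    ranked = sorted(label_dict.items(), key=lambda kv: sum(c for _, c in kv[1]), reverse=True)
    return dict(ranked[:top_n])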
  4543. # 产权分类二级标签
  4544. class PropertyLabel():
  4545. '''
  4546. 产权分类二级标签
  4547. 全部类别:
  4548. 股权, 债权, 知识产权, 矿权, 房产, 土地, 交通运输工具, 闲置物资、设备、材料, 其他
  4549. '''
  4550. def __init__(self, ):
  4551. car = "比亚迪|奇瑞|奥迪|宝马|菲尼迪|雷克萨斯|三菱|铃木|马自达|奔驰|劳斯莱斯|北京现代|" \
  4552. "宾利|兰博基尼|布加迪|保时捷|斯柯达|雪佛兰|别克|凯迪拉克|庞蒂亚克|克尔维特|福特|林肯|克莱斯勒|道奇|JEEP品牌"
  4553. self.keywords_dict = {
  4554. "房产": "房产|住宅|公寓|商铺|车位|写字楼|办公楼|别墅|综合楼|在建工程|厂房|车库|车房|房转让|房屋|商品房|商业用房|"
  4555. "宅基地|[\u4e00-\u9fa5]{,2}用房|店面|商业房|门[面市]房|仓库|铺位|地下室|\d号?(房|室|门市|门面|商?铺|单元|户)|不动产|"
  4556. "自建房|铺面|商务楼|商住楼|阁楼|(杂物|储物|储藏)(房|间|室)|套房|[\da-zA-Z](栋|棟|幢|层|座|号?楼|单元)\d{1,4}(号|房|室|商?铺|户)|"
  4557. "[\da-zA-Z](栋|棟|幢|层|座|号?楼|单元)\d{2,}|门面+转让|楼+变卖|房地产",
  4558. "交通运输工具": "车辆|轿车|汽车(?!用品|库|位|衡)|公车|客车|货车|面包车|SUV|新能源车|二手车|车辆|商用车|机动车|观光车|巴车|"
  4559. "船舶|四驱" + "|" + car,
  4560. "股权": "\d.?股|股权(?!交易中心)|\d%(比例)?.?股|\d万.?股|\d.?元/股|增资(?!源)|扩股|股(转让|出售)|百分之[一二三四五六七八九十]{1,3}股",
  4561. "债权": "债权|债权转让|债权人|债务人|原债权人|新债权人|金融资产",
  4562. "土地": "住宅用地|商业用地|工业用地|国有[\u4e00-\u9fa5]{,3}[土用]地|集体土地|划拨|流转|地块编号|"
  4563. "土地使用权证|土地经营权|土地证|土地[发承]包|[\u4e00-\u9fa5]{,2}用地|土地\d{1,3}(亩|公?顷)|\d{1,3}(亩|公?顷)(使用|经营)权|"
  4564. "承包土地|(地块|土地)承包|水面经营权|[鱼水]塘|鱼池|(水面|旱田)[\u4e00-\u9fa5]{,3}[发承]包|水面资源|(水面|水田)[\u4e00-\u9fa5]{,3}权|"
  4565. "四荒|林地|林场|林木所有权|采伐权|水利设施所有权|水利设施使用权|海域|滩涂|林业产权|旱田|水田|机动田|机动地|耕地|荒地|农田|"
  4566. "苗圃地|塘口",
  4567. "矿权": "矿权|矿业权|采矿许可|探矿权|采矿权|开采权|矿产资源处置|矿[\u4e00-\u9fa5]{1,3}开[发采]",
  4568. "知识产权": "知识产权(?!局)|商标|专利|著作权|版权|商业秘密|科研成果",
  4569. "闲置物资、设备、材料": "(废旧|报废|废|闲置|二手|淘汰)(物资|资产|机械|设备|仪器|汽车|车|钢铁|钢材|钢|金属|塑料|材料|导管|漆|渣|有色|品|[\u4e00-\u9fa5]{,2}车|偶头)|"
  4570. "(金属|机械|设备|仪器|汽车|钢铁|钢材|钢|塑料|有色|)废料|废液|废旧|报废|边角料|残次品|(热轧|冷轧|酸洗|镀铝|热镀|镀锌|镀镁)|"
  4571. "机[器械]设备|医疗设备|生产设备|办公设备|仪器|仪表|设备出租|设备租赁|拖拉机|收割机|插秧机|挖机|车床|挖掘机|电机|"
  4572. "戒指|弃渣|电解质块|茶杯|装置|花瓶|女表|手表|男表|硫磺|物资|书画|茶叶|油茶|红茶|[茗名]茶|白酒|红酒|酒水|酒品|名酒|毛石|[石金木铁矿铜锌铝钢]料|"
  4573. "零部件",
  4574. "经营权": "经营权",
  4575. "租赁": "房+租|市场+续约|资产+出租|租赁|续租|招租|出租|租金|房租"
  4576. }
  4577. self.cqjy_keywords = self.get_cqjy_keywords()
  4578. self.score_idx = ["股权", "债权", "知识产权", "矿权", "房产", "土地", "交通运输工具", "闲置物资、设备、材料"]
  4579. def get_cqjy_keywords(self):
  4580. import csv
  4581. path = os.path.dirname(__file__)+'/property_label_products.csv'
  4582. with open(path, 'r',encoding='utf-8') as f:
  4583. reader = csv.reader(f)
  4584. key_word_list = []
  4585. for r in reader:
  4586. if r[0] == 'product':
  4587. continue
  4588. key_wrod = r[0]
  4589. _type = r[1]
  4590. key_word_list.append((_type, key_wrod))
  4591. return key_word_list
  4592. def get_type(self, text):
  4593. keyword_list = []
  4594. for key, value in self.keywords_dict.items():
  4595. keyword = "|".join([i for i in value.split("|") if '+' not in i])
  4596. keyword2 = [i for i in value.split("|") if '+' in i]
  4597. if re.search(keyword, text):
  4598. re1 = [i for i in re.finditer(keyword, text)][-1]
  4599. keyword_list.append((key, re1.start()))
  4600. else:
  4601. # 组合词 查询
  4602. for k in keyword2:
  4603. k1, k2 = k.split('+')
  4604. if re.search(k1, text) and re.search(k2, text):
  4605. keyword_list.append((key, re.search(k2, text).start()))
  4606. break
  4607. return keyword_list
  4608. def get_type2(self, text, cqjy_type_list):
  4609. have_type = [i[0] for i in cqjy_type_list]
  4610. for item in self.cqjy_keywords:
  4611. _type = item[0]
  4612. key_wrod = item[1]
  4613. if _type not in have_type:
  4614. if '+' in key_wrod:
  4615. k1, k2 = key_wrod.split('+')
  4616. if re.search(k1, text) and re.search(k2, text):
  4617. cqjy_type_list.append((_type, re.search(k2, text).start()))
  4618. have_type.append(_type)
  4619. else:
  4620. if key_wrod in text:
  4621. cqjy_type_list.append((_type, text.index(key_wrod)))
  4622. have_type.append(_type)
  4623. return cqjy_type_list
  4624. def predict(self, doctitle,product,project_name,prem,channel_dic):
  4625. docchannel = channel_dic['docchannel']['doctype']
  4626. # print('docchannel',docchannel)
  4627. if docchannel not in ['土地矿产', '拍卖出让', '产权交易']:
  4628. return ""
  4629. doctitle = doctitle if doctitle else ""
  4630. product = product if product else ""
  4631. product = ",".join(set(product.split(','))) # 产品词去重
  4632. project_name = project_name if project_name else ""
  4633. tenderee = ""
  4634. agency = ""
  4635. try:
  4636. for k,v in prem[0]['prem'].items():
  4637. for link in v['roleList']:
  4638. if link['role_name'] == 'tenderee' and tenderee == "":
  4639. tenderee = link['role_text']
  4640. if link['role_name'] == 'agency' and agency == "":
  4641. agency = link['role_text']
  4642. except Exception as e:
  4643. # print('解析prem 获取招标人、代理人出错')
  4644. pass
  4645. cqjy_type = []
  4646. idx = 0
  4647. for text in [doctitle, project_name, product]:
  4648. if tenderee:
  4649. text = text.replace(tenderee, "")
  4650. if agency:
  4651. text = text.replace(agency, "")
  4652. cqjy_type = self.get_type(text)
  4653. if not cqjy_type:
  4654. cqjy_type = self.get_type2(text, cqjy_type)
  4655. idx += 1
  4656. if idx == 2: # project_name
  4657. if len(re.split("[,、]", text)) > 9:
  4658. cqjy_type = []
  4659. if idx == 3: # product
  4660. if len(text.split(",")) > 15:
  4661. cqjy_type = []
  4662. if cqjy_type:
  4663. break
  4664. cqjy_type2 = [i[0] for i in cqjy_type]
  4665. if cqjy_type:
  4666. # 类别优先级调整
  4667. if "租赁" in cqjy_type2:
  4668. cqjy_type2 = ['租赁']
  4669. elif "经营权" in cqjy_type2:
  4670. cqjy_type2 = ['经营权']
  4671. elif "股权" in cqjy_type2 or "债权" in cqjy_type2 or "知识产权" in cqjy_type2:
  4672. cqjy_type.sort(key=lambda x: self.score_idx.index(x[0]))
  4673. cqjy_type = cqjy_type[0]
  4674. cqjy_type2 = [cqjy_type[0]]
  4675. elif len(cqjy_type2) == 2 and "房产" in cqjy_type2 and "土地" in cqjy_type2:
  4676. cqjy_type2 = ['房产']
  4677. else:
  4678. # 权重排序,取第一位
  4679. if idx in [1, 2]: # doctitle, project_name
  4680. cqjy_type.sort(key=lambda x: x[1], reverse=True)
  4681. cqjy_type = cqjy_type[0]
  4682. cqjy_type2 = [cqjy_type[0]]
  4683. else:
  4684. cqjy_type.sort(key=lambda x: self.score_idx.index(x[0]))
  4685. cqjy_type = cqjy_type[0]
  4686. cqjy_type2 = [cqjy_type[0]]
  4687. cqjy_type2 = ",".join(cqjy_type2)
  4688. if not cqjy_type2:
  4689. cqjy_type2 = '其他'
  4690. return cqjy_type2
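# Hedged usage sketch for PropertyLabel.get_type (editor illustration only; the title is made up
# and instantiating PropertyLabel assumes the bundled property_label_products.csv is present).
def _demo_property_label_get_type():
    pl = PropertyLabel()
    # get_type 返回 [(类别, 关键词最后一次出现的位置), ...],此标题预计命中 '房产'
    return pl.get_type('某小区3号楼2单元101室房产转让公告')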
# 总价单价提取
class TotalUnitMoney:
    def __init__(self):
        pass

    def predict(self, list_sentences, list_entitys):
        for i in range(len(list_entitys)):
            list_entity = list_entitys[i]
            # 总价单价
            for _entity in list_entity:
                if _entity.entity_type == 'money':
                    word_of_sentence = list_sentences[i][_entity.sentence_index].sentence_text
                    # 总价在中投标金额中
                    if _entity.label == 1:
                        result = extract_total_money(word_of_sentence,
                                                     _entity.entity_text,
                                                     [_entity.wordOffset_begin, _entity.wordOffset_end])
                        if result:
                            _entity.is_total_money = 1
                    # 单价在普通金额中
                    else:
                        result = extract_unit_money(word_of_sentence,
                                                    _entity.entity_text,
                                                    [_entity.wordOffset_begin, _entity.wordOffset_end])
                        if result:
                            _entity.is_unit_money = 1
                    # print("total_unit_money", _entity.entity_text,
                    #       _entity.is_total_money, _entity.is_unit_money)
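# Added note (editor): in TotalUnitMoney.predict, money entities with label == 1 (中标/投标金额)
# are checked with extract_total_money() and flagged via is_total_money; all other money entities
# are checked with extract_unit_money() and flagged via is_unit_money.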
  4718. # 行业分类
  4719. class IndustryPredictor():
  4720. def __init__(self,):
  4721. self.model_path = os.path.dirname(__file__)+ '/industry_model'
  4722. self.id2lb = {0: '专业施工', 1: '专用仪器仪表', 2: '专用设备修理', 3: '互联网信息服务', 4: '互联网安全服务', 5: '互联网平台', 6: '互联网接入及相关服务', 7: '人力资源服务',
  4723. 8: '人造原油', 9: '仓储业', 10: '仪器仪表', 11: '仪器仪表修理', 12: '会计、审计及税务服务', 13: '会议、展览及相关服务', 14: '住宅、商业用房',
  4724. 15: '体育场地设施管理', 16: '体育组织', 17: '体育设备', 18: '保险服务', 19: '信息处理和存储支持服务', 20: '信息技术咨询服务',
  4725. 21: '信息系统集成和物联网技术服务', 22: '修缮工程', 23: '健康咨询', 24: '公路旅客运输', 25: '其他专业咨询与调查', 26: '其他专业技术服务',
  4726. 27: '其他交通运输设备', 28: '其他公共设施管理', 29: '其他土木工程建筑', 30: '其他工程服务', 31: '其他建筑建材', 32: '其他运输业', 33: '农业和林业机械',
  4727. 34: '农业服务', 35: '农产品', 36: '农副食品,动、植物油制品', 37: '出版业', 38: '办公消耗用品及类似物品', 39: '办公设备', 40: '化学原料及化学制品',
  4728. 41: '化学纤维', 42: '化学药品和中药专用设备', 43: '医疗设备', 44: '医药品', 45: '卫星传输服务', 46: '卫生', 47: '印刷服务', 48: '图书和档案',
  4729. 49: '图书档案设备', 50: '图书馆与档案馆', 51: '土地管理业', 52: '地质勘查', 53: '地震服务', 54: '场馆、站港用房', 55: '城市公共交通运输',
  4730. 56: '塑料制品、半成品及辅料', 57: '天然石料', 58: '娱乐设备', 59: '婚姻服务', 60: '安全保护服务', 61: '安全生产设备', 62: '家具用具',
  4731. 63: '家用电器修理', 64: '工业、生产用房', 65: '工业与专业设计及其他专业技术服务', 66: '工矿工程建筑', 67: '工程技术与设计服务', 68: '工程机械',
  4732. 69: '工程监理服务', 70: '工程评价服务', 71: '工程造价服务', 72: '市场调查', 73: '广告业', 74: '广播', 75: '广播、电视、电影设备',
  4733. 76: '广播电视传输服务', 77: '废弃资源综合利用业', 78: '建筑涂料', 79: '建筑物、构筑物附属结构', 80: '建筑物拆除和场地准备活动', 81: '建筑装饰和装修业',
  4734. 82: '录音制作', 83: '影视节目制作', 84: '房地产中介服务', 85: '房地产开发经营', 86: '房地产租赁经营', 87: '房屋租赁', 88: '招标代理',
  4735. 89: '探矿、采矿、选矿和造块设备', 90: '政法、检测专用设备', 91: '教育服务', 92: '教育设备', 93: '文物及非物质文化遗产保护', 94: '文物和陈列品',
  4736. 95: '文艺创作与表演', 96: '文艺设备', 97: '新闻业', 98: '旅行社及相关服务', 99: '日杂用品', 100: '有色金属冶炼及压延产品', 101: '有色金属矿',
  4737. 102: '木材、板材等', 103: '木材采集和加工设备', 104: '机械设备', 105: '机械设备经营租赁', 106: '林业产品', 107: '林业服务', 108: '架线和管道工程建筑',
  4738. 109: '核工业专用设备', 110: '橡胶制品', 111: '殡葬服务', 112: '殡葬设备及用品', 113: '气象服务', 114: '水上交通运输设备', 115: '水上运输业',
  4739. 116: '水利和水运工程建筑', 117: '水工机械', 118: '水文服务', 119: '水资源管理', 120: '污水处理及其再生利用', 121: '汽车、摩托车修理与维护',
  4740. 122: '法律服务', 123: '洗染服务', 124: '测绘地理信息服务', 125: '海洋仪器设备', 126: '海洋工程建筑', 127: '海洋服务', 128: '消防设备',
  4741. 129: '清洁服务', 130: '渔业产品', 131: '渔业服务', 132: '炼焦和金属冶炼轧制设备', 133: '烟草加工设备', 134: '热力生产和供应', 135: '焦炭及其副产品',
  4742. 136: '煤炭采选产品', 137: '燃气生产和供应业', 138: '物业管理', 139: '特种用途动、植物', 140: '环保咨询', 141: '环境与生态监测检测服务',
  4743. 142: '环境污染防治设备', 143: '环境治理业', 144: '玻璃及其制品', 145: '理发及美容服务', 146: '生态保护', 147: '电信',
  4744. 148: '电力、城市燃气、蒸汽和热水、水', 149: '电力供应', 150: '电力工业专用设备', 151: '电力工程施工', 152: '电力生产', 153: '电子和通信测量仪器',
  4745. 154: '电工、电子专用生产设备', 155: '电影放映', 156: '电气安装', 157: '电气设备', 158: '电气设备修理', 159: '畜牧业服务', 160: '监控设备',
  4746. 161: '石油制品', 162: '石油和化学工业专用设备', 163: '石油和天然气开采产品', 164: '石油天然气开采专用设备', 165: '研究和试验发展', 166: '社会工作',
  4747. 167: '社会经济咨询', 168: '科技推广和应用服务业', 169: '科研、医疗、教育用房', 170: '管道和设备安装', 171: '粮油作物和饲料加工设备', 172: '纸、纸制品及印刷品',
  4748. 173: '纺织原料、毛皮、被服装具', 174: '纺织设备', 175: '绿化管理', 176: '缝纫、服饰、制革和毛皮加工设备', 177: '航空器及其配套设备', 178: '航空客货运输',
  4749. 179: '航空航天工业专用设备', 180: '节能环保工程施工', 181: '装卸搬运', 182: '计算机和办公设备维修', 183: '计算机设备', 184: '计量标准器具及量具、衡器',
  4750. 185: '货币处理专用设备', 186: '货币金融服务', 187: '质检技术服务', 188: '资本市场服务', 189: '车辆', 190: '边界勘界和联检专用设备', 191: '运行维护服务',
  4751. 192: '通信设备', 193: '通用设备修理', 194: '道路货物运输', 195: '邮政专用设备', 196: '邮政业', 197: '采矿业和制造业服务',
  4752. 198: '铁路、船舶、航空航天等运输设备修理', 199: '铁路、道路、隧道和桥梁工程建筑', 200: '铁路运输设备', 201: '防洪除涝设施管理', 202: '陶瓷制品',
  4753. 203: '雷达、无线电和卫星导航设备', 204: '非金属矿', 205: '非金属矿物制品工业专用设备', 206: '非金属矿物材料', 207: '食品加工专用设备', 208: '食品及加工盐',
  4754. 209: '餐饮业', 210: '饮料、酒精及精制茶', 211: '饮料加工设备', 212: '饲养动物及其产品', 213: '黑色金属冶炼及压延产品', 214: '黑色金属矿'}
  4755. self.industry_dic = {'专业施工': {'大类': '专业施工', '门类': '建筑业'},
  4756. '专用仪器仪表': {'大类': '专用设备', '门类': '零售批发'},
  4757. '专用设备修理': {'大类': '金属制品、机械和设备修理业', '门类': '金属制品、机械和设备修理业'},
  4758. '互联网信息服务': {'大类': '互联网和相关服务', '门类': '信息传输、软件和信息技术服务业'},
  4759. '互联网安全服务': {'大类': '互联网和相关服务', '门类': '信息传输、软件和信息技术服务业'},
  4760. '互联网平台': {'大类': '互联网和相关服务', '门类': '信息传输、软件和信息技术服务业'},
  4761. '互联网接入及相关服务': {'大类': '互联网和相关服务', '门类': '信息传输、软件和信息技术服务业'},
  4762. '人力资源服务': {'大类': '商务服务业', '门类': '租赁和商务服务业'},
  4763. '人造原油': {'大类': '炼焦产品、炼油产品', '门类': '零售批发'},
  4764. '仓储业': {'大类': '装卸搬运和运输代理业', '门类': '交通运输、仓储和邮政业'},
  4765. '仪器仪表': {'大类': '通用设备', '门类': '零售批发'},
  4766. '仪器仪表修理': {'大类': '金属制品、机械和设备修理业', '门类': '金属制品、机械和设备修理业'},
  4767. '会计、审计及税务服务': {'大类': '商务服务业', '门类': '租赁和商务服务业'},
  4768. '会议、展览及相关服务': {'大类': '商务服务业', '门类': '租赁和商务服务业'},
  4769. '住宅、商业用房': {'大类': '房屋建筑业', '门类': '建筑业'},
  4770. '体育场地设施管理': {'大类': '体育', '门类': '文化、体育和娱乐业'},
  4771. '体育组织': {'大类': '体育', '门类': '文化、体育和娱乐业'},
  4772. '体育设备': {'大类': '专用设备', '门类': '零售批发'},
  4773. '保险服务': {'大类': '保险业', '门类': '金融业'},
  4774. '信息处理和存储支持服务': {'大类': '软件和信息技术服务业', '门类': '信息传输、软件和信息技术服务业'},
  4775. '信息技术咨询服务': {'大类': '软件和信息技术服务业', '门类': '信息传输、软件和信息技术服务业'},
  4776. '信息系统集成和物联网技术服务': {'大类': '软件和信息技术服务业', '门类': '信息传输、软件和信息技术服务业'},
  4777. '修缮工程': {'大类': '修缮工程', '门类': '建筑业'},
  4778. '健康咨询': {'大类': '商务服务业', '门类': '租赁和商务服务业'},
  4779. '公路旅客运输': {'大类': '道路运输业', '门类': '交通运输、仓储和邮政业'},
  4780. '其他专业咨询与调查': {'大类': '商务服务业', '门类': '租赁和商务服务业'},
  4781. '其他专业技术服务': {'大类': '专业技术服务业', '门类': '科学研究和技术服务业'},
  4782. '其他交通运输设备': {'大类': '专用设备', '门类': '零售批发'},
  4783. '其他公共设施管理': {'大类': '公共设施管理业', '门类': '水利、环境和公共设施管理业'},
  4784. '其他土木工程建筑': {'大类': '土木工程建筑业', '门类': '建筑业'},
  4785. '其他工程服务': {'大类': '工程服务', '门类': '科学研究和技术服务业'},
  4786. '其他建筑建材': {'大类': '建筑建材', '门类': '零售批发'},
  4787. '其他运输业': {'大类': '其他运输业', '门类': '交通运输、仓储和邮政业'},
  4788. '农业和林业机械': {'大类': '专用设备', '门类': '零售批发'},
  4789. '农业服务': {'大类': '农林牧副渔服务', '门类': '农林牧副渔服务'},
  4790. '农产品': {'大类': '农林牧渔业产品', '门类': '零售批发'},
  4791. '农副食品,动、植物油制品': {'大类': '食品、饮料和烟草原料', '门类': '零售批发'},
  4792. '出版业': {'大类': '新闻和出版业', '门类': '文化、体育和娱乐业'},
  4793. '办公消耗用品及类似物品': {'大类': '办公消耗用品及类似物品', '门类': '零售批发'},
  4794. '办公设备': {'大类': '通用设备', '门类': '零售批发'},
  4795. '化学原料及化学制品': {'大类': '基础化学品及相关产品', '门类': '零售批发'},
  4796. '化学纤维': {'大类': '基础化学品及相关产品', '门类': '零售批发'},
  4797. '化学药品和中药专用设备': {'大类': '专用设备', '门类': '零售批发'},
  4798. '医疗设备': {'大类': '专用设备', '门类': '零售批发'},
  4799. '医药品': {'大类': '医药品', '门类': '零售批发'},
  4800. '卫星传输服务': {'大类': '电信、广播电视和卫星传输服务', '门类': '信息传输、软件和信息技术服务业'},
  4801. '卫生': {'大类': '卫生', '门类': '卫生和社会工作'},
  4802. '印刷服务': {'大类': '商务服务业', '门类': '租赁和商务服务业'},
  4803. '图书和档案': {'大类': '图书和档案', '门类': '零售批发'},
  4804. '图书档案设备': {'大类': '通用设备', '门类': '零售批发'},
  4805. '图书馆与档案馆': {'大类': '文化艺术业', '门类': '文化、体育和娱乐业'},
  4806. '土地管理业': {'大类': '土地管理业', '门类': '水利、环境和公共设施管理业'},
  4807. '地质勘查': {'大类': '专业技术服务业', '门类': '科学研究和技术服务业'},
  4808. '地震服务': {'大类': '专业技术服务业', '门类': '科学研究和技术服务业'},
  4809. '场馆、站港用房': {'大类': '房屋建筑业', '门类': '建筑业'},
  4810. '城市公共交通运输': {'大类': '道路运输业', '门类': '交通运输、仓储和邮政业'},
  4811. '塑料制品、半成品及辅料': {'大类': '橡胶、塑料、玻璃和陶瓷制品', '门类': '零售批发'},
  4812. '天然石料': {'大类': '建筑建材', '门类': '零售批发'},
  4813. '娱乐设备': {'大类': '专用设备', '门类': '零售批发'},
  4814. '婚姻服务': {'大类': '居民服务业', '门类': '居民服务、修理和其他服务业'},
  4815. '安全保护服务': {'大类': '商务服务业', '门类': '租赁和商务服务业'},
  4816. '安全生产设备': {'大类': '专用设备', '门类': '零售批发'},
  4817. '家具用具': {'大类': '家具用具', '门类': '零售批发'},
  4818. '家用电器修理': {'大类': '机动车、电子产品和日用产品修理业', '门类': '居民服务、修理和其他服务业'},
  4819. '工业、生产用房': {'大类': '房屋建筑业', '门类': '建筑业'},
  4820. '工业与专业设计及其他专业技术服务': {'大类': '专业技术服务业', '门类': '科学研究和技术服务业'},
  4821. '工矿工程建筑': {'大类': '土木工程建筑业', '门类': '建筑业'},
  4822. '工程技术与设计服务': {'大类': '专业技术服务业', '门类': '科学研究和技术服务业'},
  4823. '工程机械': {'大类': '专用设备', '门类': '零售批发'},
  4824. '工程监理服务': {'大类': '工程服务', '门类': '科学研究和技术服务业'},
  4825. '工程评价服务': {'大类': '专业技术服务业', '门类': '科学研究和技术服务业'},
  4826. '工程造价服务': {'大类': '工程服务', '门类': '科学研究和技术服务业'},
  4827. '市场调查': {'大类': '商务服务业', '门类': '租赁和商务服务业'},
  4828. '广告业': {'大类': '商务服务业', '门类': '租赁和商务服务业'},
  4829. '广播': {'大类': '广播、电视、电影和影视录音制作业', '门类': '文化、体育和娱乐业'},
  4830. '广播、电视、电影设备': {'大类': '通用设备', '门类': '零售批发'},
  4831. '广播电视传输服务': {'大类': '电信、广播电视和卫星传输服务', '门类': '信息传输、软件和信息技术服务业'},
  4832. '废弃资源综合利用业': {'大类': '废弃资源综合利用业', '门类': '废弃资源综合利用业'},
  4833. '建筑涂料': {'大类': '建筑建材', '门类': '零售批发'},
  4834. '建筑物、构筑物附属结构': {'大类': '建筑建材', '门类': '零售批发'},
  4835. '建筑物拆除和场地准备活动': {'大类': '建筑装饰和其他建筑业', '门类': '建筑业'},
  4836. '建筑装饰和装修业': {'大类': '建筑装饰和其他建筑业', '门类': '建筑业'},
  4837. '录音制作': {'大类': '广播、电视、电影和影视录音制作业', '门类': '文化、体育和娱乐业'},
  4838. '影视节目制作': {'大类': '广播、电视、电影和影视录音制作业', '门类': '文化、体育和娱乐业'},
  4839. '房地产中介服务': {'大类': '房地产业', '门类': '房地产业'},
  4840. '房地产开发经营': {'大类': '房地产业', '门类': '房地产业'},
  4841. '房地产租赁经营': {'大类': '房地产业', '门类': '房地产业'},
  4842. '房屋租赁': {'大类': '租赁业', '门类': '租赁和商务服务业'},
  4843. '招标代理': {'大类': '专业技术服务业', '门类': '科学研究和技术服务业'},
  4844. '探矿、采矿、选矿和造块设备': {'大类': '专用设备', '门类': '零售批发'},
  4845. '政法、检测专用设备': {'大类': '专用设备', '门类': '零售批发'},
  4846. '教育服务': {'大类': '教育服务', '门类': '教育'},
  4847. '教育设备': {'大类': '专用设备', '门类': '零售批发'},
  4848. '文体设备和用品出租': {'大类': '租赁业', '门类': '租赁和商务服务业'},
  4849. '文物及非物质文化遗产保护': {'大类': '文化艺术业', '门类': '文化、体育和娱乐业'},
  4850. '文物和陈列品': {'大类': '文物和陈列品', '门类': '零售批发'},
  4851. '文艺创作与表演': {'大类': '文化艺术业', '门类': '文化、体育和娱乐业'},
  4852. '文艺设备': {'大类': '专用设备', '门类': '零售批发'},
  4853. '新闻业': {'大类': '新闻和出版业', '门类': '文化、体育和娱乐业'},
  4854. '旅行社及相关服务': {'大类': '商务服务业', '门类': '租赁和商务服务业'},
  4855. '日杂用品': {'大类': '日杂用品', '门类': '零售批发'},
  4856. '有色金属冶炼及压延产品': {'大类': '建筑建材', '门类': '零售批发'},
  4857. '有色金属矿': {'大类': '矿与矿物', '门类': '零售批发'},
  4858. '木材、板材等': {'大类': '建筑建材', '门类': '零售批发'},
  4859. '木材采集和加工设备': {'大类': '专用设备', '门类': '零售批发'},
  4860. '机械设备': {'大类': '通用设备', '门类': '零售批发'},
  4861. '机械设备经营租赁': {'大类': '租赁业', '门类': '租赁和商务服务业'},
  4862. '林业产品': {'大类': '农林牧渔业产品', '门类': '零售批发'},
  4863. '林业服务': {'大类': '农林牧副渔服务', '门类': '农林牧副渔服务'},
  4864. '架线和管道工程建筑': {'大类': '土木工程建筑业', '门类': '建筑业'},
  4865. '核工业专用设备': {'大类': '专用设备', '门类': '零售批发'},
  4866. '橡胶制品': {'大类': '橡胶、塑料、玻璃和陶瓷制品', '门类': '零售批发'},
  4867. '殡葬服务': {'大类': '居民服务业', '门类': '居民服务、修理和其他服务业'},
  4868. '殡葬设备及用品': {'大类': '专用设备', '门类': '零售批发'},
  4869. '气象服务': {'大类': '专业技术服务业', '门类': '科学研究和技术服务业'},
  4870. '水上交通运输设备': {'大类': '专用设备', '门类': '零售批发'},
  4871. '水上运输业': {'大类': '水上运输业', '门类': '交通运输、仓储和邮政业'},
  4872. '水利和水运工程建筑': {'大类': '土木工程建筑业', '门类': '建筑业'},
  4873. '水工机械': {'大类': '专用设备', '门类': '零售批发'},
  4874. '水文服务': {'大类': '水利管理业', '门类': '水利、环境和公共设施管理业'},
  4875. '水资源管理': {'大类': '水利管理业', '门类': '水利、环境和公共设施管理业'},
  4876. '污水处理及其再生利用': {'大类': '水的生产和供应业', '门类': '电力、热力、燃气及水生产和供应业'},
  4877. '汽车、摩托车修理与维护': {'大类': '机动车、电子产品和日用产品修理业', '门类': '居民服务、修理和其他服务业'},
  4878. '法律服务': {'大类': '商务服务业', '门类': '租赁和商务服务业'},
  4879. '洗染服务': {'大类': '居民服务业', '门类': '居民服务、修理和其他服务业'},
  4880. '测绘地理信息服务': {'大类': '专业技术服务业', '门类': '科学研究和技术服务业'},
  4881. '海洋仪器设备': {'大类': '专用设备', '门类': '零售批发'},
  4882. '海洋工程建筑': {'大类': '土木工程建筑业', '门类': '建筑业'},
  4883. '海洋服务': {'大类': '专业技术服务业', '门类': '科学研究和技术服务业'},
  4884. '消防设备': {'大类': '专用设备', '门类': '零售批发'},
  4885. '清洁服务': {'大类': '其他服务业', '门类': '居民服务、修理和其他服务业'},
  4886. '渔业产品': {'大类': '农林牧渔业产品', '门类': '零售批发'},
  4887. '渔业服务': {'大类': '农林牧副渔服务', '门类': '农林牧副渔服务'},
  4888. '炼焦和金属冶炼轧制设备': {'大类': '专用设备', '门类': '零售批发'},
  4889. '烟草加工设备': {'大类': '专用设备', '门类': '零售批发'},
  4890. '热力生产和供应': {'大类': '电力、热力生产和供应业', '门类': '电力、热力、燃气及水生产和供应业'},
  4891. '焦炭及其副产品': {'大类': '炼焦产品、炼油产品', '门类': '零售批发'},
  4892. '煤炭采选产品': {'大类': '矿与矿物', '门类': '零售批发'},
  4893. '燃气生产和供应业': {'大类': '燃气生产和供应业', '门类': '电力、热力、燃气及水生产和供应业'},
  4894. '物业管理': {'大类': '房地产业', '门类': '房地产业'},
  4895. '特种用途动、植物': {'大类': '农林牧渔业产品', '门类': '零售批发'},
  4896. '环保咨询': {'大类': '商务服务业', '门类': '租赁和商务服务业'},
  4897. '环境与生态监测检测服务': {'大类': '专业技术服务业', '门类': '科学研究和技术服务业'},
  4898. '环境污染防治设备': {'大类': '专用设备', '门类': '零售批发'},
  4899. '环境治理业': {'大类': '生态保护和环境治理业', '门类': '水利、环境和公共设施管理业'},
  4900. '玻璃及其制品': {'大类': '橡胶、塑料、玻璃和陶瓷制品', '门类': '零售批发'},
  4901. '理发及美容服务': {'大类': '居民服务业', '门类': '居民服务、修理和其他服务业'},
  4902. '生态保护': {'大类': '生态保护和环境治理业', '门类': '水利、环境和公共设施管理业'},
  4903. '电信': {'大类': '电信、广播电视和卫星传输服务', '门类': '信息传输、软件和信息技术服务业'},
  4904. '电力、城市燃气、蒸汽和热水、水': {'大类': '电力、城市燃气、蒸汽和热水、水', '门类': '零售批发'},
  4905. '电力供应': {'大类': '电力、热力生产和供应业', '门类': '电力、热力、燃气及水生产和供应业'},
  4906. '电力工业专用设备': {'大类': '专用设备', '门类': '零售批发'},
  4907. '电力工程施工': {'大类': '土木工程建筑业', '门类': '建筑业'},
  4908. '电力生产': {'大类': '电力、热力生产和供应业', '门类': '电力、热力、燃气及水生产和供应业'},
  4909. '电子和通信测量仪器': {'大类': '通用设备', '门类': '零售批发'},
  4910. '电工、电子专用生产设备': {'大类': '专用设备', '门类': '零售批发'},
  4911. '电影放映': {'大类': '广播、电视、电影和影视录音制作业', '门类': '文化、体育和娱乐业'},
  4912. '电气安装': {'大类': '建筑安装业', '门类': '建筑业'},
  4913. '电气设备': {'大类': '通用设备', '门类': '零售批发'},
  4914. '电气设备修理': {'大类': '金属制品、机械和设备修理业', '门类': '金属制品、机械和设备修理业'},
  4915. '畜牧业服务': {'大类': '农林牧副渔服务', '门类': '农林牧副渔服务'},
  4916. '监控设备': {'大类': '通用设备', '门类': '零售批发'},
  4917. '石油制品': {'大类': '炼焦产品、炼油产品', '门类': '零售批发'},
  4918. '石油和化学工业专用设备': {'大类': '专用设备', '门类': '零售批发'},
  4919. '石油和天然气开采产品': {'大类': '矿与矿物', '门类': '零售批发'},
  4920. '石油天然气开采专用设备': {'大类': '专用设备', '门类': '零售批发'},
  4921. '研究和试验发展': {'大类': '研究和试验发展', '门类': '科学研究和技术服务业'},
  4922. '社会工作': {'大类': '社会工作', '门类': '卫生和社会工作'},
  4923. '社会经济咨询': {'大类': '商务服务业', '门类': '租赁和商务服务业'},
  4924. '科技推广和应用服务业': {'大类': '科技推广和应用服务业', '门类': '科学研究和技术服务业'},
  4925. '科研、医疗、教育用房': {'大类': '房屋建筑业', '门类': '建筑业'},
  4926. '管道和设备安装': {'大类': '建筑安装业', '门类': '建筑业'},
  4927. '粮油作物和饲料加工设备': {'大类': '专用设备', '门类': '零售批发'},
  4928. '纸、纸制品及印刷品': {'大类': '纸、纸制品及印刷品', '门类': '零售批发'},
  4929. '纺织原料、毛皮、被服装具': {'大类': '纺织原料、毛皮、被服装具', '门类': '零售批发'},
  4930. '纺织设备': {'大类': '专用设备', '门类': '零售批发'},
  4931. '绿化管理': {'大类': '公共设施管理业', '门类': '水利、环境和公共设施管理业'},
  4932. '缝纫、服饰、制革和毛皮加工设备': {'大类': '专用设备', '门类': '零售批发'},
  4933. '航空器及其配套设备': {'大类': '专用设备', '门类': '零售批发'},
  4934. '航空客货运输': {'大类': '航空运输业', '门类': '交通运输、仓储和邮政业'},
  4935. '航空航天工业专用设备': {'大类': '专用设备', '门类': '零售批发'},
  4936. '节能环保工程施工': {'大类': '土木工程建筑业', '门类': '建筑业'},
  4937. '装卸搬运': {'大类': '装卸搬运和运输代理业', '门类': '交通运输、仓储和邮政业'},
  4938. '计算机和办公设备维修': {'大类': '机动车、电子产品和日用产品修理业', '门类': '居民服务、修理和其他服务业'},
  4939. '计算机设备': {'大类': '通用设备', '门类': '零售批发'},
  4940. '计量标准器具及量具、衡器': {'大类': '通用设备', '门类': '零售批发'},
  4941. '货币处理专用设备': {'大类': '专用设备', '门类': '零售批发'},
  4942. '货币金融服务': {'大类': '货币金融服务', '门类': '金融业'},
  4943. '质检技术服务': {'大类': '专业技术服务业', '门类': '科学研究和技术服务业'},
  4944. '资本市场服务': {'大类': '资本市场服务', '门类': '金融业'},
  4945. '车辆': {'大类': '通用设备', '门类': '零售批发'},
  4946. '边界勘界和联检专用设备': {'大类': '专用设备', '门类': '零售批发'},
  4947. '运行维护服务': {'大类': '软件和信息技术服务业', '门类': '信息传输、软件和信息技术服务业'},
  4948. '通信设备': {'大类': '通用设备', '门类': '零售批发'},
  4949. '通用设备修理': {'大类': '金属制品、机械和设备修理业', '门类': '金属制品、机械和设备修理业'},
  4950. '道路货物运输': {'大类': '道路运输业', '门类': '交通运输、仓储和邮政业'},
  4951. '邮政专用设备': {'大类': '专用设备', '门类': '零售批发'},
  4952. '邮政业': {'大类': '邮政业', '门类': '交通运输、仓储和邮政业'},
  4953. '采矿业和制造业服务': {'大类': '采矿业和制造业服务', '门类': '农林牧副渔服务'},
  4954. '铁路、船舶、航空航天等运输设备修理': {'大类': '金属制品、机械和设备修理业', '门类': '金属制品、机械和设备修理业'},
  4955. '铁路、道路、隧道和桥梁工程建筑': {'大类': '土木工程建筑业', '门类': '建筑业'},
  4956. '铁路运输设备': {'大类': '专用设备', '门类': '零售批发'},
  4957. '防洪除涝设施管理': {'大类': '水利管理业', '门类': '水利、环境和公共设施管理业'},
  4958. '陶瓷制品': {'大类': '橡胶、塑料、玻璃和陶瓷制品', '门类': '零售批发'},
  4959. '雷达、无线电和卫星导航设备': {'大类': '通用设备', '门类': '零售批发'},
  4960. '非金属矿': {'大类': '矿与矿物', '门类': '零售批发'},
  4961. '非金属矿物制品工业专用设备': {'大类': '专用设备', '门类': '零售批发'},
  4962. '非金属矿物材料': {'大类': '建筑建材', '门类': '零售批发'},
  4963. '食品加工专用设备': {'大类': '专用设备', '门类': '零售批发'},
  4964. '食品及加工盐': {'大类': '食品、饮料和烟草原料', '门类': '零售批发'},
  4965. '餐饮业': {'大类': '餐饮业', '门类': '住宿和餐饮业'},
  4966. '饮料、酒精及精制茶': {'大类': '食品、饮料和烟草原料', '门类': '零售批发'},
  4967. '饮料加工设备': {'大类': '专用设备', '门类': '零售批发'},
  4968. '饲养动物及其产品': {'大类': '农林牧渔业产品', '门类': '零售批发'},
  4969. '黑色金属冶炼及压延产品': {'大类': '建筑建材', '门类': '零售批发'},
  4970. '黑色金属矿': {'大类': '矿与矿物', '门类': '零售批发'}}
  4971. self.sess = tf.Session(graph=tf.Graph())
  4972. self.get_model()
  4973. with open(os.path.dirname(__file__)+'/industry_rule_kw_json/tw_industry_keyword_org/tw_industry_keyword_org.json', 'r',
  4974. encoding='utf-8') as fp1:
  4975. self.json_data_industry = json.load(fp1)
  4976. with open(os.path.dirname(__file__)+'/industry_rule_kw_json/tw_company_classification_keyword/tw_company_classification_keyword.json', 'r',
  4977. encoding='utf-8') as fp2:
  4978. self.json_data_company = json.load(fp2)
  4979. with open(os.path.dirname(__file__)+'/industry_rule_kw_json/tw_custom_keyword/tw_custom_keyword.json', 'r', encoding='utf-8') as fp3:
  4980. self.json_data_custom = json.load(fp3)
  4981. def get_model(self):
  4982. with self.sess.as_default() as sess:
  4983. with self.sess.graph.as_default():
  4984. meta_graph_def = tf.saved_model.loader.load(sess,
  4985. tags=['serve'],
  4986. export_dir=os.path.dirname(__file__)+'/industry_model')
  4987. signature_key = tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY
  4988. signature_def = meta_graph_def.signature_def
  4989. self.title = sess.graph.get_tensor_by_name(signature_def[signature_key].inputs['title'].name)
  4990. self.project = sess.graph.get_tensor_by_name(signature_def[signature_key].inputs['project'].name)
  4991. self.product = sess.graph.get_tensor_by_name(signature_def[signature_key].inputs['product'].name)
  4992. self.outputs = sess.graph.get_tensor_by_name(signature_def[signature_key].outputs['outputs'].name)
  4993. def text2array(self, text, tenderee='', maxSententLen=20):
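# Added note (editor): normalise full-width brackets, strip common notice boilerplate words and the
# tenderee name, tokenise with selffool, keep only pure-Chinese tokens (at most the last
# maxSententLen), then embed to an array of shape (batch, maxSententLen, 128) for the model.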
  4994. tenderee = tenderee.replace('(', '(').replace(')', ')')
  4995. text = text.replace('(', '(').replace(')', ')')
  4996. text = re.sub(
  4997. '(废标|终止|综?合?评审|评标|开标|资审|履约|验收|成交|中标人?|中选人?|单一来源|合同|候选人|结果|变更|更正|答疑|澄清|意向|需求|采购|招标|询比?价|磋商|谈判|比选|比价|竞价|议价)的?(公告|预告|公示)?|关于为?|选取|定点|直接|邀请函?|通知书?|备案|公开|公示|公告|记录|竞争性',
  4998. ' ', text)
  4999. text = text.replace(tenderee, ' ')
  5000. text = ' ' if text=="" else text
  5001. words_docs_list = selffool.cut(text)
  5002. words_docs_list = [[it for it in l if re.search('^[\u4e00-\u9fa5]+$', it)][-maxSententLen:] for l in words_docs_list]
  5003. array = embedding(words_docs_list, shape=(len(words_docs_list), maxSententLen, 128))
  5004. return array
  5005. def process(self, title, project, product, tenderee):
  5006. return self.text2array(title, tenderee), self.text2array(project, tenderee), self.text2array(product)
  5007. def predict_model(self, title, project, product, tenderee=''):
  5008. title_array, project_array, product_array = self.process(title, project, product, tenderee)
  5009. rs = self.sess.run(self.outputs,
  5010. feed_dict={
  5011. self.title:title_array,
  5012. self.project:project_array,
  5013. self.product:product_array
  5014. }
  5015. )
  5016. pred = np.argmax(rs[0])
  5017. return self.id2lb[pred], rs[0][pred]
  5018. # # 返回top2 结果
  5019. # pred_list = np.argsort(-rs[0])
  5020. # return self.id2lb[pred_list[0]], self.id2lb[pred_list[1]], rs[0][pred_list[0]], rs[0][pred_list[1]]
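# Added note (editor): predict_model() returns (行业小类 label, softmax probability); the label can
# then be mapped to its 大类/门类 through self.industry_dic defined in __init__ above.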
  5021. def predict_rule(self, doctitle, tenderee, win_tenderer, project_name, product):
  5022. doctitle = doctitle if doctitle else ''
  5023. tenderee = tenderee if tenderee else ''
  5024. win_tenderer = win_tenderer if win_tenderer else ''
  5025. project_name = project_name if project_name else ''
  5026. product = product if product else ''
  5027. text_ind = (doctitle + project_name + product).replace(tenderee, '')
  5028. text_ind = text_ind.replace('墙面粉刷', '墙面 粉刷')
  5029. text_com = win_tenderer
  5030. length_ind_text = len(text_ind) + 1
  5031. length_com_text = len(text_com) + 1
  5032. # print(text)
  5033. dic_res = {} # 行业分类字典
  5034. score_lst = [] # 得分列表
  5035. word_lst = [] # 关键词列表
  5036. # 主要内容关键词
  5037. if text_ind:
  5038. # logging.info("data_ind%s"%str(_json_data_industry[0]))
  5039. for data_industry in self.json_data_industry:
  5040. industry = data_industry['xiaolei']
  5041. key_word = data_industry['key_word']
  5042. key_word_2 = data_industry['key_word2']
  5043. power = float(data_industry['power']) if data_industry['power'] else 0
  5044. this_score = power * (text_ind.count(key_word) * len(key_word) / length_ind_text)
  5045. if key_word_2:
  5046. # key_word_compose = key_word + "+" + key_word_2
  5047. if text_ind.count(key_word_2) == 0:
  5048. this_score = 0
  5049. if this_score > 0:
  5050. # print(industry,key_word,this_score)
  5051. if industry in dic_res.keys():
  5052. dic_res[industry] += this_score
  5053. else:
  5054. dic_res[industry] = this_score
  5055. if key_word not in word_lst:
  5056. word_lst.append(key_word)
  5057. # 供应商关键词
  5058. if text_com:
  5059. for data_company in self.json_data_company:
  5060. industry = data_company['industry_type']
  5061. key_word = data_company['company_word']
  5062. power = float(data_company['industry_rate']) if data_company['industry_rate'] else 0
  5063. this_score = power * (text_com.count(key_word) * len(key_word) / length_com_text)
  5064. if this_score > 0:
  5065. # print(industry,key_word,this_score)
  5066. if industry in dic_res.keys():
  5067. dic_res[industry] += this_score
  5068. else:
  5069. dic_res[industry] = this_score
  5070. if key_word not in word_lst:
  5071. word_lst.append(key_word)
  5072. # 自定义关键词
  5073. if text_ind:
  5074. custom_ind = [
  5075. ['tenderee', '医院|疾病预防', ['设备', '系统', '器'], '医疗设备'],
  5076. ['tenderee', '学校|大学|小学|中学|学院|幼儿园', ['设备', '器'], '教育设备'],
  5077. ['tenderee', '学校|大学|小学|中学|学院|幼儿园|医院', ['工程'], '科研、医疗、教育用房'],
  5078. ['tenderee', '供电局|电网|国网|电力|电厂|粤电', ['设备', '器', '物资'], '电力工业专用设备'],
  5079. ['tenderee', '公安|法院|检察院', ['设备', '器'], '政法、检测专用设备'],
  5080. ['tenderee', '^中铁|^中交|^中建|中国建筑', ['材料'], '其他建筑建材'],
  5081. ['doctextcon', '信息技术服务|系统开发|信息化|信息系统', ['监理'], '信息技术咨询服务'],
  5082. ['doctextcon', '工程', ['消防'], '专业施工'],
  5083. ['doctextcon', '铁路|航空|船舶|航天|广铁', ['维修'], '铁路、船舶、航空航天等运输设备修理'],
  5084. ['doctextcon', '设备|仪|器', ['租赁'], '机械设备经营租赁'],
  5085. ['doctextcon', '交通|铁路|公路|道路|桥梁', ['工程'], '铁路、道路、隧道和桥梁工程建筑'],
  5086. ['win_tenderer', '电力', ['设备', '器'], '电力工业专用设备'],
  5087. ['win_tenderer', '信息|网络科技', ['系统'], '信息系统集成和物联网技术服务'],
  5088. ['tenderee,doctextcon', '铁路|广铁|铁道', ['设备', '器', '物资', '材料', '铁路'], '铁路运输设备'],
  5089. ]
  5090. for data_custom in self.json_data_custom:
  5091. industry_custom = data_custom['industry']
  5092. key_word = data_custom['company_word']
  5093. power = float(data_custom['industry_rate'])
  5094. for k in range(len(custom_ind)):
  5095. subject = ''
  5096. if 'tenderee' in custom_ind[k][0]:
  5097. subject += tenderee
  5098. if 'win_tenderer' in custom_ind[k][0]:
  5099. subject += win_tenderer
  5100. if 'doctextcon' in custom_ind[k][0]:
  5101. subject += text_ind
  5102. ptn = custom_ind[k][1]
  5103. # print('ptn',ptn)
  5104. if re.search(ptn, subject) and industry_custom in custom_ind[k][2]:
  5105. industry = custom_ind[k][3]
  5106. else:
  5107. continue
  5108. this_score = power * (text_ind.count(key_word) * len(key_word) / len(subject))
  5109. if this_score > 0:
  5110. # print(industry,key_word,this_score)
  5111. if industry in dic_res.keys():
  5112. dic_res[industry] += this_score
  5113. else:
  5114. dic_res[industry] = this_score
  5115. if key_word not in word_lst:
  5116. word_lst.append(key_word)
  5117. sort_res = sorted(dic_res.items(), key=lambda x: x[1], reverse=True)
  5118. lst_res = [s[0] for s in sort_res]
  5119. score_lst = [str(round(float(s[1]), 2)) for s in sort_res]
  5120. if len(lst_res) > 0:
  5121. return lst_res, score_lst, word_lst
  5122. else:
  5123. return [""], [], []
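# --- Editor's sketch (illustration only, not part of the original module) ---
# predict_rule above accumulates, per industry, power * count(key_word) * len(key_word) / (len(text) + 1)
# and then ranks industries by total score. A self-contained version of that scoring loop on
# made-up keyword rows (the real rule tables json_data_industry / json_data_company are not reproduced):
def _demo_keyword_scoring():
    text = '医院手术室设备采购项目'
    rules = [                                   # hypothetical rows mimicking json_data_industry entries
        {'xiaolei': '医疗设备', 'key_word': '设备', 'power': 1.0},
        {'xiaolei': '医疗设备', 'key_word': '手术', 'power': 0.8},
        {'xiaolei': '专业施工', 'key_word': '工程', 'power': 1.0},
    ]
    length = len(text) + 1
    scores = {}
    for row in rules:
        s = row['power'] * text.count(row['key_word']) * len(row['key_word']) / length
        if s > 0:
            scores[row['xiaolei']] = scores.get(row['xiaolei'], 0) + s
    return sorted(scores.items(), key=lambda x: x[1], reverse=True)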
  5124. def predict_merge(self, pinmu_type, industry_lst):
5125. '''
5126. Apply a series of rules to decide whether the model result or the rule result is used as the final category
5127. :param pinmu_type: category predicted by the model
5128. :param industry_lst: list of categories predicted by the rules
5129. :return:
5130. '''
  5131. industry_type = industry_lst[0]
  5132. if industry_type == "":
  5133. return pinmu_type
  5134. if industry_type == '专用设备修理' and re.search('修理|维修|装修|修缮', pinmu_type):
  5135. final_type = pinmu_type
  5136. elif industry_type == '其他土木工程建筑' and re.search('工程|建筑|用房|施工|安装|质检|其他专业咨询与调查', pinmu_type):
  5137. final_type = pinmu_type
  5138. elif pinmu_type == '专用设备修理' and re.search('工程|修理', industry_type):
  5139. final_type = industry_type
  5140. elif pinmu_type == '信息系统集成和物联网技术服务' and re.search('卫星传输|信息处理和存储支持服务|信息技术咨询服务|运行维护服务|其他专业技术服务|医疗设备|医药品',
  5141. industry_type):
  5142. final_type = industry_type
  5143. elif industry_type == '仪器仪表' and re.search('仪器|器具|医疗设备', pinmu_type):
  5144. final_type = pinmu_type
  5145. elif industry_type == '医药品' and re.search('医疗设备', pinmu_type):
  5146. final_type = pinmu_type
  5149. elif re.search('设备', industry_type) and re.search('修理|维修', pinmu_type):
  5150. final_type = pinmu_type
  5151. elif industry_type == '社会工作' and re.search('工程', pinmu_type):
  5152. final_type = pinmu_type
  5153. elif industry_type == '信息系统集成和物联网技术服务' and re.search('信息处理|设备', pinmu_type):
  5154. final_type = pinmu_type
  5155. elif industry_type == '研究和试验发展' and re.search('其他专业咨询与调查|质检技术服务|信息系统集成|其他工程服务', pinmu_type):
  5156. final_type = pinmu_type
  5157. elif industry_type == '其他专业咨询与调查' and re.search('工程造价服务', pinmu_type):
  5158. final_type = pinmu_type
  5159. elif industry_type == '广告业' and re.search('印刷服务|影视节目制作|信息系统', pinmu_type):
  5160. final_type = pinmu_type
  5161. elif industry_type == '清洁服务' and re.search('工程|环境污染防治设备|修理', pinmu_type):
  5162. final_type = pinmu_type
  5163. elif industry_type == '其他公共设施管理' and re.search('信息系统', pinmu_type):
  5164. final_type = pinmu_type
  5165. elif industry_type == '其他专业技术服务' and re.search('工程技术与设计服务|质检技术服务|环境与生态监测检测服务', pinmu_type):
  5166. final_type = pinmu_type
  5167. elif industry_type == '机械设备经营租赁' and re.search('电信', pinmu_type):
  5168. final_type = pinmu_type
  5169. elif industry_type == '货币金融服务' and re.search('信息系统集成和物联网技术服务', pinmu_type):
  5170. final_type = pinmu_type
  5171. elif industry_type == '体育场地设施管理' and re.search('体育设备', pinmu_type):
  5172. final_type = pinmu_type
  5173. elif industry_type == '安全保护服务' and re.search('信息系统|监控设备|互联网安全服务', pinmu_type):
  5174. final_type = pinmu_type
  5175. elif industry_type == '互联网接入及相关服务' and re.search('通信设备', pinmu_type):
  5176. final_type = pinmu_type
  5177. elif industry_type == '卫生' and re.search('医疗设备|信息系统', pinmu_type):
  5178. final_type = pinmu_type
  5179. elif pinmu_type == '研究和试验发展' and re.search('其他工程服务', industry_type):
  5180. final_type = industry_type
  5181. elif pinmu_type == '办公设备' and re.search('教育设备', industry_type):
  5182. final_type = industry_type
  5183. elif re.search('车辆|机械设备经营租赁', pinmu_type) and re.search('公路旅客运输', industry_type):
  5184. final_type = industry_type
  5185. elif len(industry_lst) > 1 and pinmu_type == industry_lst[1] and re.search('会计|法律|物业|家具|印刷|互联网安全',
  5186. industry_type) == None \
  5187. and re.search('其他|人力资源服务', pinmu_type) == None:
  5188. final_type = pinmu_type
  5189. elif industry_type != "":
  5190. final_type = industry_type
  5191. else:
  5192. final_type = pinmu_type
  5193. return final_type
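# --- Editor's note (illustration only, not part of the original module) ---
# predict_merge above keeps the rule result (industry_type) by default and only falls back
# to the model result (pinmu_type) for the conflicting pairs listed. Two hypothetical calls,
# with invented inputs, showing both outcomes:
#   predict_merge('仪器设备维修', ['专用设备修理', ...])  -> '仪器设备维修' (model result kept: 修理/维修 conflict rule)
#   predict_merge('办公设备', ['物业管理', ...])          -> '物业管理'   (no conflict rule matches, rule result kept)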
  5194. def predict(self, title, project, product, prem):
  5195. def get_ree_win(prem):
  5196. tenderee = ""
  5197. win_tenderer = ""
  5198. try:
  5199. for v in prem[0]['prem'].values():
  5200. for link in v['roleList']:
  5201. if link['role_name'] == 'tenderee' and tenderee == "":
  5202. tenderee = link['role_text']
  5203. elif link['role_name'] == 'win_tenderer' and win_tenderer == "":
  5204. win_tenderer = link['role_text']
  5205. except Exception as e:
  5206. print('解析prem 获取招标人、中标人出错')
  5207. return tenderee, win_tenderer
  5208. tenderee, win_tenderer = get_ree_win(prem)
  5209. result_model, prob = self.predict_model(title, project, product, tenderee)
  5210. industry_lst, score_lst, word_lst = self.predict_rule(title, tenderee, win_tenderer, project, product)
  5211. final_type = self.predict_merge(result_model, industry_lst)
  5212. # print('模型:%s;规则:%s;最终:%s'%(result_model, industry_lst[0], final_type))
  5213. # return {'industry': final_type}
  5214. return {'industry': {
  5215. 'class_name': final_type,
  5216. 'subclass': self.industry_dic[final_type]['大类'],
  5217. 'class': self.industry_dic[final_type]['门类']
  5218. }
  5219. }
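# --- Editor's sketch (illustration only, not part of the original module) ---
# get_ree_win above walks prem[0]['prem'][pack]['roleList'] and keeps the first tenderee and
# the first win_tenderer it sees. The same traversal on a toy prem structure (all values invented):
def _demo_get_ree_win():
    prem = [{'prem': {'Project': {'roleList': [
        {'role_name': 'tenderee', 'role_text': '某某医院'},
        {'role_name': 'win_tenderer', 'role_text': '某某科技有限公司'},
    ]}}}]
    tenderee, win_tenderer = '', ''
    for v in prem[0]['prem'].values():
        for link in v['roleList']:
            if link['role_name'] == 'tenderee' and tenderee == '':
                tenderee = link['role_text']
            elif link['role_name'] == 'win_tenderer' and win_tenderer == '':
                win_tenderer = link['role_text']
    return tenderee, win_tenderer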
  5220. class DistrictPredictor():
  5221. def __init__(self):
  5222. # with open(os.path.dirname(__file__)+'/district_dic.pkl', 'rb') as f:
  5223. # dist_dic = pickle.load(f)
  5224. # short_name = '|'.join(sorted(set([v['简称'] for v in dist_dic.values()]), key=lambda x: len(x), reverse=True))
  5225. # full_name = '|'.join(sorted(set([v['全称'] for v in dist_dic.values()]), key=lambda x: len(x), reverse=True))
  5226. # short2id = {}
  5227. # full2id = {}
  5228. # for k, v in dist_dic.items():
  5229. # if v['简称'] not in short2id:
  5230. # short2id[v['简称']] = [k]
  5231. # else:
  5232. # short2id[v['简称']].append(k)
  5233. # if v['全称'] not in full2id:
  5234. # full2id[v['全称']] = [k]
  5235. # else:
  5236. # full2id[v['全称']].append(k)
  5237. # self.dist_dic = dist_dic
  5238. # self.short_name = short_name
  5239. # self.full_name = full_name
  5240. # self.short2id = short2id
  5241. # self.full2id = full2id
  5242. # # self.f = open(os.path.dirname(__file__)+'/../test/data/district_predict.txt', 'w', encoding='utf-8')
  5243. with open(os.path.dirname(__file__)+'/district_tuple.pkl', 'rb') as f:
  5244. district_tuple = pickle.load(f)
  5245. self.p_pro, self.p_city, self.p_dis, self.idx_dic, self.full_dic, self.short_dic = district_tuple
  5246. def predict_backup(self, project_name, prem, title, list_articles, web_source_name = "", list_entitys=""):
5247. '''
5248. First match project_name+tenderee+tenderee_address; if the province or city is still missing, match title+content as a fallback
5249. :param project_name:
5250. :param prem:
5251. :param title:
5252. :param list_articles:
5253. :param web_source_name:
5254. :return:
5255. '''
  5256. def get_ree_addr(prem):
  5257. tenderee = ""
  5258. tenderee_address = ""
  5259. try:
  5260. for v in prem[0]['prem'].values():
  5261. for link in v['roleList']:
  5262. if link['role_name'] == 'tenderee' and tenderee == "":
  5263. tenderee = link['role_text']
  5264. tenderee_address = link['address']
  5265. except Exception as e:
  5266. print('解析prem 获取招标人、及地址出错')
  5267. return tenderee, tenderee_address
  5268. def get_area(text, web_source_name, not_in_content=True):
  5269. score_l = []
  5270. id_set = set()
  5271. if re.search(self.short_name, text):
  5272. for it in re.finditer(self.full_name, text):
  5273. name = it.group(0)
  5274. score = len(name) / len(text)
  5275. for _id in self.full2id[name]:
  5276. area = self.dist_dic[_id]['area'] + [''] * (3 - len(self.dist_dic[_id]['area']))
  5277. # score_l.append([_id, score] + area)
  5278. # w = self.dist_dic[_id]['权重']
  5279. score_l.append([_id, score + 1] + area) # 匹配全称的加1 ,不加权重,因为权重某些赋值不好
  5280. flag = 0
  5281. for it in re.finditer(self.short_name, text):
  5282. if it.end() < len(text) and re.search('^(村|镇|街|路|江|河|湖|北路|南路|东路|大道|社区)', text[it.end():]) == None:
  5283. name = it.group(0)
  5284. score = (it.start() + len(name)) / len(text)
  5285. for _id in self.short2id[name]:
  5286. score2 = 0
  5287. w = self.dist_dic[_id]['权重']
  5288. _type = self.dist_dic[_id]['类型']
  5289. area = self.dist_dic[_id]['area'] + [''] * (3 - len(self.dist_dic[_id]['area']))
  5290. if area[0] in ['2', '16', '20', '30']:
  5291. _type += 10
  5292. if w < 1 and it.end() < len(text) and text[it.end()] in ['省', '市', '县']: # 如果简称后面 有省市县权重改为1
  5293. w = 1
  5294. score2 += w
  5295. if _id not in id_set:
  5296. if _type == 20:
  5297. type_w = 3
  5298. elif _type == 30:
  5299. if it.start()>3 and text[it.start()-1] == '市': # 城市后面 简称不能作为市
  5300. type_w = 0
  5301. else:
  5302. type_w = 2
  5303. else:
  5304. if it.end()<len(text) and text[it.end()] == '市': # 简称后面 有市字 改为市级
  5305. type_w = 2
  5306. else:
  5307. type_w = 0.5
  5308. id_set.add(_id)
  5309. score2 += w * type_w
  5310. score_l.append([_id, score * w + score2] + area)
  5311. if flag == 1:
  5312. pass
  5313. # print('score', score)
  5314. if re.search('公司', web_source_name) == None:
  5315. for it in re.finditer(self.short_name, web_source_name):
  5316. name = it.group(0)
  5317. for _id in self.short2id[name]:
  5318. area = self.dist_dic[_id]['area'] + [''] * (3 - len(self.dist_dic[_id]['area']))
  5319. w = self.dist_dic[_id]['权重']
  5320. score = w * 0.2
  5321. score_l.append([_id, score] + area)
  5322. area_dic = {'area': '全国', 'province': '全国', 'city': '未知', 'district': '未知', "is_in_text": False}
  5323. if len(score_l) == 0:
  5324. return {'district': area_dic}
  5325. else:
  5326. df = pd.DataFrame(score_l, columns=['id', 'score', 'province', 'city', 'district'])
  5327. df['简称'] = df['id'].apply(lambda x: self.dist_dic[x]['地区'])
  5328. # print('地区评分:')
  5329. # print(df)
  5330. df_pro = df.groupby('province').sum().sort_values(by=['score'], ascending=False)
  5331. pro_id = df_pro.index[0]
  5332. if df_pro.loc[pro_id, 'score'] < 0.1 and not_in_content: # 不是二次全文匹配的 省级评分小于0.1的不要
  5333. # print('评分低于0.1', df_pro.loc[pro_id, 'score'], self.dist_dic[pro_id]['地区'])
  5334. return {'district': area_dic}
  5335. area_dic['province'] = self.dist_dic[pro_id]['地区']
  5336. area_dic['area'] = self.dist_dic[pro_id]['大区']
  5337. df = df[df['city'] != ""]
  5338. df = df[df['province'] == pro_id]
  5339. if len(df) > 0:
  5340. df_city = df.groupby('city').sum().sort_values(by=['score'], ascending=False)
  5341. city_id = df_city.index[0]
  5342. area_dic['city'] = self.dist_dic[city_id]['地区']
  5343. df = df[df['district'] != ""]
  5344. df = df[df['city'] == city_id]
  5345. if len(df) > 0:
  5346. df_dist = df.groupby('district').sum().sort_values(by=['score'], ascending=False)
  5347. dist_id = df_dist.index[0]
  5348. area_dic['district'] = self.dist_dic[dist_id]['地区']
  5349. # print(area_dic)
  5350. return {'district': area_dic}
  5351. def get_role_address(text):
5352. '''Extract the tenderee address with regular expressions
5353. 3: the address directly follows the tenderee, e.g. 招标人:xxx,地址:xxx
5354. 4: tenderee and agent listed together with two addresses in a row, e.g. 招标人:xxx, 代理人:xxx, 地址:xxx, 地址:xxx.
5355. '''
  5356. p3 = '(招标|采购|甲)(人|方|单位)(信息:|(甲方))?(名称)?:[\w()]{4,15},(联系)?地址:(?P<addr>(\w{1,13}(自治[区州县旗]|地区|[省市区县旗盟])[^\w]*)+|\w{2,15}[,。])'
  5357. p4 = '(招标|采购|甲)(人|方|单位)(信息:|(甲方))?(名称)?:[\w()]{4,15},(招标|采购)?代理(人|机构)(名称)?:[\w()]{4,15},(联系)?地址:(?P<addr>(\w{1,13}(自治[区州县旗]|地区|[省市区县旗盟])[^\w]*)+|\w{2,15}[,。])'
  5358. p5 = '(采购|招标)(人|单位)(联系)?地址:(?P<addr>(\w{1,13}(自治[区州县旗]|地区|[省市区县旗盟])[^\w]*)+|\w{2,15}[,。])'
  5359. if re.search(p3, text):
  5360. return re.search(p3, text).group('addr')
  5361. elif re.search(p4, text):
  5362. return re.search(p4, text).group('addr')
  5363. elif re.search(p5, text):
  5364. return re.search(p5, text).group('addr')
  5365. else:
  5366. return ''
  5367. def get_project_addr(text):
  5368. p1 = '(项目(施工|实施)?|建设|工程|服务|交货|送货|收货|展示|看样|拍卖)(地址|地点|位置|所在地区?):(?P<addr>(\w{1,13}(自治[区州县旗]|地区|[省市区县旗盟])[^\w]*)+|\w{2,15}[,。])'
  5369. if re.search(p1, text):
  5370. return re.search(p1, text).group('addr')
  5371. else:
  5372. return ''
  5373. def get_bid_addr(text):
  5374. p2 = '(磋商|谈判|开标|投标|评标|报名|递交|评审|发售)(地址|地点|所在地区?):(?P<addr>(\w{1,13}(自治[区州县旗]|地区|[省市区县旗盟])[^\w]*)+|\w{2,15}[,。])'
  5375. if re.search(p2, text):
  5376. return re.search(p2, text).group('addr')
  5377. else:
  5378. return ''
  5379. def get_all_addr(list_entitys):
  5380. tenderee_l = []
  5381. addr_l = []
  5382. for ent in list_entitys[0]:
  5383. if ent.entity_type == 'location' and len(ent.entity_text)>2:
  5384. addr_l.append(ent.entity_text)
  5385. elif ent.entity_type in ['org', 'company']:
  5386. if ent.label in [0, 1]: # 加招标或代理
  5387. tenderee_l.append(ent.entity_text)
  5388. return ' '.join(addr_l), ' '.join(tenderee_l)
  5389. def get_title_addr(text):
  5390. p1 = '(?P<addr>(\w{1,13}(自治[区州县旗]|地区|[省市区县旗盟])[^\w]*)+|\w{2,15}[,。])'
  5391. if re.search(p1, text):
  5392. return re.search(p1, text).group('addr')
  5393. else:
  5394. return ''
  5395. if '##attachment##' in list_articles[0].content:
  5396. content, attachment = list_articles[0].content.split('##attachment##')
  5397. if len(content) < 200:
  5398. content += attachment
  5399. else:
  5400. content = list_articles[0].content
  5401. tenderee, tenderee_address = get_ree_addr(prem)
  5402. msc = ""
  5403. pro_addr = get_project_addr(content)
  5404. if pro_addr != "":
  5405. msc += '使用规则提取的项目地址;'
  5406. tenderee_address = pro_addr
  5407. else:
  5408. role_addr = get_role_address(content)
  5409. if role_addr != "":
  5410. msc += '使用规则提取的联系人地址;'
  5411. tenderee_address = role_addr
  5412. if tenderee_address == "":
  5413. title_addr = get_title_addr(title)
  5414. if title_addr != "":
  5415. msc += '使用规则提取的标题地址;'
  5416. tenderee_address = title_addr
  5417. else:
  5418. bid_addr = get_bid_addr(content)
  5419. if bid_addr != "":
  5420. msc += '使用规则提取的开标地址;'
  5421. tenderee_address = bid_addr
  5422. project_name = str(project_name)
  5423. tenderee = str(tenderee)
  5424. # print('招标人地址',role_addr, tenderee_address)
  5425. project_name = project_name + title if project_name not in title else project_name
  5426. project_name = project_name.replace(tenderee, '')
  5427. text1 = "{0} {1} {2}".format(project_name, tenderee, tenderee_address)
  5428. web_source_name = str(web_source_name) # 修复某些不是字符串类型造成报错
  5429. text1 = re.sub('复合肥|铁路|公路|新会计', ' ', text1) #预防提取错 合肥 路南 新会 等地区
  5430. if pro_addr:
  5431. msc += '## 使用项目地址输入:%s ##;' % pro_addr
  5432. rs = get_area(pro_addr, '')
  5433. msc += '预测结果:省份:%s, 城市:%s,区县:%s;' % (
  5434. rs['district']['province'], rs['district']['city'], rs['district']['district'])
  5435. if rs['district']['province'] != '全国':
  5436. # print('地区匹配:', msc)
  5437. return rs
  5438. # print('text1:', text1)
  5439. msc += '## 第一次预测输入:%s ##;'%text1
  5440. rs = get_area(text1, web_source_name)
  5441. msc += '预测结果:省份:%s, 城市:%s,区县:%s;' % (
  5442. rs['district']['province'], rs['district']['city'], rs['district']['district'])
  5443. # self.f.write('%s %s \n' % (list_articles[0].id, msc))
  5444. # print('地区匹配:', msc)
  5445. if rs['district']['province'] == '全国' or rs['district']['city'] == '未知':
  5446. msc = ""
  5447. all_addr, tenderees = get_all_addr(list_entitys)
  5448. text2 = tenderees + " " + all_addr + ' ' + title
  5449. msc += '使用实体列表所有招标人+所有地址;'
  5450. # text2 += title + content if len(content)<2000 else title + content[:1000] + content[-1000:]
  5451. text2 = re.sub('复合肥|铁路|公路|新会计', ' ', text2)
  5452. # print('text2:', text2)
  5453. msc += '## 第二次预测输入:%s ##'%text2
  5454. rs2 = get_area(text2, web_source_name, not_in_content=False)
  5455. rs2['district']['is_in_text'] = True
  5456. if rs['district']['province'] == '全国' and rs2['district']['province'] != '全国':
  5457. rs = rs2
  5458. elif rs['district']['province'] == rs2['district']['province'] and rs2['district']['city'] != '未知':
  5459. rs = rs2
  5460. msc += '预测结果:省份:%s, 城市:%s,区县:%s'%(
  5461. rs['district']['province'],rs['district']['city'],rs['district']['district'])
  5462. # self.f.write('%s %s \n'%(list_articles[0].id, msc))
  5463. # print('地区匹配:', msc)
  5464. return rs
  5465. def get_area(self, text, web_name, in_content=False):
  5466. p_pro, p_city, p_dis, idx_dic, full_dic, short_dic = self.p_pro, self.p_city, self.p_dis, self.idx_dic, self.full_dic, self.short_dic
  5467. def get_final_addr(pro_ids, city_ids, dis_ids):
5468. '''
5469. Convert all matched full and short names into ids; if a province is found and one of the matched cities belongs to it, take that city (and likewise for districts)
5470. :param pro_ids: scores of all matched provinces
5471. :param city_ids: scores of all matched cities
5472. :param dis_ids: scores of all matched districts/counties
5473. :return:
5474. '''
  5475. big_area = ""
  5476. pred_pro = ""
  5477. pred_city = ""
  5478. pred_dis = ""
  5479. final_pro = ""
  5480. final_city = ""
  5481. if len(pro_ids) >= 1:
  5482. pro_l = sorted([(k, v) for k, v in pro_ids.items()], key=lambda x: x[1], reverse=True)
  5483. final_pro, score = pro_l[0]
  5484. if score >= 0.01:
  5485. pred_pro = idx_dic[final_pro]['返回名称']
  5486. big_area = idx_dic[final_pro]['大区']
  5487. # else:
  5488. # print("得分过低,过滤掉", idx_dic[final_pro]['返回名称'], score)
  5489. if pred_pro != "" and len(city_ids) >= 1:
  5490. city_l = sorted([(k, v) for k, v in city_ids.items()], key=lambda x: x[1], reverse=True)
  5491. for it in city_l:
  5492. if idx_dic[it[0]]['省'] == final_pro:
  5493. final_city = it[0]
  5494. pred_city = idx_dic[final_city]['返回名称']
  5495. break
  5496. if final_city != "" and len(set(dis_ids)) >= 1:
  5497. dis_l = sorted([(k, v) for k, v in dis_ids.items()], key=lambda x: x[1], reverse=True)
  5498. for it in dis_l:
  5499. if idx_dic[it[0]]['市'] == final_city:
  5500. pred_dis = idx_dic[it[0]]['返回名称']
  5501. if pred_city in ['北京', '天津', '上海', '重庆']:
  5502. pred_city = pred_dis
  5503. pred_dis = ""
  5504. return big_area, pred_pro, pred_city, pred_dis
  5505. def find_areas(pettern, text):
5506. '''
5507. Return the addresses matched in a string by regular expression
5508. :param pettern: address regex, e.g. 广东省|广西省|...
5509. :param text: text to match against
5510. :return:
5511. '''
  5512. addr = []
  5513. for it in re.finditer(pettern, text):
  5514. if re.search('[省市区县旗盟]$', it.group(0)) == None and re.search(
  5515. '^([东南西北中一二三四五六七八九十大小]?(村|镇|街|路|道|社区)|酒店|宾馆)', text[it.end():]):
  5516. continue
  5517. if it.group(0) == '站前': # 20240314 修复类似 中铁二局新建沪苏湖铁路工程站前VI标项目 错识别为 省份:辽宁, 城市:营口,区县:站前
  5518. continue
  5519. if re.search('^(经济开发区|开发区|新区)', text[it.end():]) and re.search('广州市', pettern): # 城市不匹配为区的地址 修复 滨州北海经济开发区 北海新区 等提取为北海
  5520. continue
  5521. addr.append((it.group(0), it.start(), it.end()))
  5522. if re.search('^([分支](公司|局|行|校|院|干?线)|\w{,3}段|地铁|(火车|高铁)?站|\w{,3}项目)', text[it.end():]):
  5523. addr.append((it.group(0), it.start(), it.end()))
  5524. return addr
  5525. def chage_area2score(group_list, max_len):
5526. '''
5527. Convert the matched addresses into scores
5528. :param group_list: [('name', b, e)]
5529. :return:
5530. '''
  5531. area_list = []
  5532. if group_list != []:
  5533. for it in group_list:
  5534. name, b, e = it
  5535. area_list.append((name, (e - b + e) / max_len / 2))
  5536. return area_list
  5537. def find_whole_areas(text):
5538. '''
5539. Return province/city/district matches in a string via one combined regular expression
5540. (the pattern is built internally from p_pro, p_city and p_dis)
5541. :param text: text to match against
5542. :return:
5543. '''
  5544. pettern = "((?P<prov>%s)(?P<city>%s)?(?P<dist>%s)?)|((?P<city1>%s)(?P<dist1>%s)?)|(?P<dist2>%s)" % (
  5545. p_pro, p_city, p_dis, p_city, p_dis, p_dis)
  5546. province_l, city_l, district_l = [], [], []
  5547. for it in re.finditer(pettern, text):
  5548. if re.search('[省市区县旗盟]$', it.group(0)) == None and re.search(
  5549. '^([东南西北中一二三四五六七八九十大小]?(村|镇|街|路|道|社区)|酒店|宾馆)', text[it.end():]):
  5550. continue
  5551. if it.group(0) == '站前': # 20240314 修复类似 中铁二局新建沪苏湖铁路工程站前VI标项目 错识别为 省份:辽宁, 城市:营口,区县:站前
  5552. continue
  5553. for k, v in it.groupdict().items():
  5554. if v != None:
  5555. if k in ['prov']:
  5556. province_l.append((it.group(k), it.start(k), it.end(k)))
  5557. elif k in ['city', 'city1']:
  5558. if re.search('^(经济开发区|开发区|新区)', text[it.end(k):]): # 城市不匹配为区的地址 修复 滨州北海经济开发区 北海新区 等提取为北海
  5559. continue
  5560. city_l.append((it.group(k), it.start(k), it.end(k)))
  5561. if re.search('^([分支](公司|局|行|校|院|干?线)|\w{,3}段|地铁|(火车|高铁)?站|\w{,3}项目)', text[it.end(k):]):
  5562. city_l.append((it.group(k), it.start(k), it.end(k)))
  5563. elif k in ['dist', 'dist1', 'dist2']:
  5564. if it.group(k)=='昌江' and '景德镇' not in it.group(0):
  5565. district_l.append(('昌江黎族', it.start(k), it.end(k)))
  5566. else:
  5567. district_l.append((it.group(k), it.start(k), it.end(k)))
  5568. return province_l, city_l, district_l
  5569. def get_pro_city_dis_score(text, text_weight=1):
  5570. text = re.sub('复合肥|海南岛|兴业银行|双河口|阳光|杭州湾', ' ', text)
  5571. text = re.sub('珠海城市', '珠海', text) # 修复 426624023 珠海城市 预测为海城市
  5572. text = re.sub('怒江州', '怒江傈僳族自治州', text) # 修复 423589589 所属地域:怒江州 识别为广西 - 崇左 - 江州
  5573. text = re.sub('茂名滨海新区', '茂名市', text)
  5574. text = re.sub('中山([东南西][部区环]|黄圃|南头|东凤|小榄|石岐|翠亨|南朗)', '中山市', text)
  5575. ser = re.search('海南(昌江|白沙|乐东|陵水|保亭|琼中)(黎族)?', text)
  5576. if ser and '黎族' not in ser.group(0):
  5577. text = text.replace(ser.group(0), ser.group(0)+'黎族')
  5578. # province_l = find_areas(p_pro, text)
  5579. # city_l = find_areas(p_city, text)
  5580. # district_l = find_areas(p_dis, text)
  5581. province_l, city_l, district_l = find_whole_areas(text) # 20240703 优化地址提取,解决类似 海南昌江 得到 海南 南昌 结果
  5582. if len(province_l) == len(city_l) == 0:
  5583. district_l = [it for it in district_l if
  5584. re.search('[市县旗区]$', it[0])] # 20240428去掉只有区县地址且不是全称的匹配,避免错误 例 凌云工业股份有限公司 提取地区为广西白色凌云
  5585. province_l = chage_area2score(province_l, max_len=len(text))
  5586. city_l = chage_area2score(city_l, max_len=len(text))
  5587. district_l = chage_area2score(district_l, max_len=len(text))
  5588. pro_ids = dict()
  5589. city_ids = dict()
  5590. dis_ids = dict()
  5591. for pro in province_l:
  5592. name, score = pro
  5593. assert (name in full_dic['province'] or name in short_dic['province'])
  5594. if name in full_dic['province']:
  5595. idx = full_dic['province'][name]
  5596. if idx not in pro_ids:
  5597. pro_ids[idx] = 0
  5598. pro_ids[idx] += (score + 1)
  5599. else:
  5600. idx = short_dic['province'][name]
  5601. if idx not in pro_ids:
  5602. pro_ids[idx] = 0
  5603. pro_ids[idx] += (score + 0)
  5604. for city in city_l:
  5605. name, score = city
  5606. if name in full_dic['city']:
  5607. w = 0.1 if len(full_dic['city'][name]) > 1 else 1
  5608. for idx in full_dic['city'][name]:
  5609. if idx not in city_ids:
  5610. city_ids[idx] = 0
  5611. # weight = idx_dic[idx]['权重']
  5612. city_ids[idx] += (score + 2) * w
  5613. pro_idx = idx_dic[idx]['省']
  5614. if pro_idx in pro_ids:
  5615. pro_ids[pro_idx] += (score + 2) * w
  5616. else:
  5617. pro_ids[pro_idx] = (score + 2) * w * 0.5
  5618. elif name in short_dic['city']:
  5619. w = 0.1 if len(short_dic['city'][name]) > 1 else 1
  5620. for idx in short_dic['city'][name]:
  5621. if idx not in city_ids:
  5622. city_ids[idx] = 0
  5623. weight = idx_dic[idx]['权重']
  5624. city_ids[idx] += (score + 1) * w * weight
  5625. pro_idx = idx_dic[idx]['省']
  5626. if pro_idx in pro_ids:
  5627. pro_ids[pro_idx] += (score + 1) * w * weight
  5628. else:
  5629. pro_ids[pro_idx] = (score + 1) * w * weight * 0.5
  5630. for dis in district_l:
  5631. name, score = dis
  5632. if name in full_dic['district']:
  5633. w = 0.1 if len(full_dic['district'][name]) > 1 else 1
  5634. for idx in full_dic['district'][name]:
  5635. if idx not in dis_ids:
  5636. dis_ids[idx] = 0
  5637. # weight = idx_dic[idx]['权重']
  5638. dis_ids[idx] += (score + 1) * w
  5639. pro_idx = idx_dic[idx]['省']
  5640. if pro_idx in pro_ids:
  5641. pro_ids[pro_idx] += (score + 1) * w
  5642. else:
  5643. pro_ids[pro_idx] = (score + 1) * w * 0.5
  5644. city_idx = idx_dic[idx]['市']
  5645. if city_idx in city_ids:
  5646. city_ids[city_idx] += (score + 1) * w
  5647. else:
  5648. city_ids[city_idx] = (score + 1) * w * 0.5
  5649. elif name in short_dic['district']:
  5650. w = 0.1 if len(short_dic['district'][name]) > 1 else 1
  5651. for idx in short_dic['district'][name]:
  5652. if idx not in dis_ids:
  5653. dis_ids[idx] = 0
  5654. weight = idx_dic[idx]['权重']
  5655. dis_ids[idx] += (score + 0) * w
  5656. pro_idx = idx_dic[idx]['省']
  5657. if pro_idx in pro_ids:
  5658. pro_ids[pro_idx] += (score + 0) * w * weight
  5659. else:
  5660. pro_ids[pro_idx] = (score + 0) * w * weight * 0.5
  5661. city_idx = idx_dic[idx]['市']
  5662. if city_idx in city_ids:
  5663. city_ids[city_idx] += (score + 0) * w * weight
  5664. else:
  5665. city_ids[city_idx] = (score + 0) * w * weight * 0.1
  5666. for k, v in pro_ids.items():
  5667. pro_ids[k] = v * text_weight
  5668. for k, v in city_ids.items():
  5669. city_ids[k] = v * text_weight
  5670. for k, v in dis_ids.items():
  5671. dis_ids[k] = v * text_weight
  5672. return pro_ids, city_ids, dis_ids
  5673. area_dic = {'area': '全国', 'province': '全国', 'city': '未知', 'district': '未知', "is_in_text": False}
  5674. pro_ids, city_ids, dis_ids = get_pro_city_dis_score(text)
  5675. pro_ids1, city_ids1, dis_ids1 = get_pro_city_dis_score(web_name[:3], text_weight=0.2) # 20240422 修改为站源名称只取前三字,避免类似 459056219 中金岭南阳光采购平台 错提取阳光
  5676. for k in pro_ids1:
  5677. if k in pro_ids:
  5678. pro_ids[k] += pro_ids1[k]
  5679. else:
  5680. pro_ids[k] = pro_ids1[k]
  5681. for k in city_ids1:
  5682. if k in city_ids:
  5683. city_ids[k] += city_ids1[k]
  5684. else:
  5685. city_ids[k] = city_ids1[k]
  5686. for k in dis_ids1:
  5687. if k in dis_ids:
  5688. dis_ids[k] += dis_ids1[k]
  5689. else:
  5690. dis_ids[k] = dis_ids1[k]
  5691. big_area, pred_pro, pred_city, pred_dis = get_final_addr(pro_ids, city_ids, dis_ids)
  5692. if big_area != "":
  5693. area_dic['area'] = big_area
  5694. if pred_pro != "":
  5695. area_dic['province'] = pred_pro
  5696. if pred_city != "":
  5697. area_dic['city'] = pred_city
  5698. if pred_dis != "":
  5699. area_dic['district'] = pred_dis
  5700. if in_content:
  5701. area_dic['is_in_text'] = True
  5702. return {'district': area_dic}
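# --- Editor's sketch (illustration only, not part of the original module) ---
# get_area above builds three {region_id: score} dicts (province / city / district), adds the
# web-source scores with a 0.2 text_weight, and get_final_addr then picks the best province and
# the best-scoring city that actually belongs to it. A stripped-down version of that resolution
# step, with a made-up idx_dic and made-up ids/scores:
def _demo_resolve_area():
    idx_dic = {'p1': {'返回名称': '广东', '大区': '华南'},
               'c1': {'返回名称': '广州', '省': 'p1'},
               'c2': {'返回名称': '南宁', '省': 'p2'}}
    pro_ids = {'p1': 2.3, 'p2': 0.4}
    city_ids = {'c2': 1.1, 'c1': 0.9}
    pred_pro = max(pro_ids, key=pro_ids.get)                     # best-scoring province
    cities = sorted(city_ids.items(), key=lambda x: x[1], reverse=True)
    pred_city = next((c for c, _ in cities if idx_dic[c]['省'] == pred_pro), '')
    return (idx_dic[pred_pro]['大区'], idx_dic[pred_pro]['返回名称'],
            idx_dic[pred_city]['返回名称'] if pred_city else '未知')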
  5703. def predict(self, project_name, prem, title, list_articles, web_source_name = "", list_entitys=""):
5704. '''
5705. First match project_name+tenderee+tenderee_address; if the province or city is still missing, match title+content as a fallback
5706. :param project_name:
5707. :param prem:
5708. :param title:
5709. :param list_articles:
5710. :param web_source_name:
5711. :return:
5712. '''
  5713. def get_ree_addr(prem):
  5714. tenderee = ""
  5715. tenderee_address = ""
  5716. try:
  5717. for v in prem[0]['prem'].values():
  5718. for link in v['roleList']:
  5719. if link['role_name'] == 'tenderee' and tenderee == "":
  5720. tenderee = link['role_text']
  5721. tenderee_address = link['address']
  5722. except Exception as e:
  5723. print('解析prem 获取招标人、及地址出错')
  5724. return tenderee, tenderee_address
  5725. def get_role_address(text):
5726. '''Extract the tenderee address with regular expressions
5727. 3: the address directly follows the tenderee, e.g. 招标人:xxx,地址:xxx
5728. 4: tenderee and agent listed together with two addresses in a row, e.g. 招标人:xxx, 代理人:xxx, 地址:xxx, 地址:xxx.
5729. '''
  5730. p3 = '(招标|采购|甲)(人|方|单位)(信息:|(甲方))?(名称)?:[\w()]{4,15},(联系)?地址:(?P<addr>(\w{1,13}(自治[区州县旗]|地区|[省市区县旗盟])[^\w]*)+|\w{2,15}[,。])'
  5731. p4 = '(招标|采购|甲)(人|方|单位)(信息:|(甲方))?(名称)?:[\w()]{4,15},(招标|采购)?代理(人|机构)(名称)?:[\w()]{4,15},(联系)?地址:(?P<addr>(\w{1,13}(自治[区州县旗]|地区|[省市区县旗盟])[^\w]*)+|\w{2,15}[,。])'
  5732. p5 = '(采购|招标)(人|单位)(联系)?地址:(?P<addr>(\w{1,13}(自治[区州县旗]|地区|[省市区县旗盟])[^\w]*)+|\w{2,15}[,。])'
  5733. if re.search(p3, text):
  5734. return re.search(p3, text).group('addr')
  5735. elif re.search(p4, text):
  5736. return re.search(p4, text).group('addr')
  5737. elif re.search(p5, text):
  5738. return re.search(p5, text).group('addr')
  5739. else:
  5740. return ''
  5741. def get_project_addr(text):
  5742. p1 = '(项目|施工|实施|建设|工程|服务|交货|送货|收货|展示|看样|拍卖)(地址|地点|位置|所在地区?)(位于)?:(?P<addr>(\w{1,13}(自治[区州县旗]|地区|[省市区县旗盟])[^\w]*)+|\w{2,15}[,。])'
  5743. p2 = '项目位于(?P<addr>\w{2}市\w{2,4}区)'
  5744. if re.search(p1, text):
  5745. return re.search(p1, text).group('addr')
  5746. elif re.search(p2, text):
  5747. return re.search(p2, text).group('addr')
  5748. else:
  5749. return ''
  5750. def get_bid_addr(text):
  5751. p2 = '(磋商|谈判|开标|投标|评标|报名|递交|评审|发售|所属)(地址|地点|所在地区?|地域):(?P<addr>(\w{1,13}(自治[区州县旗]|地区|[省市区县旗盟])[^\w]*)+|\w{2,15}[,。])'
  5752. if re.search(p2, text):
  5753. return re.search(p2, text).group('addr')
  5754. else:
  5755. return ''
  5756. def get_all_addr(list_entitys):
  5757. tenderee_l = []
  5758. addr_l = []
  5759. for ent in list_entitys[0]:
  5760. if ent.entity_type == 'location' and len(ent.entity_text) > 2:
  5761. addr_l.append(ent.entity_text)
  5762. elif ent.entity_type in ['org', 'company']:
  5763. if ent.label in [0, 1]: # 加招标或代理
  5764. tenderee_l.append(ent.entity_text)
  5765. return ' '.join(addr_l), ' '.join(tenderee_l)
  5766. def get_title_addr(text):
  5767. p1 = '(?P<addr>(\w{1,13}(自治[区州县旗]|地区|[省市区县旗盟])[^\w]*)+|\w{2,15}[,。])'
  5768. if re.search(p1, text):
  5769. return re.search(p1, text).group('addr')
  5770. else:
  5771. return ''
  5772. if '##attachment##' in list_articles[0].content:
  5773. content, attachment = list_articles[0].content.split('##attachment##')
  5774. if len(content) < 200:
  5775. content += attachment
  5776. else:
  5777. content = list_articles[0].content
  5778. tenderee, tenderee_address = get_ree_addr(prem)
  5779. msc = ""
  5780. pro_addr = get_project_addr(content)
  5781. if pro_addr != "":
  5782. msc += '使用规则提取的项目地址;'
  5783. tenderee_address = pro_addr
  5784. else:
  5785. role_addr = get_role_address(content)
  5786. if role_addr != "":
  5787. msc += '使用规则提取的联系人地址;'
  5788. tenderee_address = role_addr
  5789. if tenderee_address == "":
  5790. title_addr = get_title_addr(title)
  5791. if title_addr != "":
  5792. msc += '使用规则提取的标题地址;'
  5793. tenderee_address = title_addr
  5794. else:
  5795. bid_addr = get_bid_addr(content)
  5796. if bid_addr != "":
  5797. msc += '使用规则提取的开标地址;'
  5798. tenderee_address = bid_addr
  5799. project_name = str(project_name)
  5800. tenderee = str(tenderee)
  5801. # print('招标人地址',role_addr, tenderee_address)
  5802. project_name = project_name + title if project_name not in title else title
  5803. # project_name = project_name.replace(tenderee, '')
  5804. if len(project_name)>3:
  5805. entity_list = getNers([project_name],useselffool=False) # 2024/4/26 修改为去重项目名称中所有公司名称
  5806. for tup in entity_list[0]:
  5807. if tup[2] in ['org', 'company']:
  5808. project_name = project_name.replace(tup[3], '')
  5809. text1 = "{0} {1} {2}".format(tenderee, tenderee_address, project_name)
  5810. web_source_name = str(web_source_name) # 修复某些不是字符串类型造成报错
  5811. text1 = re.sub('复合肥|铁路|公路|新会计', ' ', text1) # 预防提取错 合肥 路南 新会 等地区
  5812. if pro_addr and re.search('\w{2,}([省市县旗盟]|自治[区州县旗])', pro_addr):
  5813. msc += '## 使用项目地址输入:%s ##;' % pro_addr
  5814. rs = self.get_area(pro_addr, '')
  5815. msc += '预测结果:省份:%s, 城市:%s,区县:%s;' % (
  5816. rs['district']['province'], rs['district']['city'], rs['district']['district'])
  5817. if rs['district']['province'] != '全国':
  5818. # print('地区匹配:', msc)
  5819. return rs
  5820. # print('text1:', text1)
  5821. msc += '## 第一次预测输入:%s ##;' % text1
  5822. rs = self.get_area(text1, '') # 2024/4/22 调整第一次输入不带站源名称,避免出错
  5823. msc += '预测结果:省份:%s, 城市:%s,区县:%s;' % (
  5824. rs['district']['province'], rs['district']['city'], rs['district']['district'])
  5825. # self.f.write('%s %s \n' % (list_articles[0].id, msc))
  5826. # print('地区匹配:', msc)
  5827. if rs['district']['province'] == '全国' or rs['district']['city'] == '未知':
  5828. msc = ""
  5829. all_addr, tenderees = get_all_addr(list_entitys)
  5830. text2 = tenderees + " " + all_addr + ' ' + title
  5831. msc += '使用实体列表所有招标人+所有地址;'
  5832. # text2 += title + content if len(content)<2000 else title + content[:1000] + content[-1000:]
  5833. text2 = re.sub('复合肥|铁路|公路|新会计', ' ', text2)
  5834. # print('text2:', text2)
  5835. msc += '## 第二次预测输入:%s %s##' % (text2,web_source_name)
  5836. rs2 = self.get_area(text2, web_source_name, in_content=True)
  5837. # rs2['district']['is_in_text'] = True
  5838. if rs['district']['province'] == '全国' and rs2['district']['province'] != '全国':
  5839. rs = rs2
  5840. elif rs['district']['province'] == rs2['district']['province'] and rs2['district']['city'] != '未知':
  5841. rs = rs2
  5842. msc += '预测结果:省份:%s, 城市:%s,区县:%s' % (
  5843. rs['district']['province'], rs['district']['city'], rs['district']['district'])
  5844. # self.f.write('%s %s \n'%(list_articles[0].id, msc))
  5845. # print('地区匹配:', msc)
  5846. return rs
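# --- Editor's sketch (illustration only, not part of the original module) ---
# predict above tries several address regexes in a fixed order: project address -> tenderee
# address -> title address -> bid-opening address. A minimal demo of that kind of extraction,
# using a deliberately simplified pattern (the production patterns p1..p5 above are stricter):
def _demo_addr_regex():
    import re
    text = '项目地点:湖北省武汉市江汉区某某路10号,开标地点:线上开标。'
    simplified = '(项目|工程)(地址|地点):(?P<addr>\w{2,6}省\w{2,6}市\w{2,6}区)'
    m = re.search(simplified, text)
    return m.group('addr') if m else ''          # -> '湖北省武汉市江汉区'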
  5847. class TableTag2List():
5848. '''Convert a soup table into the completed text matrix [[td, td, td], [td, td, td]] with rowspan/colspan expanded'''
  5849. def table2list(self, table, text_process=None):
  5850. self._output = []
  5851. row_ind = 0
  5852. col_ind = 0
  5853. for row in table.find_all('tr'):
  5854. # record the smallest row_span, so that we know how many rows
  5855. # we should skip
  5856. smallest_row_span = 1
  5857. if len(row.find_all(['td', 'th'], recursive=False)) > 20:
  5858. log('未补全前表格列数大于20的不做表格处理')
  5859. return []
  5860. for cell in row.children:
  5861. if cell.name in ('td', 'th'):
  5862. # check multiple rows
  5863. # pdb.set_trace()
  5864. row_span = int(re.sub('[^0-9]', '', cell.get('rowspan'))) if cell.get('rowspan') and cell.get('rowspan').isdigit() else 1
  5865. # try updating smallest_row_span
  5866. smallest_row_span = min(smallest_row_span, row_span)
  5867. # check multiple columns
  5868. col_span = int(re.sub('[^0-9]', '', cell.get('colspan'))) if cell.get('colspan') and cell.get('colspan').isdigit() else 1
  5869. if col_span > 20: # 修复 335590254 山东港口阳光智采e平台 数据源表格第一行colspan为200超过50列造成无法提取问题
  5870. col_span = 20
  5871. # find the right index
  5872. while True:
  5873. if self._check_cell_validity(row_ind, col_ind):
  5874. break
  5875. col_ind += 1
  5876. # insert into self._output
  5877. try:
  5878. if text_process != None:
  5879. # text = [re.sub('\xa0', '', text_process(cell, final=False)), 0]
  5880. # td_text = re.sub('\xa0', '', text_process(cell, final=False))
  5881. td_text = re.sub('\s|\xa0', '', str(cell.get_text())) # 修复 370835008 td 内公司被p标签拆分为两半情况
  5882. if 'title' in cell.attrs and cell.get_text().strip().endswith('...') and cell.get_text().strip()[:-3] in cell.attrs['title']:
  5883. td_text = cell.attrs['title'] # 修复 类似 215597851 省略号隐藏内容
  5884. elif len(td_text)>30:
  5885. td_text = re.sub('\xa0', '', text_process(cell, final=False))
  5886. if td_text == "":
  5887. td_text = ' '
  5888. text = [td_text,0]
  5889. else:
  5890. text = str(cell.get_text()).strip().replace("\x06", "").replace("\x05", "").replace("\x07", "").replace('\\', '').replace("(", "(").replace(')', ')').replace('?', '')
  5891. # text = re.sub('\s', '', text)[:200] # 只需取前200字即可
  5892. text = ' ' if text == "" else text
  5893. self._insert(row_ind, col_ind, row_span, col_span, text)
  5894. except UnicodeEncodeError:
  5895. raise Exception( 'Failed to decode text; you might want to specify kwargs transformer=unicode' )
  5896. # update col_ind
  5897. col_ind += col_span
  5898. if col_ind > 50 and text_process == None: # 表格要素提取及候选人提取的 表格列数大于50的去掉
  5899. return []
  5900. # update row_ind
  5901. row_ind += smallest_row_span
  5902. col_ind = 0
  5903. return self._output
  5904. def _check_validity(self, i, j, height, width):
  5905. """
  5906. check if a rectangle (i, j, height, width) can be put into self.output
  5907. """
  5908. return all(self._check_cell_validity(ii, jj) for ii in range(i, i+height) for jj in range(j, j+width))
  5909. def _check_cell_validity(self, i, j):
  5910. """
  5911. check if a cell (i, j) can be put into self._output
  5912. """
  5913. if i >= len(self._output):
  5914. return True
  5915. if j >= len(self._output[i]):
  5916. return True
  5917. if self._output[i][j] == "":
  5918. return True
  5919. return False
  5920. def _insert(self, i, j, height, width, val):
  5921. # pdb.set_trace()
  5922. for ii in range(i, i+height):
  5923. for jj in range(j, j+width):
  5924. self._insert_cell(ii, jj, val)
  5925. def _insert_cell(self, i, j, val):
  5926. while i >= len(self._output):
  5927. self._output.append([])
  5928. while j >= len(self._output[i]):
  5929. self._output[i].append("")
  5930. if self._output[i][j] == "":
  5931. self._output[i][j] = val
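# --- Editor's sketch (illustration only, not part of the original module) ---
# A minimal usage example for TableTag2List, assuming bs4 is importable in this environment:
# a 2x2 table whose first cell spans two rows comes back with that cell repeated in both rows.
def _demo_table2list():
    from bs4 import BeautifulSoup
    html = '<table><tr><td rowspan="2">包A</td><td>公司甲</td></tr><tr><td>公司乙</td></tr></table>'
    table = BeautifulSoup(html, 'html.parser').find('table')
    return TableTag2List().table2list(table)     # -> [['包A', '公司甲'], ['包A', '公司乙']]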
  5932. class TablePremExtractor(object):
  5933. def __init__(self):
5934. '''Header regex rules for each field to extract'''
  5935. self.head_rule_dic = {
  5936. 'project_code': "(项目|招标|采购|计划|公告|包[段组件]|标[段包的]|标段(包)|分[包标])(编号|编码|代码)",
  5937. 'package_code': "(包[段组件]|标[段包]|分[包标])(序?号|$)|包号|^标段$|^品目$",
  5938. "project_name": "(包[段组件]|标[段包的项]|标段(包)|分[包标]|采购|项目|工程|货物|商品|产品|设备|通用|主要标的|^包)(名称?|内容)",
  5939. "win_sort": "排名|排序|名次|推荐顺序",
  5940. 'win_or_not': '是否(建议|推荐)?(中标|成交|中选)|是否入围|是否入库|入围结论|未(中标|成交)原因',
  5941. "tenderer": "(中标|中选|中价|成交|供货|承包|承建|承租|竞得|受让)(候选)?(人|单位|供应商|公司|企业|厂家|商家?|客户|供?方|银行)(名称|$)|^(拟定|单一来源|邀请|拟推荐(入选|入围)?)?供应商(名称)?$",
  5942. "tenderee": "(项目|采购|招标|遴选|寻源|竞价|议价|比选|委托|询比?价|比价|评选|谈判|邀标|邀请|洽谈|约谈|选取|抽取|抽选)(人|公司|单位|组织|用户|业主|主体|方|部门)(名称|$)",
  5943. "budget": "最高(投标)?限价|总价限价|控制(价格?|金额|总价)|(总价|采购)限价|上限价|拦标价|(采购|招标|项目)?预算|(预算|招标|采购|计划)金额|挂牌价",
  5944. "bid_amount": "投标[报总]?价|报价(总?金额|总价|总额)|总报价|^\w{,5}报价(([\w、/]{1,15}))?$|(中标|成交|合同))?总?(金?额|[报均总]价|价[格款]?)|承包价|含税价|经评审的价格|中标存款金?额|中标资金|存放金额",
  5945. "serviceTime": '合同期限|工期/交货期/服务期|工期\(交货期\)|合格工期|服务期限|工期' \
  5946. '|工期要求|项目周期|工期\(交货期\)|计划工期\(服务期限\)|服务时限|履行期限|服务周期|供货期限' \
  5947. '|合格工期|计划工期\(服务期\)|服务期|服务,期|交货\(完工\)时间|交付\(服务、完工\)时间' \
  5948. '|交货时间|保洁期限|维保期|管理年限|工期承诺|(服务|合同|施工|实施|工程|设计)的?(年限|期限|周期|期:)' \
  5949. '|计划工期|工期要求|服务期限?' \
  5950. '|投标工期|设计工期|合格服务周期|总工期|服务时间(范围)?|流转期限|维护期限|服务时限|交货期' \
  5951. '|完成时间|中标工期|项目周期|期限要求|周期|供货期|合同的?履行日期|计划周期' \
  5952. '|履约期限|合同的?约定完成时限|合同的?完成日期|承诺完成日期' \
  5953. '|合同起始日起|合同的?履约期|履约截止日期|承包期限|合同的?完成日期|特许经营期限' \
  5954. '|服务期间|服务履行期|委托(管理)?期限|经营期限|数量' \
  5955. '|(工期|服务期限?|交货期限?|服务履行期|合同期限?|履[行约]期限?)说明|存款期限?|(存款|存放|定存)(期|年)限' \
  5956. '|服务(有效期|年限)|本?合同有效期|协议有效期|项目期限'
  5957. }
  5958. with open(os.path.dirname(__file__)+'/header_set.pkl', 'rb') as f:
  5959. self.headerset = pickle.load(f)
  5960. self.tb = TableTag2List()
  5961. def find_header(self, td_list):
  5962. fix_td_list = [re.sub('[::]$|^[一二三四五六七八九十0-9]{1,3}、|(([\w、×*/]{1,20}))$|(不?含税)|/万?元|拟|\s', '', it) for it in td_list] # 去除表头无关信息,方便匹配判断是否为表头
  5963. header_dic = dict()
  5964. flag = False
  5965. contain_header = False
  5966. # print('表头判断:', set(fix_td_list) - self.headerset)
  5967. if len(set(fix_td_list))>=2 and len(set(fix_td_list) & self.headerset)/len(set(fix_td_list))>=0.6:
  5968. flag = True
  5969. need_replace = 0 # 是否需要替换表头名称
  5970. if re.search('^(投标银行|供应商名称)$', '|'.join(td_list)) and re.search('中标存款金?额|中标资金存放额|中标利率|(中标|成交|合同))?总?(金?额|[报均总]价|价[格款]?)', '|'.join(td_list)):
  5971. need_replace = 1
  5972. for i in range(len(td_list)) :
  5973. text = td_list[i]
  5974. text = re.sub('\s', '', text)
  5975. if need_replace and re.search('^(投标银行|供应商名称)$', text): # 银行类特殊处理
  5976. text = '中标银行'
  5977. if need_replace and re.search('排名|排序|名次|推荐顺序', text): # 银行类特殊处理
  5978. text = '序号'
  5979. if text == '备选中标人':
  5980. text = '第二候选人'
  5981. if len(re.sub('(([\w、×*/]{1,20}))$', '', text)) > 15: # 长度大于15 不进行表头匹配
  5982. continue
  5983. if re.search('未(中标|成交)原因', text): # 不提取此种表格
  5984. return flag, contain_header, dict()
  5985. num = 0
  5986. for k, v in self.head_rule_dic.items():
  5987. if re.search('评分|得分|分数|分值', text):
  5988. continue
  5989. if re.search(v, text):
  5990. if k in ['tenderer'] and re.search('是否', text):
  5991. continue
  5992. if k == 'budget' and re.search('量', text): # 预算工作量 预算采购量 等不作为预算
  5993. continue
  5994. elif k in header_dic:
  5995. if k in ['budget', 'bid_amount'] and re.search('总(价|金?额)', text): # 总价替换单价
  5996. header_dic[k] = (i, text)
  5997. num += 1
  5998. elif k == 'project_code' and text != header_dic[k][1] and 'package_code' not in header_dic\
  5999. and re.search(self.head_rule_dic['package_code'], re.sub('\s', '', ','.join(td_list)))==None: # 如果出现两次项目编号且没有包号,把第二次出现的作为包号 例:472537470
  6000. header_dic['package_code'] = (i, text)
  6001. continue
  6002. header_dic[k] = (i, text)
  6003. num += 1
  6004. if num>1:
  6005. # print('表头错误,一个td匹配到两个表头:', header_dic)
  6006. return flag, contain_header, dict()
  6007. if re.search(';金额((万?元))?;', ';'.join(td_list)): # 召回某些表格只写 金额 作为表头,不能识别为招标或中标金额
  6008. if 'tenderer' in header_dic and 'bid_amount' not in header_dic:
  6009. for i in range(len(td_list)):
  6010. text = td_list[i]
  6011. if re.search('^金额((万?元))?$',text):
  6012. header_dic['bid_amount'] = (i, text)
  6013. break
  6014. elif 'tenderee' in header_dic and 'budget' not in header_dic:
  6015. for i in range(len(td_list)):
  6016. text = td_list[i]
  6017. if re.search('^金额((万?元))?$', text):
  6018. header_dic['budget'] = (i, text)
  6019. break
  6020. if ('project_code' in header_dic or 'package_code' in header_dic or 'project_name' in header_dic) and (
  6021. 'tenderer' in header_dic or'budget' in header_dic): # 包含标段及招标金额或中标人的进行提取
  6022. return flag, contain_header, header_dic
  6023. elif ('tenderer' in header_dic) and ('bid_amount' in header_dic): # 包含中标人及中标金额的进行提取
  6024. if 'win_sort' in header_dic: # 有排名的 用候选人提取类
  6025. return flag, contain_header, dict()
  6026. elif re.search('^(候选)?供应商(名称)?', header_dic['tenderer'][1]) and 'win_or_not' not in header_dic and re.search('(中标|成交|合同))?总?(金?额|[报均总]价|价[格款]?)', header_dic['bid_amount'][1])==None: # 只有供应商名称 没排名和包号的去掉,预防错误包提取 334205629
  6027. # print('只有供应商名称 没排名和包号的去掉')
  6028. return flag, contain_header, dict()
  6029. return flag,contain_header, header_dic
  6030. elif 'tenderer' in header_dic and re.search('(中标|中选|中价|成交|竞得)(人|单位|供应商|公司|企业|厂家|商家?|客户|供?方|银行)',header_dic['tenderer'][1]): # 有中标人,且有明确中标关键词的进行提取
  6031. return flag, contain_header, header_dic
  6032. elif 'tenderer' in header_dic and 'serviceTime' in header_dic:
  6033. return flag, contain_header, header_dic
  6034. elif len(set(fix_td_list) & self.headerset) >= 2 or (len(set(fix_td_list)) == 2 and len(set(td_list) & self.headerset) >= 1): # 如果包含两个表头以上或 只有两列且包含一个表头
  6035. contain_header = True
  6036. return flag, contain_header, dict()
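# --- Editor's sketch (illustration only, not part of the original module) ---
# find_header above decides, for each cell of a candidate header row, which extraction field it
# maps to by testing the regexes in head_rule_dic. A stripped-down version of that mapping on a
# hypothetical header row, using simplified variants of two of the real patterns:
def _demo_match_header():
    import re
    rules = {'tenderer': '(中标|成交)(候选)?(人|单位|供应商)(名称|$)',
             'bid_amount': '(中标|成交)总?(金?额|[报均总]价)'}
    td_list = ['序号', '中标人名称', '中标金额(万元)']
    header_dic = {}
    for i, cell in enumerate(td_list):
        for field, ptn in rules.items():
            if re.search(ptn, cell):
                header_dic[field] = (i, cell)
    return header_dic                            # -> {'tenderer': (1, '中标人名称'), 'bid_amount': (2, '中标金额(万元)')}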
  6037. def get_role(self, text, nlp_enterprise):
6038. '''
6039. Extract the role entity contained in the string text
6040. :param text: string to extract the entity from
6041. :param nlp_enterprise: list of role entities found in the announcement
6042. :return:
6043. '''
  6044. text = re.sub('联合体:|联合体(成员|单位)[12345一二三四五]?:|(联合体)?成员单位[12345一二三四五]?:|特殊普通合伙:|[((][主成][))]'
  6045. , ',', text)
  6046. text = re.sub('\s', '', text) # 修复 370835008 表格中实体中间有\n
  6047. text = re.sub('[一二三四五六七八九十]+标段:|标段[一二三四五六七八九十]+:', '', text) # 2024/4/22 修复 372839375 三标段:宁夏一山科技有限公司
  6048. text = re.sub('1[3-9]\d{9}|\d{3}-\d{8}|\d{4}-\d{7}', '', text) # 2024/4/23 去除电话
  6049. if text in nlp_enterprise:
  6050. return text
  6051. if len(text) > 50 or len(text)<4:
  6052. return ''
  6053. ners = getNers([text], useselffool=True)
  6054. roles = []
  6055. if ners:
  6056. for ner in ners[0]:
  6057. if ner[2] in ['org', 'company']:
  6058. roles.append(ner[3])
  6059. elif ner[2] in ['location'] and re.search('^\w{3,10}(海关|殡仪馆|店|村委会|纪念馆|监狱|管教所|修养所|社区|农场|林场|羊场|猪场|石场)$', ner[3]):
  6060. roles.append(ner[3])
  6061. if roles and len(''.join(roles)) > len(text)*0.8:
  6062. return roles[0]
  6063. else:
  6064. return ''
  6065. def extract_from_df(self, df, headers, web_source_name):
  6066. prem_dic = {}
  6067. previous_package = "" # 上一行包号
  6068. multi_same_package = False # 非连续的重复包号
  6069. package_fix2raw = dict() # 处理后包号:处理前包号 字典
  6070. link_set = set()
  6071. tenderer_list = [] # 保存所有中标人
  6072. serviceTime_list = []
  6073. not_package = True if 'project_name' in headers and re.search('(货物|商品|产品|通用|主要标的)(名称?|内容)', headers['project_name'][1]) and \
  6074. 'package_code' not in headers and 'budget' not in headers and "bid_amount" not in headers else False
  6075. if set(['project_code', 'package_code', 'tenderee', 'tenderer']) & set(headers) == set() and ('project_name' not in headers # 补充没有项目名称或有项目名称且是货物的才过滤掉
  6076. or re.search('(货物|商品|产品|设备|通用|主要标的)(名称?|内容)', headers['project_name'][1])): # 20240131修复只有货物名称及最高限价的错误作为多包 396636683; 补充避免423647863采购意向被过滤
  6077. # print('没有包号及角色的不要')
  6078. return {}
  6079. for i in df.index:
  6080. same_package = False # 连续重复包号,一般是 rowspan 造成;一包 多个采购
  6081. project_code = df.loc[i, headers['project_code'][0]].strip() if "project_code" in headers else ""
  6082. package_code_raw = df.loc[i, headers['package_code'][0]].strip() if "package_code" in headers else ""
  6083. project_name = df.loc[i, headers['project_name'][0]].strip() if "project_name" in headers else ""
  6084. tenderee = df.loc[i, headers['tenderee'][0]].strip() if "tenderee" in headers else ""
  6085. tenderer = df.loc[i, headers['tenderer'][0]].strip() if "tenderer" in headers else ""
  6086. budget_ = df.loc[i, headers['budget'][0]].strip() if "budget" in headers else ""
  6087. bid_amount_ = df.loc[i, headers['bid_amount'][0]].strip() if "bid_amount" in headers else ""
  6088. win_sort = df.loc[i, headers['win_sort'][0]].strip() if "win_sort" in headers else ""
  6089. win_or_not = df.loc[i, headers['win_or_not'][0]].strip() if "win_or_not" in headers else ""
  6090. serviceTime = df.loc[i, headers['serviceTime'][0]].strip() if "serviceTime" in headers else ""
  6091. if set([project_code, package_code_raw, project_name,tenderee,tenderer,budget_,bid_amount_]) & self.headerset != set(): # 只要有一项为表头 停止匹配
  6092. # print('只要有一项为表头 停止匹配', set([project_code, package_code_raw, project_name,tenderee,tenderer,budget_,bid_amount_,win_sort]) & self.headerset)
  6093. break
  6094. if len(set([project_code, package_code_raw, project_name,tenderee,tenderer,budget_,bid_amount_,win_sort])- set(['', ' '])) < 2 and tenderer=='': # 内容为空或全部一样 停止匹配
  6095. # print('内容为空或全部一样 停止匹配')
  6096. break
  6097. if re.search('详见', project_name): # 去除某些表达: 详见招标文件
  6098. project_name = ""
  6099. if package_code_raw == "" and re.search('第?[0-9一二三四五六七八九十a-zA-Z]{1,4}(标[段号的包项]|([分子]?包|包[组件号]))$|^(标[段号的包项]|([分子]?包|包[组件号]))号?:?[0-9一二三四五六七八九十a-zA-Z]{1,4}$', project_name):
  6100. package_code_raw = project_name
  6101. project_name = ""
  6102. package_code = package_code_raw
  6103. if re.search('合计|总计', package_code+project_code):
  6104. continue
  6105. if package_code + project_code == previous_package: # 处理 208162730 一个包采购多种东西情况
  6106. same_package = True
  6107. if previous_package!="": # 有包号或项目编号且跟上一行相同时,去除项目名称
  6108. project_name = ''
  6109. previous_package = package_code + project_code
  6110. if win_sort != "" and re.search('排名|排序|名次|推荐顺序', headers['win_sort'][1]): # 此类型表由 CandidateExtractor类提取 防止类似 328485591 作为多包
  6111. break
  6112. if win_or_not != "" and (re.search('(建议|推荐)(中标|成交|中选)|是|^(中标|成交|中选)', win_or_not)==None or re.search('\w', win_or_not)==None): # 2024/04/2 修复 252208201 为空的不中标
  6113. continue
  6114. if "win_sort" in headers and win_sort == "": # '表头有是否中标,内容却空白的,过滤掉'
  6115. continue
  6116. if win_sort == "" and "tenderer" in headers and re.search('候选|入围|入选', headers['tenderer'][1]) and re.search('推荐的?((中标|成交|中选)候选人|(候选|入围|入选)供应商)', headers['tenderer'][1])==None:
  6117. tenderer = ""
  6118. if tenderer in ['采购失败', '废标']: # 避免类似 353867205 这篇只提取到一个
  6119. continue
  6120. # tenderee = tenderee if self.is_role(tenderee) else ""
  6121. # tenderer = tenderer if self.is_role(tenderer) else ""
  6122. package = uniform_package_name(package_code) if package_code else '自增1' # 没有包号的自动编号的修改为提取到多少个包,某些行未必中标
  6123. if project_name != "" and package.startswith('自增'):
  6124. pk_l = find_package(project_name)
  6125. if len(pk_l)==1:
  6126. package = uniform_package_name(pk_l[0].group(0))
  6127. elif re.search('[一二三四五六七八九十]+标段:|标段[一二三四五六七八九十]+:', tenderer) and package.startswith('自增'):
  6128. pk_l = find_package(tenderer)
  6129. if len(pk_l) == 1:
  6130. package = uniform_package_name(pk_l[0].group(0))
  6131. tenderee = self.get_role(tenderee, self.nlp_enterprise) if tenderee!="" else tenderee
  6132. tenderer = self.get_role(tenderer, self.nlp_enterprise) if tenderer!='' else tenderer
  6133. tenderee = cut_repeat_name(tenderee)
  6134. tenderer = cut_repeat_name(tenderer)
  6135. if len(set([project_code, package_code, project_name, tenderee, tenderer, budget_, bid_amount_])) < 2:
  6136. break
  6137. if not_package:
  6138. if (project_code, package_code, tenderee, tenderer, budget_, bid_amount_) in link_set:
  6139. continue
  6140. link_set.add((project_code, package_code, tenderee, tenderer, budget_, bid_amount_))
  6141. else:
  6142. if (project_code, package_code, project_name, tenderee, tenderer, budget_, bid_amount_) in link_set:
  6143. continue
  6144. link_set.add((project_code, package_code, project_name, tenderee, tenderer, budget_, bid_amount_))
  6145. if project_code != "":
  6146. uni_project_code= uniform_package_name(project_code)
  6147. if uni_project_code != "" and uni_project_code!=package:
  6148. if package.startswith('自增'): # 没有包号有项目编号的,直接用项目编号
  6149. package = uni_project_code
  6150. else:
  6151. # print('重组包号:', '%s_%s'%(uni_project_code, package))
  6152. package = '%s_%s'%(uni_project_code, package) # 同时包号项目编号及包号的,组合起来做包号
  6153. if package_code_raw!='':
  6154. if multi_same_package == False and package not in package_fix2raw: # 如果处理后的标段号 已经在列表里面,采用原始标段号文本
  6155. package_fix2raw[package] = package_code_raw
  6156. elif same_package == False:
  6157. multi_same_package = True
  6158. if multi_same_package:
  6159. package = package_code_raw
  6160. if package not in prem_dic or not same_package:
  6161. prem_dic[package] = {
  6162. 'code': '',
  6163. 'name': '',
  6164. 'roleList': [],
  6165. 'tendereeMoney': 0,
  6166. 'tendereeMoneyUnit': ""
  6167. }
  6168. prem_dic[package]['code'] = project_code
  6169. prem_dic[package]['name'] = project_name
  6170. if budget_ != "":
  6171. if len(re.sub('[金额万元()()::零壹贰叁肆伍陆柒捌玖拾佰仟萬億圆十百千万亿元角分¥整\s\d,.]|人民币|不?含税', '', budget_)) > 5: # 金额字段出现超过5个非金额字符,中断匹配
  6172. prem_dic.pop(package)
  6173. break
  6174. budget_header = headers['budget'][1] if 'budget' in headers else ''
  6175. budget, money_unit = money_process(budget_, budget_header) if re.search('[%%‰折]|浮率', budget_)==None else (0, '')
  6176. if (re.search('费率|下浮率|[%%‰折]',
  6177. budget_header + budget_) and budget < 100) or budget > 50000000000: # 如果是费率或大于500亿的金额改为0
  6178. budget = 0
  6179. if budget > 0:
  6180. if same_package and prem_dic[package]['tendereeMoney'] != budget: # 处理 类似 136839070 一包多物品多预算
  6181. prem_dic[package]['tendereeMoney'] += budget
  6182. else:
  6183. prem_dic[package]['tendereeMoney'] = budget
  6184. prem_dic[package]['tendereeMoneyUnit'] = money_unit
  6185. if tenderee and not same_package:
  6186. prem_dic[package]['roleList'].append({
  6187. "address": "",
  6188. "linklist": [],
  6189. "role_money": {
  6190. "discount_ratio": "",
  6191. "downward_floating_ratio": "",
  6192. "floating_ratio": "",
  6193. "money": 0,
  6194. "money_unit": ""
  6195. },
  6196. "role_name": "tenderee",
  6197. "role_text": tenderee,
  6198. "serviceTime": ""
  6199. })
  6200. if tenderer:
  6201. if len(re.sub('[金额万元()()::零壹贰叁肆伍陆柒捌玖拾佰仟萬億圆十百千万亿元角分¥整\s\d,.]|人民币|不?含税', '',
  6202. bid_amount_)) > 5: # 金额字段出现超过5个非金额字符,中断匹配
  6203. prem_dic.pop(package)
  6204. break
  6205. bid_amount, money_unit = money_process(bid_amount_, headers['bid_amount'][1]) if bid_amount_ != "" and re.search('[%%‰折]|浮率', bid_amount_)==None and 'bid_amount' in headers else (0, '')
  6206. if web_source_name == '河钢供应链管理平台' and 'bid_amount' in headers and re.search('[%%‰折]|浮率', bid_amount_) == None and bid_amount == 0: # 有中标金额字段却金额为0的过滤掉,防止类似 河钢供应链管理平台 站源错误,金额不为0的才算中标
  6207. if len(prem_dic[package]['roleList']) == 0 and prem_dic[package]['tendereeMoney'] == 0: # 只有项目编号和名称的包 丢弃
  6208. prem_dic.pop(package)
  6209. continue
  6210. bid_amount_header = headers['bid_amount'][1] if bid_amount_ != "" else ''
  6211. if (re.search('费率|下浮率|[%%‰折]',
  6212. bid_amount_header + bid_amount_) and bid_amount < 100) or bid_amount > 50000000000: # 如果是费率或大于500亿的金额改为0
  6213. bid_amount = 0
  6214. if serviceTime:
  6215. serviceTime_text = headers['serviceTime'][1] + serviceTime if headers['serviceTime'][1][-1] in [':',':'] else headers['serviceTime'][1] + ':' + serviceTime
  6216. # print('serviceTime_text',serviceTime_text)
  6217. serviceTime = extract_servicetime(serviceTime_text)
  6218. serviceTime.sort(key=lambda x:x.get('begin_index',0))
  6219. serviceTime = extract_serviceTime(serviceTime[0]['body'],"") if serviceTime else ""
  6220. # print(serviceTime)
  6221. if not same_package or len(prem_dic[package]['roleList'])==0:
  6222. prem_dic[package]['roleList'].append({
  6223. "address": "",
  6224. "linklist": [],
  6225. "role_money": {
  6226. "discount_ratio": "",
  6227. "downward_floating_ratio": "",
  6228. "floating_ratio": "",
  6229. "money": bid_amount,
  6230. "money_unit": money_unit
  6231. },
  6232. "role_name": "win_tenderer",
  6233. "role_text": tenderer,
  6234. "serviceTime": serviceTime
  6235. })
  6236. elif prem_dic[package]['roleList'] and prem_dic[package]['roleList'][-1].get('role_name', '')=='win_tenderer':
  6237. if 'multi_winner' not in prem_dic[package]['roleList'][-1]:
  6238. prem_dic[package]['roleList'][-1]['multi_winner'] = prem_dic[package]['roleList'][-1]['role_text']
  6239. prem_dic[package]['roleList'][-1]['multi_winner'] += ','+ tenderer
  6240. elif tenderer not in prem_dic[package]['roleList'][-1]['multi_winner']:
  6241. prem_dic[package]['roleList'][-1]['multi_winner'] += ','+ tenderer
  6242. if 'other_winner_dic' not in prem_dic[package]['roleList'][-1]:
  6243. prem_dic[package]['roleList'][-1]['other_winner_dic'] = []
  6244. prem_dic[package]['roleList'][-1]['other_winner_dic'].append({'role_text': tenderer, "money": bid_amount, "money_unit": money_unit,"serviceTime":serviceTime})
  6245. tenderer_list.append(tenderer)
  6246. serviceTime_list.append(serviceTime)
  6247. if len(prem_dic[package]['roleList']) == 0 and prem_dic[package]['tendereeMoney'] == 0: # 只有项目编号和名称的 丢弃 并不再继续往下匹配
  6248. prem_dic.pop(package)
  6249. # break # 注释掉避免 400084571 某些包废标 中断匹配
  6250. if multi_same_package: # 预处理后包号重复的,使用原始包号
  6251. for k, v in package_fix2raw.items():
  6252. if k in prem_dic:
  6253. prem_dic[v] = prem_dic.pop(k)
  6254. if len(tenderer_list)>2 and len(set(tenderer_list))==1 and "package_code" not in headers: # 没提取到包号且中标人一样应该是错误多包,需去掉多包 例 244355092 281854766
  6255. total_money = 0
  6256. for v in prem_dic.values():
  6257. for d in v['roleList']:
  6258. if d['role_name'] == "win_tenderer":
  6259. total_money += d['role_money']['money']
  6260. if 'other_winner_dic' in d:
  6261. for other in d['other_winner_dic']:
  6262. total_money += other.get('money', 0)
  6263. return {'自增1': {
  6264. 'code': '',
  6265. 'name': '',
  6266. 'roleList': [{
  6267. "address": "",
  6268. "linklist": [],
  6269. "role_money": {
  6270. "discount_ratio": "",
  6271. "downward_floating_ratio": "",
  6272. "floating_ratio": "",
  6273. "money": total_money,
  6274. "money_unit": ''
  6275. },
  6276. "role_name": "win_tenderer",
  6277. "role_text": tenderer_list[0],
  6278. "serviceTime": serviceTime_list[0]
  6279. }],
  6280. 'tendereeMoney': 0,
  6281. 'tendereeMoneyUnit': ""
  6282. }}
  6283. return prem_dic
  6284. def update_prem(self, rs_dic, tmp_dic):
'''
Merge tmp_dic into rs_dic (the accumulated prem result).
:param rs_dic: result dict returned so far
:param tmp_dic: newly extracted result dict to merge in
:return:
'''
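# Commented-out, hedged sketch of the merge behaviour (package keys and role values are invented):
#
#   rs_dic  = {'Project': {'code': '', 'name': '', 'tendereeMoney': 0, 'tendereeMoneyUnit': '',
#                          'roleList': [{'role_name': 'win_tenderer', 'role_text': '甲公司',
#                                        'role_money': {'money': 0, 'money_unit': ''}}]}}
#   tmp_dic = {'Project': {'code': 'XM-2024-001', 'name': '', 'tendereeMoney': 0, 'tendereeMoneyUnit': '',
#                          'roleList': [{'role_name': 'win_tenderer', 'role_text': '甲公司',
#                                        'role_money': {'money': 100000, 'money_unit': '元'}}]}}
#   self.update_prem(rs_dic, tmp_dic)
#   # rs_dic['Project']['code'] is filled in from tmp_dic, and the roleList whose first
#   # role_money is 0 is replaced by the one carrying an actual amount; packages that
#   # exist only in tmp_dic would be copied over as-is.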
  6291. if '自增1' in tmp_dic and '自增1' not in rs_dic and len(tmp_dic)==len(rs_dic):
  6292. pass
  6293. else:
  6294. for pack in tmp_dic:
  6295. if pack in rs_dic:
  6296. for k in tmp_dic[pack]:
  6297. if rs_dic[pack][k] in ['', 0]:
  6298. rs_dic[pack][k] = tmp_dic[pack][k]
  6299. elif rs_dic[pack][k] == []:
  6300. rs_dic[pack][k] = tmp_dic[pack][k]
  6301. elif k == 'roleList' and len(rs_dic[pack][k])>0 and rs_dic[pack][k][0].get('role_money', {}).get('money', 0) == 0:
  6302. rs_dic[pack][k] = tmp_dic[pack][k]
  6303. else:
  6304. rs_dic[pack] = tmp_dic[pack]
  6305. def get_prem(self, soup, web_source_name=''):
  6306. tables = soup.find_all('table')
  6307. tables.reverse()
  6308. rs_dic = {}
  6309. for table in tables:
  6310. text = table.text.strip()
  6311. previous = table.findPreviousSibling()
  6312. text2 = previous.text.strip() if previous else ""
  6313. # text2 = table.findPreviousSibling().text.strip() if table.findPreviousSibling() != None else ""
if re.search('项目业主|业\s*主', text) and re.search('业\s*绩', text+text2): # tables that contain past-performance (业绩) records are filtered out and not processed
  6315. tb_ex = table.extract()
  6316. if previous:
  6317. sib = previous.extract()
  6318. continue
  6319. trs = self.tb.table2list(table)
  6320. # table.extract()
  6321. i = 0
  6322. headers = ""
  6323. table_prem = {}
  6324. while i < len(trs) - 1:
  6325. flag_, contain_header_, headers_ = self.find_header(trs[i])
  6326. if flag_ and headers_ != dict():
  6327. table_items = []
  6328. headers = headers_
  6329. for j in range(i + 1, len(trs)):
  6330. if len(trs[j]) == len(trs[i]):
  6331. flag_2, contain_header_2, headers_2 = self.find_header(trs[j])
  6332. if flag_2 or contain_header_2:
  6333. if j == i+1 and flag_2:
  6334. if len(headers_)<len(headers_2):
  6335. headers = headers_2
  6336. continue
  6337. break
  6338. else:
  6339. table_items.append(trs[j])
  6340. else:
  6341. # print('表头,内容 列数不一致', len(trs[i]), len(trs[j]))
  6342. break
  6343. if len(table_items) > 0:
  6344. df = pd.DataFrame(table_items)
  6345. prem_ = self.extract_from_df(df, headers, web_source_name)
  6346. # rs_dic.update(prem_)
  6347. # table_prem.update(prem_)
  6348. self.update_prem(table_prem, prem_)
  6349. i = j - 1
  6350. i += 1
if table_prem and 'project_code' not in headers and 'package_code' not in headers and '自增1' in table_prem and table.find_previous_sibling(): # no package/section info inside the table: look for it in the preceding sibling tag
  6352. sib = table.find_previous_sibling()
  6353. sib_text = sib.get_text()
  6354. ser_sib = re.search('第?[0-9一二三四五六七八九十a-zA-Z]{1,4}(标[段号的包项]|([分子]?包|包[组件号]))|(标[段号的包项]|([分子]?包|包[组件号]))号?:?[0-9一二三四五六七八九十a-zA-Z]{1,4}|包名:[0-9一二三四五六七八九十]{1,4}', sib_text)
  6355. if sib.name in ['p','div','dl','ol','ul','h1','h2','h3','h4','h5','h6'] and len(sib_text)<100 and ser_sib:
  6356. package_sib = ser_sib.group(0)
  6357. package_sib = uniform_package_name(package_sib)
  6358. table_prem[package_sib] = table_prem.pop('自增1')
  6359. if table_prem:
  6360. # rs_dic.update(table_prem)
  6361. self.update_prem(rs_dic, table_prem)
  6362. table.extract()
  6363. return rs_dic
  6364. def predict(self, html, nlp_enterprise, web_source_name=""):
  6365. html = re.sub("<html>|</html>|<body>|</body>","",html)
  6366. html = re.sub("##attachment##","",html)
  6367. soup = BeautifulSoup(html, 'lxml')
  6368. richText = soup.find(name='div', attrs={'class': 'richTextFetch'})
  6369. self.nlp_enterprise = nlp_enterprise
  6370. in_attachment = False
  6371. if richText:
  6372. richText = richText.extract() # 过滤掉附件
  6373. prem = self.get_prem(soup, web_source_name)
  6374. if prem == {} and richText:
  6375. prem = self.get_prem(richText, web_source_name)
  6376. in_attachment = True
if len(prem) == 1: # a single package whose key is an auto-generated ('自增') number is most likely not a real package split; rename it to Project
  6378. k = list(prem)[0]
  6379. if k.startswith('自增'):
  6380. prem['Project'] = prem.pop(k)
  6381. return prem, in_attachment
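# Commented-out, hedged usage sketch (it mirrors the __main__ example at the bottom of
# this file; the html string and entity list here are placeholders):
#
#   tb_extract = TablePremExtractor()
#   prem, in_attachment = tb_extract.predict(
#       html,                                   # announcement html containing result tables
#       ['江苏中联铸本混凝土有限公司'],         # nlp_enterprise: entities already recognised
#       web_source_name='河钢供应链管理平台')
#   # prem maps a package key (or 'Project') to {'code', 'name', 'roleList', 'tendereeMoney', ...};
#   # in_attachment is True when the tables were only found inside the 'richTextFetch' div.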
  6382. class CandidateExtractor(object):
  6383. def __init__(self):
'''Header rules for each element'''
  6385. self.head_rule_dic = {
  6386. 'package_code': "(包[段组件]|标[段包]|分[包标])(序?号|$)|包号|^标段$",
  6387. 'project_code': "(项目|招标|采购|计划|公告|包[段组件]|标[段包的]|标段(包)|分[包标])(编号|编码)",
  6388. "project_name": "(包[段组件]|标[段包的项]|标段(包)|分[包标]|采购|项目|工程|货物|商品|产品|设备|通用|主要标的|^包)(名称?|内容)|^标的$",
  6389. "win_sort": "排名|排序|名次|推荐顺序",
  6390. 'win_or_not': '是否(建议|推荐)?(中标|成交)|是否入围|是否入库|入围结论',
  6391. "candidate": "((候选|入围|入选|投标)(供应商库)?的?(人|人?单位|机构|供应商|供货商|服务商|投标人|(中标)?公司|(中标)?企业|银行)|(通过)?名单|中标候选人)(名称|名单|全称|\d)?$|^供应商(名称|信息)?$|投标个人/单位", #补充 368295593 投标个人/单位 提取
  6392. "bid_amount": "投标[报总]?价|报价(总?金额|总价|总额)|总报价|^\w{,5}报价(([\w、/]{1,15}))?$|(中标|成交|合同))?([金总]额|[报均总]价|价[格款]?)|承包价|含税价|经评审的价格",
  6393. "win_tenderer": "第一名|第一(中标|成交)?候选人",
  6394. "second_tenderer": "第二名|第二(中标|成交)?候选人",
  6395. "third_tenderer": "第三名|第三(中标|成交)?候选人",
  6396. }
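# Hedged illustration of how these header rules are meant to fire (the header strings
# below are invented examples, not taken from a real announcement):
#
#   import re
#   re.search(self.head_rule_dic['win_sort'], '排名')              # match -> ranking column
#   re.search(self.head_rule_dic['win_tenderer'], '第一中标候选人')  # match -> first-ranked bidder column
#   re.search(self.head_rule_dic['candidate'], '中标候选人名称')    # match -> candidate-name column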
'''Regex for candidates appearing outside tables'''
  6398. # self.p = '((候选|入围|入选|投标)(供应商库)?的?(人|人?单位|机构|供应商|供货商|服务商|投标人|(中标)?公司|(中标)?企业|应答人)|(通过)?名单)(名称|名单|全称|\d)?:$'
  6399. self.p = '((候选|入围|入选|投标|报价|成交|中标|中选|供[货应]|应答)(人|方|人?单位|机构|厂?商|商家|服务商|公司|企业)|(通过|入围)名单)(名称|名单|全称|\d)?:?$'
  6400. self.tb = TableTag2List()
  6401. with open(os.path.dirname(__file__)+'/header_set.pkl', 'rb') as f:
  6402. self.headerset = pickle.load(f)
  6403. def find_header(self, td_list):
fix_td_list = [re.sub('[::]$|^[一二三四五六七八九十0-9]{1,3}、|(([\w、×*/]{1,20}))$|(不?含税)|/万?元|拟|\s', '', it) for it in td_list] # strip noise from header cells so it is easier to decide whether this row is a header
  6405. header_dic = dict()
  6406. flag = False
  6407. contain_header = False
  6408. if len(set(fix_td_list))>=2 and len(set(fix_td_list) & self.headerset)/len(set(fix_td_list))>=0.6:
  6409. flag = True
  6410. for i in range(len(td_list)) :
  6411. text = td_list[i]
  6412. if len(text) > 15: # 长度大于15 不进行表头匹配
  6413. continue
  6414. if re.search('未(中标|成交)原因', text): # 不提取此种表格
  6415. return flag, contain_header, dict()
  6416. num = 0
  6417. for k, v in self.head_rule_dic.items():
  6418. if k == 'candidate' and re.search('第[一二三]名|第[一二三](中标|成交)?候选人', text):
  6419. continue
  6420. if re.search('评分|得分|分数|分值', text):
  6421. continue
  6422. if re.search(v, text):
  6423. if k in ['candidate', 'win_tenderer', 'second_tenderer', 'third_tenderer'] and re.search('是否', text):
  6424. continue
  6425. header_dic[k] = (i, text)
  6426. # if k != 'candidate': # candidate 可与前三候选重复
  6427. num += 1
  6428. if 'win_tenderer'in header_dic and 'second_tenderer' in header_dic and 'candidate' in header_dic:
  6429. header_dic.pop('candidate')
  6430. if num>1:
  6431. # print('表头错误,一个td匹配到两个表头:', header_dic)
  6432. return flag, contain_header, dict()
  6433. if ('candidate' in header_dic and 'win_sort' in header_dic) or ('win_tenderer' in header_dic and 'second_tenderer' in header_dic): # 有排名才返回表头进行提取
  6434. return flag, contain_header, header_dic
  6435. elif len(set(fix_td_list) & self.headerset) >= 2 or (len(set(fix_td_list)) == 2 and len(set(fix_td_list) & self.headerset) >= 1): # 如果包含两个表头以上或 只有两列且包含一个表头
  6436. contain_header = True
  6437. return flag, contain_header, dict()
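# Hedged sketch of what find_header returns for one table row (the td texts are invented;
# flag additionally depends on the pickled header_set loaded in __init__):
#
#   flag, contain_header, header_dic = self.find_header(['序号', '中标候选人名称', '投标报价(元)', '排名'])
#   # header_dic would look like {'candidate': (1, '中标候选人名称'),
#   #                             'bid_amount': (2, '投标报价(元)'),
#   #                             'win_sort': (3, '排名')}
#   # i.e. each value is (column_index, header_text); the dict is only returned when a
#   # ranking column (or explicit first/second candidate columns) is present, otherwise
#   # an empty dict comes back.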
  6438. def is_role(self, text):
  6439. if len(text) > 25 or len(text) < 4:
  6440. return False
  6441. elif len(re.findall('有限责?任?公司', text)) > 1:
  6442. return False
  6443. elif re.search('[\w()]{4,}(有限责?任?公司|学校|学院|大学|中学|小学|医院|管理处|办公室|委员会|村委会|纪念馆|监狱|管教所|修养所|社区|农场|林场|羊场|猪场|石场|村|幼儿园|厂|中心|超市|门市|商场|工作室|文印室|城|部|店|站|馆|行|社|处)$', text):
  6444. return True
  6445. else:
  6446. ners = selffool.ner(text)
  6447. if len(ners[0]) == 1 and ('company' in ners[0][0] or 'org' in ners[0][0]):
  6448. return True
  6449. return False
  6450. def get_role(self, text, nlp_enterprise):
'''
Extract the role entity (organisation name) contained in the string text.
:param text: string to extract the entity from
:param nlp_enterprise: list of role entities recognised in the announcement
:return:
'''
  6457. text = re.sub('联合体:|联合体(成员|单位)[12345一二三四五]?:|(联合体)?成员单位[12345一二三四五]?:|特殊普通合伙:|[((][主成][))]'
  6458. , ',', text)
  6459. text = re.sub('\s', '', text) # 修复 370835008 表格中实体中间有\n
  6460. if text in nlp_enterprise:
  6461. return text
  6462. if len(text) > 50 or len(text)<4:
  6463. return ''
  6464. ners = getNers([text], useselffool=True)
  6465. roles = []
  6466. if ners:
  6467. for ner in ners[0]:
  6468. if ner[2] in ['org', 'company', 'location']:
  6469. roles.append(ner[3])
  6470. if roles and len(''.join(roles)) > len(text)*0.8:
  6471. return roles[0]
  6472. else:
  6473. return ''
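# Hedged sketch of get_role behaviour (inputs invented; actual NER output may vary):
#
#   self.get_role('甲公司', ['甲公司'])    # exact hit in nlp_enterprise -> '甲公司'
#   self.get_role('详见附件', ['甲公司'])  # no org/company span recognised -> ''
#   # Joint-venture prefixes such as '联合体:' or '(主)'/'(成)' markers are stripped
#   # before the lookup; otherwise NER is run and the first org/company/location span is
#   # returned only when it covers more than 80% of the text.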
  6474. def extract_from_df(self, df, headers):
  6475. print('表头: ', headers)
  6476. prem_dic = {}
  6477. link_set = set()
  6478. candidate_set = set()
  6479. role_dic = dict() # 保存一二三候选人并排的情况
  6480. findtop3 = False
  6481. findmoney = False
  6482. line_num = 0
  6483. line_package = None
  6484. package_flag = 0
  6485. if "package_code" in headers:
  6486. package_flag = 1
  6487. if len(df)!=len(set(df[headers["package_code"][0]])): # 如果有包号但重复,进行下列判断是否和跟其他字段组合包号
  6488. if "project_code" in headers and df[headers["project_code"][0]][0] != df[headers["package_code"][0]][0]:
  6489. package_flag = 2
  6490. elif "project_name" in headers and find_package(df[headers["package_code"][0]][0]):
  6491. package_flag = 3
  6492. for i in df.index:
  6493. package_code_raw = df.loc[i, headers['package_code'][0]].strip() if "package_code" in headers else ""
  6494. project_code = df.loc[i, headers['project_code'][0]].strip() if "project_code" in headers else ""
  6495. project_name = df.loc[i, headers['project_name'][0]].strip() if "project_name" in headers else ""
  6496. candidate_ = df.loc[i, headers['candidate'][0]].strip() if "candidate" in headers else ""
  6497. win_or_not = df.loc[i, headers['win_or_not'][0]].strip() if "win_or_not" in headers else ""
  6498. # budget_ = df.loc[i, headers['budget'][0]] if "budget" in headers else ""
  6499. bid_amount_ = df.loc[i, headers['bid_amount'][0]].strip() if "bid_amount" in headers else ""
  6500. win_sort = df.loc[i, headers['win_sort'][0]].strip() if "win_sort" in headers else ""
  6501. win_tenderer = df.loc[i, headers['win_tenderer'][0]].strip() if "win_tenderer" in headers else ""
  6502. second_tenderer = df.loc[i, headers['second_tenderer'][0]].strip() if "second_tenderer" in headers else ""
  6503. third_tenderer = df.loc[i, headers['third_tenderer'][0]].strip() if "third_tenderer" in headers else ""
  6504. if set([package_code_raw, candidate_, win_or_not, bid_amount_, win_tenderer, second_tenderer, third_tenderer]) & self.headerset != set(): # 包含表头, 停止匹配 # 排除 ,win_sort 避免367940050漏提取
  6505. # print('包含表头, 停止匹配')
  6506. break
  6507. if len(set([package_code_raw, candidate_,win_sort, win_or_not, bid_amount_, win_tenderer, second_tenderer, third_tenderer]) - set(['', ' '])) < 2: # 全部为空或内容一样 停止匹配
  6508. # print('全部为空或内容一样 停止匹配')
  6509. if len(set(df.loc[i,:]))==1 and re.search('^第?([一二三四五六七八九十]{1,3}|[a-zA-Z0-9-]{,9})?[分子]?(标[段包项]?|包[组件标]?|合同[包段])([一二三四五六七八九十]{1,3}|[a-zA-Z0-9-]{,9})?$', win_sort):
  6510. line_package = win_sort
  6511. continue
  6512. else:
  6513. break
  6514. if candidate_ != "" and win_sort == "" and headers['candidate'][0] > 0: # 修复某些表头不说 排名,直接用候选人代替
  6515. col_indx = headers['candidate'][0] -1
  6516. pre_col = df.loc[i, col_indx]
  6517. if col_indx > 0 and pre_col == candidate_:
  6518. pre_col = df.loc[i, col_indx - 1]
  6519. if re.search('第[一二三]名|第[一二三](中标)?候选人', pre_col):
  6520. win_sort = pre_col
  6521. package_code = package_code_raw
  6522. if package_code == '' and line_package:
  6523. package_code = line_package
  6524. # candidate = candidate_ if self.is_role(candidate_) else ""
  6525. # tenderer = tenderer if self.is_role(tenderer) else ""
  6526. candidate = self.get_role(candidate_, self.nlp_enterprise)
  6527. # if len(set([project_code, package_code, project_name, tenderee, tenderer, budget_, bid_amount_])) < 2:
  6528. # break
  6529. if(candidate_,win_tenderer, second_tenderer,third_tenderer, bid_amount_,package_code,project_code,win_sort) in link_set:
  6530. continue
  6531. link_set.add((candidate_, win_tenderer, second_tenderer, third_tenderer, bid_amount_,package_code,project_code,win_sort))
  6532. package = package_code
  6533. if package == "" and project_code != "": # 修复 395747178 多项目 只提取到一个
  6534. package = project_code
  6535. package = uniform_package_name(package) if package !="" else "Project"
  6536. if package_flag == 2 and project_code != "":
  6537. project_code_pk = uniform_package_name(project_code)
  6538. package = "%s_%s"%(project_code_pk, package)
  6539. elif package_flag == 3 and project_name != "":
  6540. for iter in find_package(project_name):
  6541. project_name_pk = uniform_package_name(iter.group(0))
  6542. package = "%s_%s"%(project_name_pk, package)
  6543. break
  6544. if candidate:
  6545. if win_or_not and re.search('否|未入围', win_or_not):
  6546. candidate_set.add(candidate)
  6547. # elif re.search('^((建议|推荐)(中标|成交)|是)$', win_or_not) and win_sort in ['', '参与投标单位及排名'] and win_tenderer=='':
  6548. # win_sort = '第一名'
  6549. # candidate_set.add(candidate)
  6550. else:
  6551. candidate_set.add(candidate)
  6552. if win_tenderer and second_tenderer: # and third_tenderer 128778062 这篇只有 第一二候选人
  6553. if re.search("(候选人|投标人|单位|公司)名?称?$", df.loc[i, 0]) or re.search("(候选人|投标人|单位|公司)名?称?", df.loc[i, 1]):
  6554. findtop3 = True
  6555. for type, text in zip(['win_tenderer', 'second_tenderer', 'third_tenderer'],
  6556. [win_tenderer, second_tenderer, third_tenderer]):
  6557. text = self.get_role(text, self.nlp_enterprise)
  6558. if text:
  6559. # if self.is_role(text):
  6560. if type not in role_dic:
  6561. role_dic[type] = dict()
  6562. role_dic[type]['role_text'] = text
  6563. candidate_set.add(text)
  6564. elif re.search('投标报价|报价$', df.loc[i, 0]) or re.search('投标报价|报价$', df.loc[i, 1]):
  6565. findmoney = True
  6566. header = df.loc[i, 0] if re.search('投标报价|报价$', df.loc[i, 0]) else df.loc[i, 1]
  6567. for type, text in zip(['win_tenderer', 'second_tenderer', 'third_tenderer'],
  6568. [win_tenderer, second_tenderer, third_tenderer]):
  6569. if len(re.sub('[金额万元()()::零壹贰叁肆伍陆柒捌玖拾佰仟萬億圆十百千万亿元角分¥整\s\d,.]|人民币|不?含税', '',
  6570. text)) > 5: # 金额字段出现超过5个非金额字符,中断匹配
  6571. break
  6572. money, money_unit = money_process(text, header)
  6573. if (re.search('费率|下浮率|[%%‰折]', header+text) and money < 100) or money > 50000000000: # 如果是费率或大于500亿的金额改为0
  6574. money = 0
  6575. if money > 0:
  6576. if type not in role_dic:
  6577. role_dic[type] = dict()
  6578. role_dic[type]['money'] = money
  6579. role_dic[type]['money_unit'] = money_unit
  6580. else:
  6581. line_num += 1
  6582. if findtop3 and findmoney:
  6583. break
  6584. if line_num > 3:
  6585. break
  6586. elif candidate and win_sort:
  6587. role_type = ""
  6588. if re.search('第[一1]|^[一1]$', win_sort):
  6589. role_type = "win_tenderer"
  6590. elif re.search('第[二2]|^[二2]$', win_sort):
  6591. role_type = "second_tenderer"
  6592. elif re.search('第[三3]|^[三3]$', win_sort):
  6593. role_type = "third_tenderer"
  6594. if role_type != "":
  6595. if package not in prem_dic:
  6596. prem_dic[package] = {
  6597. 'code': '',
  6598. 'name': '',
  6599. 'roleList': [],
  6600. 'tendereeMoney': 0,
  6601. 'tendereeMoneyUnit': ""
  6602. }
  6603. prem_dic[package]['code'] = project_code
  6604. prem_dic[package]['name'] = project_name
  6605. if len(re.sub('[金额万元()()::零壹贰叁肆伍陆柒捌玖拾佰仟萬億圆十百千万亿元角分¥整\s\d,.]|人民币|不?含税', '', bid_amount_))> 5: # 金额字段出现超过5个非金额字符,中断匹配
  6606. break
  6607. bid_amount, money_unit = money_process(bid_amount_, headers['bid_amount'][1]) if "bid_amount" in headers else (0, "")
  6608. header = headers['bid_amount'][1] if "bid_amount" in headers else ''
if (re.search('费率|下浮率|[%%‰折]', header + bid_amount_) and bid_amount < 100) or bid_amount > 50000000000: # a rate/ratio, or an amount above 50 billion, is reset to 0
bid_amount = 0
  6612. prem_dic[package]['roleList'].append({
  6613. "address": "",
  6614. "linklist": [],
  6615. "role_money": {
  6616. "discount_ratio": "",
  6617. "downward_floating_ratio": "",
  6618. "floating_ratio": "",
  6619. "money": bid_amount,
  6620. "money_unit": money_unit
  6621. },
  6622. "role_name": role_type,
  6623. "role_text": candidate,
  6624. "serviceTime": ""
  6625. })
  6626. if len(prem_dic[package]['roleList']) == 0: # 只有项目编号和名称的 丢弃
  6627. prem_dic.pop(package)
  6628. if role_dic and prem_dic == dict():
  6629. if package not in prem_dic:
  6630. prem_dic[package] = {
  6631. 'code': '',
  6632. 'name': '',
  6633. 'roleList': [],
  6634. 'tendereeMoney': 0,
  6635. 'tendereeMoneyUnit': ""
  6636. }
  6637. for role_type, v in role_dic.items():
  6638. role_text = v.get('role_text', '')
  6639. if role_text == "":
  6640. continue
  6641. money = v.get('money', 0)
  6642. money_unit = v.get('money_unit', '')
  6643. prem_dic[package]['roleList'].append({
  6644. "address": "",
  6645. "linklist": [],
  6646. "role_money": {
  6647. "discount_ratio": "",
  6648. "downward_floating_ratio": "",
  6649. "floating_ratio": "",
  6650. "money": money,
  6651. "money_unit": money_unit
  6652. },
  6653. "role_name": role_type,
  6654. "role_text": role_text,
  6655. "serviceTime": ""
  6656. })
  6657. if len(prem_dic[package]['roleList']) == 0: # 只有项目编号和名称的 丢弃
  6658. prem_dic.pop(package)
  6659. return prem_dic, candidate_set
  6660. def get_prem(self, soup):
  6661. tables = soup.find_all('table')
  6662. tables.reverse()
  6663. rs_dic = {}
  6664. candidate_set = set()
  6665. for table in tables:
  6666. trs = self.tb.table2list(table)
  6667. i = 0
  6668. headers = ""
  6669. while i < len(trs) - 1:
  6670. flag_, contain_header_, headers_ = self.find_header(trs[i])
  6671. if flag_ and headers_ != dict():
  6672. table_items = []
  6673. headers = headers_
  6674. for j in range(i + 1, len(trs)):
  6675. if len(trs[j]) == len(trs[i]):
  6676. flag_, contain_header_, headers_ = self.find_header(trs[j])
  6677. if flag_ or contain_header_:
  6678. break
  6679. else:
  6680. table_items.append(trs[j])
  6681. else:
  6682. # print('表头,内容 列数不一致', len(trs[i]), len(trs[j]))
  6683. break
  6684. if len(table_items) >= 1:
  6685. df = pd.DataFrame(table_items)
  6686. prem_, candidate_set_ = self.extract_from_df(df, headers)
  6687. # print('prem_: ', prem_)
  6688. rs_dic.update(prem_)
  6689. candidate_set.update(candidate_set_)
  6690. i = j - 1
  6691. i += 1
if rs_dic and 'package_code' not in headers and 'Project' in rs_dic and table.find_previous_sibling(): # table without a package/section column (typically only two rows): look for the section in the preceding sibling tag
  6693. sib = table.find_previous_sibling()
  6694. sib_text = sib.get_text()
  6695. ser_sib = re.search('第?[0-9一二三四五六七八九十a-zA-Z]{1,4}(标[段号的包项]|([分子]?包|包[组件号]))|(标[段号的包项]|([分子]?包|包[组件号]))号?:?[0-9一二三四五六七八九十a-zA-Z]{1,4}|包名:[0-9一二三四五六七八九十]{1,4}', sib_text)
  6696. if sib.name in ['p', 'div'] and len(sib_text)<100 and ser_sib:
  6697. package_sib = ser_sib.group(0)
  6698. package_sib = uniform_package_name(package_sib)
  6699. rs_dic[package_sib] = rs_dic.pop('Project')
  6700. table.extract()
  6701. return rs_dic, candidate_set
  6702. def get_candidates_from_text(self, list_sentences, list_entitys):
  6703. candidates = set()
  6704. tenderee_or_agency = set()
  6705. sentences = sorted(list_sentences[0], key=lambda x: x.sentence_index)
  6706. for ent in list_entitys[0]:
  6707. if ent.entity_type in ['org', 'company']:
  6708. sen_index = ent.sentence_index
  6709. text = sentences[sen_index].sentence_text
  6710. b = ent.wordOffset_begin
  6711. e = ent.wordOffset_end
if ent.label in [2,3,4]: # labels 2/3/4: add model-predicted candidates directly; otherwise check by rule whether the entity is a candidate
  6713. candidates.add(ent.entity_text)
  6714. elif isinstance(b, int) and isinstance(e, int) and ent.label in [5]:
  6715. foreword = text[max(0, b - 10):b]
  6716. if re.search(self.p, foreword):
  6717. candidates.add(ent.entity_text)
  6718. if ent.label in [0, 1] and ent.values[ent.label]>0.5:
  6719. tenderee_or_agency.add(ent.entity_text)
candidates -= tenderee_or_agency # 2024/05/10 463166661: drop candidates that are also recognised as tenderee or agency roles (e.g. 四川省第二中医医院 was wrongly kept as a candidate)
  6721. return candidates
  6722. def predict(self, html, list_sentences, list_entitys, nlp_enterprise):
  6723. self.nlp_enterprise = nlp_enterprise
  6724. html = html.replace('比选申请单位', '中标候选人') # 82347769
  6725. html = re.sub("<html>|</html>|<body>|</body>","",html)
  6726. html = re.sub("##attachment##","",html)
  6727. soup = BeautifulSoup(html, 'lxml')
  6728. richText = soup.find(name='div', attrs={'class': 'richTextFetch'})
  6729. in_attachment = False
  6730. if richText:
  6731. richText = richText.extract() # 过滤掉附件
  6732. prem, candidate_set = self.get_prem(soup)
  6733. if prem == {} and richText:
  6734. prem, candidate_set = self.get_prem(richText)
  6735. in_attachment = True
  6736. candidate_set2 = self.get_candidates_from_text(list_sentences, list_entitys)
  6737. candidate_set.update(candidate_set2)
  6738. return prem, {'candidate': ','.join(candidate_set)}, in_attachment
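# Hedged note on the return value of CandidateExtractor.predict (the pipeline objects
# list_sentences / list_entitys are produced upstream and are placeholders here):
#
#   prem, candidates, in_attachment = CandidateExtractor().predict(html, list_sentences, list_entitys, nlp_enterprise)
#   # candidates == {'candidate': '甲公司,乙公司'}   (comma-joined candidate names from tables and text)
#   # in_attachment is True when the tables were only found inside the 'richTextFetch' div.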
  6739. def role_special_predictor(web_source_name, content, nlp_enterprise):
  6740. if web_source_name == '中国电子科技集团有限公司电子采购平台':
  6741. ser = re.search(',(\w{5,30}),发布时间:\d+', content)
  6742. if ser and ser.group(1) in nlp_enterprise:
  6743. return ser.group(1)
  6744. elif web_source_name == '高校仪器设备竞价网':
  6745. ser = re.search('--(\w{5,30}),申购单主题', content)
  6746. if ser and ser.group(1) in nlp_enterprise:
  6747. return ser.group(1)
  6748. elif web_source_name == '台泥阳光采购平台':
  6749. ser = re.search(',(\w{5,30})招标公告,', content)
  6750. if ser and ser.group(1) in nlp_enterprise:
  6751. return ser.group(1)
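# Commented-out, hedged usage sketch (content string and entity name are invented):
#
#   role_special_predictor('中国电子科技集团有限公司电子采购平台',
#                          ',某某电子科技有限公司,发布时间:20240501,',
#                          ['某某电子科技有限公司'])
#   # -> '某某电子科技有限公司' (returned only when the captured name also appears in
#   #    nlp_enterprise); for web sources not handled above the function falls through
#   #    and returns None.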
  6752. class WebsourceTenderee():
  6753. def __init__(self):
  6754. with open(os.path.dirname(__file__)+'/websource_tenderee.pkl', 'r', encoding='utf-8') as f:
  6755. self.webno2ree = json.load(f)
  6756. def get_websource_tenderee(self, web_source_no, web_source_name, prem):
'''
Use the unique tenderee bound to the web source to recall and adjust the tenderee in prem.
:param web_source_no:
:param prem:
:return:
'''
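# Commented-out, hedged sketch of the expected prem shape (values invented; assumes the
# web source number is not listed in websource_tenderee.pkl, so the 18591- default applies):
#
#   prem = [{'prem': {'Project': {'code': '', 'name': '', 'tendereeMoney': 0,
#                                 'roleList': [{'role_name': 'tenderee', 'role_text': '',
#                                               'role_money': {'money': 0, 'money_unit': ''},
#                                               'linklist': [], 'serviceTime': '', 'address': ''}]}}}]
#   prem = WebsourceTenderee().get_websource_tenderee('18591-1', '某医院采购网', prem)
#   # the empty tenderee role_text would be filled with the web-source tenderee
#   # ('中国人民解放军总医院' for the 18591- prefix in this sketch).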
  6763. p = '(医院|学院|学校|中学|小学|大学|幼儿园|保健院|党校|银行|研究院|血站|红十字会|防治院|研究所)'
  6764. web_ree = self.webno2ree.get(web_source_no, '')
  6765. if web_source_no.startswith('18591-') and web_ree == "":
  6766. web_ree = '中国人民解放军总医院'
  6767. elif web_source_no.startswith('Y00484-') and web_ree == "":
  6768. web_ree = '航空总医院'
if web_ree == "" and re.search('\w{2,8}(大学|医院)$', web_source_name): # 20240524: university/hospital web sources without a bound tenderee default to the source name
  6770. web_ree = web_source_name
  6771. if web_ree != '':
  6772. if 'Project' in prem[0]['prem']:
  6773. find_tenderee = False
  6774. for d in prem[0]['prem']['Project']['roleList']:
  6775. if d['role_name'] == 'tenderee':
  6776. find_tenderee = True
  6777. if d['role_text'] == "":
  6778. d['role_text'] = web_ree
  6779. elif re.search('大学$', web_ree) and re.search('学院$', d['role_text']) and web_ree not in d['role_text']:
  6780. d['role_text'] = web_ree
  6781. elif d.get('role_prob', 0) < 0.8 and get_business_data(d['role_text'])[0] == False: # 20240201 概率低于0.8且没有工商数据的替换为站源招标人
  6782. d['role_text'] = web_ree
  6783. # elif re.search(p, web_ree) and (re.search(p, d['role_text'])==None and len(d['role_text'])<6): # 数据源唯一招标人以医院等结尾,角色中无相关关键词的,替换为数据源招标人
  6784. # d['role_text'] = web_ree
  6785. # elif re.search('有限(责任)?公司', web_ree) and (re.search('有限(责任)?公司', d['role_text'])==None and len(d['role_text'])<6):
  6786. # d['role_text'] = web_ree
  6787. break
  6788. if not find_tenderee: # 没招标人的添加
  6789. prem[0]['prem']['Project']['roleList'].append({'role_name': 'tenderee',
  6790. 'role_text': '%s' % web_ree,
  6791. 'role_money': {'money': 0, 'money_unit': '',
  6792. 'floating_ratio': '',
  6793. 'downward_floating_ratio': '',
  6794. 'discount_ratio': ''},
  6795. 'linklist': [],
  6796. 'serviceTime': '',
  6797. 'address': ''})
  6798. else:
  6799. prem[0]['prem']['Project'] = {'code': '',
  6800. 'tendereeMoney': 0,
  6801. 'roleList': [
  6802. {'role_name': 'tenderee',
  6803. 'role_text': '%s' % web_ree,
  6804. 'role_money': {'money': 0, 'money_unit': '', 'floating_ratio': '',
  6805. 'downward_floating_ratio': '', 'discount_ratio': ''},
  6806. 'linklist': [],
  6807. 'serviceTime': '',
  6808. 'address': ''}
  6809. ]}
  6810. return prem
  6811. class ApprovalPredictor():
  6812. def __init__(self):
'''
Project (legal-person) unit
'''
  6816. self.other_part = {
  6817. "project_name": "((项目|工程|采购|招标|计划|建设|规划)名称?|生产建设项目|申请项目):(?P<main>[^:。]{5,50})[,。](\w{2,10}:|$)?", # 项目名称
  6818. "project_code": "(立案号|项目(统一)?代码|(项目|工程|采购|招标|计划|任务|备案|索引)(编[号码]|号)):?(?P<main>(\w{2,8})?[()〔〕【】\[\]a-zA-Z0-9-]{5,30}号?)(\w{2,10}:|$)?", # 项目编号
  6819. "doc_num": "((审[批查核]|批[复准]|立项|[定知]书|[公发批]文|用地|决定|备案|核准|许可|确认|受理|申请报告|文件|意见书|办件)[文编]?号|综合受理号|文书?号|合格书号):?(?P<main>(\w{2,8})?[()〔〕【】\[\]a-zA-Z0-9-.]{5,30}号?)[,。]?(\w{2,10}:|$)?", # 文号
  6820. "pro_type": "((申[报请]|审核备|项目|立项)(类型|种类)|项目所属行业|行业(分类|归属)|产业领域|项目行业):(?P<main>[^:。]{2,30})[,。](\w{2,10}:|$)?", # 项目类型
  6821. "year_limit": "((建设|工程|服务|项目)(起止|\w{,2})?(年限|期限|时长|工期)):(约|超过|大概|建设工期|共计|合计)?(?P<main>[\d一二三四五六七八九十]+个月|\d{1,3}(日?历?天|小时)|20\d{2}[年/-](\d{1,2}[月/-]?)?(\d{1,2}日?)?([至—-]+20\d{2}[年/-](\d{1,2}[月/-]?)?(\d{1,2}日?)?)?)[(,。](\w{2,10}:|$)?", # 建设年限
  6822. "construction_scale": "(建设内容[及和](建设)?规模|建设规模[及和](主要)?(建设)?内容|(建设|招标|采购))?内容|(建设|工程|项目)(主要)?(规模|内容|概况|面积)([及和](主要)?(规模|内容|概况|面积))?(如下)?):(?P<main>[^:。]{2,250})[,。](\w{2,10}:|$)?", # 建设规模
  6823. "approval_items": "((审[批查核]|批[复准]|申请|监管)(事项|内容|名称)|事项名称|事项审批):(?P<main>[^:。]{2,70})[,。](\w{2,10}:|$)?", # 审批事项
  6824. "properties": "((建设|工程|项目)性质):(?P<main>[^:。]{2,50})[,。](\w{2,10}:|$)?", # 建设性质
  6825. "approval_result": "((审[批查核]|批[复准]|核[发准]|许可|抽查|备案)(结果|决定|结论|状态|回复|意见)|(办[理件]|,)(状态|意见|结果)|项目(当前|目前)?状态):(?P<main>[^:。]{2,20})[,。](\w{2,10}:|$)?", # 审批结果
  6826. "phone": "(联系)?电话:(?P<main>1[3-9][0-9][-—-―]?\d{4}[-—-―]?\d{4}|" # 联系电话
  6827. '\+86.?1[3-9]\d{9}|'
  6828. '0[1-9]\d{1,2}[-—-―][2-9]\d{6}\d?[-—-―]\d{1,4}|'
  6829. '0[1-9]\d{1,2}[-—-―]{0,2}[2-9]\d{6}\d?(?=1[3-9]\d{9})|'
  6830. '0[1-9]\d{1,2}[-—-―]{0,2}[2-9]\d{6}\d?(?=0[1-9]\d{1,2}[-—-―]?[2-9]\d{6}\d?)|'
  6831. '0[1-9]\d{1,2}[-—-―]{0,2}[2-9]\d{6}\d?(?=[2-9]\d{6,7})|'
  6832. '0[1-9]\d{1,2}[-—-―]{0,2}[2-9]\d{6}\d?|'
  6833. '[\(|\(]0[1-9]\d{1,2}[\)|\)]-?[2-9]\d{6}\d?-?\d{,4}|'
  6834. '400\d{7}转\d{1,4}|'
  6835. '[2-9]\d{6,7})[,。](\w{2,10}:|$)?'
  6836. }
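# Hedged illustration of how these patterns are consumed (predict() below reads
# iter.group('main')); the sample sentence is invented and assumes the usual preprocessed
# text with full-width punctuation:
#
#   import re
#   m = re.search(self.other_part['project_name'],
#                 '项目名称:某某污水处理厂提标改造工程,建设单位:某某市水务局')
#   m.group('main')   # would capture '某某污水处理厂提标改造工程'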
  6837. self.role_type = {
  6838. "declare_company": "(申[请报]|填报|呈报)(人|部门|机关|单位|企业|公司|机构|组织)", # 申报单位
  6839. "construct_company": "(业主|建设|用地|委托|发包|产权|项目))?(部门|机关|单位|企业|公司|方|业主)|主送机关|法人单位|甲方", # 建设单位
  6840. "approver": "(审[批查核议图]|许可|批[复准](用地)?|发证|管理|办理|受理|核[发准]|备案|承办)(部门|机关|单位|企业|公司|机构)|实施主体", # 审批部门
  6841. "evaluation_agency": "(环境|环保)?(影响)?(环评|评价|评估)(机构|单位|公司)" , # 环评机构
  6842. "compilation_unit": "编制单位", # 编制单位 20240701加
  6843. "publisher": "(发布|发文|公示|公告)(人|部门|机关|单位|企业|公司|机构|组织)" # 发布机构 20240703加
  6844. }
  6845. self.person_type = {
  6846. "legal_person": "项目法人|法定代表人|企业法人" # 项目法人
  6847. }
  6848. self.date_type = {
  6849. "time_declare": "(申[请报]|填报|呈报)(时间|日期)", # 申报时间
  6850. "time_commencement": "(开工|动工|(项目|建设|工程|施工)开始)(时间|日期)", # 开工时间
  6851. "time_completion": "(竣工|完工|验收|(项目|建设|工程|施工)(完成|结束))(备案)?(时间|日期)", # 竣工时间
  6852. "time_approval": "(审[批查核查议]|许可|批[复准](用地)?|发证|管理|办理|受理|核[发准]|备案|决定)(时间|日期)", # 审批时间 20240701加
  6853. "time_release": "(发布|发文|公告|生成|成文)(时间|日期)" # 发布时间
  6854. }
  6855. self.addr_type = {
  6856. "project_addr": "(建设|工程|项目|施工|地块|用地)\w{,2}(地址|地点|位置|所在地)|[宗土]地坐落" # 建设地址
  6857. }
  6858. self.money_type = {
  6859. "total_tendereeMoney": "(项目|概算|投资)金额|项目投资|总投资|总预算|总概算|投资(规模|总额|估算|概算)|批复概算|投资额", # 总投资
  6860. }
  6861. def predict(self, list_sentences, list_entitys, span=12):
  6862. rs_dic = {k: "" for k in
  6863. self.other_part.keys() | self.role_type.keys() | self.date_type.keys() | self.addr_type.keys() | self.money_type.keys() | self.person_type.keys()}
  6864. rs_dic['moneysource'] = ""
  6865. sentences = [it.sentence_text for it in sorted(list_sentences[0], key=lambda x: x.sentence_index)]
  6866. entities = [[] for _ in range(len(sentences))]
  6867. rs_l = []
  6868. found_key = 0
  6869. code_name_set = set() # 项目编号、名称集合
  6870. org_set = set() # 保存可能为审批部门的角色
  6871. for entity in list_entitys[0]:
  6872. entities[entity.sentence_index].append(entity)
  6873. for i in range(len(sentences)):
  6874. multi_project = {k: "" for k in
  6875. self.other_part.keys() | self.role_type.keys() | self.date_type.keys() | self.addr_type.keys() | self.money_type.keys() | self.person_type.keys()}
  6876. multi_project['moneysource'] = ''
  6877. text = sentences[i]
  6878. for entity in entities[i]:
  6879. b, e = entity.wordOffset_begin, entity.wordOffset_end
  6880. if entity.entity_type in ['org', 'company']:
  6881. flag = 1
  6882. for k, v in self.role_type.items():
  6883. if re.search(v, sentences[entity.sentence_index][max(0, b - span):b]):
  6884. if rs_dic[k] == '':
  6885. rs_dic[k] = entity.entity_text
  6886. multi_project[k] = entity.entity_text
  6887. found_key = 1
  6888. flag = 0
  6889. if flag and entity.entity_type == "org" and re.search('(局|委员会|委|厅)$', entity.entity_text):
  6890. org_set.add(entity.entity_text)
  6891. elif entity.entity_type in ['person']:
  6892. for k, v in self.person_type.items():
  6893. if re.search(v, sentences[entity.sentence_index][max(0, b - span):b]):
  6894. if rs_dic[k] == '':
  6895. rs_dic[k] = entity.entity_text
  6896. multi_project[k] = entity.entity_text
  6897. found_key = 1
  6898. break
  6899. elif entity.entity_type in ['time']:
  6900. for k, v in self.date_type.items():
  6901. if re.search(v, sentences[entity.sentence_index][max(0, b - span):b]):
  6902. time = timeFormat(entity.entity_text, default_first_day=False) if k in ['time_completion'] else timeFormat(entity.entity_text)
  6903. if time == "":
  6904. continue
  6905. if rs_dic[k] == '':
  6906. rs_dic[k] = time
  6907. multi_project[k] = time
  6908. found_key = 1
  6909. elif entity.entity_type in ['location']:
  6910. for k, v in self.addr_type.items():
  6911. if re.search(v, sentences[entity.sentence_index][max(0, b - span):b]):
  6912. if rs_dic[k] == '':
  6913. rs_dic[k] = entity.entity_text
  6914. multi_project[k] = entity.entity_text
  6915. found_key = 1
  6916. elif entity.entity_type in ['money']:
  6917. for k, v in self.money_type.items():
  6918. if re.search(v, sentences[entity.sentence_index][max(0, b - span):b]):
  6919. if rs_dic[k] == '':
  6920. rs_dic[k] = entity.entity_text
  6921. multi_project[k] = entity.entity_text
  6922. found_key = 1
  6923. elif entity.entity_type in ['moneysource']:
  6924. rs_dic['moneysource'] = turnMoneySource(entity.entity_text)
  6925. multi_project['moneysource'] = turnMoneySource(entity.entity_text)
  6926. elif entity.entity_type in ['code']:
  6927. k = 'project_code'
  6928. v = self.other_part[k].split(':', maxsplit=1)[0]
  6929. if re.search(v, sentences[entity.sentence_index][max(0, b - span):b]):
  6930. if rs_dic[k] == '':
  6931. rs_dic[k] = entity.entity_text
  6932. multi_project[k] = entity.entity_text
  6933. found_key = 1
  6934. elif entity.entity_type in ['name']:
  6935. k = 'project_name'
  6936. v = self.other_part[k].split(':', maxsplit=1)[0]
  6937. if re.search(v, sentences[entity.sentence_index][max(0, b - span):b]):
  6938. if rs_dic[k] == '':
  6939. rs_dic[k] = entity.entity_text
  6940. multi_project[k] = entity.entity_text
  6941. found_key = 1
  6942. for k, v in self.other_part.items():
  6943. for iter in re.finditer(v, text):
  6944. if rs_dic[k] == '':
  6945. rs_dic[k] = iter.group('main')
  6946. multi_project[k] = iter.group('main')
  6947. found_key = 1
  6948. break
  6949. for k, v in self.date_type.items():
  6950. for iter in re.finditer(v+':?(?P<main>20\d{2}-\d{1,2}(-\d{1,2})?|20\d{2}/\d{1,2}(/\d{1,2})?|20\d{2}\.\d{1,2}(\.\d{1,2})?|20\d{2}(0[1-9]|1[0-2])(0[1-9]|[1-2][0-9]|3[0-1])?)', text): # 规则补充实体识别不到的日期时间
  6951. time = timeFormat(iter.group('main'), default_first_day=False) if k in ['time_completion'] else timeFormat(iter.group('main'))
  6952. if time == "":
  6953. continue
  6954. if rs_dic[k] == '':
  6955. rs_dic[k] = time
  6956. multi_project[k] = time
  6957. found_key = 1
  6958. break
  6959. if (multi_project['project_code'] != "" or multi_project['project_name'] != "") and multi_project['project_code']+multi_project['project_name'] not in code_name_set:
  6960. code_name_set.add(multi_project['project_code']+multi_project['project_name'])
  6961. district = getPredictor('district').get_area(
  6962. multi_project['approver'] + multi_project['project_name'] + multi_project['project_addr'], '')
  6963. if district['district']['province'] != '全国':
  6964. multi_project['area'] = district['district']['area']
  6965. multi_project['province'] = district['district']['province']
  6966. multi_project['city'] = district['district']['city']
  6967. multi_project['district'] = district['district']['district']
  6968. multi_project = {k:v for k,v in multi_project.items() if v != ''}
  6969. rs_l.append(multi_project)
  6970. if len(rs_l)>1 and len(set(rs_l[0].keys()))>2 and set(rs_l[0].keys())&set(rs_l[1].keys())!=set():
  6971. return rs_l
  6972. elif found_key == 1:
  6973. district = getPredictor('district').get_area(
  6974. rs_dic['approver'] + rs_dic['project_name'] + rs_dic['project_addr'], '')
  6975. if district['district']['province'] != '全国':
  6976. rs_dic['area'] = district['district']['area']
  6977. rs_dic['province'] = district['district']['province']
  6978. rs_dic['city'] = district['district']['city']
  6979. rs_dic['district'] = district['district']['district']
  6980. if len(org_set) == 1 and rs_dic['approver'] == "":
rs_dic['approver'] = org_set.pop()
  6982. rs_dic = {k: v for k, v in rs_dic.items() if v != ''}
  6983. return [rs_dic]
  6984. return []
  6985. def getSavedModel():
  6986. #predictor = FormPredictor()
  6987. graph = tf.Graph()
  6988. with graph.as_default():
  6989. model = tf.keras.models.load_model("../form/model/model_form.model_item.hdf5",custom_objects={"precision":precision,"recall":recall,"f1_score":f1_score})
  6990. #print(tf.graph_util.remove_training_nodes(model))
  6991. tf.saved_model.simple_save(
  6992. tf.keras.backend.get_session(),
  6993. "./h5_savedmodel/",
  6994. inputs={"image": model.input},
  6995. outputs={"scores": model.output}
  6996. )
  6997. def getBiLSTMCRFModel(MAX_LEN,vocab,EMBED_DIM,BiRNN_UNITS,chunk_tags,weights):
  6998. '''
  6999. model = models.Sequential()
  7000. model.add(layers.Embedding(len(vocab), EMBED_DIM, mask_zero=True)) # Random embedding
  7001. model.add(layers.Bidirectional(layers.LSTM(BiRNN_UNITS // 2, return_sequences=True)))
  7002. crf = CRF(len(chunk_tags), sparse_target=True)
  7003. model.add(crf)
  7004. model.summary()
  7005. model.compile('adam', loss=crf.loss_function, metrics=[crf.accuracy])
  7006. return model
  7007. '''
  7008. input = layers.Input(shape=(None,),dtype="int32")
  7009. if weights is not None:
  7010. embedding = layers.embeddings.Embedding(len(vocab),EMBED_DIM,mask_zero=True,weights=[weights],trainable=True)(input)
  7011. else:
  7012. embedding = layers.embeddings.Embedding(len(vocab),EMBED_DIM,mask_zero=True)(input)
  7013. bilstm = layers.Bidirectional(layers.LSTM(BiRNN_UNITS//2,return_sequences=True))(embedding)
  7014. bilstm_dense = layers.TimeDistributed(layers.Dense(len(chunk_tags)))(bilstm)
  7015. crf = CRF(len(chunk_tags),sparse_target=True)
  7016. crf_out = crf(bilstm_dense)
  7017. model = models.Model(input=[input],output = [crf_out])
  7018. model.summary()
  7019. model.compile(optimizer = 'adam', loss = crf.loss_function, metrics = [crf.accuracy])
  7020. return model
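# Commented-out, hedged usage sketch (sizes follow the 60/200 setup referenced in
# save_codename_model below; vocab, class_labels and w2v_matrix are assumed to be loaded
# elsewhere, and the TF1-era Keras + CRF stack this function was written against is required):
#
#   model = getBiLSTMCRFModel(MAX_LEN=None, vocab=vocab, EMBED_DIM=60,
#                             BiRNN_UNITS=200, chunk_tags=class_labels, weights=w2v_matrix)
#   # input:  (batch, seq_len) int32 token ids
#   # output: (batch, seq_len, len(chunk_tags)) CRF scores; train with crf.loss_function.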
  7021. import h5py
  7022. def h5_to_graph(sess,graph,h5file):
f = h5py.File(h5file,'r') # open the h5 file
  7024. def getValue(v):
  7025. _value = f["model_weights"]
  7026. list_names = str(v.name).split("/")
  7027. for _index in range(len(list_names)):
  7028. print(v.name)
  7029. if _index==1:
  7030. _value = _value[list_names[0]]
  7031. _value = _value[list_names[_index]]
  7032. return _value.value
  7033. def _load_attributes_from_hdf5_group(group, name):
  7034. """Loads attributes of the specified name from the HDF5 group.
  7035. This method deals with an inherent problem
  7036. of HDF5 file which is not able to store
  7037. data larger than HDF5_OBJECT_HEADER_LIMIT bytes.
  7038. # Arguments
  7039. group: A pointer to a HDF5 group.
  7040. name: A name of the attributes to load.
  7041. # Returns
  7042. data: Attributes data.
  7043. """
  7044. if name in group.attrs:
  7045. data = [n.decode('utf8') for n in group.attrs[name]]
  7046. else:
  7047. data = []
  7048. chunk_id = 0
  7049. while ('%s%d' % (name, chunk_id)) in group.attrs:
  7050. data.extend([n.decode('utf8')
  7051. for n in group.attrs['%s%d' % (name, chunk_id)]])
  7052. chunk_id += 1
  7053. return data
  7054. def readGroup(gr,parent_name,data):
  7055. for subkey in gr:
  7056. print(subkey)
  7057. if parent_name!=subkey:
  7058. if parent_name=="":
  7059. _name = subkey
  7060. else:
  7061. _name = parent_name+"/"+subkey
  7062. else:
  7063. _name = parent_name
  7064. if str(type(gr[subkey]))=="<class 'h5py._hl.group.Group'>":
  7065. readGroup(gr[subkey],_name,data)
  7066. else:
  7067. data.append([_name,gr[subkey].value])
  7068. print(_name,gr[subkey].shape)
  7069. layer_names = _load_attributes_from_hdf5_group(f["model_weights"], 'layer_names')
  7070. list_name_value = []
  7071. readGroup(f["model_weights"], "", list_name_value)
  7072. '''
  7073. for k, name in enumerate(layer_names):
  7074. g = f["model_weights"][name]
  7075. weight_names = _load_attributes_from_hdf5_group(g, 'weight_names')
  7076. #weight_values = [np.asarray(g[weight_name]) for weight_name in weight_names]
  7077. for weight_name in weight_names:
  7078. list_name_value.append([weight_name,np.asarray(g[weight_name])])
  7079. '''
  7080. for name_value in list_name_value:
  7081. name = name_value[0]
  7082. '''
  7083. if re.search("dense",name) is not None:
  7084. name = name[:7]+"_1"+name[7:]
  7085. '''
  7086. value = name_value[1]
  7087. print(name,graph.get_tensor_by_name(name),np.shape(value))
  7088. sess.run(tf.assign(graph.get_tensor_by_name(name),value))
  7089. def initialize_uninitialized(sess):
  7090. global_vars = tf.global_variables()
  7091. is_not_initialized = sess.run([tf.is_variable_initialized(var) for var in global_vars])
  7092. not_initialized_vars = [v for (v, f) in zip(global_vars, is_not_initialized) if not f]
  7093. adam_vars = []
  7094. for _vars in not_initialized_vars:
  7095. if re.search("Adam",_vars.name) is not None:
  7096. adam_vars.append(_vars)
  7097. print([str(i.name) for i in adam_vars]) # only for testing
  7098. if len(adam_vars):
  7099. sess.run(tf.variables_initializer(adam_vars))
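# Hedged usage sketch: typically called right after restoring a checkpoint, so that the
# optimizer slot variables that were not saved (the Adam moments filtered for above) get
# initialised without touching the restored weights; checkpoint_path is a placeholder:
#
#   saver = tf.train.Saver()
#   saver.restore(sess, checkpoint_path)
#   initialize_uninitialized(sess)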
  7100. def save_codename_model():
  7101. # filepath = "../projectCode/models/model_project_"+str(60)+"_"+str(200)+".hdf5"
  7102. filepath = "../../dl_dev/projectCode/models_tf/59-L0.471516189943-F0.8802154826344823-P0.8789179683459191-R0.8815168335321886/model.ckpt"
  7103. vocabpath = "../projectCode/models/vocab.pk"
  7104. classlabelspath = "../projectCode/models/classlabels.pk"
  7105. # vocab = load(vocabpath)
  7106. # class_labels = load(classlabelspath)
  7107. w2v_matrix = load('codename_w2v_matrix.pk')
  7108. graph = tf.get_default_graph()
  7109. with graph.as_default() as g:
  7110. ''''''
  7111. # model = getBiLSTMCRFModel(None, vocab, 60, 200, class_labels,weights=None)
  7112. #model = models.load_model(filepath,custom_objects={'precision':precision,'recall':recall,'f1_score':f1_score,"CRF":CRF,"loss":CRF.loss_function})
  7113. sess = tf.Session(graph=g)
  7114. # sess = tf.keras.backend.get_session()
  7115. char_input, logits, target, keepprob, length, crf_loss, trans, train_op = BiLSTM_CRF_tfmodel(sess, w2v_matrix)
  7116. #with sess.as_default():
  7117. sess.run(tf.global_variables_initializer())
  7118. # print(sess.run("time_distributed_1/kernel:0"))
  7119. # model.load_weights(filepath)
  7120. saver = tf.train.Saver()
  7121. saver.restore(sess, filepath)
  7122. # print("logits",sess.run(logits))
  7123. # print("#",sess.run("time_distributed_1/kernel:0"))
  7124. # x = load("codename_x.pk")
  7125. #y = model.predict(x)
  7126. # y = sess.run(model.output,feed_dict={model.input:x})
  7127. # for item in np.argmax(y,-1):
  7128. # print(item)
  7129. tf.saved_model.simple_save(
  7130. sess,
  7131. "./codename_savedmodel_tf/",
  7132. inputs={"inputs": char_input,
  7133. "inputs_length":length,
  7134. 'keepprob':keepprob},
  7135. outputs={"logits": logits,
  7136. "trans":trans}
  7137. )
  7138. def save_role_model():
'''
@summary: save the model as a SavedModel for deployment and serving on the PAI platform
'''
  7142. model_role = PREMPredict().model_role
  7143. with model_role.graph.as_default():
  7144. model = model_role.getModel()
  7145. sess = tf.Session(graph=model_role.graph)
  7146. print(type(model.input))
  7147. sess.run(tf.global_variables_initializer())
  7148. h5_to_graph(sess, model_role.graph, model_role.model_role_file)
  7149. model = model_role.getModel()
  7150. tf.saved_model.simple_save(sess,
  7151. "./role_savedmodel/",
  7152. inputs={"input0":model.input[0],
  7153. "input1":model.input[1],
  7154. "input2":model.input[2]},
  7155. outputs={"outputs":model.output}
  7156. )
  7157. def save_money_model():
  7158. model_file = os.path.dirname(__file__)+"/../money/models/model_money_word.h5"
  7159. graph = tf.Graph()
  7160. with graph.as_default():
  7161. sess = tf.Session(graph=graph)
  7162. with sess.as_default():
  7163. # model = model_money.getModel()
  7164. # model.summary()
  7165. # sess.run(tf.global_variables_initializer())
  7166. # h5_to_graph(sess, model_money.graph, model_money.model_money_file)
  7167. model = models.load_model(model_file,custom_objects={'precision':precision,'recall':recall,'f1_score':f1_score})
  7168. model.summary()
  7169. print(model.weights)
  7170. tf.saved_model.simple_save(sess,
  7171. "./money_savedmodel2/",
  7172. inputs = {"input0":model.input[0],
  7173. "input1":model.input[1],
  7174. "input2":model.input[2]},
  7175. outputs = {"outputs":model.output}
  7176. )
  7177. def save_person_model():
  7178. model_person = EPCPredict().model_person
  7179. with model_person.graph.as_default():
  7180. x = load("person_x.pk")
  7181. _data = np.transpose(np.array(x),(1,0,2,3))
  7182. model = model_person.getModel()
  7183. sess = tf.Session(graph=model_person.graph)
  7184. with sess.as_default():
  7185. sess.run(tf.global_variables_initializer())
  7186. model_person.load_weights()
  7187. #h5_to_graph(sess, model_person.graph, model_person.model_person_file)
  7188. predict_y = sess.run(model.output,feed_dict={model.input[0]:_data[0],model.input[1]:_data[1]})
  7189. #predict_y = model.predict([_data[0],_data[1]])
  7190. print(np.argmax(predict_y,-1))
  7191. tf.saved_model.simple_save(sess,
  7192. "./person_savedmodel/",
  7193. inputs={"input0":model.input[0],
  7194. "input1":model.input[1]},
  7195. outputs = {"outputs":model.output})
  7196. def save_form_model():
  7197. model_form = FormPredictor()
  7198. with model_form.graph.as_default():
  7199. model = model_form.getModel("item")
  7200. sess = tf.Session(graph=model_form.graph)
  7201. sess.run(tf.global_variables_initializer())
  7202. h5_to_graph(sess, model_form.graph, model_form.model_file_item)
  7203. tf.saved_model.simple_save(sess,
  7204. "./form_savedmodel/",
  7205. inputs={"inputs":model.input},
  7206. outputs = {"outputs":model.output})
  7207. def save_codesplit_model():
  7208. filepath_code = "../../dl_dev/projectCode/models/model_code.hdf5"
  7209. graph = tf.Graph()
  7210. with graph.as_default():
  7211. model_code = models.load_model(filepath_code, custom_objects={'precision':precision,'recall':recall,'f1_score':f1_score})
  7212. sess = tf.Session()
  7213. sess.run(tf.global_variables_initializer())
  7214. h5_to_graph(sess, graph, filepath_code)
  7215. tf.saved_model.simple_save(sess,
  7216. "./codesplit_savedmodel/",
  7217. inputs={"input0":model_code.input[0],
  7218. "input1":model_code.input[1],
  7219. "input2":model_code.input[2]},
  7220. outputs={"outputs":model_code.output})
  7221. def save_timesplit_model():
  7222. filepath = '../time/model_label_time_classify.model.hdf5'
  7223. with tf.Graph().as_default() as graph:
  7224. time_model = models.load_model(filepath, custom_objects={'precision': precision, 'recall': recall, 'f1_score': f1_score})
  7225. with tf.Session() as sess:
  7226. sess.run(tf.global_variables_initializer())
  7227. h5_to_graph(sess, graph, filepath)
  7228. tf.saved_model.simple_save(sess,
  7229. "./timesplit_model/",
  7230. inputs={"input0":time_model.input[0],
  7231. "input1":time_model.input[1]},
  7232. outputs={"outputs":time_model.output})
  7233. if __name__=="__main__":
  7234. #save_role_model()
  7235. # save_codename_model()
  7236. # save_money_model()
  7237. #save_person_model()
  7238. #save_form_model()
  7239. #save_codesplit_model()
  7240. # save_timesplit_model()
  7241. '''
  7242. # with tf.Session(graph=tf.Graph()) as sess:
  7243. # from tensorflow.python.saved_model import tag_constants
  7244. # meta_graph_def = tf.saved_model.loader.load(sess, [tag_constants.SERVING], "./person_savedModel")
  7245. # graph = tf.get_default_graph()
  7246. # signature_key = tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY
  7247. # signature = meta_graph_def.signature_def
  7248. # input0 = sess.graph.get_tensor_by_name(signature[signature_key].inputs["input0"].name)
  7249. # input1 = sess.graph.get_tensor_by_name(signature[signature_key].inputs["input1"].name)
  7250. # outputs = sess.graph.get_tensor_by_name(signature[signature_key].outputs["outputs"].name)
  7251. # x = load("person_x.pk")
  7252. # _data = np.transpose(x,[1,0,2,3])
  7253. # y = sess.run(outputs,feed_dict={input0:_data[0],input1:_data[1]})
  7254. # print(np.argmax(y,-1))
  7255. '''
  7256. # MAX_LEN = 1000
  7257. # # vocabpath = os.path.dirname(__file__) + "/codename_vocab.pk"
  7258. # # vocab = load(vocabpath)
  7259. # # word2index = dict((w, i) for i, w in enumerate(np.array(vocab)))
  7260. # # index_unk = word2index.get("<unk>")
  7261. # # sentence = "招标人:广州市重点公共建设项目管理中心,联系人:李工,联系方式:020-22905689,招标代理:广东重工建设监理有限公司," \
  7262. # # "代理联系人:薛家伟,代理联系方式:13535014481,招标监督机构:广州市重点公共建设项目管理中心,监督电话:020-22905690," \
  7263. # # "备注:以上为招标公告简要描述,招标公告详细信息请查看“招标公告”附件,"
  7264. # # sentence = sentence*5
  7265. # # list_sentence = [sentence]*200
  7266. # # # print(list_sentence)
  7267. # # x = [[word2index.get(word, index_unk) for word in sentence] for sentence in
  7268. # # list_sentence]
  7269. # # x_len = [len(_x) if len(_x) < MAX_LEN else MAX_LEN for _x in x]
  7270. # # # print(x_len)
  7271. # # x = pad_sequences(x, maxlen=MAX_LEN, padding="post", truncating="post")
  7272. # #
  7273. # # requests_result = requests.post(API_URL + "/predict_codeName", json={"inouts": x.tolist(), "inouts_len": x_len},
  7274. # # verify=True)
  7275. # # # predict_y = json.loads(requests_result.text)['result']
  7276. # # print("cost_time:", json.loads(requests_result.text)['cost_time'])
  7277. # # print(MAX_LEN, len(sentence), len(list_sentence))
  7278. # # requests_result = requests.post(API_URL + "/predict_codeName", json={"inouts": x.tolist(), "inouts_len": x_len},
  7279. # # verify=True)
  7280. # # # predict_y = json.loads(requests_result.text)['result']
  7281. # # print("cost_time:", json.loads(requests_result.text)['cost_time'])
  7282. # # print(MAX_LEN, len(sentence), len(list_sentence))
  7283. # docid = ""
  7284. # title = ''
  7285. # with open('d:/html/2.html', 'r', encoding='utf-8') as f:
  7286. # html = f.read()
  7287. # product_attr = ProductAttributesPredictor()
  7288. # rs = product_attr.predict(docid='', html=html, page_time="")
  7289. # print(rs)
  7290. docid = ""
  7291. title = ''
  7292. with open('d:/html/2.html', 'r', encoding='utf-8') as f:
  7293. html = f.read()
  7294. tb_extract = TablePremExtractor()
  7295. rs = tb_extract.predict(html, [
  7296. "江苏中联铸本混凝土有限公司",
  7297. "鼓楼区协荣机械设备经销部"
  7298. ], web_source_name = '河钢供应链管理平台')
  7299. print('标段数:',len(rs[0]))
  7300. print(rs)
  7301. # # # ids = [199601430, 195636197, 123777031, 195191849, 163533442, 121845385, 217782764, 163370956, 238134423, 191700799, 148218772, 189295942, 145940984, 166830213, 119271266, 90157660, 180314485, 136564968, 119094883, 89822506, 209263355, 132839357, 85452163, 110204324, 204773640, 83910716, 126657693, 107244197, 79107109, 47810780, 233548561, 237887867, 79134266, 77124584, 75804469, 43206978, 237560666, 67472815, 42078089, 66307082, 38382419, 224367857, 224751772, 54913238, 237390205, 60511017, 33170000, 228578442, 69042200, 228535928, 79997322, 233492018, 51828144, 219494938, 240514770]
  7302. # # # ids = [42078089, 51828144, 54913238, 60511017, 67472815, 69042200, 75804469, 77124584, 79107109, 79997322, 83910716, 85452163, 89822506, 90157660, 107244197, 110204324, 119094883, 121845385, 123777031, 132839357, 136564968, 145940984, 148218772, 163370956, 163533442, 166830213, 180314485, 191700799, 195191849, 199601430, 204773640, 209263355, 217782764, 219494938, 224367857, 224751772, 228535928, 228578442, 233492018, 237390205, 237560666, 237887867, 238134423, 240514770]
  7303. # # # ids = [42078089, 51828144, 60511017, 69042200, 77124584, 79107109, 79997322, 83910716, 85452163, 89822506, 107244197, 110204324, 119094883, 121845385, 123777031, 132839357, 136564968, 145940984, 148218772, 163370956, 163533442, 166830213, 180314485, 191700799, 195191849, 199601430, 204773640, 209263355, 217782764, 219494938, 224367857, 224751772, 228535928, 228578442, 233492018, 237390205, 237560666, 237887867, 238134423, 240514770]
  7304. # # # ids = [ 224751772, 228535928, 228578442, 233492018, 237390205, 237560666, 237887867, 238134423, 240514770]
  7305. # # # ids = [37756133, 39743626, 42068246, 51176657, 70624901, 75687028, 85489552, 95342532, 97337474, 109601526, 111464967, 112548665, 116223553, 117329696, 117850214, 120619166, 121717252, 122345499, 128511969, 133403846, 133602236, 136564970, 137772969, 138020374, 140929169, 147414295, 152659064, 155485083, 186412244, 195546784, 196135909, 202981523, 214647448, 216377830, 217957372, 218789230, 225050691, 228064464, 228590691, 236342514, 237352780, 239814252]
  7306. # # # ids = [51176657, 70624901, 85489552, 95342532, 109601526, 111464967, 112548665, 116223553, 117329696, 117850214, 120619166, 121717252, 122345499, 128511969, 133403846, 133602236, 136564970, 137772969, 138020374, 140929169, 147414295, 152659064, 155485083, 186412244, 195546784, 196135909, 202981523, 214647448, 216377830, 217957372, 218789230, 225050691, 228064464, 228590691, 236342514, 237352780, 239814252]
  7307. # ids = [31995310, 33586422, 34213587, 36093749, 37238528, 37739743, 39150739, 39281429, 40038908, 40289771, 40581071, 40591331, 42200293, 42739447, 42923948, 43351479, 44237678, 44506815, 44592013, 45106514, 45469037, 48411467, 51822565, 52127391, 54236264, 54706723, 54894477, 54898083, 55934378, 56104538, 56218948, 59606477, 60116927, 60638934, 61523351, 61685037, 61706106, 62187765, 62203118, 62843892, 63850238, 64139401, 65707507, 66072846, 66137391, 66738991, 67676932, 67902417, 69795866, 70868740, 71180456, 71796375, 77613620, 77641817, 77748144, 77761818, 78250390, 78606698, 78717682, 78854831, 79597122, 79597366, 79819968, 80377018, 82461832, 84018089, 84134439, 84815332, 85123470, 85123525, 85456789, 87474450, 88129399, 88288685, 88329278, 88342999, 88747517, 89632339, 89861712, 89985134, 91538446, 93323837, 94609104, 95522891, 97476802, 97629540, 98662744, 100207494, 100558146, 100755026, 101009561, 101275254, 101348782, 101462933, 101857772, 102924005, 103432276, 103459091, 104062674, 106601819, 106812124, 107065735, 107559314, 108201680, 108455612, 108544389, 108832580, 108995821, 109196083, 110726641, 110780095, 111234020, 111588327, 111656418, 111797176, 111993708, 114376859, 115869547, 117725909, 118032923, 118349683, 119080451, 119224972, 120120112, 120304657, 120830324, 122331341, 122856799, 123439110, 123641276, 123733047, 123733333, 123874242, 123918651, 124253086, 124942182, 125372140, 125464462, 125568385, 126185770, 126305386, 126512513, 126840529, 126844209, 126902118, 127254675, 127510817, 127670247, 128441465, 128498056, 129557176, 129833289, 129875792, 130121559, 130554345, 130556979, 131051006, 131142204, 131480539, 133743564, 133834740, 133984477, 134796953, 135533772, 135986763, 136777096, 137403576, 137864604, 138148591, 139840028, 139974803, 140105753, 145439181, 149105875, 150129836, 150828866, 152675649, 153688731, 155564708, 155599250, 155600699, 156728197, 161246902, 161775170, 162476194, 162914022, 162963943, 164007344, 164775490, 165339842, 175705079, 176218853, 176944891, 178251502, 178372090, 179732253, 180379187, 181626147, 184044160, 184404217, 186383436, 188468811, 192103014, 192574092, 192754157, 193358322, 195686462, 195868255, 196060419, 199113788, 201588003, 201874243, 201879319, 204796942, 205348530, 206735492, 208308899, 210310963, 210313993, 212124901, 212363133, 212389173, 213573782, 213818877, 214044075, 214989980, 215356671, 215367201, 215646443, 216212563, 216377823, 216490415, 217483041, 217486509, 218429429, 219181483, 219411056, 219971724, 220400698, 220780247, 221398716, 222545237, 223267606, 223906281, 224074580, 224383778, 224995705, 225390819, 227536610, 227829175, 227908020, 227980430, 229421942, 229862241, 230217038, 230227848, 230391553, 230592027, 233836843, 234465556, 235108306, 235217324, 235995802, 236010068, 236359727, 236419142, 236997002, 238069580, 238106585, 238534142, 238567209, 238839802, 239260141, 240214254, 240263848, 240535275, 240680028]
  7308. # df = pd.read_csv('E:\产品单价数量/待预测数据html内容4.csv')
  7309. # print('公告数:', len(df), len(ids))
  7310. # df = df[df['docid'].isin(ids)]
  7311. # ids = []
  7312. # for docid,html in zip(df['docid'],df['dochtmlcon']):
  7313. # product_attr = ProductAttributesPredictor()
  7314. # rs, _ = product_attr.predict(docid='', html=html, page_time="")
  7315. # # print(docid, rs)
  7316. # # print(docid, rs[0]['product_attrs']['header_col'])
  7317. # # print('*'*20)
  7318. # if rs[0]['product_attrs']['header_col'] == []:
  7319. # ids.append(docid)
  7320. # print(docid, rs[0]['product_attrs']['header_col'])
  7321. # print('*' * 20)
  7322. # else:
  7323. # print(docid, rs[0]['product_attrs']['header_col'])
  7324. # print('*' * 20)
  7325. # print(len(ids), ids)
  7326. # role = RoleRulePredictor()
  7327. # labels = []
  7328. # keywords = []
  7329. # # df = pd.read_excel('E:\实体识别数据/2023-08-24所有公告_重新预测结果.xlsx')
  7330. # df = pd.read_excel('E:\实体识别数据/2023-08-24所有公告_重新预测结果60000-90000.xlsx')
  7331. # columns = ['docid', 'type', 'label', 'value', 'front', 'behind',
  7332. # 'front6', 'entity_text', 'behind6', 'front6_reverse', 'rule_label', 'keyword', 'pos']
  7333. # print(df.columns)
  7334. # df.fillna('', inplace=True)
  7335. # for front, center, behind, entity_text in zip(df['front'], df['entity_text'], df['behind'], df['entity_text']):
  7336. # front = str(front)
  7337. # behind = str(behind)
  7338. # label, _prob, _flag, keyword = role.rule_predict(front, center, behind, entity_text)
  7339. # labels.append(label)
  7340. # keywords.append(keyword)
  7341. # df['rule_label'] = pd.Series(labels)
  7342. # df['keyword'] = pd.Series(keywords)
  7343. # df['front6'] = df['front'].apply(lambda x: str(x)[-6:])
  7344. # df['behind6'] = df['behind'].apply(lambda x: str(x)[:6])
  7345. # df['pos'] = df.apply(lambda x: 1 if x['label']==x['rule_label'] else 0, axis=1)
  7346. # # df.to_excel('E:\实体识别数据/2023-08-24所有公告_重新预测结果_rule_predict.xlsx', index=False, columns=columns)
  7347. # df.to_excel('E:\实体识别数据/2023-08-24所有公告_重新预测结果60000-90000_rule_predict.xlsx', index=False, columns=columns)