'3375552':{'en': 'Legos'},
'3375553':{'en': 'Legos'},
'3375554':{'en': 'Legos'},
'3375555':{'en': 'Euroinformation Telecom'},
'3375556':{'en': 'Intercall'},
'3375557':{'en': 'Intercall'},
'3375558':{'en': 'Sewan communications'},
'3375559':{'en': 'Sewan communications'},
'3375560':{'en': 'Prixtel'},
'3375561':{'en': 'Prixtel'},
'3375562':{'en': 'Prixtel'},
'3375563':{'en': 'Prixtel'},
'3375564':{'en': 'Prixtel'},
'3375565':{'en': 'Sewan communications'},
'3375566':{'en': 'Euroinformation Telecom'},
'3375567':{'en': 'Euroinformation Telecom'},
'3375568':{'en': 'Euroinformation Telecom'},
'3375569':{'en': 'Axialys'},
'337560':{'en': 'Euroinformation Telecom'},
'337561':{'en': 'Euroinformation Telecom'},
'337562':{'en': 'Euroinformation Telecom'},
'3375630':{'en': 'Euroinformation Telecom'},
'3375631':{'en': 'Euroinformation Telecom'},
'3375632':{'en': 'Euroinformation Telecom'},
'3375633':{'en': 'Euroinformation Telecom'},
'3375634':{'en': 'Euroinformation Telecom'},
'337565':{'en': 'Transatel'},
'337566':{'en': 'Transatel'},
'337567':{'en': 'Transatel'},
'337568':{'en': 'Transatel'},
'337569':{'en': 'Transatel'},
'3375700':{'en': 'Sewan communications'},
'3375701':{'en': 'Mobiweb telecom limited'},
'3375702':{'en': 'Mobiweb telecom limited'},
'3375703':{'en': 'Mobiweb telecom limited'},
'3375704':{'en': 'Mobiweb telecom limited'},
'3375705':{'en': 'Mobiweb telecom limited'},
'3375706':{'en': 'Nordnet'},
'3375707':{'en': 'Keyyo'},
'3375717':{'en': 'Keyyo'},
'337572':{'en': 'Mobiquithings'},
'337573':{'en': 'Mobiquithings'},
'337574':{'en': 'Coriolis Telecom'},
'3375750':{'en': 'Coriolis Telecom'},
'3375751':{'en': 'Coriolis Telecom'},
'3375752':{'en': 'Coriolis Telecom'},
'3375753':{'en': 'Coriolis Telecom'},
'3375754':{'en': 'Coriolis Telecom'},
'3375755':{'en': 'Coriolis Telecom'},
'3375756':{'en': 'Coriolis Telecom'},
'3375757':{'en': 'Euroinformation Telecom'},
'3375758':{'en': 'Euroinformation Telecom'},
'3375763':{'en': 'Euroinformation Telecom'},
'3375767':{'en': 'Euroinformation Telecom'},
'3375777':{'en': 'Euroinformation Telecom'},
'3375779':{'en': 'Halys'},
'3375787':{'en': 'Euroinformation Telecom'},
'3375788':{'en': 'BJT'},
'3375789':{'en': 'BJT'},
'337579':{'en': 'Legos'},
'33758':{'en': 'Lycamobile'},
'33759':{'en': 'Vectone mobile'},
'3376':{'en': 'Bouygues'},
'33766':{'en': 'Free Mobile'},
'33767':{'en': 'Free Mobile'},
'33768':{'en': 'Free Mobile'},
'33769':{'en': 'Free Mobile'},
'337700':{'en': 'Orange France'},
'337701':{'en': 'Orange France'},
'337702':{'en': 'Orange France'},
'337703':{'en': 'SFR'},
'337704':{'en': 'SFR'},
'337705':{'en': 'Euroinformation Telecom'},
'337706':{'en': 'Euroinformation Telecom'},
'337707':{'en': 'Euroinformation Telecom'},
'337708':{'en': 'Euroinformation Telecom'},
'337709':{'en': 'Euroinformation Telecom'},
'337710':{'en': 'Euroinformation Telecom'},
'337711':{'en': 'Euroinformation Telecom'},
'337712':{'en': 'Euroinformation Telecom'},
'337713':{'en': 'SFR'},
'337714':{'en': 'SFR'},
'3377150':{'en': 'SFR'},
'3377151':{'en': 'SFR'},
'3377152':{'en': 'SFR'},
'3377153':{'en': 'SFR'},
'3377154':{'en': 'SFR'},
'3377155':{'en': 'Euroinformation Telecom'},
'3377156':{'en': 'Euroinformation Telecom'},
'3377157':{'en': 'Euroinformation Telecom'},
'3377158':{'en': 'Euroinformation Telecom'},
'3377159':{'en': 'Euroinformation Telecom'},
'337716':{'en': 'Euroinformation Telecom'},
'337717':{'en': 'Euroinformation Telecom'},
'337718':{'en': 'Euroinformation Telecom'},
'3377190':{'en': 'Euroinformation Telecom'},
'3377191':{'en': 'Euroinformation Telecom'},
'3377192':{'en': 'Euroinformation Telecom'},
'3377193':{'en': 'Euroinformation Telecom'},
'3377194':{'en': 'Euroinformation Telecom'},
'33772':{'en': 'Orange France'},
'33773':{'en': 'Syma mobile'},
'33774':{'en': 'Syma mobile'},
'337750':{'en': 'SFR'},
'337751':{'en': 'SFR'},
'337752':{'en': 'SFR'},
'337753':{'en': 'SFR'},
'337754':{'en': 'SFR'},
'337755':{'en': 'Mobiquithings'},
'337756':{'en': 'Mobiquithings'},
'337757':{'en': 'Free Mobile'},
'33776':{'en': 'SFR'},
'33777':{'en': 'SFR'},
'33778':{'en': 'SFR'},
'33779':{'en': 'SFR'},
'3378':{'en': 'Orange France'},
'33780':{'en': 'Afone'},
'337807':{'en': 'Lebara France Limited'},
'337808':{'en': 'Lebara France Limited'},
'337809':{'en': 'Onoff telecom'},
'33781':{'en': 'Free Mobile'},
'33782':{'en': 'Free Mobile'},
'33783':{'en': 'Free Mobile'},
'337846':{'en': 'La poste telecom'},
'337847':{'en': 'La poste telecom'},
'337848':{'en': 'La poste telecom'},
'337849':{'en': 'Euroinformation Telecom'},
'34600':{'en': 'Vodafone'},
'34601':{'en': 'Vodafone'},
'346016':{'en': 'Orange'},
'346018':{'en': 'Orange'},
'346019':{'en': 'Orange'},
'346020':{'en': 'Lycamobile'},
'346021':{'en': 'Lycamobile'},
'3460220':{'en': 'Orange'},
'3460221':{'en': 'Ion mobile'},
'3460222':{'en': 'Vozelia'},
'3460223':{'en': 'Orange'},
'3460224':{'en': 'Oceans'},
'3460225':{'en': 'VozTelecom'},
'3460226':{'en': 'Orange'},
'3460227':{'en': 'Orange'},
'3460228':{'en': 'Orange'},
'3460229':{'en': 'Boutique'},
'346023':{'en': 'Lycamobile'},
'346024':{'en': 'Lebara'},
'346025':{'en': 'Lebara'},
'346026':{'en': 'Lebara'},
'346027':{'en': 'Lebara'},
'346028':{'en': 'Lycamobile'},
'346029':{'en': 'DIA'},
'3460300':{'en': 'Vodafone'},
'3460301':{'en': 'Vodafone'},
'3460302':{'en': 'Vodafone'},
'3460303':{'en': 'Vodafone'},
'3460304':{'en': 'Vodafone'},
'3460305':{'en': 'Lebara'},
'3460306':{'en': 'Lebara'},
'3460307':{'en': 'Lebara'},
'3460308':{'en': 'Lebara'},
'3460309':{'en': 'Lebara'},
'346031':{'en': 'Lebara'},
'346032':{'en': 'Lebara'},
'346033':{'en': 'Lebara'},
'346034':{'en': 'Vodafone'},
'346035':{'en': 'Vodafone'},
'346036':{'en': 'Vodafone'},
'346037':{'en': 'Vodafone'},
'346038':{'en': 'Vodafone'},
'346039':{'en': 'Lebara'},
'34604':{'en': 'Lebara'},
'346040':{'en': 'Orange'},
'346045':{'en': 'Orange'},
'34605':{'en': 'Orange'},
'3460529':{'en': 'MasMovil'},
'34606':{'en': 'Movistar'},
'34607':{'en': 'Vodafone'},
'34608':{'en': 'Movistar'},
'34609':{'en': 'Movistar'},
'34610':{'en': 'Vodafone'},
'34611':{'en': 'Republica Movil'},
'346110':{'en': 'Orange'},
'346112':{'en': 'Lebara'},
'346113':{'en': 'Lebara'},
'34612':{'en': 'Syma'},
'346122':{'en': 'Lycamobile'},
'346124':{'en': 'Lycamobile'},
'346125':{'en': 'Lycamobile'},
'34615':{'en': 'Orange'},
'34616':{'en': 'Movistar'},
'34617':{'en': 'Vodafone'},
'34618':{'en': 'Movistar'},
'34619':{'en': 'Movistar'},
'34620':{'en': 'Movistar'},
'346210':{'en': 'Republica Movil'},
'346211':{'en': 'Republica Movil'},
'346212':{'en': 'Movistar'},
'346213':{'en': 'Republica Movil'},
'346214':{'en': 'Republica Movil'},
'346215':{'en': 'Republica Movil'},
'346216':{'en': 'Republica Movil'},
'34622':{'en': 'Yoigo'},
'346230':{'en': 'Yoigo'},
'346231':{'en': 'Yoigo'},
'346236':{'en': 'Altecom'},
'34625':{'en': 'Orange'},
'3462529':{'en': 'Yoigo'},
'34626':{'en': 'Movistar'},
'34627':{'en': 'Vodafone'},
'34628':{'en': 'Movistar'},
'34629':{'en': 'Movistar'},
'34630':{'en': 'Movistar'},
'34631':{'en': 'Lycamobile'},
'34632':{'en': 'Lycamobile'},
'34633':{'en': 'Yoigo'},
'34634':{'en': 'Vodafone'},
'346340':{'en': 'Lebara'},
'346341':{'en': 'Lebara'},
'346343':{'en': 'Carrier Enabler'},
'346345':{'en': 'Movistar'},
'34635':{'en': 'Orange'},
'3463529':{'en': 'Yoigo'},
'34636':{'en': 'Movistar'},
'34637':{'en': 'Vodafone'},
'34638':{'en': 'Movistar'},
'34639':{'en': 'Movistar'},
'34640':{'en': 'Orange'},
'34641':{'en': 'Movistar'},
'34642':{'en': 'DigiMobil'},
'346430':{'en': 'DigiMobil'},
'346431':{'en': 'DigiMobil'},
'346432':{'en': 'DigiMobil'},
'346433':{'en': 'DigiMobil'},
'346434':{'en': 'DigiMobil'},
'346435':{'en': 'DigiMobil'},
'346436':{'en': 'DigiMobil'},
'346437':{'en': 'DigiMobil'},
'34644':{'en': 'Orange'},
'34645':{'en': 'Orange'},
'3464529':{'en': 'Yoigo'},
'34646':{'en': 'Movistar'},
'34647':{'en': 'Vodafone'},
'34648':{'en': 'Movistar'},
'34649':{'en': 'Movistar'},
'3465':{'en': 'Orange'},
'34650':{'en': 'Movistar'},
'3465229':{'en': 'Yoigo'},
'3465329':{'en': 'DIA'},
'3465429':{'en': 'DIA'},
'3465529':{'en': 'DIA'},
'3465729':{'en': 'DIA'},
'3465829':{'en': 'DIA'},
'34659':{'en': 'Movistar'},
'34660':{'en': 'Movistar'},
'34661':{'en': 'Vodafone'},
'34662':{'en': 'Vodafone'},
'34663':{'en': 'Vodafone'},
'34664':{'en': 'Vodafone'},
'34665':{'en': 'Orange'},
'34666':{'en': 'Vodafone'},
'34667':{'en': 'Vodafone'},
'346681':{'en': 'Truphone'},
'346685':{'en': 'Orange'},
'346686':{'en': 'Parlem'},
'346688':{'en': 'Parlem'},
'34669':{'en': 'Movistar'},
'3467':{'en': 'Vodafone'},
'346725':{'en': 'Lebara'},
'346728':{'en': 'Lebara'},
'346729':{'en': 'Lebara'},
'34675':{'en': 'Orange'},
'34676':{'en': 'Movistar'},
'34679':{'en': 'Movistar'},
'34680':{'en': 'Movistar'},
'346810':{'en': 'Movistar'},
'346811':{'en': 'Movistar'},
'346812':{'en': 'Movistar'},
'346813':{'en': 'Movistar'},
'346814':{'en': 'Movistar'},
'346815':{'en': 'Movistar'},
'346816':{'en': 'Yoigo'},
'34682':{'en': 'Movistar'},
'34683':{'en': 'Movistar'},
'346840':{'en': 'Movistar'},
'346841':{'en': 'Movistar'},
'346842':{'en': 'Movistar'},
'346843':{'en': 'Movistar'},
'3468440':{'en': 'Eurona'},
'3468441':{'en': 'Lemonvil'},
'3468442':{'en': 'BluePhone'},
'3468443':{'en': 'BT'},
'3468444':{'en': 'BT'},
'3468445':{'en': 'Aire Networks'},
'3468447':{'en': 'Quattre'},
'3468448':{'en': 'Nethits'},
'346845':{'en': 'Movistar'},
'346846':{'en': 'Telecable'},
'34685':{'en': 'Orange'},
'3468529':{'en': 'Carrefour'},
'34686':{'en': 'Movistar'},
'34687':{'en': 'Vodafone'},
'346880':{'en': 'YouMobile'},
'346881':{'en': 'YouMobile'},
'346882':{'en': 'Yoigo'},
'346883':{'en': 'Yoigo'},
'346884':{'en': 'Yoigo'},
'346885':{'en': 'YouMobile'},
'346886':{'en': 'Euskaltel'},
'346887':{'en': 'Euskaltel'},
'3468870':{'en': 'OpenMovil'},
'346888':{'en': 'Euskaltel'},
'3468883':{'en': 'Sarenet'},
'346889':{'en': 'PepePhone'},
'34689':{'en': 'Movistar'},
'34690':{'en': 'Movistar'},
'34691':{'en': 'Orange'},
'346919':{'en': 'Yoigo'},
'3469190':{'en': 'MasMovil'},
'3469198':{'en': 'Carrefour'},
'3469199':{'en': 'Carrefour'},
'34692':{'en': 'Orange'},
'3469229':{'en': 'Carrefour'},
'346927':{'en': 'Carrefour'},
'3469300':{'en': 'MasMovil'},
'3469301':{'en': 'Yoigo'},
'3469302':{'en': 'Yoigo'},
'3469303':{'en': 'Yoigo'},
'3469304':{'en': 'Yoigo'},
'3469305':{'en': 'Yoigo'},
'3469306':{'en': 'Yoigo'},
'346931':{'en': 'Orange'},
'3469310':{'en': 'MasMovil'},
'346932':{'en': 'Yoigo'},
'3469320':{'en': 'Carrefour'},
'3469321':{'en': 'Carrefour'},
'3469329':{'en': 'Orange'},
'346933':{'en': 'Carrefour'},
'3469336':{'en': 'Yoigo'},
'3469337':{'en': 'Yoigo'},
'3469340':{'en': 'DIA'},
'3469341':{'en': 'DIA'},
'3469342':{'en': 'DIA'},
'3469343':{'en': 'DIA'},
'3469344':{'en': 'DIA'},
'3469345':{'en': 'Yoigo'},
'3469346':{'en': 'Yoigo'},
'3469347':{'en': 'Yoigo'},
'3469348':{'en': 'Yoigo'},
'3469349':{'en': 'Yoigo'},
'346935':{'en': 'Yoigo'},
'3469360':{'en': 'DIA'},
'3469361':{'en': 'DIA'},
'3469362':{'en': 'DIA'},
'3469363':{'en': 'DIA'},
'3469364':{'en': 'DIA'},
'3469365':{'en': 'Carrefour'},
'3469366':{'en': 'Carrefour'},
'3469367':{'en': 'Yoigo'},
'3469368':{'en': 'Yoigo'},
'3469369':{'en': 'Yoigo'},
'346937':{'en': 'Yoigo'},
'346938':{'en': 'Yoigo'},
'346939':{'en': 'Yoigo'},
'34694':{'en': 'Movistar'},
'346944':{'en': 'Yoigo'},
'346945':{'en': 'Yoigo'},
'346946':{'en': 'Yoigo'},
'34695':{'en': 'Orange'},
'34696':{'en': 'Movistar'},
'34697':{'en': 'Vodafone'},
'34698':{'en': 'Yoigo'},
'346981':{'en': 'R'},
'346989':{'en': 'Vodafone'},
'34699':{'en': 'Movistar'},
'347110':{'en': 'Zinnia'},
'347111':{'en': 'Vodafone'},
'347117':{'en': 'Vodafone'},
'347121':{'en': 'Yoigo'},
'347122':{'en': 'Yoigo'},
'347123':{'en': 'Yoigo'},
'347124':{'en': 'Yoigo'},
'347125':{'en': 'Yoigo'},
'347126':{'en': 'Yoigo'},
'347127':{'en': 'Yoigo'},
'347128':{'en': 'Yoigo'},
'347170':{'en': 'Movistar'},
'347171':{'en': 'Vodafone'},
'347177':{'en': 'Movistar'},
'3471770':{'en': 'PepePhone'},
'3471771':{'en': 'PepePhone'},
'3471777':{'en': 'PepePhone'},
'347221':{'en': 'Yoigo'},
'347222':{'en': 'Yoigo'},
'347223':{'en': 'Yoigo'},
'347224':{'en': 'Yoigo'},
'347225':{'en': 'Yoigo'},
'347226':{'en': 'Yoigo'},
'3472260':{'en': 'MasMovil'},
'3472261':{'en': 'PepePhone'},
'347227':{'en': 'Yoigo'},
'347228':{'en': 'Yoigo'},
'347277':{'en': 'Vodafone'},
'3474442':{'en': 'Deion'},
'3474443':{'en': 'InfoVOIP'},
'3474447':{'en': 'Jetnet'},
'3474448':{'en': 'Aire Networks'},
'3474449':{'en': 'Alai'},
'347446':{'en': 'PTV'},
'347477':{'en': 'Orange'},
'347478':{'en': 'Orange'},
'3505':{'en': 'GibTel'},
'35060':{'en': 'GibTel'},
'35062':{'en': 'Limba'},
'351609':{'en': 'NOS'},
'35163':{'en': 'NOS'},
'35165':{'en': 'NOS'},
'35166':{'en': 'NOS'},
'35191':{'en': 'Vodafone'},
'3519200':{'en': 'Lycamobile'},
'3519201':{'en': 'Lycamobile'},
'3519202':{'en': 'Lycamobile'},
'3519203':{'en': 'Lycamobile'},
'3519204':{'en': 'Lycamobile'},
'3519205':{'en': 'Lycamobile'},
'351921':{'en': 'Vodafone'},
'3519220':{'en': 'Vodafone'},
'3519221':{'en': 'MEO'},
'3519222':{'en': 'MEO'},
'3519230':{'en': 'NOS'},
'3519231':{'en': 'NOS'},
'3519232':{'en': 'NOS'},
'3519233':{'en': 'NOS'},
'3519234':{'en': 'NOS'},
'3519240':{'en': 'MEO'},
'3519241':{'en': 'MEO'},
'3519242':{'en': 'MEO'},
'3519243':{'en': 'MEO'},
'3519244':{'en': 'MEO'},
'351925':{'en': 'MEO'},
'351926':{'en': 'MEO'},
'351927':{'en': 'MEO'},
'3519280':{'en': 'NOWO'},
'3519281':{'en': 'NOWO'},
'3519285':{'en': 'ONITELECOM'},
'3519290':{'en': 'NOS'},
'3519291':{'en': 'NOS'},
'3519292':{'en': 'NOS'},
'3519293':{'en': 'NOS'},
'3519294':{'en': 'NOS'},
'35193':{'en': 'NOS'},
'35196':{'en': 'MEO'},
'35262':{'en': 'POST'},
'352651':{'en': 'POST'},
'352658':{'en': 'POST'},
'35266':{'en': 'Orange'},
'352671':{'en': 'JOIN'},
'352678':{'en': 'JOIN'},
'35269':{'en': 'Tango'},
'35383':{'en': '3'},
'35385':{'en': 'Meteor'},
'35386':{'en': 'O2'},
'35387':{'en': 'Vodafone'},
'35388':{'en': 'eMobile'},
'35389':{'en': 'Tesco Mobile'},
'3538900':{'en': 'Eircom'},
'353892':{'en': 'Liffey Telecom'},
'353894':{'en': 'Liffey Telecom'},
'353895':{'en': '3'},
'3538960':{'en': 'Virgin Media'},
'3538961':{'en': 'Virgin Media'},
'3538962':{'en': 'Virgin Media'},
'3538970':{'en': 'Carphone Warehouse Ireland Mobile Limited'},
'3538971':{'en': 'Carphone Warehouse Ireland Mobile Limited'},
'3538994':{'en': 'Lycamobile'},
'3538995':{'en': 'Lycamobile'},
'3538996':{'en': 'Lycamobile'},
'3538997':{'en': 'Lycamobile'},
'3538998':{'en': 'Lycamobile'},
'354385':{'en': u('S\u00edminn')},
'354388':{'en': 'IMC'},
'354389':{'en': 'IMC'},
'35461':{'en': 'Vodafone'},
'35462':{'en': 'Vodafone'},
'354630':{'en': 'IMC'},
'354632':{'en': 'Tismi'},
'354637':{'en': u('\u00d6ryggisfjarskipti')},
'354638':{'en': u('\u00d6ryggisfjarskipti')},
'354639':{'en': u('\u00d6ryggisfjarskipti')},
'354640':{'en': u('\u00d6ryggisfjarskipti')},
'354641':{'en': u('\u00d6ryggisfjarskipti')},
'354644':{'en': 'Nova'},
'354646':{'en': 'IMC'},
'354647':{'en': 'IMC'},
'354649':{'en': 'Vodafone'},
'354650':{'en': 'IMC'},
'354651':{'en': 'IMC'},
'354655':{'en': 'Vodafone'},
'354659':{'en': 'Vodafone'},
'35466':{'en': 'Vodafone'},
'35467':{'en': 'Vodafone'},
'354680':{'en': 'Vodafone'},
'354686':{'en': 'Vodafone'},
'354687':{'en': 'Vodafone'},
'354688':{'en': 'Vodafone'},
'35469':{'en': 'Vodafone'},
'354750':{'en': u('S\u00edminn')},
'354755':{'en': u('S\u00edminn')},
'354757':{'en': 'Vodafone'},
'35476':{'en': 'Nova'},
'35477':{'en': 'Nova'},
'35478':{'en': 'Nova'},
'35479':{'en': 'Nova'},
'35482':{'en': 'Vodafone'},
'35483':{'en': u('S\u00edminn')},
'35484':{'en': u('S\u00edminn')},
'35485':{'en': u('S\u00edminn')},
'35486':{'en': u('S\u00edminn')},
'354882':{'en': u('S\u00edminn')},
'354888':{'en': u('S\u00edminn')},
'35489':{'en': u('S\u00edminn')},
'35567':{'en': 'ALBtelecom'},
'35568':{'en': 'Telekom'},
'35569':{'en': 'Vodafone'},
'35672':{'en': 'GO Mobile'},
'35677':{'en': 'Melita Mobile'},
'35679':{'en': 'GO Mobile'},
'35692':{'en': 'Vodafone'},
'35696':{'en': 'YOM'},
'356981':{'en': 'Melita Mobile'},
'356988':{'en': 'GO Mobile'},
'356989':{'en': 'Vodafone'},
'35699':{'en': 'Vodafone'},
'35794':{'en': 'Lemontel'},
'35795':{'en': 'PrimeTel'},
'35796':{'en': 'MTN'},
'35797':{'en': 'Cytamobile-Vodafone'},
'35799':{'en': 'Cytamobile-Vodafone'},
'35840':{'en': 'Telia'},
'35841':{'en': 'DNA'},
'35842':{'en': 'Telia'},
'3584320':{'en': 'Cuuma'},
'3584321':{'en': 'Cuuma'},
'3584322':{'en': 'Benemen Oy'},
'3584323':{'en': 'Top Connect OU'},
'3584324':{'en': 'Nord Connect SIA'},
'358436':{'en': 'DNA'},
'358438':{'en': 'DNA'},
'35844':{'en': 'DNA'},
'358450':{'en': 'Telia'},
'358451':{'en': 'Elisa'},
'358452':{'en': 'Elisa'},
'358453':{'en': 'Elisa'},
'3584540':{'en': 'MobiWeb'},
'3584541':{'en': 'AinaCom'},
'3584542':{'en': 'Nokia'},
'3584543':{'en': 'Nokia'},
'3584544':{'en': 'Nokia'},
'3584545':{'en': 'Interactive Digital Media'},
'3584546':{'en': 'NextGen Mobile / CardBoardFish'},
'3584547':{'en': 'SMS Provider Corp'},
'3584548':{'en': 'Voxbone'},
'3584549':{'en': 'Beepsend'},
'3584550':{'en': 'Suomen Virveverkko'},
'3584552':{'en': 'Suomen Virveverkko'},
'3584554':{'en': 'Suomen Virveverkko'},
'3584555':{'en': 'Nokia Solutions and Networks'},
'3584556':{'en': 'Liikennevirasto'},
'3584557':{'en': 'Compatel'},
'3584558':{'en': 'Suomen Virveverkko'},
'3584559':{'en': 'MI'},
'358456':{'en': 'Elisa'},
''' This program runs on the main computer. It requests and receives
images of the chess board and analyses them. It identifies and
displays the position. It uses a chess engine to make replies.
'''
import cv2, chess, chess.engine, chess.pgn
import numpy as np
import socket, io, time, pathlib
import tkinter as tk
from tkinter import messagebox, simpledialog
from PIL import ImageTk, Image, ImageDraw
from datetime import date
TITLE = 'CamChess'
PATH = pathlib.Path.home().joinpath('CamChess')
GAMES_PATH = PATH.joinpath('Chess_Games.txt')
PIECES_PATH = PATH.joinpath('Pieces')
ENGINE_PATH = '/usr/games/stockfish'
SQSIZE = 50 # Chess board square size in pixels.
BDSIZE = 8 * SQSIZE # Board size in pixels.
D = int((SQSIZE+19) / 20) # Width of border to be omitted from squares.
MIN_AREA = SQSIZE*SQSIZE // 15 # Minimum area for a piece image.
# Create a mask for the largest circular region within a square
# minus its border. The mask is 255 within the circle and 0 outside it.
DIM = SQSIZE-2*D
MASK = np.zeros((DIM, DIM), np.uint8)
CENTRE = (DIM-1) / 2
RSQ = ((DIM-1) / 2)**2
print('DIM', DIM, 'CENTRE', CENTRE, 'RSQ', RSQ)
for y in range(DIM):
for x in range(DIM):
if (x-CENTRE)**2 + (y-CENTRE)**2 < RSQ:
MASK[y,x] = 255
count = MASK[MASK == 0].size # Number of pixels outside the circle.
MULT = DIM*DIM/count
print('Number outside circle', count, 'MULT', MULT)
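# A quick sketch of how MASK and MULT get used further down (illustrative
# only, not part of the original flow): AND-ing a square ROI with 255-MASK
# zeroes every pixel inside the circle, so the plain mean over the ROI is
# scaled down by count/(DIM*DIM). Multiplying by MULT undoes that, giving
# the mean over the corner pixels alone, e.g.:
#   roi = np.full((DIM, DIM), 100, np.uint8)
#   np.mean(np.bitwise_and(roi, 255 - MASK)) * MULT   # ~= 100.0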
White_bottom = True # White at the bottom of diagrams.
START_POSN = ['W']*16 +['X']*32 + ['B']*16
PIECES = {} # Load the chess piece images.
for ch in 'pnbrqkPNBRQK':
PIECES[ch] = Image.open(PIECES_PATH.joinpath('%s.png' % ch))
board = chess.Board() # Create a python-chess board.
engine_on = False
engine_move = chess.Move.null()
# Set up the chess engine.
engine = chess.engine.SimpleEngine.popen_uci(ENGINE_PATH)
# Print the engine options.
print('Engine Option Type Default'\
' Min Max Var')
eo = engine.options
def show_empty(x): return x if x != '' else "''"
for k in eo:
option = str(k).ljust(22) + str(eo[k].type).ljust(8) + \
show_empty(str(eo[k].default)).ljust(22) + \
str(eo[k].min).ljust(7) + str(eo[k].max).ljust(8) + \
' '.join([str(v) for v in eo[k].var])
print(option)
close_enabled = True # Main window close button enabled.
host = 'raspberrypi.local' # Server IP address.
port = 60000 # Reserve a port.
def Get_Image():
''' Connect to the Raspberry Pi Zero camera server, receive an
encoded image over the connection, and close the connection.
Return an OpenCV image.
'''
host = 'raspberrypi.local' # Server host name or IP address.
port = 60000 # Reserve a port.
t1 = time.time()
try:
with socket.socket() as sock:
sock.connect((host, port))
print('Connected to', host, 'port', port)
# Receive the image over the connection.
stream = io.BytesIO()
while True:
chunk = sock.recv(2048)
if not chunk: break
stream.write(chunk)
t2 = time.time()
print('Received the image', stream.tell(), 'bytes',
t2-t1, 'seconds')
stream.seek(0)
image = cv2.imdecode(np.frombuffer(stream.read(), np.uint8),\
cv2.IMREAD_COLOR)
except:
print('Image Capture Failed')
image = None
return image
def Find_Corners(image):
''' Find approximate coordinates for the four outer corners of the
chess board from an image of the empty board. The board is assumed
to be a standard vinyl roll-up board, as used by chess clubs.
'''
red = image[:,:,2] # The red channel has the maximum contrast.
# Find the 49 inner corners of the chess board.
found, in_corners = cv2.findChessboardCorners(red, (7,7),
flags=cv2.CALIB_CB_NORMALIZE_IMAGE|cv2.CALIB_CB_ADAPTIVE_THRESH)
if not found: return None
in_corners = in_corners.reshape((49,2))
# Add the x and y values for each inner corner. The top-left corner
# will have the smallest sum, and the bottom-right corner the
# largest.
xpy = in_corners.sum(axis = 1)
inTL = in_corners[np.argmin(xpy)]
inBR = in_corners[np.argmax(xpy)]
# np.diff subtracts the x value from the y value (giving y - x) for
# each inner corner. The top-right corner will have the smallest
# difference, and the bottom-left corner the largest.
xmy = np.diff(in_corners, axis = 1)
inTR = in_corners[np.argmin(xmy)]
inBL = in_corners[np.argmax(xmy)]
print('Inner corners TL', inTL, 'TR', inTR, 'BR', inBR, 'BL', inBL)
def out_corner(p1, p2):
# Find the approximate location of one of the four outer
# corners of the board.
# p2 is the inner corner nearest to the required outer corner.
# p1 is the inner corner farthest from the required outer
# corner.
# (p3x, p3y) is the approximate location.
p3x = p2[0] + (p2[0] - p1[0])/6
p3y = p2[1] + (p2[1] - p1[1])/6
return p3x, p3y
# Find approximate locations for the four outer corners.
outTL, outTR = out_corner(inBR, inTL), out_corner(inBL, inTR)
outBR, outBL = out_corner(inTL, inBR), out_corner(inTR, inBL)
print('Approx outer corners', outTL, outTR, outBR, outBL)
return outTL, outTR, outBR, outBL
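# Worked example of the corner-ordering trick above (illustrative numbers
# only): for inner corners near (10,10), (90,12), (92,88) and (8,90) in
# image coordinates (x right, y down), x+y gives 20, 102, 180 and 98, so
# argmin picks the top-left and argmax the bottom-right; y-x gives 0, -78,
# -4 and 82, so argmin picks the top-right and argmax the bottom-left.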
def Transform_Image(Corners, image):
''' Transform an image of the chess board to a cropped
square BDSIZE x BDSIZE image. The inputs are approximate
coordinates for the four corners of the board and a colour
(BGR) image of the board. The function returns the transformed
image and accurate coordinates for the four corners.
'''
red = image[:,:,2] # The red channel has the maximum contrast.
def find_corner(x, y):
# Find accurate coordinates for an outer corner of the board.
x, y = int(x+0.5), int(y+0.5)
d = SQSIZE // 5
try:
# Construct a Region Of Interest centred on the
# approximate corner location.
roi = red[y-d:y+d+1, x-d:x+d+1]
# Find up to two strongest corners in the ROI.
qcorners = cv2.goodFeaturesToTrack(roi, 2, 0.1, d/2)
qcorners = qcorners.reshape(-1,2)
except:
print('Corner not found A', x, y)
raise ValueError
if qcorners is None:
print('Corner not found B', x, y)
raise ValueError
# Find the corner nearest to the centre of the ROI.
min_delta = 3*d*d
nqcorners = qcorners.shape[0]
for c in range(nqcorners):
px, py = qcorners[c]
delta = (px - d)*(px - d) + (py - d)*(py - d)
if delta < min_delta:
min_delta = delta
min_c = c
qcorner = qcorners[min_c,0]+x-d, qcorners[min_c,1]+y-d
return qcorner
try:
QCorners = [find_corner(p[0], p[1]) for p in Corners]
except ValueError:
return None, Corners
print('Accurate outer corners', QCorners)
# Construct the perspective transformation matrix.
pts1 = np.float32(QCorners)
s = BDSIZE-1
pts2 = np.float32([[0,0], [s,0], [s,s], [0,s]])
Matrix = cv2.getPerspectiveTransform(pts1, pts2)
# Transform the image.
board_image = cv2.warpPerspective(image, Matrix, (BDSIZE,BDSIZE))
return board_image, QCorners
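# A rough pipeline sketch for the functions above (hypothetical driver
# code; the real call sites appear later in the original program):
#   img = Get_Image()
#   if img is not None:
#       corners = Find_Corners(img)          # approximate outer corners
#       if corners is not None:
#           board_img, corners = Transform_Image(corners, img)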
def Optimise_Thresholds(board_image):
''' Use a cropped square image of the start position to optimise
four threshold values that will be used to identify other positions.
The blue, green and red channels are used to optimise the threshold
values. These thresholds identify White pieces on Black squares,
Black pieces on Black squares, White pieces on White squares, and
discriminate between Black and White pieces on White squares. These
thresholds are converted into threshold ratios by dividing them by
the mean green channel value in the corners of the square concerned.
This refinement enables the threshold values to adapt to varying
light levels across the board, and to changes in those levels. The
function returns the four threshold ratios.
'''
# Sets of python-chess square numbers for the start position.
# White pieces on White squares is sqWoW etc.
sqWoW = {1,3,5,7,8,10,12,14}
sqWoB = {0,2,4,6,9,11,13,15}
sqBoW = {49,51,53,55,56,58,60,62}
sqBoB = {48,50,52,54,57,59,61,63}
# Split the board image into the blue, green and red channels.
(blue, green, red) = cv2.split(board_image)
def thresh_ratio(gray, squares):
# Find the optimum threshold ratio for a set of squares for
# a colour channel.
n_squares = len(squares)
thresh_ratio_sum = 0
for square in squares:
x, y = square % 8, 7 - square // 8
x1, x2 = x*SQSIZE+D, (x+1)*SQSIZE-D
y1, y2 = y*SQSIZE+D, (y+1)*SQSIZE-D
# Find the mean green value in the corners of the Region
# Of Interest (ROI), i.e. the square minus a margin.
roiG = green[y1:y2, x1:x2]
masked = np.bitwise_and(roiG, 255-MASK)
mean_cnrs = np.mean(masked)*MULT
# Find the optimum threshold value in the ROI.
ROI = gray[y1:y2, x1:x2]
tv, mask = cv2.threshold(ROI, 0, 255, cv2.THRESH_OTSU)
thresh_ratio_sum += tv / mean_cnrs
#print('tv', tv, 'mean', mean_cnrs)
return thresh_ratio_sum / n_squares
# Find the threshold ratio for White pieces on Black squares
# in red light. White pieces should have red values greater
# than those of the Black squares.
trWoB = thresh_ratio(red, sqWoB)
print('trWoB', trWoB)
# Find the threshold ratio for Black pieces on Black squares
# in green light. Black pieces should have green values less
# than those of the Black (actually green) squares.
trBoB = thresh_ratio(green, sqBoB)
print('trBoB', trBoB)
# Find the threshold ratio for White
if target < 0 or not isinstance(target, int):
raise ValueError("Target qubit index should be a non-negative integer.")
# Add one qubit gates
def X(self, target:int):
"""Add X gate (logical NOT) to the stack of current moment.
Args:
target: An index of a qubit node on which X gate acts.
Returns: None.
Raise: ValueError if an index of the target quibit out of circuit size.
ValueError if target quibit is a float or negative number.
"""
self.check_input_one_gate(target)
# If this qubit already holds a gate in the current moment, evaluate
# the moment and start filling the next one.
if (self.gate_patch[target] != 'I'):
self.evaluate_patch()
self.gate_patch[target] = 'X'
def Y(self, target:int):
"""Add Y gate to the stack of current moment.
Args:
target: An index of a qubit node on which Y gate acts
Returns: None.
Raise: ValueError if an index of the target quibit out of circuit size.
ValueError if target quibit is a float or negative number.
"""
self.check_input_one_gate(target)
if (self.gate_patch[target] != 'I'):
self.evaluate_patch()
self.gate_patch[target] = 'Y'
def Z(self, target:int):
"""Add Z gate to the stack of current moment.
Args:
target: An index of a qubit node on which Z gate acts
Returns: None.
Raise: ValueError if an index of the target quibit out of circuit size.
ValueError if target quibit is a float or negative number.
"""
self.check_input_one_gate(target)
if (self.gate_patch[target] != 'I'):
self.evaluate_patch()
self.gate_patch[target] = 'Z'
def H(self, target:int):
"""Add H gate (Hadamard Gate) to the stack of current moment.
Hadamara Gate brings the initial state vector to its superposition state.
Args:
target: An index of a qubit node on which H gate acts
Returns: None.
Raise: ValueError if an index of the target quibit out of circuit size.
ValueError if target quibit is a float or negative number.
"""
self.check_input_one_gate(target)
if (self.gate_patch[target] != 'I'):
self.evaluate_patch()
self.gate_patch[target] = 'H'
def T(self, target:int):
"""Add T gate to the stack of current moment.
Args:
target: An index of a qubit node on which T gate acts
Returns: None.
Raise: ValueError if an index of the target quibit out of circuit size.
ValueError if target quibit is a float or negative number.
"""
self.check_input_one_gate(target)
if (self.gate_patch[target] != 'I'):
self.evaluate_patch()
self.gate_patch[target] = 'T'
def R(self, phi:float, target:int):
"""Add R gate to the stack of current moment.
Args:
target: An index of a qubit node on which R gate acts.
phi: an angle in radians which corresponds to the rotation of
the qubit state around the z axis by the given value of phi.
Returns: None.
Raise: ValueError if an index of the target quibit out of circuit size.
ValueError if target quibit is a float or negative number.
"""
self.check_input_one_gate(target)
if (self.gate_patch[target] != 'I'):
self.evaluate_patch()
# store the value(s) of angle passed
self.arguments[target] = np.exp(1.j * phi)
self.gate_patch[target] = 'R'
# = = = = = = = = = == = = = = = = = = = == = = = = = = = = = = = = = = = = = = == = =
def RX(self, phi:float, target:int):
"""Add RX gate to the stack of current moment.
Args:
target: An index of a qubit node on which R gate acts.
phi: an angle in radians which corresponds to the rotation of
the qubit state around the X axis by the given value of phi.
Returns: None.
Raise: ValueError if an index of the target quibit out of circuit size.
ValueError if target quibit is a float or negative number.
"""
self.check_input_one_gate(target)
if (self.gate_patch[target] != 'I'):
self.evaluate_patch()
# store the value(s) of angle passed
self.arguments[target] = phi / 2
self.gate_patch[target] = 'RX'
def RY(self, phi:float, target:int):
"""Add RY gate to the stack of current moment.
Args:
target: An index of a qubit node on which R gate acts.
phi: an angle in radians which corresponds to the rotation of
the qubit state around the Y axis by the given value of phi.
Returns: None.
Raise: ValueError if an index of the target quibit out of circuit size.
ValueError if target quibit is a float or negative number.
"""
self.check_input_one_gate(target)
if (self.gate_patch[target] != 'I'):
self.evaluate_patch()
# store the value(s) of angle passed
self.arguments[target] = phi / 2
self.gate_patch[target] = 'RY'
def RZ(self, phi:float, target:int):
"""Add RZ gate to the stack of current moment.
Args:
target: An index of a qubit node on which R gate acts.
phi: an angle in radians which corresponds to the rotation of
the qubit state around the Z axis by the given value of phi.
Returns: None.
Raise: ValueError if an index of the target quibit out of circuit size.
ValueError if target quibit is a float or negative number.
"""
self.check_input_one_gate(target)
if (self.gate_patch[target] != 'I'):
self.evaluate_patch()
# store the value(s) of angle passed
self.arguments[target] = phi / 2
self.gate_patch[target] = 'RZ'
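# A rough usage sketch of the moment-packing behaviour above (hypothetical
# driver code; the class name, constructor and evaluate_patch() internals
# are assumed, not taken from this file):
#   qc = QuantumCircuit(num_qubits=2)   # hypothetical constructor
#   qc.H(0)     # lands in the current moment
#   qc.X(1)     # qubit 1 is still free, so it joins the same moment
#   qc.X(0)     # qubit 0 already holds H -> the moment is evaluated and
#               # this X starts the next one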
# = = = = = = = = = == = = = = = = = = = == = = = = = = = = = = = = = = = = = = == = =
# checks for incorrect arguments for many qubits gate
def check_input_control_gate(self, control:List, target:int):
""""
Check for the basics inputs of the controll gates.
Args:
control: a list of the controlled qubits.
target: a target qubit.
Return: None.
Raise:
Value Errors.
"""
if not isinstance(control, list):
raise ValueError("Control must be a list.")
if not len(control):
raise ValueError("No control qubits have been provided.")
if target > self.num_qubits - 1:
raise ValueError("Qubit index exceeds the specified size of the circuit.")
if target < 0 or not isinstance(target, int):
raise ValueError("Target qubit index should be a non-negative integer.")
for control_qubit in control:
if control_qubit > self.num_qubits - 1:
raise ValueError("Qubit index exceeds the specified size of the circuit.")
if control_qubit < 0 or not isinstance(control_qubit, int):
raise ValueError("Control qubit index should be a non-negative integer.")
if target in control:
raise ValueError("Target qubit was also passed as a control.")
if len(set(control)) != len(control):
raise ValueError("Control list contains repeating elements.")
# Add two qubits gates
def CX(self, control:List, target:int):
"""Add CX (CNOT) gate to the stack of current moment.
Args:
control: An indices of qubits that serve as a control elements
target: An index of a qubit node on which CX gate acts
Returns: None.
Raise: ValueError if indices of quibit out of circuit size.
ValueError if target and control indeces are equal.
ValueError if target or control quibit is a float or negative number.
"""
self.check_input_control_gate(control, target)
# If the target or any control qubit already holds a gate in the
# current moment, evaluate the moment and start filling the next one.
if (self.gate_patch[target] != 'I' or any(self.gate_patch[control_qubit] != 'I' for control_qubit in control)):
self.evaluate_patch()
self.gate_patch[target] = 'CX_Target_' + str(target)
for control_qubit in control:
self.gate_patch[control_qubit] = 'CX_Control_' + str(target)
self.control_gates_patch.append((control, target))
def CZ(self, control:List, target:int):
"""Add CZ gate to the stack of current moment.
Args:
control: An indices of qubits that serve as a control elements
target: An index of a qubit node on which CZ gate acts
Returns: None.
Raise: ValueError if indices of quibit out of circuit size.
ValueError if target and control indeces are equal.
ValueError if target or control quibit is a float or negative number.
"""
self.check_input_control_gate(control, target)
if (self.gate_patch[target] != 'I' or any(self.gate_patch[control_qubit] != 'I' for control_qubit in control)):
self.evaluate_patch()
self.gate_patch[target] = 'CZ_Target_' + str(target)
for control_qubit in control:
self.gate_patch[control_qubit] = 'CZ_Control_' + str(target)
self.control_gates_patch.append((control, target))
def CY(self, control:List, target:int):
"""Add CY gate to the stack of current moment.
Args:
control: An indices of qubits that serve as a control elements
target: An index of a qubit node on which CY gate acts
Returns: None.
Raise: ValueError if indices of quibit out of circuit size.
ValueError if target and control indeces are equal.
ValueError if target or control quibit is a float or negative number.
"""
self.check_input_control_gate(control, target)
if (self.gate_patch[target] != 'I' or any(self.gate_patch[control_qubit] != 'I' for control_qubit in control)):
self.evaluate_patch()
self.gate_patch[target] = 'CY_Target_' + str(target)
for control_qubit in control:
self.gate_patch[control_qubit] = 'CY_Control_' + str(target)
self.control_gates_patch.append((control, target))
import numpy as np
import scipy
import torch
perm_dict = {
"xyz": [0, 1, 2, 0, 2],
"zyx": [2, 1, 0, 1, 3],
}
def coords3inds(coords, nz, ny, nx):
coords = coords.to(torch.int32)
# gperm = nx * ny * nz
# gperm1 = nx * ny
# gperm2 = nx
gperm = torch.tensor(nx * ny * nz, dtype=torch.int32, device="cuda")
gperm1 = torch.tensor(nx * ny, dtype=torch.int32, device="cuda")
gperm2 = torch.tensor(nx, dtype=torch.int32, device="cuda")
bdim = coords[:, 0] * gperm
zdim = coords[:, 1] * gperm1
ydim = coords[:, 2] * gperm2
xdim = coords[:, 3]
inds = bdim + zdim + ydim + xdim
return inds.to(torch.int32)
def inds3coords(inds, nz, ny, nx):
gperm = torch.tensor(nx * ny * nz, dtype=torch.int32, device="cuda")
gperm1 = torch.tensor(nx * ny, dtype=torch.int32, device="cuda")
gperm2 = torch.tensor(nx, dtype=torch.int32, device="cuda")
# Integer division is intended here; rounding_mode="floor" keeps the
# result integral on newer PyTorch, where plain torch.div is true division.
b_dim = torch.div(inds, gperm, rounding_mode="floor")
inds_leftover = inds - b_dim * gperm
z_dim = torch.div(inds_leftover, gperm1, rounding_mode="floor")
inds_leftover = inds_leftover - z_dim * gperm1
y_dim = torch.div(inds_leftover, gperm2, rounding_mode="floor")
x_dim = inds_leftover - y_dim * gperm2
coords = torch.stack((b_dim, z_dim, y_dim, x_dim), axis=-1)
return coords
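# Worked example of the (b, z, y, x) <-> flat-index round trip above
# (illustrative sizes): with nz, ny, nx = 4, 5, 6, coords3inds maps
# (b, z, y, x) = (0, 1, 2, 3) to 0*120 + 1*30 + 2*6 + 3 = 45, and
# inds3coords maps 45 back to (0, 1, 2, 3).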
def coords6inds(coords, nz, ny, nx, sz, sy, sx):
# coords -> n * bnznynxszsysx
gperm0 = nz * ny * nx * sz * sy * sx
gperm1 = ny * nx * sz * sy * sx
gperm2 = nx * sz * sy * sx
gperm3 = sz * sy * sx
gperm4 = sy * sx
gperm5 = sx
gperm = torch.tensor(
[gperm0, gperm1, gperm2, gperm3, gperm4, gperm5, 1],
dtype=torch.int32,
device="cuda",
).view(1, -1)
coords = coords.to(torch.int32) * gperm
return torch.sum(coords, axis=1)
def inds6coords(inds, nz, ny, nx, sz, sy, sx):
# coords -> n * bnznynxszsysx
inds = inds.to(torch.int32)
gperm0 = nz * ny * nx * sz * sy * sx
gperm1 = ny * nx * sz * sy * sx
gperm2 = nx * sz * sy * sx
gperm3 = sz * sy * sx
gperm4 = sy * sx
gperm5 = sx
gperm = torch.tensor(
[gperm0, gperm1, gperm2, gperm3, gperm4, gperm5, 1],
dtype=torch.int32,
device="cuda",
).view(1, -1)
mods = torch.tensor(
[9000000, nz, ny, nx, sz, sy, sx], dtype=torch.int32, device="cuda"
).view(1, -1)
# Integer division intended; see inds3coords.
return torch.fmod(torch.div(inds.view(-1, 1), gperm, rounding_mode="floor"), mods)
def coords4inds(coords, nz, ny, nx, nl):
# coords -> n * bnznynxszsysx
gperm0 = nz * ny * nx * nl
gperm1 = ny * nx * nl
gperm2 = nx * nl
gperm3 = nl
gperm = torch.tensor(
[gperm0, gperm1, gperm2, gperm3, 1], dtype=torch.int32, device="cuda"
).view(1, -1)
coords = coords.to(torch.int32) * gperm
return torch.sum(coords, axis=1)
def inds4coords(inds, nz, ny, nx, nl):
# coords -> n * bnznynxszsysx
inds = inds.to(torch.int32)
gperm0 = nz * ny * nx * nl
gperm1 = ny * nx * nl
gperm2 = nx * nl
gperm3 = nl
gperm = torch.tensor(
[gperm0, gperm1, gperm2, gperm3, 1], dtype=torch.int32, device="cuda"
).view(1, -1)
mods = torch.tensor([90000, nz, ny, nx, nl], dtype=torch.int32, device="cuda").view(
1, -1
)
# Integer division intended; see inds3coords.
return torch.fmod(torch.div(inds.view(-1, 1), gperm, rounding_mode="floor"), mods)
def voxel_perm(mask, bs, nz, ny, nx, sz, sy, sx, perm=[0, 3, 5, 2, 4, 6, 1], f_dims=1):
perm_mask = mask.view(bs, f_dims, nz * sz, ny, sy, nx, sx)
perm_mask = perm_mask.permute(perm)
return perm_mask.reshape(bs, ny, nx, nz * sz * sy * sx, f_dims)
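# Shape trace for voxel_perm with the default perm (reconstructed from the
# code, stated here for clarity): (bs, f, nz*sz, ny, sy, nx, sx)
# --permute([0,3,5,2,4,6,1])--> (bs, ny, nx, nz*sz, sy, sx, f), after which
# the three sub-voxel axes merge into one of length nz*sz*sy*sx, giving the
# returned (bs, ny, nx, nz*sz*sy*sx, f_dims).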
def voxelize_pad(voxel_num_points, inds, inverse_indices):
cluster_num = voxel_num_points.size()[0]
max_points = torch.max(voxel_num_points).data.cpu().numpy()
P = inds.shape[0]
inverse_indices = inverse_indices[inds] # 0, 0, 0, 1, 1, 1
range_indices = torch.arange(0, P, device="cuda")
voxel_num_points_addaxis = torch.cumsum(
torch.cat(
[torch.zeros([1], dtype=torch.int64, device="cuda"), voxel_num_points[:-1]],
dim=0,
),
dim=0,
)
indices_voxel = range_indices - voxel_num_points_addaxis[inverse_indices]
return (
torch.stack([inverse_indices, indices_voxel], axis=-1),
inds,
[cluster_num, max_points],
)
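# Worked example of the padding bookkeeping above (illustrative): with
# voxel_num_points = [3, 2] and points sorted by voxel, the sorted
# inverse_indices are [0, 0, 0, 1, 1] and the cumulative offsets are
# [0, 3], so indices_voxel = [0,1,2,3,4] - [0,0,0,3,3] = [0, 1, 2, 0, 1],
# i.e. each point's slot inside its (cluster_num, max_points) padded voxel.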
def unq_subvoxel_pid_padding(
valid_coords_szsysx, valid_coords_bnznynx, valid_point_label, sz, sy, sx, num_class
):
valid_coords_bnznynx, cluster_inds = torch.unique(
valid_coords_bnznynx, return_inverse=True, dim=0
)
valid_coords_cszsysxl = torch.cat(
[
torch.unsqueeze(cluster_inds, -1),
valid_coords_szsysx,
torch.unsqueeze(valid_point_label, -1),
],
axis=-1,
) # C, 4
# oneD_cszsysxl = coords4inds(valid_coords_cszsysxl, sz, sy, sx, num_class)
# unique_coord_inds, inverse_indices, voxel_num_points = torch.unique(oneD_cszsysxl, sorted=False, return_inverse=True, return_counts=True)
# _, inds = torch.sort(inverse_indices)
# voxel_cszsysxl = inds4coords(unique_coord_inds, sz, sy, sx, num_class).to(torch.int64)
voxel_cszsysxl, inverse_indices, voxel_num_points = torch.unique(
valid_coords_cszsysxl,
dim=0,
sorted=False,
return_inverse=True,
return_counts=True,
)
_, inds = torch.sort(inverse_indices)
subvoxel_inds, point_inds, vp_size = voxelize_pad(
voxel_num_points, inds, inverse_indices
)
voxel_coords_bnznynx = valid_coords_bnznynx[voxel_cszsysxl[:, 0]]
voxel_coords_szsysxl = voxel_cszsysxl[:, 1:]
voxel_bnynxnzszsysxl = [
voxel_coords_bnznynx[:, 0],
voxel_coords_bnznynx[:, 2],
voxel_coords_bnznynx[:, 3],
voxel_coords_bnznynx[:, 1],
voxel_coords_szsysxl[:, 0],
voxel_coords_szsysxl[:, 1],
voxel_coords_szsysxl[:, 2],
voxel_coords_szsysxl[:, 3],
]
voxel_bnysynxsx = [
voxel_coords_bnznynx[:, 0],
voxel_coords_bnznynx[:, 2],
voxel_coords_szsysxl[:, 1],
voxel_coords_bnznynx[:, 3],
voxel_coords_szsysxl[:, 2],
]
# print("point_inds", point_inds, unique_coord_inds, unique_coord_inds.shape)
return (
valid_coords_bnznynx,
valid_coords_cszsysxl,
voxel_coords_bnznynx,
voxel_coords_szsysxl,
voxel_bnynxnzszsysxl,
voxel_bnysynxsx,
subvoxel_inds,
point_inds,
vp_size,
voxel_num_points,
)
def unq_subvoxel(valid_coords_cszsysxl, valid_coords_bnznynx, valid_inds, sz, sy, sx):
valid_coords_cszsysx = valid_coords_cszsysxl[..., :-1] # C, 4
# oneD_cszsysx = coords3inds(valid_coords_cszsysx, sz, sy, sx)
# unique_coord_inds, inverse_indices, voxel_num_points = torch.unique(oneD_cszsysx, sorted=False, return_inverse=True, return_counts=True)
# _, inds = torch.sort(inverse_indices)
# voxel_cszsysx = inds3coords(unique_coord_inds, sz, sy, sx).to(torch.int64)
voxel_cszsysx, inverse_indices, voxel_num_points = torch.unique(
valid_coords_cszsysx,
dim=0,
sorted=False,
return_inverse=True,
return_counts=True,
)
_, inds = torch.sort(inverse_indices)
subvoxel_inds, point_inds, vp_size = voxelize_pad(
voxel_num_points, inds, inverse_indices
)
ind_vox = torch.zeros([vp_size[0], vp_size[1], 2], dtype=torch.int64, device="cuda")
ind_vox[subvoxel_inds[:, 0], subvoxel_inds[:, 1]] = valid_inds[point_inds, :] + 1
voxel_coords_bnznynx = valid_coords_bnznynx[voxel_cszsysx[:, 0]]
voxel_coords_szsysx = voxel_cszsysx[:, 1:]
voxel_bnynxnzszsysx = [
voxel_coords_bnznynx[:, 0],
voxel_coords_bnznynx[:, 2],
voxel_coords_bnznynx[:, 3],
voxel_coords_bnznynx[:, 1],
voxel_coords_szsysx[:, 0],
voxel_coords_szsysx[:, 1],
voxel_coords_szsysx[:, 2],
]
return ind_vox, voxel_bnynxnzszsysx, vp_size[0]
def get_all_voxel_centers_xyz(bs, finer_grids_num, finer_grid_origin, finer_voxel_size):
nx, ny, nz = finer_grids_num[0], finer_grids_num[1], finer_grids_num[2]
x_ind = torch.arange(nx, device="cuda")
y_ind = torch.arange(ny, device="cuda")
z_ind = torch.arange(nz, device="cuda")
x, y, z = torch.meshgrid(x_ind, y_ind, z_ind)
xyz = torch.stack([x, y, z], axis=-1)
# print("xyz", xyz.shape, "finer_voxel_size", finer_voxel_size.shape, "finer_grid_origin", finer_grid_origin.shape)
voxel_centers = (0.5 + xyz.to(torch.float32)) * finer_voxel_size.view(
1, 1, 1, 1, 3
) + finer_grid_origin.view(1, 1, 1, 1, 3)
voxel_centers = voxel_centers.repeat(bs, 1, 1, 1, 1)
return voxel_centers
def get_all_voxel_centers_zyx(bs, grids_num, grid_origin, voxel_size):
"""
Return voxel centers with z, y, x order
"""
voxel_size = torch.tensor(
[voxel_size[2], voxel_size[1], voxel_size[0]], device="cuda"
) # oz, oy, ox
grid_origin = torch.tensor(
[grid_origin[2], grid_origin[1], grid_origin[0]], device="cuda"
) # oz, oy, ox
nx, ny, nz = grids_num[0], grids_num[1], grids_num[2]
x_ind = torch.arange(nx, device="cuda")
y_ind = torch.arange(ny, device="cuda")
z_ind = torch.arange(nz, device="cuda")
z, y, x = torch.meshgrid(z_ind, y_ind, x_ind)
zyx = torch.stack([z, y, x], axis=0)
voxel_centers = (0.5 + zyx.to(torch.float32)) * voxel_size.view(
3, 1, 1, 1
) + grid_origin.view(3, 1, 1, 1)
voxel_centers = voxel_centers.view(1, 3, nz, ny, nx).repeat(bs, 1, 1, 1, 1)
return voxel_centers
def sphere_uvd2absxyz(sphere_x, sphere_y, sphere_z, dim=-1):
xydist = sphere_x * torch.cos(sphere_z * np.pi / 180.0)
carte_coords_absx = xydist * torch.cos(sphere_y * np.pi / 180.0)
carte_coords_absy = -xydist * torch.sin(sphere_y * np.pi / 180.0)
carte_coords_absz = sphere_x * torch.sin(sphere_z * np.pi / 180.0)
occpnt_absxyz = torch.stack(
[carte_coords_absx, carte_coords_absy, carte_coords_absz], dim=dim
)
return occpnt_absxyz
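# Quick check of the angle convention above (illustrative values): range 10
# at azimuth 0 deg, elevation 90 deg maps to (x, y, z) = (0, 0, 10), and
# azimuth 90 deg at elevation 0 maps to (0, -10, 0), i.e. positive azimuth
# sweeps towards -y.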
def sphere_uvd2absxyz_np(sphere_x, sphere_y, sphere_z, dim=-1):
xydist = sphere_x * np.cos(sphere_z * np.pi / 180.0)
carte_coords_absx = xydist * np.cos(sphere_y * np.pi / 180.0)
carte_coords_absy = -xydist * np.sin(sphere_y * np.pi / 180.0)
carte_coords_absz = sphere_x * np.sin(sphere_z * np.pi / 180.0)
occpnt_absxyz = np.stack(
[carte_coords_absx, carte_coords_absy, carte_coords_absz], axis=dim
)
return occpnt_absxyz
def cylinder_uvd2absxyz(cylin_x, cylin_y, cylin_z, dim=-1):
"""
Converts cylindrical coordinates to absolute Cartesian coordinates.
"""
xydist = cylin_x
carte_coords_absx = xydist * torch.cos(cylin_y * np.pi / 180.0)
carte_coords_absy = -xydist * torch.sin(cylin_y * np.pi / 180.0)
carte_coords_absz = cylin_z
occpnt_absxyz = torch.stack(
[carte_coords_absx, carte_coords_absy, carte_coords_absz], dim=dim
)
return occpnt_absxyz
def cylinder_uvd2absxyz_np(cylin_x, cylin_y, cylin_z, dim=-1):
xydist = cylin_x
carte_coords_absx = xydist * np.cos(cylin_y * np.pi / 180.0)
carte_coords_absy = -xydist * np.sin(cylin_y * np.pi / 180.0)
carte_coords_absz = cylin_z
occpnt_absxyz = np.stack(
[carte_coords_absx, carte_coords_absy, carte_coords_absz], axis=dim
)
return occpnt_absxyz
def cartesian_sphere_coords(cartesian_points, perm="xyz"):
cartesian_sqr = torch.square(cartesian_points)
i, j, k, l, m = perm_dict[perm]
x, y, z = (
cartesian_points[..., i],
cartesian_points[..., j],
cartesian_points[..., k],
)
dist, xydist = torch.sqrt(torch.sum(cartesian_sqr, dim=1)), torch.sqrt(
torch.sum(cartesian_sqr[..., l:m], dim=-1)
)
sphere_x = dist
sphere_y = torch.atan2(-y, x) * (180.0 / np.pi)
sphere_z = torch.atan2(z, xydist) * (180.0 / np.pi)
out = [sphere_x, sphere_y, sphere_z]
sphere_coords_points = torch.stack([out[i], out[j], out[k]], dim=-1)
return sphere_coords_points
def cartesian_cylinder_coords(cartesian_points, perm="xyz"):
cartesian_sqr = torch.square(cartesian_points)
i, j, k, l, m = perm_dict[perm]
x, y, z = (
cartesian_points[..., i],
cartesian_points[..., j],
cartesian_points[..., k],
)
xydist = torch.sqrt(torch.sum(cartesian_sqr[..., l:m], dim=-1))
cylin_x = xydist
cylin_y = torch.atan2(-y, x) * (180.0 / np.pi)
cylin_z = z
out = [cylin_x, cylin_y, cylin_z]
cylinder_coords_points = torch.stack([out[i], out[j], out[k]], dim=-1)
return cylinder_coords_points
# Copyright 2022 @ReneFreingruber
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This mode can be started by passing the "--import_corpus_mode" flag to the fuzzer
# or by starting the fuzzer the first time (when no OUTPUT directory exists yet).
#
# The script imports new testcases into the current corpus.
# Please note that the progress of the script is not linear (especially when creating an initial corpus).
# The script will start slow (because it will find a lot of testcases with new behavior, and this requires
# standardization, minimization & state creation).
# These operations are slow because they require restarting the JS engine multiple times,
# and therefore it will take a longer time. After some time, the import-mode will be faster because it finds fewer files
# with new coverage. At the end, the mode will again be slow (or maybe very slow) because it's processing the
# bigger testcases (testcases are sorted based on file size and handled from small files to big files).
# State creation for big input files is extremely slow.
# It's maybe better to skip these big testcases and continue because later testcases can maybe further be
# minimized (which would then be again fast). => I created my initial corpus with a different script,
# skipping the big testcases is therefore not implemented here yet (and must manually be done).
# TODO: In my original code I also removed v8 native functions because they quickly lead to crashes
# But I couldn't find the code anymore. I guess this should be implemented in this file somewhere at the end?
# This affects at least the functions:
# %ProfileCreateSnapshotDataBlob
# %LiveEditPatchScript
# %IsWasmCode
# %IsAsmWasmCode
# %ConstructConsString
# %HaveSameMap
# %IsJSReceiver
# %HasSmiElements
# %HasObjectElements
# %HasDoubleElements
# %HasDictionaryElements
# %HasHoleyElements
# %HasSloppyArgumentsElements
# %HaveSameMap
# %HasFastProperties
# %HasPackedElements
#
# More information can be found in my master thesis page 115.
import utils
import os
import config as cfg
import native_code.speed_optimized_functions as speed_optimized_functions
from native_code.executor import Execution_Status
import sys
import random
import string
import re
code_prefix = "function my_opt_func() {\n"
code_suffix1 = """
}
%OptimizeFunctionOnNextCall(my_opt_func);
my_opt_func();
"""
code_suffix2 = """
}
%PrepareFunctionForOptimization(my_opt_func);
%OptimizeFunctionOnNextCall(my_opt_func);
my_opt_func();
"""
code_suffix3 = """
}
my_opt_func();
%PrepareFunctionForOptimization(my_opt_func);
%OptimizeFunctionOnNextCall(my_opt_func);
my_opt_func();
"""
# These are just used for debugging
debugging_number_exceptions = 0
debugging_number_success = 0
debugging_number_new_coverage = 0
def import_corpus_mode(input_dir_to_import):
global code_prefix, code_suffix1, code_suffix2, code_suffix3
utils.msg("[i] Going to import another corpus to the current corpus...")
utils.msg("[i] Corpus dir which will be imported is: %s" % input_dir_to_import)
files_to_handle = []
already_seen_file_hashes = set()
utils.msg("[i] Going to read all files in directory... (this can take some time)")
for filename_to_import in os.listdir(input_dir_to_import):
if filename_to_import.endswith(".js"):
input_file_to_import = os.path.join(input_dir_to_import, filename_to_import)
# Just get file size
with open(input_file_to_import, 'r') as fobj:
content = fobj.read().rstrip()
sample_hash = utils.calc_hash(content)
if sample_hash not in already_seen_file_hashes:
# new file
files_to_handle.append((input_file_to_import, len(content)))
already_seen_file_hashes.add(sample_hash)
utils.msg("[i] Finished reading files. Going to sort files based on file size...")
# Sort based on filesize => start with small files => this ensures that the minimizer is faster
files_to_handle.sort(key=lambda x: x[1])
utils.msg("[i] Finished sorting, going to start importing...")
# Now start to import file by file
cfg.my_status_screen.set_current_operation("Importing")
total_number_files_to_import = len(files_to_handle)
number_files_already_imported = 0
for entry in files_to_handle:
(input_file_to_import, filesize) = entry
number_files_already_imported += 1
utils.msg("[i] Importing file (%d/%d): %s" % (number_files_already_imported, total_number_files_to_import, input_file_to_import))
with open(input_file_to_import, 'r') as fobj:
content = fobj.read().rstrip()
if len(content) > 200000: # 200 KB
continue # big files are too slow and are bad for mutation, so skip them
if '\x00' in content:
continue # ignore files with null bytes for the moment because the Python-to-C conversion does not support this
# Check normal execution:
check_if_testcase_triggers_new_behavior(content)
# Check adapted execution (e.g. with removed testsuite functions)
samples = preprocess_testcase(content)
for sample in samples:
check_if_testcase_triggers_new_behavior(sample)
# Now check if it triggers more coverage if the code gets compiled:
check_if_testcase_triggers_new_behavior(code_prefix + sample + code_suffix1)
check_if_testcase_triggers_new_behavior(code_prefix + sample + code_suffix2)
check_if_testcase_triggers_new_behavior(code_prefix + sample + code_suffix3)
if cfg.deterministic_preprocessing_enabled:
# And now start to preprocess all imported files! This can take a VERY long runtime
# => I would not recommend running this because it can easily take several weeks of runtime.
# It maybe makes sense for the first small testcases
cfg.deterministically_preprocess_queue_func()
return total_number_files_to_import
def check_if_testcase_triggers_new_behavior(content):
if len(content) > 10000: # 10 KB
# big files are too slow and are bad for mutation, so skip them
# Side note: I'm checking here for 10KB and in the above function for 200KB
# because this function is maybe invoked with sub-functionality from the main script
# which can be a lot smaller
return
previous_stats_new_behavior = cfg.my_status_screen.get_stats_new_behavior()
# Restart the engine so that every testcase starts in a new v8 process
# (=> this slows down the process but having a good input corpus is important)
# If you want to be faster, you can maybe skip the engine restart here
cfg.exec_engine.restart_engine()
cfg.perform_execution_func(content, state=None)
current_stats_new_behavior = cfg.my_status_screen.get_stats_new_behavior()
if current_stats_new_behavior == previous_stats_new_behavior:
# File didn't result in new coverage and was therefore not imported (importing would be done by perform_execution() )!
# Just to get sure that there was not a flawed execution, I try it again here
cfg.perform_execution_func(content, state=None)
# This is a debug version of the above one.
# The above one does all the required calculations (standardization, minimization, state creation)
# which is very slow. But if I just want to quickly check how many files I can import,
# then I'm using this debugging version (which skips all these steps).
# This version does also not restart the exec engine.
# To use it, just replace the call with this function
def check_if_testcase_triggers_new_behavior_debugging(content):
global debugging_number_exceptions, debugging_number_success, debugging_number_new_coverage
if len(content) > 10000: # 10 KB
return
result = cfg.exec_engine.execute_safe(content)
if result.status == Execution_Status.SUCCESS:
debugging_number_success += 1
if result.num_new_edges > 0:
debugging_number_new_coverage += 1
# Dump the new coverage statistics
number_triggered_edges, total_number_possible_edges = cfg.exec_engine.get_number_triggered_edges()
if total_number_possible_edges == 0:
total_number_possible_edges = 1 # avoid division by zero
triggered_edges_in_percent = (100 * number_triggered_edges) / float(total_number_possible_edges)
utils.msg("[i] Found new coverage! (%d success, %d exceptions, %d new coverage); New Coverage: %.4f %%" % (debugging_number_success, debugging_number_exceptions, debugging_number_new_coverage, triggered_edges_in_percent))
elif result.status == Execution_Status.EXCEPTION_THROWN:
debugging_number_exceptions += 1
# TODO: This is pretty old code and needs a lot of refactoring/improvement ...
# TODO: Also better implement the whole "\t", " " and "\n" checking...
# One testcase file can contain multiple testcases
# That's why this function returns a list of samples
def preprocess_testcase(code):
ret = []
tmp = ""
for line in code.split("\n"):
line_check = line.strip()
if line_check.startswith("import ") \
or line_check.startswith("import(") \
or line_check.startswith("export ") \
or line_check.startswith("loaded++") \
or line_check.startswith("await import"):
continue # remove import and export statements
tmp += line + "\n"
code = tmp
# All the following function replacements where manually found
# The replacements can be found by starting this script and
# dumping all testcases which trigger an exception
# Then the testcases can manually be analyzed to understand
# why they lead to an exception. By doing this, the following
# functions were identified which are not defined as default
# JavaScript functions (in v8).
# Identification of these functions took a long time and corpus
# coverage can still greatly be improved by identifying more such
# functions. However, this is a time consuming task.
# Example: Replace wscript.echo() function calls with console.log()
pattern = re.compile("wscript.echo", re.IGNORECASE)
code = pattern.sub("console.log", code)
pattern = re.compile("CollectGarbage", re.IGNORECASE)
code = pattern.sub("gc", code)
code = code.replace("writeLine", "console.log")
code = code.replace("WScript.SetTimeout", "setTimeout")
code = code.replace("helpers.writeln", "console.log")
code = code.replace("$ERROR", "console.log")
code = code.replace("helpers.printObject", "console.log")
code = code.replace("WScript.Arguments", "[]")
code = code.replace("assert.unreachable()", "")
code = code.replace("assertUnreachable()", "")
code = code.replace("$DONOTEVALUATE()", "")
code = code.replace("assertStmt", "eval")
code = code.replace("inSection", "Number")
code = code.replace("numberOfDFGCompiles", "Number")
code = code.replace("optimizeNextInvocation", "%OptimizeFunctionOnNextCall")
code = code.replace("printBugNumber", "console.log")
code = | |
from core.himesis import Himesis, HimesisPostConditionPattern
import cPickle as pickle
from uuid import UUID
class HRewrite_doubleRHS(HimesisPostConditionPattern):
def __init__(self):
"""
Creates the himesis graph representing the AToM3 model HRewrite_doubleRHS.
"""
# Flag this instance as compiled now
self.is_compiled = True
super(HRewrite_doubleRHS, self).__init__(name='HRewrite_doubleRHS', num_nodes=8, edges=[])
# Add the edges
self.add_edges([(5, 0), (0, 3), (2, 1), (1, 4), (5, 6), (6, 2), (3, 7), (7, 4)])
# Set the graph attributes
self["mm__"] = pickle.loads("""(lp1
S'MT_post__PoliceStationMM'
p2
aS'MoTifRule'
p3
a.""")
self["MT_action__"] = """#===============================================================================
# This code is executed after the rule has been applied.
# You can access a node labelled n matched by this rule by: PostNode('n').
# To access attribute x of node n, use: PostNode('n')['x'].
#===============================================================================
pass
"""
self["name"] = """"""
self["GUID__"] = UUID('f56de1af-2618-4042-976f-cfec4b679461')
# Set the node attributes
self.vs[0]["MT_post__associationType"] = """
#===============================================================================
# You can access the value of the current node's attribute value by: attr_value.
# If the current node shall be created you MUST initialize it here!
# You can access a node labelled n by: PreNode('n').
# To access attribute x of node n, use: PreNode('n')['x'].
# Note that the attribute values are those before the match is rewritten.
# The order in which this code is executed depends on the label value
# of the encapsulating node.
# The given action must return the new value of the attribute.
#===============================================================================
return attr_value
"""
self.vs[0]["MT_label__"] = """21"""
self.vs[0]["mm__"] = """MT_post__directLink_T"""
self.vs[0]["GUID__"] = UUID('8e2e875e-5a87-4e9f-b693-49caf2163a72')
self.vs[1]["MT_post__associationType"] = """
#===============================================================================
# You can access the value of the current node's attribute value by: attr_value.
# If the current node shall be created you MUST initialize it here!
# You can access a node labelled n by: PreNode('n').
# To access attribute x of node n, use: PreNode('n')['x'].
# Note that the attribute values are those before the match is rewritten.
# The order in which this code is executed depends on the label value
# of the encapsulating node.
# The given action must return the new value of the attribute.
#===============================================================================
return attr_value
"""
self.vs[1]["MT_label__"] = """20"""
self.vs[1]["mm__"] = """MT_post__directLink_S"""
self.vs[1]["GUID__"] = UUID('908923fe-d1e4-490e-917f-63deb691137d')
self.vs[2]["MT_post__cardinality"] = """
#===============================================================================
# You can access the value of the current node's attribute value by: attr_value.
# If the current node shall be created you MUST initialize it here!
# You can access a node labelled n by: PreNode('n').
# To access attribute x of node n, use: PreNode('n')['x'].
# Note that the attribute values are those before the match is rewritten.
# The order in which this code is executed depends on the label value
# of the encapsulating node.
# The given action must return the new value of the attribute.
#===============================================================================
return attr_value
"""
self.vs[2]["MT_label__"] = """1"""
self.vs[2]["MT_post__name"] = """
#===============================================================================
# You can access the value of the current node's attribute value by: attr_value.
# If the current node shall be created you MUST initialize it here!
# You can access a node labelled n by: PreNode('n').
# To access attribute x of node n, use: PreNode('n')['x'].
# Note that the attribute values are those before the match is rewritten.
# The order in which this code is executed depends on the label value
# of the encapsulating node.
# The given action must return the new value of the attribute.
#===============================================================================
return attr_value
"""
self.vs[2]["mm__"] = """MT_post__Station_S"""
self.vs[2]["MT_post__classtype"] = """
#===============================================================================
# You can access the value of the current node's attribute value by: attr_value.
# If the current node shall be created you MUST initialize it here!
# You can access a node labelled n by: PreNode('n').
# To access attribute x of node n, use: PreNode('n')['x'].
# Note that the attribute values are those before the match is rewritten.
# The order in which this code is executed depends on the label value
# of the encapsulating node.
# The given action must return the new value of the attribute.
#===============================================================================
return attr_value
"""
self.vs[2]["GUID__"] = UUID('c83a44af-d763-4c3a-bb08-92094a9799b1')
self.vs[3]["MT_label__"] = """4"""
self.vs[3]["MT_post__name"] = """
#===============================================================================
# You can access the value of the current node's attribute value by: attr_value.
# If the current node shall be created you MUST initialize it here!
# You can access a node labelled n by: PreNode('n').
# To access attribute x of node n, use: PreNode('n')['x'].
# Note that the attribute values are those before the match is rewritten.
# The order in which this code is executed depends on the label value
# of the encapsulating node.
# The given action must return the new value of the attribute.
#===============================================================================
return attr_value
"""
self.vs[3]["mm__"] = """MT_post__Male_T"""
self.vs[3]["MT_post__classtype"] = """
#===============================================================================
# You can access the value of the current node's attribute value by: attr_value.
# If the current node shall be created you MUST initialize it here!
# You can access a node labelled n by: PreNode('n').
# To access attribute x of node n, use: PreNode('n')['x'].
# Note that the attribute values are those before the match is rewritten.
# The order in which this code is executed depends on the label value
# of the encapsulating node.
# The given action must return the new value of the attribute.
#===============================================================================
return attr_value
"""
self.vs[3]["GUID__"] = UUID('f44e7096-3756-4211-973c-03940594b3e8')
self.vs[4]["MT_post__cardinality"] = """
#===============================================================================
# You can access the value of the current node's attribute value by: attr_value.
# If the current node shall be created you MUST initialize it here!
# You can access a node labelled n by: PreNode('n').
# To access attribute x of node n, use: PreNode('n')['x'].
# Note that the attribute values are those before the match is rewritten.
# The order in which this code is executed depends on the label value
# of the encapsulating node.
# The given action must return the new value of the attribute.
#===============================================================================
return attr_value
"""
self.vs[4]["MT_label__"] = """2"""
self.vs[4]["MT_post__name"] = """
#===============================================================================
# You can access the value of the current node's attribute value by: attr_value.
# If the current node shall be created you MUST initialize it here!
# You can access a node labelled n by: PreNode('n').
# To access attribute x of node n, use: PreNode('n')['x'].
# Note that the attribute values are those before the match is rewritten.
# The order in which this code is executed depends on the label value
# of the encapsulating node.
# The given action must return the new value of the attribute.
#===============================================================================
return attr_value
"""
self.vs[4]["mm__"] = """MT_post__Male_S"""
self.vs[4]["MT_post__classtype"] = """
#===============================================================================
# You can access the value of the current node's attribute value by: attr_value.
# If the current node shall be created you MUST initialize it here!
# You can access a node labelled n by: PreNode('n').
# To access attribute x of node n, use: PreNode('n')['x'].
# Note that the attribute values are those before the match is rewritten.
# The order in which this code is executed depends on the label value
# of the encapsulating node.
# The given action must return the new value of the attribute.
#===============================================================================
return attr_value
"""
self.vs[4]["GUID__"] = UUID('3908a186-6a4b-46f7-ac5e-5fcb1ad25907')
self.vs[5]["MT_label__"] = """3"""
self.vs[5]["MT_post__name"] = """
#===============================================================================
# You can access the value of the current node's attribute value by: attr_value.
# If the current node shall be created you MUST initialize it here!
# You can access a node labelled n by: PreNode('n').
# To access attribute x of node n, use: PreNode('n')['x'].
# Note that the attribute values are those before the match is rewritten.
# The order in which this code is executed depends on the label value
# of the encapsulating node.
# The given action must return the new value of the attribute.
#===============================================================================
return attr_value
"""
self.vs[5]["mm__"] = """MT_post__Station_T"""
self.vs[5]["MT_post__classtype"] = """
#===============================================================================
# You can access the value of the current node's attribute value by: attr_value.
# If the current node shall be created you MUST initialize it here!
# You can access a node labelled n by: PreNode('n').
# To access attribute x of node n, use: PreNode('n')['x'].
# Note that the attribute values are those before the match is rewritten.
# The order in which this code is executed depends on the label value
# of the encapsulating node.
# The given action must return the new value of the attribute.
#===============================================================================
return attr_value
"""
self.vs[5]["GUID__"] = UUID('94b0d832-532a-4cc0-bf6f-0604e1e8fb32')
self.vs[6]["MT_label__"] = """7"""
self.vs[6]["mm__"] = """MT_post__backward_link"""
self.vs[6]["GUID__"] = UUID('dcc5f2d3-1a53-4612-8afc-288adca8220a')
self.vs[7]["MT_label__"] = """8"""
self.vs[7]["mm__"] = """MT_post__backward_link"""
self.vs[7]["GUID__"] = UUID('b0a20cff-2986-403a-8f02-07af3780c840')
from HRewrite_doubleLHS import HRewrite_doubleLHS
self.pre = HRewrite_doubleLHS()
def action(self, PostNode, graph):
"""
Executable constraint code.
@param PostNode: Function taking an integer as parameter
and returns the node corresponding to that label.
"""
#===============================================================================
# This code is executed after the rule has been applied.
# You can access a node labelled n matched by this rule by: PostNode('n').
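# A minimal inspection sketch (an assumption, not from the source): Himesis
# graphs extend igraph.Graph, so the nodes set up above can be enumerated via
# the vs vertex sequence. Requires HRewrite_doubleLHS to be importable.
if __name__ == "__main__":
    rule = HRewrite_doubleRHS()
    for v in rule.vs:
        # igraph returns None for attributes a vertex does not define
        print("node %d: mm__=%s MT_label__=%s" % (v.index, v["mm__"], v["MT_label__"]))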
weights in network
- options are core, accessory or euclidean distance
Returns:
processed_weights (list)
Edge weights
"""
processed_weights = []
if weights_type is not None and distMat is not None:
# Check weights type is valid
if weights_type not in accepted_weights_types:
sys.stderr.write("Unable to calculate distance type " + str(weights_type) + "; "
"accepted types are " + str(accepted_weights_types) + "\n")
if weights_type == 'euclidean':
processed_weights = np.linalg.norm(distMat, axis = 1).tolist()
elif weights_type == 'core':
processed_weights = distMat[:, 0].tolist()
elif weights_type == 'accessory':
processed_weights = distMat[:, 1].tolist()
else:
sys.stderr.write('Require distance matrix to calculate distances\n')
return processed_weights
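# A usage sketch under two assumptions: the truncated signature above is
# process_weights(distMat, weights_type), and accepted_weights_types is the
# module-level list ['core', 'accessory', 'euclidean'] implied by the checks.
import numpy as np

distMat = np.array([[0.1, 0.4],
                    [0.2, 0.3]])
core_weights = process_weights(distMat, 'core')         # [0.1, 0.2]
euclid_weights = process_weights(distMat, 'euclidean')  # row-wise L2 norms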
def process_previous_network(previous_network = None, adding_qq_dists = False, old_ids = None,
previous_pkl = None, vertex_labels = None, weights = False, use_gpu = False):
"""Extract edge types from an existing network
Args:
previous_network (str or graph object)
Name of file containing a previous network to be integrated into this new
network, or already-loaded graph object
adding_qq_dists (bool)
Boolean specifying whether query-query edges are being added
to an existing network, such that not all the sequence IDs will
be found in the old IDs, which should already be correctly ordered
old_ids (list)
Ordered list of vertex names in previous network
previous_pkl (str)
Name of file containing the names of the sequences in the previous_network
ordered based on the original network construction
vertex_labels (list)
Ordered list of sequence labels
weights (bool)
Whether weights should be extracted from the previous network
use_gpu (bool)
Whether to use GPUs for network construction
Returns:
extra_sources (list)
List of source node identifiers
extra_targets (list)
List of destination node identifiers
extra_weights (list or None)
List of edge weights
"""
if previous_pkl is not None or old_ids is not None:
if weights:
# Extract from network
extra_sources, extra_targets, extra_weights = network_to_edges(previous_network,
vertex_labels,
adding_qq_dists = adding_qq_dists,
old_ids = old_ids,
previous_pkl = previous_pkl,
weights = True,
use_gpu = use_gpu)
else:
# Extract from network
extra_sources, extra_targets = network_to_edges(previous_network,
vertex_labels,
adding_qq_dists = adding_qq_dists,
old_ids = old_ids,
previous_pkl = previous_pkl,
weights = False,
use_gpu = use_gpu)
extra_weights = None
else:
sys.stderr.write('A distance pkl or an ordered ID list corresponding to ' + str(previous_network) + ' is required for loading\n')
sys.exit(1)
return extra_sources, extra_targets, extra_weights
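# A hypothetical call (file names are placeholders): re-extract the edges of a
# previously saved network so they can be merged into a new one.
extra_srcs, extra_tgts, extra_wts = process_previous_network(
    previous_network="previous_network.gt",
    old_ids=None,
    previous_pkl="previous_network.dists.pkl",
    vertex_labels=["sample1", "sample2", "sample3"],
    weights=True,
    use_gpu=False)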
def construct_network_from_edge_list(rlist,
qlist,
edge_list,
weights = None,
distMat = None,
previous_network = None,
adding_qq_dists = False,
old_ids = None,
previous_pkl = None,
betweenness_sample = betweenness_sample_default,
summarise = True,
use_gpu = False):
"""Construct an undirected network using a data frame of edges. Nodes are samples and
edges where samples are within the same cluster
Will print summary statistics about the network to ``STDERR``
Args:
rlist (list)
List of reference sequence labels
qlist (list)
List of query sequence labels
edge_list (list of tuples)
List of (source, destination) node index tuples defining the edges
weights (list)
List of edge weights
distMat (2 column ndarray)
Numpy array of pairwise distances
previous_network (str or graph object)
Name of file containing a previous network to be integrated into this new
network, or the already-loaded graph object
adding_qq_dists (bool)
Boolean specifying whether query-query edges are being added
to an existing network, such that not all the sequence IDs will
be found in the old IDs, which should already be correctly ordered
old_ids (list)
Ordered list of vertex names in previous network
previous_pkl (str)
Name of file containing the names of the sequences in the previous_network
betweenness_sample (int)
Number of sequences per component used to estimate betweenness using
a GPU. Smaller numbers are faster but less precise [default = 100]
summarise (bool)
Whether to calculate and print network summaries with :func:`~networkSummary`
(default = True)
use_gpu (bool)
Whether to use GPUs for network construction
Returns:
G (graph)
The resulting network
"""
# Check GPU library use
use_gpu = check_and_set_gpu(use_gpu, gpu_lib, quit_on_fail = True)
# data structures
vertex_labels, self_comparison = initial_graph_properties(rlist, qlist)
# Create new network
if use_gpu:
# benchmarking concurs with https://stackoverflow.com/questions/55922162/recommended-cudf-dataframe-construction
if len(edge_list) > 1:
edge_array = cp.array(edge_list, dtype = np.int32)
edge_gpu_matrix = cuda.to_device(edge_array)
G_df = cudf.DataFrame(edge_gpu_matrix, columns = ['source','destination'])
else:
# Cannot generate an array when one edge
G_df = cudf.DataFrame(columns = ['source','destination'])
G_df['source'] = [edge_list[0][0]]
G_df['destination'] = [edge_list[0][1]]
if weights is not None:
G_df['weights'] = weights
G = construct_network_from_df(rlist, qlist, G_df,
weights = (weights is not None),
distMat = distMat,
adding_qq_dists = adding_qq_dists,
old_ids = old_ids,
previous_network = previous_network,
previous_pkl = previous_pkl,
summarise = False,
use_gpu = use_gpu)
else:
# Load previous network
if previous_network is not None:
extra_sources, extra_targets, extra_weights = \
process_previous_network(previous_network = previous_network,
adding_qq_dists = adding_qq_dists,
old_ids = old_ids,
previous_pkl = previous_pkl,
vertex_labels = vertex_labels,
weights = (weights is not None),
use_gpu = use_gpu)
# Construct list of tuples for graph-tool
# Include information from previous graph if supplied
if weights is not None:
weighted_edges = []
for ((src, dest), weight) in zip(edge_list, weights):
weighted_edges.append((src, dest, weight))
if previous_network is not None:
for (src, dest, weight) in zip(extra_sources, extra_targets, extra_weights):
weighted_edges.append((src, dest, weight))
edge_list = weighted_edges
else:
if previous_network is not None:
for (src, dest) in zip(extra_sources, extra_targets):
edge_list.append((src, dest))
# build the graph
G = gt.Graph(directed = False)
G.add_vertex(len(vertex_labels))
if weights is not None:
eweight = G.new_ep("float")
G.add_edge_list(edge_list, eprops = [eweight])
G.edge_properties["weight"] = eweight
else:
G.add_edge_list(edge_list)
if summarise:
print_network_summary(G, betweenness_sample = betweenness_sample, use_gpu = use_gpu)
return G
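# A minimal CPU-path sketch (graph-tool backend); edge tuples index into the
# combined rlist + qlist ordering produced by initial_graph_properties().
ref_names = ["ref1", "ref2", "ref3"]
G = construct_network_from_edge_list(ref_names, ref_names,
                                     [(0, 1), (1, 2)],
                                     weights=[0.05, 0.10],
                                     summarise=False)
print(G.num_vertices(), G.num_edges())  # 3 2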
def construct_network_from_df(rlist,
qlist,
G_df,
weights = False,
distMat = None,
previous_network = None,
adding_qq_dists = False,
old_ids = None,
previous_pkl = None,
betweenness_sample = betweenness_sample_default,
summarise = True,
use_gpu = False):
"""Construct an undirected network using a data frame of edges. Nodes are samples and
edges connect samples within the same cluster
Will print summary statistics about the network to ``STDERR``
Args:
rlist (list)
List of reference sequence labels
qlist (list)
List of query sequence labels
G_df (cudf or pandas data frame)
Data frame in which the first two columns are the nodes linked by edges
weights (bool)
Whether weights in the G_df data frame should be included in the network
distMat (2 column ndarray)
Numpy array of pairwise distances
previous_network (str or graph object)
Name of file containing a previous network to be integrated into this new
network, or the already-loaded graph object
adding_qq_dists (bool)
Boolean specifying whether query-query edges are being added
to an existing network, such that not all the sequence IDs will
be found in the old IDs, which should already be correctly ordered
old_ids (list)
Ordered list of vertex names in previous network
previous_pkl (str)
Name of file containing the names of the sequences in the previous_network
betweenness_sample (int)
Number of sequences per component used to estimate betweenness using
a GPU. Smaller numbers are faster but less precise [default = 100]
summarise (bool)
Whether to calculate and print network summaries with :func:`~networkSummary`
(default = True)
use_gpu (bool)
Whether to use GPUs for network construction
Returns:
G (graph)
The resulting network
"""
# Check GPU library use
use_gpu = check_and_set_gpu(use_gpu, gpu_lib, quit_on_fail = True)
# data structures
vertex_labels, self_comparison = initial_graph_properties(rlist, qlist)
# Check df format is correct
if weights:
G_df.columns = ['source','destination','weights']
else:
G_df.columns = ['source','destination']
# Load previous network
if previous_network is not None:
extra_sources, extra_targets, extra_weights = process_previous_network(previous_network = previous_network,
adding_qq_dists = adding_qq_dists,
old_ids = old_ids,
previous_pkl = previous_pkl,
vertex_labels = vertex_labels,
weights = weights,
use_gpu = use_gpu)
if use_gpu:
G_extra_df = cudf.DataFrame()
else:
G_extra_df = pd.DataFrame()
G_extra_df['source'] = extra_sources
G_extra_df['destination'] = extra_targets
if extra_weights is not None:
G_extra_df['weights'] = extra_weights
if use_gpu:
    G_df = cudf.concat([G_df, G_extra_df], ignore_index = True)
else:
    G_df = pd.concat([G_df, G_extra_df], ignore_index = True)
if use_gpu:
# direct conversion
# ensure the highest-integer node is included in the edge list
# by adding a self-loop if necessary; see https://github.com/rapidsai/cugraph/issues/1206
max_in_vertex_labels = len(vertex_labels)-1
use_weights = False
if weights:
use_weights = True
G = add_self_loop(G_df, max_in_vertex_labels, weights = use_weights, renumber = False)
else:
# Convert bool to list of weights or None
if weights:
weights = G_df['weights']
else:
weights = None
# Convert data frame to list of tuples
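# A sketch of the assumed continuation (the original is truncated here): turn
# the data frame rows into the tuple list that graph-tool's add_edge_list expects.
if weights is not None:
    edge_list = list(G_df[['source', 'destination', 'weights']].itertuples(index=False, name=None))
else:
    edge_list = list(G_df[['source', 'destination']].itertuples(index=False, name=None))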
# coding: utf-8
"""
Mailchimp Marketing API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: 3.0.74
Contact: <EMAIL>
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class EcommerceStore(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'id': 'str',
'list_id': 'str',
'name': 'str',
'platform': 'str',
'domain': 'str',
'is_syncing': 'bool',
'email_address': 'str',
'currency_code': 'str',
'money_format': 'str',
'primary_locale': 'str',
'timezone': 'str',
'phone': 'str',
'address': 'Address1',
'connected_site': 'ConnectedSite2',
'automations': 'Automations',
'list_is_active': 'bool',
'created_at': 'datetime',
'updated_at': 'datetime',
'links': 'list[ResourceLink]'
}
attribute_map = {
'id': 'id',
'list_id': 'list_id',
'name': 'name',
'platform': 'platform',
'domain': 'domain',
'is_syncing': 'is_syncing',
'email_address': 'email_address',
'currency_code': 'currency_code',
'money_format': 'money_format',
'primary_locale': 'primary_locale',
'timezone': 'timezone',
'phone': 'phone',
'address': 'address',
'connected_site': 'connected_site',
'automations': 'automations',
'list_is_active': 'list_is_active',
'created_at': 'created_at',
'updated_at': 'updated_at',
'links': '_links'
}
def __init__(self, id=None, list_id=None, name=None, platform=None, domain=None, is_syncing=None, email_address=None, currency_code=None, money_format=None, primary_locale=None, timezone=None, phone=None, address=None, connected_site=None, automations=None, list_is_active=None, created_at=None, updated_at=None, links=None): # noqa: E501
"""EcommerceStore - a model defined in Swagger""" # noqa: E501
self._id = None
self._list_id = None
self._name = None
self._platform = None
self._domain = None
self._is_syncing = None
self._email_address = None
self._currency_code = None
self._money_format = None
self._primary_locale = None
self._timezone = None
self._phone = None
self._address = None
self._connected_site = None
self._automations = None
self._list_is_active = None
self._created_at = None
self._updated_at = None
self._links = None
self.discriminator = None
if id is not None:
self.id = id
if list_id is not None:
self.list_id = list_id
if name is not None:
self.name = name
if platform is not None:
self.platform = platform
if domain is not None:
self.domain = domain
if is_syncing is not None:
self.is_syncing = is_syncing
if email_address is not None:
self.email_address = email_address
if currency_code is not None:
self.currency_code = currency_code
if money_format is not None:
self.money_format = money_format
if primary_locale is not None:
self.primary_locale = primary_locale
if timezone is not None:
self.timezone = timezone
if phone is not None:
self.phone = phone
if address is not None:
self.address = address
if connected_site is not None:
self.connected_site = connected_site
if automations is not None:
self.automations = automations
if list_is_active is not None:
self.list_is_active = list_is_active
if created_at is not None:
self.created_at = created_at
if updated_at is not None:
self.updated_at = updated_at
if links is not None:
self.links = links
@property
def id(self):
"""Gets the id of this EcommerceStore. # noqa: E501
The unique identifier for the store. # noqa: E501
:return: The id of this EcommerceStore. # noqa: E501
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this EcommerceStore.
The unique identifier for the store. # noqa: E501
:param id: The id of this EcommerceStore. # noqa: E501
:type: str
"""
self._id = id
@property
def list_id(self):
"""Gets the list_id of this EcommerceStore. # noqa: E501
The unique identifier for the list that's associated with the store. The `list_id` for a specific store can't change. # noqa: E501
:return: The list_id of this EcommerceStore. # noqa: E501
:rtype: str
"""
return self._list_id
@list_id.setter
def list_id(self, list_id):
"""Sets the list_id of this EcommerceStore.
The unique identifier for the list that's associated with the store. The `list_id` for a specific store can't change. # noqa: E501
:param list_id: The list_id of this EcommerceStore. # noqa: E501
:type: str
"""
self._list_id = list_id
@property
def name(self):
"""Gets the name of this EcommerceStore. # noqa: E501
The name of the store. # noqa: E501
:return: The name of this EcommerceStore. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this EcommerceStore.
The name of the store. # noqa: E501
:param name: The name of this EcommerceStore. # noqa: E501
:type: str
"""
self._name = name
@property
def platform(self):
"""Gets the platform of this EcommerceStore. # noqa: E501
The e-commerce platform of the store. # noqa: E501
:return: The platform of this EcommerceStore. # noqa: E501
:rtype: str
"""
return self._platform
@platform.setter
def platform(self, platform):
"""Sets the platform of this EcommerceStore.
The e-commerce platform of the store. # noqa: E501
:param platform: The platform of this EcommerceStore. # noqa: E501
:type: str
"""
self._platform = platform
@property
def domain(self):
"""Gets the domain of this EcommerceStore. # noqa: E501
The store domain. The store domain must be unique within a user account. # noqa: E501
:return: The domain of this EcommerceStore. # noqa: E501
:rtype: str
"""
return self._domain
@domain.setter
def domain(self, domain):
"""Sets the domain of this EcommerceStore.
The store domain. The store domain must be unique within a user account. # noqa: E501
:param domain: The domain of this EcommerceStore. # noqa: E501
:type: str
"""
self._domain = domain
@property
def is_syncing(self):
"""Gets the is_syncing of this EcommerceStore. # noqa: E501
Whether to disable automations because the store is currently [syncing](https://mailchimp.com/developer/marketing/docs/e-commerce/#pausing-store-automations). # noqa: E501
:return: The is_syncing of this EcommerceStore. # noqa: E501
:rtype: bool
"""
return self._is_syncing
@is_syncing.setter
def is_syncing(self, is_syncing):
"""Sets the is_syncing of this EcommerceStore.
Whether to disable automations because the store is currently [syncing](https://mailchimp.com/developer/marketing/docs/e-commerce/#pausing-store-automations). # noqa: E501
:param is_syncing: The is_syncing of this EcommerceStore. # noqa: E501
:type: bool
"""
self._is_syncing = is_syncing
@property
def email_address(self):
"""Gets the email_address of this EcommerceStore. # noqa: E501
The email address for the store. # noqa: E501
:return: The email_address of this EcommerceStore. # noqa: E501
:rtype: str
"""
return self._email_address
@email_address.setter
def email_address(self, email_address):
"""Sets the email_address of this EcommerceStore.
The email address for the store. # noqa: E501
:param email_address: The email_address of this EcommerceStore. # noqa: E501
:type: str
"""
self._email_address = email_address
@property
def currency_code(self):
"""Gets the currency_code of this EcommerceStore. # noqa: E501
The three-letter ISO 4217 code for the currency that the store accepts. # noqa: E501
:return: The currency_code of this EcommerceStore. # noqa: E501
:rtype: str
"""
return self._currency_code
@currency_code.setter
def currency_code(self, currency_code):
"""Sets the currency_code of this EcommerceStore.
The three-letter ISO 4217 code for the currency that the store accepts. # noqa: E501
:param currency_code: The currency_code of this EcommerceStore. # noqa: E501
:type: str
"""
self._currency_code = currency_code
@property
def money_format(self):
"""Gets the money_format of this EcommerceStore. # noqa: E501
The currency format for the store. For example: `$`, `£`, etc. # noqa: E501
:return: The money_format of this EcommerceStore. # noqa: E501
:rtype: str
"""
return self._money_format
@money_format.setter
def money_format(self, money_format):
"""Sets the money_format of this EcommerceStore.
The currency format for the store. For example: `$`, `£`, etc. # noqa: E501
:param money_format: The money_format of this EcommerceStore. # noqa: E501
:type: str
"""
self._money_format = money_format
@property
def primary_locale(self):
"""Gets the primary_locale of this EcommerceStore. # noqa: E501
The primary locale for the store. For example: `en`, `de`, etc. # noqa: E501
:return: The primary_locale of this EcommerceStore. # noqa: E501
:rtype: str
"""
return self._primary_locale
@primary_locale.setter
def primary_locale(self, primary_locale):
"""Sets the primary_locale of this EcommerceStore.
The primary locale for the store. For example: `en`, `de`, etc. # noqa: E501
:param primary_locale: The primary_locale of this EcommerceStore. # noqa: E501
:type: str
"""
self._primary_locale = primary_locale
@property
def timezone(self):
"""Gets the timezone of this EcommerceStore. # noqa: E501
The timezone for the store. # noqa: E501
:return: The timezone of this EcommerceStore. # noqa: E501
:rtype: str
"""
return self._timezone
@timezone.setter
def timezone(self, timezone):
"""Sets the timezone of this EcommerceStore.
The timezone for the store. # noqa: E501
:param timezone: The timezone of this EcommerceStore. # noqa: E501
:type: str
"""
self._timezone = timezone
@property
def phone(self):
"""Gets the phone of this EcommerceStore. # noqa: E501
The store phone number. # noqa: E501
:return: The phone of this EcommerceStore. # noqa: E501
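# Hypothetical usage of the generated model (all values are placeholders):
store = EcommerceStore(id="store_001",
                       list_id="4ca5becb8d",
                       name="Demo Store",
                       platform="custom",
                       domain="shop.example.com",
                       currency_code="USD")
print(store.name)                          # "Demo Store"
print(store.swagger_types["created_at"])   # "datetime"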
import re
import logging
import argparse
import ruamel.yaml
import pandas as pd
from python_tools.constants import VARIANTS_INPUTS, SV_INPUTS, VERSION_PARAM
from python_tools.util import (
find_bams_in_directory,
include_yaml_resources,
include_version_info,
create_yaml_file_objects,
extract_sample_id_from_bam_path
)
##########
# Pipeline Inputs generation for the ACCESS-Variants pipeline
#
# Todo:
# - better way to ensure proper sort order of samples
# - combine this with create_ scripts
# - singularity
#
# Usage:
#
# generate_access_variants_inputs \
# -pn \
# Variant_Calling_Project \
# -o \
# inputs.yaml \
# -dn default_hi_depth_normal__cl_aln_srt_MD_IR_FX_BR.bam \
# -p \
# ./test_pairs.tsv \
# -tb \
# ~/PROJECT_tumor_bams/duplex_bams \
# -nb \
# ~/PROJECT_normal_bams/duplex_bams \
# -sb \
# ~/PROJECT_normal_bams/simplex_bams \
# -cbd \
# ~/ACCESSv1-VAL-20180003_curated_bams \
# -cbs \
# ~/ACCESSv1-VAL-20180003_curated_bams_simplex
# -m
# Regex for finding bam files
BAM_REGEX = re.compile('.*\.bam')
# Delimiter for printing logs
DELIMITER = '\n' + '*' * 20 + '\n'
# Delimiter for inputs file sections
INPUTS_FILE_DELIMITER = '\n\n' + '# ' + '--' * 30 + '\n\n'
logging.basicConfig(
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
datefmt='%m/%d/%Y %I:%M:%S %p',
level=logging.DEBUG)
logger = logging.getLogger('access_variants_pipeline_kickoff')
def parse_arguments():
"""
Parse arguments for Variant calling pipeline inputs generation
:return: argparse.ArgumentParser object
"""
parser = argparse.ArgumentParser()
parser.add_argument(
'-o',
'--output_file_name',
help='Filename for yaml file to be used as pipeline inputs',
required=True
)
parser.add_argument(
'-pn',
'--project_name',
help='Project name for this run',
required=True
)
parser.add_argument(
'-m',
'--matched_mode',
action='store_true',
help='Create inputs from matched T/N pairs (True), or use default Normal (False)',
required=False
)
parser.add_argument(
'-p',
'--pairing_file_path',
help='tsv file with tumor sample IDs mapped to normal sample IDs',
required=False
)
parser.add_argument(
'-dn',
'--default_normal_path',
help='Normal used in unmatched mode, or in matched mode if no matching normal found for tumor sample',
required=True
)
parser.add_argument(
'-tb',
'--tumor_bams_directory',
help='Directory that contains all tumor bams to be used in variant calling',
required=True
)
parser.add_argument(
'-nb',
'--normal_bams_directory',
help='Directory that contains all normal bams to be used in variant calling and genotyping '
'(if using matched mode, otherwise only used for genotyping)',
required=False
)
parser.add_argument(
'-sb',
'--simplex_bams_directory',
help='Directory that contains additional simplex bams to be used for genotyping',
required=True
)
# Note: For ACCESS, we will often genotype from the same folders of curated bams
parser.add_argument(
'-cbd',
'--curated_bams_duplex_directory',
help='Directory that contains additional duplex curated bams to be used for genotyping',
required=True
)
parser.add_argument(
'-cbs',
'--curated_bams_simplex_directory',
help='Directory that contains additional simplex curated bams to be used for genotyping',
required=True
)
parser.add_argument(
'-stdb',
'--standard_bams_directory',
help='If you would like SV calling, this is the directory that contains standard bams to be paired with the \
default normal. Note: This argument is to be paired with the ACCESS_Variants.cwl workflow.',
required=False
)
parser.add_argument(
'-dstdn',
'--default_stdnormal_path',
help='Normal used in unmatched mode for structural variant calling',
required=False
)
args = parser.parse_args()
return args
def validate_pairing_file(pairing_file, tumor_samples, normal_samples):
"""
Validate T/N pairs
1. We allow normal_id to be blank in pairing file
2. If normal_id is not blank, and id is not found in `normal_samples`, raise error
3. Tumor ID can never be blank
4. Tumor ID must be found in tumor_samples
5. If both are found, continue
:param pairing_file:
:param tumor_samples: str[] of tumor bam files paths
:param normal_samples: str[] of normal bam files paths
:return:
"""
for i, tn_pair in pairing_file.iterrows():
tumor_id = tn_pair['tumor_id']
normal_id = tn_pair['normal_id']
assert tumor_id, 'Missing tumor sample ID in pairing file'
# Find the path to the bam that contains this tumor sample ID
tumor_sample = filter(lambda t: tumor_id in t, tumor_samples)
assert len(tumor_sample) == 1, 'Incorrect # of matches for tumor sample {}'.format(tumor_sample)
if normal_id and normal_id != '':
normal_sample = filter(lambda n: normal_id in n, normal_samples)
assert len(normal_sample) == 1, 'Incorrect # of matches ({}) for paired normal for tumor sample {}'.format(len(normal_sample), tumor_sample)
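# A quick sketch of a passing validation (paths are placeholders and only need
# to contain the sample IDs). Note this module is Python 2: the
# len(filter(...)) calls above rely on filter returning a list.
pairs = pd.DataFrame({'tumor_id': ['Sample_T1'], 'normal_id': ['Sample_N1']})
validate_pairing_file(pairs,
                      ['/bams/Sample_T1.bam'],
                      ['/bams/Sample_N1.bam'])  # no AssertionError raised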
def parse_tumor_normal_pairing(pairing_file, tumor_samples, normal_samples, default_normal_path):
"""
Build tumor-normal pairs from pairing file and tumor / normal bam directories.
Default to `default_normal_path` if matched normal not found.
:param pairing_file: pandas data frame of tumor_id / normal_id pairs
:param tumor_samples: str[] of tumor bam file paths
:param normal_samples: str[] of normal bam file paths
:param default_normal_path: path to the fallback normal bam
:return:
"""
ordered_tumor_samples = []
ordered_normal_samples = []
ordered_fillout_samples = []
# This flag will prevent us from trying to genotype the default normal more than once
default_added_for_genotyping = False
for i, tn_pair in pairing_file.iterrows():
tumor_id = tn_pair['tumor_id']
normal_id = tn_pair['normal_id']
# Find the path to the bam that contains this tumor sample ID
# (after pairing file validation this should return exactly 1 result)
tumor_sample = filter(lambda t: tumor_id in t, tumor_samples)[0]
# Leaving the normal ID blank will cause the default normal to be used
# Only tumor is used for genotyping
if normal_id == '':
ordered_tumor_samples.append(tumor_sample)
ordered_normal_samples.append(default_normal_path)
ordered_fillout_samples.append(tumor_sample)
if not default_added_for_genotyping:
ordered_fillout_samples.append(default_normal_path)
default_added_for_genotyping = True
# Use the matching normal bam that contains this normal sample ID
# Both samples are added for genotyping
elif any(normal_id in n for n in normal_samples):
matching_normal_samples = filter(lambda n: normal_id in n, normal_samples)
if len(matching_normal_samples) > 1:
# If we have multiple matches for this normal sample ID, make sure that they are exactly the same,
# to avoid the following case: Sample_1 != Sample_1A
assert all([all([x == y for x in matching_normal_samples]) for y in matching_normal_samples])
normal_sample = matching_normal_samples[0]
ordered_tumor_samples.append(tumor_sample)
ordered_normal_samples.append(normal_sample)
ordered_fillout_samples.append(tumor_sample)
# Only genotype each normal once, even if it is paired with multiple tumors
if normal_sample not in ordered_fillout_samples:
ordered_fillout_samples.append(normal_sample)
return ordered_tumor_samples, ordered_normal_samples, ordered_fillout_samples
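# A worked sketch: one matched pair plus one tumor with a blank normal that
# falls back to the default normal (paths are placeholders).
pairing = pd.DataFrame({'tumor_id': ['T1', 'T2'], 'normal_id': ['N1', '']})
tumors, normals, fillout = parse_tumor_normal_pairing(
    pairing,
    ['/bams/T1.bam', '/bams/T2.bam'],
    ['/bams/N1.bam'],
    '/bams/default_normal.bam')
# tumors  -> ['/bams/T1.bam', '/bams/T2.bam']
# normals -> ['/bams/N1.bam', '/bams/default_normal.bam']
# fillout -> ['/bams/T1.bam', '/bams/N1.bam', '/bams/T2.bam', '/bams/default_normal.bam']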
def create_inputs_file(args):
"""
Create the inputs.yaml file for the ACCESS Variant calling pipeline (modules 3 + 4)
:param args: argparse.ArgumentParser object
"""
validate_args(args)
tumor_bam_paths = find_bams_in_directory(args.tumor_bams_directory)
simplex_bam_paths = find_bams_in_directory(args.simplex_bams_directory)
curated_bam_duplex_paths = find_bams_in_directory(args.curated_bams_duplex_directory)
curated_bam_simplex_paths = find_bams_in_directory(args.curated_bams_simplex_directory)
# Normal bams paths are either from the bams directory, or repeating the default normal
# Todo: remove! this logic should be based on the args.matched_mode param
if args.normal_bams_directory:
normal_bam_paths = find_bams_in_directory(args.normal_bams_directory)
else:
normal_bam_paths = [args.default_normal_path] * len(tumor_bam_paths)
fh = open(args.output_file_name, 'w')
write_yaml_bams(
fh,
args,
tumor_bam_paths,
normal_bam_paths,
simplex_bam_paths,
curated_bam_duplex_paths,
curated_bam_simplex_paths,
)
include_yaml_resources(fh, VARIANTS_INPUTS)
if args.standard_bams_directory:
include_sv_inputs(args, fh)
fh.write('project_name: {}'.format(args.project_name))
fh.write(INPUTS_FILE_DELIMITER)
try:
include_yaml_resources(fh, VERSION_PARAM)
except IOError:
# that is if version.yaml is absent
fh.write(INPUTS_FILE_DELIMITER)
fh.write("# Pipeline Run Version:\n")
include_version_info(fh)
fh.close()
def write_yaml_bams(
fh,
args,
tumor_bam_paths,
normal_bam_paths,
simplex_bam_paths,
curated_bam_duplex_paths,
curated_bam_simplex_paths
):
"""
Write the lists of tumor, normal, and genotyping bams to the inputs file, along with their sample IDs
Todo: clean this up a bit
:param fh: inputs file file handle
:param args: argparse.ArgumentParser object with bam directory attribute
:return:
"""
# 1. Build lists of bams
if args.pairing_file_path:
pairing_file = pd.read_csv(args.pairing_file_path, sep='\t', header='infer').fillna('')
# Filter simplex bams to only those needed from pairing file
tumor_bam_ids = pairing_file['tumor_id']
simplex_bam_paths = [s for s in simplex_bam_paths if any([id in s for id in tumor_bam_ids])]
validate_pairing_file(pairing_file, tumor_bam_paths, normal_bam_paths)
ordered_tumor_bams, ordered_normal_bams, ordered_tn_genotyping_bams = parse_tumor_normal_pairing(
pairing_file,
tumor_bam_paths,
normal_bam_paths,
args.default_normal_path
)
if not args.matched_mode:
# If we aren't in matched mode, do variant calling with default normal
# (pairing file is only used for genotyping)
ordered_normal_bams = [args.default_normal_path] * len(ordered_tumor_bams)
# Todo: Need to genotype default normal?
# ordered_tn_genotyping_bams = ordered_tn_genotyping_bams + [args.default_normal_path]
matched_normal_ids = [n for n in pairing_file['normal_id']]
matched_normal_ids = [correct_sample_id(n, normal_bam_paths) if n else '' for n in matched_normal_ids]
else:
# In unmatched mode, the sample pairing is much simpler (just use the supplied default normal)
ordered_tumor_bams = tumor_bam_paths
ordered_normal_bams = [args.default_normal_path] * len(tumor_bam_paths)
# Only add the default normal once
ordered_tn_genotyping_bams = ordered_tumor_bams + [args.default_normal_path]
matched_normal_ids = [''] * len(ordered_tumor_bams)
# 2. Build lists of Sample IDs
if args.matched_mode:
# Use pairing file in matched mode
tumor_sample_ids = [correct_sample_id(t, ordered_tumor_bams) for t in pairing_file['tumor_id']]
normal_sample_ids = [n if n else extract_sample_id_from_bam_path(args.default_normal_path) for n in pairing_file['normal_id']]
elif args.pairing_file_path:
# Pairing file supplied but unmatched mode: tumors keep their IDs, the default normal is used
tumor_sample_ids = [correct_sample_id(t, ordered_tumor_bams) for t in pairing_file['tumor_id']]
normal_sample_ids = [extract_sample_id_from_bam_path(args.default_normal_path)] * len(tumor_sample_ids)
else:
# Otherwise use default normal
tumor_sample_ids = [extract_sample_id_from_bam_path(b) for b in tumor_bam_paths]
normal_sample_ids = [extract_sample_id_from_bam_path(args.default_normal_path)] * len(tumor_sample_ids)
# 3. Convert bam paths to CWL File objects
tumor_bams = create_yaml_file_objects(ordered_tumor_bams)
normal_bams = create_yaml_file_objects(ordered_normal_bams)
tn_genotyping_bams = create_yaml_file_objects(ordered_tn_genotyping_bams)
simplex_genotyping_bams = create_yaml_file_objects(simplex_bam_paths)
curated_duplex_genotyping_bams = create_yaml_file_objects(curated_bam_duplex_paths)
curated_simplex_genotyping_bams = create_yaml_file_objects(curated_bam_simplex_paths)
# 4. Genotyping sample IDs must be extracted from the bams themselves
merged_tn_sample_ids = [extract_sample_id_from_bam_path(b['path']) for b in tn_genotyping_bams]
simplex_genotyping_ids = [extract_sample_id_from_bam_path(b['path']) + '-SIMPLEX' for b in simplex_genotyping_bams]
curated_duplex_genotyping_ids = [extract_sample_id_from_bam_path(b['path']) + '-CURATED-DUPLEX' for b in curated_duplex_genotyping_bams]
curated_simplex_genotyping_ids = [extract_sample_id_from_bam_path(b['path']) + '-CURATED-SIMPLEX' for b in curated_simplex_genotyping_bams]
genotyping_bams = tn_genotyping_bams + simplex_genotyping_bams + curated_duplex_genotyping_bams + curated_simplex_genotyping_bams
genotyping_bams_ids = merged_tn_sample_ids + simplex_genotyping_ids + curated_duplex_genotyping_ids + curated_simplex_genotyping_ids
# tests/macsec/test_macsec.py
from time import sleep
import pytest
import logging
import re
import scapy.all as scapy
import ptf.testutils as testutils
from tests.common.utilities import wait_until
from tests.common.devices.eos import EosHost
from macsec_helper import *
from macsec_config_helper import *
from macsec_platform_helper import *
logger = logging.getLogger(__name__)
pytestmark = [
pytest.mark.macsec_required,
pytest.mark.topology("t0", "t2"),
]
class TestControlPlane():
def test_wpa_supplicant_processes(self, duthost, ctrl_links):
def _test_wpa_supplicant_processes():
for port_name, nbr in ctrl_links.items():
check_wpa_supplicant_process(duthost, port_name)
if isinstance(nbr["host"], EosHost):
continue
check_wpa_supplicant_process(nbr["host"], nbr["port"])
return True
assert wait_until(300, 1, 1, _test_wpa_supplicant_processes)
def test_appl_db(self, duthost, ctrl_links, policy, cipher_suite, send_sci):
def _test_appl_db():
for port_name, nbr in ctrl_links.items():
if isinstance(nbr["host"], EosHost):
continue
check_appl_db(duthost, port_name, nbr["host"],
nbr["port"], policy, cipher_suite, send_sci)
return True
assert wait_until(300, 6, 12, _test_appl_db)
def test_mka_session(self, duthost, ctrl_links, policy, cipher_suite, send_sci):
def _test_mka_session():
# "get mka session" relies on "ip macsec show", which is only available on
# virtual switches, so skip this check on a physical switch
# TODO: Support "get mka session" on physical switches
if u"x86_64-kvm_x86_64" not in get_platform(duthost):
# TODO: add a real mka session check later; for now wait some time for the session to be ready
sleep(30)
logging.info(
"Skip the mka session check because the DUT isn't a virtual switch")
return True
dut_mka_session = get_mka_session(duthost)
assert len(dut_mka_session) == len(ctrl_links)
for port_name, nbr in ctrl_links.items():
if isinstance(nbr["host"], EosHost):
assert nbr["host"].iface_macsec_ok(nbr["port"])
continue
nbr_mka_session = get_mka_session(nbr["host"])
dut_macsec_port = get_macsec_ifname(duthost, port_name)
nbr_macsec_port = get_macsec_ifname(
nbr["host"], nbr["port"])
dut_macaddress = duthost.get_dut_iface_mac(port_name)
nbr_macaddress = nbr["host"].get_dut_iface_mac(nbr["port"])
dut_sci = get_sci(dut_macaddress, order="host")
nbr_sci = get_sci(nbr_macaddress, order="host")
check_mka_session(dut_mka_session[dut_macsec_port], dut_sci,
nbr_mka_session[nbr_macsec_port], nbr_sci,
policy, cipher_suite, send_sci)
return True
assert wait_until(300, 5, 3, _test_mka_session)
def test_rekey_by_period(self, duthost, ctrl_links, upstream_links, rekey_period):
if rekey_period == 0:
pytest.skip("If the rekey period is 0 which means rekey by period isn't active.")
assert len(ctrl_links) > 0
# Only pick one link to test
port_name, nbr = ctrl_links.items()[0]
_, _, _, last_dut_egress_sa_table, last_dut_ingress_sa_table = get_appl_db(
duthost, port_name, nbr["host"], nbr["port"])
up_link = upstream_links[port_name]
output = duthost.command("ping {} -w {} -q -i 0.1".format(up_link["local_ipv4_addr"], rekey_period * 2))["stdout_lines"]
_, _, _, new_dut_egress_sa_table, new_dut_ingress_sa_table = get_appl_db(
duthost, port_name, nbr["host"], nbr["port"])
assert last_dut_egress_sa_table != new_dut_egress_sa_table
assert last_dut_ingress_sa_table != new_dut_ingress_sa_table
assert float(re.search(r"([\d\.]+)% packet loss", output[-2]).group(1)) < 1.0
class TestDataPlane():
BATCH_COUNT = 10
def test_server_to_neighbor(self, duthost, ctrl_links, downstream_links, upstream_links, ptfadapter):
ptfadapter.dataplane.set_qlen(TestDataPlane.BATCH_COUNT * 10)
down_link = downstream_links.values()[0]
dut_macaddress = duthost.get_dut_iface_mac(ctrl_links.keys()[0])
setattr(ptfadapter, "force_reload_macsec", True)
for portchannel in get_portchannel(duthost).values():
members = portchannel["members"]
if not members:
continue
is_protected_link = members[0] in ctrl_links
peer_ports = []
ptf_injected_ports = []
for port_name in members:
if is_protected_link:
assert port_name in ctrl_links
peer_ports.append(
int(re.search(r"(\d+)", ctrl_links[port_name]["port"]).group(1)))
ptf_injected_ports.append(
upstream_links[port_name]["ptf_port_id"])
else:
assert port_name not in ctrl_links
if not is_protected_link:
continue
up_link = upstream_links[members[0]]
up_host_name = up_link["name"]
up_host_ip = up_link["local_ipv4_addr"]
payload = "{} -> {}".format(down_link["name"], up_host_name)
logging.info(payload)
# Source mac address is not useful in this test case and we use an arbitrary mac address as the source
pkt = create_pkt(
"00:01:02:03:04:05", dut_macaddress, "1.2.3.4", up_host_ip, bytes(payload))
exp_pkt = create_exp_pkt(pkt, pkt[scapy.IP].ttl - 1)
fail_message = ""
for port_name in members:
up_link = upstream_links[port_name]
testutils.send_packet(
ptfadapter, down_link["ptf_port_id"], pkt, TestDataPlane.BATCH_COUNT)
result = check_macsec_pkt(test=ptfadapter,
ptf_port_id=up_link["ptf_port_id"], exp_pkt=exp_pkt, timeout=3)
if result is None:
return
fail_message += result
pytest.fail(fail_message)
def test_dut_to_neighbor(self, duthost, ctrl_links, upstream_links):
for up_port, up_link in upstream_links.items():
ret = duthost.command(
"ping -c {} {}".format(4, up_link['local_ipv4_addr']))
assert not ret['failed']
def test_neighbor_to_neighbor(self, duthost, ctrl_links, upstream_links, nbr_device_numbers):
portchannels = get_portchannel(duthost).values()
for i in range(len(portchannels)):
assert portchannels[i]["members"]
requester = upstream_links[portchannels[i]["members"][0]]
# Set DUT as the gateway of requester
requester["host"].shell("ip route add 0.0.0.0/0 via {}".format(
requester["peer_ipv4_addr"]), module_ignore_errors=True)
for j in range(i + 1, len(portchannels)):
if portchannels[i]["members"][0] not in ctrl_links and portchannels[j]["members"][0] not in ctrl_links:
continue
responser = upstream_links[portchannels[j]["members"][0]]
# Set DUT as the gateway of responser
responser["host"].shell("ip route add 0.0.0.0/0 via {}".format(
responser["peer_ipv4_addr"]), module_ignore_errors=True)
# Ping from requester to responser
assert not requester["host"].shell(
"ping -c 6 -v {}".format(responser["local_ipv4_addr"]))["failed"]
responser["host"].shell("ip route del 0.0.0.0/0 via {}".format(
responser["peer_ipv4_addr"]), module_ignore_errors=True)
requester["host"].shell("ip route del 0.0.0.0/0 via {}".format(
requester["peer_ipv4_addr"]), module_ignore_errors=True)
class TestFaultHandling():
MKA_TIMEOUT = 6
LACP_TIMEOUT = 90
def test_link_flap(self, duthost, ctrl_links):
# Only pick one link for link flap test
assert ctrl_links
port_name, nbr = ctrl_links.items()[0]
nbr_eth_port = get_eth_ifname(
nbr["host"], nbr["port"])
_, _, _, dut_egress_sa_table_orig, dut_ingress_sa_table_orig = get_appl_db(
duthost, port_name, nbr["host"], nbr["port"])
# Flap < 6 seconds
# This short-flap check does not work on EOS neighbors
if not isinstance(nbr["host"], EosHost):
# Rekey may happen during the following assertions, so we need to get the SA tables again
retry = 3
while retry > 0:
retry -= 1
try:
nbr["host"].shell("ifconfig {} down && sleep 1 && ifconfig {} up".format(
nbr_eth_port, nbr_eth_port))
_, _, _, dut_egress_sa_table_new, dut_ingress_sa_table_new = get_appl_db(
duthost, port_name, nbr["host"], nbr["port"])
assert dut_egress_sa_table_orig == dut_egress_sa_table_new
assert dut_ingress_sa_table_orig == dut_ingress_sa_table_new
break
except AssertionError as e:
if retry == 0:
raise e
dut_egress_sa_table_orig, dut_ingress_sa_table_orig = dut_egress_sa_table_new, dut_ingress_sa_table_new
# Flap > 6 seconds but < 90 seconds
if isinstance(nbr["host"], EosHost):
nbr["host"].shutdown(nbr_eth_port)
sleep(TestFaultHandling.MKA_TIMEOUT)
nbr["host"].no_shutdown(nbr_eth_port)
else:
nbr["host"].shell("ifconfig {} down && sleep {} && ifconfig {} up".format(
nbr_eth_port, TestFaultHandling.MKA_TIMEOUT, nbr_eth_port))
def check_new_mka_session():
_, _, _, dut_egress_sa_table_new, dut_ingress_sa_table_new = get_appl_db(
duthost, port_name, nbr["host"], nbr["port"])
assert dut_egress_sa_table_new
assert dut_ingress_sa_table_new
assert dut_egress_sa_table_orig != dut_egress_sa_table_new
assert dut_ingress_sa_table_orig != dut_ingress_sa_table_new
return True
assert wait_until(30, 5, 2, check_new_mka_session)
# Flap > 90 seconds
assert wait_until(12, 1, 0, lambda: find_portchannel_from_member(
port_name, get_portchannel(duthost))["status"] == "Up")
if isinstance(nbr["host"], EosHost):
nbr["host"].shutdown(nbr_eth_port)
sleep(TestFaultHandling.LACP_TIMEOUT)
else:
nbr["host"].shell("ifconfig {} down && sleep {}".format(
nbr_eth_port, TestFaultHandling.LACP_TIMEOUT))
assert wait_until(6, 1, 0, lambda: find_portchannel_from_member(
port_name, get_portchannel(duthost))["status"] == "Dw")
if isinstance(nbr["host"], EosHost):
nbr["host"].no_shutdown(nbr_eth_port)
else:
nbr["host"].shell("ifconfig {} up".format(nbr_eth_port))
assert wait_until(12, 1, 0, lambda: find_portchannel_from_member(
port_name, get_portchannel(duthost))["status"] == "Up")
def test_mismatch_macsec_configuration(self, duthost, unctrl_links,
profile_name, default_priority, cipher_suite,
primary_cak, primary_ckn, policy, send_sci, request):
# Only pick one uncontrolled link for mismatch macsec configuration test
assert unctrl_links
port_name, nbr = unctrl_links.items()[0]
disable_macsec_port(duthost, port_name)
disable_macsec_port(nbr["host"], nbr["port"])
delete_macsec_profile(nbr["host"], nbr["port"], profile_name)
# Set a wrong cak to the profile
primary_cak = "0" * len(primary_cak)
enable_macsec_port(duthost, port_name, profile_name)
set_macsec_profile(nbr["host"], nbr["port"], profile_name, default_priority,
cipher_suite, primary_cak, primary_ckn, policy, send_sci)
enable_macsec_port(nbr["host"], nbr["port"], profile_name)
def check_mka_establishment():
_, _, dut_ingress_sc_table, dut_egress_sa_table, dut_ingress_sa_table = get_appl_db(
duthost, port_name, nbr["host"], nbr["port"])
return dut_ingress_sc_table or dut_egress_sa_table or dut_ingress_sa_table
# With mismatched CAKs, the MKA session must never be established;
# verify that no establishment happens within 90 seconds
assert not wait_until(90, 1, 12, check_mka_establishment)
# Teardown
disable_macsec_port(duthost, port_name)
disable_macsec_port(nbr["host"], nbr["port"])
delete_macsec_profile(nbr["host"], nbr["port"], profile_name)
class TestInteropProtocol():
'''
Macsec interop with other protocols
'''
def test_port_channel(self, duthost, ctrl_links):
'''Verify lacp
'''
ctrl_port, _ = ctrl_links.items()[0]
pc = find_portchannel_from_member(ctrl_port, get_portchannel(duthost))
assert pc["status"] == "Up"
# Remove ethernet interface <ctrl_port> from PortChannel interface <pc>
duthost.command("sudo config portchannel member del {} {}".format(
pc["name"], ctrl_port))
assert wait_until(20, 1, 0, lambda: get_portchannel(
duthost)[pc["name"]]["status"] == "Dw")
# Add ethernet interface <ctrl_port> back to PortChannel interface <pc>
duthost.command("sudo config portchannel member add {} {}".format(
pc["name"], ctrl_port))
assert wait_until(20, 1, 0, lambda: find_portchannel_from_member(
ctrl_port, get_portchannel(duthost))["status"] == "Up")
def test_lldp(self, duthost, ctrl_links, profile_name):
'''Verify lldp
'''
LLDP_ADVERTISEMENT_INTERVAL = 30 # default interval in seconds
LLDP_HOLD_MULTIPLIER = 4 # default multiplier number
LLDP_TIMEOUT = LLDP_ADVERTISEMENT_INTERVAL * LLDP_HOLD_MULTIPLIER
# select one macsec link
for ctrl_port, nbr in ctrl_links.items():
assert wait_until(LLDP_TIMEOUT, LLDP_ADVERTISEMENT_INTERVAL, 0,
lambda: nbr["name"] in get_lldp_list(duthost))
disable_macsec_port(duthost, ctrl_port)
disable_macsec_port(nbr["host"], nbr["port"])
wait_until(20, 3, 0,
lambda: not duthost.iface_macsec_ok(ctrl_port) and
not nbr["host"].iface_macsec_ok(nbr["port"]))
assert wait_until(LLDP_TIMEOUT, LLDP_ADVERTISEMENT_INTERVAL, 0,
lambda: nbr["name"] in get_lldp_list(duthost))
enable_macsec_port(duthost, ctrl_port, profile_name)
enable_macsec_port(nbr["host"], nbr["port"], profile_name)
wait_until(20, 3, 0,
lambda: duthost.iface_macsec_ok(ctrl_port) and
nbr["host"].iface_macsec_ok(nbr["port"]))
assert wait_until(1, 1, LLDP_TIMEOUT,
lambda: nbr["name"] in get_lldp_list(duthost))
def test_bgp(self, duthost, ctrl_links, upstream_links, profile_name):
'''Verify BGP neighbourship
'''
bgp_config = duthost.get_running_config_facts()[
"BGP_NEIGHBOR"].values()[0]
BGP_KEEPALIVE = int(bgp_config["keepalive"])
BGP_HOLDTIME = int(bgp_config["holdtime"])
def check_bgp_established(up_link):
command = "sonic-db-cli STATE_DB HGETALL 'NEIGH_STATE_TABLE|{}'".format(
up_link["local_ipv4_addr"])
fact = sonic_db_cli(duthost, command)
logger.info("bgp state {}".format(fact))
return fact["state"] == "Established"
# Ensure the BGP sessions have been established
for ctrl_port in ctrl_links.keys():
assert wait_until(30, 5, 0,
check_bgp_established, upstream_links[ctrl_port])
# Check the BGP sessions are present after port macsec disabled
for ctrl_port, nbr in ctrl_links.items():
disable_macsec_port(duthost, ctrl_port)
disable_macsec_port(nbr["host"], nbr["port"])
wait_until(20, 3, 0,
lambda: not duthost.iface_macsec_ok(ctrl_port) and
not nbr["host"].iface_macsec_ok(nbr["port"]))
# BGP session should keep established even after holdtime
assert wait_until(BGP_HOLDTIME * 2, BGP_KEEPALIVE, BGP_HOLDTIME,
check_bgp_established, upstream_links[ctrl_port])
# Check the BGP sessions are present after port macsec enabled
for ctrl_port, nbr in ctrl_links.items():
enable_macsec_port(duthost, ctrl_port, profile_name)
enable_macsec_port(nbr["host"], nbr["port"], profile_name)
wait_until(20, 3, 0,
lambda: duthost.iface_macsec_ok(ctrl_port) and
nbr["host"].iface_macsec_ok(nbr["port"]))
# Wait PortChannel up, which might flap if having one port member
wait_until(20, 1, 0, lambda: find_portchannel_from_member(
ctrl_port, get_portchannel(duthost))["status"] == "Up")
the weights corresponding to the movements
between regions. Edges must contain weight and delta_adjustment attributes (assumed 1.0)
:type graph: networkx.Digraph
:param currentState: The current state for every region.
:type currentState: A dictionary with the region as a key and the value is a dictionary of
states in the format {(age, state): number of individuals in this state}.
:param movementMultiplier: a multiplier applied to each edge (movement) in the network.
:type movementMultiplier: float
:param infectiousStates: States that are considered infectious
:type infectiousStates: list of strings
:param stochastic: Whether to run the model in a stochastic or deterministic mode
:type stochastic: bool
:param random_state: Random number generator used for the model
:type random_state: np.random.Generator, None
:return: the number of potentially infectious contacts received by each region.
:rtype: A dictionary with the region as key and the number of contacts as the value.
"""
infectiousByNode: Dict[NodeName, float] = {}
totalByNode: Dict[NodeName, float] = {}
# Precompute this so that we avoid expensive calls inside the O(n^2) part of the algorithm as much as we can
for name, node in currentState.items():
infectiousByNode[name] = getTotalInfectious(node, infectiousStates)
totalByNode[name] = totalIndividuals(node)
contactsByNode: Dict[NodeName, float] = {}
# Surprisingly, iterating over graph.edges is actually slower than going through the dicts and calling
# graph.predecessors when needed
for receivingVertex in currentState:
totalSusceptHere = getTotalSuscept(currentState[receivingVertex])
contactsByNode[receivingVertex] = 0
if totalSusceptHere > 0:
for givingVertex in graph.predecessors(receivingVertex):
if givingVertex == receivingVertex:
continue
totalInfectedGiving = infectiousByNode[givingVertex]
if totalInfectedGiving > 0:
weight = getWeight(graph, givingVertex, receivingVertex, movementMultiplier)
fractionGivingInfected = totalInfectedGiving / totalByNode[givingVertex]
fractionReceivingSus = totalSusceptHere / totalByNode[receivingVertex]
contactsByNode[receivingVertex] += _computeInfectiousCommutes(
weight,
fractionGivingInfected,
fractionReceivingSus,
stochastic,
random_state
)
return contactsByNode
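# Illustrative sketch of driving the function above on a toy two-node network.
# The node names, the age label "m" and the "S"/"I" state letters are
# assumptions for this example (it relies on the module's helpers treating "S"
# as susceptible); it is wrapped in a function so importing stays side-effect free.
def _example_incoming_contacts():
    import networkx as nx
    g = nx.DiGraph()
    g.add_edge("A", "B", weight=100, delta_adjustment=1.0)
    state = {
        "A": {("m", "S"): 900.0, ("m", "I"): 100.0},  # 10% infectious
        "B": {("m", "S"): 500.0, ("m", "I"): 0.0},    # fully susceptible
    }
    # Deterministic mode: 100 commutes * 0.1 infectious * 1.0 susceptible
    # = 10 infectious contacts arriving at "B" (and none at "A").
    return getIncomingInfectiousContactsByNode(
        g, state, movementMultiplier=1.0, infectiousStates=["I"],
        stochastic=False, random_state=None)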
def _computeInfectiousCommutes(weight, fractionGivingInfected, fractionReceivingSus, stochastic, random_state):
"""Transforms the weights (commutes) into potentially infectious commutes, that
originate from infectious people, and target susceptible people. Two modes:
1) Deterministic
We multiply the commute by the proportion of infectious people in giving node,
and by the proportion of susceptible people in receiving age group
2) Stochastic
We assume commutes are randomly distributed across people.
We sample int(weight) from a binomial distribution with p=fractionReceivingSus.
This gives the number of commutes that target susceptible people. Then we sample
the result into another binomial distribution with p=fractionGivingInfected. This gives
the number of commutes that target susceptible people, and originate from infectious
people.
:param weight: Raw number of commutes
:type weight: float
:param fractionGivingInfected: Fraction of infectious people in source node of commute
:type fractionGivingInfected: float
:param fractionReceivingSus: Fraction of susceptible people in destination node of commute
:type fractionReceivingSus: float
:param stochastic: Whether to run the model in a stochastic or deterministic mode
:type stochastic: bool
:param random_state: Random number generator used for the model
:type random_state: np.random.Generator, None
:return: The number of potentially infectious commutes.
:rtype: float
"""
if stochastic:
# weight can be fractional because of the movement multiplier, round it
contacts = stats.binom.rvs(int(round(weight)), fractionReceivingSus, random_state=random_state)
contacts = stats.binom.rvs(contacts, fractionGivingInfected, random_state=random_state)
return contacts
else:
return weight * fractionGivingInfected * fractionReceivingSus
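# Small sanity-check sketch for the two modes above; the numbers are illustrative.
def _example_infectious_commutes():
    import numpy as np
    # Deterministic: 100 * 0.1 * 0.5 = 5.0 expected infectious commutes.
    expected = _computeInfectiousCommutes(100.0, 0.1, 0.5, False, None)
    # Stochastic: binom(100, 0.5) thinned by binom(n, 0.1); the compound draw
    # has the same mean, 100 * 0.5 * 0.1 = 5.0.
    sampled = _computeInfectiousCommutes(100.0, 0.1, 0.5, True,
                                         np.random.default_rng(0))
    return expected, sampled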
def getWeight(graph, orig, dest, multiplier):
"""Get the weight of the edge from orig to dest in the graph. This weight is expected to be
proportional to the movement between nodes. If the edge doesn't have a weight, 1.0 is assumed
and the returned weight is adjusted by the multiplier and any delta_adjustment on the edge.
:param graph: A graph with each region as a node and the weights corresponding to the commutes
between regions. Edges must contain weight and delta_adjustment attributes (assumed 1.0)
:type graph: networkx.DiGraph
:param orig: The vertex people are coming from.
:type orig: str
:param dest: The vertex people are going to.
:type dest: str
:param multiplier: Value that will dampen or heighten movements between nodes.
:type multiplier" float
:return: The final weight value
:rtype: float
"""
edge = graph.get_edge_data(orig, dest)
if "weight" not in edge:
logger.error("No weight available for edge %s,%s assuming 1.0", orig, dest)
weight = 1.0
else:
weight = edge["weight"]
if "delta_adjustment" not in edge:
logger.error("delta_adjustment not available for edge %s,%s assuming 1.0", orig, dest)
delta_adjustment = 1.0
else:
delta_adjustment = edge["delta_adjustment"]
delta = weight - (weight * multiplier)
# The delta_adjustment is applied on the delta. It can either completely cancel any changes (factor = 0.0) or
# enable it fully (factor = 1.0). If the movement multiplier doesn't make any changes to the node's movements (ie.
# multiplier = 1.0), then the delta_adjustment will have no effect.
return weight - (delta * delta_adjustment)
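# Worked example of the adjustment above: with weight=100 and multiplier=0.5
# the raw delta is 100 - 100*0.5 = 50; delta_adjustment=0.0 cancels the change
# (returns 100), 1.0 applies it fully (returns 50), and 0.5 applies half of it
# (returns 75).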
# CurrentlyInUse
def getExternalInfectiousContacts(graph, nodes, movementMultiplier, infectiousStates, stochastic, random_state):
"""Calculate the number of new infections in each region. The infections are distributed
proportionally to the number of susceptibles in the destination node and infected in the origin
node. The infections are distributed to each age group according to the number of susceptible
people in them.
:param graph: A graph with each region as a node and the weights corresponding to the movements
between regions. Edges must contain weight and delta_adjustment attributes (assumed 1.0)
:type graph: networkx.DiGraph
:param nodes: The disease status in each region stratified by age.
:type nodes: A dictionary with the region as a key and the disease state as values.
The states are a dictionary with a tuple of (age, state) as keys and the number
of individuals in that state as values.
:param movementMultiplier: A multiplier applied to each edge (movement between nodes) in the
network.
:type movementMultiplier: float
:param infectiousStates: States that are considered infectious
:type infectiousStates: list of strings
:param stochastic: Whether to run the model in a stochastic or deterministic mode
:type stochastic: bool
:param random_state: Random number generator used for the model
:type random_state: np.random.Generator, None
:return: The number of new infections in each region stratified by age.
:rtype: A dictionary with the region as a key and a dictionary of {age: number of new
infections} as values.
"""
infectionsByNode = {}
incomingContacts = getIncomingInfectiousContactsByNode(
graph,
nodes,
movementMultiplier,
infectiousStates,
stochastic,
random_state
)
for name, vertex in incomingContacts.items():
infectionsByNode[name] = distributeContactsOverAges(nodes[name], vertex, stochastic, random_state)
return infectionsByNode
# CurrentlyInUse
def getInternalInfectiousContactsInNode(
currentInternalStateDict,
mixingMatrix,
contactsMultiplier,
infectiousStates,
stochastic,
random_state
):
"""Calculate the new infections due to mixing within the region and stratify them by age.
:param currentInternalStateDict: The disease status of the population stratified by age.
:type currentInternalStateDict: A dict with a tuple of (age, state) as keys and the
number of individuals in that state as values.
:param mixingMatrix: Stores expected numbers of interactions between people of
different ages.
:type mixingMatrix: A nested dict of {ageTo: {ageFrom: mixing ratio}} giving the
expected number of contacts between the two age groups.
:param contactsMultiplier: Multiplier applied to the number of infectious contacts.
:type contactsMultiplier: float
:param infectiousStates: States that are considered infectious
:type infectiousStates: list of strings
:param stochastic: Whether to run the model in a stochastic or deterministic mode
:type stochastic: bool
:param random_state: Random number generator used for the model
:type random_state: np.random.Generator, None
:return: The number of new infections stratified by age.
:rtype: A dictionary of {age: number of new infections}
"""
infectiousContacts: Dict[Age, float] = {}
for ageTo in mixingMatrix:
infectiousContacts[ageTo] = 0
susceptibles = getSusceptibles(ageTo, currentInternalStateDict)
totalInAge = getTotalInAge(currentInternalStateDict, ageTo)
if susceptibles > 0 and totalInAge > 0.0:
for ageFrom in mixingMatrix[ageTo]:
infectious = getInfectious(ageFrom, currentInternalStateDict, infectiousStates)
infectiousContacts[ageTo] += _computeInfectiousContacts(
mixingMatrix[ageTo][ageFrom] * contactsMultiplier,
infectious,
susceptibles,
totalInAge,
stochastic,
random_state
)
return infectiousContacts
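# Illustrative shape of the inputs above (the age label is an assumption): with
# mixingMatrix = {"[17,70)": {"[17,70)": 0.2}}, contactsMultiplier=1.0 and state
# {("[17,70)", "S"): 900, ("[17,70)", "I"): 100}, the inner loop calls
# _computeInfectiousContacts(0.2, 100, 900, 1000, ...), which in deterministic
# mode yields 0.2 * 100 * (900/1000) = 18 expected infectious contacts.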
def _computeInfectiousContacts(contacts, infectious, susceptibles, totalInAge, stochastic, random_state):
"""From raw contacts (between any two people in different age groups), filters
only those contacts that originated from an infectious person and received
by a susceptible person. These contacts have not yet led to new infections;
for that we still need to multiply by the infection probability. Two modes:
1) Deterministic
We multiply the contacts by the number of infectious people and by the
proportion of susceptibles
2) Stochastic
For each infectious individual we sample from a Poisson distribution with mean the number
# chroma/camera.py
#!/usr/bin/env python
import numpy as np
import itertools
import threading
import multiprocessing
import os
from subprocess import call
import shutil
import tempfile
import sys
import pycuda.driver as cuda
from pycuda import gpuarray as ga
from chroma.geometry import Mesh, Solid, Geometry, vacuum
from chroma.transform import rotate, make_rotation_matrix
from chroma.sample import uniform_sphere
from chroma.tools import from_film
from chroma import make
from chroma import gpu
from chroma.loader import create_geometry_from_obj
import pygame
from pygame.locals import *
def bvh_mesh(geometry, layer):
lower_bounds, upper_bounds = geometry.bvh.get_layer(layer).get_bounds()
if len(lower_bounds) == 0 or len(upper_bounds) == 0:
raise Exception('no nodes at layer %i' % layer)
dx, dy, dz = upper_bounds[0] - lower_bounds[0]
center = np.mean([upper_bounds[0],lower_bounds[0]], axis=0)
mesh = make.box(dx, dy, dz, center)
for center, dx, dy, dz in list(zip(np.mean([lower_bounds, upper_bounds], axis=0),
*zip(*(upper_bounds - lower_bounds))))[1:]:
mesh += make.box(dx, dy, dz, center)
return mesh
def encode_movie(dir):
root, ext = 'movie', 'avi'
for i in itertools.count():
path = '.'.join([root + str(i).zfill(5), ext])
if not os.path.exists(path):
break
call(['mencoder', 'mf://' + dir + '/*.png', '-mf', 'fps=10', '-o',
path, '-ovc', 'xvid', '-xvidencopts', 'bitrate=3000'])
shutil.rmtree(dir)
print('movie saved to %s.' % path)
class Camera(multiprocessing.Process):
"The camera class is used to render a Geometry object."
def __init__(self, geometry, size=(800,600), device_id=None):
super(Camera, self).__init__()
self.geometry = geometry
self.device_id = device_id
self.size = size
self.bvh_layer_count = len(self.geometry.bvh.layer_offsets)
self.currentlayer = None
self.bvh_layers = {}
self.display3d = False
self.green_magenta = False
self.max_alpha_depth = 10
self.alpha_depth = 10
try:
import spnav as spnav_module
self.spnav_module = spnav_module
self.spnav = True
except ImportError:
self.spnav = False
def init_gpu(self):
self.context = gpu.create_cuda_context(self.device_id)
self.gpu_geometry = gpu.GPUGeometry(self.geometry)
self.gpu_funcs = gpu.GPUFuncs(gpu.get_cu_module('mesh.h'))
self.hybrid_funcs = gpu.GPUFuncs(gpu.get_cu_module('hybrid_render.cu'))
self.gpu_geometries = [self.gpu_geometry]
self.width, self.height = self.size
self.npixels = self.width*self.height
self.clock = pygame.time.Clock()
self.doom_mode = False
try:
if self.width == 640: # SECRET DOOM MODE!
print('shotgun activated!')
self.doom_hud = pygame.image.load('images/doomhud.png').convert_alpha()
rect = self.doom_hud.get_rect()
self.doom_rect = rect.move(0, self.height - rect.height)
self.doom_mode = True
except Exception:
pass
lower_bound, upper_bound = self.geometry.mesh.get_bounds()
self.mesh_diagonal_norm = np.linalg.norm(upper_bound-lower_bound)
self.scale = self.mesh_diagonal_norm
self.motion = 'coarse'
self.nblocks = 64
self.point = np.array([0, -self.mesh_diagonal_norm,
(lower_bound[2]+upper_bound[2])/2])
self.axis1 = np.array([0,0,1], float)
self.axis2 = np.array([1,0,0], float)
self.film_width = 35.0 # mm
pos, dir = from_film(self.point, axis1=self.axis1, axis2=self.axis2,
size=self.size, width=self.film_width)
self.rays = gpu.GPURays(pos, dir, max_alpha_depth=self.max_alpha_depth)
self.pixels_gpu = ga.empty(self.npixels, dtype=np.uint32)
self.movie = False
self.movie_index = 0
self.movie_dir = None
self.hybrid_render = False
def disable3d(self):
pos, dir = from_film(self.point, axis1=self.axis1, axis2=self.axis2,
size=self.size, width=self.film_width)
self.rays = gpu.GPURays(pos, dir, max_alpha_depth=self.max_alpha_depth)
self.display3d = False
def enable3d(self):
self.point1 = self.point-(self.mesh_diagonal_norm/60)*self.axis2
self.point2 = self.point+(self.mesh_diagonal_norm/60)*self.axis2
self.viewing_angle = 0.0
pos1, dir1 = from_film(self.point1, axis1=self.axis1, axis2=self.axis2,
size=self.size, width=self.film_width)
pos2, dir2 = from_film(self.point2, axis1=self.axis1, axis2=self.axis2,
size=self.size, width=self.film_width)
self.rays1 = gpu.GPURays(pos1, dir1,
max_alpha_depth=self.max_alpha_depth)
self.rays2 = gpu.GPURays(pos2, dir2,
max_alpha_depth=self.max_alpha_depth)
scope_size = (self.size[0]//4, self.size[0]//4)
scope_pos, scope_dir = from_film(self.point, axis1=self.axis1,
axis2=self.axis2, size=scope_size,
width=self.film_width/4.0)
self.scope_rays = gpu.GPURays(scope_pos, scope_dir)
self.scope_pixels_gpu = ga.empty(self.scope_rays.pos.size, dtype=np.uint32)
self.pixels1_gpu = ga.empty(self.width*self.height, dtype=np.uint32)
self.pixels2_gpu = ga.empty(self.width*self.height, dtype=np.uint32)
self.distances_gpu = ga.empty(self.scope_rays.pos.size,
dtype=np.float32)
self.display3d = True
def initialize_render(self):
self.rng_states_gpu = gpu.get_rng_states(self.npixels)
self.xyz_lookup1_gpu = ga.zeros(len(self.geometry.mesh.triangles),
dtype=ga.vec.float3)
self.xyz_lookup2_gpu = ga.zeros(len(self.geometry.mesh.triangles),
dtype=ga.vec.float3)
if self.display3d:
self.image1_gpu = ga.zeros(self.npixels, dtype=ga.vec.float3)
self.image2_gpu = ga.zeros(self.npixels, dtype=ga.vec.float3)
else:
self.image_gpu = ga.zeros(self.npixels, dtype=ga.vec.float3)
self.source_position = self.point
self.nimages = 0
self.nlookup_calls = 0
self.max_steps = 10
def clear_xyz_lookup(self):
self.xyz_lookup1_gpu.fill(ga.vec.make_float3(0.0,0.0,0.0))
self.xyz_lookup2_gpu.fill(ga.vec.make_float3(0.0,0.0,0.0))
self.nlookup_calls = 0
def update_xyz_lookup(self, source_position):
for wavelength, rgb_tuple in \
zip([685.0, 545.0, 445.0],[(1,0,0),(0,1,0),(0,0,1)]):
for i in range(self.xyz_lookup1_gpu.size//(self.npixels)+1):
self.hybrid_funcs.update_xyz_lookup(np.int32(self.npixels), np.int32(self.xyz_lookup1_gpu.size), np.int32(i*self.npixels), ga.vec.make_float3(*source_position), self.rng_states_gpu, np.float32(wavelength), ga.vec.make_float3(*rgb_tuple), self.xyz_lookup1_gpu, self.xyz_lookup2_gpu, np.int32(self.max_steps), self.gpu_geometry.gpudata, block=(self.nblocks,1,1), grid=(self.npixels//self.nblocks+1,1))
self.nlookup_calls += 1
def clear_image(self):
if self.display3d:
self.image1_gpu.fill(ga.vec.make_float3(0.0,0.0,0.0))
self.image2_gpu.fill(ga.vec.make_float3(0.0,0.0,0.0))
else:
self.image_gpu.fill(ga.vec.make_float3(0.0,0.0,0.0))
self.nimages = 0
def update_image_from_rays(self, image, rays):
for wavelength, rgb_tuple in \
zip([685.0, 545.0, 445.0],[(1,0,0),(0,1,0),(0,0,1)]):
self.hybrid_funcs.update_xyz_image(np.int32(rays.pos.size), self.rng_states_gpu, rays.pos, rays.dir, np.float32(wavelength), ga.vec.make_float3(*rgb_tuple), self.xyz_lookup1_gpu, self.xyz_lookup2_gpu, image, np.int32(self.nlookup_calls), np.int32(self.max_steps), self.gpu_geometry.gpudata, block=(self.nblocks,1,1), grid=(rays.pos.size//self.nblocks+1,1))
def update_image(self):
if self.display3d:
self.update_image_from_rays(self.image1_gpu, self.rays1)
self.update_image_from_rays(self.image2_gpu, self.rays2)
else:
self.update_image_from_rays(self.image_gpu, self.rays)
self.nimages += 1
def process_image(self):
if self.display3d:
self.hybrid_funcs.process_image(np.int32(self.pixels1_gpu.size), self.image1_gpu, self.pixels1_gpu, np.int32(self.nimages), block=(self.nblocks,1,1), grid=((self.pixels1_gpu.size)//self.nblocks+1,1))
self.hybrid_funcs.process_image(np.int32(self.pixels2_gpu.size), self.image2_gpu, self.pixels2_gpu, np.int32(self.nimages), block=(self.nblocks,1,1), grid=((self.pixels2_gpu.size)//self.nblocks+1,1))
else:
self.hybrid_funcs.process_image(np.int32(self.pixels_gpu.size), self.image_gpu, self.pixels_gpu, np.int32(self.nimages), block=(self.nblocks,1,1), grid=((self.pixels_gpu.size)//self.nblocks+1,1))
def screenshot(self, dir='', start=0):
root, ext = 'screenshot', 'png'
for i in itertools.count(start):
path = os.path.join(dir, '.'.join([root + str(i).zfill(5), ext]))
if not os.path.exists(path):
break
try:
pygame.image.save(self.screen, path)
except ImportError:
import Image
mode = 'RGBA'
data = self.screen.get_buffer()
im = Image.frombuffer(mode,self.size,data,'raw',mode,0,1)
im.save(path)
print('image saved to %s' % path)
def rotate(self, phi, n):
if self.display3d:
self.rays1.rotate(phi, n)
self.rays2.rotate(phi, n)
self.scope_rays.rotate(phi, n)
self.point1 = rotate(self.point1, phi, n)
self.point2 = rotate(self.point2, phi, n)
else:
self.rays.rotate(phi, n)
self.point = rotate(self.point, phi, n)
self.axis1 = rotate(self.axis1, phi, n)
self.axis2 = rotate(self.axis2, phi, n)
if self.hybrid_render:
self.clear_image()
self.update()
def rotate_around_point(self, phi, n, point, redraw=True):
self.axis1 = rotate(self.axis1, phi, n)
self.axis2 = rotate(self.axis2, phi, n)
if self.display3d:
self.rays1.rotate_around_point(phi, n, point)
self.rays2.rotate_around_point(phi, n, point)
self.scope_rays.rotate_around_point(phi, n, point)
else:
self.rays.rotate_around_point(phi, n, point)
if redraw:
if self.hybrid_render:
self.clear_image()
self.update()
def translate(self, v, redraw=True):
self.point += v
if self.display3d:
self.rays1.translate(v)
self.rays2.translate(v)
self.scope_rays.translate(v)
self.point1 += v
self.point2 += v
else:
self.rays.translate(v)
if redraw:
if self.hybrid_render:
self.clear_image()
self.update()
def update_pixels(self, gpu_geometry=None, keep_last_render=False):
if gpu_geometry is None:
gpu_geometry = self.gpu_geometry
if self.hybrid_render:
while self.nlookup_calls < 10:
self.update_xyz_lookup(self.source_position)
self.update_image()
self.process_image()
else:
if self.display3d:
self.rays1.render(gpu_geometry, self.pixels1_gpu,
self.alpha_depth, keep_last_render)
self.rays2.render(gpu_geometry, self.pixels2_gpu,
self.alpha_depth, keep_last_render)
else:
self.rays.render(gpu_geometry, self.pixels_gpu,
self.alpha_depth, keep_last_render)
def update_viewing_angle(self):
if self.display3d:
distance_gpu = ga.empty(self.scope_rays.pos.size, dtype=np.float32)
distance_gpu.fill(1e9)
for i, gpu_geometry in enumerate(self.gpu_geometries):
self.gpu_funcs.distance_to_mesh(np.int32(self.scope_rays.pos.size), self.scope_rays.pos, self.scope_rays.dir, gpu_geometry.gpudata, distance_gpu, block=(self.nblocks,1,1), grid=(self.scope_rays.pos.size//self.nblocks,1))
if i == 0:
distance = distance_gpu.get()
else:
distance = np.minimum(distance, distance_gpu.get())
baseline = distance.min()
if baseline < 1e9:
d1 = self.point1 - self.point
v1 = d1/np.linalg.norm(d1)
v1 *= baseline/60 - np.linalg.norm(d1)
self.rays1.translate(v1)
self.point1 += v1
d2 = self.point2 - self.point
v2 = d2/np.linalg.norm(d2)
v2 *= baseline/60 - np.linalg.norm(d2)
self.rays2.translate(v2)
self.point2 += v2
direction = np.cross(self.axis1,self.axis2)
direction /= np.linalg.norm(direction)
direction1 = self.point + direction*baseline - self.point1
direction1 /= np.linalg.norm(direction1)
new_viewing_angle = np.arccos(direction1.dot(direction))
phi = new_viewing_angle - self.viewing_angle
self.rays1.rotate_around_point(phi, self.axis1, self.point1)
self.rays2.rotate_around_point(-phi, self.axis1, self.point2)
self.viewing_angle = new_viewing_angle
def update(self):
if self.display3d:
self.update_viewing_angle()
n = len(self.gpu_geometries)
for i, gpu_geometry in enumerate(self.gpu_geometries):
if i == 0:
self.update_pixels(gpu_geometry)
else:
self.update_pixels(gpu_geometry, keep_last_render=True)
if self.display3d:
pixels1 = self.pixels1_gpu.get()
pixels2 = self.pixels2_gpu.get()
if self.green_magenta:
pixels = (pixels1 & 0x00ff00) | (pixels2 & 0xff00ff)
else:
pixels = (pixels1 & 0xff0000) | (pixels2 & 0x00ffff)
alpha = ((0xff & (pixels1 >> 24)) + (0xff & (pixels2 >> 24))) // 2
pixels |= (alpha << 24)
else:
pixels = self.pixels_gpu.get()
pygame.surfarray.blit_array(self.screen, pixels.reshape(self.size))
if self.doom_mode:
self.screen.blit(self.doom_hud, self.doom_rect)
self.window.fill(0)
self.window.blit(self.screen, (0,0))
pygame.display.flip()
if self.movie:
self.screenshot(self.movie_dir, self.movie_index)
self.movie_index += 1
def loadlayer(self, layer):
if layer is None:
self.gpu_geometries = [self.gpu_geometry]
else:
try:
gpu_geometry = self.bvh_layers[layer]
except KeyError:
geometry = create_geometry_from_obj(bvh_mesh(self.geometry, layer))
gpu_geometry = gpu.GPUGeometry(geometry, print_usage=False)
self.bvh_layers[layer] = gpu_geometry
self.gpu_geometries = [self.gpu_geometry, gpu_geometry]
self.update()
def process_event(self, event):
if event.type == MOUSEBUTTONDOWN:
if event.button == 4:
v = self.scale*np.cross(self.axis1,self.axis2)/10.0
self.translate(v)
elif event.button == 5:
v = -self.scale*np.cross(self.axis1,self.axis2)/10.0
self.translate(v)
elif event.button == 1:
mouse_position = pygame.mouse.get_rel()
self.clicked = True
elif event.type == MOUSEBUTTONUP:
if event.button == 1:
self.clicked = False
elif event.type == MOUSEMOTION and self.clicked:
movement = np.array(pygame.mouse.get_rel())
if (movement == 0).all():
return
length = np.linalg.norm(movement)
mouse_direction = movement[0]*self.axis2 - movement[1]*self.axis1
mouse_direction /= np.linalg.norm(mouse_direction)
if pygame.key.get_mods() & (KMOD_LSHIFT | KMOD_RSHIFT):
v = -mouse_direction*self.scale*length/float(self.width)
self.translate(v)
else:
phi = np.float32(2*np.pi*length/float(self.width))
n = rotate(mouse_direction, np.pi/2,
np.cross(self.axis1,self.axis2))
if pygame.key.get_mods() & KMOD_LCTRL:
self.rotate_around_point(phi, n, self.point)
else:
self.rotate(phi, n)
elif event.type == KEYDOWN:
if event.key == K_LALT or event.key == K_RALT:
if self.motion == 'coarse':
self.scale = self.mesh_diagonal_norm/20.0
self.motion = 'fine'
elif self.motion == 'fine':
self.scale = self.mesh_diagonal_norm/400.0
self.motion = 'superfine'
elif self.motion == 'superfine':
self.scale = self.mesh_diagonal_norm
self.motion = 'coarse'
elif event.key == K_F6:
self.clear_xyz_lookup()
self.clear_image()
self.source_position = self.point
elif event.key == K_F7:
for i in range(100):
self.update_xyz_lookup(self.point)
self.source_position = self.point
elif event.key == K_F11:
pygame.display.toggle_fullscreen()
elif event.key == K_ESCAPE:
self.done = True
return
elif event.key == K_EQUALS:
if self.alpha_depth < self.max_alpha_depth:
self.alpha_depth += 1
self.update()
elif event.key == K_MINUS:
if self.alpha_depth > 1:
self.alpha_depth -= 1
self.update()
elif event.key == K_PAGEDOWN:
if self.currentlayer is None:
self.currentlayer = self.bvh_layer_count - 1
else:
if self.currentlayer > 0:
self.currentlayer -= 1
else:
self.currentlayer = None
self.loadlayer(self.currentlayer)
elif event.key == K_PAGEUP:
if self.currentlayer is None:
self.currentlayer = 0
else:
if self.currentlayer < self.bvh_layer_count - 1:
self.currentlayer += 1
else:
self.currentlayer = None
self.loadlayer(self.currentlayer)
elif event.key == K_3:
if self.display3d:
self.disable3d()
else:
self.enable3d()
self.update()
elif event.key == K_g:
self.green_magenta = not self.green_magenta
self.update()
elif event.key == K_F12:
self.screenshot()
elif event.key == K_F5:
if not hasattr(self, 'rng_states_gpu'):
self.initialize_render()
self.hybrid_render = not self.hybrid_render
# coding: utf-8
"""
see README.md
"""
__version__ = '1.0'
__author__ = u"<NAME>; <NAME>.; <NAME>."
def Bezout(f, g, every_step=False):
"""
Compute Bezout's divisor of f and g, where f and g can be
- bivariate polynomials
- affine curves
- projective curves
- homogeneous polynomials in three variables
The answer is a tuple (K, e, L), consisting of a field K, which
will be some extension of the (common) ground field of f and g,
an embedding e of the ground field of f and g into K,
and L a list of pairs (projective point in P^2(K), multiplicity).
Consequently, if f and g are polynomials of some ring L[x,y,z],
and K, e, Points = Bezout(f,g),
then f.change_ring(e) and g.change_ring(e) will give polynomials
in K[x,y,z].
"""
C1 = projective_closure(Curve(f))
C2 = projective_closure(Curve(g))
assert(C1.ambient_space()==C2.ambient_space())
A = C1.defining_polynomial()
B = C2.defining_polynomial()
if every_step: print "Bezout of (%s,%s)"%(A,B)
if gcd(A,B)!=1:
raise ValueError, "The curves have a common factor(s)."
L = euclidean_reduction(A,B)
if every_step: print "euclidean_reduction:", L
e = common_splitting_field([Gi for (mi,Fi,Gi) in L])
if every_step: print "Right commmon_field:", e
L = right_distribute(e,L)
if every_step: print "right_distribute:", L
L = linear_reduction(L)
if every_step: print "linear_reduction:", L
f = common_splitting_field([Fi for (mi,Fi,Gi) in L])
if every_step: print "Left common_splitting_field:", f
L = left_distribute(f,L)
if every_step: print "Linear cycles:", L
embedding = f*e
field = embedding.codomain()
# Compute the intersection points of the linear cycles in P^2.
PP = ProjectiveSpace(2)/field
L = [ [mi, PP(lin_poly_solve(Li,Mi))] for (mi,Li,Mi) in L]
if every_step: print "points:", L
# Group multiplicities
d = {}
for (m, P) in L:
if P in d:
d[P] += m
else:
d[P] = m
return (field, embedding, [(P,v) for (P,v) in d.items() if v>0])
def projective_closure(C):
"""
Return the projective closure of the curve C (given as an affine scheme)
as a projective scheme. The homogenizing variable is the last one.
>>> X,Y=AffineSpace(2, QQ).gens()
>>> f=Curve(X^3-Y^2)
>>> projective_closure(f)
Projective Curve over Rational Field defined by x0^3 - x1^2*x2
"""
if C.is_projective():
return C
o0,o1,o2=C.ambient_space().projective_embedding().codomain().coordinate_ring().gens()
f=C.defining_polynomial()
x,y=f.parent().gens()
F=f.subs({x: o0, y:o1}).homogenize(o2)
return Curve(F)
def lin_poly_solve(L, M):
"""
Return the point defined as the intersection of two lines
(given as degree-one polynomials in some field).
"""
x0,x1,x2 = M.parent().gens()
a0 = L.coefficient(x0)
a1 = L.coefficient(x1)
a2 = L.coefficient(x2)
b0 = M.coefficient(x0)
b1 = M.coefficient(x1)
b2 = M.coefficient(x2)
# a0 a1 a2
# b0 b1 b2
return [a1*b2-a2*b1, -a0*b2+a2*b0, a0*b1-a1*b0]
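# Sanity check for the cross product above: the lines L = x0 and M = x1 give
# coefficient vectors a = (1, 0, 0) and b = (0, 1, 0), so the formula returns
# [0*0 - 0*1, -(1*0) + 0*0, 1*1 - 0*0] = [0, 0, 1], i.e. the projective point
# (0 : 0 : 1), as expected.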
def linear_reduction(L):
"""
Input: L is a list of elements [mi, Fi, Li], where
mi is the accumulated multiplicity
Fi is a poly in three variables
Li is a linear form in two variables
Return:
Linear reduction of each Fi*Li.
"""
cycles = []
x0,x1,x2 = L[0][1].parent().gens()
for [mi, Fi, Li] in L:
a = Li.monomial_coefficient(x1)
b = Li.monomial_coefficient(x2)
if a==0:
cycles.append([mi, Fi(x0,x1,0), Li])
else:
cycles.append([mi, Fi(x0, -b/a*x2, x2), Li])
return cycles
def right_distribute(K,L):
"""
`L` is assumed to be a list of triples `[mi, Fi, Gi]`, as returned
by `euclidean_reduction` or `linear_reduction`.
K is an embedding into a big enough field.
Factor every G_i over a big enough field and apply cycle
distribution. Note that every polynomial in the output
has base field the big field.
"""
return [[mi*ri, Fi.change_ring(K), Li]
for [mi, Fi, Gi] in L
for (Li, ri) in Gi.change_ring(K).factor() ]
def left_distribute(K,L):
"""
Left cycle distribution. See `right_distribute`.
"""
switch = lambda l: [(mi, Gi, Fi) for (mi, Fi, Gi) in l]
return switch(right_distribute(K,switch(L)))
def euclidean_reduction(A,B):
"""
Input: `A` and `B` are homogeneous polynomials in a ring of three variables.
Output: A list of triples `[m_i, F_i, G_i]` such that the intersection
cycle [A,B] equals sum(m_i*[F_i,G_i]). The resulting `G_i` cycles
will not have the first variable.
"""
assert(A.parent()==B.parent())
x0,x1,x2 = A.parent().gens()
F,G = A,B
# Construct T=K(y1,y2)[y0]=K(x1,x2)[x0]
S = F.parent()
S0 = PolynomialRing(F.base_ring(), "y1,y2")
y1, y2 = S0.gens()
T = PolynomialRing(S0.fraction_field(), "y0")
y0 = T.gen()
# Construct Morphisms
phi = lambda W: T(W(y0,y1,y2))
U = PolynomialRing(F.base_ring(),"y0,y1,y2")
psi1 = Hom(U,S)([x0,x1,x2])
psi = lambda W: psi1(U(W))
# After all these definitions, we have the following
# rings and morphisms:
# S=R[x0,x1,x2] --phi-> T=R(y1,y2)[y0] --psi-> S.
if F.degree(x0)<G.degree(x0):
F,G = G,F
cycle=[]
while G.degree(x0)>0:
q,r = phi(F).quo_rem(phi(G)) # q,r in T
H = T(lcm(S0(q.denominator()),S0(r.denominator())))
Q = q*H # in T
R = r*H # in T
# print "(H,Q,R):", (H,Q,R) , "=", (psi(H), psi(Q), psi(R))
assert(psi(H).parent()==F.parent())
assert(psi(H)*F==psi(Q)*G+psi(R)) # in S
E = gcd(G, psi(R)) # in S, E == gcd(G,R) == gcd(H,G)
assert(E in S)
assert(E==gcd(psi(H), G))
# In T:
E_T = phi(E)
H0 = H/E_T
G0 = phi(G)/E_T
R0 = R/E_T
# In original ring:
R0S = psi(R0)
G0S = psi(G0)
H0S = psi(H0)
# print "(E, H0, G0, R0):", (E,H0S,G0S,R0S)
assert(H0S*F==psi(Q)*G0S+R0S)
cycle.append([1, F, E])
cycle.append([-1, G0S, H0S])
# print "cycle:", cycle
F = G0S
G = R0S
assert(F.degree(x0)>=G.degree(x0))
cycle.append([1,F,G])
# print "cycle:", cycle
non_trivials = [[m, Fi, Gi] for [m, Fi, Gi] in cycle if not Gi.is_unit()]
# print "non trivial cycle:", non_trivials
return non_trivials
def make_univariate(F, T):
"""
Given a homogeneous bivariate polynomial `F(xi,xj)`,
sitting inside a ring `K[x0,x1,x2]`, dehomogenise it
into ring T (univariate), for later factoring.
"""
assert(F.is_homogeneous() and len(F.variables())<3)
R = F.base_ring()
S = F.parent()
x0,x1,x2 = S.gens()
eta = T.gen()
if F.degree(x0)>0:
Fa = F(eta,1,1)
elif F.degree(x1)>0:
Fa = F(1,eta,1)
else:
Fa = F(1,1,eta)
return Fa
def common_splitting_field(L):
"""
Input: `L` is a list of homogeneous bivariate polynomials
inside the same ring K[x0,x1,x2].
This function will construct a field in which every
polynomial splits into linear forms.
Output: An embedding, e, of the field K into the splitting
field. You can recover the common splitting field
with e.codomain().
Also, note that if F is a polynomial in K[x0,x1,x2],
then F.change_ring(e) will embed F into e(K)[x0,x1,x2].
Note: we have to compute the common splitting field in this
fancy way because we might have more than one algebraic
extension K1 \subset K2 \subset K3, and the morphisms that
embed one into the other may not be canonically natural.
Hence, SAGEMATH needs help in "remembering" which embedding
it used to compute the splitting field in the first place.
"""
assert(len(set([parent(f) for f in L]))<=1)
T = PolynomialRing(L[0].base_ring(), "w")
l = [make_univariate(f,T) for f in L]
Ri = l[0].base_ring() # start here.
ei = Ri.Hom(Ri)(Ri.gens()) # identity Morphism
for p in l:
t = p.change_ring(ei)
Ri1, psi1 = t.splitting_field('alpha',simplify_all=True,simplify=True,map=True)
if Ri1.degree()>1:
Ri = Ri1
ei = psi1 *ei
return ei
def check_bezout(h,g, results=None):
# compute Bezout if not previously computed
if results:
K, e, Points = results
else:
K, e, Points = Bezout(h,g)
print(Points, e)
d = h.degree()*g.degree()
# checks:
# 1: the polynomials can be embedded into the appropriate extension
# 2: the values of the polynomials over the intersection points is 0
# 3: the computed multiplicities of the points add up to d, the product
# of the degrees
valuesF = [h.change_ring(e)(P[0], P[1], P[2]) for (P,v) in Points]
valuesG = [g.change_ring(e)(P[0], P[1], P[2]) for (P,v) in Points]
return d == sum([v for (P,v) in Points]) and not any(valuesF) and not any(valuesG)
# ============================================
# = Milnor number of a bivariate polynomial. =
# ============================================
def Milnor(f):
"""docstring for Milnor"""
assert(len(f.parent().gens())==2)
x, y = f.parent().gens()
K, e, points = Bezout(diff(f,x), diff(f,y))
affine, infinity = [], []
for (P,m) in points:
(infinity, affine)[P[2]!=0].append((P,m))
# print affine, infinity
fe = f.change_ring(e)
mu_f = add([v for (P,v) in affine if fe(*P.dehomogenize(2))==0])
mu_oo = sum([v for (P,v) in infinity]) if len(infinity)<=1 else None
return mu_f, mu_oo
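# Illustrative Sage session sketch (variable names assumed):
#   x, y = QQ['x, y'].gens()
#   Milnor(y**2 - x**3)   # the ordinary cusp
# The partials are (-3*x^2, 2*y), whose only affine intersection point is the
# origin with multiplicity 2, so mu_f == 2 (the classical Milnor number of the
# cusp) and mu_oo == 0.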
def form_decomposition(F):
t = F.total_degree()
l = (t+1)*[0]
for (u,m) in F:
l[m.total_degree()] += u*m
return l
def least_degree_form(F):
if F==0:
return F
else: # return first non-zero form.
return next(( (i,F.parent()(f))
for (i,f) in enumerate(form_decomposition(F)) if f!=0))
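# Example for the two helpers above: if F = x^2 + x*y + x, then
# form_decomposition(F) == [0, x, x^2 + x*y] (the forms of degree 0, 1 and 2)
# and least_degree_form(F) == (1, x), the first non-zero homogeneous form
# paired with its degree.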
def Sufficient_Noether_Conditions(F, G, H, bezout_divisor=None):
"""
F, G, H must be projective plane curves with no common
# Copyright 2021 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import glob
import os
from absl import logging
import flax
import jax
import numpy as np
import tensorflow as tf
import tensorflow_datasets as tfds
import sys
if sys.platform != 'darwin':
# A workaround to avoid a crash because tfds may open too many files.
import resource
low, high = resource.getrlimit(resource.RLIMIT_NOFILE)
resource.setrlimit(resource.RLIMIT_NOFILE, (high, high))
from vit_jax import autoaugment # pylint: disable=g-import-not-at-top
import jax.numpy as jnp
_RESIZE_MIN = 256
# Adjust depending on the available RAM.
MAX_IN_MEMORY = 200_000
MEAN_RGB = [0.485 * 255, 0.456 * 255, 0.406 * 255]
STDDEV_RGB = [0.229 * 255, 0.224 * 255, 0.225 * 255]
def normalize_image(image):
image -= tf.constant(MEAN_RGB, shape=[1, 1, 3], dtype=image.dtype)
image /= tf.constant(STDDEV_RGB, shape=[1, 1, 3], dtype=image.dtype)
return image
def _central_crop(image, crop_height, crop_width):
"""Performs central crops of the given image list.
Args:
image: a 3-D image tensor
crop_height: the height of the image following the crop.
crop_width: the width of the image following the crop.
Returns:
3-D tensor with cropped image.
"""
shape = tf.shape(image)
height, width = shape[0], shape[1]
amount_to_be_cropped_h = (height - crop_height)
crop_top = amount_to_be_cropped_h // 2
amount_to_be_cropped_w = (width - crop_width)
crop_left = amount_to_be_cropped_w // 2
return tf.slice(
image, [crop_top, crop_left, 0], [crop_height, crop_width, -1])
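# Example: centrally cropping a 256x512 image to 224x224 takes the slice
# starting at (crop_top, crop_left) = ((256-224)//2, (512-224)//2) = (16, 144).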
def _smallest_size_at_least(height, width, resize_min):
"""Computes new shape with the smallest side equal to `smallest_side`.
Computes new shape with the smallest side equal to `smallest_side` while
preserving the original aspect ratio.
Args:
height: an int32 scalar tensor indicating the current height.
width: an int32 scalar tensor indicating the current width.
resize_min: A python integer or scalar `Tensor` indicating the size of
the smallest side after resize.
Returns:
new_height: an int32 scalar tensor indicating the new height.
new_width: an int32 scalar tensor indicating the new width.
"""
resize_min = tf.cast(resize_min, tf.float32)
# Convert to floats to make subsequent calculations go smoothly.
height, width = tf.cast(height, tf.float32), tf.cast(width, tf.float32)
smaller_dim = tf.minimum(height, width)
scale_ratio = resize_min / smaller_dim
# Convert back to ints to make heights and widths that TF ops will accept.
new_height = tf.cast(height * scale_ratio, tf.int32)
new_width = tf.cast(width * scale_ratio, tf.int32)
return new_height, new_width
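# Example: height=400, width=600, resize_min=256 gives scale_ratio = 256/400
# = 0.64, hence a new shape of (256, 384): the smaller side lands exactly on
# resize_min while the aspect ratio is preserved.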
def _aspect_preserving_resize(image, resize_min):
"""Resize images preserving the original aspect ratio.
Args:
image: A 3-D image `Tensor`.
resize_min: A python integer or scalar `Tensor` indicating the size of
the smallest side after resize.
Returns:
resized_image: A 3-D tensor containing the resized image.
"""
shape = tf.shape(image)
height, width = shape[0], shape[1]
new_height, new_width = _smallest_size_at_least(height, width, resize_min)
return _resize_image(image, new_height, new_width)
def _resize_image(image, height, width):
"""Simple wrapper around tf.resize_images.
This is primarily to make sure we use the same `ResizeMethod` and other
details each time.
Args:
image: A 3-D image `Tensor`.
height: The target height for the resized image.
width: The target width for the resized image.
Returns:
resized_image: A 3-D tensor containing the resized image. The first two
dimensions have the shape [height, width].
"""
return tf.image.resize(
image, [height, width], method=tf.image.ResizeMethod.BICUBIC)
def get_tfds_info(dataset, split):
"""Returns information about tfds dataset -- see `get_dataset_info()`."""
data_builder = tfds.builder(dataset)
return dict(
num_examples=data_builder.info.splits[split].num_examples,
num_classes=data_builder.info.features['label'].num_classes,
int2str=data_builder.info.features['label'].int2str,
examples_glob=None,
)
def get_directory_info(directory):
"""Returns information about directory dataset -- see `get_dataset_info()`."""
examples_glob = f'{directory}/*/*.JPEG'
paths = glob.glob(examples_glob)
get_classname = lambda path: path.split('/')[-2]
class_names = sorted(set(map(get_classname, paths)))
return dict(
num_examples=len(paths),
num_classes=len(class_names),
int2str=lambda id_: class_names[id_],
examples_glob=examples_glob,
)
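# Expected directory layout for the glob above (paths are illustrative):
#   <directory>/n01440764/img_0001.JPEG
#   <directory>/n01443537/img_0002.JPEG
# i.e. one sub-directory per class; the class name is taken from the parent
# directory of each .JPEG file.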
def get_dataset_info(dataset, split):
"""Returns information about a dataset.
Args:
dataset: Name of tfds dataset or directory -- see `./configs/common.py`
split: Which split to return data for (e.g. "test", or "train"; tfds also
supports splits like "test[:90%]").
Returns:
A dictionary with the following keys:
- num_examples: Number of examples in dataset/mode.
- num_classes: Number of classes in dataset.
- int2str: Function converting class id to class name.
- examples_glob: Glob to select all files, or None (for tfds dataset).
"""
directory = os.path.join(dataset, split)
if os.path.isdir(directory):
return get_directory_info(directory)
return get_tfds_info(dataset, split)
def get_datasets(config):
"""Returns `ds_train, ds_test` for specified `config`."""
if os.path.isdir(config.dataset):
train_dir = os.path.join(config.dataset, config.pp['train'])
test_dir = os.path.join(config.dataset, config.pp['test'])
if not os.path.isdir(train_dir):
raise ValueError('Expected to find directories"{}" and "{}"'.format(
train_dir,
test_dir,
))
logging.info('Reading dataset from directories "%s" and "%s"', train_dir,
test_dir)
ds_train = get_data_from_directory(
config=config, directory=train_dir, mode='train')
ds_test = get_data_from_directory(
config=config, directory=test_dir, mode='test')
else:
logging.info('Reading dataset from tfds "%s"', config.dataset)
ds_train = get_data_from_tfds(config=config, mode='train')
ds_test = get_data_from_tfds(config=config, mode='test')
return ds_train, ds_test
def get_data_from_directory(*, config, directory, mode):
"""Returns dataset as read from specified `directory`."""
dataset_info = get_directory_info(directory)
data = tf.data.Dataset.list_files(dataset_info['examples_glob'])
class_names = [
dataset_info['int2str'](id_) for id_ in range(dataset_info['num_classes'])
]
def _pp(path):
return dict(
image=path,
label=tf.where(
tf.strings.split(path, '/')[-2] == class_names
)[0][0],
)
image_decoder = lambda path: tf.image.decode_jpeg(tf.io.read_file(path), 3)
if config.trainer == 'train_mae' or config.trainer == 'train_xlnet':
return_mask = True
num_patches = config.num_patches
else:
return_mask = False
num_patches = None
if config.model.half_precision:
data_dtype = jnp.bfloat16 if jax.local_devices()[0].platform == 'tpu' else jnp.float16
else:
data_dtype = jnp.float32
return get_data(
data=data,
mode=mode,
num_classes=dataset_info['num_classes'],
image_decoder=image_decoder,
repeats=None if mode == 'train' else 1,
batch_size=config.batch_eval if mode == 'test' else config.batch,
image_size=config.pp['crop'],
shuffle_buffer=min(dataset_info['num_examples'], config.shuffle_buffer),
preprocess=_pp,
return_mask=return_mask,
num_patches=num_patches,
flip=config.flip,
randaug=config.randaug,
dtype=data_dtype)
def get_data_from_tfds(*, config, mode):
"""Returns dataset as read from tfds dataset `config.dataset`."""
data_builder = tfds.builder(config.dataset, data_dir=config.tfds_data_dir)
data_builder.download_and_prepare(
download_config=tfds.download.DownloadConfig(
manual_dir=config.tfds_manual_dir))
data = data_builder.as_dataset(
split=config.pp[mode],
# Reduces memory footprint in shuffle buffer.
decoders={'image': tfds.decode.SkipDecoding()},
shuffle_files=mode == 'train')
image_decoder = data_builder.info.features['image'].decode_example
dataset_info = get_tfds_info(config.dataset, config.pp[mode])
return get_data(
data=data,
mode=mode,
num_classes=dataset_info['num_classes'],
image_decoder=image_decoder,
repeats=None if mode == 'train' else 1,
batch_size=config.batch_eval if mode == 'test' else config.batch,
image_size=config.pp['crop'],
shuffle_buffer=min(dataset_info['num_examples'], config.shuffle_buffer))
def _at_least_x_are_equal(a, b, x):
"""At least `x` of `a` and `b` `Tensors` are equal."""
match = tf.equal(a, b)
match = tf.cast(match, tf.int32)
return tf.greater_equal(tf.reduce_sum(match), x)
def get_data(*,
data,
mode,
num_classes,
image_decoder,
repeats,
batch_size,
image_size,
shuffle_buffer,
flip,
randaug=None,
preprocess=None,
return_mask=False,
num_patches=None,
dtype=jnp.float32):
"""Returns dataset for training/eval.
Args:
data: tf.data.Dataset to read data from.
mode: Must be "train" or "test".
num_classes: Number of classes (used for one-hot encoding).
image_decoder: Applied to `features['image']` after shuffling. Decoding the
image after shuffling allows for a larger shuffle buffer.
repeats: How many times the dataset should be repeated. For indefinite
repeats specify None.
batch_size: Global batch size. Note that the returned dataset will have
dimensions [local_devices, batch_size / local_devices, ...].
image_size: Image size after cropping (for training) / resizing (for
evaluation).
shuffle_buffer: Number of elements to preload the shuffle buffer with.
flip: Whether to apply random horizontal flips during training.
randaug: Optional dash-separated RandAugment spec string
(num_layers-magnitude-...), or None to disable RandAugment.
preprocess: Optional preprocess function. This function will be applied to
the dataset just after repeat/shuffling, and before the data augmentation
preprocess step is applied.
return_mask: If True, emit a random patch permutation as the label (used by
the MAE/XLNet-style trainers) instead of a one-hot class vector.
num_patches: Number of patches to permute when return_mask is True.
dtype: dtype to which images are converted at the end of preprocessing.
"""
def _pp(data):
im = image_decoder(data['image'])
if im.shape[-1] == 1:
im = tf.repeat(im, 3, axis=-1)
if mode == 'train':
original_shape = tf.shape(im)
channels = im.shape[-1]
begin, size, _ = tf.image.sample_distorted_bounding_box(
tf.shape(im),
tf.constant([0.0, 0.0, 1.0, 1.0], dtype=tf.float32, shape=[1, 1, 4]),
area_range=(0.08, 1.0),
min_object_covered=0.1,
use_image_if_no_bounding_boxes=True,
max_attempts=10)
im = tf.slice(im, begin, size)
# Unfortunately, the above operation loses the depth dimension, so we
# need to restore it manually.
im.set_shape([None, None, channels])
bad = _at_least_x_are_equal(original_shape, tf.shape(im), 3)
im = tf.cond(
bad,
lambda: _central_crop(_aspect_preserving_resize(im, _RESIZE_MIN), image_size, image_size),
lambda: _resize_image(im, image_size, image_size))
if flip:
im = tf.image.random_flip_left_right(im)
if randaug:
input_image_type = im.dtype
im = tf.clip_by_value(im, 0.0, 255.0)
im = tf.cast(im, dtype=tf.uint8)
im = autoaugment.distort_image_with_randaugment(
im, int(randaug.split('-')[0]), float(randaug.split('-')[1]), float(randaug.split('-')[2]))
im = tf.cast(im, dtype=input_image_type)
im = tf.image.convert_image_dtype(im, dtype=tf.float32)
else:
im = _central_crop(_aspect_preserving_resize(im, _RESIZE_MIN), image_size, image_size)
im = normalize_image(im)
im = tf.image.convert_image_dtype(im, dtype=dtype)
if return_mask:
label = tf.random.shuffle(tf.range(num_patches))
# np.random.permutation().astype(int) # [196]
else:
label = tf.cast(tf.one_hot(data['label'], num_classes), dtype) # pylint: disable=no-value-for-parameter
return {'image': im, 'label': label}
data = data.repeat(repeats)
if mode == 'train':
data = data.shuffle(shuffle_buffer)
if preprocess is not None:
data = data.map(preprocess, tf.data.experimental.AUTOTUNE)
data = data.map(_pp, tf.data.experimental.AUTOTUNE)
data = data.batch(batch_size, drop_remainder=True)
# Shard data such that it can be distributed across devices
num_devices = jax.local_device_count()
def _shard(data):
data['image'] =
from biopharma import units
from biopharma.core import SpecificationViolatedError
from biopharma.specs import *
from enum import Enum
import os
from math import inf
import pandas as pd
import pytest
from pytest import fixture, raises
def test_quantity_specs():
spec = Q('m', 'desc')
assert spec.description == 'desc'
assert spec.parse('1 metre') == 1 * units.m
assert spec.parse('1.2 cm') == 1.2 * units.cm
with raises(SpecificationViolatedError):
spec.parse('1')
with raises(SpecificationViolatedError):
spec.parse('1 metress')
with raises(SpecificationViolatedError):
spec.parse('1 second')
assert spec.nested is None
assert spec.get is None
assert not spec.hidden
assert spec.zero == 0 * units.m
assert spec.zero.units == units.m
assert spec.inf == inf * units.m
assert spec.inf.units == units.m
new_spec = spec.with_same_units('new desc')
assert new_spec.units == spec.units
assert new_spec.description == 'new desc'
new_spec = spec.with_squared_units('new desc')
assert new_spec.units == spec.units * spec.units
assert new_spec.description == 'new desc'
assert spec.coerce(2) == 2 * units.m
assert spec.coerce(3 * units.m) == 3 * units.m
with raises(SpecificationViolatedError):
spec.coerce(1 * units.s)
with raises(AttributeError):
spec.coerce('1')
def test_bool_value_spec():
spec = Value(bool, 'my desc')
assert spec.description == 'my desc'
assert spec.parse(True) is True
assert spec.parse(False) is False
with raises(SpecificationViolatedError):
spec.parse('True')
assert spec.nested is None
assert spec.get is None
assert not spec.hidden
assert spec.coerce(True) is True
assert spec.coerce(2) is True
def test_float_value_spec():
spec = Value(float, 'float desc!')
assert spec.description == 'float desc!'
assert spec.parse(1.0) == 1.0
assert spec.parse(-1.5) == -1.5
assert spec.parse(2) == 2.0
assert type(spec.parse(2.0)) is float
assert type(spec.parse(2)) is float
with raises(SpecificationViolatedError):
spec.parse('2.5')
assert spec.nested is None
assert spec.get is None
assert not spec.hidden
assert spec.coerce(1.5) == 1.5
assert spec.coerce(True) == 1
assert spec.coerce('2') == 2
with raises(ValueError):
spec.coerce('one')
def test_hidden_spec():
spec = Q('s', 'desc', hidden=True)
assert spec.hidden
assert spec.description == 'desc'
assert spec.parse('55 s') == 55 * units.s
def test_enum_specs():
class E(Enum):
v1 = 1
v2 = 2
spec = Enumerated(E, 'enum')
assert spec.description == 'enum'
assert spec.parse(1) is E.v1
assert spec.parse('v2') is E.v2
with raises(SpecificationViolatedError):
spec.parse(3)
with raises(SpecificationViolatedError):
spec.parse('v3')
assert spec.nested is None
assert spec.get is None
assert not spec.hidden
assert spec.coerce(E.v1) is E.v1
assert spec.coerce('v1') is E.v1
with raises(SpecificationViolatedError):
spec.coerce(3)
@fixture
def mock_component():
"""Provide just enough of the ModelComponent interface for SpecifiedDict."""
class C:
pass
comp = C()
comp.facility = comp
root_folder = os.path.dirname(__file__)
comp.data_path = os.path.join(root_folder, 'data')
return comp
def test_table_spec_eq(mock_component):
eq_spec = Table(
columns={'EqName': str,
'Function': str,
'CostIndex': float,
'Size': in_units(col='Units'),
'Cost': in_units(col='Currency'),
'Diameter': in_units('cm')
},
index='EqName',
desc='$Table eq.',
column_descs={'Size': 'Eq size', 'Cost': 'Reference cost'})
assert eq_spec.description == '$Table eq.'
assert len(eq_spec.column_descriptions) == 6
assert eq_spec.column_descriptions['Size'] == 'Eq size'
assert eq_spec.column_descriptions['Cost'] == 'Reference cost'
assert eq_spec.column_descriptions['EqName'] == 'EqName'
assert eq_spec.column_descriptions['Diameter'] == 'Diameter'
assert eq_spec.column_descriptions['Function'] == 'Function'
assert eq_spec.column_descriptions['CostIndex'] == 'CostIndex'
eq_spec.component = mock_component
eq = eq_spec.parse('equipment.csv')
assert isinstance(eq, pd.DataFrame)
assert eq.index.name == 'EqName'
assert eq.index.dtype == object
for i in range(len(eq)):
assert hasattr(eq['Size'].iloc[i], 'units')
assert eq['Size'].iloc[i].units == eq['Units'].iloc[i]
assert hasattr(eq['Cost'].iloc[i], 'units')
assert eq['Cost'].iloc[i].units == eq['Currency'].iloc[i]
assert eq['Diameter'].iloc[i].units == units.cm
assert eq['CostIndex'].dtype == float
# Check we only have the columns specified
assert set(eq.columns) == {
'EqName', 'Function', 'CostIndex', 'Size', 'Units', 'Cost', 'Currency', 'Diameter'}
assert eq_spec.nested is None
assert eq_spec.get is None
assert not eq_spec.hidden
class MyEnum(Enum):
"""A simple Enum for testing specs."""
v1 = 1
v2 = 2
def test_table_with_enumeration(mock_component):
spec = Table(
columns={'ID': int,
'Mode': MyEnum,
'SingleUse': bool,
},
index='ID',
desc='')
spec.component = mock_component
resins = spec.parse('resins.csv')
assert 'Mode' in resins.columns
assert resins['Mode'].dtype == object
for i in range(len(resins)):
assert isinstance(resins['Mode'].iloc[i], MyEnum)
@fixture
def table_spec():
"""A common sample table spec for tests below."""
spec = Table(
columns={'ID': int, 'CmCol': in_units('cm'), 'BoolCol': bool,
'QuantCol': in_units(col='UnitsCol'), 'EnumCol': MyEnum},
index='ID',
desc='desc')
assert spec.nested is None
assert spec.get is None
assert not spec.hidden
return spec
def check_table_data(data):
"""Common checks for table tests below using the table_spec fixture."""
assert isinstance(data, pd.DataFrame)
assert len(data) == 3
assert data.index.name == 'ID'
assert data['CmCol'].loc[1].units == units.cm
assert data['CmCol'].loc[1] == 1 * units.cm
assert data['CmCol'].loc[2] == 2 * units.cm
assert data['CmCol'].loc[3] == 3 * units.cm
assert data['BoolCol'].dtype == bool
assert data['BoolCol'].loc[1]
assert not data['BoolCol'].loc[2]
assert data['BoolCol'].loc[3]
assert data['EnumCol'].dtype == object
assert data['EnumCol'].loc[1] is MyEnum.v1
assert data['EnumCol'].loc[2] is MyEnum.v2
assert data['EnumCol'].loc[3] is MyEnum.v1
assert data['QuantCol'].loc[1].units == units.m
assert data['QuantCol'].loc[1] == 100.0 * units.m
assert data['QuantCol'].loc[2] == 200.0 * units.s
assert data['QuantCol'].loc[3] == 300.0 * units.kg
# Check we only have the columns specified
assert set(data.columns) == {'ID', 'CmCol', 'BoolCol', 'QuantCol', 'UnitsCol', 'EnumCol'}
def test_table_from_csv_string(table_spec):
data = table_spec.parse('&CSV::ID,CmCol,BoolCol,QuantCol,UnitsCol,EnumCol\n'
'1,1,True,100.0,m,v1\n'
'2,2,False,200.0,s,2\n'
'3,3,True,300.0,kg,1\n')
check_table_data(data)
def test_table_from_dataframe(table_spec):
df = pd.DataFrame({'CmCol': [1 * units.cm, 2 * units.cm, 3 * units.cm],
'BoolCol': [True, False, True],
'UnitsCol': ['m', 's', 'kg'],
'QuantCol': [100.0 * units.m, 200.0 * units.s, 300.0 * units.kg],
'EnumCol': [MyEnum.v1, MyEnum.v2, MyEnum.v1],
'ID': [1, 2, 3]})
df.set_index('ID', inplace=True, drop=False)
data = table_spec.parse(df)
check_table_data(data)
@pytest.mark.skip(reason="causes a pandas indexing warning")
def test_table_from_dataframe_string_quantities(table_spec):
df = pd.DataFrame({'CmCol': ['1 cm', 'nan', '3 cm'],
'BoolCol': [True, False, True],
'UnitsCol': ['m', 's', 'kg'],
'QuantCol': ['100.0 m', '200.0 s', '300.0 kg'],
'EnumCol': [MyEnum.v1, MyEnum.v2, MyEnum.v1],
'ID': [1, 2, 3]})
df.set_index('ID', inplace=True, drop=False)
data = table_spec.parse(df)
from math import isnan
assert isnan(data['CmCol'].loc[2].magnitude)
data['CmCol'].loc[2] = 2 * units.cm
check_table_data(data)
def test_table_coercion(table_spec):
value_list = [{'ID': 1, 'CmCol': 1 * units.cm, 'BoolCol': True,
'QuantCol': 100.0, 'UnitsCol': 'm', 'EnumCol': '1'},
{'ID': 2, 'CmCol': 2 * units.cm, 'BoolCol': False,
'QuantCol': '200.0', 'UnitsCol': 's', 'EnumCol': 'v2'},
{'ID': 3, 'CmCol': 3 * units.cm, 'BoolCol': True,
'QuantCol': '300.0', 'UnitsCol': 'kg', 'EnumCol': MyEnum.v1}]
data = table_spec.coerce(value_list)
check_table_data(data)
def test_table_coercion_like_server(table_spec):
value_list = {'rows': [{'ID': 1, 'CmCol': '1 cm', 'BoolCol': True, 'NotCol': '300',
'QuantCol': '100.0', 'UnitsCol': 'm', 'EnumCol': '1'},
{'ID': 2, 'CmCol': '2 cm', 'BoolCol': False, 'NotCol': '400',
'QuantCol': 200.0, 'UnitsCol': 's', 'EnumCol': 'v2'},
{'ID': 3, 'CmCol': '3 cm', 'BoolCol': True, 'NotCol': '400',
'QuantCol': 300.0, 'UnitsCol': 'kg', 'EnumCol': MyEnum.v1}]}
data = table_spec.coerce(value_list)
check_table_data(data)
def test_nested_specs():
spec = Nested({
'sub1': Q('m', 'sub1 desc'),
'sub2': Value(bool, 'sub2 desc', hidden=True),
'nest': Nested({
'sub3': Q('s', 'sub3 desc')
}, 'inner desc')
}, 'outer desc')
# Check descriptions
assert spec.description == 'outer desc'
assert spec.nested['sub1'].description == 'sub1 desc'
assert spec.nested['nest'].description == 'inner desc'
assert spec.nested['nest'].nested['sub3'].description == 'sub3 desc'
# Check we can fill the whole dict from "YAML"
data = {
'sub1': '1 metre',
'sub2': False,
'nest': {
'sub3': '2 s'
}
}
value = spec.parse(data)
assert len(value) == 3
assert value['sub1'] == 1 * units.m
assert value['sub2'] is False
assert isinstance(value['nest'], dict)
assert len(value['nest']) == 1
assert value['nest']['sub3'] == 2 * units.s
# Check we can assign individual items
value['sub1'] = 3 * units.m
assert value['sub1'] == 3 * units.m
value['nest']['sub3'] = 4 * units.s
assert value['nest']['sub3'] == 4 * units.s
assert spec.get is None
assert not spec.hidden
assert not spec.nested['sub1'].hidden
assert spec.nested['sub2'].hidden
assert not spec.nested['nest'].hidden
assert not spec.nested['nest'].nested['sub3'].hidden
# Check coercion
value = spec.coerce({
'sub1': 1,
'sub2': 0,
'nest': {
'sub3': 2
}
})
assert len(value) == 3
assert value['sub1'] == 1 * units.m
assert value['sub2'] is False
assert isinstance(value['nest'], dict)
assert len(value['nest']) == 1
assert value['nest']['sub3'] == 2 * units.s
def test_computed_spec_simple(mock_component):
constant = 2.5
desc = 'Our description'
spec = Computed(lambda self: constant, desc)
spec.component = mock_component
assert spec.description == desc
assert spec.get() == constant
# Setting the value should fail
with raises(SpecificationViolatedError):
spec.parse('1')
with raises(SpecificationViolatedError):
spec.validate(1)
assert spec.nested is None
assert not spec.hidden
def test_computed_spec_obj_access(mock_component):
spec = Computed(lambda self: self.param * 2, 'Desc')
spec.component = mock_component
mock_component.param = 3.0
assert spec.get() == 6.0
mock_component.param = -2 * units.m
assert spec.get() == -4 * units.m
# Setting the value should still fail
with raises(SpecificationViolatedError):
spec.parse('1')
with raises(SpecificationViolatedError):
spec.validate(1)
@pytest.mark.parametrize('spec_type, arg', [
(Q, 'm'),
(Value, bool),
(Enumerated, Enum('E', '1,2,3')),
(Computed, lambda self: 1),
(Nested, {'sub1': Q('m', 'sub1 desc'),
'nest': Nested({
'sub2': Q('s', 'sub2 desc')
}, 'inner desc')}),
(Table, {'Name': str})
])
def test_cloning(mock_component, spec_type, arg):
if spec_type is Table:
spec = spec_type(columns=arg, desc='Desc', hidden=True)
else:
spec = spec_type(arg, desc='Desc')
spec.component = mock_component
clone = spec.clone()
assert clone.description == spec.description
assert clone.component is mock_component
clone.component = None
assert spec.component is mock_component
assert clone.hidden == spec.hidden
@pytest.mark.parametrize('spec_type, arg, input, result', [
(Q, 'm', '2 metres', 2 * units.m),
(Value, bool, True, True),
(Value, tuple, (1, '2', 3.4), (1, '2', 3.4)),
(Enumerated, MyEnum, 'v1', MyEnum.v1),
])
def test_yaml_roundtrip(spec_type, arg, input, result):
spec = spec_type(arg, desc='Description')
value = spec.parse(input)
assert value == result
# Now serialise to YAML and
"""Parser of expressions."""
# Copyright 2020 by California Institute of Technology
# Copyright (c) 2008-2013 INRIA and Microsoft Corporation
# All rights reserved. Licensed under 3-clause BSD.
#
# This module is based on the file:
#
# <https://github.com/tlaplus/tlapm/blob/main/src/expr/e_parser.ml>
#
import functools
from . import _combinators as pco
from . import _location, _optable
from . import _tla_combinators as intf
from . import tokens
from ._combinators import (
alt,
apply,
apply_question,
attempt,
bang,
choice,
choice_iter,
commit,
enabled,
fail,
first,
get,
lookahead,
optional,
or_,
question,
return_,
second,
second_commit,
sep,
sep1,
shift_eq,
shift_plus,
star,
star1,
succeed,
times,
times2,
use,
using,
)
from .ast import Nodes as tla_ast
# open Ext
# open Property
# open E_t
# open Tla_parser.P
# open Tla_parser
# open Token
#
# module Prop = Property
#
# module Op = Optable
# module B = Builtin
#
# (*let b = ref false*)
#
# let fixities =
# let fixities = Hashtbl.create 109 in
# let infix op prec assoc =
# Opr begin
# prec, Infix begin
# assoc, fun oploc a b ->
# let op = Util.locate op oploc in
# let loc = Loc.merge
# oploc
# (Loc.merge
# (Util.get_locus a)
# (Util.get_locus b)) in
# Util.locate (Apply (op, [a ; b])) loc
# end
# end in
def infix(op, prec, assoc):
def f(oploc, a, b):
# TODO: location annotations
return tla_ast.Apply(op, [a, b])
return pco.Opr(prec, pco.Infix(assoc, f))
# let bin_prod =
# Opr begin
# (10, 13), Infix begin
# Left, fun oploc a b ->
# let loc = Loc.merge
# oploc
# (Loc.merge
# (Util.get_locus a)
# (Util.get_locus b)) in
# Util.locate begin
# match a.core with
# | Product es -> Product (es @ [b])
# | _ -> Product [a ; b]
# end loc
# end
# end in
# let prefix op prec =
# Opr begin
# prec, Prefix begin
# fun oploc a ->
# let op = Util.locate op oploc in
# let loc = Loc.merge oploc (Util.get_locus a) in
# Util.locate (Apply (op, [a])) loc
# end
# end in
def prefix(op, prec):
def f(oploc, a):
# TODO: location annotations
return tla_ast.Apply(op, [a])
return pco.Opr(prec, pco.Prefix(f)) # pco.Prefix
# let postfix op prec =
# Opr begin
# prec, Postfix begin
# fun oploc a ->
# let op = Util.locate op oploc in
# let loc = Loc.merge oploc (Util.get_locus a) in
# Util.locate (Apply (op, [a])) loc
# end
# end
def postfix(op, prec):
def f(oploc, a):
# TODO: location annotations
return tla_ast.Apply(op, [a])
return pco.Opr(prec, pco.Postfix(f)) # pco.Postfix
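# Hedged note: infix/prefix/postfix mirror the OCaml helpers quoted above,
# wrapping a node-building callback in the precedence-climbing combinators
# (pco.Opr with pco.Infix/pco.Prefix/pco.Postfix). Location annotations are
# still TODO, so each callback currently ignores oploc.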
# in
# Hashtbl.iter begin
# fun form top ->
# Hashtbl.add fixities form begin
# match top.defn with
# | _ -> begin
# let defn = match top.defn with
# | Some bltin -> Internal bltin
# | None -> Opaque top.name
# in match top.fix with
# | Op.Prefix -> prefix defn top.prec
# | Op.Postfix -> postfix defn top.prec
# | Op.Infix ass ->
# infix defn top.prec begin
# match ass with
# | Op.Left -> Left
# | Op.Right -> Right
# | Op.Non -> Non
# end
# | _ ->
# failwith "Nonfix operator in optable?!"
# end
# end
# end Op.optable ;
def _generate_fixities():
fixities = dict()
for form, alternatives in _optable.optable.items():
fixities.setdefault(form, list())
for top in alternatives:
if top.defn is None:
defn = tla_ast.Opaque(top.name)
else:
# defn = tla_ast.Internal(top.defn)
defn = top.defn
if isinstance(top.fix, _optable.Prefix):
res = prefix(defn, top.prec)
elif isinstance(top.fix, _optable.Postfix):
res = postfix(defn, top.prec)
elif isinstance(top.fix, _optable.Infix):
assoc = top.fix.assoc
if isinstance(assoc, _optable.Left):
assoc = pco.Left()
elif isinstance(assoc, _optable.Right):
assoc = pco.Right()
elif isinstance(assoc, _optable.Non):
assoc = pco.Non()
else:
raise ValueError(assoc)
res = infix(defn, top.prec, assoc)
else:
raise ValueError(top.fix)
fixities[form].append(res)
return fixities
# Hashtbl.replace fixities "\\X" bin_prod ;
# Hashtbl.replace fixities "\\times" bin_prod ;
# fixities
fixities = _generate_fixities()
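# Hedged illustration of the resulting table (the exact Opr internals live in
# _combinators): each operator lexeme maps to a list of fixity alternatives,
# e.g.
#     alts = fixities['+']                      # list of pco.Opr entries
#     assert all(isinstance(a, pco.Opr) for a in alts)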
#
# let distinct =
# let module S = Set.Make (String) in
# let rec check seen = function
# | [] -> true
# | v :: vs ->
# not (S.mem v.core seen)
# && check (S.add v.core seen) vs
# in
# fun vs -> check S.empty vs
#
# let hint = locate anyident
def hint():
return intf.locate(intf.anyident())
# let rec expr b = lazy begin
# resolve (expr_or_op b);
# end
def expr(b):
while True:
f = functools.partial(expr_or_op, b)
r = pco.resolve(f)
yield r
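# Hedged note: the OCaml original wraps the recursive grammar in ``lazy`` to
# break the definition cycle; the generator above plays the same role, handing
# out a freshly resolved ``expr_or_op`` parser each time it is advanced.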
# attempt anyop >>+ begin fun p pts ->
# match Hashtbl.find_all fixities p with
# | [] -> fail ("unknown operator " ^ p)
# | ops ->
# let non_test = function
# | Opr (_, Infix (_, ix)) ->
# attempt (punct "("
# >>> (use (expr b)
# <*> (punct "," >>> use (expr b)))
# <<< punct ")")
# (* <<! [Printf.sprintf "args of nonfix_%s" p] *)
# <$> (fun (e, f) -> [P.Atm (ix pts e f)])
# | Opr (_, Postfix ix) ->
# attempt (punct "("
# >>> use (expr b)
# <<< punct ")")
# (* <<! [Printf.sprintf "args of nonfix_%s" p] *)
# <$> (fun e -> [P.Atm (ix pts e)])
# | _ -> fail "Unnonable"
# in
# choice (List.map non_test ops @ [return ops pts])
def choice_fix_operators(b, p, pts):
    ops = fixities.get(p, [])  # mirror Hashtbl.find_all: unknown operator -> empty list
assert isinstance(ops, list), ops
if not ops:
return fail(f"unknown operator {p}")
assert ops, ops
def non_test(op):
if isinstance(op, pco.Opr) and isinstance(op.opr, pco.Infix):
ix = op.opr.value
return (
attempt(
intf.punct("(")
<< second
>> use(expr(b))
<< times
>> (intf.punct(",") << second >> use(expr(b)))
<< first
>> intf.punct(")")
)
<< apply
>> (lambda e_f: [pco.Atm(ix(pts, e_f[0], e_f[1]))])
)
elif isinstance(op, pco.Opr) and isinstance(op.opr, pco.Postfix):
ix = op.opr.value
return (
attempt(
intf.punct("(")
<< second
>> use(expr(b))
<< first
>> intf.punct(")")
)
<< apply
>> (lambda e: [pco.Atm(ix(pts, e))])
)
else:
return fail("Unnonable")
new_ops = [non_test(op) for op in ops] + [return_(ops, pts)]
return choice(new_ops)
# (* record fields *)
# if not is_start then
# attempt begin
# locate (punct "." >>> anyname)
# end <$> begin
# fun sw ->
# [ P.Opr begin
# (17, 17),
# P.Postfix begin
# fun _ r ->
# let loc = Loc.merge (Util.get_locus r)
# (Util.get_locus sw) in
# Util.locate (Dot (r, sw.core)) loc
# end
# end ]
# end
# else fail "not a rproj" ;
def record_fields(is_start):
if is_start:
return fail("not a rproj")
assert not is_start, is_start
# def f(sw, _, r):
# return tla_ast.Dot(r, sw)
def f():
return (
attempt(intf.locate(intf.punct(".") << second >> intf.anyname()))
<< apply
>> (
lambda sw: [
pco.Opr(
(17, 17),
# Postfix(functools.partial(f, sw))
pco.Postfix(lambda _, r: tla_ast.Dot(r, sw)),
)
]
)
)
return ((tokens.PUNCT("."),), f)
# (* function arguments *)
#
# if not is_start then
# attempt begin
# locate (
# punct "["
# >>> sep1 (punct ",") (use (expr b))
# <<< punct "]")
# end
# <$> begin
# fun esw ->
# [ P.Opr begin
# (17, 17),
# P.Postfix begin
# fun oploc f ->
# let loc = Loc.merge (Util.get_locus f)
# (Util.get_locus esw) in
# Util.locate (FcnApp (f, esw.core)) loc
# end
# end ]
# end
# else fail "not a farg" ;
def function_arguments(b, is_start):
if is_start:
return fail("not a farg")
def f():
return (
attempt(
intf.locate(
intf.punct("[")
<< second
>> sep1(intf.punct(","), use(expr(b)))
<< first
>> intf.punct("]")
)
)
<< apply
>> (
lambda esw: [
pco.Opr(
(17, 17),
pco.Postfix(lambda oploc, f: tla_ast.FunctionApply(f, esw)),
)
]
)
)
return ((tokens.PUNCT("["),), f)
# if is_start then
# locate begin
# attempt (use (operator b))
# <*> use (opargs b)
# <*> optional (use (subref b))
# end <$> begin
# fun prs ->
# let ((op, args), sr) = prs.core in
# let e = match args with
# | [] -> op
# | _ -> Apply (op, args) @@ prs
# in match sr, op.core with
# | None, Opaque x when x.[0] = '<' ->
# (* A step name is more like an empty subref than an ident. *)
# [ P.Atm (Bang (e, []) @@ prs) ]
# | None, _ -> [ P.Atm e ]
# | Some sr, _ -> [ P.Atm (Bang (e, sr) @@ prs) ]
# end
# else fail "not an opapp" ;
def nonfix_operators(b, is_start):
if not is_start:
return fail("not an opapp")
assert is_start, is_start
def f(prs):
((op, args), sr) = prs
if not args:
e = op
else:
e = tla_ast.Apply(op, args)
if sr is None and isinstance(op, tla_ast.Opaque) and op.name[0] == "<":
return [pco.Atm(tla_ast.Bang(e, list()))]
elif sr is None:
return [pco.Atm(e)]
elif sr is not None:
return [pco.Atm(tla_ast.Bang(e, sr))]
else:
raise ValueError()
return (
intf.locate(
attempt(use(operator(b)))
<< times
>> use(opargs(b))
<< times
>> optional(use(subref(b)))
)
<< apply
>> f
)
# and expr_or_op b is_start =
def expr_or_op(b, is_start):
def choices():
# choice [
#
# (* labels *)
#
# if is_start then
# locate (attempt (use label) <**> use (expr b))
#         <$> (function {core = (l, e)}
"""
Title : mobilenetv2_1.py
Date : 2019-02-25
Author : <NAME>
Network : MobileNet V2(2D)
Dataset : ImageNet 100 Labels
Dropout : 0.5
"""
import cv2
import os
import random
import numpy as np
import tensorflow as tf
import time
TrainDir = "D:\\Tiny_ImageNet\\Tiny_ImageNet\\Train\\" # 1,537,631 images
EvalDir = "D:\\Tiny_ImageNet\\Tiny_ImageNet\\Test\\" # 119,424 images
# The names of these variables (ModelDir, ModelName) must come from the script name.
ModelName = "MobileNetv2_dropout_190225"
ModelDir = "D:\\SavedModel\\" + ModelName + "\\"
Filenames_Train = []
Filenames_Eval = []
index_train = 0
index_eval = 0
BatchSize = 64
Total_Train = 1537631
Total_Eval = 119424
# Total_List = np.zeros([16], dtype=int)
# Declaring Image Width and Image Height.
image_Width = 112
image_Height = 112
label_size = 100
channel = 3
exp = 6 # expansion ratio
# Count of Epoch(Epoch 1 ~ Epoch ?)
ForEpoch = 30
dropout_rate = 0.5
def load_image():
global Filenames_Train
global Filenames_Eval
print("###############################################")
print("Start Image Loading ...")
print("###############################################")
templist = []
for i in range(0, label_size):
filelist = os.listdir(TrainDir + str(i))
for j in range(0, len(filelist)):
templist.append([TrainDir + str(i) + '/' + filelist[j], i])
Filenames_Train = templist
random.shuffle(Filenames_Train)
templist = []
for i in range(0, label_size):
filelist = os.listdir(EvalDir + str(i))
for j in range(0, len(filelist)):
templist.append([EvalDir + str(i) + '/' + filelist[j], i])
Filenames_Eval = templist
random.shuffle(Filenames_Eval)
print("Finish Image Loading !")
print("###############################################")
def batch_train(batchsize):
global index_train
x_data = np.zeros([batchsize, image_Width, image_Height, channel], dtype=np.float32)
    y_data = np.zeros((batchsize, label_size), dtype=np.float32)  # zero-filled array for one-hot encoding
for i in range(0, batchsize):
value = cv2.imread(Filenames_Train[index_train + i][0])
value = value/255
value = cv2.resize(value, (image_Height, image_Width))
x_data[i] = value
y_data[i][Filenames_Train[index_train + i][1]] = 1
index_train += batchsize
if index_train + batchsize >= Total_Train:
index_train = 0
return x_data, y_data
def batch_eval(batchsize):
global index_eval
x_data = np.zeros([batchsize, image_Width, image_Height, channel], dtype=np.float32)
    y_data = np.zeros((batchsize, label_size), dtype=np.float32)  # zero-filled array for one-hot encoding
for i in range(0, batchsize):
value = cv2.imread(Filenames_Eval[index_eval + i][0])
value = value / 255
value = cv2.resize(value, (image_Height, image_Width))
x_data[i] = value
y_data[i][Filenames_Eval[index_eval + i][1]] = 1
index_eval += batchsize
if index_eval + batchsize >= Total_Eval:
index_eval = 0
return x_data, y_data
def batch_norm(input, n_out, training, scope='bn'):
with tf.variable_scope(scope):
beta = tf.Variable(tf.constant(0.0, shape=[n_out]), name='beta', trainable=True)
gamma = tf.Variable(tf.constant(1.0, shape=[n_out]), name='gamma', trainable=True)
batch_mean, batch_var = tf.nn.moments(input, [0, 1, 2], name='moments')
ema = tf.train.ExponentialMovingAverage(decay=0.5)
def mean_var_with_update():
ema_apply_op = ema.apply([batch_mean, batch_var])
with tf.control_dependencies([ema_apply_op]):
return tf.identity(batch_mean), tf.identity(batch_var)
mean, var = tf.cond(training, true_fn=mean_var_with_update,
false_fn=lambda: (ema.average(batch_mean), ema.average(batch_var)))
normed = tf.nn.batch_normalization(input, mean, var, beta, gamma, 1e-3)
return normed
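# Usage note (TF1 graph mode assumed): the tf.cond above updates the
# exponential moving averages only while `training` is True; at evaluation
# time the stored averages replace the per-batch statistics, which is why the
# feed_dicts below toggle `istraining`.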
# ================================================================================================================
def conv2d(input, weight, strides=1, padding='SAME', name=None):
return tf.nn.conv2d(input, weight, strides=[1, strides, strides, 1], padding=padding, name=name)
def conv2d_1x1(input, output_dim, strides=1, padding='SAME', name=None):
input_tensor_channel = input.get_shape().as_list()[-1]
w = tf.Variable(tf.truncated_normal(shape=[1, 1, input_tensor_channel, output_dim], stddev=0.1))
return conv2d(input, w, strides, padding, name)
def pw_conv(input, output_dim, strides=1, padding='SAME', name=None):
return conv2d_1x1(input, output_dim, strides, padding, name)
def dw_conv(input, depth_filter_channel=1, strides=1, padding='SAME', name=None):
input_tensor_channel = input.get_shape().as_list()[-1]
s = [1, strides, strides, 1]
p = padding
w = tf.Variable(tf.truncated_normal(shape=[3, 3, input_tensor_channel, depth_filter_channel], stddev=0.1))
return tf.nn.depthwise_conv2d(input, w, strides=s, padding=p, name=name)
def separable_conv(input, output_dim, depth_filter_channel=1, strides=1, padding='SAME', name=None):
input_tensor_channel = input.get_shape().as_list()[-1]
dw_kernel = tf.Variable(tf.truncated_normal(shape=[3, 3, input_tensor_channel, depth_filter_channel], stddev=0.1))
pw_kernel = tf.Variable(tf.truncated_normal(shape=[1, 1, input_tensor_channel * depth_filter_channel, output_dim],
stddev=0.1))
s = [1, strides, strides, 1]
p = padding
return tf.nn.separable_conv2d(input, dw_kernel, pw_kernel, strides=s, padding=p, name=name)
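# Hedged note: separable_conv is defined but not referenced by the MobileNetV2
# graph below; V2 composes pw_conv/dw_conv inside bottleneck_conv instead, so
# this helper appears to be carried over from a MobileNet V1 implementation.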
def bottleneck_conv(input, expansion_ratio, output_dim, istraining, strides=1, padding='SAME', name=None):
input_tensor_channel = input.get_shape().as_list()[-1]
bottleneck_dim = round(expansion_ratio * input_tensor_channel)
p = padding
# step 1. Pointwise Conv
pw1 = tf.nn.relu(batch_norm(pw_conv(input, bottleneck_dim, strides=1, padding=p, name=name),
n_out=bottleneck_dim, training=istraining))
# step 2. Depthwise Conv
dw1 = dw_conv(pw1, strides=strides, padding=p, name=name)
dw2 = batch_norm(dw1, n_out=dw1.get_shape().as_list()[-1], training=istraining)
dw3 = tf.nn.relu(dw2)
# step 3. Pointwise Conv & Linear
pwl1 = pw_conv(dw3, output_dim, strides=1, padding=p, name=name)
pwl2 = batch_norm(pwl1, n_out=output_dim, training=istraining)
output = pwl2
return output
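# Hedged sketch: the MobileNetV2 paper adds an identity shortcut around the
# bottleneck whenever strides == 1 and the channel count is unchanged; this
# script omits it. A minimal variant reusing bottleneck_conv (the name
# `bottleneck_residual` is ours, not from the original script):
def bottleneck_residual(input, expansion_ratio, output_dim, istraining, strides=1, padding='SAME', name=None):
    out = bottleneck_conv(input, expansion_ratio, output_dim, istraining, strides, padding, name)
    if strides == 1 and input.get_shape().as_list()[-1] == output_dim:
        out = out + input  # element-wise residual add
    return out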
def MobileNetV2(input, weight, b_training, expansion_rate, label_size, keep_prob):
"""
:param input: Input Image
:param weight: Weight
:param b_training: bool variable(istraining, True or False)
:param expansion_rate: expansion rate
:param label_size: label size
:return: Last Fully-Connected Layer
"""
# ================================================================================
# N E T W O R K #
# ================================================================================
C1 = tf.nn.relu(batch_norm(conv2d(input, weight['wc1'], strides=2, padding='SAME'), n_out=32,
training=b_training), name='C1')
    print(C1)  # 56 * 56 (the 112-pixel input is halved by the stride-2 conv)
C2 = bottleneck_conv(C1, 1, 16, istraining=b_training, strides=1, padding='SAME', name='C2')
    print(C2)  # 56 * 56
C3_1 = bottleneck_conv(C2, expansion_rate, 24, istraining=b_training, strides=2, padding='SAME', name='C3_1')
C3_2 = bottleneck_conv(C3_1, expansion_rate, 24, istraining=b_training, strides=1, padding='SAME', name='C3_2')
    print(C3_2)  # 28 * 28
C4_1 = bottleneck_conv(C3_2, expansion_rate, 32, istraining=b_training, strides=2, padding='SAME', name='C4_1')
C4_2 = bottleneck_conv(C4_1, expansion_rate, 32, istraining=b_training, strides=1, padding='SAME', name='C4_2')
C4_3 = bottleneck_conv(C4_2, expansion_rate, 32, istraining=b_training, strides=1, padding='SAME', name='C4_3')
    print(C4_3)  # 14 * 14
C5_1 = bottleneck_conv(C4_3, expansion_rate, 64, istraining=b_training, strides=2, padding='SAME', name='C5_1')
C5_2 = bottleneck_conv(C5_1, expansion_rate, 64, istraining=b_training, strides=1, padding='SAME', name='C5_2')
C5_3 = bottleneck_conv(C5_2, expansion_rate, 64, istraining=b_training, strides=1, padding='SAME', name='C5_3')
C5_4 = bottleneck_conv(C5_3, expansion_rate, 64, istraining=b_training, strides=1, padding='SAME', name='C5_4')
    print(C5_4)  # 7 * 7
C6_1 = bottleneck_conv(C5_4, expansion_rate, 96, istraining=b_training, strides=1, padding='SAME', name='C6_1')
C6_2 = bottleneck_conv(C6_1, expansion_rate, 96, istraining=b_training, strides=1, padding='SAME', name='C6_2')
C6_3 = bottleneck_conv(C6_2, expansion_rate, 96, istraining=b_training, strides=1, padding='SAME', name='C6_3')
    print(C6_3)  # 7 * 7
C7_1 = bottleneck_conv(C6_3, expansion_rate, 160, istraining=b_training, strides=2, padding='SAME', name='C7_1')
C7_2 = bottleneck_conv(C7_1, expansion_rate, 160, istraining=b_training, strides=1, padding='SAME', name='C7_2')
C7_3 = bottleneck_conv(C7_2, expansion_rate, 160, istraining=b_training, strides=1, padding='SAME', name='C7_3')
    print(C7_3)  # 4 * 4, so the 4x4 VALID avg_pool below yields 1 * 1
C8 = bottleneck_conv(C7_3, expansion_rate, 320, istraining=b_training, strides=1, padding='SAME', name='C8')
print(C8)
C9 = conv2d_1x1(C8, 1280, strides=1, padding='SAME', name='C9')
print(C9)
AVP = tf.nn.avg_pool(C9, ksize=[1, 4, 4, 1], strides=[1, 1, 1, 1], padding='VALID', name='avg_pool')
print(AVP)
C10 = conv2d_1x1(AVP, label_size, strides=1, padding='SAME', name='C10')
print(C10)
C11 = tf.nn.dropout(C10, keep_prob=keep_prob)
FC1 = tf.reshape(C11, shape=[-1, weight['wfc1'].get_shape().as_list()[0]])
print(FC1)
FC2 = tf.matmul(FC1, weight['wfc1'])
print(FC2)
return FC2
# ================================================================================================================
weights = {
# 'wc1' : tf.Variable(tf.truncated_normal([3, 7, 7, channel, 64], stddev=0.1))
'wc1': tf.Variable(tf.truncated_normal([3, 3, channel, 32], stddev=0.1)),
'wfc1': tf.Variable(tf.truncated_normal([1 * 1 * label_size, label_size], stddev=0.1))
# 3D : [frameSize, width, height, input_channel, output_channel]
# 2D : [width, height, input_channel, output_channel]
}
if __name__ == '__main__':
load_image()
X = tf.placeholder(tf.float32, [BatchSize, image_Width, image_Height, channel], name='X')
Y = tf.placeholder(tf.float32, [BatchSize, label_size], name='Y')
istraining = tf.placeholder(tf.bool, name='istraining')
keep_prob = tf.placeholder(tf.float32, name='keep_prob')
result = MobileNetV2(input=X,
weight=weights,
b_training=istraining,
expansion_rate=exp,
label_size=label_size,
keep_prob=keep_prob)
cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=result,
labels=Y))
print("* Cross Entropy SIZE : " + str(cross_entropy))
Result_argmax = tf.argmax(tf.nn.softmax(result), 1)
Label_argmax = tf.argmax(Y, 1)
print("* Result Argmax : ", Result_argmax)
print("* Label Argmax : ", Label_argmax)
ay = tf.argmax(tf.nn.softmax(result), 1)
ly = tf.argmax(tf.nn.softmax(Y), 1)
correct_prediction = tf.equal(Result_argmax, Label_argmax)
print("* tf.argmax : " + str(Result_argmax))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
train_step = tf.train.AdamOptimizer(0.00001 * BatchSize).minimize(cross_entropy)
accuracy_list = []
accuracy_sum = 0
time_list = []
time_sum = 0
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
saver = tf.train.Saver() # Network model Save
# save_path = saver.restore(sess, ModelDir + ModelName + "_Epoch_7.ckpt")
# ==========================================================================================================
# Training!
# ==========================================================================================================
        for epoch in range(0, ForEpoch):  # epoch 1 ~ epoch 30 (ForEpoch)
count = 0
# =============================================
percentage = epoch / ForEpoch
dropout_rate = (dropout_rate * percentage + 0.17)
dropout_rate = round(dropout_rate, 2)
# =============================================
for i in range(int(Total_Train / BatchSize)):
count = count + 1
bx, by = batch_train(BatchSize)
bx = np.reshape(bx, [BatchSize, image_Width, image_Height, channel])
ts, loss, train_accuracy, ra, la = sess.run([train_step, cross_entropy, accuracy, Result_argmax,
Label_argmax], feed_dict={X: bx,
Y: by,
istraining.name: True,
keep_prob: 0.5})
# for r in range(0, 5):
# print("Result Weight [" + str(r * 3 + 0) + "] " + str(RS[0][r * 3 + 0]) + " " +
# "Result Weight [" + str(r * 3 + 1) + "] " + str(RS[0][r * 3 + 1]) + " " +
# "Result Weight [" + str(r * 3 + 2) + "] " + str(RS[0][r * 3 + 2]) + " ")
print('[%d]' % count,
'Epoch %d ' % (epoch + 1),
'Training accuracy %g ' % train_accuracy,
'loss %g ' % loss)
# save_path = saver.save(sess, ModelDir + "\\" + ModelName + "_Epoch_" + str(epoch + 1) + ".ckpt")
count = 0
for j in range(int(Total_Eval / BatchSize)):
count = count + 1
Start = time.time() # For Time Checking!
ex, ey = batch_eval(BatchSize)
ex = np.reshape(ex, [BatchSize, image_Width, image_Height, channel])
loss, l, y, eval_accuracy = sess.run([cross_entropy, ly, ay, accuracy],
feed_dict={X: ex,
Y: ey,
istraining.name: False,
keep_prob: 1.0})
                End = time.time() - Start
                print("[" + str(count) + "] Epoch", (epoch + 1),
                      " mini accuracy",
def _trans_derivative_inputs(primals_item):
    """Check and cast the element types of the inputs
    of derivative"""
value_type = [mstype.int32, mstype.int64, mstype.float32, mstype.float64]
    if dtype(primals_item) not in value_type:
raise TypeError(f"For `F.derivative`, the elements of primals should belong to "
f"`mstype.int32, mstype.int64, mstype.float32, mstype.float64`, but got"
f" {dtype(primals_item).__name__}.")
if dtype(primals_item) in [mstype.int32, mstype.int64]:
return cast(primals_item, mstype.float64)
return primals_item
def derivative(fn, primals, order):
"""
    This function is designed to calculate the higher-order differentiation of a given composite function. To
    compute the `order`-th order differentiations, the original inputs and the order must be provided together.
    In particular, the first-order Taylor coefficient of each input is set to 1, while the higher-order
    coefficients are set to 0.
Args:
fn (Union(Cell, function)): Function to do TaylorOperation.
primals (Union(Tensor, Tuple of Tensors)): The inputs to `fn`.
        order (int): For each Tensor, the `order`-th order derivative of the output with respect to the inputs
            will be computed.
Returns:
Tuple, tuple of out_primals and out_series.
- **out_primals** (Tensors or List of Tensors) - The output of `fn(primals)`.
        - **out_series** (Tensors or List of Tensors) - The `order`-th order derivative of the output with
          respect to the inputs.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import numpy as np
>>> import mindspore.nn as nn
>>> from mindspore import set_context, GRAPH_MODE
>>> import mindspore.ops as P
>>> from mindspore import Tensor
>>> from mindspore.ops.functional import derivative
>>> set_context(mode=GRAPH_MODE)
>>> class Net(nn.Cell):
... def __init__(self):
... super().__init__()
... self.sin = P.Sin()
... self.exp = P.Exp()
... def construct(self, x):
... out1 = self.sin(x)
... out2 = self.exp(out1)
... return out2
>>> primals = Tensor(np.array([[1, 2], [3, 4]]).astype(np.float32))
>>> order = 3
>>> net = Net()
>>> out_primals, out_series = derivative(net, primals, order)
>>> print(out_primals, out_series)
"""
derivative_fn = _taylor(fn)
concat_op = P.Concat()
series_one = 1
if isinstance(primals, tuple):
trans_primals = [_trans_derivative_inputs(item) for item in primals]
inputs = list(map(lambda x: concat_op((expand_dims(x, 0), ones((1,) + x.shape, dtype(x)))), trans_primals))
if order > 1:
inputs = list(map(lambda x: concat_op((x, zeros((order - 1,) + x[0].shape, dtype(x)))), inputs))
outputs = derivative_fn(*inputs)
else:
primals = _trans_derivative_inputs(primals)
series = zeros((order,) + primals.shape, dtype(primals))
series[0] = series_one
inputs = concat_op((expand_dims(primals, 0), series))
outputs = derivative_fn(inputs)
if isinstance(outputs, tuple) and tuple_len(outputs) > 1:
out_primals = [element[0] for element in outputs]
out_series = [element[-1] for element in outputs]
else:
out_primals = outputs[0]
out_series = outputs[-1]
return out_primals, out_series
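# Hedged note on the encoding above: for each input tensor x, a leading axis
# of Taylor coefficients [x, 1, 0, ..., 0] is assembled (primal first, then a
# unit first-order coefficient, then zeros up to `order`); the TaylorOperation
# propagates these coefficients through fn, so outputs[0] recovers the primal
# and outputs[-1] the `order`-th derivative term.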
def jvp(fn, inputs, v):
"""
Compute the jacobian-vector-product of the given network.
Args:
        fn (Union[Function, Cell]): The function or net that takes Tensor inputs and returns a single Tensor or a
            tuple of Tensors.
inputs (Union[Tensor, Tuple or List of Tensors]): The inputs to `fn`.
        v (Union[Tensor, Tuple or List of Tensors]): The shape and type of `v` should be the same as the inputs.
Returns:
Tuple, tuple of output and jvp.
- **netout** (Tensor or Tuple of Tensors) - The output of "fn(inputs)".
- **jvp** (Tensor or Tuple of Tensors) - The result of the dot product.
Raises:
TypeError: If the input is not a tensor or tuple or list of tensors.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
        >>> import numpy as np
        >>> import mindspore.nn as nn
        >>> from mindspore.ops import functional as F
        >>> from mindspore import Tensor
>>> class Net(nn.Cell):
... def construct(self, x, y):
... return x**3 + y
>>> x = Tensor(np.array([[1, 2], [3, 4]]).astype(np.float32))
>>> y = Tensor(np.array([[1, 2], [3, 4]]).astype(np.float32))
>>> v = Tensor(np.array([[1, 1], [1, 1]]).astype(np.float32))
>>> output = F.jvp(Net(), (x, y), (v, v))
>>> print(output[0])
[[ 2. 10.]
[30. 68.]]
>>> print(output[1])
[[ 4. 13.]
[28. 49.]]
"""
jvp_inner = _JvpInner()
@ms_function(hash_args=fn)
def _wrap_container(*arg):
args = arg[1:]
vectors = arg[0]
return jvp_inner(fn, vectors, *args)
if not isinstance(inputs, (Tensor, tuple, list)) or not isinstance(v, (Tensor, tuple, list)):
_raise_type_error()
if isinstance(v, list):
v = tuple(v)
if isinstance(inputs, (tuple, list)):
return _wrap_container(v, *inputs)
return _wrap_container(v, inputs)
def vjp(fn, inputs, v):
"""
Compute the vector-jacobian-product of the given network.
Args:
        fn (Union[Function, Cell]): The function or net that takes Tensor inputs and returns a single Tensor or a
            tuple of Tensors.
inputs (Union[Tensor, Tuple or List of Tensors]): The inputs to `fn`.
        v (Union[Tensor, Tuple or List of Tensors]): The shape and type of `v` should be the same as the outputs.
Returns:
Tuple, tuple of output and vjp.
- **netout** (Tensor or Tuple of Tensors) - The output of "fn(inputs)".
- **vjp** (Tensor or Tuple of Tensors) - The result of the dot product.
Raises:
TypeError: If the input is not a tensor or tuple or list of tensors.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
        >>> import numpy as np
        >>> import mindspore.nn as nn
        >>> from mindspore.ops import functional as F
        >>> from mindspore import Tensor
>>> class Net(nn.Cell):
... def construct(self, x, y):
... return x**3 + y
>>> x = Tensor(np.array([[1, 2], [3, 4]]).astype(np.float32))
>>> y = Tensor(np.array([[1, 2], [3, 4]]).astype(np.float32))
>>> v = Tensor(np.array([[1, 1], [1, 1]]).astype(np.float32))
>>> output = F.vjp(Net(), (x, y), v)
>>> print(output[0])
[[ 2. 10.]
[30. 68.]]
>>> print(output[1])
(Tensor(shape=[2, 2], dtype=Float32, value=
[[ 3.00000000e+00, 1.20000000e+01],
[ 2.70000000e+01, 4.80000000e+01]]), Tensor(shape=[2, 2], dtype=Float32, value=
[[ 1.00000000e+00, 1.00000000e+00],
[ 1.00000000e+00, 1.00000000e+00]]))
"""
vjp_inner = _VjpInner()
@ms_function(hash_args=fn)
def wrap_container(*arg):
args = arg[:-1]
vectors = arg[-1]
return vjp_inner(fn, *args, vectors)
if not isinstance(inputs, (Tensor, tuple, list)) or not isinstance(v, (Tensor, tuple, list)):
_raise_type_error()
if isinstance(v, list):
v = tuple(v)
if isinstance(inputs, (tuple, list)):
return wrap_container(*inputs, v)
return wrap_container(inputs, v)
shard_fn = Shard()
def shard(fn, in_strategy, out_strategy, device="Ascend", level=0):
return shard_fn(fn, in_strategy, out_strategy, device, level)
def arange(start=0, stop=None, step=1, rtype=None):
"""
Returns evenly spaced values within a given interval.
Args:
start(Union[int, float]): Start value of interval. The interval includes this value. When
`stop` is None, `start` must be greater than 0, and the interval is :math:`[0, start)`.
When `stop` is not None, `start` must be less than `stop`.
stop(Union[int, float], optional): End value of interval. The interval does not
include this value. Default is None.
step(Union[int, float], optional): Spacing between values. For any output
`out`, this is the distance between two adjacent values, :math:`out[i+1] - out[i]`.
            The default step size is 1. If `step` is specified as a positional argument,
`start` must also be given.
rtype (Union[:class:`mindspore.dtype`, str], optional): Designated tensor type.
If rtype is None, the data type of the new tensor will be inferred from start,
stop and step. Default is None.
Returns:
Tensor with evenly spaced values.
Raises:
TypeError: If input arguments have types not specified above.
ValueError: If input arguments have values not specified above.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore.ops as ops
>>> print(ops.arange(0, 5, 1))
[0 1 2 3 4]
>>> print(ops.arange(3))
[0 1 2]
>>> print(ops.arange(start=0, stop=3))
[0 1 2]
>>> print(ops.arange(0, stop=3, step=0.5))
[0. 0.5 1. 1.5 2. 2.5]
"""
if stop is None:
start, stop = 0, start
arg_map = {"start": start, "stop": stop, "step": step}
for arg in ("start", "stop", "step"):
arg_value = arg_map.get(arg)
if not isinstance(arg_value, int) and not isinstance(arg_value, float):
_raise_arange_type_error(arg, arg_value)
if start >= stop:
_raise_arange_value_error(start, stop)
if rtype is None:
if isinstance(start, float) or isinstance(stop, float) or isinstance(step, float):
rtype = mstype.float32
else:
rtype = mstype.int32
data = _arange(start, stop, step)
return _make_tensor(data, rtype)
@constexpr
def _make_tensor(data, rtype):
"""Make Tensor"""
return Tensor(data, dtype=rtype)
@constexpr
def _arange(start, stop, step):
"""Arange compute"""
return np.arange(start, stop, step)
@constexpr
def _raise_arange_type_error(arg, arg_value):
"""
Raise TypeError in both graph/pynative mode.
"""
raise TypeError("For mindspore.ops.arange, the argument '{}' must be int or float, but got {}."
.format(arg, type(arg_value)))
@constexpr
def _raise_arange_value_error(start, stop):
"""
Raise TypeError in both graph/pynative mode
"""
raise ValueError("For mindspore.ops.arange, the argument 'start' must be < 'stop', but got 'start': {}, "
"'stop': {}.".format(start, stop))
def narrow(inputs, axis, start, length):
"""
Returns a narrowed tensor from input tensor.
    Along dimension `axis`, the output contains the elements from index `start` up to `start + length`.
Args:
inputs (Tensor): the tensor to narrow.
axis (int): the axis along which to narrow.
        start (int): the starting index along `axis`.
        length (int): the number of elements to keep along `axis`.
Returns:
Tensor.
- output (Tensors) - The narrowed tensor.
Raises:
TypeError: If the input is not a tensor or tuple or list of tensors.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
        >>> import mindspore.ops as ops
# sdk/recoveryservices/azure-mgmt-recoveryservicessiterecovery/azure/mgmt/recoveryservicessiterecovery/operations/_replication_protected_items_operations.py
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class ReplicationProtectedItemsOperations(object):
"""ReplicationProtectedItemsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.recoveryservicessiterecovery.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list_by_replication_protection_containers(
self,
fabric_name, # type: str
protection_container_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.ReplicationProtectedItemCollection"]
"""Gets the list of Replication protected items.
Gets the list of ASR replication protected items in the protection container.
:param fabric_name: Fabric name.
:type fabric_name: str
:param protection_container_name: Protection container name.
:type protection_container_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ReplicationProtectedItemCollection or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.recoveryservicessiterecovery.models.ReplicationProtectedItemCollection]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ReplicationProtectedItemCollection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-06-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_replication_protection_containers.metadata['url'] # type: ignore
path_format_arguments = {
'resourceName': self._serialize.url("self._config.resource_name", self._config.resource_name, 'str'),
'resourceGroupName': self._serialize.url("self._config.resource_group_name", self._config.resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'fabricName': self._serialize.url("fabric_name", fabric_name, 'str'),
'protectionContainerName': self._serialize.url("protection_container_name", protection_container_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('ReplicationProtectedItemCollection', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_by_replication_protection_containers.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{resourceName}/replicationFabrics/{fabricName}/replicationProtectionContainers/{protectionContainerName}/replicationProtectedItems'} # type: ignore
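    # Hedged usage sketch (vault/fabric/container names hypothetical): callers
    # reach this operation group through the generated client, e.g.
    #     client = SiteRecoveryManagementClient(credential, subscription_id, ...)
    #     for item in client.replication_protected_items \
    #             .list_by_replication_protection_containers("fabric1", "container1"):
    #         print(item.name)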
def get(
self,
fabric_name, # type: str
protection_container_name, # type: str
replicated_protected_item_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.ReplicationProtectedItem"
"""Gets the details of a Replication protected item.
Gets the details of an ASR replication protected item.
:param fabric_name: Fabric unique name.
:type fabric_name: str
:param protection_container_name: Protection container name.
:type protection_container_name: str
:param replicated_protected_item_name: Replication protected item name.
:type replicated_protected_item_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ReplicationProtectedItem, or the result of cls(response)
:rtype: ~azure.mgmt.recoveryservicessiterecovery.models.ReplicationProtectedItem
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ReplicationProtectedItem"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-06-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceName': self._serialize.url("self._config.resource_name", self._config.resource_name, 'str'),
'resourceGroupName': self._serialize.url("self._config.resource_group_name", self._config.resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'fabricName': self._serialize.url("fabric_name", fabric_name, 'str'),
'protectionContainerName': self._serialize.url("protection_container_name", protection_container_name, 'str'),
'replicatedProtectedItemName': self._serialize.url("replicated_protected_item_name", replicated_protected_item_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ReplicationProtectedItem', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{resourceName}/replicationFabrics/{fabricName}/replicationProtectionContainers/{protectionContainerName}/replicationProtectedItems/{replicatedProtectedItemName}'} # type: ignore
def _create_initial(
self,
fabric_name, # type: str
protection_container_name, # type: str
replicated_protected_item_name, # type: str
input, # type: "_models.EnableProtectionInput"
**kwargs # type: Any
):
# type: (...) -> Optional["_models.ReplicationProtectedItem"]
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.ReplicationProtectedItem"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-06-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceName': self._serialize.url("self._config.resource_name", self._config.resource_name, 'str'),
'resourceGroupName': self._serialize.url("self._config.resource_group_name", self._config.resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'fabricName': self._serialize.url("fabric_name", fabric_name, 'str'),
'protectionContainerName': self._serialize.url("protection_container_name", protection_container_name, 'str'),
'replicatedProtectedItemName': self._serialize.url("replicated_protected_item_name", replicated_protected_item_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(input, 'EnableProtectionInput')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ReplicationProtectedItem', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{resourceName}/replicationFabrics/{fabricName}/replicationProtectionContainers/{protectionContainerName}/replicationProtectedItems/{replicatedProtectedItemName}'} # type: ignore
def begin_create(
self,
fabric_name, # type: str
protection_container_name, # type: str
replicated_protected_item_name, # type: str
input, # type: "_models.EnableProtectionInput"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.ReplicationProtectedItem"]
"""Enables protection.
The operation to create an ASR replication protected item (Enable replication).
:param fabric_name: Name of the fabric.
:type fabric_name: str
:param protection_container_name: Protection container name.
:type protection_container_name: str
:param replicated_protected_item_name: A name for the replication protected item.
:type replicated_protected_item_name: str
:param input: Enable Protection Input.
:type input: ~azure.mgmt.recoveryservicessiterecovery.models.EnableProtectionInput
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either ReplicationProtectedItem or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.recoveryservicessiterecovery.models.ReplicationProtectedItem]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.ReplicationProtectedItem"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_initial(
fabric_name=fabric_name,
protection_container_name=protection_container_name,
replicated_protected_item_name=replicated_protected_item_name,
input=input,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('ReplicationProtectedItem', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceName': self._serialize.url("self._config.resource_name", self._config.resource_name, 'str'),
'resourceGroupName': self._serialize.url("self._config.resource_group_name", self._config.resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'fabricName': self._serialize.url("fabric_name", fabric_name, 'str'),
'protectionContainerName': self._serialize.url("protection_container_name", protection_container_name, 'str'),
'replicatedProtectedItemName': self._serialize.url("replicated_protected_item_name", replicated_protected_item_name, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{resourceName}/replicationFabrics/{fabricName}/replicationProtectionContainers/{protectionContainerName}/replicationProtectedItems/{replicatedProtectedItemName}'} # type: ignore
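    # Hedged usage sketch: begin_create returns an LROPoller; callers typically
    # block on .result() for the finished ReplicationProtectedItem, e.g.
    #     poller = client.replication_protected_items.begin_create(
    #         "fabric1", "container1", "item1", enable_protection_input)
    #     protected_item = poller.result()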
def _purge_initial(
self,
fabric_name, # type: str
protection_container_name, # type: str
replicated_protected_item_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-06-01"
# Construct URL
url = self._purge_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceName': self._serialize.url("self._config.resource_name", self._config.resource_name, 'str'),
'resourceGroupName': self._serialize.url("self._config.resource_group_name", self._config.resource_group_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
1.788007E+06, 1.817455E+06, 1.847247E+06, 1.877383E+06, 1.907867E+06,
])
# ---------------------- M = 2, I = 3 ---------------------------
M = 2
I = 3
TIPS_2017_ISOT_HASH[(M,I)] = TIPS_2017_ISOT[2]
TIPS_2017_ISOQ_HASH[(M,I)] = float64([
2.260550E+00, 3.809072E+01, 7.584727E+01, 1.136063E+02, 1.513692E+02, 1.891574E+02,
2.270552E+02, 2.652444E+02, 3.039959E+02, 3.436341E+02, 3.845016E+02, 4.269345E+02,
4.712510E+02, 5.177476E+02, 5.667010E+02, 6.183711E+02, 6.730046E+02, 7.308391E+02,
7.921059E+02, 8.570332E+02, 9.258477E+02, 9.987770E+02, 1.076051E+03, 1.157902E+03,
1.244567E+03, 1.336290E+03, 1.433318E+03, 1.535906E+03, 1.644315E+03, 1.758814E+03,
1.879680E+03, 2.007196E+03, 2.141654E+03, 2.283356E+03, 2.432611E+03, 2.589737E+03,
2.755060E+03, 2.928918E+03, 3.111655E+03, 3.303627E+03, 3.505199E+03, 3.716746E+03,
3.938651E+03, 4.171310E+03, 4.415128E+03, 4.670521E+03, 4.937913E+03, 5.217743E+03,
5.510457E+03, 5.816514E+03, 6.136385E+03, 6.470548E+03, 6.819498E+03, 7.183738E+03,
7.563783E+03, 7.960160E+03, 8.373410E+03, 8.804082E+03, 9.252740E+03, 9.719961E+03,
1.020633E+04, 1.071245E+04, 1.123894E+04, 1.178641E+04, 1.235551E+04, 1.294689E+04,
1.356122E+04, 1.419917E+04, 1.486143E+04, 1.554872E+04, 1.626173E+04, 1.700122E+04,
1.776792E+04, 1.856258E+04, 1.938600E+04, 2.023894E+04, 2.112221E+04, 2.203662E+04,
2.298300E+04, 2.396219E+04, 2.497505E+04, 2.602245E+04, 2.710526E+04, 2.822438E+04,
2.938072E+04, 3.057522E+04, 3.180880E+04, 3.308241E+04, 3.439702E+04, 3.575361E+04,
3.715316E+04, 3.859669E+04, 4.008520E+04, 4.161973E+04, 4.320132E+04, 4.483101E+04,
4.650988E+04, 4.823901E+04, 5.001948E+04, 5.185240E+04, 5.373887E+04, 5.568001E+04,
5.767697E+04, 5.973088E+04, 6.184289E+04, 6.401416E+04, 6.624588E+04, 6.853920E+04,
7.089533E+04, 7.331546E+04, 7.580078E+04, 7.835251E+04, 8.097186E+04, 8.366006E+04,
8.641834E+04, 8.924792E+04, 9.215004E+04, 9.512594E+04, 9.817687E+04, 1.013041E+05,
1.045088E+05, 1.077923E+05, 1.111559E+05, 1.146007E+05, 1.181281E+05, 1.217393E+05,
1.254356E+05, 1.292182E+05, 1.330883E+05, 1.370473E+05, 1.410964E+05, 1.452368E+05,
1.494697E+05, 1.537965E+05, 1.582183E+05, 1.627364E+05, 1.673520E+05, 1.720663E+05,
1.768805E+05, 1.817959E+05, 1.868137E+05, 1.919349E+05, 1.971610E+05, 2.024929E+05,
2.079318E+05, 2.134791E+05, 2.191356E+05, 2.249027E+05, 2.307815E+05, 2.367730E+05,
2.428784E+05, 2.490987E+05, 2.554351E+05, 2.618886E+05, 2.684603E+05, 2.751512E+05,
2.819624E+05, 2.888948E+05, 2.959496E+05, 3.031276E+05, 3.104298E+05, 3.178573E+05,
3.254109E+05, 3.330916E+05, 3.409004E+05, 3.488380E+05, 3.569055E+05, 3.651036E+05,
3.734333E+05, 3.818953E+05, 3.904905E+05, 3.992197E+05, 4.080837E+05, 4.170833E+05,
4.262192E+05, 4.354922E+05,
])
# ---------------------- M = 2, I = 4 ---------------------------
M = 2
I = 4
TIPS_2017_ISOT_HASH[(M,I)] = TIPS_2017_ISOT[2]
TIPS_2017_ISOQ_HASH[(M,I)] = float64([
1.325894E+01, 2.223057E+02, 4.426057E+02, 6.629202E+02, 8.832573E+02, 1.103737E+03,
1.324841E+03, 1.547611E+03, 1.773607E+03, 2.004700E+03, 2.242871E+03, 2.490065E+03,
2.748119E+03, 3.018750E+03, 3.303554E+03, 3.604030E+03, 3.921599E+03, 4.257630E+03,
4.613454E+03, 4.990381E+03, 5.389715E+03, 5.812764E+03, 6.260847E+03, 6.735300E+03,
7.237487E+03, 7.768800E+03, 8.330662E+03, 8.924535E+03, 9.551919E+03, 1.021436E+04,
1.091343E+04, 1.165077E+04, 1.242805E+04, 1.324700E+04, 1.410938E+04, 1.501703E+04,
1.597182E+04, 1.697567E+04, 1.803057E+04, 1.913855E+04, 2.030170E+04, 2.152217E+04,
2.280215E+04, 2.414392E+04, 2.554979E+04, 2.702215E+04, 2.856342E+04, 3.017611E+04,
3.186279E+04, 3.362608E+04, 3.546867E+04, 3.739332E+04, 3.940283E+04, 4.150010E+04,
4.368808E+04, 4.596979E+04, 4.834831E+04, 5.082679E+04, 5.340848E+04, 5.609665E+04,
5.889468E+04, 6.180600E+04, 6.483413E+04, 6.798265E+04, 7.125521E+04, 7.465556E+04,
7.818749E+04, 8.185488E+04, 8.566170E+04, 8.961199E+04, 9.370984E+04, 9.795946E+04,
1.023651E+05, 1.069311E+05, 1.116619E+05, 1.165621E+05, 1.216361E+05, 1.268886E+05,
1.323244E+05, 1.379484E+05, 1.437653E+05, 1.497802E+05, 1.559982E+05, 1.624244E+05,
1.690639E+05, 1.759222E+05, 1.830045E+05, 1.903164E+05, 1.978633E+05, 2.056509E+05,
2.136848E+05, 2.219708E+05, 2.305147E+05, 2.393225E+05, 2.484001E+05, 2.577535E+05,
2.673889E+05, 2.773125E+05, 2.875304E+05, 2.980491E+05, 3.088749E+05, 3.200142E+05,
3.314736E+05, 3.432596E+05, 3.553788E+05, 3.678379E+05, 3.806436E+05, 3.938026E+05,
4.073219E+05, 4.212082E+05, 4.354685E+05, 4.501096E+05, 4.651387E+05, 4.805627E+05,
4.963887E+05, 5.126236E+05, 5.292748E+05, 5.463492E+05, 5.638540E+05, 5.817965E+05,
6.001837E+05, 6.190230E+05, 6.383214E+05, 6.580863E+05, 6.783248E+05, 6.990442E+05,
7.202517E+05, 7.419545E+05, 7.641599E+05, 7.868749E+05, 8.101069E+05, 8.338629E+05,
8.581502E+05, 8.829758E+05, 9.083470E+05, 9.342706E+05, 9.607539E+05, 9.878038E+05,
1.015427E+06, 1.043631E+06, 1.072423E+06, 1.101809E+06, 1.131795E+06, 1.162390E+06,
1.193600E+06, 1.225430E+06, 1.257889E+06, 1.290982E+06, 1.324715E+06, 1.359096E+06,
1.394131E+06, 1.429826E+06, 1.466187E+06, 1.503220E+06, 1.540932E+06, 1.579328E+06,
1.618415E+06, 1.658198E+06, 1.698683E+06, 1.739876E+06, 1.781782E+06, 1.824407E+06,
1.867756E+06, 1.911835E+06, 1.956650E+06, 2.002204E+06, 2.048503E+06, 2.095553E+06,
2.143358E+06, 2.191923E+06, 2.241253E+06, 2.291353E+06, 2.342226E+06, 2.393878E+06,
2.446312E+06, 2.499534E+06,
])
# ---------------------- M = 2, I = 5 ---------------------------
M = 2
I = 5
TIPS_2017_ISOT_HASH[(M,I)] = TIPS_2017_ISOT[2]
TIPS_2017_ISOQ_HASH[(M,I)] = float64([
4.521080E+00, 7.618089E+01, 1.516935E+02, 2.272111E+02, 3.027382E+02, 3.783295E+02,
4.541900E+02, 5.307384E+02, 6.085758E+02, 6.884015E+02, 7.709370E+02, 8.568792E+02,
9.468802E+02, 1.041544E+03, 1.141433E+03, 1.247075E+03, 1.358972E+03, 1.477611E+03,
1.603466E+03, 1.737009E+03, 1.878710E+03, 2.029043E+03, 2.188489E+03, 2.357536E+03,
2.536684E+03, 2.726445E+03, 2.927343E+03, 3.139917E+03, 3.364724E+03, 3.602332E+03,
3.853330E+03, 4.118322E+03, 4.397931E+03, 4.692798E+03, 5.003582E+03, 5.330962E+03,
5.675636E+03, 6.038323E+03, 6.419760E+03, 6.820707E+03, 7.241943E+03, 7.684269E+03,
8.148509E+03, 8.635506E+03, 9.146128E+03, 9.681264E+03, 1.024183E+04, 1.082875E+04,
1.144299E+04, 1.208554E+04, 1.275739E+04, 1.345959E+04, 1.419317E+04, 1.495924E+04,
1.575889E+04, 1.659324E+04, 1.746347E+04, 1.837075E+04, 1.931629E+04, 2.030133E+04,
2.132713E+04, 2.239497E+04, 2.350618E+04, 2.466211E+04, 2.586412E+04, 2.711361E+04,
2.841201E+04, 2.976079E+04, 3.116142E+04, 3.261543E+04, 3.412435E+04, 3.568975E+04,
3.731326E+04, 3.899648E+04, 4.074109E+04, 4.254878E+04, 4.442127E+04, 4.636031E+04,
4.836769E+04, 5.044520E+04, 5.259471E+04, 5.481807E+04, 5.711720E+04, 5.949401E+04,
6.195048E+04, 6.448859E+04, 6.711037E+04, 6.981786E+04, 7.261315E+04, 7.549834E+04,
7.847557E+04, 8.154701E+04, 8.471484E+04, 8.798130E+04, 9.134862E+04, 9.481909E+04,
9.839500E+04, 1.020787E+05, 1.058725E+05, 1.097788E+05, 1.138000E+05, 1.179385E+05,
1.221969E+05, 1.265774E+05, 1.310827E+05, 1.357152E+05, 1.404776E+05, 1.453722E+05,
1.504017E+05, 1.555688E+05, 1.608759E+05, 1.663257E+05, 1.719208E+05, 1.776640E+05,
1.835577E+05, 1.896048E+05, 1.958079E+05, 2.021696E+05, 2.086927E+05, 2.153799E+05,
2.222339E+05, 2.292574E+05, 2.364532E+05, 2.438239E+05, 2.513722E+05, 2.591010E+05,
2.670130E+05, 2.751108E+05, 2.833972E+05, 2.918749E+05, 3.005467E+05, 3.094152E+05,
3.184831E+05, 3.277532E+05, 3.372281E+05, 3.469105E+05, 3.568031E+05, 3.669086E+05,
3.772295E+05, 3.877685E+05, 3.985282E+05, 4.095113E+05, 4.207202E+05, 4.321576E+05,
4.438260E+05, 4.557279E+05, 4.678659E+05, 4.802424E+05, 4.928599E+05, 5.057207E+05,
5.188275E+05, 5.321824E+05, 5.457879E+05, 5.596464E+05, 5.737601E+05, 5.881313E+05,
6.027624E+05, 6.176554E+05, 6.328126E+05, 6.482363E+05, 6.639284E+05, 6.798912E+05,
6.961267E+05, 7.126369E+05, 7.294239E+05, 7.464896E+05, 7.638361E+05, 7.814651E+05,
7.993786E+05, 8.175784E+05, 8.360663E+05, 8.548442E+05, 8.739137E+05, 8.932765E+05,
9.129343E+05, 9.328888E+05,
])
# ---------------------- M = 2, I = 6 ---------------------------
M = 2
I = 6
TIPS_2017_ISOT_HASH[(M,I)] = TIPS_2017_ISOT[2]
TIPS_2017_ISOQ_HASH[(M,I)] = float64([
2.651606E+01, 4.445925E+02, 8.851739E+02, 1.325785E+03, 1.766449E+03, 2.207477E+03,
2.650035E+03, 3.096522E+03, 3.550398E+03, 4.015699E+03, 4.496597E+03, 4.997122E+03,
5.521046E+03, 6.071862E+03, 6.652812E+03, 7.266940E+03, 7.917143E+03, 8.606215E+03,
9.336891E+03, 1.011188E+04, 1.093387E+04, 1.180560E+04, 1.272982E+04, 1.370933E+04,
1.474700E+04, 1.584577E+04, 1.700865E+04, 1.823873E+04, 1.953919E+04, 2.091330E+04,
2.236443E+04, 2.389604E+04, 2.551171E+04, 2.721511E+04, 2.901001E+04, 3.090030E+04,
3.288998E+04, 3.498317E+04, 3.718410E+04, 3.949710E+04, 4.192665E+04, 4.447733E+04,
4.715386E+04, 4.996106E+04, 5.290390E+04, 5.598748E+04, 5.921700E+04, 6.259783E+04,
6.613544E+04, 6.983547E+04, 7.370367E+04, 7.774594E+04, 8.196832E+04, 8.637699E+04,
9.097828E+04, 9.577865E+04, 1.007847E+05, 1.060033E+05, 1.114413E+05, 1.171057E+05,
1.230038E+05, 1.291430E+05, 1.355307E+05, 1.421748E+05, 1.490831E+05, 1.562635E+05,
1.637242E+05, 1.714737E+05, 1.795203E+05, 1.878728E+05, 1.965400E+05, 2.055309E+05,
2.148547E+05, 2.245208E+05, 2.345385E+05, 2.449177E+05, 2.556682E+05, 2.668000E+05,
2.783233E+05, 2.902486E+05, 3.025862E+05, 3.153471E+05, 3.285420E+05, 3.421821E+05,
3.562785E+05, 3.708427E+05, 3.858862E+05, 4.014209E+05, 4.174586E+05, 4.340114E+05,
4.510916E+05, 4.687115E+05, 4.868838E+05, 5.056212E+05, 5.249365E+05, 5.448429E+05,
5.653535E+05, 5.864817E+05, 6.082409E+05, 6.306448E+05, 6.537072E+05, 6.774420E+05,
7.018632E+05, 7.269850E+05, 7.528217E+05, 7.793877E+05, 8.066975E+05, 8.347658E+05,
8.636073E+05, 8.932368E+05, 9.236693E+05, 9.549198E+05, 9.870035E+05, 1.019935E+06,
1.053731E+06, 1.088405E+06, 1.123974E+06, 1.160452E+06, 1.197856E+06, 1.236200E+06,
1.275501E+06, 1.315774E+06, 1.357034E+06, 1.399297E+06, 1.442580E+06, 1.486897E+06,
1.532264E+06, 1.578697E+06, 1.626211E+06, 1.674823E+06, 1.724548E+06, 1.775400E+06,
1.827397E+06, 1.880553E+06, 1.934884E+06, 1.990405E+06, 2.047132E+06, 2.105079E+06,
2.164262E+06, 2.224697E+06, 2.286397E+06, 2.349379E+06, 2.413656E+06, 2.479244E+06,
2.546157E+06, 2.614410E+06, 2.684016E+06, 2.754992E+06, 2.827349E+06, 2.901104E+06,
2.976268E+06, 3.052857E+06, 3.130883E+06, 3.210361E+06, 3.291303E+06, 3.373723E+06,
3.457633E+06, 3.543047E+06, 3.629976E+06, 3.718434E+06, 3.808433E+06, 3.899985E+06,
3.993102E+06, 4.087795E+06, 4.184076E+06, 4.281956E+06, 4.381448E+06, 4.482561E+06,
4.585306E+06, 4.689695E+06, 4.795737E+06, 4.903442E+06, 5.012821E+06, 5.123884E+06,
5.236639E+06, 5.351097E+06,
])
# ---------------------- M = 2, I = 7 ---------------------------
M = 2
I = 7
TIPS_2017_ISOT_HASH[(M,I)] = TIPS_2017_ISOT[0]
TIPS_2017_ISOQ_HASH[(M,I)] = float64([
1.250890E+00, 2.020899E+01, 4.025104E+01, 6.029436E+01, 8.033986E+01, 1.003996E+02,
1.205209E+02, 1.408040E+02, 1.613971E+02, 1.824767E+02, 2.042283E+02, 2.268340E+02,
2.504658E+02, 2.752848E+02, 3.014409E+02, 3.290757E+02, 3.583237E+02, 3.893145E+02,
4.221748E+02, 4.570293E+02, 4.940023E+02, 5.332187E+02, 5.748044E+02, 6.188873E+02,
6.655979E+02, 7.150694E+02, 7.674382E+02, 8.228445E+02, 8.814321E+02, 9.433489E+02,
1.008747E+03, 1.077783E+03, 1.150618E+03, 1.227417E+03, 1.308352E+03, 1.393596E+03,
1.483332E+03, 1.577744E+03, 1.677024E+03, 1.781366E+03, 1.890973E+03, 2.006051E+03,
2.126813E+03, 2.253477E+03, 2.386266E+03, 2.525410E+03, 2.671145E+03, 2.823711E+03,
2.983356E+03, 3.150333E+03, 3.324902E+03, 3.507329E+03, 3.697886E+03, 3.896851E+03,
4.104509E+03, 4.321152E+03, 4.547077E+03, 4.782590E+03, 5.028001E+03, 5.283630E+03,
5.549800E+03, 5.826843E+03, 6.115099E+03, 6.414913E+03, 6.726639E+03, 7.050636E+03,
7.387272E+03, 7.736920E+03, 8.099964E+03, 8.476792E+03, 8.867801E+03, 9.273394E+03,
9.693984E+03, 1.012999E+04, 1.058184E+04, 1.104996E+04, 1.153481E+04, 1.203682E+04,
1.255646E+04, 1.309419E+04, 1.365049E+04, 1.422584E+04, 1.482072E+04, 1.543563E+04,
1.607109E+04, 1.672760E+04, 1.740569E+04, 1.810588E+04, 1.882871E+04, 1.957473E+04,
2.034450E+04, 2.113858E+04, 2.195754E+04, 2.280195E+04, 2.367242E+04, 2.456953E+04,
2.549389E+04, 2.644612E+04, 2.742683E+04, 2.843666E+04, 2.947625E+04, 3.054624E+04,
3.164730E+04, 3.278009E+04, 3.394529E+04, 3.514357E+04, 3.637564E+04, 3.764218E+04,
3.894392E+04, 4.028157E+04, 4.165585E+04, 4.306751E+04, 4.451729E+04, 4.600595E+04,
4.753424E+04, 4.910294E+04, 5.071284E+04, 5.236471E+04, 5.405937E+04, 5.579761E+04,
5.758027E+04, 5.940815E+04, 6.128210E+04, 6.320296E+04, 6.517158E+04, 6.718883E+04,
6.925557E+04, 7.137269E+04, 7.354107E+04, 7.576161E+04, 7.803522E+04, 8.036280E+04,
8.274529E+04, 8.518362E+04, 8.767872E+04, 9.023154E+04, 9.284305E+04, 9.551420E+04,
9.824598E+04, 1.010394E+05, 1.038953E+05, 1.068149E+05, 1.097991E+05, 1.128489E+05,
1.159653E+05, 1.191494E+05, 1.224023E+05, 1.257249E+05, 1.291182E+05, 1.325835E+05,
1.361217E+05, 1.397340E+05, 1.434213E+05, 1.471849E+05, 1.510257E+05, 1.549449E+05,
1.589437E+05, 1.630231E+05, 1.671842E+05, 1.714282E+05, 1.757562E+05, 1.801694E+05,
1.846689E+05, 1.892559E+05, 1.939315E+05, 1.986969E+05, 2.035533E+05, 2.085017E+05,
2.135436E+05, 2.186799E+05, 2.239119E+05, 2.292408E+05, 2.346678E+05, 2.401941E+05,
2.458208E+05, 2.515493E+05, 2.573807E+05, 2.633163E+05, 2.693572E+05, 2.755047E+05,
2.817600E+05, 2.881243E+05, 2.945989E+05, 3.011850E+05, 3.078838E+05, 3.146966E+05,
3.216246E+05, 3.286691E+05, 3.358312E+05, 3.431123E+05, 3.505135E+05, 3.580362E+05,
3.656815E+05, 3.734508E+05, 3.813451E+05, 3.893659E+05, 3.975143E+05, 4.057916E+05,
4.141989E+05, 4.227377E+05, 4.314090E+05, 4.402142E+05, 4.491544E+05, 4.582309E+05,
4.674450E+05, 4.767978E+05, 4.862906E+05, 4.959246E+05, 5.057011E+05, 5.156212E+05,
5.256861E+05, 5.358972E+05, 5.462555E+05, 5.567623E+05, 5.674188E+05, 5.782262E+05,
5.891856E+05, 6.002984E+05, 6.115655E+05, 6.229883E+05, 6.345679E+05, 6.463054E+05,
6.582021E+05, 6.702590E+05, 6.824774E+05, 6.948584E+05, 7.074030E+05, 7.201125E+05,
7.329880E+05, 7.460305E+05, 7.592412E+05, 7.726213E+05, 7.861717E+05, 7.998937E+05,
8.137882E+05, 8.278563E+05, 8.420992E+05, 8.565179E+05, 8.711134E+05, 8.858868E+05,
9.008391E+05, 9.159714E+05, 9.312846E+05, 9.467798E+05, 9.624580E+05, 9.783202E+05,
9.943673E+05, 1.010600E+06, 1.027020E+06, 1.043628E+06, 1.060425E+06,
])
# ---------------------- M = 2, I = 8 ---------------------------
M = 2
I = 8
TIPS_2017_ISOT_HASH[(M,I)] = TIPS_2017_ISOT[2]
TIPS_2017_ISOQ_HASH[(M,I)] = float64([
1.391189E+01, 2.356850E+02, 4.693664E+02, 7.030628E+02, 9.367842E+02, 1.170667E+03,
1.405248E+03, 1.641677E+03, 1.881655E+03, 2.127217E+03, 2.380506E+03, 2.643624E+03,
2.918557E+03, 3.207165E+03, 3.511177E+03, 3.832224E+03, 4.171854E+03, 4.531559E+03,
4.912793E+03, 5.316991E+03, 5.745580E+03, 6.199992E+03, 6.681674E+03, 7.192093E+03,
7.732745E+03, 8.305157E+03, 8.910895E+03, 9.551564E+03, 1.022881E+04, 1.094434E+04,
1.169988E+04, 1.249723E+04, 1.333824E+04, 1.422480E+04, 1.515886E+04, 1.614244E+04,
1.717760E+04, 1.826646E+04, 1.941121E+04, 2.061408E+04, 2.187739E+04, 2.320350E+04,
2.459484E+04, 2.605391E+04, 2.758326E+04, 2.918551E+04, 3.086338E+04, 3.261960E+04,
3.445702E+04, 3.637852E+04, 3.838709E+04, 4.048575E+04, 4.267762E+04, 4.496589E+04,
4.735382E+04, 4.984473E+04, 5.244204E+04, 5.514923E+04, 5.796986E+04, 6.090757E+04,
6.396608E+04, 6.714918E+04, 7.046075E+04, 7.390475E+04, 7.748521E+04, 8.120625E+04,
8.507207E+04, 8.908696E+04, 9.325528E+04, 9.758148E+04, 1.020701E+05, 1.067258E+05,
1.115532E+05, 1.165571E+05, 1.217425E+05, 1.271142E+05, 1.326774E+05, 1.384371E+05,
1.443986E+05, 1.505672E+05, 1.569483E+05, 1.635474E+05, 1.703700E+05, 1.774218E+05,
1.847086E+05, 1.922361E+05, 2.000103E+05, 2.080372E+05, 2.163229E+05, 2.248735E+05,
2.336952E+05, 2.427945E+05, 2.521776E+05, 2.618512E+05, 2.718217E+05, 2.820959E+05,
2.926803E+05, 3.035819E+05, 3.148074E+05, 3.263639E+05, 3.382582E+05, 3.504976E+05,
3.630890E+05, 3.760398E+05, 3.893571E+05, 4.030483E+05, 4.171208E+05, 4.315819E+05,
4.464392E+05, 4.617002E+05, 4.773724E+05, 4.934635E+05, 5.099811E+05, 5.269329E+05,
5.443266E+05, 5.621700E+05, 5.804710E+05, 5.992372E+05, 6.184765E+05, 6.381968E+05,
6.584059E+05, 6.791119E+05, 7.003224E+05, 7.220455E+05, 7.442890E+05, 7.670610E+05,
7.903691E+05, 8.142215E+05, 8.386259E+05, 8.635903E+05, 8.891225E+05, 9.152303E+05,
9.419216E+05, 9.692043E+05, 9.970860E+05, 1.025575E+06, 1.054678E+06, 1.084403E+06,
1.114758E+06, 1.145751E+06, 1.177388E+06, 1.209678E+06, 1.242628E+06, 1.276246E+06,
1.310538E+06, 1.345512E+06, 1.381175E+06, 1.417535E+06, 1.454598E+06, 1.492372E+06,
1.530863E+06, 1.570079E+06, 1.610026E+06, 1.650710E+06, 1.692140E+06, 1.734320E+06,
1.777258E+06, 1.820960E+06, 1.865433E+06, 1.910682E+06, 1.956714E+06, 2.003535E+06,
2.051150E+06, 2.099566E+06, 2.148789E+06, 2.198824E+06, 2.249676E+06, 2.301352E+06,
2.353856E+06, 2.407194E+06, 2.461371E+06, 2.516392E+06, 2.572262E+06, 2.628986E+06,
2.686569E+06, 2.745015E+06,
])
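# ------------------------------------------------------------------
# Usage sketch (not part of the original tables): each (M, I) key pairs a
# temperature grid in TIPS_2017_ISOT_HASH with tabulated partition sums in
# TIPS_2017_ISOQ_HASH, so Q(T) follows from 1-D interpolation on that grid.
# The helper name and the use of numpy.interp are illustrative assumptions.
def tips_partition_sum(M, I, T):
    from numpy import interp  # the module already depends on numpy via float64
    Tgrid = TIPS_2017_ISOT_HASH[(M, I)]  # temperature grid, K
    Qgrid = TIPS_2017_ISOQ_HASH[(M, I)]  # tabulated total internal partition sums
    return interp(T, Tgrid, Qgrid)       # linear interpolation between grid points
# ------------------------------------------------------------------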
# ---------------------- M = 2, I = 9 ---------------------------
M = 2
I = 9
TIPS_2017_ISOT_HASH[(M,I)] = TIPS_2017_ISOT[0]
TIPS_2017_ISOQ_HASH[(M,I)] = float64([
4.124397E+01, 6.874652E+02, 1.368916E+03, 2.050410E+03, 2.731976E+03, 3.413998E+03,
4.098007E+03, 4.787289E+03, 5.486743E+03, 6.202233E+03, 6.939947E+03, 7.705959E+03,
8.506016E+03, 9.345477E+03, 1.022934E+04, 1.116230E+04, 1.214882E+04, 1.319320E+04,
1.429960E+04, 1.547216E+04, 1.671497E+04, 1.803216E+04, 1.942786E+04, 2.090629E+04,
2.247174E+04, 2.412860E+04, 2.588134E+04, 2.773459E+04, 2.969305E+04, 3.176160E+04,
3.394522E+04, 3.624906E+04,
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
import numpy as np
from scipy import stats
import statsmodels.api as sm
from sklearn.metrics import accuracy_score
from sklearn.metrics import precision_recall_fscore_support as score
from sklearn.metrics import confusion_matrix
from sklearn.utils.multiclass import unique_labels
from sklearn.feature_selection import f_regression, SelectKBest, f_classif
from sklearn.metrics import mean_absolute_error, mean_squared_error, r2_score
def correlations(data, y, xs):
"""
    Computes Pearson's and Spearman's correlation coefficients.
Parameters
----------
data: Pandas Data Frame
y: Target/Dependent variable - has to be python string object
xs: Features/Independent variables - python list of string objects
Returns
------
df: Pandas Data Frame Object
"""
if data is None:
raise ValueError(
"The parameter 'data' must be assigned a non-nil reference to a Pandas DataFrame")
if (y is None) or (xs is None):
raise ValueError(
"The parameter `y` or `xs` has to be non-nil reference")
if not isinstance(data, pd.DataFrame):
raise ValueError("`data` - has to be Pandas DataFrame object")
if not isinstance(y, str):
        raise ValueError("`y` - has to be Python string object")
if not isinstance(xs, list):
raise ValueError("`xs` - has to be Python list object")
rs = []
rhos = []
for x in xs:
r = stats.pearsonr(data[y], data[x])[0]
rs.append(r)
rho = stats.spearmanr(data[y], data[x])[0]
rhos.append(rho)
return pd.DataFrame({"feature": xs, "r": rs, "rho": rhos})
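# Usage sketch (hypothetical data and column names):
#   demo = pd.DataFrame({'y': [1., 2., 3., 4.],
#                        'a': [1.1, 1.9, 3.2, 3.9],
#                        'b': [4., 3., 2., 1.]})
#   correlations(demo, 'y', ['a', 'b'])
# yields one row per feature with Pearson r and Spearman rho, e.g. r near +1
# for 'a' and exactly -1 for 'b'.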
def hist_plot(df, col_name, title=None, xlabel=None, ylabel='Density'):
"""
    Plots a histogram.
Parameters
----------
df : Pandas Data Frame
col_name : column name in data frame
title : Plot title
xlabel : X-axis label
ylabel : Y-axis label
"""
fig = plt.figure(figsize=(10, 6)) # define plot area
ax = fig.add_subplot(111) # add single subplot
sns.distplot(df[col_name], ax=ax) # Use seaborn plot
if not title:
title = 'Histogram of {}'.format(col_name)
ax.set_title(title) # Give the plot a main title
if not xlabel:
xlabel = col_name
ax.set_xlabel(xlabel) # Set text for the x axis
ax.set_ylabel(ylabel) # Set text for y axis
def box_plot(df, col_name, title=None, xlabel=None):
"""
    Draws a single horizontal boxplot.
Parameters
----------
df : Pandas Data Frame
col_name : column name in data frame
title : Plot title
xlabel : X-axis label
"""
fig = plt.figure(figsize=(10, 6)) # define plot area
ax = fig.add_subplot(111) # add single subplot
sns.boxplot(df[col_name], ax=ax) # Use seaborn plot
if not title:
title = 'Boxplot of {}'.format(col_name)
ax.set_title(title) # Give the plot a main title
if not xlabel:
xlabel = col_name
ax.set_xlabel(xlabel) # Set text for the x axis
def bar_plot(df, col_name, title=None, xlabel=None, ylabel='Count'):
"""
    Draws a single bar plot.
Parameters
----------
df : Pandas Data Frame
col_name : column name in data frame
title : Plot title
xlabel : X-axis label
ylabel : Y-axis label
"""
fig = plt.figure(figsize=(10, 6)) # define plot area
ax = fig.add_subplot(111) # add single subplot
    df[col_name].value_counts().plot.bar(
        ax=ax, color='steelblue')  # Use pandas bar plot on the subplot
if not title:
title = 'Barplot of {}'.format(col_name)
ax.set_title(title) # Give the plot a main title
if not xlabel:
        xlabel = col_name
ax.set_xlabel(xlabel) # Set text for the x axis
ax.set_ylabel(ylabel) # Set text for the y axis
def get_categorical_columns(df, min_card=None, max_card=None):
"""
Returns categorical columns from pandas dataframe
Parameters
----------
df : Pandas Dataframe
min_card : Minimum Cardinality (default = None)
max_card : Maximum Cardinality (default = None)
Returns
-------
Python list
"""
if df is None:
raise ValueError(
"The parameter 'df' must be assigned a non-nil reference to a Pandas DataFrame")
cat_cols = df.select_dtypes(
include=['category', 'object']).columns.tolist()
if min_card and (max_card is None):
cat_cols = [col for col in cat_cols if df[col].nunique() >= min_card]
elif max_card and (min_card is None):
cat_cols = [col for col in cat_cols if df[col].nunique() <= max_card]
elif min_card and max_card:
cat_cols = [col for col in cat_cols if (
df[col].nunique() >= min_card) and (df[col].nunique() <= max_card)]
return cat_cols
def get_numeric_columns(df):
"""
Returns numerical columns from pandas dataframe
Parameters
----------
df : Pandas Dataframe
Returns
-------
Python list
"""
if df is None:
raise ValueError(
"The parameter 'df' must be assigned a non-nil reference to a Pandas DataFrame")
return list(df.select_dtypes(exclude=['category', 'object']))
def plot_numerical_columns_reg(df, target_col, alpha=0.5, color='grey'):
"""
Plots numerical features vs. numerical target
Parameters
----------
df : Pandas Dataframe
target_col : Target variable, dependent variable
"""
num_columns = get_numeric_columns(df)
for col in sorted(num_columns):
if col != target_col:
sns.lmplot(x=col, y=target_col, data=df,
scatter_kws=dict(alpha=alpha, color=color))
plt.title(f'{target_col} vs. {col}')
plt.show()
plt.close()
def plot_categorical_reg(df, target_col, min_card=2, max_card=12, height=5, aspect=2, rotation=45, color='grey', kind='bar'):
"""
Plots categorical features vs. numerical target
Parameters
----------
df: Pandas Dataframe
    target_col: Target variable, dependent variable
min_card: Minimum cardinality of categorical feature
max_card: Maximum cardinality of categorical feature
"""
cat_columns = get_categorical_columns(df)
for col in sorted(cat_columns):
if (df[col].nunique() >= min_card) and (df[col].nunique() < max_card):
sns.catplot(x=col, y=target_col, data=df, kind=kind,
                        color=color, height=height, aspect=aspect)
plt.xticks(rotation=rotation)
plt.title(f'{target_col} vs. {col}')
plt.show()
plt.close()
def plot_correlation_heatmap(data=None, vmax=1, annot=True, corr_type='pearson', figsize=(12, 12)):
"""
Plots correlations on a heatmap
"""
if data is None:
raise ValueError(
"The parameter 'data' must be assigned a non-nil reference to a Pandas DataFrame")
# Compute the correlation matrix
    corr = data.corr(method=corr_type)
    # Generate a mask for the upper triangle
    mask = np.zeros_like(corr, dtype=bool)  # np.bool is removed in modern numpy
mask[np.triu_indices_from(mask)] = True
# Set up the matplotlib figure
fig, axes = plt.subplots(figsize=figsize)
# Generate a custom diverging colormap
cmap = sns.diverging_palette(220, 10, as_cmap=True)
# Draw the heatmap with the mask and correct aspect ratio
sns.heatmap(corr, mask=mask, cmap=cmap, vmax=vmax, annot=annot, square=True,
linewidths=.5, cbar_kws={"shrink": .5}, ax=axes)
plt.show()
plt.close()
def joint_plot(df, col_name1, col_name2, title=None, alpha=0.75):
"""
Draws joint plot
"""
sns.jointplot(x=col_name1, y=col_name2, data=df, alpha=alpha)
if not title:
title = 'Joint plot of {} vs. {}'.format(col_name1, col_name2)
plt.suptitle(title)
plt.show()
plt.close()
def get_quantiles(df, cols):
"""
    Returns the 1st, 25th, 50th, 75th, 95th and 99th percentiles.
    """
    q = df[cols].quantile(q=[.01, .25, .5, .75, .95, .99])
    return q.to_frame() if isinstance(q, pd.Series) else q
def get_leq_quantile(df, col, q):
"""
    Returns rows where `col` is less than or equal to the passed quantile.
    """
    return df[df[col] <= df[col].quantile(q=q)]
def get_geq_quantile(df, col, q):
"""
    Returns rows where `col` is greater than or equal to the passed quantile.
    """
    return df[df[col] >= df[col].quantile(q=q)]
def get_between_quantiles(df, col, qs):
"""
    Returns rows where `col` lies within the passed quantile range.
    """
    lower_q = min(qs)
    upper_q = max(qs)
    return df[(df[col] >= df[col].quantile(q=lower_q)) & (df[col] <= df[col].quantile(q=upper_q))]
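# Usage sketch (hypothetical column name): a common pattern is trimming tails
# before plotting, e.g. keeping rows between the 1st and 99th percentiles:
#   trimmed = get_between_quantiles(df, 'price', [.01, .99])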
def lowess_scatter(data, x, y, jitter=0.0, skip_lowess=False):
    """
    Scatter plot of y vs. x with a LOWESS trend line (or a straight
    least-squares line when skip_lowess is True). jitter > 0 adds Gaussian
    noise of that scale to x to relieve overplotting.
    """
    if skip_lowess:
fit = np.polyfit(data[x], data[y], 1)
line_x = np.linspace(data[x].min(), data[x].max(), 10)
line = np.poly1d(fit)
line_y = list(map(line, line_x))
else:
lowess = sm.nonparametric.lowess(data[y], data[x], frac=.3)
line_x = list(zip(*lowess))[0]
line_y = list(zip(*lowess))[1]
figure = plt.figure(figsize=(10, 6))
axes = figure.add_subplot(1, 1, 1)
xs = data[x]
if jitter > 0.0:
        xs = data[x] + stats.norm.rvs(0, jitter, data[x].size)
axes.scatter(xs, data[y], marker="o", color="steelblue", alpha=0.5)
axes.plot(line_x, line_y, color="DarkRed")
title = "Plot of {0} v. {1}".format(x, y)
if not skip_lowess:
title += " with LOESS"
axes.set_title(title)
axes.set_xlabel(x)
axes.set_ylabel(y)
plt.show()
plt.close()
def plot_scatter_by_groups(df, x_col, y_col, group_by_col, colors=None, alpha=0.75):
    """
    Scatter plot of y_col vs. x_col colored by the levels of group_by_col.
    Pass x_col='index' to plot against the dataframe index.
    """
labels = df[group_by_col].unique()
num_labels = np.arange(1, len(labels)+1)
fig, ax = plt.subplots()
for idx, label in zip(num_labels, labels):
indices_to_keep = df[group_by_col] == label
y = df.loc[indices_to_keep, y_col]
if x_col == 'index':
x = df.index[indices_to_keep]
else:
x = df.loc[indices_to_keep, x_col]
        ax.scatter(x, y, label=label, alpha=alpha)
    ax.legend()
    plt.show()
plt.close()
def subset_features(train, val, test, features, target):
X_train = train[features]
y_train = train[target]
X_val = val[features]
y_val = val[target]
X_test = test[features]
try:
y_test = test[target]
    except KeyError:
y_test = None
return X_train, X_val, X_test, y_train, y_val, y_test
def print_shape(train, val, test=None, title='Train/Val'):
print('------- SHAPE - {} ---------'.format(title))
print(f'Training Set: {train.shape}')
print(f'Validation Set: {val.shape}')
if test is not None:
print(f'Testing Set: {test.shape}')
def train_validate_test_split(df, train_percent=.6, validate_percent=.2, seed=42):
    """
    Does a 3-way holdout split into train, validate, and test sets.
    """
    from sklearn.model_selection import train_test_split
    test_percent = 1.0 - train_percent - validate_percent
    train, test = train_test_split(df, test_size=test_percent, random_state=seed)
    train, validate = train_test_split(
        train, test_size=validate_percent / (train_percent + validate_percent), random_state=seed)
    return (train, validate, test)
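# Usage sketch: with the defaults this is a 60/20/20 split, e.g.
#   train, validate, test = train_validate_test_split(df)
#   print_shape(train, validate, test, title='3-way holdout')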
def get_rmse_mae_r2(y_true, y_pred, verbose=False, title='TEST'):
"""
Returns regression metrics like - R^2, MSE, RMSE, and MAE as dictionary,
and prints them out if verbose is set to True.
Parameters
----------
y_true : Python list or numpy 1D array
y_pred : Python list or numpy 1D array
Returns
-------
results : Python dictionary
"""
if (y_true is None) or (y_pred is None):
raise ValueError(
            'Parameters `y_true` and `y_pred` must be a non-nil reference to python list or numpy array')
    y_true, y_pred = np.asarray(y_true), np.asarray(y_pred)
    assert y_true.shape == y_pred.shape
# Results dict
results = {}
mae_ = mean_absolute_error(y_true, y_pred)
    mse_ = mean_squared_error(y_true, y_pred)
    rmse_ = np.sqrt(mse_)
    r2_ = r2_score(y_true, y_pred)
    results['mae'] = mae_
    results['mse'] = mse_
    results['rmse'] = rmse_
    results['r2'] = r2_
    if verbose:
        print('------- METRICS - {} ---------'.format(title))
        print(f'MAE:  {mae_}')
        print(f'MSE:  {mse_}')
        print(f'RMSE: {rmse_}')
        print(f'R^2:  {r2_}')
    return results
from __future__ import absolute_import, division, print_function
import dolfin as dl
dl.dx = dl.dx(metadata={'quadrature_degree':2, "representation":'uflacs'})
import numpy as np
path = "../../"
import sys
sys.path.append(path)
from soupy.utils.variables import STATE, PARAMETER, ADJOINT, OPTIMIZATION
from soupy.utils.vector2function import vector2Function
class QoI(object):
"""
define the quantity of interest and its derivative information
"""
def __init__(self, mesh, Vh):
"""
Constructor.
INPUTS:
- mesh: the mesh
- Vh: the finite element space for [state, parameter, adjoint, optimization] variable
"""
self.mesh = mesh
self.Vh = Vh
self.x = [dl.Function(Vh[STATE]).vector(), dl.Function(Vh[PARAMETER]).vector(),
dl.Function(Vh[ADJOINT]).vector(), dl.Function(Vh[OPTIMIZATION]).vector()]
self.x_test = [dl.TestFunction(Vh[STATE]), dl.TestFunction(Vh[PARAMETER]),
dl.TestFunction(Vh[ADJOINT]), dl.TestFunction(Vh[OPTIMIZATION])]
def form(self, u, m, z):
"""
weak form of the qoi
:param u: state
:param m: parameter
:param z: optimization
:return: weak form
"""
return None
def eval(self, x):
"""
evaluate the qoi at given x
:param x: [state, parameter, adjoint, optimization] variable
:return: qoi(x)
"""
u = vector2Function(x[STATE], self.Vh[STATE])
m = vector2Function(x[PARAMETER], self.Vh[PARAMETER])
z = vector2Function(x[OPTIMIZATION], self.Vh[OPTIMIZATION])
return dl.assemble(self.form(u, m, z))
def adj_rhs(self, x, rhs):
"""
        The right-hand side for the adjoint problem (i.e. the derivative of the Lagrangian functional
with respect to the state u).
INPUTS:
- x coefficient vector of all variables
- rhs: FEniCS vector to store the rhs for the adjoint problem.
"""
self.grad_state(x, rhs)
rhs *= -1
def grad_state(self, x, g):
"""
The partial derivative of the qoi with respect to the state variable.
INPUTS:
- x coefficient vector of all variables
- g: FEniCS vector to store the gradient w.r.t. the state.
"""
u = vector2Function(x[STATE], self.Vh[STATE])
m = vector2Function(x[PARAMETER], self.Vh[PARAMETER])
z = vector2Function(x[OPTIMIZATION], self.Vh[OPTIMIZATION])
f_form = self.form(u, m, z)
f_u = dl.assemble(dl.derivative(f_form, u, self.x_test[STATE]))
g.zero()
g.axpy(1.0, f_u)
def grad_parameter(self, x, g):
"""
The partial derivative of the qoi with respect to the parameter variable.
INPUTS:
- x coefficient vector of [state,parameter,adjoint,optimization] variable
- g: FEniCS vector to store the gradient w.r.t. the parameter.
"""
u = vector2Function(x[STATE], self.Vh[STATE])
m = vector2Function(x[PARAMETER], self.Vh[PARAMETER])
z = vector2Function(x[OPTIMIZATION], self.Vh[OPTIMIZATION])
f_form = self.form(u, m, z)
f_m = dl.assemble(dl.derivative(f_form, m, self.x_test[PARAMETER]))
g.zero()
g.axpy(1.0, f_m)
# print("gm_obs", np.zeros(100), "gm_eva", g.get_local()[:100])
#
# g.zero()
def grad_optimization(self, x, g):
"""
The partial derivative of the qoi with respect to the optimization variable.
INPUTS:
- x coefficient vector of [state,parameter,adjoint,optimization] variable
- g: FEniCS vector to store the gradient w.r.t. the optimization.
"""
u = vector2Function(x[STATE], self.Vh[STATE])
m = vector2Function(x[PARAMETER], self.Vh[PARAMETER])
z = vector2Function(x[OPTIMIZATION], self.Vh[OPTIMIZATION])
f_form = self.form(u, m, z)
f_z = dl.assemble(dl.derivative(f_form, z, self.x_test[OPTIMIZATION]))
g.zero()
g.axpy(1.0, f_z)
# print("gz_obs", np.zeros(100), "gz_eva", g.get_local()[:100])
#
# g.zero()
def apply_ij(self,i,j, dir, out):
"""
Apply the second variation \delta_ij (i,j = STATE,PARAMETER,OPTIMIZATION) of the q.o.i. in direction dir.
INPUTS:
        - i,j integers (STATE=0, PARAMETER=1, OPTIMIZATION=3) indicating with respect to which variables to differentiate
- dir the direction in which to apply the second variation
- out: FEniCS vector to store the second variation in the direction dir.
NOTE: setLinearizationPoint must be called before calling this method.
"""
out.zero()
x_fun = [vector2Function(self.x[s], self.Vh[s]) for s in range(len(self.x))]
f_form = self.form(x_fun[STATE], x_fun[PARAMETER], x_fun[OPTIMIZATION])
dir_fun = vector2Function(dir, self.Vh[j])
f_i = dl.derivative(f_form, x_fun[i], self.x_test[i])
f_ij = dl.derivative(f_i, x_fun[j], dir_fun)
out.axpy(1.0, dl.assemble(f_ij))
def apply_ijk(self,i,j,k,dir1,dir2, out):
"""
        Apply the third order variation of the q.o.i. w.r.t. i,j,k: dir1 is the direction for i, dir2 the direction for j, and the result is assembled against the test functions of k
:param i: STATE or PARAMETER or OPTIMIZATION
:param j:
:param k:
:param dir1:
:param dir2:
:param out:
:return: out
"""
out.zero()
x_fun = [vector2Function(self.x[s], self.Vh[s]) for s in range(len(self.x))]
f_form = self.form(x_fun[STATE], x_fun[PARAMETER], x_fun[OPTIMIZATION])
dir1_fun, dir2_fun = vector2Function(dir1, self.Vh[i]), vector2Function(dir2, self.Vh[j])
f_i = dl.derivative(f_form, x_fun[i], dir1_fun)
f_ij = dl.derivative(f_i, x_fun[j], dir2_fun)
f_ijk = dl.derivative(f_ij, x_fun[k], self.x_test[k])
out.axpy(1.0, dl.assemble(f_ijk))
# print("fijk_obs", np.zeros(100), "fijk_eva", g.get_local()[:100])
def setLinearizationPoint(self, x):
"""
Specify the linearization point for computation of the second variations in method apply_ij.
INPUTS:
        - x = [u, m, p, z] is a list of the state u, parameter m, adjoint variable p, and optimization variable z
"""
for i in range(len(x)):
self.x[i].zero()
self.x[i].axpy(1.0, x[i])
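# Example (a hedged sketch, not part of the original module): a concrete QoI
# only needs to override `form`; eval/adj_rhs/grad_* and the higher-order
# variations are inherited from the machinery above. This one integrates the
# state over the domain, the same weak form QoIObjective below uses as a
# placeholder.
class StateIntegralQoI(QoI):
    # q(u, m, z) = integral of u over the domain D
    def form(self, u, m, z):
        return u * dl.dx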
# class QoIObjective(QoI):
#
# """
# integral of the inflow rate along the boundary
# q = \int_{\partial D} exp(m) dot (grad u, n) * ds
# u is the state variable
# """
# def __init__(self, mesh, Vh):
# """
# Constructor.
# INPUTS:
# - mesh: the mesh
# - Vh: the finite element space for [state, parameter, adjoint] variable
# """
# super(QoIObjective, self).__init__(mesh, Vh)
#
# self.n = dl.FacetNormal(mesh)
# self.Obs = dl.assemble(self.x_test[ADJOINT] * dl.dx)
#
# mollifierlist = []
# x = np.linspace(0.25, 0.75, 5)
# y = np.linspace(0.25, 0.75, 5)
# for i in range(5):
# for j in range(5):
# mollifierlist.append(
# dl.Expression("exp(-(pow(x[0]-xi,2)+pow(x[1]-yj,2))/(pow(0.1,2)))/2", xi=x[i], yj=y[j], degree=2))
#
# self.mollifier = dl.as_vector(mollifierlist)
#
# mollifierMat = [ [ None for i in range(25) ] for j in range(25) ]
# for i in range(25):
# for j in range(25):
# if i == j:
# mollifierMat[i][j] = mollifierlist[i]
# else:
# mollifierMat[i][j] = 0.
# self.mollifierMat = dl.as_matrix(mollifierMat)
#
# z = 18.*np.ones(25)
# z_target = dl.Function(self.Vh[OPTIMIZATION])
# idx = z_target.vector().local_range()
# z_target.vector().set_local(z[idx[0]:idx[1]])
# self.z_target = z_target
#
# def form(self, u, m, z):
# """
# weak form of the q.o.i.
# :param x:
# :return:
# """
#
# # f = - dl.exp(m) * dl.dot(dl.grad(u), self.n) * dl.ds
#
# f = - dl.dot(dl.grad(u), self.n) * dl.ds
#
# # f = u * dl.dx
#
# # f = - dl.inner(self.mollifier, z)*dl.dx
#
# # f = dl.dot(self.mollifierMat*(z-self.z_target), (z-self.z_target)) * dl.dx
#
# # f = dl.inner(self.mollifier, (z-self.z_target)) * dl.inner(self.mollifier, (z-self.z_target))*dl.dx
#
# return f
class QoIObjective(QoI):
"""
    quadratic penalty on the optimization variable,
    q = ||z - z_target||^2 / num
    (the weak form below is an arbitrary placeholder; eval and the gradients are overridden)
"""
def __init__(self, mesh, Vh):
"""
Constructor.
INPUTS:
- mesh: the mesh
- Vh: the finite element space for [state, parameter, adjoint] variable
"""
super(QoIObjective, self).__init__(mesh, Vh)
self.num = 25
z = 18.*np.ones(self.num)
z_target = dl.Function(self.Vh[OPTIMIZATION]).vector()
idx = z_target.local_range()
z_target.set_local(z[idx[0]:idx[1]])
self.z_target = z_target
self.z_diff = dl.Function(Vh[OPTIMIZATION]).vector()
def form(self, u, m, z):
        # arbitrary placeholder form; eval and the gradients below are overridden and never use it
return u * dl.dx
def eval(self, x):
z = x[OPTIMIZATION]
self.z_diff.zero()
self.z_diff.axpy(1.0, z)
self.z_diff.axpy(-1.0, self.z_target)
return 1./self.num * self.z_diff.inner(self.z_diff)
def adj_rhs(self, x, rhs):
"""
        The right-hand side for the adjoint problem (i.e. the derivative of the Lagrangian functional
with respect to the state u).
INPUTS:
- x coefficient vector of all variables
- rhs: FEniCS vector to store the rhs for the adjoint problem.
"""
self.grad_state(x, rhs)
rhs *= -1
def grad_state(self, x, g):
"""
The partial derivative of the qoi with respect to the state variable.
INPUTS:
- x coefficient vector of all variables
- g: FEniCS vector to store the gradient w.r.t. the state.
"""
g.zero()
def grad_parameter(self, x, g):
"""
The partial derivative of the qoi with respect to the parameter variable.
INPUTS:
- x coefficient vector of [state,parameter,adjoint,optimization] variable
- g: FEniCS vector to store the gradient w.r.t. the parameter.
"""
g.zero()
def grad_optimization(self, x, g):
"""
The partial derivative of the qoi with respect to the optimization variable.
INPUTS:
- x coefficient vector of [state,parameter,adjoint,optimization] variable
- g: FEniCS vector to store the gradient w.r.t. the optimization.
"""
z = x[OPTIMIZATION]
self.z_diff.zero()
self.z_diff.axpy(1.0, z)
self.z_diff.axpy(-1.0, self.z_target)
g.zero()
g.axpy(2./self.num, self.z_diff)
def apply_ij(self,i,j, dir, out):
"""
Apply the second variation \delta_ij (i,j = STATE,PARAMETER,OPTIMIZATION) of the q.o.i. in direction dir.
INPUTS:
        - i,j integers (STATE=0, PARAMETER=1, OPTIMIZATION=3) indicating with respect to which variables to differentiate
- dir the direction in which to apply the second variation
- out: FEniCS vector to store the second variation in the direction dir.
NOTE: setLinearizationPoint must be called before calling this method.
"""
out.zero()
def apply_ijk(self,i,j,k,dir1,dir2, out):
"""
        Apply the third order variation of the q.o.i. w.r.t. i,j,k: dir1 is the direction for i, dir2 the direction for j, and the result is assembled against the test functions of k
:param i: STATE or PARAMETER or OPTIMIZATION
:param j:
:param k:
:param dir1:
:param dir2:
:param out:
:return: out
"""
out.zero()
def setLinearizationPoint(self, x):
"""
Specify the linearization point for computation of the second variations in method apply_ij.
INPUTS:
        - x = [u, m, p, z] is a list of the state u, parameter m, adjoint variable p, and optimization variable z
"""
for i in range(len(x)):
self.x[i].zero()
self.x[i].axpy(1.0, x[i])
# ############################################################################
# class QoIConstraint:
#
# """
# misfit term of simulaation-observation at a few locations
# Q = ||O(u) - \bar{u}||^2
# O is the observation functional
# u is the state variable
# \bar{u}
import sys
from dataclasses import dataclass
import logging
import curses
from tx.functional.either import Left, Right
from coloredtext import ColoredText, toColoredText
logging.basicConfig(filename="qctool.log",
filemode='a',
format='%(asctime)s,%(msecs)d %(name)s %(levelname)s %(message)s',
datefmt='%H:%M:%S',
level=logging.DEBUG)
logger = logging.getLogger(__name__)
NORMAL_COLOR = 0
SELECTED_NORMAL_COLOR = 1
HIGHLIGHT_COLOR = 2
SELECTED_HIGHLIGHT_COLOR = 3
ERROR_COLOR = 4
def init_colors():
curses.init_pair(HIGHLIGHT_COLOR, curses.COLOR_WHITE, curses.COLOR_BLUE)
curses.init_pair(SELECTED_NORMAL_COLOR, curses.COLOR_BLACK, curses.COLOR_WHITE)
curses.init_pair(SELECTED_HIGHLIGHT_COLOR, curses.COLOR_WHITE, curses.COLOR_BLUE)
curses.init_pair(ERROR_COLOR, curses.COLOR_BLACK, curses.COLOR_RED)
def clip(a, b, c):
if a < b:
return b
elif a > c:
return c
else:
return a
def selection_color(c, selected):
if selected:
if c == NORMAL_COLOR:
return SELECTED_NORMAL_COLOR
elif c == HIGHLIGHT_COLOR:
return SELECTED_HIGHLIGHT_COLOR
else:
return c
else:
return c
@dataclass
class WindowExit:
data: object
@dataclass
class WindowContinue:
pass
@dataclass
class WindowPass:
pass
class Widget:
def __init__(self, window):
self.window = window
def resize_move(self, h, w, window_y, window_x):
self._resize_move(h, w, window_y, window_x)
self.update()
def _resize_move(self, h, w, window_y, window_x):
        self._resize(h, w)
        self._move_window(window_y, window_x)
def move_window(self, window_y, window_x):
self._move_window(window_y, window_x)
self.update()
def _move_window(self, window_y, window_x):
self.window.mvwin(window_y, window_x)
def resize(self, h, w):
self._resize(h, w)
self.update()
def _resize(self, h, w):
self.window.resize(h, w)
def getch(self):
return self.window.getch()
def size(self):
return self.window.getmaxyx()
def _update_line(self, window_y, window_x, line, window_w, selected):
current_window_x = window_x
for cstr in line.coloredStrings():
color = cstr.color
string = cstr.string
            self.window.insnstr(window_y, current_window_x, string, window_w - current_window_x, curses.color_pair(selection_color(color, selected)))
current_window_x += len(string)
if current_window_x >= window_w:
break
def update(self, clear=True, refresh=True):
if clear:
self.window.clear()
if refresh:
self.window.refresh()
def onKey(self, ch):
res = self._onKey(ch)
self.update()
return res
def _onKey(self, ch):
return WindowPass()
def __del__(self):
del self.window
def draw_border(win):
h, w = win.getmaxyx()
win.insstr(0, 0, "-" * w)
for i in range(1, h - 1):
win.addstr(i, 0, "|")
win.addstr(i, w-1, "|")
win.insstr(h-1, 0, "-" * w)
def draw_textfield(pop, w, y, x, label, c):
pop.insstr(y, x, label)
xoffset = len(label) + 1
text = c[-w+xoffset + 1:]
padding = " " * (w - xoffset - len(text))
pop.insstr(y,x + xoffset, padding, curses.color_pair(SELECTED_NORMAL_COLOR))
pop.insstr(y,x + xoffset, text, curses.color_pair(SELECTED_NORMAL_COLOR))
return xoffset + len(text)
# key_handler returns a WindowExit to close the popup; its data is returned
def popup(window, h, w, y, x, footer, create_window, key_handler):
window.set_footer(footer)
pop = curses.newwin(h, w, y, x)
popwindow = Window(pop)
popwindow.border = True
create_window(popwindow)
popwindow.update()
while True:
ch = pop.getch()
res = key_handler(popwindow, ch)
if isinstance(res, WindowExit):
data = res.data
break
res = popwindow._onKey(ch)
if isinstance(res, WindowExit):
data = res.data
break
popwindow.update()
del popwindow
return data
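# Usage sketch (hypothetical geometry and footer text): a popup that closes and
# returns None when 'q' is pressed, deferring all other keys to the popup's own
# handler chain.
#   def on_key(win, ch):
#       if ch == ord('q'):
#           return WindowExit(None)
#       return WindowPass()
#   result = popup(main_window, 10, 40, 5, 5,
#                  toColoredText(NORMAL_COLOR, "press q to close"),
#                  lambda win: None, on_key)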
class Pane(Widget):
def __init__(self, window, selectable, editable=False, max_lines=None):
super().__init__(window)
self.lines = ColoredText()
# coordinates of the cursor relative to the document
self.current_document_y = 0
self.current_document_x = 0
# coordinates of the top left corner of the window relative to the document
self.window_document_y = 0
self.window_document_x = 0
self.max_lines = max_lines
self.selectable = selectable
self.editable = editable
self.edited = False
# padding in the document where the cursor cannot pass
self.top_padding = 0
self.bottom_padding = 0
self.left_padding = 0
self.right_padding = 0
# event handlers
self.cursorMoveHandlers = []
self.keyHandlers = []
def print(self, s: ColoredText) -> None:
self.current_document_y += s.numberOfLines()
self.append(s)
def append(self, s: ColoredText) -> None:
self._append(s)
self.update()
def _append(self, s : ColoredText) -> None:
        nlines = s.numberOfLines()
if self.max_lines is not None and self.lines.numberOfLines() + nlines > self.max_lines:
self.lines = self.lines[self.lines.numberOfLines() + nlines - self.max_lines:]
self.lines += s
self._clip_coordinates()
def replace(self, s: ColoredText) -> None:
self._replace(s)
self.update()
def _replace(self, s: ColoredText) -> None:
self.lines.clear()
self._append(s)
def move(self, document_dy, document_dx):
self._move(document_dy, document_dx)
self.update()
def _move(self, document_dy, document_dx):
self._move_abs(self.current_document_y + document_dy, self.current_document_x + document_dx)
def move_abs(self, document_y, document_x):
self._move_abs(document_y, document_x)
self.update()
def _move_abs(self, document_y, document_x):
oc = (self.current_document_y, self.current_document_x)
self.current_document_y = document_y
self.current_document_x = document_x
self._clip_coordinates()
c = (self.current_document_y, self.current_document_x)
if c != oc:
self._onCursorMove(oc, c)
def _clip_coordinates(self):
document_height = self.lines.numberOfLines()
self.current_document_y = clip(self.current_document_y, self.top_padding, max(0, document_height - 1 - self.bottom_padding))
self.current_document_x = clip(self.current_document_x, self.left_padding, max(0, len(self.lines[self.current_document_y]) - self.right_padding) if document_height > 0 else 0)
def move_page(self, document_dy, document_dx):
window_h, window_w = self.window.getmaxyx()
self.move(document_dy * window_h, document_dx * window_w)
def _scroll_window(self, window_h, window_w):
document_h = self.lines.numberOfLines()
self.window_document_y = clip(self.window_document_y, self.current_document_y - window_h + 1, self.current_document_y)
self.window_document_y = clip(self.window_document_y, 0, max(0, document_h - window_h))
def _clear(self):
self.lines.clear()
self._move(0, 0)
def clear(self):
self._clear()
self.update()
def __del__(self):
del self.window
def update(self, clear=True, refresh=True):
if clear:
self.window.clear()
window_h, window_w = self.window.getmaxyx()
document_h = self.lines.numberOfLines()
self._scroll_window(window_h, window_w)
min_document_y = self.window_document_y
max_document_y = min(min_document_y + window_h, document_h)
for document_y in range(min_document_y, max_document_y):
line = self.lines[document_y]
self._update_line(document_y - min_document_y, 0, line, window_w, self.selectable and document_y == self.current_document_y)
if refresh:
self.window.refresh()
return self.current_document_y - self.window_document_y, self.current_document_x - self.window_document_x
def _onKey(self, ch):
if self.selectable or self.editable:
if ch == curses.KEY_UP:
self.move(-1, 0)
return WindowContinue()
elif ch == curses.KEY_DOWN:
self.move(+1, 0)
return WindowContinue()
elif ch == curses.KEY_PPAGE:
self.move_page(-1, 0)
return WindowContinue()
elif ch == curses.KEY_NPAGE:
self.move_page(+1, 0)
return WindowContinue()
if self.editable:
logger.info("editable")
if ch == curses.KEY_LEFT:
self.move(0, -1)
return WindowContinue()
elif ch == curses.KEY_RIGHT:
self.move(0, +1)
return WindowContinue()
elif ch == curses.KEY_HOME:
self.move(0, -self.current_document_x)
return WindowContinue()
elif ch == curses.KEY_END:
self.move(0, len(self.lines[self.current_document_y]))
return WindowContinue()
elif ch == curses.KEY_ENTER or ch == ord("\n") or ch == ord("\r"):
color = self.lines.getColorAt(self.current_document_y, self.current_document_x)
self._replace(self.lines.insert(self.current_document_y, self.current_document_x, toColoredText(color, "\n")))
self.move(1, -self.current_document_x)
self.edited = True
return WindowContinue()
elif ch == curses.KEY_BACKSPACE or ch == ord("\b") or ch == 127:
if self.current_document_x == 0:
if self.current_document_y == 0:
return WindowContinue()
else:
offset_y = -1
offset_x = len(self.lines[self.current_document_y - 1])
else:
offset_y = 0
offset_x = -1
self.move(offset_y, offset_x)
self._replace(self.lines.delete(self.current_document_y, self.current_document_x))
self.edited = True
return WindowContinue()
elif ch == curses.KEY_DC:
self._replace(self.lines.delete(self.current_document_y, self.current_document_x))
self.edited = True
return WindowContinue()
elif 32 <= ch <= 126 or 128 <= ch <= 255:
color = self.lines.getColorAt(self.current_document_y, self.current_document_x)
char = chr(ch)
self._replace(self.lines.insert(self.current_document_y, self.current_document_x, toColoredText(color, char)))
self.move(0, +1)
self.edited = True
return WindowContinue()
for h in self.keyHandlers:
ret = h(self, ch)
if ret is not None:
return ret
return WindowPass()
def addKeyHandler(self, h):
self.keyHandlers.append(h)
def _onCursorMove(self, oc, c):
for h in self.cursorMoveHandlers:
h(self, oc, c)
def addCursorMoveHandler(self, h):
self.cursorMoveHandlers.append(h)
class Fill(Widget):
def __init__(self, window, ch):
super().__init__(window)
self.ch = ch
def update(self, clear=True, refresh=True):
if clear:
self.window.clear()
window_h, window_w = self.window.getmaxyx()
for window_x in range(window_w):
for window_y in range(window_h):
self.window.insch(window_y, window_x, self.ch)
if refresh:
self.window.refresh()
return 0, 0
class Text(Widget):
def __init__(self, window, text):
super().__init__(window)
self.text = text
self.window_document_y = 0
self.window_document_x = 0
def update(self, clear=True, refresh=True):
if clear:
self.window.clear()
window_h, window_w = self.window.getmaxyx()
lines = self.text.lines()
for window_y in range(min(window_h, len(lines) - self.window_document_y)):
line = lines[self.window_document_y + window_y]
x = 0
for string in line.coloredStrings():
self.window.insnstr(window_y, x, string.string, window_w, curses.color_pair(string.color))
x += len(string)
if refresh:
self.window.refresh()
return 0, 0
class TextField(Widget):
def __init__(self, window, label, initial_text):
super().__init__(window)
self.label = label
self.text = initial_text
self.changeHandlers = []
def update(self, clear=True, refresh=True):
if clear:
self.window.clear()
window_h, window_w = self.window.getmaxyx()
text_w = min(window_w - len(self.label) - 2, len(self.text))
text = self.text[-text_w:]
cursor_x = draw_textfield(self.window, window_w, 0, 0, self.label, text)
if refresh:
self.window.refresh()
return 0, cursor_x
def _onKey(self, ch):
if ch == curses.KEY_BACKSPACE or ch == ord("\b") or ch == 127:
oc = self.text
self.text = self.text[:-1]
self._onChange(oc, self.text)
return WindowContinue()
elif 32 <= ch <= 126 or 128 <= ch <= 255:
oc = self.text
self.text += chr(ch)
self._onChange(oc, self.text)
return WindowContinue()
return WindowPass()
def _onChange(self, oc, c):
for h in self.changeHandlers:
h(self, oc, c)
def addChangeHandler(self, h):
self.changeHandlers.append(h)
class Window(Widget):
def __init__(self, window):
super().__init__(window)
self.footer = ColoredText()
self.header = ColoredText()
self.children = {}
self.focus = None
self.border = False
def set_footer(self, s : ColoredText) -> None:
self._set_footer(s)
self.update()
def _set_footer(self, s : ColoredText) -> None:
self.footer = s
def set_header(self, s : ColoredText) -> None:
self._set_header(s)
self.update()
def _set_header(self, s : ColoredText) -> None:
self.header = s
def _update_header(self, window_h, window_w):
for header_y, line in enumerate(self.header):
if header_y >= window_h:
break
header_window_y = header_y
self._update_line(header_window_y, 0, line, window_w, False)
def _update_footer(self, window_h, window_w):
footer_height = self.footer.numberOfLines()
for footer_y, line in enumerate(self.footer[max(0, footer_height - window_h):]):
footer_window_y = window_h - (footer_height - footer_y)
self._update_line(footer_window_y, 0, line, window_w, False)
def _new_window(self, h, w, window_y, window_x):
return self.window.derwin(h, w, window_y, window_x)
def pane(self, name, h, w, window_y, window_x, selectable):
        window = self._new_window(h, w, window_y, window_x)
        pane = Pane(window, selectable)
        self.children[name] = pane
        return pane
import collections
import io
import logging
import os
import subprocess
import re
import threading
import warnings
from imagemounter import _util, filesystems, FILE_SYSTEM_TYPES, VOLUME_SYSTEM_TYPES, dependencies
from imagemounter.exceptions import SubsystemError, NotMountedError, ImageMounterError
from imagemounter.filesystems import FileSystem, CarveFileSystem
from imagemounter.volume_system import VolumeSystem
logger = logging.getLogger(__name__)
class Volume:
"""Information about a volume. Note that every detected volume gets their own Volume object, though it may or may
not be mounted. This can be seen through the :attr:`mountpoint` attribute -- if it is not set, perhaps the
:attr:`exception` attribute is set with an exception.
"""
def __init__(self, disk, parent=None, index="0", size=0, offset=0, flag='alloc', slot=0, fstype=None, key="",
vstype='', volume_detector='auto'):
"""Creates a Volume object that is not mounted yet.
Only use arguments as keyword arguments.
:param disk: the parent disk
:type disk: :class:`Disk`
:param parent: the parent volume or disk.
:param str index: the volume index within its volume system, see the attribute documentation.
:param int size: the volume size, see the attribute documentation.
:param int offset: the volume offset, see the attribute documentation.
:param str flag: the volume flag, see the attribute documentation.
:param int slot: the volume slot, see the attribute documentation.
:param FileSystem fstype: the fstype you wish to use for this Volume.
If not specified, will be retrieved from the ImageParser instance instead.
:param str key: the key to use for this Volume.
:param str vstype: the volume system type to use.
:param str volume_detector: the volume system detection method to use
"""
self.parent = parent
self.disk = disk
# Should be filled somewhere
self.size = size
self.offset = offset
self.index = index
self.slot = slot
self.flag = flag
self.block_size = self.disk.block_size
self.volumes = VolumeSystem(parent=self, vstype=vstype, volume_detector=volume_detector)
self._get_fstype_from_parser(fstype)
if key:
self.key = key
elif self.index in self.disk.parser.keys:
self.key = self.disk.parser.keys[self.index]
elif '*' in self.disk.parser.keys:
self.key = self.disk.parser.keys['*']
else:
self.key = ""
self.info = {}
self.bindmounts = []
self._real_path = None
self.was_mounted = False
self.is_mounted = False
def __str__(self):
return '{0}:{1}'.format(self.index, self.info.get('fsdescription') or '-')
def __getitem__(self, item):
return self.volumes[item]
@property
def numeric_index(self):
try:
return tuple([int(x) for x in self.index.split(".")])
except ValueError:
return ()
@property
def fstype(self):
warnings.warn("fstype is deprecated in favor of filesystem", DeprecationWarning)
return str(self.filesystem)
@property
def mountpoint(self):
return getattr(self.filesystem, 'mountpoint', None)
@property
def loopback(self):
return getattr(self.filesystem, 'loopback', None)
def _get_fstype_from_parser(self, fstype=None):
"""Load fstype information from the parser instance."""
if not fstype:
if self.index in self.disk.parser.fstypes:
fstype = self.disk.parser.fstypes[self.index]
elif '*' in self.disk.parser.fstypes:
fstype = self.disk.parser.fstypes['*']
elif '?' in self.disk.parser.fstypes and self.disk.parser.fstypes['?'] is not None:
fstype = "?" + self.disk.parser.fstypes['?']
else:
fstype = ""
if not fstype:
self.filesystem = None
elif isinstance(fstype, FileSystem):
fstype.volume = self
self.filesystem = fstype
elif fstype in VOLUME_SYSTEM_TYPES:
self.volumes.vstype = fstype
self.filesystem = FILE_SYSTEM_TYPES["volumesystem"](self)
elif fstype.startswith("?"):
fallback = FILE_SYSTEM_TYPES[fstype[1:]](self)
self.filesystem = filesystems.FallbackFileSystem(self, fallback)
else:
self.filesystem = FILE_SYSTEM_TYPES[fstype](self)
def get_description(self, with_size=True, with_index=True):
"""Obtains a generic description of the volume, containing the file system type, index, label and NTFS version.
        If *with_size* is True, the volume size is also included; *with_index* controls whether the index prefix is added.
"""
desc = ''
if with_size and self.size:
desc += '{0} '.format(self.get_formatted_size())
s = self.info.get('statfstype') or self.info.get('fsdescription') or '-'
if with_index:
desc += '{1}:{0}'.format(s, self.index)
else:
desc += s
if self.info.get('label'):
desc += ' {0}'.format(self.info.get('label'))
if self.info.get('version'): # NTFS
desc += ' [{0}]'.format(self.info.get('version'))
return desc
def get_formatted_size(self):
"""Obtains the size of the volume in a human-readable format (i.e. in TiBs, GiBs or MiBs)."""
if self.size is not None:
if self.size < 1024:
return "{0} B".format(self.size)
elif self.size < 1024 ** 2:
return "{0} KiB".format(round(self.size / 1024, 2))
elif self.size < 1024 ** 3:
return "{0} MiB".format(round(self.size / 1024 ** 2, 2))
elif self.size < 1024 ** 4:
return "{0} GiB".format(round(self.size / 1024 ** 3, 2))
else:
return "{0} TiB".format(round(self.size / 1024 ** 4, 2))
else:
return self.size
@dependencies.require(dependencies.blkid, none_on_failure=True)
def _get_blkid_type(self):
"""Retrieves the FS type from the blkid command."""
try:
result = _util.check_output_(['blkid', '-p', '-O', str(self.offset), self.get_raw_path()])
if not result:
return None
# noinspection PyTypeChecker
blkid_result = dict(re.findall(r'([A-Z]+)="(.+?)"', result))
self.info['blkid_data'] = blkid_result
if 'PTTYPE' in blkid_result and 'TYPE' not in blkid_result:
return blkid_result.get('PTTYPE')
else:
return blkid_result.get('TYPE')
except Exception:
return None # returning None is better here, since we do not care about the exception in determine_fs_type
@dependencies.require(dependencies.magic, none_on_failure=True)
def _get_magic_type(self):
"""Checks the volume for its magic bytes and returns the magic."""
try:
with io.open(self.disk.get_fs_path(), "rb") as file:
file.seek(self.offset)
fheader = file.read(min(self.size, 4096) if self.size else 4096)
except IOError:
logger.exception("Failed reading first 4K bytes from volume.")
return None
# TODO fallback to img-cat image -s blocknum | file -
# if we were able to load the module magic
try:
# noinspection PyUnresolvedReferences
import magic
if hasattr(magic, 'from_buffer'):
# using https://github.com/ahupp/python-magic
logger.debug("Using python-magic Python package for file type magic")
result = magic.from_buffer(fheader)
self.info['magic_data'] = result
return result
elif hasattr(magic, 'open'):
# using Magic file extensions by <NAME> (Ubuntu python-magic module)
logger.debug("Using python-magic system package for file type magic")
ms = magic.open(magic.NONE)
ms.load()
result = ms.buffer(fheader)
ms.close()
self.info['magic_data'] = result
return result
else:
logger.warning("The python-magic module is not available, but another module named magic was found.")
except ImportError:
logger.warning("The python-magic module is not available.")
except AttributeError:
logger.warning("The python-magic module is not available, but another module named magic was found.")
return None # returning None is better here, since we do not care about the exception in determine_fs_type
def get_raw_path(self):
"""Retrieves the base mount path of the volume. It best understood by seeing it as: where is the raw (dd) file
that this volume is reading from, i.e. what does the offset of this volume equal to?
In the most easy way, it is just the path of the disk. However, if we need to mount a volume again, its
subvolumes will be mounted from somewhere else. This is controlled by :attr:`_real_path`, which is set by
the mounting method (either by :cls:`VolumeDetector` or by :cls:`FileSystem`).
"""
if self._real_path is not None:
return self._real_path
elif self.parent and self.parent != self.disk:
return self.parent.get_raw_path()
else:
return self.disk.get_fs_path()
def get_safe_label(self):
"""Returns a label that is safe to add to a path in the mountpoint for this volume."""
if self.info.get('label') == '/':
return 'root'
suffix = re.sub(r"[/ \(\)]+", "_", self.info.get('label')) if self.info.get('label') else ""
if suffix and suffix[0] == '_':
suffix = suffix[1:]
if len(suffix) > 2 and suffix[-1] == '_':
suffix = suffix[:-1]
return suffix
@dependencies.require(dependencies.photorec)
def carve(self, freespace=True):
"""Call this method to carve the free space of the volume for (deleted) files. Note that photorec has its
own interface that temporarily takes over the shell.
:param freespace: indicates whether the entire volume should be carved (False) or only the free space (True)
:type freespace: bool
:return: string to the path where carved data is available
:raises CommandNotFoundError: if the underlying command does not exist
:raises SubsystemError: if the underlying command fails
:raises NoMountpointAvailableError: if there is no mountpoint available
:raises NoLoopbackAvailableError: if there is no loopback available (only when volume has no slot number)
"""
volume = self.volumes._make_subvolume(flag='alloc', offset=0, fstype=CarveFileSystem(None, freespace=freespace))
volume.mount()
return volume.filesystem.mountpoint
@dependencies.require(dependencies.vshadowmount)
def detect_volume_shadow_copies(self):
"""Method to call vshadowmount and mount NTFS volume shadow copies.
:return: iterable with the :class:`Volume` objects of the VSS
:raises CommandNotFoundError: if the underlying command does not exist
:raises SubSystemError: if the underlying command fails
:raises NoMountpointAvailableError: if there is no mountpoint available
"""
volume = self.volumes._make_subvolume(flag='alloc', offset=0, fstype='vss-container')
volume.mount()
return volume.volumes
def _should_mount(self, only_mount=None, skip_mount=None):
"""Indicates whether this volume should be mounted. Internal method, used by imount.py"""
om = only_mount is None \
or self.index in only_mount \
or self.info.get('lastmountpoint') in only_mount \
or self.info.get('label') in only_mount
sm = skip_mount is None \
or (self.index not in skip_mount
and self.info.get('lastmountpoint') not in skip_mount
and self.info.get('label') not in skip_mount)
return om and sm
def init(self, only_mount=None, skip_mount=None, swallow_exceptions=True):
"""Generator that mounts this volume and either yields itself or recursively generates its subvolumes.
More specifically, this function will call :func:`load_fsstat_data` (iff *no_stats* is False), followed by
:func:`mount`, followed by a call to :func:`detect_mountpoint`, after which ``self`` is yielded, or the result
        of the recursive generator over its subvolumes is yielded instead.
        """
def readUnusedProj() :
fi = open("unusedproj.txt","r")
unusedproj=[]
for line in fi:
unusedproj.append(line.split('\n')[0])
return unusedproj
def findInputLinks(dtcphirange) :
fi = open(dtcphirange,"r")
ilinks=[]
for line in fi:
dtcname=line.split()[0]
layerdisk=int(line.split()[1])
phimin=float(line.split()[2])
phimax=float(line.split()[3])
#print "Line: ",dtcname,layerdisk,phimin,phimax
phimin1=phimin-two_pi/9.0+0.5*phirange
phimax1=phimax-two_pi/9.0+0.5*phirange
phimin2=phimin+0.5*phirange
phimax2=phimax+0.5*phirange
#print "phimin1, phimax1 : ",phimin1,phimax1
if layerdisk<7 :
layer=layerdisk
#print "layer : ",layer
nallstubs=nallstubslayers[layer-1]
dphi=phirange/nallstubs
for iallstub in range(0,nallstubs) :
phiminallstub=iallstub*dphi
phimaxallstub=phiminallstub+dphi
#print "Allstub phimin,max :",phiminallstub,phimaxallstub
if (phiminallstub<phimax1 and phimaxallstub>phimin1) or (phiminallstub<phimax2 and phimaxallstub>phimin2) :
if iallstub<nallstubs/2 :
il="IL_L"+str(layer)+"PHI"+letter(iallstub+1)+"_"+dtcname+"_A"
ilinks.append(il)
#print "Inputlink : ",il
if iallstub>=nallstubs/2 :
il="IL_L"+str(layer)+"PHI"+letter(iallstub+1)+"_"+dtcname+"_B"
ilinks.append(il)
#print "Inputlink : ",il
else :
disk=layerdisk-6
#print "layerdisk disk : ",layerdisk,disk
nallstubs=nallstubsdisks[disk-1]
dphi=phirange/nallstubs
for iallstub in range(0,nallstubs) :
phiminallstub=iallstub*dphi
phimaxallstub=phiminallstub+dphi
#print "Allstub phimin,max :",phiminallstub,phimaxallstub
if (phiminallstub<phimax1 and phimaxallstub>phimin1) or (phiminallstub<phimax2 and phimaxallstub>phimin2) :
if iallstub<nallstubs/2 :
il="IL_D"+str(disk)+"PHI"+letter(iallstub+1)+"_"+dtcname+"_A"
ilinks.append(il)
#print "Inputlink : ",il
if iallstub>=nallstubs/2 :
il="IL_D"+str(disk)+"PHI"+letter(iallstub+1)+"_"+dtcname+"_B"
ilinks.append(il)
#print "Inputlink : ",il
return ilinks
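# Input format sketch: each line of dtcphirange.txt is parsed above as
# "<dtcname> <layerdisk> <phimin> <phimax>", with layerdisk 1-6 meaning barrel
# layers and 7+ meaning disks (disk = layerdisk - 6). A hypothetical line:
#   PS10G_1 1 -0.35 0.35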
inputlinks=findInputLinks("dtcphirange.txt")
print "Inputlinks :",len(inputlinks),inputlinks
unusedproj=readUnusedProj()
print "Unusedproj :",unusedproj
fp = open("wires.input.hourglass","w")
#
# Do the VM routers for the TE in the layers
#
#
# triplets VMs:
# FIRST (same as inner): L3,L5,D1 ->same memories as pairs; L2abcdefg for L2L3D1
# SECOND (same as outer): L4,L6,D2 -> same memories as pairs; L3abcdefg for L2L3D1
# THIRD (same as outer): L2,L4 -> same memories as pairs; L2xyz, D1xyz for D1D2L2 and L2L3D1
for ilayer in range(1,7) :
print "layer =",ilayer,"allstub memories",nallstubslayers[ilayer-1]
fp.write("\n")
fp.write("#\n")
fp.write("# VMRouters for the TEs in layer "+str(ilayer)+" \n")
fp.write("#\n")
for iallstubmem in range(1,nallstubslayers[ilayer-1]+1) :
allstubsmemname="L"+str(ilayer)+"PHI"+letter(iallstubmem)
for il in inputlinks :
if allstubsmemname in il :
fp.write(il+" ")
fp.write("> VMR_L"+str(ilayer)+"PHI"+letter(iallstubmem)+" > ")
fp.write("AS_L"+str(ilayer)+"PHI"+letter(iallstubmem))
for ivm in range(1,nvmmelayers[ilayer-1]+1) :
fp.write(" VMSME_L"+str(ilayer)+"PHI"+letter(iallstubmem)+str((iallstubmem-1)*nvmmelayers[ilayer-1]+ivm))
for ivm in range(1,nvmtelayers[ilayer-1]+1) :
fp.write(" VMSTE_L"+str(ilayer)+"PHI"+letter(iallstubmem)+str((iallstubmem-1)*nvmtelayers[ilayer-1]+ivm))
if extraseeding :
if (nvmteextralayers[ilayer-1]>0) :
for ivm in range(1,nvmteextralayers[ilayer-1]+1) :
fp.write(" VMSTE_L"+str(ilayer)+"PHI"+letterextra(iallstubmem)+str((iallstubmem-1)*nvmteextralayers[ilayer-1]+ivm))
if ilayer in range(1,3) :
for ivm in range(1,nvmteoverlaplayers[ilayer-1]+1) :
fp.write(" VMSTE_L"+str(ilayer)+"PHI"+letteroverlap(iallstubmem)+str((iallstubmem-1)*nvmteoverlaplayers[ilayer-1]+ivm))
if displacedseeding :
if ilayer == 2:
for ivm in range(1,nvmtelayers[ilayer-1]+1) :
fp.write(" VMSTE_L"+str(ilayer)+"PHI"+letter_triplet(iallstubmem)+str((iallstubmem-1)*nvmtelayers[ilayer-1]+ivm))
for ivm in range(1,nvmteoverlaplayers[ilayer-1]+1) :
fp.write(" VMSTE_L"+str(ilayer)+"PHI"+letteroverlap_triplet(iallstubmem)+str((iallstubmem-1)*nvmteoverlaplayers[ilayer-1]+ivm))
if ilayer == 3:
for ivm in range(1,nvmtelayers[ilayer-1]+1) :
fp.write(" VMSTE_L"+str(ilayer)+"PHI"+letter_triplet(iallstubmem)+str((iallstubmem-1)*nvmtelayers[ilayer-1]+ivm))
fp.write("\n\n")
#
# Do the VM routers for the TE in the overlap layers
#
#for ilayer in range(1,3) :
# print "layer =",ilayer,"allstub memories",nallstubslayers[ilayer-1]
# fp.write("\n")
# fp.write("#\n")
# fp.write("# VMRouters for the TEs in overlap layer "+str(ilayer)+" \n")
# fp.write("#\n")
# for iallstubmem in range(1,nallstubsoverlaplayers[ilayer-1]+1) :
# allstubsmemname="L"+str(ilayer)+"PHI"+letter(iallstubmem)
# for il in inputlinks :
# if allstubsmemname in il :
# fp.write(il+" ")
# fp.write("> VMRTE_L"+str(ilayer)+"PHI"+letteroverlap(iallstubmem)+" > "#)
# fp.write("AS_L"+str(ilayer)+"PHI"+letteroverlap(iallstubmem))
# for ivm in range(1,nvmteoverlaplayers[ilayer-1]+1) :
# fp.write(" VMSTE_L"+str(ilayer)+"PHI"+letteroverlap(iallstubmem)+str((iallstubmem-1)*nvmteoverlaplayers[ilayer-1]+ivm))
# fp.write("\n\n")
#
# Do the VM routers for the TE in the disks
#
for idisk in range(1,6) :
print "disk =",idisk,"allstub memories",nallstubsdisks[idisk-1]
fp.write("\n")
fp.write("#\n")
fp.write("# VMRouters for the TEs in disk "+str(idisk)+" \n")
fp.write("#\n")
for iallstubmem in range(1,nallstubsdisks[idisk-1]+1) :
allstubsmemname="D"+str(idisk)+"PHI"+letter(iallstubmem)
for il in inputlinks :
if allstubsmemname in il :
fp.write(il+" ")
fp.write("> VMR_D"+str(idisk)+"PHI"+letter(iallstubmem)+" > ")
fp.write("AS_D"+str(idisk)+"PHI"+letter(iallstubmem))
for ivm in range(1,nvmmedisks[idisk-1]+1) :
fp.write(" VMSME_D"+str(idisk)+"PHI"+letter(iallstubmem)+str((iallstubmem-1)*nvmmedisks[idisk-1]+ivm))
if idisk in range(1,5) :
for ivm in range(1,nvmtedisks[idisk-1]+1) :
fp.write(" VMSTE_D"+str(idisk)+"PHI"+letter(iallstubmem)+str((iallstubmem-1)*nvmtedisks[idisk-1]+ivm))
if idisk in range(1,2) :
for ivm in range(1,nvmteoverlapdisks[idisk-1]+1) :
fp.write(" VMSTE_D"+str(idisk)+"PHI"+letteroverlap(iallstubmem)+str((iallstubmem-1)*nvmteoverlapdisks[idisk-1]+ivm))
if displacedseeding :
for ivm in range(1,nvmteoverlapdisks[idisk-1]+1) :
fp.write(" VMSTE_D"+str(idisk)+"PHI"+letteroverlap_triplet(iallstubmem)+str((iallstubmem-1)*nvmteoverlapdisks[idisk-1]+ivm))
fp.write("\n\n")
#
# Do the VM routers for the TE in the overlap disks
#
#for idisk in range(1,2) :
# print "disk =",idisk,"allstub memories overlap ",nallstubsoverlapdisks[idisk-1]
# fp.write("\n")
# fp.write("#\n")
# fp.write("# VMRouters for the TEs in overlap disk "+str(idisk)+" \n")
# fp.write("#\n")
# for iallstubmem in range(1,nallstubsoverlapdisks[idisk-1]+1) :
# allstubsmemname="D"+str(idisk)+"PHI"+letter(iallstubmem)
# for il in inputlinks :
# if allstubsmemname in il :
# fp.write(il+" ")
# fp.write("> VMRTE_D"+str(idisk)+"PHI"+letteroverlap(iallstubmem)+" > ")
# fp.write("AS_D"+str(idisk)+"PHI"+letteroverlap(iallstubmem))
# for ivm in range(1,nvmteoverlapdisks[idisk-1]+1) :
# fp.write(" VMSTE_D"+str(idisk)+"PHI"+letteroverlap(iallstubmem)+str((iallstubmem-1)*nvmteoverlapdisks[idisk-1]+ivm))
# fp.write("\n\n")
#
# Do the VM routers for the ME in the layers
#
#for ilayer in range(1,7) :
# print "layer =",ilayer,"allproj memories",nallprojlayers[ilayer-1]
# fp.write("\n")
# fp.write("#\n")
# fp.write("# VMRouters for the MEs in layer "+str(ilayer)+" \n")
# fp.write("#\n")
# for iallprojmem in range(1,nallprojlayers[ilayer-1]+1) :
# allstubsmemname="L"+str(ilayer)+"PHI"+letter(iallprojmem)
# for il in inputlinks :
# if allstubsmemname in il :
# fp.write(il+" ")
# fp.write("> VMRME_L"+str(ilayer)+"PHI"+letter(iallprojmem)+" > ")
# fp.write("\n\n")
#
# Do the VM routers for the ME in the disks
#
#for idisk in range(1,6) :
# print "disk =",idisk,"allproj memories",nallprojdisks[idisk-1]
# fp.write("\n")
# fp.write("#\n")
# fp.write("# VMRouters for the MEs in disk "+str(idisk)+" \n")
# fp.write("#\n")
# for iallprojmem in range(1,nallprojdisks[idisk-1]+1) :
# allstubsmemname="D"+str(idisk)+"PHI"+letter(iallprojmem)
# for il in inputlinks :
# if allstubsmemname in il :
# fp.write(il+" ")
# fp.write("> VMRME_D"+str(idisk)+"PHI"+letter(iallprojmem)+" > ")
# fp.write("AS_D"+str(idisk)+"PHI"+letter(iallprojmem))
# for ivm in range(1,nvmmedisks[idisk-1]+1) :
# fp.write(" VMSME_D"+str(idisk)+"PHI"+letter(iallprojmem)+str((iallprojmem-1)*nvmmedisks[idisk-1]+ivm))
# fp.write("\n\n")
#
# Do the TED for the LL->L
#
SPD_list=[]
PairAMs = []
if displacedseeding :
for lll in dispLLL :
fp.write("\n")
fp.write("#\n")
fp.write("# Tracklet Engines for Displaced seeding layer"+str(lll[0])+"+layer"+str(lll[1])+"->layer"+str(lll[2])+"\n")
fp.write("#\n")
#print "layer = ",lll[0]
for ivminner in range(1,nallstubslayers[lll[0]-1]*nvmtelayers[lll[0]-1]+1) :
for ivmouter in range(1,nallstubslayers[lll[1]-1]*nvmtelayers[lll[1]-1]+1) :
if validtedpair(lll[0],ivminner,ivmouter) :
amn = "L"+str(lll[0])+letter((ivminner-1)/nvmtelayers[lll[0]-1]+1)+"L"+str(lll[1])+letter((ivmouter-1)/nvmtelayers[lll[1]-1]+1)
if amn not in PairAMs :
PairAMs.append(amn)
fp.write("VMSTE_L"+str(lll[0])+"PHI"+letter((ivminner-1)/nvmtelayers[lll[0]-1]+1)+str(ivminner))
fp.write(" VMSTE_L"+str(lll[1])+"PHI"+letter((ivmouter-1)/nvmtelayers[lll[1]-1]+1)+str(ivmouter))
fp.write(" > TED_L"+str(lll[0])+"PHI"+letter((ivminner-1)/nvmtelayers[lll[0]-1]+1)+str(ivminner))
fp.write("_L"+str(lll[1])+"PHI"+letter((ivmouter-1)/nvmtelayers[lll[1]-1]+1)+str(ivmouter))
fp.write(" > ")
prange = phiproj5stlayer(lll[0],lll[2], ivminner, ivmouter)
#fp.write(str(prange[0])+" "+str(prange[1])+"\n")
for ivmproj in range(1, nallstubslayers[lll[2]-1]*nvmtelayers[lll[2]-1]+1) :
phiprojmin=phirange/nallstubslayers[lll[2]-1]/nvmtelayers[lll[2]-1]*(ivmproj-1)
phiprojmax=phirange/nallstubslayers[lll[2]-1]/nvmtelayers[lll[2]-1]*ivmproj
if prange[0]<phiprojmax and prange[1]>phiprojmin :
spd_name="SPD_L"+str(lll[0])+"PHI"+letter((ivminner-1)/nvmtelayers[lll[0]-1]+1)+str(ivminner)+"_L"+str(lll[1])+"PHI"+letter((ivmouter-1)/nvmtelayers[lll[1]-1]+1)+str(ivmouter)+"_L"+str(lll[2])+"PHI"+letter((ivmproj-1)/nvmtelayers[lll[2]-1]+1)+str(ivmproj)
fp.write(spd_name+" ")
SPD_list.append(spd_name)
fp.write("\n\n")
#
# Do the TED for the LL->D
#
for lll in dispLLD :
fp.write("\n")
fp.write("#\n")
fp.write("# Tracklet Engines for Displaced seeding layer"+str(lll[0])+"+layer"+str(lll[1])+"->disk"+str(lll[2])+"\n")
fp.write("#\n")
#print "layer = ",lll[0]
for ivminner in range(1,nallstubslayers[lll[0]-1]*nvmtelayers[lll[0]-1]+1) :
for ivmouter in range(1,nallstubslayers[lll[1]-1]*nvmtelayers[lll[1]-1]+1) :
if validtedpair(lll[0],ivminner,ivmouter) :
amn = "L"+str(lll[0])+letter_triplet((ivminner-1)/nvmtelayers[lll[0]-1]+1)+"L"+str(lll[1])+letter_triplet((ivmouter-1)/nvmtelayers[lll[1]-1]+1)
if amn not in PairAMs :
PairAMs.append(amn)
fp.write("VMSTE_L"+str(lll[0])+"PHI"+letter_triplet((ivminner-1)/nvmtelayers[lll[0]-1]+1)+str(ivminner))
fp.write(" VMSTE_L"+str(lll[1])+"PHI"+letter_triplet((ivmouter-1)/nvmtelayers[lll[1]-1]+1)+str(ivmouter))
fp.write(" > TED_L"+str(lll[0])+"PHI"+letter_triplet((ivminner-1)/nvmtelayers[lll[0]-1]+1)+str(ivminner))
fp.write("_L"+str(lll[1])+"PHI"+letter_triplet((ivmouter-1)/nvmtelayers[lll[1]-1]+1)+str(ivmouter))
fp.write(" > ")
prange = phiproj5stlayer_to_disk(lll[0],lll[2], ivminner, ivmouter)
#fp.write(str(prange[0])+" "+str(prange[1])+"\n")
for ivmproj in range(1, nallstubsdisks[lll[2]-1]*nvmteoverlapdisks[lll[2]-1]+1) :
phiprojmin=phirange/nallstubsdisks[lll[2]-1]/nvmteoverlapdisks[lll[2]-1]*(ivmproj-1)
phiprojmax=phirange/nallstubsdisks[lll[2]-1]/nvmteoverlapdisks[lll[2]-1]*ivmproj
if prange[0]<phiprojmax and prange[1]>phiprojmin :
spd_name="SPD_L"+str(lll[0])+"PHI"+letter_triplet((ivminner-1)/nvmtelayers[lll[0]-1]+1)+str(ivminner)+"_L"+str(lll[1])+"PHI"+letter_triplet((ivmouter-1)/nvmtelayers[lll[1]-1]+1)+str(ivmouter)+"_D"+str(lll[2])+"PHI"+letteroverlap_triplet((ivmproj-1)/nvmteoverlapdisks[lll[2]-1]+1)+str(ivmproj)
fp.write(spd_name+" ")
SPD_list.append(spd_name)
fp.write("\n\n")
#
# Do the TED for the DD->L
#
for lll in dispDDL :
fp.write("\n")
fp.write("#\n")
fp.write("# Tracklet Engines for Displaced seeding disk"+str(lll[0])+"+disk"+str(lll[1])+"->layer"+str(lll[2])+"\n")
fp.write("#\n")
#print "disk = ",lll[0]
for ivminner in range(1,nallstubsdisks[lll[0]-1]*nvmtedisks[lll[0]-1]+1) :
for ivmouter in range(1,nallstubsdisks[lll[1]-1]*nvmtedisks[lll[1]-1]+1) :
if validtedpairdisk(lll[0],ivminner,ivmouter) :
amn = "D"+str(lll[0])+letter((ivminner-1)/nvmtedisks[lll[0]-1]+1)+"D"+str(lll[1])+letter((ivmouter-1)/nvmtedisks[lll[1]-1]+1)
if amn not in PairAMs :
PairAMs.append(amn)
fp.write("VMSTE_D"+str(lll[0])+"PHI"+letter((ivminner-1)/nvmtedisks[lll[0]-1]+1)+str(ivminner))
fp.write(" VMSTE_D"+str(lll[1])+"PHI"+letter((ivmouter-1)/nvmtedisks[lll[1]-1]+1)+str(ivmouter))
fp.write(" > TED_D"+str(lll[0])+"PHI"+letter((ivminner-1)/nvmtedisks[lll[0]-1]+1)+str(ivminner))
fp.write("_D"+str(lll[1])+"PHI"+letter((ivmouter-1)/nvmtedisks[lll[1]-1]+1)+str(ivmouter))
fp.write(" > ")
prange = phiproj5stdisk_to_layer(lll[0],lll[2], ivminner, ivmouter)
#fp.write(str(prange[0])+" "+str(prange[1])+"\n")
for ivmproj in range(1, nallstubslayers[lll[2]-1]*nvmteoverlaplayers[lll[2]-1]+1) :
phiprojmin=phirange/nallstubslayers[lll[2]-1]/nvmteoverlaplayers[lll[2]-1]*(ivmproj-1)
phiprojmax=phirange/nallstubslayers[lll[2]-1]/nvmteoverlaplayers[lll[2]-1]*ivmproj
if prange[0]<phiprojmax and prange[1]>phiprojmin :
spd_name="SPD_D"+str(lll[0])+"PHI"+letter((ivminner-1)/nvmtedisks[lll[0]-1]+1)+str(ivminner)+"_D"+str(lll[1])+"PHI"+letter((ivmouter-1)/nvmtedisks[lll[1]-1]+1)+str(ivmouter)+"_L"+str(lll[2])+"PHI"+letteroverlap_triplet((ivmproj-1)/nvmteoverlaplayers[lll[2]-1]+1)+str(ivmproj)
fp.write(spd_name+" ")
SPD_list.append(spd_name)
fp.write("\n\n")
TPROJ_list=[]
TPAR_list=[]
SP_list=[]
if combinedTP :
#
# Do the TP for the layers
#
for ilayer in (1,2,3,5) :
if ilayer==2 and not extraseeding :
continue
fp.write("\n")
fp.write("#\n")
fp.write("# Tracklet Processors for seeding layer "+str(ilayer)+" \n")
fp.write("#\n")
if ilayer!=2 :
for ivminner in range(1,nallstubslayers[ilayer-1]*nvmtelayers[ilayer-1]+1) :
for ivmouter in range(1,nallstubslayers[ilayer]*nvmtelayers[ilayer]+1) :
if validtepair(ilayer,ivminner,ivmouter) :
sp_name="SP_L"+str(ilayer)+"PHI"+letter((ivminner-1)/nvmtelayers[ilayer-1]+1)+str(ivminner)+"_L"+str(ilayer+1)+"PHI"+letter((ivmouter-1)/nvmtelayers[ilayer]+1)+str(ivmouter)
SP_list.append(sp_name)
else :
for ivminner in range(1,nallstubslayers[ilayer-1]*nvmteextralayers[ilayer-1]+1) :
for ivmouter in range(1,nallstubslayers[ilayer]*nvmteextralayers[ilayer]+1) :
if validtepairextra(ilayer,ivminner,ivmouter) :
sp_name="SP_L"+str(ilayer)+"PHI"+letterextra((ivminner-1)/nvmteextralayers[ilayer-1]+1)+str(ivminner)+"_L"+str(ilayer+1)+"PHI"+letterextra((ivmouter-1)/nvmteextralayers[ilayer]+1)+str(ivmouter)
SP_list.append(sp_name)
sp_layer=[]
for sp_name in SP_list :
if "_L"+str(ilayer) in sp_name and "_L"+str(ilayer+1) in sp_name :
#print ilayer,sp_name
sp_layer.append(sp_name)
tcs=12
if ilayer==2 :
tcs=2
if ilayer==3 :
tcs=8
if ilayer==5 :
tcs=4
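# `split` is a helper defined elsewhere in this script; it is assumed to chunk
# sp_layer into `tcs` roughly equal groups, one group of stub-pair memories
# per Tracklet Processor.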
sp_per_tc=split(sp_layer,tcs)
tp_count=0
for sps in sp_per_tc :
print len(sps), sps
innervms=[]
outervms=[]
for sp_name in sps :
innervm=sp_name.split("_")[1]
outervm=sp_name.split("_")[2]
fp.write("VMSTE_"+innervm+" VMSTE_"+outervm+" ")
tp_count+=1
as_names = asmems(sps)
for asn in as_names:
fp.write(asn+" ")
tpar_name="TPAR_L"+str(ilayer)+"L"+str(ilayer+1)+xx+letter(tp_count)
fp.write(" > TP_L"+str(ilayer)+"L"+str(ilayer+1)+letter(tp_count)+" > "+tpar_name)
TPAR_list.append(tpar_name)
for projlayer in range(1,7) :
if ilayer==2 and projlayer==6:
continue #seeding in L2L3 assumed not to project to L6
if projlayer!=ilayer and projlayer!=ilayer+1 :
projrange=phiprojrange(ilayer,projlayer,sps)
for iallproj in range(1,nallprojlayers[projlayer-1]+1) :
phiprojmin=phirange/nallprojlayers[projlayer-1]*(iallproj-1)
phiprojmax=phirange/nallprojlayers[projlayer-1]*iallproj
if projrange[0]<phiprojmax and projrange[1]>phiprojmin :
proj_name="TPROJ_L"+str(ilayer)+"L"+str(ilayer+1)+xx+letter(tp_count)+"_L"+str(projlayer)+"PHI"+letter(iallproj)
if proj_name not in unusedproj :
fp.write(" "+proj_name)
TPROJ_list.append(proj_name)
projdisks=[]
if ilayer<5 :
projdisks.append(1)
projdisks.append(2)
if ilayer==2 :
projdisks.append(3)
projdisks.append(4)
if ilayer==1 :
projdisks.append(3)
projdisks.append(4)
projdisks.append(5)
for projdisk in projdisks :
projrange=phiprojrangelayertodisk(ilayer,projdisk,sps)
for iallproj in range(1,nallprojdisks[projdisk-1]+1) :
phiprojmin=phirange/nallprojdisks[projdisk-1]*(iallproj-1)
phiprojmax=phirange/nallprojdisks[projdisk-1]*iallproj
if projrange[0]<phiprojmax and projrange[1]>phiprojmin :
proj_name="TPROJ_L"+str(ilayer)+"L"+str(ilayer+1)+xx+letter(tp_count)+"_D"+str(projdisk)+"PHI"+letter(iallproj)
if proj_name not in unusedproj :
fp.write(" "+proj_name)
TPROJ_list.append(proj_name)
fp.write("\n\n")
#
# Do the TP for the disks
#
for idisk in (1,3) :
fp.write("#\n")
fp.write("# Tracklet Processors for seeding disk "+str(idisk)+" \n")
fp.write("#\n")
for ivminner in range(1,nallstubsdisks[idisk-1]*nvmtedisks[idisk-1]+1) :
for ivmouter in range(1,nallstubsdisks[idisk]*nvmtedisks[idisk]+1) :
if validtepairdisk(idisk,ivminner,ivmouter) :
sp_name="SP_D"+str(idisk)+"PHI"+letter((ivminner-1)/nvmtedisks[idisk-1]+1)+str(ivminner)+"_D"+str(idisk+1)+"PHI"+letter((ivmouter-1)/nvmtedisks[idisk]+1)+str(ivmouter)
SP_list.append(sp_name)
sp_disk=[]
for sp_name in SP_list :
if "_D"+str(idisk) in sp_name and "_D"+str(idisk+1) in sp_name :
#print idisk,sp_name
sp_disk.append(sp_name)
tcs=6
if idisk==3 :
tcs=2
sp_per_tc=split(sp_disk,tcs)
tc_count=0
for sps in sp_per_tc :
#print len(sps), sps
for sp_name in sps :
innervm=sp_name.split("_")[1]
outervm=sp_name.split("_")[2]
fp.write("VMSTE_"+innervm+" VMSTE_"+outervm+" ")
tc_count+=1
as_names = asmems(sps)
for asn in as_names:
fp.write(asn+" ")
tpar_name="TPAR_D"+str(idisk)+"D"+str(idisk+1)+xx+letter(tc_count)
fp.write(" > TP_D"+str(idisk)+"D"+str(idisk+1)+letter(tc_count)+" > "+tpar_name)
TPAR_list.append(tpar_name)
for projdisk in range(1,6) :
if projdisk!=idisk and projdisk!=idisk+1 :
#print "idisk, projdisk, sps:",idisk, projdisk,sps
projrange=phiprojrangedisk(idisk,projdisk,sps)
for iallproj in range(1,nallprojdisks[projdisk-1]+1) :
print "looking for projection | |
# python/paddle/fluid/contrib/layers/rnn_impl.py
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from paddle.fluid import layers
from paddle.fluid.dygraph import Layer
from paddle.fluid.layers.control_flow import StaticRNN
__all__ = ['BasicGRUUnit', 'basic_gru', 'BasicLSTMUnit', 'basic_lstm']
class BasicGRUUnit(Layer):
"""
****
BasicGRUUnit class, using basic operators to build GRU
The algorithm can be described as the equations below.
.. math::
u_t & = actGate(W_ux xu_{t} + W_uh h_{t-1} + b_u)
r_t & = actGate(W_rx xr_{t} + W_rh h_{t-1} + b_r)
m_t & = actNode(W_cx xm_t + W_ch dot(r_t, h_{t-1}) + b_m)
h_t & = dot(u_t, h_{t-1}) + dot((1-u_t), m_t)
Args:
name_scope(string) : The name scope used to identify parameters and biases
hidden_size (integer): The hidden size used in the Unit.
param_attr(ParamAttr|None): The parameter attribute for the learnable
weight matrix. Note:
If it is set to None or one attribute of ParamAttr, gru_unit will
create ParamAttr as param_attr. If the Initializer of the param_attr
is not set, the parameter is initialized with Xavier. Default: None.
bias_attr (ParamAttr|None): The parameter attribute for the bias
of GRU unit.
If it is set to None or one attribute of ParamAttr, gru_unit will
create ParamAttr as bias_attr. If the Initializer of the bias_attr
is not set, the bias is initialized zero. Default: None.
gate_activation (function|None): The activation function for gates (actGate).
Default: 'fluid.layers.sigmoid'
activation (function|None): The activation function for cell (actNode).
Default: 'fluid.layers.tanh'
dtype(string): data type used in this unit
Examples:
.. code-block:: python
import paddle.fluid.layers as layers
from paddle.fluid.contrib.layers import BasicGRUUnit
input_size = 128
hidden_size = 256
input = layers.data( name = "input", shape = [-1, input_size], dtype='float32')
pre_hidden = layers.data( name = "pre_hidden", shape=[-1, hidden_size], dtype='float32')
gru_unit = BasicGRUUnit( "gru_unit", hidden_size )
new_hidden = gru_unit( input, pre_hidden )
"""
def __init__(self,
name_scope,
hidden_size,
param_attr=None,
bias_attr=None,
gate_activation=None,
activation=None,
dtype='float32'):
super(BasicGRUUnit, self).__init__(name_scope, dtype)
self._name = name_scope
self._hidden_size = hidden_size
self._param_attr = param_attr
self._bias_attr = bias_attr
self._gate_activation = gate_activation or layers.sigmoid
self._activation = activation or layers.tanh
self._dtype = dtype
def _build_once(self, input, pre_hidden):
self._input_size = input.shape[-1]
assert (self._input_size > 0)
self._gate_weight = self.create_parameter(
attr=self._param_attr,
shape=[self._input_size + self._hidden_size, 2 * self._hidden_size],
dtype=self._dtype)
self._candidate_weight = self.create_parameter(
attr=self._param_attr,
shape=[self._input_size + self._hidden_size, self._hidden_size],
dtype=self._dtype)
self._gate_bias = self.create_parameter(
self._bias_attr,
shape=[2 * self._hidden_size],
dtype=self._dtype,
is_bias=True)
self._candidate_bias = self.create_parameter(
self._bias_attr,
shape=[self._hidden_size],
dtype=self._dtype,
is_bias=True)
def forward(self, input, pre_hidden):
concat_input_hidden = layers.concat([input, pre_hidden], 1)
gate_input = layers.matmul(x=concat_input_hidden, y=self._gate_weight)
gate_input = layers.elementwise_add(gate_input, self._gate_bias)
gate_input = self._gate_activation(gate_input)
r, u = layers.split(gate_input, num_or_sections=2, dim=1)
r_hidden = r * pre_hidden
candidate = layers.matmul(
    layers.concat([input, r_hidden], 1), self._candidate_weight)
candidate = layers.elementwise_add(candidate, self._candidate_bias)
c = self._activation(candidate)
new_hidden = u * pre_hidden + (1 - u) * c
return new_hidden
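# A minimal NumPy sketch of the same update as forward() above (assuming
# sigmoid gates, tanh candidate, and the concat-then-matmul layout used
# there); useful for checking shapes and the math offline:
#
#   import numpy as np
#   def gru_step(x, h, W_g, b_g, W_c, b_c):
#       # x: [batch, input_size], h: [batch, hidden_size]
#       g = 1.0 / (1.0 + np.exp(-(np.concatenate([x, h], 1).dot(W_g) + b_g)))
#       r, u = np.split(g, 2, axis=1)          # reset and update gates
#       c = np.tanh(np.concatenate([x, r * h], 1).dot(W_c) + b_c)
#       return u * h + (1.0 - u) * c           # new hidden state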
def basic_gru(input,
init_hidden,
hidden_size,
num_layers=1,
sequence_length=None,
dropout_prob=0.0,
bidirectional=False,
batch_first=True,
param_attr=None,
bias_attr=None,
gate_activation=None,
activation=None,
dtype='float32',
name='basic_gru'):
"""
GRU implementation using basic operators; supports multiple layers and bidirectional GRU.
.. math::
u_t & = actGate(W_ux xu_{t} + W_uh h_{t-1} + b_u)
r_t & = actGate(W_rx xr_{t} + W_rh h_{t-1} + b_r)
m_t & = actNode(W_cx xm_t + W_ch dot(r_t, h_{t-1}) + b_m)
h_t & = dot(u_t, h_{t-1}) + dot((1-u_t), m_t)
Args:
input (Variable): GRU input tensor,
if batch_first = False, shape should be ( seq_len x batch_size x input_size )
if batch_first = True, shape should be ( batch_size x seq_len x input_size )
init_hidden(Variable|None): The initial hidden state of the GRU.
This is a tensor with shape ( num_layers x batch_size x hidden_size);
if bidirectional = True, the shape should be ( num_layers*2 x batch_size x hidden_size),
and it can be reshaped to a tensor of shape ( num_layers x 2 x batch_size x hidden_size) to use.
If it's None, it will be set to all 0.
hidden_size (int): Hidden size of the GRU
num_layers (int): The total number of layers of the GRU
sequence_length (Variable|None): A Tensor (shape [batch_size]) that stores the real length of each instance;
it will be converted to a mask that masks the padding ids.
If it is None, no padding ids are assumed.
dropout_prob(float|0.0): Dropout probability; dropout ONLY applies to the rnn output of each layer,
NOT between time steps
bidirectional (bool|False): If it is bidirectional
param_attr(ParamAttr|None): The parameter attribute for the learnable
weight matrix. Note:
If it is set to None or one attribute of ParamAttr, gru_unit will
create ParamAttr as param_attr. If the Initializer of the param_attr
is not set, the parameter is initialized with Xavier. Default: None.
bias_attr (ParamAttr|None): The parameter attribute for the bias
of GRU unit.
If it is set to None or one attribute of ParamAttr, gru_unit will
create ParamAttr as bias_attr. If the Initializer of the bias_attr
is not set, the bias is initialized zero. Default: None.
gate_activation (function|None): The activation function for gates (actGate).
Default: 'fluid.layers.sigmoid'
activation (function|None): The activation function for cell (actNode).
Default: 'fluid.layers.tanh'
dtype(string): data type used in this unit
name(string): name used to identify parameters and biases
Returns:
rnn_out(Tensor), last_hidden(Tensor)
- rnn_out is the GRU hidden output, with shape (seq_len x batch_size x hidden_size); \
if bidirectional is set to True, the shape will be ( seq_len x batch_size x hidden_size*2)
- last_hidden is the hidden state of the last step of the GRU, \
with shape ( num_layers x batch_size x hidden_size ); \
if bidirectional is set to True, the shape will be ( num_layers*2 x batch_size x hidden_size),
which can be reshaped to a tensor of shape ( num_layers x 2 x batch_size x hidden_size)
Examples:
.. code-block:: python
import paddle.fluid.layers as layers
from paddle.fluid.contrib.layers import basic_gru
batch_size = 20
input_size = 128
hidden_size = 256
num_layers = 2
dropout = 0.5
bidirectional = True
batch_first = False
input = layers.data( name = "input", shape = [-1, batch_size, input_size], dtype='float32')
pre_hidden = layers.data( name = "pre_hidden", shape=[-1, hidden_size], dtype='float32')
sequence_length = layers.data( name="sequence_length", shape=[-1], dtype='int32')
rnn_out, last_hidden = basic_gru( input, pre_hidden, hidden_size, num_layers = num_layers, \
sequence_length = sequence_length, dropout_prob=dropout, bidirectional = bidirectional, \
batch_first = batch_first)
"""
fw_unit_list = []
for i in range(num_layers):
new_name = name + "_layers_" + str(i)
fw_unit_list.append(
BasicGRUUnit(new_name, hidden_size, param_attr, bias_attr,
gate_activation, activation, dtype))
if bidirectional:
bw_unit_list = []
for i in range(num_layers):
new_name = name + "_reverse_layers_" + str(i)
bw_unit_list.append(
BasicGRUUnit(new_name, hidden_size, param_attr, bias_attr,
gate_activation, activation, dtype))
if batch_first:
input = layers.transpose(input, [1, 0, 2])
mask = None
if sequence_length:
max_seq_len = layers.shape(input)[0]
mask = layers.sequence_mask(
sequence_length, maxlen=max_seq_len, dtype='float32')
mask = layers.transpose(mask, [1, 0])
direc_num = 1
if bidirectional:
direc_num = 2
if init_hidden:
init_hidden = layers.reshape(
init_hidden, shape=[num_layers, direc_num, -1, hidden_size])
def get_single_direction_output(rnn_input,
unit_list,
mask=None,
direc_index=0):
rnn = StaticRNN()
with rnn.step():
step_input = rnn.step_input(rnn_input)
if mask:
step_mask = rnn.step_input(mask)
for i in range(num_layers):
if init_hidden:
pre_hidden = rnn.memory(init=init_hidden[i, direc_index])
else:
pre_hidden = rnn.memory(
batch_ref=rnn_input,
shape=[-1, hidden_size],
ref_batch_dim_idx=1)
new_hidden = unit_list[i](step_input, pre_hidden)
if mask:
new_hidden = layers.elementwise_mul(
new_hidden, step_mask, axis=0) - layers.elementwise_mul(
pre_hidden, (step_mask - 1), axis=0)
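# step_mask is 1.0 for valid steps and 0.0 for padding, so the expression
# above (mask*new - (mask-1)*pre == mask*new + (1-mask)*pre) keeps the
# fresh state on valid steps and carries pre_hidden through padded ones.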
rnn.update_memory(pre_hidden, new_hidden)
rnn.step_output(new_hidden)
step_input = new_hidden
if dropout_prob is not None and dropout_prob > 0.0:
step_input = layers.dropout(
step_input,
dropout_prob=dropout_prob, )
rnn.step_output(step_input)
rnn_out = rnn()
last_hidden_array = []
rnn_output = rnn_out[-1]
for i in range(num_layers):
last_hidden = rnn_out[i]
last_hidden = last_hidden[-1]
last_hidden_array.append(last_hidden)
last_hidden_output = layers.concat(last_hidden_array, axis=0)
last_hidden_output = layers.reshape(
last_hidden_output, shape=[num_layers, -1, hidden_size])
return rnn_output, last_hidden_output
# seq_len, batch_size, hidden_size
fw_rnn_out, fw_last_hidden = get_single_direction_output(
input, fw_unit_list, mask, direc_index=0)
if bidirectional:
bw_input = layers.reverse(input, axis=[0])
bw_mask = None
if mask:
bw_mask = layers.reverse(mask, axis=[0])
bw_rnn_out, bw_last_hidden = get_single_direction_output(
bw_input, bw_unit_list, bw_mask, direc_index=1)
bw_rnn_out = layers.reverse(bw_rnn_out, axis=[0])
rnn_out = layers.concat([fw_rnn_out, bw_rnn_out], axis=2)
last_hidden = layers.concat([fw_last_hidden, bw_last_hidden], axis=1)
last_hidden = layers.reshape(
last_hidden, shape=[num_layers * direc_num, -1, hidden_size])
else:
    rnn_out = fw_rnn_out
    last_hidden = fw_last_hidden
if batch_first:
    rnn_out = layers.transpose(rnn_out, [1, 0, 2])
return rnn_out, last_hidden
# bobbypaton/FullMonte
#!/usr/bin/python
### ### ### ### ###
### ### ### ### ###
#####b. ####b. ###### .d##b. #####b. ### ####b. #####b. ###
### ### "##b "##b ### d##""##b ### "##b ### "##b ### "##b ###
### ### ### .d###### ### ### ### ### ### ### .d###### ### ###
### ### d##P ### ### Y##b. Y##..##P ### ### ### ### ### ### d##P ###
### #####P" "Y###### "Y### "Y##P" ### ### ### "Y###### #####P" ###
###
###
# THIS SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# Comments and/or additions are welcome (send e-mail to:
# <EMAIL>
###############################################################
# FMTools.py #
# Libraries and methods for Full Monte Carlo #
###############################################################
####### Written by: <NAME> ###############################
####### Last modified: Mar 20, 2013 #########################
###############################################################
# Python Libraries ############################################
import subprocess, sys, os, commands, math, time, tarfile, random
###############################################################
# EXECUTABLES ##################################################
G09_EXEC = 'g09sub'
MOPAC_EXEC = '/u/rsp/rpaton/mopac/MOPAC2012.exe'
###############################################################
# The time elapsed between two specified Y/M/D 24H/M/S format #
def RealTime(time1, time2):
timeTuple1 = time.strptime(time1, "%Y/%m/%d %H:%M:%S")
timeTuple2 = time.strptime(time2, "%Y/%m/%d %H:%M:%S")
time_difference = time.mktime(timeTuple2) - time.mktime(timeTuple1)
realdays=int(time_difference/(60.0*60*24))
realhours=int(time_difference/(60.0*60))-realdays*24
realmins=int(time_difference/60.0)-realdays*24-realhours*60
realsecs=int(time_difference)-realdays*24*60*60-realhours*60*60-realmins*60
timediff=[realdays,realhours,realmins,realsecs]
return timediff
###############################################################
# Tidies up times in [D,H,M,S] format #########################
def CPUTime(CSearch):
totalcpu = [0,0,0,0]
for cpu in CSearch.ALLCPU:
for i in range(0,4): totalcpu[i] = totalcpu[i] + cpu[i]
totalcpu[2] = totalcpu[2]+int(totalcpu[3]/60.0)
totalcpu[3] = totalcpu[3]-60*int(totalcpu[3]/60.0)
totalcpu[1] = totalcpu[1] + int(totalcpu[2]/60.0)
totalcpu[2] = totalcpu[2] - 60*int(totalcpu[2]/60.0)
totalcpu[0] = totalcpu[0] + int(totalcpu[1]/24.0)
totalcpu[1] = totalcpu[1] - 24*int(totalcpu[1]/24.0)
return totalcpu
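# Worked example for RealTime (verifiable by hand):
#   RealTime("2013/03/20 10:00:00", "2013/03/21 11:02:03") -> [1, 1, 2, 3]
# i.e. 1 day, 1 hour, 2 minutes and 3 seconds elapsed; CPUTime then folds a
# list of such [D,H,M,S] entries into a single carried total.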
###############################################################
# Define Job type #############################################
class JobSpec:
def __init__(self, software):
self.PROGRAM = software
self.CONSTRAINED = []
if software == "Mopac":
self.EXEC = MOPAC_EXEC
self.INPUT = ".mop"
self.ARGS = "&"
if software == "Gaussian":
self.EXEC = G09_EXEC
self.INPUT = " "
self.ARGS = " "
###############################################################
# Submits a computational chemistry job #######################
def submitJob(JobSpec,MolSpec,log):
if not os.path.exists(MolSpec.NAME+".log"):command = JobSpec.EXEC+" "+MolSpec.NAME+JobSpec.INPUT+" "+JobSpec.ARGS+" > /dev/null"
else: command = ""
try:
#print "deactivated submission"
retcode = subprocess.call(command, shell=True)
if retcode != 0:
print >>sys.stderr, log.Write("\nERROR: Submission of "+MolSpec.NAME+" failed")
return -1
else: return 1
except OSError, e:
print >>sys.stderr, log.Write("\nERROR: Submission of "+MolSpec.NAME+" failed")
return -1
###############################################################
# Check that a computational chemistry job has finished #######
def isJobFinished(JobSpec, MolSpec):
if JobSpec.PROGRAM == "Mopac":
if not os.path.exists(MolSpec.NAME+".out"): return 0
else:
outfile = open(MolSpec.NAME+".out","r")
jobdone=0; normal=0
for line in outfile.readlines():
if JobSpec.PROGRAM == "Mopac":
if line.find("== MOPAC DONE ==") > -1:
jobdone = jobdone+1
normal=normal+1
if line.find("EXCESS NUMBER OF OPTIMIZATION CYCLES") > -1:
jobdone = jobdone+1
normal=normal-1
outfile.close()
if JobSpec.PROGRAM == "Gaussian":
if os.path.exists(MolSpec.NAME+".out"):
outfile = open(MolSpec.NAME+".out","r")
if os.path.exists(MolSpec.NAME+".log"):
outfile = open(MolSpec.NAME+".log","r");
jobdone=0; normal=0
for line in outfile.readlines():
if line.find("Normal termination") > -1:
jobdone = jobdone+1
normal=normal+1
outfile.close()
if jobdone>0 and normal>0: return 1
if jobdone>0 and normal==0: return 2
else:
if JobSpec.PROGRAM == "Mopac": modtime=commands.getoutput("ls -l -t "+MolSpec.NAME+".out")
if JobSpec.PROGRAM == "Gaussian":
if os.path.exists(MolSpec.NAME+".log"):modtime=commands.getoutput("ls -l -t "+MolSpec.NAME+".log")
if os.path.exists(MolSpec.NAME+".out"):modtime=commands.getoutput("ls -l -t "+MolSpec.NAME+".out")
#print modtime
for mod in modtime.split():
if mod.find(":") > -1: timeofday = mod
Elapsed = RealTime(time.strftime("%Y/%m/%d" , time.localtime())+" "+modtime.split()[7]+":00", time.strftime("%Y/%m/%d %H:%M:%S", time.localtime()))
ElapsedMins = Elapsed[0]*24*60+Elapsed[1]*60+Elapsed[2]
if JobSpec.PROGRAM == "Mopac":
if ElapsedMins < 5: return 0
else: return -1
###############################################################
# Filter prior to optimization - if there are any very close nonbonded contacts, a non-zero value is returned
def checkDists(MolSpec, SearchParams):
checkval = 0
for i in range(0,len(MolSpec.CARTESIANS)):
bondedatomlist = []
for partners in MolSpec.CONNECTIVITY[i]: bondedatomlist.append(int(partners.split("__")[0])-1)
for j in range(i+1,len(MolSpec.CARTESIANS)):
bond = 0
for bondedatom in bondedatomlist:
if j == bondedatom: bond = bond + 1
if bond == 0:
totdist = abs(calcdist(i, j, MolSpec.CARTESIANS))
bump = SearchParams.RJCT*(bondiRadius(atomicnumber(MolSpec.ATOMTYPES[i]))+bondiRadius(atomicnumber(MolSpec.ATOMTYPES[j])))
# If heteroatom - hydrogen bonds are not specified as fixed...
if SearchParams.HSWAP != 0:
if MolSpec.ATOMTYPES[i]=="N" or MolSpec.ATOMTYPES[i]=="O" or MolSpec.ATOMTYPES[i]=="S":
if MolSpec.ATOMTYPES[j]=="H": bump=0.75*bump
if MolSpec.ATOMTYPES[j]=="N" or MolSpec.ATOMTYPES[j]=="O" or MolSpec.ATOMTYPES[j]=="S":
if MolSpec.ATOMTYPES[i]=="H": bump=0.75*bump
if totdist<bump:
checkval = checkval+1
#print " PREOPT: Rejecting structure!",MolSpec.ATOMTYPES[i],(i+1),MolSpec.ATOMTYPES[j],(j+1),"distance =", totdist,"Ang"
return checkval
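# Worked example of the rejection threshold: for a nonbonded C...H contact
# with a hypothetical SearchParams.RJCT of 0.5, bump = 0.5*(1.70 + 1.09) =
# 1.395 Angstrom (Bondi radii from bondiRadius below), so any such contact
# shorter than that rejects the structure before optimization.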
###############################################################
# Some useful arrays ##########################################
periodictable = ["","H","He","Li","Be","B","C","N","O","F","Ne","Na","Mg","Al","Si","P","S","Cl","Ar","K","Ca","Sc","Ti","V","Cr","Mn","Fe","Co","Ni","Cu","Zn","Ga","Ge","As","Se","Br","Kr","Rb","Sr","Y","Zr",
"Nb","Mo","Tc","Ru","Rh","Pd","Ag","Cd","In","Sn","Sb","Te","I","Xe","Cs","Ba","La","Ce","Pr","Nd","Pm","Sm","Eu","Gd","Tb","Dy","Ho","Er","Tm","Yb","Lu","Hf","Ta","W","Re","Os","Ir","Pt","Au","Hg","Tl",
"Pb","Bi","Po","At","Rn","Fr","Ra","Ac","Th","Pa","U","Np","Pu","Am","Cm","Bk","Cf","Es","Fm","Md","No","Lr","Rf","Db","Sg","Bh","Hs","Mt","Ds","Rg","Uub","Uut","Uuq","Uup","Uuh","Uus","Uuo"]
atomicmass = [0.0,1.008, 4.003, 6.941, 9.012, 10.81, 12.01, 14.01, 16.00, 19.00, 20.18, 22.99, 24.31, 26.98, 28.09, 30.97, 32.07, 35.45, 39.95, 39.10, 40.08, 44.96, 47.87, 50.94, 52.00, 54.94, 55.84, 58.93, 58.69,
63.55, 65.39, 69.72, 72.61, 74.92, 78.96, 79.90, 83.80, 85.47, 87.62, 88.91, 91.22, 92.91, 95.94, 99.0, 101.07, 102.91, 106.42, 107.87, 112.41, 114.82, 118.71, 121.76, 127.60, 126.90, 131.29]
calendar=["","jan","feb","mar","apr","may","jun","jul","aug","sep","oct","nov","dec"]
def digitalMonth(month):
digital = 0
for i in range(0,len(calendar)):
if calendar[i] in month.lower(): digital = i
return digital
def elementID(massno):
if massno < len(periodictable): return periodictable[massno]
else: return "XX"
def atomicnumber(element):
atomicno = 0
for i in range(0,len(periodictable)):
if element == periodictable[i]: atomicno = i
return atomicno
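# Examples: elementID(6) -> "C", atomicnumber("C") -> 6, digitalMonth("Mar") -> 3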
def bondiRadius(massno):
#van der Waals radii for all atoms from: <NAME>, J. Phys. Chem. 1964, 68, 441-452, except hydrogen, which is taken from <NAME>; <NAME>, J. Phys. Chem. 1996, 100, 7384-7391
#Radii that are not available in either of these publications have RvdW = 2.00 Angstrom
bondi = [0.0,1.09, 1.40, 1.82,2.00,2.00,1.70,1.55,1.52,1.47,1.54,2.27,1.73,2.00,2.10,1.80,1.80,1.75,1.88,2.75,2.00,2.00,2.00,2.00,2.00,2.00,2.00,2.00,1.63,1.40,1.39,1.87,2.00,1.85,1.90,
1.85,2.02,2.00,2.00,2.00,2.00,2.00,2.00,2.00,2.00,2.00,1.63,1.72,1.58,1.93,2.17,2.00,2.06,1.98,2.16,2.00,2.00,2.00,2.00,2.00,2.00,2.00,2.00,2.00,1.72,1.66,1.55,1.96,2.02,2.00,2.00,2.00,
2.00,2.00,2.00,2.00,2.00,2.00,2.00,2.00,2.00,2.00,2.00,2.00,2.00,2.00,2.00,2.00,2.00,2.00,2.00,2.00,2.00,2.00,2.00,2.00,2.00,2.00,2.00,2.00,2.00,2.00,2.00,2.00,2.00,2.00,2.00,2.00,2.00,2.00,1.86]
if massno<len(bondi): radius = bondi[massno]
else: radius = 2.0
return radius
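# Example: bondiRadius(6) -> 1.70 (carbon); elements missing from the table
# fall back to 2.0 Angstrom.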
###############################################################
#Geometric calculations #######################################
def calcdist(atoma,atomb,coords):
x1=coords[atoma][0]
y1=coords[atoma][1]
z1=coords[atoma][2]
x2=coords[atomb][0]
y2=coords[atomb][1]
z2=coords[atomb][2]
ba = [x1-x2, y1-y2, z1-z2]
dist = math.sqrt(ba[0]*ba[0]+ba[1]*ba[1]+ba[2]*ba[2])
return dist
def calcangle(atoma,atomb,atomc,coords):
x1=coords[atoma][0]
y1=coords[atoma][1]
z1=coords[atoma][2]
x2=coords[atomb][0]
y2=coords[atomb][1]
z2=coords[atomb][2]
x3=coords[atomc][0]
y3=coords[atomc][1]
z3=coords[atomc][2]
ba = [x1-x2, y1-y2, z1-z2]
bc = [x3-x2, y3-y2, z3-z2]
angle = 180.0/math.pi*math.acos((ba[0]*bc[0]+ba[1]*bc[1]+ba[2]*bc[2])/(math.sqrt(ba[0]*ba[0]+ba[1]*ba[1]+ba[2]*ba[2])*math.sqrt(bc[0]*bc[0]+bc[1]*bc[1]+bc[2]*bc[2])))
return angle
def calcdihedral(atoma,atomb,atomc,atomd,coords):
x1=coords[atoma][0]
y1=coords[atoma][1]
z1=coords[atoma][2]
x2=coords[atomb][0]
y2=coords[atomb][1]
z2=coords[atomb][2]
x3=coords[atomc][0]
y3=coords[atomc][1]
z3=coords[atomc][2]
x4=coords[atomd][0]
y4=coords[atomd][1]
z4=coords[atomd][2]
ax= (y2-y1)*(z2-z3)-(z2-z1)*(y2-y3)
ay= (z2-z1)*(x2-x3)-(x2-x1)*(z2-z3)
az= (x2-x1)*(y2-y3)-(y2-y1)*(x2-x3)
bx= (y3-y2)*(z3-z4)-(z3-z2)*(y3-y4)
by= (z3-z2)*(x3-x4)-(x3-x2)*(z3-z4)
bz= (x3-x2)*(y3-y4)-(y3-y2)*(x3-x4)
nbx= (y2-y3)*(z4-z3)-(z2-z3)*(y4-y3)
nby= (z2-z3)*(x4-x3)-(x2-x3)*(z4-z3)
nbz= (x2-x3)*(y4-y3)-(y2-y3)*(x4-x3)
torsion=180.0/math.pi*math.acos((ax*bx+ay*by+az*bz)/(math.sqrt(ax*ax+ay*ay+az*az)*math.sqrt(bx*bx+by*by+bz*bz)))
sign=180.0/math.pi*math.acos((nbx*(x2-x1)+nby*(y2-y1)+nbz*(z2-z1))/(math.sqrt(nbx*nbx+nby*nby+nbz*nbz)*math.sqrt((x2-x1)*(x2-x1)+(y2-y1)*(y2-y1)+(z2-z1)*(z2-z1))))
if sign<90.0: torsion=torsion*-1.0
return torsion
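# Quick sanity checks (exact by construction):
#   calcdist(0, 1, [[0,0,0],[3,4,0]])              -> 5.0
#   calcangle(0, 1, 2, [[1,0,0],[0,0,0],[0,1,0]])  -> 90.0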
###############################################################
# Filter post optimization - checks whether two conformers are identical on the basis of non-bonded distances and energy. Needs to consider equivalent coordinate descriptions
# also do enantiomers here
def checkSame(ConfSpec, CSearch, SearchParams, savedconf):
if not hasattr(ConfSpec, "CARTESIANS"): return 1
tordiff=0.0; besttordiff=180.0; sameval=0
if len(SearchParams.EQUI)==0:
#print "No equivalent coordinate descriptions"
torval1=getTorsion(ConfSpec)
#print ConfSpec.NAME
#print ConfSpec.CARTESIANS
#print torval1
#print CSearch.NAME[savedconf]
#print CSearch.CARTESIANS[savedconf]
#print CSearch.TORVAL[savedconf]
for x in range(0,len(torval1)):
difftor=math.sqrt((torval1[x]-CSearch.TORVAL[savedconf][x])*(torval1[x]-CSearch.TORVAL[savedconf][x]))
if difftor>180.0:
difftor=360.0-difftor
tordiff=tordiff + difftor*difftor
# print torval1[x], CSearch.TORVAL[savedconf][x], difftor
if len(torval1)!=0:
besttordiff=math.sqrt(tordiff/len(torval1))
#print "----------"
#print besttordiff
#print "----------"
else:
tempcart=[]
for i in range(0,len(ConfSpec.CARTESIANS)): tempcart.append(ConfSpec.CARTESIANS[i])
allposscoords=[tempcart]
#print len(allposscoords)
#print SearchParams.EQUI
equilist=[]
for i in range(0,len(SearchParams.EQUI)): equilist.append(SearchParams.EQUI[i])
for equivcoords in equilist:
equivstring=equivcoords.split(" ")
if len(equivstring)==3:
equilist.append(equivstring[0]+" "+equivstring[1])
equilist.append(equivstring[0]+" "+equivstring[2])
equilist.append(equivstring[1]+" "+equivstring[2])
#print equilist
for equivcoords in equilist:
#print equivcoords
equivstring=equivcoords.split(" ")
nequiv=len(equivstring)
if nequiv == 2:
equivloop=[]
for item in equivstring:
equivloop.append(int(item)-1)
for item in equivstring:
equivloop.append(int(item)-1)
for l in range(0,len(allposscoords)):
for i in range(1, nequiv):
orig=[]
swap=[]
for j in range(0,nequiv):
orig.append(equivloop[j])
swap.append(equivloop[i+j])
swappedcoords=[0]*len(ConfSpec.CARTESIANS)
for j in range(0,len(ConfSpec.CARTESIANS)):
swappedcoords[j]=allposscoords[l][j]
for k in range(0,nequiv):
if j == orig[k]:
#print "Interchanging coordinates",j, swap[k]
swappedcoords[j]=allposscoords[l][swap[k]]
#print swappedcoords[j]
#PossSpec = ConfSpec
#PossSpec.CARTESIANS = swappedcoords
#torval1=getTorsion(PossSpec)
#print torval1
allposscoords.append(swappedcoords)
#print "appending"
#print swappedcoords
#print len(allposscoords)
originaltorval=getTorsion(ConfSpec)
originalsum = 0.0
alteredsum = 0.0
for x in range(0,len(originaltorval)):
originalsum = originalsum + math.pow(originaltorval[x],2.0)
for poss in allposscoords:
#print "poss"
#for i in range(0,len(poss)):
# print ConfSpec.ATOMTYPES[i], poss[i][0], poss[i][1], poss[i][2]
PossSpec = ConfSpec
PossSpec.CARTESIANS = poss
torval1=getTorsion(PossSpec)
tordiff=0.0
alteredsum = 0.0
for y in range(0,len(torval1)):
alteredsum = alteredsum + math.pow(torval1[y],2.0)
if math.pow((originalsum-alteredsum),2.0) < 0.1:
print originalsum, alteredsum
print "comparing torsions"
for z in range(0,len(torval1)):
#print savedconf, "-", x, torval1[x], CSearch.TORVAL[savedconf][x]
difftor=math.sqrt((torval1[z]-CSearch.TORVAL[savedconf][z])*(torval1[z]-CSearch.TORVAL[savedconf][z]))
if difftor>180.0:
difftor=360.0-difftor
tordiff=tordiff + difftor*difftor
#print torval1[z], CSearch.TORVAL[savedconf][z], difftor
if len(torval1)!=0:
tordiff=math.sqrt(tordiff/len(torval1))
print tordiff
if tordiff<besttordiff:
besttordiff=tordiff
#this is a horrible hack which returns the cartesians back to before equivalent coordinate systems were considered....
ConfSpec.CARTESIANS = tempcart
#print " ----------"
#print " "+str(besttordiff)
#print " ----------"
#print ConfSpec.NAME, CSearch.NAME[savedconf], besttordiff
if besttordiff<SearchParams.COMP: sameval=sameval+1
return sameval
#Check the stereochemistry has not been changed
def checkchir(ConfSpec, MolSpec, CSearch, SearchParams):
epimerized = 0; epimatom = 0
for i in range(0,len(ConfSpec.CARTESIANS)):
if MolSpec.ATOMTYPES[i] == "C":
if len(MolSpec.CONNECTIVITY[i]) == 4:
#print "\nATOM", MolSpec.ATOMTYPES[i], (i),
abcd = []; types = []
for partners in MolSpec.CONNECTIVITY[i]:
abcd.append(int(partners.split("__")[0])-1)
types.append(MolSpec.ATOMTYPES[int(partners.split("__")[0])-1])
numh = 0
for type in types:
if type == "H": numh = numh + 1
if numh <= 1:
#print " Computing dihedral angle between", abcd
if math.fabs(calcdihedral(abcd[0],abcd[1],abcd[2],abcd[3],ConfSpec.CARTESIANS) - calcdihedral(abcd[0],abcd[1],abcd[2],abcd[3],MolSpec.CARTESIANS)) > 10.0:
print " POSSIBLY EPIMERIZED!",i, abcd, types
print calcdihedral(abcd[0],abcd[1],abcd[2],abcd[3],ConfSpec.CARTESIANS), calcdihedral(abcd[0],abcd[1],abcd[2],abcd[3],MolSpec.CARTESIANS)
epimerized = 1; epimatom = (i+1)
return [epimerized,epimatom]
#Check the connectivity and compare to the starting structure
def checkconn(ConfSpec, MolSpec, CSearch, SearchParams):
checkval=0
a = "X"; b = 0; c = "X"; d = 0
for i in range(0,len(ConfSpec.CARTESIANS)):
bondedatomlist=[]
for partners in MolSpec.CONNECTIVITY[i]: bondedatomlist.append(int(partners.split("__")[0])-1)
nonbondedlist=[]
for j in range(i+1,len(ConfSpec.CARTESIANS)):
bond=0
nonbondedlist.append(j)
for bondedatom in bondedatomlist:
#This deals with breaking existing bonds
if j==bondedatom:
#print (i+1), (j+1)
nonbondedlist.pop()
xdist=float(ConfSpec.CARTESIANS[i][0])-float(ConfSpec.CARTESIANS[j][0])
ydist=float(ConfSpec.CARTESIANS[i][1])-float(ConfSpec.CARTESIANS[j][1])
zdist=float(ConfSpec.CARTESIANS[i][2])-float(ConfSpec.CARTESIANS[j][2])
totdist=math.sqrt(xdist*xdist+ydist*ydist+zdist*zdist)
#print totdist
origxdist=float(CSearch.CARTESIANS[0][i][0])-float(CSearch.CARTESIANS[0][j][0])
origydist=float(CSearch.CARTESIANS[0][i][1])-float(CSearch.CARTESIANS[0][j][1])
origzdist=float(CSearch.CARTESIANS[0][i][2])-float(CSearch.CARTESIANS[0][j][2])
origdist=math.sqrt(origxdist*origxdist+origydist*origydist+origzdist*origzdist)
#print origdist
if (totdist-origdist)>0.5*origdist:
#print "Looks like ",MolSpec.ATOMTYPES[i],(i+1)," has broken from ",MolSpec.ATOMTYPES[j],(j+1)
a = MolSpec.ATOMTYPES[i]; b = (i+1); c = MolSpec.ATOMTYPES[j]; d = (j+1)
checkval=checkval+1
#if HSWAP!=0:
# if MolSpec.ATOMTYPES[i]=="N" or MolSpec.ATOMTYPES[i]=="O" or MolSpec.ATOMTYPES[i]=="S":
# if MolSpec.ATOMTYPES[j]=="H":
#Has an acidic proton swapped positions?
#for k in range(0,len(ConfSpec.CARTESIANS)):
#if k!=i and k!=j:
#hxdist=float(ConfSpec.CARTESIANS[k][0])-float(ConfSpec.CARTESIANS[j][0])
#hydist=float(ConfSpec.CARTESIANS[k][1])-float(ConfSpec.CARTESIANS[j][1])
#hzdist=float(ConfSpec.CARTESIANS[k][2])-float(ConfSpec.CARTESIANS[j][2])
#htotdist=math.sqrt(hxdist*hxdist+hydist*hydist+hzdist*hzdist)
#print htotdist
#if htotdist<0.5*(bondiradius(atomicnumber(MolSpec.ATOMTYPES[i]))+bondiradius(atomicnumber(MolSpec.ATOMTYPES[j]))):
#print "Looks like ",MolSpec.ATOMTYPES[i],(i+1)," has broken from ",MolSpec.ATOMTYPES[j],(j+1)
# breakbond.append([i+1,j+1])
# checkval=checkval-1
# if MolSpec.ATOMTYPES[j]=="N" or MolSpec.ATOMTYPES[j]=="O" or MolSpec.ATOMTYPES[j]=="S":
# if MolSpec.ATOMTYPES[i]=="H":
#Has an acidic proton swapped positions?
#for k in range(0,len(ConfSpec.CARTESIANS)):
#if k!=j and k!=i:
#hxdist=float(ConfSpec.CARTESIANS[k][0])-float(ConfSpec.CARTESIANS[i][0])
#hydist=float(ConfSpec.CARTESIANS[k][1])-float(ConfSpec.CARTESIANS[i][1])
#hzdist=float(ConfSpec.CARTESIANS[k][2])-float(ConfSpec.CARTESIANS[i][2])
#htotdist=math.sqrt(hxdist*hxdist+hydist*hydist+hzdist*hzdist)
#if htotdist<0.5*(bondiradius(atomicnumber(MolSpec.ATOMTYPES[i]))+bondiradius(atomicnumber(MolSpec.ATOMTYPES[k]))):
#print "Looks like ",MolSpec.ATOMTYPES[i],(i+1)," has broken from ",MolSpec.ATOMTYPES[j],(j+1)
#breakbond.append([i+1,j+1])
#checkval=checkval-1
#This deals with forming new bonds
for j in nonbondedlist:
xdist=float(ConfSpec.CARTESIANS[i][0])-float(ConfSpec.CARTESIANS[j][0])
ydist=float(ConfSpec.CARTESIANS[i][1])-float(ConfSpec.CARTESIANS[j][1])
zdist=float(ConfSpec.CARTESIANS[i][2])-float(ConfSpec.CARTESIANS[j][2])
totdist=math.sqrt(xdist*xdist+ydist*ydist+zdist*zdist)
if totdist<0.5*(bondiRadius(atomicnumber(MolSpec.ATOMTYPES[i]))+bondiRadius(atomicnumber(MolSpec.ATOMTYPES[j]))):
print "Looks like ",MolSpec.ATOMTYPES[i],(i+1),":",MolSpec.ATOMTYPES[j],(j+1),"have formed a new bond"
checkval=checkval+1
if SearchParams.NNBO == 0:
if MolSpec.ATOMTYPES[i]=="N" or MolSpec.ATOMTYPES[i]=="O" or MolSpec.ATOMTYPES[i]=="S":
if MolSpec.ATOMTYPES[j]=="H": checkval=checkval-1
if MolSpec.ATOMTYPES[j]=="N" or MolSpec.ATOMTYPES[j]=="O" or MolSpec.ATOMTYPES[j]=="S":
if MolSpec.ATOMTYPES[i]=="H": checkval=checkval-1
return [checkval, str(a), str(b),str(c),str(d)]
# Returns a matrix of dihedral angles (with sign) given connectivity and coordinates in numerical order. Only between heavy atoms and NH, OH and SH protons
def getTorsion(MolSpec):
torval=[]
for atoma in range(0,len(MolSpec.CARTESIANS)):
for partner1 in MolSpec.CONNECTIVITY[atoma]:
atomb = int(partner1.split("__")[0])-1
for partner2 in MolSpec.CONNECTIVITY[atomb]:
atomc = int(partner2.split("__")[0])-1
if atomc!=atoma:
for partner3 in MolSpec.CONNECTIVITY[atomc]:
atomd = int(partner3.split("__")[0])-1
if atomd>atoma and atomd!=atomb:
if MolSpec.ATOMTYPES[atoma]=="H" and MolSpec.ATOMTYPES[atomb]=="C": ignore=1
elif MolSpec.ATOMTYPES[atomd]=="H" and MolSpec.ATOMTYPES[atomc]=="C": ignore=1
else:
endA=""
endD=""
for endAatom in MolSpec.CONNECTIVITY[atomb]:
if (int(endAatom.split("__")[0])-1)!=atomc:
endA = endA+MolSpec.ATOMTYPES[int(endAatom.split("__")[0])-1]
for endDatom in MolSpec.CONNECTIVITY[atomc]:
if (int(endDatom.split("__")[0])-1)!=atomb:endD = endD+MolSpec.ATOMTYPES[int(endDatom.split("__")[0])-1]
if endA!="HHH" and endD!="HHH":
torsion=calcdihedral(atoma,atomb,atomc,atomd,MolSpec.CARTESIANS)
#print (MolSpec.ATOMTYPES[atoma],atoma, MolSpec.ATOMTYPES[atomb],atomb, MolSpec.ATOMTYPES[atomc],atomc, MolSpec.ATOMTYPES[atomd],atomd, torsion)
torval.append(torsion)
return torval
def make_overwrite_tooltip(plot_args: PlotArgs):
    node = plot_args.nx_item
assert node in plot_args.solution
val = plot_args.solution.overwrites[node]
val_str = "\n ".join(f"{i}. {v}" for i, v in reversed(list(enumerate(val))))
tooltip = f"(x{len(val)} overwrites) {val_str}"
return quote_html_tooltips(tooltip)
def make_op_prune_comment(plot_args: PlotArgs):
op = plot_args.nx_item
sol = plot_args.solution
plottable = plot_args.plottable
comments = ()
if hasattr(sol, "plan") and op in sol.plan.comments:
comments = sol.plan.comments
elif op in getattr(plottable, "comments", ()):
comments = plottable.comments
if comments:
return f"(pruned due to {comments[op]})"
def make_op_tooltip(plot_args: PlotArgs):
"""the string-representation of an operation (name, needs, provides)"""
return plot_args.nx_attrs.get("_op_tooltip", str(plot_args.nx_item))
def make_fn_tooltip(plot_args: PlotArgs):
"""the sources of the operation-function"""
if "_fn_tooltip" in plot_args.nx_attrs:
return plot_args.nx_attrs["_fn_tooltip"]
return func_source(plot_args.nx_item.fn, None, human=1)
class Theme:
"""
The poor man's css-like :term:`plot theme` (see also :class:`.StyleStack`).
Tip: `Graphviz node-attributes <https://graphviz.org/doc/info/attrs.html>`_
To use the values contained in theme-instances, stack them in a :class:`.StylesStack`,
and :meth:`.StylesStack.merge` them with :term:`style expansion`\\s
(read it from :meth:`.StyleStack.expand()`).
.. theme-warn-start
.. Attention::
It is recommended to use other means for :ref:`plot-customizations`
instead of modifying directly theme's class-attributes.
All :class:`Theme` *class-attributes* are deep-copied when constructing
new instances, to avoid modifications by mistake, while attempting to update
*instance-attributes* instead
(*hint:* almost all its attributes are containers, i.e. dicts).
Therefore any class-attribute modification will be ignored until a new
``Theme`` instance of the patched class is used.
.. theme-warn-end
"""
##########
## VARIABLES
#: args for the patched jinja2 `truncate` filter, above.
truncate_args = ((23, True), {"reverse": True})
fill_color = "wheat"
null_color = "#ffa9cd"
sideffect_color = "blue"
subdoc_color = "#8B4513" # SaddleBrown
pruned_color = "#d3d3d3" # LightGrey
canceled_color = "#a9a9a9" # DarkGray
failed_color = "LightCoral"
resched_thickness = 4
broken_color = "Red"
overwrite_color = "SkyBlue"
steps_color = "#00bbbb"
vector_color = "#7193ff" # pandas logo
evicted_color = "#006666"
#: the url to the architecture section explaining *graphtik* glossary,
#: linked by legend.
arch_url = "https://graphtik.readthedocs.io/en/latest/arch.html"
##########
## GRAPH
kw_graph = {
"graph_type": "digraph",
"fontname": "italic",
## Whether to plot `curved/polyline edges
# <https://graphviz.gitlab.io/_pages/doc/info/attrs.html#d:splines>`_
# BUT disabled due to crashes:
# https://gitlab.com/graphviz/graphviz/issues/1408
# "splines": "ortho",
}
#: styles per plot-type
kw_graph_plottable_type = {
"FnOp": {},
"Pipeline": {},
"Network": {},
"ExecutionPlan": {},
"Solution": {},
}
#: For when type-name of :attr:`PlotArgs.plottable` is not found
#: in :attr:`.kw_plottable_type` (or missing altogether).
kw_graph_plottable_type_unknown = {}
#: Attributes applying to all nodes with ``node [...]`` graphviz construct,
#: appended in graph only if non-empty.
node_defaults = {"style": ["filled"], "fillcolor": "white"}
#: Attributes applying to all edges with ``edge [...]`` graphviz construct,
#: appended in graph only if non-empty.
edge_defaults = {}
##########
## DATA node
##
#: Keys to ignore from data styles & node-attrs,
#: because they are handled internally by HTML-Label, and/or
#: interact badly with that label.
data_bad_html_label_keys = {"label"}
#: Jinja2 params for the HTML-Table label
kw_data_label = {}
#: Reduce margins, since sideffects take a lot of space
#: (default margin: x=0.11, y=0.055)
kw_data = {
"shape": "rect",
"fixedsize": "shape",
}
kw_data_inp = {}
kw_data_out = {}
kw_data_inp_only = {"shape": "invhouse", "tooltip": ["(input)"]}
kw_data_out_only = {"shape": "house", "tooltip": ["(output)"]}
kw_data_io = {
"shape": "hexagon",
"tooltip": ["(input+output)"],
}
kw_data_sideffect = {
"color": Ref("sideffect_color"),
"fontcolor": Ref("sideffect_color"),
}
kw_data_sideffected = {}
kw_data_to_evict = {
"color": Ref("evicted_color"),
"style": ["filled", "dashed"],
"tooltip": ["(to evict)"],
}
##
## data STATE
##
kw_data_pruned = {
"fontcolor": Ref("pruned_color"),
"color": Ref("pruned_color"),
"tooltip": ["(pruned)"],
}
kw_data_in_solution = {
"style": ["filled"],
"fillcolor": Ref("fill_color"),
"tooltip": [make_data_value_tooltip],
}
kw_data_in_solution_null = {
"fillcolor": Ref("null_color"),
"tooltip": ["(null-result)"],
}
kw_data_evicted = {"penwidth": "3", "tooltip": ["(evicted)"]}
kw_data_overwritten = {
"style": ["filled"],
"fillcolor": Ref("overwrite_color"),
"tooltip": [make_overwrite_tooltip],
}
kw_data_missing = {
"fontcolor": Ref("canceled_color"),
"color": Ref("canceled_color"),
"tooltip": ["(missing-optional or canceled)"],
}
data_template = make_template(
"""\
<<TABLE CELLBORDER="0" CELLSPACING="0" BORDER="0">
<TR>
{%- if solution and nx_item in solution and solution[nx_item]|attr('shape') -%}
{%- set val = solution[nx_item] %}
<TD STYLE="rounded" CELLSPACING="0" CELLPADDING="0" WIDTH="8"
TITLE="{{- val | vector_info | eee -}}"
TARGET="_top"
><FONT FACE="monospace" COLOR="{{ vector_color | eee }}"><B>#</B></FONT></TD>
{%- endif -%}
{%- if val is defined and val.index is defined %}
<TD STYLE="rounded" CELLSPACING="0" CELLPADDING="0" WIDTH="8"
TITLE="{{- val.index | string | eee -}}"
TARGET="_top"
><FONT FACE="monospace" COLOR="{{ vector_color | eee }}"><B>R</B></FONT></TD>
{%- endif -%}
{%- if val is defined and val.columns is defined %}
<TD STYLE="rounded" CELLSPACING="0" CELLPADDING="0" WIDTH="8"
TITLE="{{- val.columns | string | eee -}}"
TARGET="_top"
><FONT FACE="monospace" COLOR="{{ vector_color | eee }}"><B>C</B></FONT></TD>
{%- endif -%}
{%- if steps and nx_item in steps %}
<TD STYLE="rounded" CELLSPACING="2" CELLPADDING="4"
{{- {
'BGCOLOR': step_bgcolor | eee,
'TITLE': step_tooltip | eee,
'HREF': step_url | hrefer | ee,
'TARGET': step_target | eee
} | xmlattr }}
><FONT FACE="monospace" COLOR="{{ step_color | eee }}"><B>
{{- steps.index(nx_item) -}}
</B></FONT></TD>
{%- endif -%}
<TD>
{%- if nx_item | jsonp -%}
{%- for js_step in nx_item | jsonp -%}
{%- if loop.first -%}
{{- js_step | truncate -}}
/
{%- else -%}
{{- '\n' -}}
{{- ' ' * (loop.index - 1) -}}
+--
{{- js_step | truncate -}}
{%- if not loop.last -%}
/
{%- endif -%}
{%- endif -%}
<BR ALIGN="LEFT"/>
{%- endfor -%}
{%- elif nx_item | sfxed -%}
{{- nx_item | sideffected | truncate -}}
{%- else -%}
{{- nx_item | truncate -}}
{%- endif -%}
{%- if nx_item | sfxed -%}
<BR/>(<I>sfxed:</I> {{ nx_item | sfx_list | join(', ') | eee }})
{%- endif -%}
</TD>
</TR>
</TABLE>>
"""
)
##########
## OPERATION node
##
#: Keys to ignore from operation styles & node-attrs,
#: because they are handled internally by HTML-Label, and/or
#: interact badly with that label.
op_bad_html_label_keys = {"shape", "label", "style"}
#: props for operation node (outside of label)
kw_op = {
"name": lambda pa: quote_node_id(pa.nx_item.name),
"shape": "plain", # dictated by Graphviz docs
# Set some base tooltip, or else, "TABLE" shown...
"tooltip": [lambda pa: graphviz_html_string(pa.nx_item.name)],
}
kw_op_executed = {"fillcolor": Ref("fill_color")}
kw_op_endured = {
"penwidth": Ref("resched_thickness"),
"style": ["dashed"],
"tooltip": ["(endured)"],
"badges": ["!"],
}
kw_op_rescheduled = {
"penwidth": Ref("resched_thickness"),
"style": ["dashed"],
"tooltip": ["(rescheduled)"],
"badges": ["?"],
}
kw_op_parallel = {"badges": ["|"]}
kw_op_marshalled = {"badges": ["&"]}
kw_op_returns_dict = {"badges": ["}"]}
##
## op STATE
##
kw_op_pruned = {"color": Ref("pruned_color"), "fontcolor": Ref("pruned_color")}
kw_op_prune_comment = {"op_tooltip": [make_op_prune_comment]}
kw_op_failed = {
"fillcolor": Ref("failed_color"),
"tooltip": [make_template("{{ solution.executed[nx_item] if solution | ex }}")],
}
kw_op_canceled = {"fillcolor": Ref("canceled_color"), "tooltip": ["(canceled)"]}
#: Operation styles may specify one or more "letters"
#: in a `badges` list item, as long as the "letter" is contained in the dictionary
#: below.
op_badge_styles = {
"badge_styles": {
"!": {
"tooltip": "endured",
"bgcolor": "#04277d",
"color": "white",
"URL": "https://graphtik.readthedocs.io/en/latest/arch.html#term-endured",
"target": "_top",
},
"?": {
"tooltip": "rescheduled",
"bgcolor": "#fc89ac",
"color": "white",
"URL": "https://graphtik.readthedocs.io/en/latest/arch.html#term-partial-outputs",
"target": "_top",
},
"|": {
"tooltip": "parallel",
"bgcolor": "#b1ce9a",
"color": "white",
"URL": "https://graphtik.readthedocs.io/en/latest/arch.html#term-parallel-execution",
"target": "_top",
},
"&": {
"tooltip": "marshalled",
"bgcolor": "#4e3165",
"color": "white",
"URL": "https://graphtik.readthedocs.io/en/latest/arch.html#term-marshalling",
"target": "_top",
},
"}": {
"tooltip": "returns_dict",
"bgcolor": "#cc5500",
"color": "white",
"URL": "https://graphtik.readthedocs.io/en/latest/arch.html#term-returns-dictionary",
"target": "_top",
},
}
}
#: Jinja2 params for the HTML-Table label, applied 1ST.
kw_op_label = {
"op_name": lambda pa: pa.nx_item.name,
"fn_name": lambda pa: pa.nx_item
and func_name(pa.nx_item.fn, mod=1, fqdn=1, human=1),
"op_truncate": Ref("truncate_args"),
"fn_truncate": Ref("truncate_args"),
"op_url": Ref("op_url", default=None),
"op_link_target": "_top",
"fn_url": Ref("fn_url", default=None),
"fn_link_target": "_top",
}
#: Jinja2 params for the HTML-Table label applied AT THE END.
kw_op_label2 = {
"op_tooltip": [make_op_tooltip], # ensure
"fn_tooltip": [make_fn_tooltip],
}
#: Try to mimic a regular `Graphviz`_ node attributes
#: (see examples in ``test.test_plot.test_op_template_full()`` for params).
#: TODO: fix jinja2 template is un-picklable!
op_template = make_template(
"""\
<<TABLE CELLBORDER="0" CELLSPACING="0" STYLE="rounded"
{{- {
'BORDER': penwidth | ee,
'COLOR': color | ee,
'BGCOLOR': fillcolor | ee
} | xmlattr -}}>
<TR>
<TD BORDER="1" SIDES="b" ALIGN="left"
{{- {
'TOOLTIP': op_tooltip | truncate | eee,
'HREF': op_url | hrefer | ee,
'TARGET': op_link_target | eee
} | xmlattr }}
>
{%- if fontcolor -%}<FONT COLOR="{{ fontcolor }}">{%- endif -%}
{{- '<B>OP:</B> <I>%s</I>' % op_name | truncate(*op_truncate[0], **op_truncate[1]) | ee if op_name -}}
{%- if fontcolor -%}</FONT>{%- endif -%}
</TD>
<TD BORDER="1" SIDES="b" ALIGN="right">
{%- if badges or (steps and op_name in steps) -%}
<TABLE BORDER="0" CELLBORDER="0" CELLSPACING="1" CELLPADDING="2" ALIGN="right">
<TR>
{%- if | |
3, 5, 6, 9], None),
("21", [[3, 4], [6, 9], [4, 8]], ["22", "20", "21"], None, [4, 5, 8], "21"),
("21", [[3, 4], [5, 8], [4, 8]], ["22", None, None], None, [4, 5, 8], "21"),
("21", [[3, 4], [3, 8], [4, 8]], [None, None, None], None, [3, 4, 8], "21"),
(None, [[3, 4], [6, 9], [4, 8]], ["22", "20", "21"], None, [4, 5, 8], "21"),
(None, [[3, 4], [3, 8], [4, 8]], [None, None, None], None, [3, 4, 8], None),
# Contains static systems
("21", [[3, 4], None, [4, 8]], ["22", None, "21"], None, [4, 5, 8], "21"),
("21", [[3, 4], None, [4, 8]], ["22", None, None], None, [4, 5, 8], "21"),
("21", [[3, 4], None, [4, 8]], [None, None, None], None, [3, 4, 8], "21"),
(None, [[3, 4], None, [4, 8]], ["22", None, "21"], None, [4, 5, 8], "21"),
(None, [[3, 4], None, [4, 8]], [None, None, None], None, [3, 4, 8], None),
# include only specific edges
("21", [[3, 4], None, [4, 8]], ["22", None, "21"], [0, 1], [4, 5], "21"),
("21", [[3, 4], None, [4, 8]], ["22", None, None], [0, 1], [4, 5], "21"),
("21", [[3, 4], None, [4, 8]], [None, None, None], [0, 1], [3, 4], "21"),
(None, [[3, 4], None, [4, 8]], ["22", None, "21"], [0, 1], [4, 5], "21"),
(None, [[3, 4], None, [4, 8]], [None, None, None], [0, 1], [3, 4], None),
("21", [[3, 4], None, [4, 8]], ["22", None, "21"], [0, 2], [4, 5, 8], "21"),
("21", [[3, 4], None, [4, 8]], ["22", None, None], [0, 2], [4, 5, 8], "21"),
("21", [[3, 4], None, [4, 8]], [None, None, None], [0, 2], [3, 4, 8], "21"),
(None, [[3, 4], None, [4, 8]], ["22", None, "21"], [0, 2], [4, 5, 8], "21"),
(None, [[3, 4], None, [4, 8]], [None, None, None], [0, 2], [3, 4, 8], None),
("21", [[3, 4], None, [4, 8]], ["22", None, "21"], [1, 2], [4, 8], "21"),
("21", [[3, 4], None, [4, 8]], ["22", None, None], [1, 2], [4, 8], "21"),
("21", [[3, 4], None, [4, 8]], [None, None, None], [1, 2], [4, 8], "21"),
(None, [[3, 4], None, [4, 8]], ["22", None, "21"], [1, 2], [4, 8], "21"),
(None, [[3, 4], None, [4, 8]], [None, None, None], [1, 2], [4, 8], None),
],
)
def test_time_union(
csm_ref_time_day,
lcs_times,
lcs_ref_time_days,
edges,
exp_time,
exp_ref_time_day,
):
"""Test the time_union function of the CSM.
Parameters
----------
csm_ref_time_day : str
An arbitrary day number string in the range [1, 31] or `None`. The value is
used to create the reference timestamp of the CSM
lcs_times : List
A list containing an arbitrary number of time delta values (days) that are
used to create a corresponding number of `LocalCoordinateSystem` instances
which are added to the CSM. If a value is `None`, the generated coordinate
system will be static
lcs_ref_time_days : List
A list where the values are either arbitrary day number strings in the range
[1, 31] or `None`. Those values are used to create the reference timestamps
for the coordinate systems of the CSM. The list must have the same length as
the one passed to the ``lcs_times`` parameter
edges : List
A list that specifies the indices of the ``lcs_times`` parameter that should
be considered in the time union. If `None` is passed, all are used. Note
that the information is used to create the correct inputs to the
``time_union`` function and isn't passed directly.
exp_time : List
A list containing time delta values (days) that are used to generate the
expected result data
exp_ref_time_day : str
An arbitrary day number string in the range [1, 31] or `None`. The value is
used as reference time to create the expected result data. If it is set to
`None`, the expected result data type is a `pandas.TimedeltaIndex` and a
`pandas.DatetimeIndex` otherwise
"""
# create full time data
csm_time_ref = None
if csm_ref_time_day is not None:
csm_time_ref = f"2010-03-{csm_ref_time_day}"
lcs_time_ref = [None for _ in range(len(lcs_times))]
for i, _ in enumerate(lcs_times):
if lcs_times[i] is not None:
lcs_times[i] = pd.TimedeltaIndex(lcs_times[i], "D")
if lcs_ref_time_days[i] is not None:
lcs_time_ref[i] = pd.Timestamp(f"2010-03-{lcs_ref_time_days[i]}")
# create coordinate systems
lcs = []
for i, _ in enumerate(lcs_times):
if isinstance(lcs_times[i], pd.TimedeltaIndex):
coordinates = [[j, j, j] for j in range(len(lcs_times[i]))]
else:
coordinates = [1, 2, 3]
lcs += [
tf.LocalCoordinateSystem(
None,
coordinates,
lcs_times[i],
lcs_time_ref[i],
)
]
# create CSM and add coordinate systems
csm = tf.CoordinateSystemManager("root", "base", csm_time_ref)
for i in range(len(lcs_times)):
csm.add_cs(f"lcs_{i}", "root", lcs[i])
# create expected data type
exp_time = pd.TimedeltaIndex(exp_time, "D")
if exp_ref_time_day is not None:
exp_time = pd.Timestamp(f"2010-03-{exp_ref_time_day}") + exp_time
# create correct list of edges
if edges is not None:
for i, _ in enumerate(edges):
edges[i] = ("root", f"lcs_{edges[i]}")
# check time_union result
assert np.all(csm.time_union(list_of_edges=edges) == exp_time)
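# Note on the expected-data construction above: adding a `pandas.Timestamp`
# to a `pandas.TimedeltaIndex` yields a `pandas.DatetimeIndex`, which is why
# `exp_ref_time_day` toggles the expected result dtype. A minimal sketch,
# independent of this test class:
#   pd.Timestamp("2010-03-21") + pd.TimedeltaIndex([4, 5, 8], "D")
#   # -> DatetimeIndex(['2010-03-25', '2010-03-26', '2010-03-29'], dtype='datetime64[ns]', freq=None)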
# test_get_local_coordinate_system_no_time_dep -------------------------------------
@staticmethod
@pytest.mark.parametrize(
"system_name, reference_name, exp_orientation, exp_coordinates",
[
("lcs_1", None, r_mat_z(0.5), [1, 2, 3]),
("lcs_2", None, r_mat_y(0.5), [3, -3, 1]),
("lcs_3", None, r_mat_x(0.5), [1, -1, 3]),
("lcs_3", "root", [[0, 1, 0], [0, 0, -1], [-1, 0, 0]], [6, -4, 0]),
("root", "lcs_3", [[0, 0, -1], [1, 0, 0], [0, -1, 0]], [0, -6, -4]),
("lcs_3", "lcs_1", [[0, 0, -1], [0, -1, 0], [-1, 0, 0]], [-6, -5, -3]),
("lcs_1", "lcs_3", [[0, 0, -1], [0, -1, 0], [-1, 0, 0]], [-3, -5, -6]),
],
)
def test_get_local_coordinate_system_no_time_dep(
system_name, reference_name, exp_orientation, exp_coordinates
):
"""Test the ``get_cs`` function without time dependencies.
Have a look into the tests setup section to see which coordinate systems are
defined in the CSM.
Parameters
----------
system_name : str
Name of the system that should be returned
reference_name : str
Name of the reference system
exp_orientation : List or numpy.ndarray
The expected orientation of the returned system
exp_coordinates
The expected coordinates of the returned system
"""
# setup
csm = tf.CoordinateSystemManager(root_coordinate_system_name="root")
csm.create_cs("lcs_1", "root", r_mat_z(0.5), [1, 2, 3])
csm.create_cs("lcs_2", "root", r_mat_y(0.5), [3, -3, 1])
csm.create_cs("lcs_3", "lcs_2", r_mat_x(0.5), [1, -1, 3])
check_coordinate_system(
csm.get_cs(system_name, reference_name),
exp_orientation,
exp_coordinates,
True,
)
# test_get_local_coordinate_system_time_dep -------------------------------------
@staticmethod
@pytest.mark.parametrize(
"function_arguments, time_refs, exp_orientation, exp_coordinates,"
"exp_time_data, exp_failure",
[
# get cs in its parent system - no reference times
(
("cs_1",),
[None, None, None, None],
[np.eye(3) for _ in range(3)],
[[i, 0, 0] for i in [0, 0.25, 1]],
([0, 3, 12], None),
False,
),
# get cs in its parent system - only CSM has reference time
(
("cs_1",),
["2000-03-03", None, None, None],
[np.eye(3) for _ in range(3)],
[[i, 0, 0] for i in [0, 0.25, 1]],
([0, 3, 12], "2000-03-03"),
False,
),
# get cs in its parent system - only system has reference time
(
("cs_1",),
[None, "2000-03-03", "2000-03-03", "2000-03-03"],
[np.eye(3) for _ in range(3)],
[[i, 0, 0] for i in [0, 0.25, 1]],
([0, 3, 12], "2000-03-03"),
False,
),
# get cs in its parent system - function and CSM have reference times
(
("cs_1", None, pd.TimedeltaIndex([6, 9, 18], "D"), "2000-03-10"),
["2000-03-16", None, None, None],
[np.eye(3) for _ in range(3)],
[[i, 0, 0] for i in [0, 0.25, 1]],
([6, 9, 18], "2000-03-10"),
False,
),
# get cs in its parent system - system and CSM have diff. reference times
(
("cs_1",),
["2000-03-10", "2000-03-16", None, None],
[np.eye(3) for _ in range(3)],
[[i, 0, 0] for i in [0, 0.25, 1]],
([6, 9, 18], "2000-03-10"),
False,
),
# get transformed cs - no reference times
(
("cs_3", "root"),
[None, None, None, None],
[np.eye(3) for _ in range(7)],
[[1, 0, 0] for _ in range(7)],
([0, 3, 4, 6, 8, 9, 12], None),
False,
),
# get transformed cs - only CSM has reference time
(
("cs_3", "root"),
["2000-03-10", None, None, None],
[np.eye(3) for _ in range(7)],
[[1, 0, 0] for _ in range(7)],
([0, 3, 4, 6, 8, 9, 12], "2000-03-10"),
False,
),
# get transformed cs - CSM and two systems have a reference time
(
("cs_3", "root"),
["2000-03-10", "2000-03-04", None, "2000-03-16"],
'918380':{'en': 'Idea'},
'559999641':{'en': 'Oi'},
'9178882':{'en': 'Idea'},
'559999643':{'en': 'Oi'},
'559999642':{'en': 'Oi'},
'559999645':{'en': 'Oi'},
'559999644':{'en': 'Oi'},
'559999647':{'en': 'Oi'},
'559999646':{'en': 'Oi'},
'918220':{'en': 'Airtel'},
'658571':{'en': 'StarHub'},
'658570':{'en': 'StarHub'},
'5582988':{'en': 'Oi'},
'5582989':{'en': 'Oi'},
'5582986':{'en': 'Oi'},
'5582987':{'en': 'Oi'},
'5582985':{'en': 'Oi'},
'918222':{'en': 'Idea'},
'62481991':{'en': 'Esia'},
'62481990':{'en': 'Esia'},
'62481993':{'en': 'Esia'},
'62481992':{'en': 'Esia'},
'62481994':{'en': 'Esia'},
'918224':{'en': 'Idea'},
'9175909':{'en': 'Idea'},
'9174792':{'en': 'Vodafone'},
'556298131':{'en': 'TIM'},
'9175505':{'en': 'Aircel'},
'9174793':{'en': 'Vodafone'},
'6223291':{'en': 'Esia'},
'918227':{'en': 'Idea'},
'9174790':{'en': 'Vodafone'},
'558599932':{'en': 'TIM'},
'558599933':{'en': 'TIM'},
'558599931':{'en': 'TIM'},
'558599936':{'en': 'TIM'},
'558599937':{'en': 'TIM'},
'558599934':{'en': 'TIM'},
'558599935':{'en': 'TIM'},
'9174796':{'en': 'Telenor'},
'558599938':{'en': 'TIM'},
'558599939':{'en': 'TIM'},
'9174797':{'en': 'Telenor'},
'9177850':{'en': 'Dishnet'},
'9174794':{'en': 'Telenor'},
'9177858':{'en': 'Dishnet'},
'9177859':{'en': 'Dishnet'},
'9176112':{'en': 'Aircel'},
'9175400':{'en': 'Airtel'},
'55549921':{'en': 'Claro BR'},
'55549920':{'en': 'Claro BR'},
'9175406':{'en': 'Dishnet'},
'852661':{'en': 'China Unicom', 'zh': u('\u4e2d\u56fd\u8054\u901a'), 'zh_Hant': u('\u4e2d\u570b\u806f\u901a')},
'852660':{'en': 'PCCW Mobile', 'zh': u('\u9999\u6e2f\u79fb\u52a8\u901a\u8baf'), 'zh_Hant': u('\u9999\u6e2f\u79fb\u52d5\u901a\u8a0a')},
'852663':{'en': 'China Unicom', 'zh': u('\u4e2d\u56fd\u8054\u901a'), 'zh_Hant': u('\u4e2d\u570b\u806f\u901a')},
'852662':{'en': 'PCCW Mobile', 'zh': u('\u9999\u6e2f\u79fb\u52a8\u901a\u8baf'), 'zh_Hant': u('\u9999\u6e2f\u79fb\u52d5\u901a\u8a0a')},
'852665':{'en': '1O1O / One2Free', 'zh': '1O1O / One2Free', 'zh_Hant': '1O1O / One2Free'},
'852664':{'en': 'China Mobile', 'zh': u('\u4e2d\u56fd\u79fb\u52a8'), 'zh_Hant': u('\u4e2d\u570b\u79fb\u52d5')},
'852667':{'en': 'China Mobile', 'zh': u('\u4e2d\u56fd\u79fb\u52a8'), 'zh_Hant': u('\u4e2d\u570b\u79fb\u52d5')},
'852669':{'en': 'PCCW Mobile', 'zh': u('\u9999\u6e2f\u79fb\u52a8\u901a\u8baf'), 'zh_Hant': u('\u9999\u6e2f\u79fb\u52d5\u901a\u8a0a')},
'852668':{'en': '3', 'zh': '3', 'zh_Hant': '3'},
'9183674':{'en': 'Idea'},
'65963':{'en': 'SingTel'},
'65961':{'en': 'SingTel'},
'65967':{'en': 'SingTel'},
'65966':{'en': 'SingTel'},
'65965':{'en': 'SingTel'},
'65964':{'en': 'SingTel'},
'65969':{'en': 'M1'},
'65968':{'en': 'M1'},
'559599933':{'en': 'Oi'},
'559398115':{'en': 'TIM'},
'9177668':{'en': 'Airtel'},
'9174002':{'en': 'Airtel'},
'9174003':{'en': 'Airtel'},
'9174000':{'en': 'Airtel'},
'9174001':{'en': 'Airtel'},
'9174006':{'en': 'Tata Docomo'},
'9174007':{'en': 'Tata Docomo'},
'9174004':{'en': 'Airtel'},
'9174005':{'en': 'Tata Docomo'},
'9174008':{'en': 'Tata Docomo'},
'9174009':{'en': 'Tata Docomo'},
'559398111':{'en': 'TIM'},
'918387':{'en': 'Aircel'},
'556998438':{'en': 'Brasil Telecom GSM'},
'556998439':{'en': 'Brasil Telecom GSM'},
'5585987':{'en': 'Oi'},
'5585986':{'en': 'Oi'},
'5585985':{'en': 'Oi'},
'556798141':{'en': 'TIM'},
'556998432':{'en': 'Brasil Telecom GSM'},
'556998433':{'en': 'Brasil Telecom GSM'},
'5585989':{'en': 'Oi'},
'556998431':{'en': 'Brasil Telecom GSM'},
'556998436':{'en': 'Brasil Telecom GSM'},
'556998437':{'en': 'Brasil Telecom GSM'},
'556998434':{'en': 'Brasil Telecom GSM'},
'556998435':{'en': 'Brasil Telecom GSM'},
'556798411':{'en': 'Brasil Telecom GSM'},
'556798416':{'en': 'Brasil Telecom GSM'},
'556798417':{'en': 'Brasil Telecom GSM'},
'556798414':{'en': 'Brasil Telecom GSM'},
'559398118':{'en': 'TIM'},
'556798415':{'en': 'Brasil Telecom GSM'},
'559398119':{'en': 'TIM'},
'62736996':{'en': 'Esia'},
'9181708':{'en': 'Airtel'},
'918389':{'en': 'Reliance'},
'84169':{'en': 'Viettel Mobile'},
'9181700':{'en': 'Airtel'},
'918388':{'en': 'Reliance'},
'62736997':{'en': 'Esia'},
'658368':{'en': 'M1'},
'658369':{'en': 'StarHub'},
'658360':{'en': 'SingTel'},
'658361':{'en': 'StarHub'},
'658362':{'en': 'StarHub'},
'658363':{'en': 'StarHub'},
'658364':{'en': 'StarHub'},
'658366':{'en': 'M1'},
'658367':{'en': 'StarHub'},
'558199374':{'en': 'Claro BR'},
'559199944':{'en': 'Oi'},
'559199942':{'en': 'Oi'},
'5586987':{'en': 'Oi'},
'559199940':{'en': 'Oi'},
'5586985':{'en': 'Oi'},
'5586988':{'en': 'Oi'},
'5586989':{'en': 'Oi'},
'559199949':{'en': 'Oi'},
'558199370':{'en': 'Claro BR'},
'62294911':{'en': 'Esia'},
'62294910':{'en': 'Esia'},
'62294913':{'en': 'Esia'},
'558199371':{'en': 'Claro BR'},
'558199372':{'en': 'Claro BR'},
'9180795':{'en': 'Idea'},
'6226692':{'en': 'Esia'},
'6226693':{'en': 'Esia'},
'6226690':{'en': 'Esia'},
'6226691':{'en': 'Esia'},
'9174909':{'en': 'Airtel'},
'677792':{'en': 'Solomon Telekom'},
'677793':{'en': 'Solomon Telekom'},
'677790':{'en': 'Solomon Telekom'},
'677791':{'en': 'Solomon Telekom'},
'677794':{'en': 'Solomon Telekom'},
'917379':{'en': 'Vodafone'},
'917373':{'en': 'Aircel'},
'917377':{'en': 'Idea'},
'917376':{'en': 'Cellone'},
'556798135':{'en': 'TIM'},
'556798426':{'en': 'Brasil Telecom GSM'},
'556798425':{'en': 'Brasil Telecom GSM'},
'556798136':{'en': 'TIM'},
'556798131':{'en': 'TIM'},
'556798422':{'en': 'Brasil Telecom GSM'},
'556798133':{'en': 'TIM'},
'556798132':{'en': 'TIM'},
'9175368':{'en': 'Idea'},
'9175369':{'en': 'Dishnet'},
'556798139':{'en': 'TIM'},
'556798138':{'en': 'TIM'},
'556798429':{'en': 'Brasil Telecom GSM'},
'556798428':{'en': 'Brasil Telecom GSM'},
'55879811':{'en': 'Vivo'},
'55879810':{'en': 'Vivo'},
'64204':{'en': 'Skinny'},
'555399948':{'en': 'Vivo'},
'555399949':{'en': 'Vivo'},
'9183839':{'en': 'Reliance Jio'},
'555399941':{'en': 'Vivo'},
'555399942':{'en': 'Vivo'},
'555399943':{'en': 'Vivo'},
'555399944':{'en': 'Vivo'},
'555399945':{'en': 'Vivo'},
'555399946':{'en': 'Vivo'},
'555399947':{'en': 'Vivo'},
'659235':{'en': 'SingTel'},
'659234':{'en': 'SingTel'},
'659237':{'en': 'StarHub'},
'659236':{'en': 'SingTel'},
'659231':{'en': 'SingTel'},
'659230':{'en': 'SingTel'},
'659233':{'en': 'SingTel'},
'659232':{'en': 'SingTel'},
'659239':{'en': 'StarHub'},
'659238':{'en': 'StarHub'},
'9174580':{'en': 'Airtel'},
'9174589':{'en': 'Airtel'},
'9174588':{'en': 'Airtel'},
'559898159':{'en': 'TIM'},
'559898158':{'en': 'TIM'},
'559898151':{'en': 'TIM'},
'559898153':{'en': 'TIM'},
'559898152':{'en': 'TIM'},
'559898155':{'en': 'TIM'},
'559898154':{'en': 'TIM'},
'559898157':{'en': 'TIM'},
'559898156':{'en': 'TIM'},
'9173209':{'en': 'Airtel'},
'9173208':{'en': 'Airtel'},
'556898121':{'en': 'TIM'},
'9173758':{'en': 'Idea'},
'9173200':{'en': 'Airtel'},
'9173750':{'en': 'Idea'},
'7747':{'en': 'Tele2', 'ru': 'Tele2'},
'556599992':{'en': 'Vivo'},
'556599993':{'en': 'Vivo'},
'556599991':{'en': 'Vivo'},
'556599996':{'en': 'Vivo'},
'556599997':{'en': 'Vivo'},
'556599994':{'en': 'Vivo'},
'556599995':{'en': 'Vivo'},
'556599998':{'en': 'Vivo'},
'556599999':{'en': 'Vivo'},
'9176189':{'en': 'Idea'},
'9176188':{'en': 'Idea'},
'9176181':{'en': 'Vodafone'},
'9176180':{'en': 'Idea'},
'9176183':{'en': 'Vodafone'},
'9176182':{'en': 'Vodafone'},
'9176185':{'en': 'Vodafone'},
'9176184':{'en': 'Vodafone'},
'9176187':{'en': 'Airtel'},
'9176186':{'en': 'Vodafone'},
'59469418':{'en': 'SFR'},
'59469419':{'en': 'SFR'},
'59469412':{'en': 'Digicel'},
'59469413':{'en': 'Digicel'},
'59469414':{'en': 'Digicel'},
'59469415':{'en': 'Digicel'},
'59469416':{'en': 'Digicel'},
'59469417':{'en': 'SFR'},
'9177058':{'en': 'Vodafone'},
'9177059':{'en': 'Vodafone'},
'9177050':{'en': 'Vodafone'},
'555499615':{'en': 'Vivo'},
'555499614':{'en': 'Vivo'},
'555499617':{'en': 'Vivo'},
'555499616':{'en': 'Vivo'},
'555499611':{'en': 'Vivo'},
'555499613':{'en': 'Vivo'},
'555499612':{'en': 'Vivo'},
'55859820':{'en': 'Vivo'},
'555499619':{'en': 'Vivo'},
'555499618':{'en': 'Vivo'},
'6235199':{'en': 'Esia'},
'6235198':{'en': 'Esia'},
'62548994':{'en': 'Esia'},
'559399179':{'en': 'Vivo'},
'559399178':{'en': 'Vivo'},
'62548993':{'en': 'Esia'},
'62548992':{'en': 'Esia'},
'559399175':{'en': 'Vivo'},
'559399174':{'en': 'Vivo'},
'559399177':{'en': 'Vivo'},
'559399176':{'en': 'Vivo'},
'559399171':{'en': 'Vivo'},
'559399173':{'en': 'Vivo'},
'559399172':{'en': 'Vivo'},
'62518990':{'en': 'Esia'},
'62518991':{'en': 'Esia'},
'62518992':{'en': 'Esia'},
'62518993':{'en': 'Esia'},
'9181398':{'en': 'Airtel'},
'9181399':{'en': 'Idea'},
'557199984':{'en': 'Vivo'},
'557199985':{'en': 'Vivo'},
'557199986':{'en': 'Vivo'},
'557199987':{'en': 'Vivo'},
'557199981':{'en': 'Vivo'},
'557199982':{'en': 'Vivo'},
'557199983':{'en': 'Vivo'},
'557199988':{'en': 'Vivo'},
'557199989':{'en': 'Vivo'},
'62284994':{'en': 'Esia'},
'62284995':{'en': 'Esia'},
'8536278':{'en': '3'},
'8536279':{'en': '3'},
'8536276':{'en': '3'},
'8536277':{'en': '3'},
'8536274':{'en': 'CTM'},
'8536275':{'en': 'CTM'},
'8536272':{'en': 'CTM'},
'8536273':{'en': 'CTM'},
'8536270':{'en': 'SmarTone'},
'8536271':{'en': 'SmarTone'},
'557199188':{'en': 'TIM'},
'559998139':{'en': 'TIM'},
'9181578':{'en': 'Idea'},
'9181579':{'en': 'Idea'},
'559998134':{'en': 'TIM'},
'557199185':{'en': 'TIM'},
'559998136':{'en': 'TIM'},
'557199187':{'en': 'TIM'},
'559998131':{'en': 'TIM'},
'559998132':{'en': 'TIM'},
'557199183':{'en': 'TIM'},
'9174610':{'en': 'Airtel'},
'9174619':{'en': 'Airtel'},
'9174618':{'en': 'Airtel'},
'556198551':{'en': 'Brasil Telecom GSM'},
'556198553':{'en': 'Brasil Telecom GSM'},
'556198552':{'en': 'Brasil Telecom GSM'},
'556198555':{'en': 'Brasil Telecom GSM'},
'556198554':{'en': 'Brasil Telecom GSM'},
'556198557':{'en': 'Brasil Telecom GSM'},
'556198556':{'en': 'Brasil Telecom GSM'},
'556198559':{'en': 'Brasil Telecom GSM'},
'556198558':{'en': 'Brasil Telecom GSM'},
'6228192':{'en': 'Esia'},
'6228191':{'en': 'Esia'},
'558299351':{'en': 'Claro BR'},
'559699129':{'en': 'Vivo'},
'559699128':{'en': 'Vivo'},
'559699125':{'en': 'Vivo'},
'559699124':{'en': 'Vivo'},
'559699127':{'en': 'Vivo'},
'559699126':{'en': 'Vivo'},
'559699121':{'en': 'Vivo'},
'559699123':{'en': 'Vivo'},
'559699122':{'en': 'Vivo'},
'55799812':{'en': 'Claro BR'},
'55799813':{'en': 'Claro BR'},
'55799810':{'en': 'Claro BR'},
'55799811':{'en': 'Claro BR'},
'55799816':{'en': 'Claro BR'},
'55799814':{'en': 'Claro BR'},
'55799815':{'en': 'Claro BR'},
'9174774':{'en': 'Airtel'},
'9174775':{'en': 'Airtel'},
'9174776':{'en': 'Airtel'},
'9174777':{'en': 'Airtel'},
'9174770':{'en': 'Airtel'},
'9174772':{'en': 'Airtel'},
'9174773':{'en': 'Airtel'},
'6226591':{'en': 'Esia'},
'6226590':{'en': 'Esia'},
'6226593':{'en': 'Esia'},
'6226592':{'en': 'Esia'},
'6226599':{'en': 'Esia'},
'917998':{'en': 'Dishnet'},
'9177880':{'en': 'Idea'},
'9180110':{'en': 'Airtel'},
'658559':{'en': 'StarHub'},
'558299371':{'en': 'Claro BR'},
'9181539':{'en': 'Idea'},
'558599918':{'en': 'TIM'},
'558599919':{'en': 'TIM'},
'558599911':{'en': 'TIM'},
'558599912':{'en': 'TIM'},
'558599913':{'en': 'TIM'},
'558599914':{'en': 'TIM'},
'558599915':{'en': 'TIM'},
'558599916':{'en': 'TIM'},
'558599917':{'en': 'TIM'},
'9177878':{'en': 'Idea'},
'9177879':{'en': 'Idea'},
'9177870':{'en': 'Idea'},
'623994':{'en': 'Esia'},
'557499194':{'en': 'TIM'},
'9174680':{'en': 'Dishnet'},
'622990':{'en': 'Esia'},
'84999':{'en': 'Indochina Telecom'},
'84998':{'en': 'Indochina Telecom'},
'84997':{'en': 'G-Mobile'},
'84996':{'en': 'G-Mobile'},
'84995':{'en': 'G-Mobile'},
'84994':{'en': 'G-Mobile'},
'84993':{'en': 'G-Mobile'},
'84992':{'en': 'VSAT'},
'55779980':{'en': 'Vivo'},
'917030':{'en': 'Vodafone'},
'559899944':{'en': 'Oi'},
'917032':{'en': 'Airtel'},
'917033':{'en': 'Airtel'},
'917034':{'en': 'Vodafone'},
'917035':{'en': 'Dishnet'},
'917036':{'en': 'Idea'},
'917037':{'en': 'Telewings'},
'917038':{'en': 'Idea'},
'917039':{'en': 'Idea'},
'9172610':{'en': 'Airtel'},
'9172619':{'en': 'Vodafone'},
'9172618':{'en': 'Airtel'},
'852649':{'en': 'PCCW Mobile', 'zh': u('\u9999\u6e2f\u79fb\u52a8\u901a\u8baf'), 'zh_Hant': u('\u9999\u6e2f\u79fb\u52d5\u901a\u8a0a')},
'852648':{'en': 'China Mobile', 'zh': u('\u4e2d\u56fd\u79fb\u52a8'), 'zh_Hant': u('\u4e2d\u570b\u79fb\u52d5')},
'852643':{'en': 'China Mobile', 'zh': u('\u4e2d\u56fd\u79fb\u52a8'), 'zh_Hant': u('\u4e2d\u570b\u79fb\u52d5')},
'852642':{'en': '1O1O / One2Free', 'zh': '1O1O / One2Free', 'zh_Hant': '1O1O / One2Free'},
'852641':{'en': '1O1O / One2Free', 'zh': '1O1O / One2Free', 'zh_Hant': '1O1O / One2Free'},
'852640':{'en': '3', 'zh': '3', 'zh_Hant': '3'},
'852647':{'en': '3', 'zh': '3', 'zh_Hant': '3'},
'852646':{'en': 'SmarTone', 'zh': u('\u6570\u7801\u901a'), 'zh_Hant': u('\u6578\u78bc\u901a')},
'852644':{'en': 'PCCW Mobile', 'zh': u('\u9999\u6e2f\u79fb\u52a8\u901a\u8baf'), 'zh_Hant': u('\u9999\u6e2f\u79fb\u52d5\u901a\u8a0a')},
'9177889':{'en': 'Idea'},
'918130':{'en': 'Airtel'},
'9177888':{'en': 'Idea'},
'9176538':{'en': 'CellOne'},
'9176539':{'en': 'CellOne'},
'557599841':{'en': 'Vivo'},
'557599840':{'en': 'Vivo'},
'557599843':{'en': 'Vivo'},
'557599842':{'en': 'Vivo'},
'622995':{'en': 'Esia'},
'9172960':{'en': 'Hexacom'},
'9172969':{'en': 'Airtel'},
'9172968':{'en': 'Airtel'},
'65909':{'en': 'M1'},
'65908':{'en': 'SingTel'},
'61459':{'en': 'Telstra'},
'9176047':{'en': 'Reliance'},
'556598128':{'en': 'TIM'},
'556598129':{'en': 'TIM'},
'9176042':{'en': 'Reliance'},
'9176043':{'en': 'Reliance'},
'556598124':{'en': 'TIM'},
'556598125':{'en': 'TIM'},
'556598126':{'en': 'TIM'},
'556598127':{'en': 'TIM'},
'65905':{'en': 'SingTel'},
'556598121':{'en': 'TIM'},
'556598122':{'en': 'TIM'},
'556598123':{'en': 'TIM'},
'9176450':{'en': 'Telenor'},
'57303':{'en': 'Uff!'},
'9177648':{'en': 'Airtel'},
'558799606':{'en': 'TIM'},
'558799607':{'en': 'TIM'},
'558799604':{'en': 'TIM'},
'558799605':{'en': 'TIM'},
'558799602':{'en': 'TIM'},
'558799603':{'en': 'TIM'},
'558799601':{'en': 'TIM'},
'558799608':{'en': 'TIM'},
'558799609':{'en': 'TIM'},
'556998411':{'en': 'Brasil Telecom GSM'},
'556998412':{'en': 'Brasil Telecom GSM'},
'556998413':{'en': 'Brasil Telecom GSM'},
'556998414':{'en': 'Brasil Telecom GSM'},
'556998415':{'en': 'Brasil Telecom GSM'},
'556998416':{'en': 'Brasil Telecom GSM'},
'556998417':{'en': 'Brasil Telecom GSM'},
'556998418':{'en': 'Brasil Telecom GSM'},
'556998419':{'en': 'Brasil Telecom GSM'},
'569743':{'en': 'Movistar'},
'918092':{'en': 'Tata Docomo'},
'9181760':{'en': 'Tata Docomo'},
'9181769':{'en': 'Tata Docomo'},
'6011105':{'en': 'REDtone'},
'85567':{'en': 'Beeline'},
'85566':{'en': 'Beeline'},
'85560':{'en': 'Beeline'},
'85569':{'en': 'Smart'},
'85568':{'en': 'Beeline'},
'56966':{'en': 'Entel'},
'56961':{'en': 'Movistar'},
'559499972':{'en': 'Oi'},
'559999109':{'en': 'Vivo'},
'556398423':{'en': 'Brasil Telecom GSM'},
'556398422':{'en': 'Brasil Telecom GSM'},
'556398421':{'en': 'Brasil Telecom GSM'},
'658264':{'en': 'SingTel'},
'57316':{'en': 'Movistar'},
'559999103':{'en': 'Vivo'},
'57311':{'en': 'Claro'},
'9181189':{'en': 'Hexacom'},
'556199643':{'en': 'Vivo'},
'57313':{'en': 'Claro'},
'556398424':{'en': 'Brasil Telecom GSM'},
'9176458':{'en': 'Telenor'},
'556798119':{'en': 'TIM'},
'556798118':{'en': 'TIM'},
'556798449':{'en': 'Brasil Telecom GSM'},
'556798448':{'en': 'Brasil Telecom GSM'},
'556798113':{'en': 'TIM'},
'556798112':{'en': 'TIM'},
'556798111':{'en': 'TIM'},
'556798446':{'en': 'Brasil Telecom GSM'},
'556798117':{'en': 'TIM'},
'556798116':{'en': 'TIM'},
'556798115':{'en': 'TIM'},
'556798114':{'en': 'TIM'},
'59669689':{'en': 'SFR/Rife'},
'59669688':{'en': 'SFR/Rife'},
'59669683':{'en': 'Orange'},
'59669682':{'en': 'Orange'},
'59669681':{'en': 'Orange'},
'59669680':{'en': 'Orange'},
'59669687':{'en': 'SFR/Rife'},
'59669686':{'en': 'Orange'},
'59669685':{'en': 'Orange'},
'59669684':{'en': 'Orange'},
'658261':{'en': 'SingTel'},
'66909':{'en': 'True Move'},
'555399928':{'en': 'Vivo'},
'555399929':{'en': 'Vivo'},
'555399927':{'en': 'Vivo'},
'6225299':{'en': 'Esia'},
'6225298':{'en': 'Esia'},
'658267':{'en': 'SingTel'},
'658266':{'en': 'SingTel'},
'9180580':{'en': 'Idea'},
'6236196':{'en': 'Esia'},
'559198480':{'en': 'Claro BR'},
'559198481':{'en': 'Claro BR'},
'559198482':{'en': 'Claro BR'},
'559198483':{'en': 'Claro BR'},
'559198484':{'en': 'Claro BR'},
'559198485':{'en': 'Claro BR'},
'559198486':{'en': 'Claro BR'},
'658260':{'en': 'SingTel'},
'658263':{'en': 'SingTel'},
'658262':{'en': 'SingTel'},
'558299631':{'en': 'TIM'},
'559898179':{'en': 'TIM'},
'559898178':{'en': 'TIM'},
'559898177':{'en': 'TIM'},
'559898176':{'en': 'TIM'},
'559898175':{'en': 'TIM'},
'559898174':{'en': 'TIM'},
'559898173':{'en': 'TIM'},
'559898172':{'en': 'TIM'},
'559898171':{'en': 'TIM'},
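# Carrier lookup against this mapping is by longest matching prefix of the
# full E.164 number (digits only, country code included). A minimal sketch,
# assuming the mapping is bound to a dict named CARRIER_DATA:
#   def carrier_for(number, data, lang='en'):
#       for i in range(len(number), 0, -1):  # try longest prefix first
#           entry = data.get(number[:i])
#           if entry:
#               return entry.get(lang)
#       return None
#   carrier_for('559898154321', CARRIER_DATA)  # -> 'TIM'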
def get_chain_type_from_component (item, indices='all', check=True):
    if check:
        _digest_item(item, _form)
indices = _digest_indices(indices)
tmp_indices = get_component_index_from_component(item, indices=indices, check=False)
all_indices = item.atoms_dataframe['component_index'].to_numpy()
right_locs = [next((idx for idx, val in _np.ndenumerate(all_indices) if val==ii))[0] for ii in tmp_indices]
output = item.atoms_dataframe['chain_type'][right_locs].to_numpy()
return output
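# Note: the `right_locs` list comprehension above restarts a linear scan of
# `all_indices` for every requested index, i.e. O(n_atoms * n_indices). A
# single-pass sketch (assuming every requested component index occurs in the
# dataframe) builds a first-occurrence map once and reuses it:
#   first_row = {}
#   for loc, val in enumerate(all_indices):
#       first_row.setdefault(val, loc)
#   right_locs = [first_row[ii] for ii in tmp_indices]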
def get_molecule_index_from_component (item, indices='all', check=True):
if check:
_digest_item(item, _form)
indices = _digest_indices(indices)
tmp_indices = get_component_index_from_component(item, indices=indices, check=False)
all_indices = item.atoms_dataframe['component_index'].to_numpy()
right_locs = [next((idx for idx, val in _np.ndenumerate(all_indices) if val==ii))[0] for ii in tmp_indices]
output = item.atoms_dataframe['molecule_index'][right_locs].to_numpy()
return output
def get_molecule_id_from_component (item, indices='all', check=True):
if check:
_digest_item(item, _form)
indices = _digest_indices(indices)
tmp_indices = get_component_index_from_component(item, indices=indices, check=False)
all_indices = item.atoms_dataframe['component_index'].to_numpy()
right_locs = [next((idx for idx, val in _np.ndenumerate(all_indices) if val==ii))[0] for ii in tmp_indices]
output = item.atoms_dataframe['molecule_id'][right_locs].to_numpy()
return output
def get_molecule_name_from_component (item, indices='all', check=True):
if check:
_digest_item(item, _form)
indices = _digest_indices(indices)
tmp_indices = get_component_index_from_component(item, indices=indices, check=False)
all_indices = item.atoms_dataframe['component_index'].to_numpy()
right_locs = [next((idx for idx, val in _np.ndenumerate(all_indices) if val==ii))[0] for ii in tmp_indices]
output = item.atoms_dataframe['molecule_name'][right_locs].to_numpy()
return output
def get_molecule_type_from_component (item, indices='all', check=True):
if check:
_digest_item(item, _form)
indices = _digest_indices(indices)
tmp_indices = get_component_index_from_component(item, indices=indices, check=False)
all_indices = item.atoms_dataframe['component_index'].to_numpy()
right_locs = [next((idx for idx, val in _np.ndenumerate(all_indices) if val==ii))[0] for ii in tmp_indices]
output = item.atoms_dataframe['molecule_type'][right_locs].to_numpy()
return output
def get_entity_index_from_component (item, indices='all', check=True):
if check:
_digest_item(item, _form)
indices = _digest_indices(indices)
tmp_indices = get_component_index_from_component(item, indices=indices, check=False)
all_indices = item.atoms_dataframe['component_index'].to_numpy()
right_locs = [next((idx for idx, val in _np.ndenumerate(all_indices) if val==ii))[0] for ii in tmp_indices]
output = item.atoms_dataframe['entity_index'][right_locs].to_numpy()
return output
def get_entity_id_from_component (item, indices='all', check=True):
if check:
_digest_item(item, _form)
indices = _digest_indices(indices)
tmp_indices = get_component_index_from_component(item, indices=indices, check=False)
all_indices = item.atoms_dataframe['component_index'].to_numpy()
right_locs = [next((idx for idx, val in _np.ndenumerate(all_indices) if val==ii))[0] for ii in tmp_indices]
output = item.atoms_dataframe['entity_id'][right_locs].to_numpy()
return output
def get_entity_name_from_component (item, indices='all', check=True):
if check:
_digest_item(item, _form)
indices = _digest_indices(indices)
tmp_indices = get_component_index_from_component(item, indices=indices, check=False)
all_indices = item.atoms_dataframe['component_index'].to_numpy()
right_locs = [next((idx for idx, val in _np.ndenumerate(all_indices) if val==ii))[0] for ii in tmp_indices]
output = item.atoms_dataframe['entity_name'][right_locs].to_numpy()
return output
def get_entity_type_from_component (item, indices='all', check=True):
if check:
_digest_item(item, _form)
indices = _digest_indices(indices)
tmp_indices = get_component_index_from_component(item, indices=indices, check=False)
all_indices = item.atoms_dataframe['component_index'].to_numpy()
right_locs = [next((idx for idx, val in _np.ndenumerate(all_indices) if val==ii))[0] for ii in tmp_indices]
output = item.atoms_dataframe['entity_type'][right_locs].to_numpy()
return output
def get_n_atoms_from_component (item, indices='all', check=True):
if check:
_digest_item(item, _form)
indices = _digest_indices(indices)
output = get_atom_index_from_component (item, indices=indices, check=False)
output = [ii.shape[0] for ii in output]
return output
def get_n_groups_from_component (item, indices='all', check=True):
if check:
_digest_item(item, _form)
indices = _digest_indices(indices)
output = get_group_index_from_component (item, indices=indices, check=False)
output = [ii.shape[0] for ii in output]
return output
def get_n_components_from_component (item, indices='all', check=True):
if check:
_digest_item(item, _form)
indices = _digest_indices(indices)
if indices == 'all':
return get_n_components_from_system (item, check=False)
else:
output = get_component_index_from_component (item, indices=indices, check=False)
return output.shape[0]
def get_n_molecules_from_component (item, indices='all', check=True):
if check:
_digest_item(item, _form)
indices = _digest_indices(indices)
if indices == 'all':
return get_n_molecules_from_system (item, check=False)
else:
output = get_molecule_index_from_component (item, indices=indices, check=False)
output = _np.unique(output)
return output.shape[0]
def get_n_chains_from_component (item, indices='all', check=True):
if check:
_digest_item(item, _form)
indices = _digest_indices(indices)
if indices == 'all':
return get_n_chains_from_system (item, check=False)
else:
output = get_chain_index_from_component (item, indices=indices, check=False)
output = _np.unique(output)
return output.shape[0]
def get_n_entities_from_component (item, indices='all', check=True):
if check:
_digest_item(item, _form)
indices = _digest_indices(indices)
if indices == 'all':
return get_n_entities_from_system (item, check=False)
else:
output = get_entity_index_from_component (item, indices=indices, check=False)
output = _np.unique(output)
return output.shape[0]
## molecule
def get_atom_index_from_molecule (item, indices='all', check=True):
if check:
_digest_item(item, _form)
indices = _digest_indices(indices)
output = []
if indices == 'all':
n_indices = get_n_molecules_from_system(item, check=False)
indices = range(n_indices)
for ii in indices:
mask = (item.atoms_dataframe['molecule_index']==ii)
output.append(item.atoms_dataframe['atom_index'][mask].to_numpy())
output = _np.array(output, dtype=object)
return output
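# For the indices='all' case, an equivalent and shorter sketch uses pandas
# groupby (group order follows sorted molecule_index values):
#   grouped = item.atoms_dataframe.groupby('molecule_index')['atom_index']
#   output = _np.array([grp.to_numpy() for _, grp in grouped], dtype=object)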
def get_atom_id_from_molecule (item, indices='all', check=True):
if check:
_digest_item(item, _form)
indices = _digest_indices(indices)
output=[]
tmp_indices = get_atom_index_from_molecule(item, indices=indices, check=False)
for aux_indices in tmp_indices:
output.append(get_atom_id_from_atom(item, indices=aux_indices, check=False))
output = _np.array(output)
return output
def get_atom_name_from_molecule (item, indices='all', check=True):
if check:
_digest_item(item, _form)
indices = _digest_indices(indices)
output=[]
tmp_indices = get_atom_index_from_molecule(item, indices=indices, check=False)
for aux_indices in tmp_indices:
output.append(get_atom_name_from_atom(item, indices=aux_indices, check=False))
output = _np.array(output)
return output
def get_atom_type_from_molecule (item, indices='all', check=True):
if check:
_digest_item(item, _form)
indices = _digest_indices(indices)
output=[]
tmp_indices = get_atom_index_from_molecule(item, indices=indices, check=False)
for aux_indices in tmp_indices:
output.append(get_atom_type_from_atom(item, indices=aux_indices, check=False))
output = _np.array(output)
return output
def get_group_index_from_molecule (item, indices='all', check=True):
if check:
_digest_item(item, _form)
indices = _digest_indices(indices)
output = []
if indices == 'all':
n_indices = get_n_molecules_from_system(item, check=False)
indices = range(n_indices)
for ii in indices:
mask = (item.atoms_dataframe['molecule_index']==ii)
output.append(item.atoms_dataframe['group_index'][mask].unique())
output = _np.array(output, dtype=object)
return output
def get_group_id_from_molecule (item, indices='all', check=True):
if check:
_digest_item(item, _form)
indices = _digest_indices(indices)
output=[]
tmp_indices = get_group_index_from_molecule(item, indices=indices, check=False)
for aux_indices in tmp_indices:
output.append(get_group_id_from_group(item, indices=aux_indices, check=False))
output = _np.array(output, dtype=object)
return output
def get_group_name_from_molecule (item, indices='all', check=True):
if check:
_digest_item(item, _form)
indices = _digest_indices(indices)
output=[]
tmp_indices = get_group_index_from_molecule(item, indices=indices, check=False)
for aux_indices in tmp_indices:
output.append(get_group_name_from_group(item, indices=aux_indices, check=False))
output = _np.array(output, dtype=object)
return output
def get_group_type_from_molecule (item, indices='all', check=True):
if check:
_digest_item(item, _form)
indices = _digest_indices(indices)
output=[]
tmp_indices = get_group_index_from_molecule(item, indices=indices, check=False)
for aux_indices in tmp_indices:
output.append(get_group_type_from_group(item, indices=aux_indices, check=False))
output = _np.array(output, dtype=object)
return output
def get_component_index_from_molecule (item, indices='all', check=True):
if check:
_digest_item(item, _form)
indices = _digest_indices(indices)
output = []
if indices == 'all':
n_indices = get_n_molecules_from_system(item, check=False)
indices = range(n_indices)
for ii in indices:
mask = (item.atoms_dataframe['molecule_index']==ii)
output.append(item.atoms_dataframe['component_index'][mask].unique())
output = _np.array(output, dtype=object)
return output
def get_component_id_from_molecule (item, indices='all', check=True):
if check:
_digest_item(item, _form)
indices = _digest_indices(indices)
output=[]
tmp_indices = get_component_index_from_molecule(item, indices=indices, check=False)
for aux_indices in tmp_indices:
output.append(get_component_id_from_component(item, indices=aux_indices, check=False))
output = _np.array(output)
return output
def get_component_name_from_molecule (item, indices='all', check=True):
if check:
_digest_item(item, _form)
indices = _digest_indices(indices)
output=[]
tmp_indices = get_component_index_from_molecule(item, indices=indices, check=False)
for aux_indices in tmp_indices:
output.append(get_component_name_from_component(item, indices=aux_indices, check=False))
output = _np.array(output)
return output
def get_component_type_from_molecule (item, indices='all', check=True):
if check:
_digest_item(item, _form)
indices = _digest_indices(indices)
output=[]
tmp_indices = get_component_index_from_molecule(item, indices=indices, check=False)
for aux_indices in tmp_indices:
output.append(get_component_type_from_component(item, indices=aux_indices, check=False))
output = _np.array(output)
return output
def get_chain_index_from_molecule (item, indices='all', check=True):
if check:
_digest_item(item, _form)
indices = _digest_indices(indices)
output = []
if indices == 'all':
n_indices = get_n_molecules_from_system(item, check=False)
indices = range(n_indices)
for ii in indices:
mask = (item.atoms_dataframe['molecule_index']==ii)
output.append(item.atoms_dataframe['chain_index'][mask].unique())
output = _np.array(output, dtype=object)
return output
def get_chain_id_from_molecule (item, indices='all', check=True):
if check:
_digest_item(item, _form)
indices = _digest_indices(indices)
output=[]
tmp_indices = get_chain_index_from_molecule(item, indices=indices, check=False)
for aux_indices in tmp_indices:
output.append(get_chain_id_from_chain(item, indices=aux_indices, check=False))
output = _np.array(output)
return output
def get_chain_name_from_molecule (item, indices='all', check=True):
if check:
_digest_item(item, _form)
indices = _digest_indices(indices)
output=[]
tmp_indices = get_chain_index_from_molecule(item, indices=indices, check=False)
for aux_indices in tmp_indices:
output.append(get_chain_name_from_chain(item, indices=aux_indices, check=False))
output = _np.array(output)
return output
def get_chain_type_from_molecule (item, indices='all', check=True):
if check:
_digest_item(item, _form)
indices = _digest_indices(indices)
output=[]
tmp_indices = get_chain_index_from_molecule(item, indices=indices, check=False)
for aux_indices in tmp_indices:
output.append(get_chain_type_from_chain(item, indices=aux_indices, check=False))
output = _np.array(output)
return output
def get_molecule_index_from_molecule (item, indices='all', check=True):
if check:
_digest_item(item, _form)
indices = _digest_indices(indices)
if indices == 'all':
output = item.atoms_dataframe['molecule_index'].unique()
else:
output = indices
return output
def get_molecule_id_from_molecule (item, indices='all', check=True):
if check:
_digest_item(item, _form)
indices = _digest_indices(indices)
tmp_indices = get_molecule_index_from_molecule(item, indices=indices, check=False)
all_indices = item.atoms_dataframe['molecule_index'].to_numpy()
right_locs = [next((idx for idx, val in _np.ndenumerate(all_indices) if val==ii))[0] for ii in tmp_indices]
output = item.atoms_dataframe['molecule_id'][right_locs].to_numpy()
return output
def get_molecule_name_from_molecule (item, indices='all', check=True):
if check:
_digest_item(item, _form)
indices = _digest_indices(indices)
tmp_indices = get_molecule_index_from_molecule(item, indices=indices, check=False)
all_indices = item.atoms_dataframe['molecule_index'].to_numpy()
right_locs = [next((idx for idx, val in _np.ndenumerate(all_indices) if val==ii))[0] for ii in tmp_indices]
output = item.atoms_dataframe['molecule_name'][right_locs].to_numpy()
return output
def get_molecule_type_from_molecule (item, indices='all', check=True):
if check:
_digest_item(item, _form)
indices = _digest_indices(indices)
tmp_indices = get_molecule_index_from_molecule(item, indices=indices, check=False)
all_indices = item.atoms_dataframe['molecule_index'].to_numpy()
right_locs = [next((idx for idx, val in _np.ndenumerate(all_indices) if val==ii))[0] for ii in tmp_indices]
output = item.atoms_dataframe['molecule_type'][right_locs].to_numpy()
return output
def get_entity_index_from_molecule (item, indices='all', check=True):
if check:
_digest_item(item, _form)
indices = _digest_indices(indices)
tmp_indices = get_molecule_index_from_molecule(item, indices=indices, check=False)
all_indices = item.atoms_dataframe['molecule_index'].to_numpy()
right_locs = [next((idx for idx, val in _np.ndenumerate(all_indices) if val==ii))[0] for ii in tmp_indices]
output = item.atoms_dataframe['entity_index'][right_locs].to_numpy()
return output
def get_entity_id_from_molecule (item, indices='all', check=True):
if check:
_digest_item(item, _form)
indices = _digest_indices(indices)
tmp_indices = get_molecule_index_from_molecule(item, indices=indices, check=False)
all_indices = item.atoms_dataframe['molecule_index'].to_numpy()
right_locs = [next((idx for idx, val in _np.ndenumerate(all_indices) if val==ii))[0] for ii in tmp_indices]
output = item.atoms_dataframe['entity_id'][right_locs].to_numpy()
return output
def get_entity_name_from_molecule (item, indices='all', check=True):
if check:
_digest_item(item, _form)
indices = _digest_indices(indices)
tmp_indices = get_molecule_index_from_molecule(item, indices=indices, check=False)
all_indices = item.atoms_dataframe['molecule_index'].to_numpy()
right_locs = [next((idx for idx, val in _np.ndenumerate(all_indices) if val==ii))[0] for ii in tmp_indices]
output = item.atoms_dataframe['entity_name'][right_locs].to_numpy()
return output
def get_entity_type_from_molecule (item, indices='all', check=True):
if check:
_digest_item(item, _form)
indices = _digest_indices(indices)
tmp_indices = get_molecule_index_from_molecule(item, indices=indices, check=False)
all_indices = item.atoms_dataframe['molecule_index'].to_numpy()
right_locs = [next((idx for idx, val in _np.ndenumerate(all_indices) if val==ii))[0] for ii in tmp_indices]
output = item.atoms_dataframe['entity_type'][right_locs].to_numpy()
return output
# Validate values in fields that are of type 'typed_relation'. Each value (don't forget multivalued fields) needs to have
# this pattern: string:string:int.
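# For example, a (hypothetical) typed_relation value 'relators:art:30' would encode
# vocabulary namespace, relation type, and taxonomy term ID, in that order.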
validate_typed_relation_values(config, field_definitions, csv_data)
validate_csv_field_cardinality_csv_data = get_csv_data(config['input_dir'], config['input_csv'], config['delimiter'])
validate_csv_field_cardinality(config, field_definitions, validate_csv_field_cardinality_csv_data)
validate_csv_field_length_csv_data = get_csv_data(config['input_dir'], config['input_csv'], config['delimiter'])
validate_csv_field_length(config, field_definitions, validate_csv_field_length_csv_data)
# Validating values in CSV taxonomy fields requires a View installed by the Islandora Workbench Integration module.
# If the View is not enabled, Drupal returns a 404. Use a dummy vocabulary ID or we'll get a 404 even if the View
# is enabled.
terms_view_url = config['host'] + '/vocabulary/dummyvid?_format=json'
terms_view_response = issue_request(config, 'GET', terms_view_url)
if terms_view_response.status_code == 404:
logging.warning('Not validating taxonomy term IDs used in CSV file. To use this feature, install the Islandora Workbench Integration module.')
print('Warning: Not validating taxonomy term IDs used in CSV file. To use this feature, install the Islandora Workbench Integration module.')
else:
validate_taxonomy_field_csv_data = get_csv_data(config['input_dir'], config['input_csv'], config['delimiter'])
validate_taxonomy_field_values(config, field_definitions, validate_taxonomy_field_csv_data)
# Validate length of 'title'.
if config['validate_title_length']:
validate_title_csv_data = get_csv_data(config['input_dir'], config['input_csv'], config['delimiter'])
for count, row in enumerate(validate_title_csv_data, start=1):
if len(row['title']) > 255:
message = "The 'title' column in row " + str(count) + " of your CSV file exceeds Drupal's maximum length of 255 characters."
logging.error(message)
sys.exit('Error: ' + message)
# Validate existence of nodes specified in 'field_member_of'. This could be generalized out to validate node IDs in other fields.
# See https://github.com/mjordan/islandora_workbench/issues/90.
validate_field_member_of_csv_data = get_csv_data(config['input_dir'], config['input_csv'], config['delimiter'])
for count, row in enumerate(validate_field_member_of_csv_data, start=1):
if 'field_member_of' in csv_column_headers:
parent_nids = row['field_member_of'].split(config['subdelimiter'])
for parent_nid in parent_nids:
if len(parent_nid) > 0:
parent_node_exists = ping_node(config, parent_nid)
if parent_node_exists is False:
message = "The 'field_member_of' field in row " + str(count) + " of your CSV file contains a node ID (" + parent_nid + ") that doesn't exist."
logging.error(message)
sys.exit('Error: ' + message)
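# A sketch of the generalization suggested above (hypothetical helper, not part
# of workbench): the same loop, parameterized by field name.
#   def validate_node_id_field(config, csv_data, field_name):
#       for count, row in enumerate(csv_data, start=1):
#           for nid in row.get(field_name, '').split(config['subdelimiter']):
#               if len(nid) > 0 and ping_node(config, nid) is False:
#                   sys.exit('Error: row ' + str(count) + ' references missing node ' + nid + ' in ' + field_name + '.')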
# Validate 'langcode' values if that field exists.
if langcode_was_present:
validate_langcode_csv_data = get_csv_data(config['input_dir'], config['input_csv'], config['delimiter'])
for count, row in enumerate(validate_langcode_csv_data, start=1):
langcode_valid = validate_language_code(row['langcode'])
if not langcode_valid:
message = "Row " + str(count) + " of your CSV file contains an invalid Drupal language code (" + row['langcode'] + ") in its 'langcode' column."
logging.error(message)
sys.exit('Error: ' + message)
if config['task'] == 'delete':
if 'node_id' not in csv_column_headers:
message = 'For "delete" tasks, your CSV file must contain a "node_id" column.'
logging.error(message)
sys.exit('Error: ' + message)
if config['task'] == 'add_media':
if 'node_id' not in csv_column_headers:
message = 'For "add_media" tasks, your CSV file must contain a "node_id" column.'
logging.error(message)
sys.exit('Error: ' + message)
if 'file' not in csv_column_headers:
message = 'For "add_media" tasks, your CSV file must contain a "file" column.'
logging.error(message)
sys.exit('Error: ' + message)
if config['task'] == 'delete_media':
if 'media_id' not in csv_column_headers:
message = 'For "delete_media" tasks, your CSV file must contain a "media_id" column.'
logging.error(message)
sys.exit('Error: ' + message)
# Check for existence of files listed in the 'file' column.
if (config['task'] == 'create' or config['task'] == 'add_media') and config['paged_content_from_directories'] is False:
file_check_csv_data = get_csv_data(config['input_dir'], config['input_csv'], config['delimiter'])
if config['allow_missing_files'] is False:
for count, file_check_row in enumerate(file_check_csv_data, start=1):
if len(file_check_row['file']) == 0:
message = 'Row ' + file_check_row[config['id_field']] + ' contains an empty "file" value.'
logging.error(message)
sys.exit('Error: ' + message)
file_path = os.path.join(config['input_dir'], file_check_row['file'])
if not os.path.exists(file_path) or not os.path.isfile(file_path):
message = 'File ' + file_path + ' identified in CSV "file" column for record with ID field value ' + file_check_row[config['id_field']] + ' not found.'
logging.error(message)
sys.exit('Error: ' + message)
message = 'OK, files named in the CSV "file" column are all present.'
print(message)
logging.info(message)
empty_file_values_exist = False
if config['allow_missing_files'] is True:
for count, file_check_row in enumerate(file_check_csv_data, start=1):
if len(file_check_row['file']) == 0:
empty_file_values_exist = True
else:
file_path = os.path.join(config['input_dir'], file_check_row['file'])
if not os.path.exists(file_path) or not os.path.isfile(file_path):
message = 'File ' + file_path + ' identified in CSV "file" column not found.'
logging.error(message)
sys.exit('Error: ' + message)
if empty_file_values_exist is True:
message = 'OK, files named in the CSV "file" column are all present; the "allow_missing_files" option is enabled and empty "file" values exist.'
print(message)
logging.info(message)
else:
message = 'OK, files named in the CSV "file" column are all present.'
print(message)
logging.info(message)
# To do: check that each file's extension is allowed for the current media type using get_registered_media_extensions().
# See https://github.com/mjordan/islandora_workbench/issues/126. Maybe also compare allowed extensions with those in
# 'media_type[s]' config option?
# Check that either 'media_type' or 'media_types' are present in the config file.
if ('media_type' not in config and 'media_types' not in config):
message = 'You must configure media type using either the "media_type" or "media_types" option.'
logging.error(message)
sys.exit('Error: ' + message)
if config['task'] == 'create' and config['paged_content_from_directories'] is True:
if 'paged_content_page_model_tid' not in config:
message = 'If you are creating paged content, you must include "paged_content_page_model_tid" in your configuration.'
logging.error('Configuration requires "paged_content_page_model_tid" setting when creating paged content.')
sys.exit('Error: ' + message)
paged_content_from_directories_csv_data = get_csv_data(config['input_dir'], config['input_csv'], config['delimiter'])
for count, file_check_row in enumerate(paged_content_from_directories_csv_data, start=1):
dir_path = os.path.join(config['input_dir'], file_check_row[config['id_field']])
if not os.path.exists(dir_path) or os.path.isfile(dir_path):
message = 'Page directory ' + dir_path + ' for CSV record with ID "' + file_check_row[config['id_field']] + '" not found.'
logging.error(message)
sys.exit('Error: ' + message)
page_files = os.listdir(dir_path)
if len(page_files) == 0:
print('Warning: Page directory ' + dir_path + ' is empty; is that intentional?')
logging.warning('Page directory ' + dir_path + ' is empty.')
for page_file_name in page_files:
if config['paged_content_sequence_seprator'] not in page_file_name:
message = 'Page file ' + os.path.join(dir_path, page_file_name) + ' does not contain a sequence separator (' + config['paged_content_sequence_seprator'] + ').'
logging.error(message)
sys.exit('Error: ' + message)
print('OK, page directories are all present.')
# If nothing has failed by now, exit with a positive message.
print("Configuration and input data appear to be valid.")
logging.info('Configuration checked for "%s" task using config file %s, no problems found.', config['task'], args.config)
sys.exit(0)
def get_registered_media_extensions(field_definitions):
# Unfinished. See https://github.com/mjordan/islandora_workbench/issues/126.
for field_name, field_def in field_definitions.items():
print("Field name: " + field_name + ' / ' + str(field_def))
"""
print(field_def)
if field_def['entity_type'] == 'media':
if 'file_extensions' in field_def:
print('Allowed file extensions for ' + field_def['media_type'] + ' :' + field_def['file_extensions'])
else:
print("No file extensions for " + field_def['media_type'])
"""
def check_input_for_create_from_files(config, args):
"""Validate the config file and input data if task is 'create_from_files'.
"""
if config['task'] != 'create_from_files':
message = 'Your task must be "create_from_files".'
logging.error(message)
sys.exit('Error: ' + message)
logging.info('Starting configuration check for "%s" task using config file %s.', config['task'], args.config)
ping_islandora(config)
config_keys = list(config.keys())
unwanted_in_create_from_files = ['check', 'delimiter', 'subdelimiter', 'allow_missing_files', 'validate_title_length',
'paged_content_from_directories', 'delete_media_with_nodes', 'allow_adding_terms']
for option in unwanted_in_create_from_files:
if option in config_keys:
config_keys.remove(option)
# If you introduce a new optional config key, add it to this list. Note that optional keys are not validated.
joiner = ', '
optional_config_keys = ['log_file_path', 'log_file_mode', 'preprocessors', 'bootstrap', 'published', 'pause',
                        'validate_title_length', 'media_type', 'media_types',
                        'model', 'models', 'output_csv', 'log_json', 'user_agent', 'allow_redirects']
for optional_config_key in optional_config_keys:
if optional_config_key in config_keys:
config_keys.remove(optional_config_key)
# Check for presence of required config keys.
create_options = ['task', 'host', 'username', 'password', 'content_type',
'input_dir', 'media_use_tid', 'drupal_filesystem']
if set(config_keys) != set(create_options):
message = 'Please check your config file for required values: ' + joiner.join(create_options) + '.'
logging.error(message)
sys.exit('Error: ' + message)
# Check existence of input directory.
if os.path.exists(config['input_dir']):
message = 'OK, input directory "' + config['input_dir'] + '" found.'
print(message)
logging.info(message)
else:
message = 'Input directory "' + config['input_dir'] + '" not found.'
logging.error(message)
sys.exit('Error: ' + message)
# Validate length of 'title'.
files = os.listdir(config['input_dir'])
for file_name in files:
filename_without_extension = os.path.splitext(file_name)[0]
if len(filename_without_extension) > 255:
message = 'The filename "' + filename_without_extension + '" exceeds Drupal\'s maximum length of 255 characters and cannot be used for a node title.'
logging.error(message)
sys.exit('Error: ' + message)
# Check that either 'media_type' or 'media_types' are present in the config file.
if ('media_type' not in config and 'media_types' not in config):
message = 'You must configure media type using either the "media_type" or "media_types" option in your configuration.'
logging.error(message)
sys.exit('Error: ' + message)
# Check that either 'model' or 'models' are present in the config file.
globalpath = f'{datapath}/global_datasets'
_,_,_ = get_global_soil(globalpath,sitedata)
_,_ = get_global_qanth(globalpath,sitedata)
_,_ = get_climate(globalpath,sitedata)
# site attrs
print('setting attributes\n')
siteattrs = pd.Series(name='siteattrs',dtype='object')
siteattrs['sitename'] = sitename
siteattrs['sitepath'] = sitepath
siteattrs['out_suffix'] = out_suffix
siteattrs['sitedata_suffix'] = sitedata_suffix
siteattrs['long_sitename'] = long_sitename
siteattrs['local_utc_offset_hours'] = local_utc_offset_hours
siteattrs['obs_contact'] = obs_contact
siteattrs['obs_reference'] = obs_reference
siteattrs['obs_comment'] = obs_comment
siteattrs['history'] = history
siteattrs['photo_source'] = photo_source
fpath = f'{sitepath}/{sitename}_siteattrs_{out_suffix}.csv'
siteattrs.to_csv(fpath,header=True,index=True)
siteattrs = pd.read_csv(fpath,index_col=0,squeeze=True)
# reformat loaded utc_offset as float
siteattrs['local_utc_offset_hours'] = float(siteattrs['local_utc_offset_hours'])
return sitedata,siteattrs
def set_raw_attributes(raw_ds, siteattrs):
sitename = siteattrs['sitename']
sitepath = siteattrs['sitepath']
print('setting raw attributes\n')
raw_ds.attrs['time_analysis_start'] = pd.to_datetime(raw_ds.time[0].values).strftime('%Y-%m-%d %H:%M:%S')
raw_ds.attrs['timestep_number_analysis'] = len(raw_ds.time)
raw_ds = set_global_attributes(raw_ds,siteattrs,ds_type='raw_obs')
raw_ds = set_variable_attributes(raw_ds)
print('writing raw observations to NetCDF\n')
fpath = f'{sitepath}/timeseries/{sitename}_raw_observations_{siteattrs["out_suffix"]}.nc'
write_netcdf_file(ds=raw_ds,fpath_out=fpath)
return raw_ds
def post_process_site(sitedata,siteattrs,datapath,
raw_ds,forcing_ds,clean_ds,era_ds,watch_ds,corr_ds,lin_ds,
forcingplots,create_outofsample_obs):
'''
final site data plotting and error calculation
runs after pipeline main from create_dataset_{sitename}.py
'''
sitename = siteattrs['sitename']
sitepath = siteattrs['sitepath']
# make website
create_markdown_observations(forcing_ds,siteattrs)
# compare corrected, era5 and wfde5 (watch) errors
compare_corrected_errors(clean_ds,era_ds,watch_ds,corr_ds,lin_ds,sitename,sitepath,'all')
if forcingplots:
plot_forcing(datapath,siteattrs,forcing_ds,with_era=False)
if create_outofsample_obs:
in_ds, out_ds = test_out_of_sample(clean_ds,era_ds,watch_ds,sitedata,siteattrs)
# Snow partitioning plot
plot_snow_partitioning(raw_ds,forcing_ds,era_ds,sitepath,sitename)
return
def get_era5_data(sitename,sitedata,syear,eyear,era5path,sitepath):
'''
get native era5 netcdf variables from raijin and combine into xarray dataset
'''
# era5 variables to collect from gadi
vz = '%iv' %wind_hgt # default v10
uz = '%iu' %wind_hgt # default u10
# ncvars = ['msdwswrf','msnswrf','msdwlwrf','msnlwrf','2t','2d','sp',vz,uz,'mtpr','msr','msshf','mslhf']
ncvars = ['msdwswrf','msdwlwrf','2t','2d','sp',vz,uz,'mtpr','msr']
if sitename == 'SG-TelokKurau': # nearest era tile over water, move to land
sitedata['latitude'] = sitedata['latitude'] + 0.25
sitedata['longitude'] = sitedata['longitude'] - 0.25
# longitude correction for era5 using 0<lon<360
lat = sitedata['latitude']
lon = sitedata['longitude']
if lon < 0.:
lon = lon + 360.
assert (0 <= lon < 360), 'longitude in era5 needs to be 0<lon<360'
assert (-90 <= lat < 90), 'latitude in era5 needs to be -90<=lat<90'
ds = xr.Dataset()
years = [str(year) for year in range(syear,eyear+1)]
# loop through variables
for ncvar in ncvars:
print('collecting %s data' %ncvar)
files = []
# get list of files in path using glob wildcard, accumulating across all years
# (reassigning `files` each year would silently keep only the final year)
for year in years:
    year_files = sorted(glob.glob('%s/%s/%s/*' %(era5path,ncvar,year)))
    assert len(year_files) > 0, 'no files found in %s/%s/%s/*' %(era5path,ncvar,year)
    files += year_files
for file in files:
print('opening %s' %file)
tmp = xr.open_dataset(file).sel(latitude=sitedata['latitude'],
longitude=sitedata['longitude'],
method='nearest')
ds = xr.merge([ds,tmp])
longname = tmp[list(tmp.keys())[0]].long_name.lower()
print('done merging %s (%s)' %(longname,ncvar))
############################################################################
ds.attrs['source'] = era5path
# get static information (veg frac, type, soil, geopotential, land/sea mask, roughness)
for ncvar in ['cvl','cvh','tvl','tvh','slt','z','lsm','fsr']:
static = xr.open_dataset(f'{era5path}/{ncvar}/2000/{ncvar}_era5_oper_sfc_20000101-20000131.nc').sel(
latitude=sitedata['latitude'],longitude=sitedata['longitude'],time='2000-01-01 00:00', method='nearest')
ds[ncvar] = static[ncvar]
return ds
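# Example usage (site name, years and paths are illustrative only):
#   era5_raw = get_era5_data('AU-Preston', sitedata, 2003, 2004,
#                            era5path='/data/era5', sitepath='./sites/AU-Preston')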
############################################################################
def convert_era5_to_alma(era5,siteattrs,sitename):
'''
opens ERA5 native datafile from site and converts to alma standard variables
'''
# convert dewtemp to specific humidity data
Qdata_np = convert_dewtemp_to_qair(
dewtemp = era5['d2m'].values,
temp = era5['t2m'].values,
pressure = era5['sp'].values)
Qair_xr = xr.DataArray(Qdata_np, coords=[era5.time.values], dims='time')
# convert era5 to alma form
ds = xr.Dataset()
ds = ds.assign( time = era5['time'])
ds = ds.assign( SWdown = era5['msdwswrf'],
LWdown = era5['msdwlwrf'],
Wind_N = era5['v%i' %wind_hgt],
Wind_E = era5['u%i' %wind_hgt],
Wind = (era5['v%i' %wind_hgt]**2 + era5['u%i' %wind_hgt]**2)**(1/2),
PSurf = era5['sp'],
Tair = era5['t2m'],
Qair = Qair_xr,
Rainf = era5['mtpr'] - era5['msr'],
Snowf = era5['msr'],
era_wind_hgt = wind_hgt)
for ncvar in ['cvl','cvh','tvl','tvh','slt','z','lsm','fsr']:
ds[ncvar] = era5[ncvar]
# setting unphysical negative values to zero
ds.Rainf.values = ds.Rainf.where(ds.Rainf>1E-9,0.).values
ds.Snowf.values = ds.Snowf.where(ds.Snowf>1E-9,0.).values
ds.SWdown.values = ds.SWdown.where(ds.SWdown>1E-9,0.).values
ds = set_variable_attributes(ds)
ds = set_global_attributes(ds,siteattrs,ds_type='era5_raw')
return ds
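# The dewpoint-to-specific-humidity step above follows the standard relation
# q = eps*e / (p - (1 - eps)*e), with e the vapour pressure evaluated at the
# dewpoint temperature and eps = Rd/Rv ~= 0.622; convert_dewtemp_to_qair is
# assumed to implement this via calc_esat below.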
###############################################################################
def correct_wind(ref_wind,local_z0,local_d0,local_wind_hgt,ref_wind_hgt,ref_z0,ref_d0,mode):
'''correct wind speed assuming log wind profile assuming all neutral conditions
Parameters
----------
ref_wind [m/s] reanalysis grid scalar wind speed at ref_wind_hgt
local_z0 [m] local roughness length (assumed constant)
local_d0 [m] local zero-plane displacement
local_wind_hgt [m] local site measurement height for wind
ref_wind_hgt [m] reanalysis site measurement height for wind
ref_z0 [m] reanalysis grid roughness length (assumed constant)
ref_d0 [m] reanalysis grid zero-plane displacement
mode [0,1] two different methods to calculate:
mode=0: including displacement height from Goret et al. 2019 Eq 9 (https://doi.org/10.1016/j.aeaoa.2019.100042)
mode=1: excluding displacement height from https://websites.pmc.ucsc.edu/~jnoble/wind/extrap/
Method results depend on assumptions about grid z_0 and d_0,
method=1 results in higher wind speeds overall
'''
# ref_wind_hgt = 10. # basis of measurement height for era5
if mode == 0: # log law with displacement height
local_wind = ref_wind*( ( np.log((local_wind_hgt-local_d0)/local_z0) )/( np.log((ref_wind_hgt-ref_d0)/ref_z0) ) )
if mode == 1: # log law described at: https://websites.pmc.ucsc.edu/~jnoble/wind/extrap/
local_wind = ref_wind*(np.log(local_wind_hgt/local_z0))/(np.log(ref_wind_hgt/ref_z0))
return local_wind
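# Illustrative sketch only: correct a 10 m grid wind to a 30 m local measurement
# height. All numbers here (roughness lengths, displacement heights) are made up.
def _example_correct_wind():
    return correct_wind(ref_wind=4.0, local_z0=1.0, local_d0=5.0,
                        local_wind_hgt=30.0, ref_wind_hgt=10.0,
                        ref_z0=0.5, ref_d0=0.0, mode=0)  # ~4.3 m/s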
def correct_pressure(ref_height,ref_temp,ref_pressure,local_temp,local_height,mode=1):
    '''correct pressure to local site based on height difference
    Parameters
    ----------
    ref_height   [m]  reference (converted from) height above sea level (asl)
    ref_temp     [K]  reference (converted from) 2m air temperature
    ref_pressure [Pa] reference (converted from) surface air pressure
    local_temp   [K]  local site 2m air temperature
    local_height [m]  local site height for correction
    mode [0-3] four different methods to calculate:
        - 0: Hypsometric equation (assuming constant temperature)
        - 1: Barometric equation (includes lapse rate)
        - 2: Hydrostatic equation P = rho * grav * h_diff
        - 3: WATCH method from Weedon et al. (2010)
    Differences between the methods are negligible
    '''
rd = 287.04 # Gas constant for dry air [J K^-1 kg^-1]
grav = 9.80616 # gravity [m s^-2]
env_lapse = 6.5/1000. # environmental lapse rate [K m^-1]
if mode == 0: # hypsometric equation assumes constant temperature
local_pressure = ref_pressure*np.exp( (grav*(ref_height-local_height))/(rd*ref_temp) )
elif mode == 1: # barometric equation with varying temperature
local_pressure = ref_pressure*(ref_temp/local_temp)**(grav/(-env_lapse*rd))
elif mode == 2: # hydrostatic equation
air_density = 1.2
local_pressure = ref_pressure + air_density * grav * (ref_height - local_height)
    elif mode == 3: # WATCH method
# weedon et al (2010) eq. 2
ref_temp_sea_level = ref_temp + ref_height * env_lapse
# weedon et al (2010) eq. 7
ref_pressure_sea_level = ref_pressure / ( ref_temp/ ref_temp_sea_level )**(grav/(-env_lapse*rd))
# weedon et al (2010) eq. 9
local_temp_sea_level = local_temp + ref_height * env_lapse
# weedon et al (2010) eq. 10
local_pressure = ref_pressure_sea_level * ( local_temp/local_temp_sea_level )**(grav/(-env_lapse*rd))
    else:
        raise ValueError('correct_pressure: unknown mode %s' % mode)
    # guard against unphysically low values: replace pressures below 90 kPa
    # with standard sea-level pressure
    local_pressure = np.where(local_pressure < 90000, 100000, local_pressure)
return local_pressure
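# Illustrative sketch only: bring a grid-cell pressure at 150 m asl down to a site
# at 50 m asl with the barometric method; all numbers are made up (the local
# temperature follows the 6.5 K/km lapse rate for consistency).
def _example_correct_pressure():
    return correct_pressure(ref_height=150., ref_temp=288.15, ref_pressure=99000.,
                            local_temp=288.8, local_height=50., mode=1)  # ~100.2 kPa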
def calc_esat(temp,pressure,mode=0):
    '''Calculates vapor pressure at saturation
    From Weedon 2010, via Buck 1981:
    New Equations for Computing Vapor Pressure and Enhancement Factor, Journal of Applied Meteorology
    Parameters
    ----------
    temp     [K]  2m air temperature
    pressure [Pa] air pressure
    mode [0,1] two different methods to calculate:
        mode=0: from Weedon et al. 2010
        mode=1: from Ukkola et al. 2017
    NOTE: modes 0 and 1 are nearly identical;
    Ukkola et al. uses the ws=qs approximation (which is not used here, see Weedon 2010)
    '''
# constants
Rd = 287.05 # specific gas constant for dry air
Rv = 461.52 #specific gas constant for water vapour
Epsilon = Rd/Rv # = 0.622...
Beta = (1.-Epsilon) # = 0.378 ...
temp_C = temp - 273.15 # temperature conversion to [C]
if mode == 0: # complex calculation from Weedon et al. 2010
# values when over: water, ice
A = np.where( temp_C > 0., 6.1121, 6.1115 )
B = np.where( temp_C > 0., 18.729, 23.036 )
C = np.where( temp_C > 0., 257.87, 279.82 )
D = np.where( temp_C > 0., 227.3, 333.7 )
X = np.where( temp_C > 0., 0.00072, 0.00022 )
Y = np.where( temp_C > 0., 3.2E-6, 3.83E-6 )
Z = np.where( temp_C > 0., 5.9E-10, 6.4E-10 )
esat = A * np.exp( ((B - (temp_C/D) ) * temp_C)/(temp_C + C))
enhancement = 1. + X + pressure/100. * (Y + (Z*temp_C**2))
esat = esat*enhancement*100.
elif mode == 1:
'''simpler calculation from Ukkola et al., 2017
From Jones (1992), Plants and microclimate: A quantitative approach
to environmental plant physiology, p110 '''
esat = 613.75*np.exp( (17.502*temp_C)/(240.97+temp_C) )
    else:
        raise ValueError('calc_esat: unknown mode %s' % mode)
return esat
def calc_qsat(esat,pressure):
'''Calculates specific humidity at saturation
Parameters
----------
esat [Pa] vapor pressure at saturation
pressure [Pa] air pressure
Returns
-------
qsat [g/g] specific humidity at saturation
'''
# constants
Rd = 287.05 # specific gas constant for dry air
Rv = 461.52 #specific gas constant for water vapour
Epsilon = Rd/Rv # = 0.622...
Beta = (1.-Epsilon) # = 0.378 ...
qsat = (Epsilon*esat)/(pressure - Beta*esat)
return qsat
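# Illustrative sketch only: saturation specific humidity at 20 degC and standard
# sea-level pressure, chaining the two functions above.
def _example_saturation_humidity():
    esat = calc_esat(temp=293.15, pressure=101325.)
    return calc_qsat(esat, pressure=101325.)  # ~0.0145 g/g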
def calc_density(temp,pressure):
    '''Calculates moist air density [kg m^-3] from temperature and pressure,
    following https://www.omnicalculator.com/physics/air-density
    '''
Rd = 287.05 # specific gas constant for dry air
Rv = 461.52 #specific gas constant for water vapour
esat = calc_esat(temp,pressure)
    # moist air density = dry air partial density + water vapour partial density
    # NOTE: uses esat as the vapour pressure, i.e. assumes saturated air, matching
    # the cited calculator's formula
    density = (pressure - esat)/(Rd*temp) + esat/(Rv*temp)
    return density
# Source: dhrone/pyIOT
# -*- coding: utf-8 -*-
from threading import Lock, Thread
import logging
import re
import queue
import time
class Component(object):
    ''' A Component monitors the underlying physical device, updates the properties associated with it, and responds to updates of those properties by sending the device the commands needed to keep its state consistent with its published properties
Args:
name (str): The name of the component
stream (:obj:`IOBase`): A stream object that receives and can send data to the physical device
        eol (str, optional): The substring that represents the end of a command within the stream for the component. Default is newline (i.e. `\\n`)
timeout (float, optional): The time in seconds to wait for input from the device before the read attempt times out. Default is 2 seconds.
queryTiming (float, optional): Sets how often in seconds a query for current status will be sent. Default is 5 seconds.
        synchronous (bool, optional): Determines how reading and writing are handled. Synchronous devices only respond when written to. Default is False (i.e. asynchronous)
'''
_logger = logging.getLogger(__name__)
def __init__(self, name = None, stream = None, eol=b'\n', timeout=2.0, queryTiming=5.0, synchronous=False):
''' Initialize component driver and set it to receive updates from the Thing '''
self._stream = stream
self._eol = eol if type(eol) == bytes else eol.encode()
self._timeout = timeout
self._queryTiming = queryTiming
self._synchronous = synchronous
self.__name__ = name if name is not None else self.__class__.__name__
self._componentQueue = queue.Queue()
self._readlock = Lock()
self._waitFor = None # Are we waiting for a specific value from the component
self._exit = False # Set when a request has been made to exit the component driver
self._needQuery = True
self._lastRequestedStatus = 0
self._buffer = b'' # Buffer to hold input from component
self._initializeProperties() # Determine what properties are being handled
def __del__(self):
self._close()
def requestStatus(self):
        ''' Request that the component query the device for its current status. This normally happens automatically but can also be commanded using this method '''
        # To prevent query storms (where the response to a status query itself
        # triggers another requestStatus), ignore requests made within 4 seconds
        # of the last one
        if time.time() > self._lastRequestedStatus + 4:
            self._lastRequestedStatus = time.time()
            self._needQuery = True
def _start(self, eventQueue):
''' Start the threads that will read and write data to the device. If the device is asynchronous two threads will be started. If synchronous only the write thread will be used.
Args:
eventQueue (:obj:`Queue`): The eventQueue allows the component to send property updates back to the Thing that it belongs to.
'''
self._eventQueue = eventQueue
# Starting event loops
_threadWrite = Thread(target=self._writeLoop)
_threadWrite.daemon = True
_threadWrite.start()
# If component is asynchronous, start an independent read thread
if not self._synchronous:
_threadRead = Thread(target=self._readLoop)
_threadRead.daemon = True
_threadRead.start()
def updateComponent(self, property, value):
''' This method is normally called by the Thing that contains the component to tell the component to update its status. It can also be called by other processes that need to tell the component to update itself
Args:
property (`str`): The name of the property being updated
value (any valid property value): The value the property has changed to
'''
self._componentQueue.put({'source': '__thing__', 'action': 'UPDATE', 'property': property, 'value': value })
def _updateThing(self, property, value):
''' Send message to thing telling it to update its properties to reflect the component's reported state '''
self._eventQueue.put({'source': self.__name__, 'action': 'UPDATE', 'property': property, 'value': value })
self._logger.info('{0} property change [{1}:{2}]'.format(self.__name__, property, value))
# update local property value
self.properties[property] = value
def exit(self):
''' Shut down component driver '''
self._exit = True
self._componentQueue.put({'action': 'EXIT'})
self._eventQueue.put({'source': self.__name__, 'action': 'EXIT'})
@classmethod
def componentToProperty(cls, property, regex):
''' Decorates the method that should be used to convert a particular response from the component to a property value.
A basic challenge when creating an AWS IOT driver is how to take state information from the component and convert it to the values you want to use to represent the component's state. **componentToProperty** allows you to decorate methods to handle each required translation from raw component input into the resulting property value(s).
Args:
property (str or `list` of str): the property name (or names) that the response is updating
regex (str): A regex that exactly matches a valid message coming from the physical component. The regex must include a group around each value that will be used to update the properties. The group value is what will be passed into the decorated function. The regex string should match the entire response to make sure that all of the message is correctly included in the property update.
**Examples:**
*These examples are from code used to control an Anthem AVM processor*
A method decorated by **componentToProperty** must accept the name of the parameter that is being handled and the value extracted from the message that was received from the component. This value is extracted using the regex included within the componentToProperty decoration call. Only the matched portion of the response (e.g. the part included within the parenthesis) will be passed into your method. The method must return the value that the IOT service should assign to the property. Any variable types allowed by the IOT service are supported.
If the method receives a property name or value that it can not handle, it should raise a TypeError or ValueError.
1st example:
.. code-block:: python
@Component.componentToProperty('powerState', '^P1P([01])$')
def toPowerState(self, property, value):
retval = { '1': 'ON', '0': 'OFF' }.get(value)
if retval:
return retval
raise ValueError('{0} is invalid for property {1}'.format(value, property))
In this example our AVM processor sends `P1P1` or `P1P0` depending on whether the AVM processor is on or off. We have decided that the IOT property name we will use to record whether the AVM processor is on or off will be called 'powerState'. Two things are worth noticing about the supplied regex. First, the regex begins with the ^ symbol and ends with the $ symbol. This forces the match to begin at the start of the response and end at the very end of the response. This is the safest way to handle a match. Second, the parenthesis surrounding the `[01]` term designate that the term inside is a match group. It is this term that controls what value the decorated function should expect to receive. In this case it will be either a '1' or a '0'.
2nd example:
.. code-block:: python
@Component.componentToProperty(['input', 'volume', 'muted'], '^P1S([0-9])V([+-][0-9]{2}[\\.][0-9])M([01])D[0-9]E[0-9]$')
def avmcombinedResponse(self, property, value):
if property == 'input':
val = { '0': 'CD', '1': '2-Ch', '2': '6-Ch', '3': 'TAPE', '4':'RADIO', '5': 'DVD', '6': 'TV', '7': 'SAT', '8': 'VCR', '9': 'AUX' }.get(value)
if val:
return val
raise ValueError('{0} is invalid for property {1}'.format(value, property))
elif property == 'volume':
try:
                    return self._dbToVolume(float(value))
except:
raise ValueError('{0} is invalid for property {1}'.format(value, property))
elif property == 'muted':
val = { '1': True, '0': False }.get(value)
if val is not None:
return val
raise ValueError('{0} is invalid for property {1}'.format(value, property))
else:
raise TypeError('ERR: {0} INVALID {1} VALUE'.format(value, property))
        The second example shows how to handle multiple properties from a single device message. The AVM processor can send messages that refer to several properties at once; here we handle a message which provides the selected input, the current volume, and the mute status. To extract those three properties, the provided regex includes exactly three match groups. When used in this way, the decorated function is called once per property (three times here) whenever matching input is sent from the component. In each call, the property being handled is passed as the property parameter and the corresponding group value as the value parameter.
'''
def decorateinterface(func):
transform = getattr(func, '__componentToProperty__', {})
cre = re.compile(regex)
transform[cre] = (property, func)
            # attach the accumulated transform table to the decorated method and
            # return it unchanged (completes the standard decorator pattern implied
            # by the getattr default above)
            func.__componentToProperty__ = transform
            return func
        return decorateinterface
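# Illustrative sketch (not part of pyIOT): a minimal subclass showing the decorator
# in use. The 'PWR0'/'PWR1' message format and the property name are made-up examples.
class _ExamplePowerComponent(Component):
    @Component.componentToProperty('powerState', '^PWR([01])$')
    def toPowerState(self, property, value):
        # value is the single captured regex group: '0' or '1'
        retval = {'1': 'ON', '0': 'OFF'}.get(value)
        if retval:
            return retval
        raise ValueError('{0} is invalid for property {1}'.format(value, property))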
# File: loguetools/og.py
from loguetools import common
import struct
import copy
from collections import namedtuple
patch_value = namedtuple("Field", ["name", "type"])
minilogue_og_patch_struct = (
# 0
("str_PROG", "4s"),
("program_name", "12s"),
("reserved1", "4B"),
("vco_1_pitch_b2_9", "B"),
("vco_1_shape_b2_9", "B"),
("vco_2_pitch_b2_9", "B"),
("vco_2_shape_b2_9", "B"),
("cross_mod_depth_b2_9", "B"),
("vco_2_pitch_eg_int_b2_9", "B"),
("vco_1_level_b2_9", "B"),
("vco_2_level_b2_9", "B"),
("noise_level_b2_9", "B"),
("cutoff_b2_9", "B"),
("resonance_b2_9", "B"),
("cutoff_eg_int_b2_9", "B"),
("reserved2", "B"),
("amp_velocity", "B"),
("amp_eg_attack_b2_9", "B"),
("amp_eg_decay_b2_9", "B"),
("amp_eg_sustain_b2_9", "B"),
("amp_eg_release_b2_9", "B"),
("eg_attack_b2_9", "B"),
("eg_decay_b2_9", "B"),
("eg_sustain_b2_9", "B"),
("eg_release_b2_9", "B"),
("lfo_rate_b2_9", "B"),
("lfo_int_b2_9", "B"),
("reserved3", "5B"),
("delay_hi_pass_cutoff_b2_9", "B"),
# 50
("delay_time_b2_9", "B"),
("delay_feedback_b2_9", "B"),
("vco_1_pitch_shape_octave_wave", "B"),
("vco_2_pitch_shape_octave_wave", "B"),
("xmod_vco2_pitch_vco1_lvl_vco2_lvl", "B"),
("sync_ring_noise_cutoff_res", "B"),
("cutoff_params", "B"),
("amp_adsr", "B"),
("eg_adsr", "B"),
("lfo_rate_int_tgt_eg", "B"),
("lfo_wave_dly", "B"),
("portamento_time", "B"),
("delay_b0_1", "B"),
("reserved4", "B"),
("voice_mode_and_depth_b0_1", "B"),
("reserved5", "B"),
("bend_range_plusminus", "B"),
("reserved6", "2B"),
("lfo_portamento_params", "B"),
("voice_mode_depth_b2_9", "B"),
("program_level", "B"),
("slider_assign", "B"),
("keyboard_octave", "B"),
("reserved7", "22B"),
("str_SEQD", "4s"),
# 100
("bpm", "<H"),
("reserved8", "B"),
("step_length", "B"),
("swing", "B"),
("default_gate_time", "B"),
("step_resolution", "B"),
("reserved9", "B"),
("step1_16", "<H"),
("step1_16_switch", "<H"),
("motion_slot_1_0_parameter", "B"),
("motion_slot_1_1_parameter", "B"),
("motion_slot_2_0_parameter", "B"),
("motion_slot_2_1_parameter", "B"),
("motion_slot_3_0_parameter", "B"),
("motion_slot_3_1_parameter", "B"),
("motion_slot_4_0_parameter", "B"),
("motion_slot_4_1_parameter", "B"),
("motion_slot_1_step1_16", "<H"),
("motion_slot_2_step1_16", "<H"),
("motion_slot_3_step1_16", "<H"),
("motion_slot_4_step1_16", "<H"),
# 128
("step_01_event_data", "20s"),
("step_02_event_data", "20s"),
("step_03_event_data", "20s"),
("step_04_event_data", "20s"),
("step_05_event_data", "20s"),
("step_06_event_data", "20s"),
("step_07_event_data", "20s"),
("step_08_event_data", "20s"),
("step_09_event_data", "20s"),
("step_10_event_data", "20s"),
("step_11_event_data", "20s"),
("step_12_event_data", "20s"),
("step_13_event_data", "20s"),
("step_14_event_data", "20s"),
("step_15_event_data", "20s"),
("step_16_event_data", "20s"),
)
"""
A translation table for normalising the minilogue OG patch data by recombining
parameter values that are split across multiple fields. Each tuple takes the form
('dest_name', 'src1_name_XX_x', 'src2_name_XX_x', ..., 'srcN_name_XX_x'), where N >= 1.
'dest_name' is the name of the new field to be created.
'src1_name_XX_x' contains 'src1_name', the name of a source field, a hex bit mask XX and
a 2's-complement single-hex-digit x encoding the number of bits to shift up (+ve values)
or down (-ve) the masked values before adding them to the destination.
+---------------------+----------------------------+-----------------------
| | offset for |offset for lower 2bits
| Description | upper 8bits +----------+------------
| | | Byte | Bit
+---------------------+----------------------------+----------+------------
| VCO 1 PITCH |20 vco_1_pitch_b2_9 | 52 vco_1_pitch_shape_octave_wave |0~1|
| VCO 1 SHAPE |21 vco_1_shape_b2_9 | 52 vco_1_pitch_shape_octave_wave |2~3|
| VCO 2 PITCH |22 vco_2_pitch_b2_9 | 53 vco_2_pitch_shape_octave_wave |0~1|
| VCO 2 SHAPE |23 vco_2_shape_b2_9 | 53 vco_2_pitch_shape_octave_wave |2~3|
| CROSS MOD DEPTH |24 cross_mod_depth_b2_9 | 54 xmod_vco2_pitch_vco1_lvl_vco2_lvl |0~1|
| VCO 2 PITCH EG INT |25 vco_2_pitch_eg_int_b2_9 | 54 xmod_vco2_pitch_vco1_lvl_vco2_lvl |2~3|
| VCO 1 LEVEL |26 vco_1_level_b2_9 | 54 xmod_vco2_pitch_vco1_lvl_vco2_lvl |4~5|
| VCO 2 LEVEL |27 vco_2_level_b2_9 | 54 xmod_vco2_pitch_vco1_lvl_vco2_lvl |6~7|
| NOISE LEVEL |28 noise_level_b2_9 | 55 sync_ring_noise_cutoff_res |2~3|
| CUTOFF |29 cutoff_b2_9 | 55 sync_ring_noise_cutoff_res |4~5|
| RESONANCE |30 resonance_b2_9 | 55 sync_ring_noise_cutoff_res |6~7|
| CUTOFF EG INT |31 cutoff_eg_int_b2_9 | 56 cutoff_params |0~1|
| AMP EG ATTACK |34 amp_eg_attack_b2_9 | 57 amp_adsr |0~1|
| AMP EG DECAY |35 amp_eg_decay_b2_9 | 57 amp_adsr |2~3|
| AMP EG SUSTAIN |36 amp_eg_sustain_b2_9 | 57 amp_adsr |4~5|
| AMP EG RELEASE |37 amp_eg_release_b2_9 | 57 amp_adsr |6~7|
| EG ATTACK           |38 eg_attack_b2_9           | 58 eg_adsr |0~1|
| EG DECAY            |39 eg_decay_b2_9            | 58 eg_adsr |2~3|
| EG SUSTAIN          |40 eg_sustain_b2_9          | 58 eg_adsr |4~5|
| EG RELEASE          |41 eg_release_b2_9          | 58 eg_adsr |6~7| !Note: Korg docs say 59
| LFO RATE |42 lfo_rate_b2_9 | 60 lfo_wave_dly |0~1|
| LFO INT |43 lfo_int_b2_9 | 60 lfo_wave_dly |2~3|
| DELAY HI PASS CUTOFF|49 delay_hi_pass_cutoff_b2_9| 62 delay_b0_1 |2~3|
| DELAY TIME |50 delay_time_b2_9 | 62 delay_b0_1 |4~5|
| DELAY FEEDBACK |51 delay_feedback_b2_9 | 62 delay_b0_1 |6~7|
| VOICE MODE DEPTH |70 voice_mode_depth_b2_9 | 64 voice_mode |4~5|
+---------------------+----------------------------+----------+------------
"""
minilogue_og_patch_normalisation = (
("vco_1_pitch", "vco_1_pitch_b2_9_FF_2", "vco_1_pitch_shape_octave_wave_03_0"),
("vco_1_shape", "vco_1_shape_b2_9_FF_2", "vco_1_pitch_shape_octave_wave_0C_E"),
("vco_1_octave", "vco_1_pitch_shape_octave_wave_30_C"),
("vco_1_wave", "vco_1_pitch_shape_octave_wave_C0_A"),
("vco_2_pitch", "vco_2_pitch_b2_9_FF_2", "vco_2_pitch_shape_octave_wave_03_0"),
("vco_2_shape", "vco_2_shape_b2_9_FF_2", "vco_2_pitch_shape_octave_wave_0C_E"),
("vco_2_octave", "vco_2_pitch_shape_octave_wave_30_C"),
("vco_2_wave", "vco_2_pitch_shape_octave_wave_C0_A"),
("cross_mod_depth", "cross_mod_depth_b2_9_FF_2", "xmod_vco2_pitch_vco1_lvl_vco2_lvl_03_0"),
("vco_2_pitch_eg_int", "vco_2_pitch_eg_int_b2_9_FF_2", "xmod_vco2_pitch_vco1_lvl_vco2_lvl_0C_E"),
("vco_1_level", "vco_1_level_b2_9_FF_2", "xmod_vco2_pitch_vco1_lvl_vco2_lvl_30_C"),
("vco_2_level", "vco_2_level_b2_9_FF_2", "xmod_vco2_pitch_vco1_lvl_vco2_lvl_C0_A"),
("noise_level", "noise_level_b2_9_FF_2", "sync_ring_noise_cutoff_res_0C_E"),
("cutoff", "cutoff_b2_9_FF_2", "sync_ring_noise_cutoff_res_30_C"),
("resonance", "resonance_b2_9_FF_2", "sync_ring_noise_cutoff_res_C0_A"),
("sync", "sync_ring_noise_cutoff_res_01_0"),
("ring", "sync_ring_noise_cutoff_res_02_F"),
("cutoff_eg_int", "cutoff_eg_int_b2_9_FF_2", "cutoff_params_03_0"),
("cutoff_velocity", "cutoff_params_0C_E"),
("cutoff_kbd_track", "cutoff_params_30_C"),
("cutoff_type", "cutoff_params_40_A"),
("amp_eg_attack", "amp_eg_attack_b2_9_FF_2", "amp_adsr_03_0"),
("amp_eg_decay", "amp_eg_decay_b2_9_FF_2", "amp_adsr_0C_E"),
("amp_eg_sustain", "amp_eg_sustain_b2_9_FF_2", "amp_adsr_30_C"),
("amp_eg_release", "amp_eg_release_b2_9_FF_2", "amp_adsr_C0_A"),
("eg_attack", "eg_attack_b2_9_FF_2", "eg_adsr_03_0"),
("eg_decay", "eg_decay_b2_9_FF_2", "eg_adsr_0C_E"),
("eg_sustain", "eg_sustain_b2_9_FF_2", "eg_adsr_30_C"),
("eg_release", "eg_release_b2_9_FF_2", "eg_adsr_C0_A"),
("lfo_rate", "lfo_rate_b2_9_FF_2", "lfo_rate_int_tgt_eg_03_0"),
("lfo_int", "lfo_int_b2_9_FF_2", "lfo_rate_int_tgt_eg_0C_E"),
("lfo_target", "lfo_rate_int_tgt_eg_30_C"),
("lfo_eg", "lfo_rate_int_tgt_eg_C0_A"),
("lfo_wave", "lfo_wave_dly_03_0"),
("delay_output_routing", "lfo_wave_dly_C0_A"),
("delay_hi_pass_cutoff", "delay_hi_pass_cutoff_b2_9_FF_2", "delay_b0_1_0C_E"),
("delay_time", "delay_time_b2_9_FF_2", "delay_b0_1_30_C"),
("delay_feedback", "delay_feedback_b2_9_FF_2", "delay_b0_1_C0_A"),
("voice_mode", "voice_mode_and_depth_b0_1_07_0"),
("voice_mode_depth", "voice_mode_depth_b2_9_FF_2", "voice_mode_and_depth_b0_1_30_C"),
("bend_range_plus", "bend_range_plusminus_0F_0"),
("bend_range_minus", "bend_range_plusminus_F0_C"),
("lfo_key_sync", "lfo_portamento_params_01_0"),
("lfo_bpm_sync", "lfo_portamento_params_02_F"),
("lfo_voice_sync", "lfo_portamento_params_04_E"),
("portamento_bpm", "lfo_portamento_params_08_D"),
("portamento_mode", "lfo_portamento_params_10_C"),
)
minilogue_og_postnormalisation_deletions = (
"vco_1_pitch_b2_9",
"vco_1_shape_b2_9",
"vco_1_pitch_shape_octave_wave",
"vco_2_pitch_b2_9",
"vco_2_shape_b2_9",
"vco_2_pitch_shape_octave_wave",
"cross_mod_depth_b2_9",
"vco_2_pitch_eg_int_b2_9",
"vco_1_level_b2_9",
"vco_2_level_b2_9",
"noise_level_b2_9",
"cutoff_b2_9",
"resonance_b2_9",
"cutoff_eg_int_b2_9",
"amp_eg_attack_b2_9",
"amp_eg_decay_b2_9",
"amp_eg_sustain_b2_9",
"amp_eg_release_b2_9",
"eg_attack_b2_9",
"eg_decay_b2_9",
"eg_sustain_b2_9",
"eg_release_b2_9",
"lfo_rate_b2_9",
"lfo_int_b2_9",
"delay_hi_pass_cutoff_b2_9",
"delay_time_b2_9",
"delay_feedback_b2_9",
"delay_b0_1",
"voice_mode_depth_b2_9",
"voice_mode_and_depth_b0_1",
"bend_range_plusminus",
"xmod_vco2_pitch_vco1_lvl_vco2_lvl",
"sync_ring_noise_cutoff_res",
"cutoff_params",
"amp_adsr",
"eg_adsr",
"lfo_rate_int_tgt_eg",
"lfo_wave_dly",
"lfo_portamento_params",
)
def normalise_og_patch(patch):
"""Expand all encoded fields into a normalised form of patch object. This makes it
printable and easier to translate. Uses the minilogue_og_patch_normalisation
translation table:
("vco_1_pitch", "vco_1_pitch_b2_9_FF_2", "vco_1_pitch_shape_octave_wave_03_0"),
Args:
patch (Patch instance): raw minilogue og patch, read using
minilogue_og_patch_struct
Returns:
Patch instance: Decoded/expanded patch
"""
norm_patch = copy.deepcopy(patch)
for t in minilogue_og_patch_normalisation:
# t has form ('dest_name', 'src1_name_XX_x', 'src2_name_XX_x', ...)
dest_name, *srcs = t
dest_val = 0
for s in srcs:
src_name, mask, shift = decode_src_string(s)
source_val = getattr(patch, src_name) & mask
dest_val += common.signed_shift(source_val, shift)
setattr(norm_patch, dest_name, dest_val)
# Delete all encoded fields that won't be used anymore
for t in minilogue_og_postnormalisation_deletions:
delattr(norm_patch, t)
return norm_patch
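# Illustrative sketch: how a 10-bit parameter is recombined from its two encoded
# fields. The byte values are made up; the masks/shifts follow the naming rules above.
def _example_recombine_10bit(upper8=0xAB, low_byte=0x02):
    # 'xxx_b2_9_FF_2' -> mask 0xFF, shift +2 (parameter bits 2..9)
    # 'yyy_03_0'      -> mask 0x03, shift 0  (parameter bits 0..1)
    return ((upper8 & 0xFF) << 2) + (low_byte & 0x03)  # 0xAB, 0x02 -> 686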
def decode_src_string(src_string):
"""Decodes minilogue_og_patch_normalisation tuple src strings.
The tuples take the form (Note here N >= 1)
('dest_name', 'src1_name_XX_x', 'src2_name_XX_x', ..., 'srcN_name_XX_x')
'src1_name_XX_x' contains 'src1_name', the name of a source field, a hex bit mask XX
and a 2's-complement single-hex-digit x encoding the number of bits to shift up
(+ve values) or down (-ve) the masked values before adding them to the destination.
Args:
src_string (str): A string of the form described above.
Returns:
str: src_name substring
int: 8-bit bitmask defining a masked region of the src_name field
int: number of bits by which to shift the masked bit region
"""
src_parts = src_string.split("_")
src_name = "_".join(src_parts[:-2])
mask = int(src_parts[-2], base=16)
shift = int(src_parts[-1], base=16)
if shift > 7:
shift = -(~shift & 7) - 1 # decode negative part of signed 2's complement
return src_name, mask, shift
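# Worked examples for decode_src_string, derived from the encoding rules above:
def _example_decode_src_string():
    assert decode_src_string('vco_1_shape_b2_9_FF_2') == ('vco_1_shape_b2_9', 255, 2)
    # a trailing hex digit 'E' (14) is the 2's-complement encoding of shift -2
    assert decode_src_string('amp_adsr_0C_E') == ('amp_adsr', 12, -2)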
favorite_template = """\
<?xml version="1.0" encoding="UTF-8"?>
<minilogue_Favorite>
<Bank>
<Data>0</Data>
<Data>35</Data>
<Data>67</Data>
<Data>95</Data>
<Data>108</Data>
<Data>136</Data>
<Data>151</Data>
<Data>176</Data>
</Bank>
</minilogue_Favorite>
"""
"""
Korg's program format tables for the minilogue original (OG).
Any personal notes are designated Gn.
Minilogue OG
+-------+-------+---------+---------------------------------------------+
| Offset| Bit | Range | Description |
+-------+-------+---------+---------------------------------------------+
| 0~3 | | ASCII | 'PROG' |
+-------+-------+---------+---------------------------------------------+
| 4~15 | | ASCII | PROGRAM NAME [12] |
+-------+-------+---------+---------------------------------------------+
| 16~19 | | | Reserved |
+-------+-------+---------+---------------------------------------------+
| 20 | 0~7 | | VCO 1 PITCH (bit2~9) *note P1,P2 |
+-------+-------+---------+---------------------------------------------+
| 21 | 0~7 | | VCO 1 SHAPE (bit2~9) *note P1 |
+-------+-------+---------+---------------------------------------------+
| 22 | 0~7 | | VCO 2 PITCH (bit2~9) *note P1,P2 |
+-------+-------+---------+---------------------------------------------+
| 23 | 0~7 | | VCO 2 SHAPE (bit2~9) *note P1 |
+-------+-------+---------+---------------------------------------------+
| 24 | 0~7 | | CROSS MOD DEPTH (bit2~9) *note P1 |
+-------+-------+---------+---------------------------------------------+
| 25 | 0~7 | | VCO 2 PITCH EG INT (bit2~9) *note P1,P3 |
+-------+-------+---------+---------------------------------------------+
| 26 | 0~7 | | VCO 1 LEVEL (bit2~9) *note P1 |
+-------+-------+---------+---------------------------------------------+
| 27 | 0~7 | | VCO 2 LEVEL (bit2~9) *note P1 |
+-------+-------+---------+---------------------------------------------+
| 28 | 0~7 | | NOISE LEVEL (bit2~9) *note P1 |
+-------+-------+---------+---------------------------------------------+
| 29 | 0~7 | | CUTOFF (bit2~9) *note P1 |
+-------+-------+---------+---------------------------------------------+
| 30 | 0~7 | | RESONANCE (bit2~9) *note P1 |
+-------+-------+---------+---------------------------------------------+
| 31 | 0~7 | | CUTOFF EG INT (bit2~9) *note P1,P4 |
+-------+-------+---------+---------------------------------------------+
| 32 | | | Reserved |
+-------+-------+---------+---------------------------------------------+
| 33 | 0~7 | 0~127 | Amp Velocity |
+-------+-------+---------+---------------------------------------------+
| 34 | 0~7 | | AMP EG ATTACK (bit2~9) *note P1 |
+-------+-------+---------+---------------------------------------------+
| 35 | 0~7 | | AMP EG DECAY (bit2~9) *note P1 |
+-------+-------+---------+---------------------------------------------+
| 36 | 0~7 | | AMP EG SUSTAIN (bit2~9) *note P1 |
+-------+-------+---------+---------------------------------------------+
| 37 | 0~7 | | AMP EG RELEASE (bit2~9) *note P1 |
+-------+-------+---------+---------------------------------------------+
| 38 | 0~7 | | EG ATTACK (bit2~9) *note P1 |
+-------+-------+---------+---------------------------------------------+
| 39 | 0~7 | | EG DECAY (bit2~9) *note P1 |
+-------+-------+---------+---------------------------------------------+
| 40 | 0~7 | | EG SUSTAIN (bit2~9) *note P1 |
+-------+-------+---------+---------------------------------------------+
| 41 | 0~7 | | EG RELEASE (bit2~9) *note P1 |
+-------+-------+---------+---------------------------------------------+
| 42 | 0~7 | | LFO RATE (bit2~9) *note P1,P5 |
+-------+-------+---------+---------------------------------------------+
| 43 | 0~7 | | LFO INT (bit2~9) *note P1 |
+-------+-------+---------+---------------------------------------------+
| 44~48 | | | Reserved |
+-------+-------+---------+---------------------------------------------+
| 49 | 0~7 | | DELAY HI PASS CUTOFF (bit2~9) *note P1 |
+-------+-------+---------+---------------------------------------------+
experiment.configuration)
def check_cmd_args_config(self, tmp_path, conf_file, monkeypatch):
"""Check that cmdargs configuration overrides global/envvars/local configuration"""
command = f"hunt --worker-max-trials 0 -c {conf_file} --branch-from test-name --enable-evc"
command += " " + " ".join(
"--{} {}".format(name, value) for name, value in self.cmdargs.items()
)
orion.core.cli.main(command.split(" "))
storage = get_storage()
experiment = get_experiment("exp-name")
assert experiment.name == "exp-name"
assert experiment.node.parent.name == "test-name"
assert experiment.version == 1
assert experiment.metadata["user"] == self.cmdargs["user"]
assert experiment.max_trials == self.cmdargs["exp-max-trials"]
assert experiment.max_broken == self.cmdargs["exp-max-broken"]
assert experiment.working_dir == self.cmdargs["working-dir"]
class TestWorkerConfig(ConfigurationTestSuite):
"""Test suite for worker configuration"""
config = {
"worker": {
"n_workers": 2,
"pool_size": 2,
"executor": "dask",
"executor_configuration": {"threads_per_worker": 1},
"heartbeat": 30,
"max_trials": 10,
"max_broken": 5,
"reservation_timeout": 16,
"max_idle_time": 15,
"interrupt_signal_code": 131,
"user_script_config": "cfg",
}
}
env_vars = {
"ORION_N_WORKERS": 3,
"ORION_POOL_SIZE": 1,
"ORION_EXECUTOR": "joblib",
"ORION_HEARTBEAT": 40,
"ORION_WORKER_MAX_TRIALS": 20,
"ORION_WORKER_MAX_BROKEN": 6,
"ORION_RESERVATION_TIMEOUT": 17,
"ORION_MAX_IDLE_TIME": 16,
"ORION_INTERRUPT_CODE": 132,
"ORION_USER_SCRIPT_CONFIG": "envcfg",
}
local = {
"worker": {
"n_workers": 4,
"pool_size": 5,
"executor": "dask",
"executor_configuration": {"threads_per_worker": 2},
"heartbeat": 50,
"max_trials": 30,
"max_broken": 7,
"reservation_timeout": 17,
"max_idle_time": 16,
"interrupt_signal_code": 133,
"user_script_config": "lclcfg",
}
}
cmdargs = {
"n-workers": 1,
"pool-size": 6,
"executor": "dask",
"heartbeat": 70,
"worker-max-trials": 1,
"worker-max-broken": 8,
"reservation-timeout": 18,
"max-idle-time": 17,
"interrupt-signal-code": 134,
"user-script-config": "cmdcfg",
}
def sanity_check(self):
"""Check that defaults are different than testing configuration"""
assert orion.core.config.to_dict()["worker"] != self.config["worker"]
def _mock(self, monkeypatch):
self._mock_exp_client(monkeypatch)
self._mock_consumer(monkeypatch)
self._mock_producer(monkeypatch)
self._mock_workon(monkeypatch)
def _mock_exp_client(self, monkeypatch):
self.exp_client = None
old_init = orion.client.experiment.ExperimentClient.__init__
def init(c_self, *args, **kwargs):
old_init(c_self, *args, **kwargs)
self.exp_client = c_self
monkeypatch.setattr(orion.client.experiment.ExperimentClient, "__init__", init)
def _mock_consumer(self, monkeypatch):
self.consumer = None
old_init = orion.core.cli.hunt.Consumer.__init__
def init(c_self, *args, **kwargs):
old_init(c_self, *args, **kwargs)
self.consumer = c_self
monkeypatch.setattr(orion.core.cli.hunt.Consumer, "__init__", init)
def _mock_producer(self, monkeypatch):
self.producer = None
old_init = orion.core.cli.hunt.Producer.__init__
def init(p_self, *args, **kwargs):
old_init(p_self, *args, **kwargs)
self.producer = p_self
monkeypatch.setattr(orion.core.cli.hunt.Producer, "__init__", init)
def _mock_workon(self, monkeypatch):
workon = orion.core.cli.hunt.workon
self.workon_kwargs = None
def mocked_workon(experiment, **kwargs):
self.workon_kwargs = kwargs
return workon(experiment, **kwargs)
monkeypatch.setattr("orion.core.cli.hunt.workon", mocked_workon)
def _check_mocks(self, config):
self._check_exp_client(config)
self._check_consumer(config)
self._check_workon(config)
def _check_exp_client(self, config):
assert self.exp_client.heartbeat == config["heartbeat"]
def _check_consumer(self, config):
assert (
self.consumer.template_builder.config_prefix == config["user_script_config"]
)
assert self.consumer.interrupt_signal_code == config["interrupt_signal_code"]
def _check_workon(self, config):
assert self.workon_kwargs["n_workers"] == config["n_workers"]
assert self.workon_kwargs["executor"] == config["executor"]
assert (
self.workon_kwargs["executor_configuration"]
== config["executor_configuration"]
)
assert self.workon_kwargs["pool_size"] == config["pool_size"]
assert (
self.workon_kwargs["reservation_timeout"] == config["reservation_timeout"]
)
assert self.workon_kwargs["max_trials"] == config["max_trials"]
assert self.workon_kwargs["max_broken"] == config["max_broken"]
def check_global_config(self, tmp_path, monkeypatch):
"""Check that global configuration is set properly"""
assert orion.core.config.to_dict()["worker"] == self.config["worker"]
self._mock(monkeypatch)
command = f"hunt --exp-max-trials 0 -n test python {script} -x~uniform(0,1)"
orion.core.cli.main(command.split(" "))
self._check_mocks(self.config["worker"])
def check_env_var_config(self, tmp_path, monkeypatch):
"""Check that env vars overrides global configuration"""
env_var_config = {
"n_workers": self.env_vars["ORION_N_WORKERS"],
"pool_size": self.env_vars["ORION_POOL_SIZE"],
"executor": self.env_vars["ORION_EXECUTOR"],
"executor_configuration": self.config["worker"]["executor_configuration"],
"heartbeat": self.env_vars["ORION_HEARTBEAT"],
"max_trials": self.env_vars["ORION_WORKER_MAX_TRIALS"],
"max_broken": self.env_vars["ORION_WORKER_MAX_BROKEN"],
"reservation_timeout": self.env_vars["ORION_RESERVATION_TIMEOUT"],
"max_idle_time": self.env_vars["ORION_MAX_IDLE_TIME"],
"interrupt_signal_code": self.env_vars["ORION_INTERRUPT_CODE"],
"user_script_config": self.env_vars["ORION_USER_SCRIPT_CONFIG"],
}
assert orion.core.config.to_dict()["worker"] == env_var_config
# Override executor configuration otherwise joblib will fail.
orion.core.config.worker.executor_configuration = {}
env_var_config["executor_configuration"] = {}
self._mock(monkeypatch)
command = f"hunt --exp-max-trials 0 -n test python {script} -x~uniform(0,1)"
orion.core.cli.main(command.split(" "))
self._check_mocks(env_var_config)
def check_db_config(self):
"""No Storage config in DB, no test"""
pass
def check_local_config(self, tmp_path, conf_file, monkeypatch):
"""Check that local configuration overrides global/envvars configuration"""
self._mock(monkeypatch)
# Override executor so that executor and configuration are coherent in global config
os.environ["ORION_EXECUTOR"] = "dask"
command = f"hunt --exp-max-trials 0 -n test -c {conf_file} python {script} -x~uniform(0,1)"
orion.core.cli.main(command.split(" "))
self._check_mocks(self.local["worker"])
def check_cmd_args_config(self, tmp_path, conf_file, monkeypatch):
"""Check that cmdargs configuration overrides global/envvars/local configuration"""
config = {
"n_workers": self.cmdargs["n-workers"],
"executor": self.cmdargs["executor"],
"executor_configuration": {"threads_per_worker": 2},
"pool_size": self.cmdargs["pool-size"],
"reservation_timeout": self.cmdargs["reservation-timeout"],
"heartbeat": self.cmdargs["heartbeat"],
"max_trials": self.cmdargs["worker-max-trials"],
"max_broken": self.cmdargs["worker-max-broken"],
"interrupt_signal_code": self.cmdargs["interrupt-signal-code"],
"user_script_config": self.cmdargs["user-script-config"],
}
self._mock(monkeypatch)
# Override executor so that executor and configuration are coherent in global config
os.environ["ORION_EXECUTOR"] = "dask"
command = f"hunt --worker-max-trials 0 -c {conf_file} -n cmd-test"
command += " " + " ".join(
"--{} {}".format(name, value) for name, value in self.cmdargs.items()
)
command += f" python {script} -x~uniform(0,1)"
orion.core.cli.main(command.split(" "))
self._check_mocks(config)
class TestEVCConfig(ConfigurationTestSuite):
"""Test for EVC configuration"""
config = {
"evc": {
"enable": False,
"auto_resolution": False,
"manual_resolution": True,
"non_monitored_arguments": ["test", "one"],
"ignore_code_changes": True,
"algorithm_change": True,
"code_change_type": "noeffect",
"cli_change_type": "noeffect",
"config_change_type": "noeffect",
"orion_version_change": True,
}
}
env_vars = {
"ORION_EVC_ENABLE": "true",
"ORION_EVC_MANUAL_RESOLUTION": "",
"ORION_EVC_NON_MONITORED_ARGUMENTS": "test:two:others",
"ORION_EVC_IGNORE_CODE_CHANGES": "",
"ORION_EVC_ALGO_CHANGE": "",
"ORION_EVC_CODE_CHANGE": "unsure",
"ORION_EVC_CMDLINE_CHANGE": "unsure",
"ORION_EVC_CONFIG_CHANGE": "unsure",
"ORION_EVC_ORION_VERSION_CHANGE": "",
}
local = {
"evc": {
"enable": False,
"manual_resolution": True,
"non_monitored_arguments": ["test", "local"],
"ignore_code_changes": False,
"algorithm_change": True,
"code_change_type": "break",
"cli_change_type": "break",
"config_change_type": "noeffect",
"orion_version_change": True,
}
}
cmdargs = {
"enable-evc": True,
"manual-resolution": False,
"non-monitored-arguments": "test:cmdargs",
"ignore-code-changes": True,
"algorithm-change": False,
"code-change-type": "noeffect",
"cli-change-type": "unsure",
"config-change-type": "break",
"orion-version-change": False,
}
def sanity_check(self):
"""Check that defaults are different than testing configuration"""
assert orion.core.config.to_dict()["evc"] != self.config["evc"]
def _mock_consumer(self, monkeypatch):
self.consumer = None
old_init = orion.core.cli.hunt.Consumer.__init__
def init(c_self, *args, **kwargs):
old_init(c_self, *args, **kwargs)
self.consumer = c_self
monkeypatch.setattr(orion.core.cli.hunt.Consumer, "__init__", init)
def _check_consumer(self, config):
assert self.consumer.ignore_code_changes == config["ignore_code_changes"]
def check_global_config(self, tmp_path, monkeypatch):
"""Check that global configuration is set properly"""
assert orion.core.config.to_dict()["evc"] == self.config["evc"]
name = "global-test"
command = f"hunt --enable-evc --worker-max-trials 0 -n {name} python {script} -x~uniform(0,1)"
assert orion.core.cli.main(command.split(" ")) == 0
        # Test that manual_resolution is True and that changing the cli causes a branch (thus the command fails)
assert orion.core.cli.main(f"{command} --cli-change ".split(" ")) == 1
command = "hunt --auto-resolution " + command[5:]
self._check_enable(name, command.replace(" --enable-evc", ""), enabled=False)
self._check_cli_change(name, command, change_type="noeffect")
self._check_non_monitored_arguments(
name, command, non_monitored_arguments=["test", "one"]
)
self._check_script_config_change(
tmp_path, name, command, change_type="noeffect"
)
# EVC not enabled, code change should be ignored even if option is set to True
assert self.config["evc"]["enable"] is False
with monkeypatch.context() as m:
self._check_code_change(
m,
name,
command.replace("--enable-evc ", ""),
mock_ignore_code_changes=True,
ignore_code_changes=True,
change_type=self.config["evc"]["code_change_type"],
enable_evc=False,
)
# EVC is enabled, option should be honored
with monkeypatch.context() as m:
self._check_code_change(
m,
name,
command,
mock_ignore_code_changes=None,
ignore_code_changes=self.config["evc"]["ignore_code_changes"],
change_type=self.config["evc"]["code_change_type"],
enable_evc=True,
)
def check_env_var_config(self, tmp_path, monkeypatch):
"""Check that env vars overrides global configuration"""
assert orion.core.config.evc.enable
assert not orion.core.config.evc.manual_resolution
assert not orion.core.config.evc.ignore_code_changes
assert not orion.core.config.evc.algorithm_change
assert orion.core.config.evc.non_monitored_arguments == [
"test",
"two",
"others",
]
assert orion.core.config.evc.code_change_type == "unsure"
assert orion.core.config.evc.cli_change_type == "unsure"
assert orion.core.config.evc.config_change_type == "unsure"
assert not orion.core.config.evc.orion_version_change
name = "env-var-test"
command = (
f"hunt --worker-max-trials 0 -n {name} python {script} -x~uniform(0,1)"
)
assert orion.core.cli.main(command.split(" ")) == 0
self._check_enable(name, command, enabled=True)
self._check_cli_change(name, command, change_type="unsure")
self._check_non_monitored_arguments(
name, command, non_monitored_arguments=["test", "two", "others"]
)
self._check_script_config_change(tmp_path, name, command, change_type="unsure")
# Enable EVC, ignore_code_changes is False
with monkeypatch.context() as m:
self._check_code_change(
m,
name,
command,
mock_ignore_code_changes=None,
ignore_code_changes=bool(
self.env_vars["ORION_EVC_IGNORE_CODE_CHANGES"]
),
change_type=self.env_vars["ORION_EVC_CODE_CHANGE"],
enable_evc=True,
)
# Disable EVC, ignore_code_changes is True for Consumer
os.environ["ORION_EVC_ENABLE"] = ""
with monkeypatch.context() as m:
self._check_code_change(
m,
name,
command,
mock_ignore_code_changes=None,
ignore_code_changes=bool(
self.env_vars["ORION_EVC_IGNORE_CODE_CHANGES"]
),
change_type=self.env_vars["ORION_EVC_CODE_CHANGE"],
enable_evc=False,
)
def check_db_config(self):
"""No Storage config in DB, no test"""
pass
def check_local_config(self, tmp_path, conf_file, monkeypatch):
"""Check that local configuration overrides global/envvars configuration"""
name = "local-test"
command = (
f"hunt --enable-evc --worker-max-trials 0 -n {name} -c {conf_file} "
f"python {script} -x~uniform(0,1)"
)
assert orion.core.cli.main(command.split(" ")) == 0
        # Test that manual_resolution is True and that changing the cli causes a branch
assert orion.core.cli.main(f"{command} --cli-change ".split(" ")) == 1
command = "hunt --auto-resolution " + command[5:]
self._check_enable(name, command.replace(" --enable-evc", ""), enabled=False)
self._check_cli_change(
name, command, change_type=self.local["evc"]["cli_change_type"]
)
self._check_non_monitored_arguments(
name,
command,
non_monitored_arguments=self.local["evc"]["non_monitored_arguments"],
)
self._check_script_config_change(
tmp_path,
name,
command,
change_type=self.local["evc"]["config_change_type"],
)
        # EVC enabled: ignore_code_changes should follow the local config
with monkeypatch.context() as m:
self._check_code_change(
m,
name,
command,
mock_ignore_code_changes=False,
ignore_code_changes=self.local["evc"]["ignore_code_changes"],
change_type=self.local["evc"]["code_change_type"],
enable_evc=True,
)
        # EVC disabled: the consumer should ignore code changes regardless
with monkeypatch.context() as m:
self._check_code_change(
m,
name,
command.replace("--enable-evc ", ""),
mock_ignore_code_changes=False,
ignore_code_changes=self.local["evc"]["ignore_code_changes"],
change_type=self.local["evc"]["code_change_type"],
enable_evc=False,
)
def check_cmd_args_config(self, tmp_path, conf_file, monkeypatch):
"""Check that cmdargs configuration overrides global/envvars/local configuration"""
name = "cmd-test"
command = (
f"hunt --worker-max-trials 0 -c {conf_file} -n {name} "
"--enable-evc "
"--auto-resolution "
f"python {script} -x~uniform(0,1)"
)
assert orion.core.cli.main(command.split(" ")) == 0
self._check_enable(name, command, enabled=True)
self._check_cli_change(
name,
command="hunt --cli-change-type unsure " + command[5:],
change_type=self.cmdargs["cli-change-type"],
)
self._check_non_monitored_arguments(
name,
command="hunt --non-monitored-arguments test:cmdargs " + command[5:],
non_monitored_arguments=self.cmdargs["non-monitored-arguments"].split(":"),
)
self._check_script_config_change(
tmp_path,
name,
command="hunt --config-change-type break " + command[5:],
change_type=self.cmdargs["config-change-type"],
)
# Mock local to ignore_code_changes=False
fetch_config = orion.core.io.resolve_config.fetch_config
def mock_local(cmdargs):
config = fetch_config(cmdargs)
config["evc"]["ignore_code_changes"] = False
return config
monkeypatch.setattr(orion.core.io.resolve_config, "fetch_config", mock_local)
# Check that ignore_code_changes is rightly False
with monkeypatch.context() as m:
self._check_code_change(
m,
name,
command="hunt --code-change-type noeffect " + command[5:],
mock_ignore_code_changes=False,
ignore_code_changes=False,
change_type=self.cmdargs["code-change-type"],
enable_evc=True,
)
# Check that ignore_code_changes is now True because --ignore-code-changes was added
        with monkeypatch.context() as m:
            # assumed continuation, mirroring the previous check but with
            # --ignore-code-changes added to the command
            self._check_code_change(
                m,
                name,
                command="hunt --ignore-code-changes --code-change-type noeffect "
                + command[5:],
                mock_ignore_code_changes=False,
                ignore_code_changes=True,
                change_type=self.cmdargs["code-change-type"],
                enable_evc=True,
            )
# Source: sorrento/sdopt-tearing
# Copyright (C) 2014, 2015 University of Vienna
# All rights reserved.
# BSD license.
# Author: <NAME> <<EMAIL>>
from __future__ import print_function
from itertools import chain, groupby
from six import itervalues
from networkx import Graph, NetworkXUnfeasible, relabel_nodes
from networkx.algorithms.bipartite import is_bipartite_node_set
from matching import maxmatch_len
from pqueue import PriorityQueue as minheap
from py3compat import irange, cPickle_dumps, cPickle_loads, cPickle_HIGHEST_PROTOCOL
from utils import izip, pairwise
#-------------------------------------------------------------------------------
# The input is assumed to be correct and is not checked!
# The functions in this section are subject to removal. The coordinate format
# seems superior to the row-wise stored sparse matrices.
def to_bipartite(rows, cols_rowwise):
'Returns: (g, eqs). Assumes disjoint row and column identifier sets.'
edge_list = ((r, c) for r, cols in izip(rows, cols_rowwise) for c in cols)
g = Graph(edge_list)
g.add_nodes_from(rows) # Empty rows are allowed (but not empty columns)
eqs = set(rows)
assert is_bipartite_node_set(g, eqs)
return g, eqs
def to_bipartite_w_forbidden(rows, cols_rowwise, vals_rowwise):
'''Returns: (g, eqs, forbidden). Assumes disjoint row and column
identifier sets.'''
g, forbidden = Graph(), set()
g.add_nodes_from(rows) # Empty rows are allowed (but not empty columns)
for r, cols, vals in izip(rows, cols_rowwise, vals_rowwise):
for c, v in izip(cols, vals):
g.add_edge(r, c)
if v > 1:
forbidden.add((r, c))
assert is_bipartite_node_set(g, rows)
return g, set(rows), forbidden
def to_bipart_w_weights(cols_rowwise, vals_rowwise):
'''Returns the tuple of: g, eqs, mapping (a list) to undo the row
permutation by weight, and the row weights in the same order as in the
input. This function does not receive the row identifiers but makes up
new ones: 0, 1, ..., n_rows-1.'''
n_rows = len(cols_rowwise)
rows = list(irange(n_rows))
row_weights = [sum(vals, 0.0) for vals in vals_rowwise]
row_pos = argsort(row_weights)
#print('Row weights: ', row_weights)
#print('Row position:', row_pos)
g = Graph()
g.add_nodes_from(rows) # Empty rows are allowed (but not empty columns)
# Apply row permutation row_pos
edges = ((i,c) for i, r in enumerate(row_pos) for c in cols_rowwise[r])
g.add_edges_from(edges)
assert is_bipartite_node_set(g, rows) # Same ID for both a col and a row?
return g, set(rows), row_pos, row_weights
#-------------------------------------------------------------------------------
def argsort(seq, reverse=False):
return sorted(range(len(seq)), key=seq.__getitem__, reverse=reverse)
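# Worked example: argsort returns the index permutation that sorts the sequence
def _example_argsort():
    assert argsort([0.3, 0.1, 0.2]) == [1, 2, 0]
    assert argsort([0.3, 0.1, 0.2], reverse=True) == [0, 2, 1]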
def get_row_weights(g, n_rows):
return [sum(d['weight'] for d in itervalues(g[r])) for r in irange(n_rows)]
def partial_relabel(g, mapping):
return relabel_nodes(g, mapping, copy=True)
#-------------------------------------------------------------------------------
def bipartite_from_empty_matrix(shape):
n_rows, n_cols = shape
assert n_rows >= 0 and n_cols >= 0
r_nodes = list(irange(n_rows))
cols = [ ]
g = Graph()
g.add_nodes_from(r_nodes)
g.add_nodes_from(irange(n_rows, n_rows+n_cols))
assert len(g) == n_rows + n_cols
return g, set(r_nodes), cols
def coo_matrix_to_bipartite(rows, cols, values, shape):
check_coordinate_format(rows, cols, values, shape)
# See _check_coordinate_format in rpc_api too
n_rows, n_cols = shape
r_nodes = list(irange(n_rows))
# relabel the columns, the caller must undo it later
cols = [c+n_rows for c in cols]
g = Graph()
g.add_nodes_from(r_nodes)
g.add_nodes_from(irange(n_rows, n_rows+n_cols))
assert len(g) == n_rows + n_cols
g.add_edges_from(izip(rows, cols, ({'weight': int(v)} for v in values)))
return g, set(r_nodes), cols
def check_coordinate_format(rows, cols, values, shape):
n_rows, n_cols = shape
assert n_rows >= 1 and n_cols >= 1, 'At least one row and one column are expected'
assert rows and cols and values, 'There should be at least one entry'
assert len(rows) == len(cols) and len(rows) == len(values), 'Array length mismatch'
check_if_indices_are_in_range(rows, shape[0], 'row')
check_if_indices_are_in_range(cols, shape[1], 'column')
def check_if_indices_are_in_range(idx, n_elems, elem_type, empty_allowed=False):
if empty_allowed and not idx:
return
min_idx = min(idx)
assert min_idx >= 0, 'The smallest {} index is {}'.format(elem_type, min_idx)
max_idx = max(idx)
assert max_idx < n_elems, 'The largest {} index is {}'.format(elem_type, max_idx)
def get_inverse_perm(rperm, cperm):
rowp = [-1]*len(rperm)
colp = [-1]*len(cperm)
for i, r in enumerate(rperm):
rowp[r] = i
for i, c in enumerate(cperm):
colp[c] = i
assert rowp.count(-1) == 0
assert colp.count(-1) == 0
return rowp, colp
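# Worked example: rperm[i] = r means row r moved to position i; the inverse
# permutation maps each row back to its position.
def _example_get_inverse_perm():
    rowp, colp = get_inverse_perm([2, 0, 1], [1, 0])
    assert rowp == [1, 2, 0]
    assert colp == [1, 0]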
#-------------------------------------------------------------------------------
# Helper functions for the bipartite case, without any block structure.
# The functions in this section are subject to removal. They do not produce
# proper spiked forms (the blocks are not properly nested).
def get_row_col_perm(eqs, dag, tears, sinks, order):
'''Returns the row and the column identifiers in permuted order for the
spiked form, given by the bipartite dag.'''
gen_cols = (n for n in order if n not in eqs)
indexof = { name : i for i, name in enumerate(gen_cols) }
tear_stack = list(tears)
tear_stack.sort(key=lambda v: indexof[v], reverse=True)
resids = set(sinks)
rows = [ n for n in order if n in eqs ]
cols = [ ]
for n in rows: # we walk along the envelope
if n not in resids:
# eq must have exactly one out edge in a valid elimination order
(var,) = dag[n]
else:
var = tear_stack.pop() # Throws if structurally singular
cols.append(var)
assert not tear_stack
assert len(rows) == len(eqs)
assert len(cols) == len(dag) - len(eqs)
return (rows, cols)
def colp_to_spiked_form(rowp, colp_hess, matches, tear_set, sink_set):
'''Returns the column permutation vector to spiked form, given by the
bipartite matching and row permutation vector rowp.'''
assert len(rowp) == len(colp_hess)
tear_stack = list(n for n in reversed(colp_hess) if n in tear_set)
colp = [ ]
for n in rowp:
if n not in sink_set:
var = matches[n]
assert var not in tear_set
else:
var = tear_stack.pop() # Throws if structurally singular
colp.append(var)
assert not tear_stack
assert len(colp) == len(colp_hess)
return colp
def check_spiked_form(g_orig, rowp, colp, tear_set):
# A rather weak test: does not check for proper nesting of the blocks.
assert len(rowp) == len(colp)
r_index = { name : i for i, name in enumerate(n for n in rowp) }
first_elem = [ min(r_index[r] for r in g_orig[c]) for c in colp ]
for i, c in enumerate(colp):
first_nonzero = first_elem[i]
if c in tear_set:
assert first_nonzero <= i # on or above the diagonal
else:
assert first_nonzero == i # on the diagonal
#-------------------------------------------------------------------------------
# The underscore-prefixed version of get_hessenberg_order and build_colp
# puts the tears first.
def get_hessenberg_order(g, eqs, rowp):
colp = build_colp(g, rowp)
# append all isolated columns at the back
isolated_cols = sorted(n for n in g if n not in eqs and len(g[n])==0)
colp.extend(isolated_cols)
assert len(rowp) == len(eqs)
assert len(colp) == len(g) - len(eqs)
check_nonincreasing_envelope(g, rowp, colp)
return colp
def build_colp(g, rowp):
colp, seen_cols = [ ], set()
adj = g.adj
for r in rowp:
cols = set(adj[r]) - seen_cols
if cols:
to_append = sorted(cols)
colp.extend(to_append)
seen_cols.update(to_append)
return colp
def check_nonincreasing_envelope(g, rowp, colp):
c_index = { name : i for i, name in enumerate(n for n in colp) }
r_index = { name : i for i, name in enumerate(n for n in rowp) }
# Last occupied columns rowwise, empty rows allowed
adj = g.adj
last_elem = [max(c_index[c] for c in adj[r]) if adj[r] else -1 for r in rowp]
c_viol = _non_monotone_indices(last_elem)
#if c_viol:
# from plot_ordering import plot_bipartite
# plot_bipartite(g, set(), rowp, colp)
assert not c_viol, 'Non-monotone last elements in rows: {}'.format(c_viol)
# First occupied rows columnwise
n_rows = len(rowp)
first_elem = [min(r_index[r] for r in adj[c]) if adj[c] else n_rows for c in colp]
r_viol = _non_monotone_indices(first_elem)
assert not r_viol, 'Non-monotone first elements in cols: {}'.format(r_viol)
return last_elem, first_elem # <-- hessenberg_to_spike needs this
def _non_monotone_indices(lst):
return [i for i, (u, v) in enumerate(pairwise(lst)) if u > v]
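# Worked example: index 1 is reported because the list decreases from 3 to 2 there
def _example_non_monotone_indices():
    assert _non_monotone_indices([1, 3, 2, 5]) == [1]
    assert _non_monotone_indices([1, 2, 3]) == []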
#-------------------------------------------------------------------------------
# Similar to heap_md.min_degree but orders primarily according to the g_torn
def permute_to_hessenberg(g_orig, eqs, forbidden, tears):
g_allowed, g, g_torn = _setup_graphs(g_orig, eqs, forbidden, tears)
#from plot_ordering import plot_bipartite_no_red_greedy_order as plot_bipartite
#plot_bipartite(g_torn, eqs, set(edge for edge in g_torn.edges_iter(eqs)
# if edge in forbidden) )
tear_set = set(tears)
heap = _create_heap(g_allowed, g, g_torn, eqs)
rowp, matches = [ ], { }
while heap:
(torn_cost, cost, tot, eq), _ = heap.popitem()
#print('Eq:', eq)
rowp.append(eq)
candidates = set(g_allowed[eq])-tear_set
if candidates:
var = sorted(candidates)[0] # or [-1] for last
assert eq not in matches
assert var not in matches
matches[eq] = var
matches[var] = eq
#print('Var:', var)
vrs = sorted(g[eq])
eqs_update = set(chain.from_iterable(g[v] for v in vrs))
eqs_update.discard(eq)
g_allowed.remove_node(eq)
g.remove_node(eq)
g_torn.remove_node(eq)
g_allowed.remove_nodes_from(vrs)
g.remove_nodes_from(vrs)
g_torn.remove_nodes_from(vrs)
for e in sorted(eqs_update): # keep in sync with create_heap
nbrs = g_torn[e]
torn_tot = len(nbrs)
has_allowed_edge = any(g_allowed.has_edge(e, v) for v in nbrs)
torn_cost = torn_tot-1 if has_allowed_edge else torn_tot
tot = len(g[e])
            has_allowed = any(g_allowed.has_edge(e, v) for v in nbrs)
            cost = tot - 1 if has_allowed else tot
            # assumed continuation: re-prioritise e in the heap, mirroring the
            # (torn_cost, cost, tot, eq) keys popped above
            heap[e] = (torn_cost, cost, tot, e)
"""
Create an interactive map for selecting satellite imagery and exporting image files.
"""
# Load modules
import datacube
import itertools
import numpy as np
import matplotlib.pyplot as plt
from odc.ui import select_on_a_map
from datacube.utils.geometry import CRS
from datacube.utils import masking
from skimage import exposure
from ipyleaflet import (WMSLayer, basemaps, basemap_to_tiles)
from traitlets import Unicode
from deafrica_tools.spatial import reverse_geocode
from deafrica_tools.dask import create_local_dask_cluster
def select_region_app(date,
satellites,
size_limit=10000):
"""
An interactive app that allows the user to select a region from a
map using imagery from Sentinel-2 and Landsat. The output of this
function is used as the input to :func:`export_image_app` to export high-
resolution satellite images.
Last modified: September 2021
Parameters
----------
date : str
The exact date used to plot imagery on the interactive map
(e.g. ``date='1988-01-01'``).
satellites : str
The satellite data to plot on the interactive map. The
following options are supported:
``'Landsat-9'``: data from the Landsat 9 satellite
``'Landsat-8'``: data from the Landsat 8 satellite
``'Landsat-7'``: data from the Landsat 7 satellite
``'Landsat-5'``: data from the Landsat 5 satellite
``'Sentinel-2'``: data from Sentinel-2A and Sentinel-2B
``'Sentinel-2 geomedian'``: data from the Sentinel-2 annual geomedian
size_limit : int, optional
An optional size limit for the area selection in sq km.
Defaults to 10000 sq km.
Returns
-------
A dictionary containing:
* 'geopolygon' (defining the area to export imagery from),
* 'date' (date used to export imagery), and
* 'satellites' (the satellites from which to extract imagery).
These are passed to the :func:`export_image_app` function to export the image.
"""
########################
# Select and load data #
########################
# Load DEA WMS
class TimeWMSLayer(WMSLayer):
time = Unicode('').tag(sync=True, o=True)
# WMS layers
wms_params = {
'Landsat-9': 'ls9_sr',
'Landsat-8': 'ls8_sr',
'Landsat-7': 'ls7_sr',
'Landsat-5': 'ls5_sr',
'Sentinel-2': 's2_l2a',
'Sentinel-2 geomedian': 'gm_s2_annual'
}
time_wms = TimeWMSLayer(url='https://ows.digitalearth.africa/',
layers=wms_params[satellites],
time=date,
format='image/png',
transparent=True,
attribution='Digital Earth Africa')
# Plot interactive map to select area
basemap = basemap_to_tiles(basemaps.OpenStreetMap.Mapnik)
geopolygon = select_on_a_map(height='1000px',
layers=(
basemap,
time_wms,
),
center=(4, 20),
zoom=4)
    # Test size of selected area (m^2 converted to km^2 in the equal-area EPSG:6933 CRS)
area = geopolygon.to_crs(crs=CRS('epsg:6933')).area / 1000000
if area > size_limit:
print(f'Warning: Your selected area is {area:.00f} sq km. '
f'Please select an area of less than {size_limit} sq km.'
f'\nTo select a smaller area, re-run the cell '
f'above and draw a new polygon.')
else:
return {'geopolygon': geopolygon,
'date': date,
'satellites': satellites}
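# A minimal usage sketch (hypothetical values; assumes this module is run in a
# Digital Earth Africa Sandbox notebook where the interactive map can render),
# following the flow described in the docstrings above:
#
#     selection = select_region_app(date='2021-01-01', satellites='Sentinel-2')
#     if selection is not None:
#         export_image_app(**selection, style='True colour', output_format='png')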
def export_image_app(geopolygon,
date,
satellites,
style='True colour',
resolution=None,
vmin=0,
vmax=2000,
percentile_stretch=None,
power=None,
image_proc_funcs=None,
output_format="jpg",
standardise_name=False):
"""
Exports Digital Earth Africa satellite data as an image file
based on the extent and time period selected using
:func:`select_region_app`. The function supports Sentinel-2 and Landsat
data, creating True and False colour images.
By default, files are named using:
``"<product> - <YYYY-MM-DD> - <site, state> - <description>.png"``
Set ``standardise_name=True`` for a machine-readable name:
``"<product>_<YYYY-MM-DD>_<site-state>_<description>.png"``
Last modified: September 2021
Parameters
----------
geopolygon : datacube.utils.geometry object
A datacube geopolygon providing the spatial bounds used to load
satellite data.
date : str
The exact date used to extract imagery
(e.g. `date='1988-01-01'`).
satellites : str
The satellite data to be used to extract imagery. The
following options are supported:
``'Landsat-9'``: data from the Landsat 9 satellite
``'Landsat-8'``: data from the Landsat 8 satellite
``'Landsat-7'``: data from the Landsat 7 satellite
``'Landsat-5'``: data from the Landsat 5 satellite
``'Sentinel-2'``: data from Sentinel-2A and Sentinel-2B
``'Sentinel-2 geomedian'``: data from the Sentinel-2 annual geomedian
style : str, optional
The style used to produce the image. Two options are currently
supported:
* ``'True colour'``: Creates a true colour image using the red,
green and blue satellite bands
* ``'False colour'``: Creates a false colour image using
short-wave infrared, infrared and green satellite bands.
The specific bands used vary between Landsat and Sentinel-2.
resolution : tuple, optional
        The spatial resolution at which to load data. By default, the tool
        will automatically set the best possible resolution depending on the
        satellites selected (i.e. 30 m for Landsat, 10 m for Sentinel-2).
Increasing this (e.g. to ``resolution=(-100, 100)``) can be useful
for loading large spatial extents.
vmin, vmax : int or float
The minimum and maximum surface reflectance values used to
clip the resulting imagery to enhance contrast.
    percentile_stretch : tuple of floats, optional
        A tuple of two floats (between 0.00 and 1.00) used to clip the
        imagery based on percentiles, giving more control over the
        brightness and contrast of the image. The default is ``None``;
        ``(0.02, 0.98)`` is equivalent to ``robust=True``. If this
        parameter is used, ``vmin`` and ``vmax`` will have no effect.
power : float, optional
Raises imagery by a power to reduce bright features and
enhance dark features. This can add extra definition over areas
with extremely bright features like snow, beaches or salt pans.
image_proc_funcs : list of funcs, optional
An optional list containing functions that will be applied to
the output image. This can include image processing functions
such as increasing contrast, unsharp masking, saturation etc.
The function should take AND return a `numpy.ndarray` with
shape ``[y, x, bands]``. If your function has parameters, you
can pass in custom values using a lambda function, e.g.:
``[lambda x: skimage.filters.unsharp_mask(x, radius=5, amount=0.2)]``
output_format : str, optional
The output file format of the image. Valid options include ``'jpg'``
and ``'png'``. Defaults to ``'jpg'``.
standardise_name : bool, optional
Whether to export the image file with a machine-readable
file name (e.g. ``<product>_<YYYY-MM-DD>_<site-state>_<description>.png``)
"""
###########################
# Set up satellite params #
###########################
sat_params = {
'Landsat-9': {
'products': ['ls9_sr'],
'resolution': [-30, 30],
'styles': {
'True colour': ['red', 'green', 'blue'],
'False colour': ['swir_1', 'nir', 'green']
}
},
'Landsat-8': {
'products': ['ls8_sr'],
'resolution': [-30, 30],
'styles': {
'True colour': ['red', 'green', 'blue'],
'False colour': ['swir_1', 'nir', 'green']
}
},
'Landsat-7': {
'products': ['ls7_sr'],
'resolution': [-30, 30],
'styles': {
'True colour': ['red', 'green', 'blue'],
'False colour': ['swir_1', 'nir', 'green']
}
},
'Landsat-5': {
'products': ['ls5_sr'],
'resolution': [-30, 30],
'styles': {
'True colour': ['red', 'green', 'blue'],
'False colour': ['swir_1', 'nir', 'green']
}
},
'Sentinel-2': {
'products': ['s2_l2a'],
'resolution': [-10, 10],
'styles': {
'True colour': ['red', 'green', 'blue'],
'False colour': ['swir_2', 'nir_1', 'green']
}
},
'Sentinel-2 geomedian': {
'products': ['gm_s2_annual'],
'resolution': [-10, 10],
'styles': {
'True colour': ['red', 'green', 'blue'],
'False colour': ['swir_2', 'nir_1', 'green']
}
},
}
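    # For example, sat_params['Sentinel-2']['styles']['False colour'] resolves
    # to ['swir_2', 'nir_1', 'green'], which is passed to dc.load() below as
    # the list of measurements (bands) to fetch.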
#############
# Load data #
#############
# Connect to datacube database
dc = datacube.Datacube(app='Exporting_satellite_images')
# Configure local dask cluster
client = create_local_dask_cluster(return_client=True)
    # Create the query from the supplied date and geopolygon
start_date = np.datetime64(date)
    query_params = {
        'time': str(start_date),
        'geopolygon': geopolygon
    }
# Find matching datasets
dss = [
dc.find_datasets(product=i, **query_params)
for i in sat_params[satellites]['products']
]
dss = list(itertools.chain.from_iterable(dss))
# Get CRS and sensor
crs = str(dss[0].crs)
if satellites == 'Sentinel-2 geomedian':
sensor = satellites
else:
sensor = dss[0].metadata_doc['properties']['eo:platform'].capitalize()
sensor = sensor[0:-1].replace('_', '-') + sensor[-1].capitalize()
# Use resolution if provided, otherwise use default
if resolution:
sat_params[satellites]['resolution'] = resolution
load_params = {
'output_crs': crs,
'resolution': sat_params[satellites]['resolution'],
'resampling': 'bilinear'
}
# Load data from datasets
ds = dc.load(datasets=dss,
measurements=sat_params[satellites]['styles'][style],
group_by='solar_day',
dask_chunks={
'time': 1,
'x': 3000,
'y': 3000
},
**load_params,
**query_params)
ds = masking.mask_invalid_data(ds)
rgb_array = ds.isel(time=0).to_array().values
############
# Plotting #
############
# Create unique file name
centre_coords = geopolygon.centroid.coords[0][::-1]
site = reverse_geocode(coords=centre_coords)
fname = (f"{sensor} - {date} - {site} - {style}, "
f"{load_params['resolution'][1]} m resolution.{output_format}")
# Remove spaces and commas if requested
if standardise_name:
fname = fname.replace(' - ', '_').replace(', ',
'-').replace(' ',
'-').lower()
print(
f'\nExporting image to {fname}.\nThis may take several minutes to complete...'
)
# Convert to numpy array
rgb_array = np.transpose(rgb_array, axes=[1, 2, 0])
# If percentile stretch is supplied, calculate vmin and vmax
# from percentiles
    if percentile_stretch:
        # Note: np.nanpercentile expects values in the 0-100 range, while
        # percentile_stretch is documented as fractions between 0 and 1,
        # so scale accordingly
        vmin, vmax = np.nanpercentile(rgb_array,
                                      [p * 100 for p in percentile_stretch])
# Raise by power to dampen bright features and enhance dark.
# Raise vmin and vmax by same amount to ensure proper stretch
if power:
rgb_array = rgb_array**power
vmin, vmax = vmin**power, vmax**power
# Rescale/stretch imagery between vmin and vmax
rgb_rescaled = exposure.rescale_intensity(rgb_array.astype(float),
in_range=(vmin, vmax),
out_range=(0.0, 1.0))
# Apply image processing funcs
if image_proc_funcs:
        # The source truncates mid-loop here; the body below is an assumed
        # reconstruction that applies each supplied function in order
        for i, func in enumerate(image_proc_funcs):
            print(f'Applying custom function {i + 1}...')
            rgb_rescaled = func(rgb_rescaled)
import requests
from bs4 import BeautifulSoup
import dateparser
from espncricinfo.exceptions import PlayerNotFoundError
from espncricinfo.match import Match
class Player(object):
def __init__(self, player_id):
self.url = "https://www.espncricinfo.com/ci/content/player/{0}.html".format(str(player_id))
self.json_url = "https://core.espnuk.org/v2/sports/cricket/athletes/{0}".format(str(player_id))
self.parsed_html = self.get_html()
self.json = self.get_json()
self.player_information = self._parse_player_information()
self.cricinfo_id = str(player_id)
if self.parsed_html:
self.__unicode__ = self._full_name()
self.name = self._name()
self.first_name = self._first_name()
self.full_name = self._full_name()
self.date_of_birth = self._date_of_birth()
self.current_age = self._current_age()
self.major_teams = self._major_teams()
self.nickname = self._nickname()
self.playing_role = self._playing_role()
self.batting_style = self._batting_style()
self.bowling_style = self._bowling_style()
self.batting_fielding_averages = self._batting_fielding_averages()
self.bowling_averages = self._bowling_averages()
self.test_debut = self._test_debut()
self.last_test = self._last_test()
self.t20i_debut = self._t20i_debut()
self.last_t20i = self._last_t20i()
self.first_class_debut = self._first_class_debut()
self.last_first_class = self._last_first_class()
self.list_a_debut = self._list_a_debut()
self.last_list_a = self._last_list_a()
self.t20_debut = self._t20_debut()
self.last_t20 = self._last_t20()
self.odi_debut = self._odi_debut()
self.last_odi = self._last_odi()
self.recent_matches = self._recent_matches()
def get_html(self):
r = requests.get(self.url)
if r.status_code == 404:
raise PlayerNotFoundError
else:
soup = BeautifulSoup(r.text, 'html.parser')
return soup.find("div", class_="pnl490M")
def get_json(self):
r = requests.get(self.json_url)
if r.status_code == 404:
raise PlayerNotFoundError
else:
return r.json()
def _parse_player_information(self):
return self.parsed_html.find_all('p', class_='ciPlayerinformationtxt')
def _name(self):
return self.json['name']
def _first_name(self):
return self.json['firstName']
def _middle_name(self):
return self.json['middleName']
def _last_name(self):
return self.json['lastName']
def _full_name(self):
return self.json['fullName']
def _date_of_birth(self):
return self.json['dateOfBirth']
def _current_age(self):
return self.json['age']
def _major_teams(self):
return next((p.text.replace('Major teams ','').split(', ') for p in self.player_information if p.find('b').text == 'Major teams'), None)
def _nickname(self):
return next((p.find('span').text for p in self.player_information if p.find('b').text == 'Nickname'), None)
def _also_known_as(self):
return next((p.find('span').text for p in self.player_information if p.find('b').text == 'Also known as'), None)
def _playing_role(self):
return self.json['position']
def _batting_style(self):
return next((x for x in self.json['style'] if x['type'] == 'batting'), None)
def _bowling_style(self):
return next((x for x in self.json['style'] if x['type'] == 'bowling'), None)
def _batting_fielding_averages(self):
if len(self.parsed_html.findAll('table', class_='engineTable')) == 4:
headers = ['matches', 'innings', 'not_out', 'runs', 'high_score', 'batting_average', 'balls_faced', 'strike_rate', 'centuries', 'fifties', 'fours', 'sixes', 'catches', 'stumpings']
bat_field = [td.text.strip() for td in self.parsed_html.find('table', class_='engineTable').findAll('td')]
num_formats = int(len(bat_field)/15)
format_positions = [15*x for x in range(num_formats)]
formats = [bat_field[x] for x in format_positions]
avg_starts = [x+1 for x in format_positions[:num_formats]]
avg_finish = [x+14 for x in avg_starts]
format_averages = [bat_field[x:y] for x,y in zip(avg_starts, avg_finish)]
combined = list(zip(formats, format_averages))
l = [{x: dict(zip(headers, y))} for x,y in combined]
return { k: v for d in l for k, v in d.items() }
else:
return None
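    # Worked example of the slicing above (illustrative values): the flat
    # `bat_field` list holds 15 cells per format -- the format name followed
    # by the 14 stat columns -- so for two formats:
    #
    #     bat_field = ['Tests', '10', ...13 more..., 'ODIs', '8', ...]
    #     format_positions == [0, 15]  ->  formats == ['Tests', 'ODIs']
    #     avg_starts == [1, 16], avg_finish == [15, 30]
    #
    # yielding {'Tests': {...}, 'ODIs': {...}} keyed by the stat headers.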
def _bowling_averages(self):
if len(self.parsed_html.findAll('table', class_='engineTable')) == 4:
headers = ['matches', 'innings', 'balls_delivered', 'runs', 'wickets', 'best_innings', 'best_match', 'bowling_average', 'economy', 'strike_rate', 'four_wickets', 'five_wickets', 'ten_wickets']
bowling = [td.text.strip() for td in self.parsed_html.findAll('table', class_='engineTable')[1].findAll('td')]
num_formats = int(len(bowling)/14)
format_positions = [14*x for x in range(num_formats)]
formats = [bowling[x] for x in format_positions]
avg_starts = [x+1 for x in format_positions[:num_formats]]
avg_finish = [x+13 for x in avg_starts]
format_averages = [bowling[x:y] for x,y in zip(avg_starts, avg_finish)]
combined = list(zip(formats, format_averages))
l = [{x: dict(zip(headers, y))} for x,y in combined]
return { k: v for d in l for k, v in d.items() }
else:
return None
def _debuts_and_lasts(self):
if len(self.parsed_html.findAll('table', class_='engineTable')) == 4:
return self.parsed_html.findAll('table', class_='engineTable')[2]
else:
return None
def _test_debut(self):
if self._debuts_and_lasts() is not None:
test_debut = next((tr for tr in self._debuts_and_lasts().findAll('tr') if tr.find('b').text == 'Test debut'), None)
if test_debut:
url = 'http://www.espncricinfo.com'+test_debut.find('a')['href']
match_id = int(test_debut.find('a')['href'].split('/', 4)[4].split('.')[0])
title = test_debut.findAll('td')[1].text.replace(' scorecard','')
return {'url': url, 'match_id': match_id, 'title': title}
else:
return None
else:
return None
def _last_test(self):
if self._debuts_and_lasts() is not None:
last_test = next((tr for tr in self._debuts_and_lasts().findAll('tr') if tr.find('b').text == 'Last Test'), None)
if last_test:
url = 'http://www.espncricinfo.com'+last_test.find('a')['href']
match_id = int(last_test.find('a')['href'].split('/', 4)[4].split('.')[0])
title = last_test.findAll('td')[1].text.replace(' scorecard','')
return {'url': url, 'match_id': match_id, 'title': title}
else:
return None
else:
return None
def _t20i_debut(self):
if self._debuts_and_lasts() is not None:
t20i_debut = next((tr for tr in self._debuts_and_lasts().findAll('tr') if tr.find('b').text == 'T20I debut'), None)
if t20i_debut:
url = 'http://www.espncricinfo.com'+t20i_debut.find('a')['href']
match_id = int(t20i_debut.find('a')['href'].split('/', 4)[4].split('.')[0])
title = t20i_debut.findAll('td')[1].text.replace(' scorecard','')
return {'url': url, 'match_id': match_id, 'title': title}
else:
return None
else:
return None
def _last_t20i(self):
if self._debuts_and_lasts() is not None:
last_t20i = next((tr for tr in self._debuts_and_lasts().findAll('tr') if tr.find('b').text == 'Last T20I'), None)
if last_t20i:
url = 'http://www.espncricinfo.com'+last_t20i.find('a')['href']
match_id = int(last_t20i.find('a')['href'].split('/', 4)[4].split('.')[0])
title = last_t20i.findAll('td')[1].text.replace(' scorecard','')
return {'url': url, 'match_id': match_id, 'title': title}
else:
return None
else:
return None
def _first_class_debut(self):
if self._debuts_and_lasts() is not None:
first_class_debut = next((tr for tr in self._debuts_and_lasts().findAll('tr') if tr.find('b').text == 'First-class debut'), None)
if first_class_debut:
try:
url = 'http://www.espncricinfo.com'+first_class_debut.find('a')['href']
match_id = int(first_class_debut.find('a')['href'].split('/', 4)[4].split('.')[0])
title = first_class_debut.findAll('td')[1].text.replace(' scorecard','')
return {'url': url, 'match_id': match_id, 'title': title}
                except Exception:
return {'url': None, 'match_id': None, 'title': first_class_debut.findAll('td')[1].text}
else:
return None
else:
return None
def _last_first_class(self):
if self._debuts_and_lasts() is not None:
last_first_class = next((tr for tr in self._debuts_and_lasts().findAll('tr') if tr.find('b').text == 'Last First-class'), None)
if last_first_class:
url = 'http://www.espncricinfo.com'+last_first_class.find('a')['href']
match_id = int(last_first_class.find('a')['href'].split('/', 4)[4].split('.')[0])
title = last_first_class.findAll('td')[1].text.replace(' scorecard','')
return {'url': url, 'match_id': match_id, 'title': title}
else:
return None
return None
def _list_a_debut(self):
if self._debuts_and_lasts() is not None:
list_a_debut = next((tr for tr in self._debuts_and_lasts().findAll('tr') if tr.find('b').text == 'List A debut'), None)
if list_a_debut:
try:
url = 'http://www.espncricinfo.com'+list_a_debut.find('a')['href']
match_id = int(list_a_debut.find('a')['href'].split('/', 4)[4].split('.')[0])
title = list_a_debut.findAll('td')[1].text.replace(' scorecard','')
return {'url': url, 'match_id': match_id, 'title': title}
                except Exception:
return {'url': None, 'match_id': None, 'title': list_a_debut.findAll('td')[1].text}
else:
return None
else:
return None
def _last_list_a(self):
if self._debuts_and_lasts() is not None:
last_list_a = next((tr for tr in self._debuts_and_lasts().findAll('tr') if tr.find('b').text == 'Last List A'), None)
if last_list_a:
url = 'http://www.espncricinfo.com'+last_list_a.find('a')['href']
match_id = int(last_list_a.find('a')['href'].split('/', 4)[4].split('.')[0])
title = last_list_a.findAll('td')[1].text.replace(' scorecard','')
return {'url': url, 'match_id': match_id, 'title': title}
else:
return None
else:
return None
def _t20_debut(self):
if self._debuts_and_lasts() is not None:
t20_debut = next((tr for tr in self._debuts_and_lasts().findAll('tr') if tr.find('b').text == 'Twenty20 debut'), None)
if t20_debut:
url = 'http://www.espncricinfo.com'+t20_debut.find('a')['href']
match_id = int(t20_debut.find('a')['href'].split('/', 4)[4].split('.')[0])
title = t20_debut.findAll('td')[1].text.replace(' scorecard','')
return {'url': url, 'match_id': match_id, 'title': title}
else:
return None
else:
return None
def _last_t20(self):
if self._debuts_and_lasts() is not None:
last_t20 = next((tr for tr in self._debuts_and_lasts().findAll('tr') if tr.find('b').text == 'Last Twenty20'), None)
if last_t20:
url = 'http://www.espncricinfo.com'+last_t20.find('a')['href']
match_id = int(last_t20.find('a')['href'].split('/', 4)[4].split('.')[0])
title = last_t20.findAll('td')[1].text.replace(' scorecard','')
return {'url': url, 'match_id': match_id, 'title': title}
else:
return None
else:
return None
def _odi_debut(self):
if self._debuts_and_lasts() is not None:
odi_debut = next((tr for tr in self._debuts_and_lasts().findAll('tr') if tr.find('b').text == 'ODI debut'), None)
if odi_debut:
url = 'http://www.espncricinfo.com'+odi_debut.find('a')['href']
match_id = int(odi_debut.find('a')['href'].split('/', 4)[4].split('.')[0])
title = odi_debut.findAll('td')[1].text.replace(' scorecard','')
return {'url': url, 'match_id': match_id, 'title': title}
else:
return None
else:
return None
def _last_odi(self):
if self._debuts_and_lasts() is not None:
last_odi = next((tr for tr in self._debuts_and_lasts().findAll('tr') if tr.find('b').text == 'Last ODI'), None)
if last_odi:
url = 'http://www.espncricinfo.com'+last_odi.find('a')['href']
match_id = int(last_odi.find('a')['href'].split('/', 4)[4].split('.')[0])
title = last_odi.findAll('td')[1].text.replace(' scorecard','')
return {'url': url, 'match_id': match_id, 'title': title}
else:
return None
else:
return None
def _recent_matches(self):
try:
table = self.parsed_html.findAll('table', class_='engineTable')[-1]
return [x.find('a')['href'].split('/', 4)[4].split('.')[0] for x in table.findAll('tr')[1:]]
        except Exception:
return None
def in_team_for_match(self, match_id):
m = Match(match_id)
if next((p for p in m.team_1_players if p['object_id'] == self.cricinfo_id), None) or next((p for p in m.team_2_players if p['object_id'] == self.cricinfo_id), None):
return True
else:
return False
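    # Usage sketch (hypothetical ids; performs live HTTP requests):
    #
    #     player = Player(12345)
    #     if player.in_team_for_match(67890):
    #         print(player.batting_for_match(67890))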
def batting_for_match(self, match_id):
batting_stats = []
m = Match(match_id)
for innings in list(m.full_scorecard['innings'].keys()):
stats = next((x for x in m.full_scorecard['innings'][innings]['batsmen'] if x['href'] == self.url), None)
if stats:
                batting_stats.append({
                    'innings': innings,
                    'balls_faced': next((x['value'] for x in stats['stats'] if x['name'] == 'ballsFaced'), None),
                    'minutes': next((x['value'] for x in stats['stats'] if x['name'] == 'minutes'), None),
                    'runs': next((x['value'] for x in stats['stats'] if x['name'] == 'runs'), None),
                    'fours': next((x['value'] for x in stats['stats'] if x['name'] == 'fours'), None),
                    'sixes': next((x['value'] for x in stats['stats'] if x['name'] == 'sixes'), None),
                    'strike_rate': next((x['value'] for x in stats['stats'] if x['name'] == 'strikeRate'), None),
                })
return batting_stats
def bowling_for_match(self, match_id):
bowling_stats = []
m = Match(match_id)
for innings in list(m.full_scorecard['innings'].keys()):
stats = next((x for x in m.full_scorecard['innings'][innings]['bowlers'] if x['href'] == self.url), None)
if stats:
                bowling_stats.append({
                    'innings': innings,
                    'overs': next((x['value'] for x in stats['stats'] if x['name'] == 'overs'), None),
                    'maidens': next((x['value'] for x in stats['stats'] if x['name'] == 'maidens'), None),
                    'conceded': next((x['value'] for x in stats['stats'] if x['name'] == 'conceded'), None),
                    'wickets': next((x['value'] for x in stats['stats'] if x['name'] == 'wickets'), None),
                    'economy_rate': next((x['value'] for x in stats['stats'] if x['name'] == 'economyRate'), None),
                    'dots': next((x['value'] for x in stats['stats'] if x['name'] == 'dots'), None),
                    # The source truncates at 'fours_conceded'; the stat key
                    # 'foursConceded' below is an assumed completion
                    'fours_conceded': next((x['value'] for x in stats['stats'] if x['name'] == 'foursConceded'), None),
                })
        return bowling_stats
xero_tenant_id,
}
local_var_files = {}
form_params = []
body_params = credit_notes
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json"]
)
# HTTP header `Content-Type`
header_params["Content-Type"] = self.api_client.select_header_content_type(
["application/json"]
)
# Authentication setting
auth_settings = ["OAuth2"]
url = self.get_resource_url("/CreditNotes")
try:
return self.api_client.call_api(
url,
"POST",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="CreditNotes",
response_model_finder=self.get_model_finder(),
auth_settings=auth_settings,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
except exceptions.HTTPStatusException as error:
raise translate_status_exception(
error, self, "update_or_create_credit_notes"
)
def update_or_create_employees(
self,
xero_tenant_id,
employees,
summarize_errors=empty,
_return_http_data_only=True,
_preload_content=True,
_request_timeout=None,
):
"""Creates a single new employees used in Xero payrun # noqa: E501
OAuth2 scope: accounting.settings
:param str xero_tenant_id: Xero identifier for Tenant (required)
:param Employees employees: Employees with array of Employee object in body of request (required)
:param bool summarize_errors: If false return 200 OK and mix of successfully created objects and any with validation errors
:param bool _return_http_data_only: return received data only
:param bool _preload_content: load received data in models
:param bool _request_timeout: maximum wait time for response
:return: Employees
"""
# verify the required parameter 'xero_tenant_id' is set
if xero_tenant_id is None:
raise ValueError(
"Missing the required parameter `xero_tenant_id` "
"when calling `update_or_create_employees`"
)
# verify the required parameter 'employees' is set
if employees is None:
raise ValueError(
"Missing the required parameter `employees` "
"when calling `update_or_create_employees`"
)
collection_formats = {}
path_params = {}
query_params = []
if summarize_errors is not empty:
query_params.append(("summarizeErrors", summarize_errors))
header_params = {
"xero-tenant-id": xero_tenant_id,
}
local_var_files = {}
form_params = []
body_params = employees
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json"]
)
# HTTP header `Content-Type`
header_params["Content-Type"] = self.api_client.select_header_content_type(
["application/json"]
)
# Authentication setting
auth_settings = ["OAuth2"]
url = self.get_resource_url("/Employees")
try:
return self.api_client.call_api(
url,
"POST",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="Employees",
response_model_finder=self.get_model_finder(),
auth_settings=auth_settings,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
except exceptions.HTTPStatusException as error:
raise translate_status_exception(error, self, "update_or_create_employees")
def update_or_create_invoices(
self,
xero_tenant_id,
invoices,
summarize_errors=empty,
unitdp=empty,
_return_http_data_only=True,
_preload_content=True,
_request_timeout=None,
):
"""Updates or creates one or more sales invoices or purchase bills # noqa: E501
OAuth2 scope: accounting.transactions
:param str xero_tenant_id: Xero identifier for Tenant (required)
:param Invoices invoices: (required)
:param bool summarize_errors: If false return 200 OK and mix of successfully created objects and any with validation errors
:param int unitdp: e.g. unitdp=4 – (Unit Decimal Places) You can opt in to use four decimal places for unit amounts
:param bool _return_http_data_only: return received data only
:param bool _preload_content: load received data in models
:param bool _request_timeout: maximum wait time for response
:return: Invoices
"""
# verify the required parameter 'xero_tenant_id' is set
if xero_tenant_id is None:
raise ValueError(
"Missing the required parameter `xero_tenant_id` "
"when calling `update_or_create_invoices`"
)
# verify the required parameter 'invoices' is set
if invoices is None:
raise ValueError(
"Missing the required parameter `invoices` "
"when calling `update_or_create_invoices`"
)
collection_formats = {}
path_params = {}
query_params = []
if summarize_errors is not empty:
query_params.append(("summarizeErrors", summarize_errors))
if unitdp is not empty:
query_params.append(("unitdp", unitdp))
header_params = {
"xero-tenant-id": xero_tenant_id,
}
local_var_files = {}
form_params = []
body_params = invoices
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json"]
)
# HTTP header `Content-Type`
header_params["Content-Type"] = self.api_client.select_header_content_type(
["application/json"]
)
# Authentication setting
auth_settings = ["OAuth2"]
url = self.get_resource_url("/Invoices")
try:
return self.api_client.call_api(
url,
"POST",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="Invoices",
response_model_finder=self.get_model_finder(),
auth_settings=auth_settings,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
except exceptions.HTTPStatusException as error:
raise translate_status_exception(error, self, "update_or_create_invoices")
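    # Hedged usage sketch (client wiring assumed from the xero-python SDK and
    # not verified against a live tenant; `invoices` is an Invoices model):
    #
    #     api = AccountingApi(api_client)
    #     result = api.update_or_create_invoices(
    #         xero_tenant_id, invoices, summarize_errors=False, unitdp=4)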
def update_or_create_items(
self,
xero_tenant_id,
items,
summarize_errors=empty,
unitdp=empty,
_return_http_data_only=True,
_preload_content=True,
_request_timeout=None,
):
"""Updates or creates one or more items # noqa: E501
OAuth2 scope: accounting.settings
:param str xero_tenant_id: Xero identifier for Tenant (required)
:param Items items: (required)
:param bool summarize_errors: If false return 200 OK and mix of successfully created objects and any with validation errors
:param int unitdp: e.g. unitdp=4 – (Unit Decimal Places) You can opt in to use four decimal places for unit amounts
:param bool _return_http_data_only: return received data only
:param bool _preload_content: load received data in models
:param bool _request_timeout: maximum wait time for response
:return: Items
"""
# verify the required parameter 'xero_tenant_id' is set
if xero_tenant_id is None:
raise ValueError(
"Missing the required parameter `xero_tenant_id` "
"when calling `update_or_create_items`"
)
# verify the required parameter 'items' is set
if items is None:
raise ValueError(
"Missing the required parameter `items` "
"when calling `update_or_create_items`"
)
collection_formats = {}
path_params = {}
query_params = []
if summarize_errors is not empty:
query_params.append(("summarizeErrors", summarize_errors))
if unitdp is not empty:
query_params.append(("unitdp", unitdp))
header_params = {
"xero-tenant-id": xero_tenant_id,
}
local_var_files = {}
form_params = []
body_params = items
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json"]
)
# HTTP header `Content-Type`
header_params["Content-Type"] = self.api_client.select_header_content_type(
["application/json"]
)
# Authentication setting
auth_settings = ["OAuth2"]
url = self.get_resource_url("/Items")
try:
return self.api_client.call_api(
url,
"POST",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="Items",
response_model_finder=self.get_model_finder(),
auth_settings=auth_settings,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
except exceptions.HTTPStatusException as error:
raise translate_status_exception(error, self, "update_or_create_items")
def update_or_create_manual_journals(
self,
xero_tenant_id,
manual_journals,
summarize_errors=empty,
_return_http_data_only=True,
_preload_content=True,
_request_timeout=None,
):
"""Updates or creates a single manual journal # noqa: E501
OAuth2 scope: accounting.transactions
:param str xero_tenant_id: Xero identifier for Tenant (required)
:param ManualJournals manual_journals: ManualJournals array with ManualJournal object in body of request (required)
:param bool summarize_errors: If false return 200 OK and mix of successfully created objects and any with validation errors
:param bool _return_http_data_only: return received data only
:param bool _preload_content: load received data in models
:param bool _request_timeout: maximum wait time for response
:return: ManualJournals
"""
# verify the required parameter 'xero_tenant_id' is set
if xero_tenant_id is None:
raise ValueError(
"Missing the required parameter `xero_tenant_id` "
"when calling `update_or_create_manual_journals`"
)
# verify the required parameter 'manual_journals' is set
if manual_journals is None:
raise ValueError(
"Missing the required parameter `manual_journals` "
"when calling `update_or_create_manual_journals`"
)
collection_formats = {}
path_params = {}
query_params = []
if summarize_errors is not empty:
query_params.append(("summarizeErrors", summarize_errors))
header_params = {
"xero-tenant-id": xero_tenant_id,
}
local_var_files = {}
form_params = []
body_params = manual_journals
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json"]
)
# HTTP header `Content-Type`
header_params["Content-Type"] = self.api_client.select_header_content_type(
["application/json"]
)
# Authentication setting
auth_settings = ["OAuth2"]
url = self.get_resource_url("/ManualJournals")
try:
return self.api_client.call_api(
url,
"POST",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="ManualJournals",
response_model_finder=self.get_model_finder(),
auth_settings=auth_settings,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
except exceptions.HTTPStatusException as error:
raise translate_status_exception(
error, self, "update_or_create_manual_journals"
)
def update_or_create_purchase_orders(
self,
xero_tenant_id,
purchase_orders,
summarize_errors=empty,
_return_http_data_only=True,
_preload_content=True,
_request_timeout=None,
):
"""Updates or creates one or more purchase orders # noqa: E501
OAuth2 scope: accounting.transactions
:param str xero_tenant_id: Xero identifier for Tenant (required)
:param PurchaseOrders purchase_orders: (required)
:param bool summarize_errors: If false return 200 OK and mix of successfully created objects and any with validation errors
:param bool _return_http_data_only: return received data only
:param bool _preload_content: load received data in models
:param bool _request_timeout: maximum wait time for response
:return: PurchaseOrders
"""
# verify the required parameter 'xero_tenant_id' is set
if xero_tenant_id is None:
raise ValueError(
"Missing the required parameter `xero_tenant_id` "
"when calling `update_or_create_purchase_orders`"
)
# verify the required parameter 'purchase_orders' is set
if purchase_orders is None:
raise ValueError(
"Missing the required parameter `purchase_orders` "
"when calling `update_or_create_purchase_orders`"
)
collection_formats = {}
path_params = {}
query_params = []
if summarize_errors is not empty:
query_params.append(("summarizeErrors", summarize_errors))
header_params = {
"xero-tenant-id": xero_tenant_id,
}
local_var_files = {}
form_params = []
body_params = purchase_orders
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json"]
)
# HTTP header `Content-Type`
header_params["Content-Type"] = self.api_client.select_header_content_type(
["application/json"]
)
# Authentication setting
auth_settings = ["OAuth2"]
url = self.get_resource_url("/PurchaseOrders")
try:
return self.api_client.call_api(
url,
"POST",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="PurchaseOrders",
response_model_finder=self.get_model_finder(),
auth_settings=auth_settings,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
except exceptions.HTTPStatusException as error:
raise translate_status_exception(
error, self, "update_or_create_purchase_orders"
)
def update_or_create_quotes(
self,
xero_tenant_id,
quotes,
summarize_errors=empty,
_return_http_data_only=True,
_preload_content=True,
_request_timeout=None,
):
"""Updates or creates one or more quotes # noqa: E501
OAuth2 scope: accounting.transactions
:param str xero_tenant_id: Xero identifier for Tenant (required)
:param Quotes quotes: (required)
:param bool summarize_errors: If false return 200 OK and mix of successfully created objects and any with validation errors
        :param bool _return_http_data_only: return received data only
        :param bool _preload_content: load received data in models
        :param bool _request_timeout: maximum wait time for response
        :return: Quotes
        """
"""A class that defines the network being modeled and that contains all
modeled objects in the network such as Nodes, Interfaces, Circuits,
and Demands."""
import networkx as nx
from pprint import pprint
from .circuit import Circuit
from .demand import Demand
from .interface import Interface
from .exceptions import ModelException
from .node import Node
from .rsvp import RSVP_LSP
from .utilities import find_end_index
class Model(object):
"""
A network model object consisting of the following base components:
- Interface objects: network node interfaces. Interfaces have a
'capacity' attribute that determines how much traffic it can carry.
Note: Interfaces are matched into Circuit objects based on the
interface addresses
- Node objects: vertices on the network (aka 'layer 3 devices')
that contain Interface objects. Nodes are connected to each other
via a pair of matched Interfaces (Circuits)
- Demand objects: traffic loads on the network. Each demand starts
from a source node and transits the network to a destination node.
A demand also has a magnitude, representing how much traffic it
is carrying. The demand's magnitude will apply against each
interface's available capacity
"""
    def __init__(self, interface_objects=None, node_objects=None,
                 demand_objects=None, rsvp_lsp_objects=None):
        # Use None defaults: mutable default arguments (set([])) would be
        # shared across every Model instance
        self.interface_objects = interface_objects if interface_objects is not None else set()
        self.node_objects = node_objects if node_objects is not None else set()
        self.demand_objects = demand_objects if demand_objects is not None else set()
        self._orphan_nodes = set()
        self.circuit_objects = set()
        self.rsvp_lsp_objects = rsvp_lsp_objects if rsvp_lsp_objects is not None else set()
def __repr__(self):
return 'Model(Interfaces: %s, Nodes: %s, Demands: %s, RSVP_LSPs: %s)' % (len(self.interface_objects),
len(self.node_objects),
len(self.demand_objects),
len(self.rsvp_lsp_objects))
@classmethod
    def create_network_model(cls, network_interfaces):
"""
A tool that reads network interface info and returns a new model.
Interface info must be in format like below example:
.. highlight:: python
.. code-block:: python
network_interfaces = [
{'name':'A-B', 'cost':4,'capacity':100, 'node':'A', 'remote_node': 'B', 'address': 1, 'failed': False},
{'name':'A-C', 'cost':1,'capacity':200, 'node':'A', 'remote_node': 'C', 'address': 3, 'failed': False}
]
"""
        interface_objects, node_objects = cls._make_network_interfaces(network_interfaces)
        model = cls(interface_objects, node_objects)
        model._make_circuits()
        validated_network_model = model.validate_model()
        return validated_network_model
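    # Example (sketch): build and validate a model from the documented dict
    # format above, e.g. the two-interface `network_interfaces` list shown
    # in the docstring.
    #
    #     model = Model.create_network_model(network_interfaces)
    #     print(model)   # Model(Interfaces: 2, Nodes: 3, Demands: 0, RSVP_LSPs: 0)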
def add_network_interfaces_from_list(self, network_interfaces):
"""
A tool that reads network interface info and updates an existing model.
Interface info must be in format like below example:
.. highlight:: python
.. code-block:: python
network_interfaces = [
{'name':'A-B', 'cost':4,'capacity':100, 'node':'A', 'remote_node': 'B', 'address': 1, 'failed': False},
{'name':'A-C', 'cost':1,'capacity':200, 'node':'A', 'remote_node': 'C', 'address': 3, 'failed': False}
]
"""
new_interface_objects, new_node_objects = self._make_network_interfaces(network_interfaces)
# print("new_node_objects =", new_node_objects)
self.node_objects = self.node_objects.union(new_node_objects)
self.interface_objects = self.interface_objects.union(new_interface_objects)
# print("validating model") # Debug
self.validate_model()
def validate_model(self):
"""
Validates that data fed into the model creates a valid network model
"""
# create circuits table, flags ints that are not part of a circuit
circuits = self._make_circuits(return_exception=True)
# Find objects in interface_objects that are not Interface objects
non_interface_objects = set()
non_bool_failed = set() # ints with non-boolean failed attribute
metrics_not_ints = set() # ints with non-integer cost value
capacity_not_number = set() # ints with non-numerical capacity value
error_data = [] # list of all errored checks
# Validate the individual interface entries
        for interface in self.interface_objects:
# Make sure is instance Interface
if not (isinstance(interface, Interface)):
non_interface_objects.add(interface)
# Make sure 'failed' values are either True or False
if isinstance(interface.failed, bool) is False:
non_bool_failed.add(interface)
# Make sure 'metric' values are integers
if isinstance(interface.cost, int) is False:
metrics_not_ints.add(interface)
# print("metric not integer for interface", interface) # Debug
# Make sure 'capacity' values are numbers
if isinstance(interface.capacity, (float, int)) is False:
capacity_not_number.add(interface)
# If creation of circuits returns a dict, there are problems
if isinstance(circuits, dict):
error_data.append({'ints_w_no_remote_int': circuits['data']})
# Append any failed checks to error_data
if len(non_interface_objects) > 0:
error_data.append(non_interface_objects)
if len(non_bool_failed) > 0:
error_data.append({'non_boolean_failed': non_bool_failed})
if len(metrics_not_ints) > 0:
error_data.append({'non_integer_metrics': metrics_not_ints})
if len(capacity_not_number) > 0:
error_data.append({'invalid_capacity': capacity_not_number})
# Validate there are no duplicate interfaces
unique_interfaces_per_node = self._unique_interface_per_node()
if unique_interfaces_per_node is not True:
error_data.append(unique_interfaces_per_node)
# Make validate_model() check for matching failed statuses
# on the interfaces and matching interface capacity
circuits_with_mismatched_interface_capacity = []
        for ckt in self.get_circuit_objects():
int1 = ckt.get_circuit_interfaces(self)[0]
int2 = ckt.get_circuit_interfaces(self)[1]
# Match the failed status to True if they are different
if int1.failed != int2.failed:
int1.failed = True
int2.failed = True
# Make sure the interface capacities in the circuit match
if int1.capacity != int2.capacity:
circuits_with_mismatched_interface_capacity.append(ckt)
if len(circuits_with_mismatched_interface_capacity) > 0:
int_status_error_dict = {
'circuits_with_mismatched_interface_capacity':
circuits_with_mismatched_interface_capacity
}
error_data.append(int_status_error_dict)
# Verify no duplicate nodes
node_names = set([node.name for node in self.node_objects])
if (len(self.node_objects)) != (len(node_names)):
node_dict = {'len_node_objects': len(self.node_objects),
'len_node_names': len(node_names)}
error_data.append(node_dict)
# Read error_data
if len(error_data) > 0:
message = 'network interface validation failed, see returned data'
raise ModelException(message, error_data)
else:
return self
def get_circuit_objects(self):
"""
Returns a set of circuit objects in the Model
"""
return self.circuit_objects
# def update_simulation_old(self):
# """Returns model with updated interface traffic.
# Must be called to update the model whenever there is a topology change.
# """
# # This list of interfaces can be used to route traffic
# non_failed_interfaces = set()
# available_nodes = set()
# # Find all the non-failed interfaces in the model and
# # add them to non_failed_interfaces; also find all the nodes
# # associated with the non-failed interfaces
# for interface_object in self.interface_objects:
# if interface_object.failed == False:
# non_failed_interfaces.add(interface_object)
# available_nodes.add(interface_object.node_object)
# available_nodes.add(interface_object.remote_node_object)
# # Create a model consisting only of the non-failed interfaces and
# # corresponding nodes
# non_failed_interfaces_model = Model(non_failed_interfaces, available_nodes)
# # Find all demands that match up with source/dest for an LSP
# # TODO - here and in the rsvp_lsp, both separate out which demands
# # ride an LSP - it only need be done once I think. Look at this.
# # Determine demands that will ride an LSP
# lsp_demands = set([])
# for demand in (demand for demand in self.demand_objects):
# for lsp in (lsp for lsp in self.rsvp_lsp_objects):
# if demand.source_node_object == lsp.source_node_object and \
# demand.dest_node_object == lsp.dest_node_object:
# lsp_demands.add(demand)
# # Find all demands that don't match up source/dest with an LSP's
# # source/dest
# non_lsp_demands = set([])
# for demand in (demand for demand in self.demand_objects):
# if demand not in lsp_demands:
# non_lsp_demands.add(demand)
# # ROUTING ORDER
# # 1. Route demands that don't take LSPs in the
# # non_failed_interfaces_model
# for demand_object in non_lsp_demands:
# demand_object = demand_object.\
# _add_demand_path(non_failed_interfaces_model)
# # 2. Route LSPs in the non_failed_interfaces model
# # Find the amount of bandwidth each LSP will signal for
# for lsp in (lsp for lsp in self.rsvp_lsp_objects):
# lsp = lsp.route_lsp(non_failed_interfaces_model)
# # 3. Route lsp_demands over the lsp_model
# for demand_object in (demand for demand in lsp_demands):
# demand_object._add_demand_path(non_failed_interfaces_model)
    # # In the model, if an interface has failed, set the traffic attribute
# # to 'Down', otherwise, initialize the traffic to zero
# for interface_object in self.interface_objects:
# if interface_object.failed == True:
# interface_object.traffic = 'Down'
# else:
# interface_object.traffic = 0.0
# # For each demand that is not Unrouted, add its traffic value to each
# # interface object in the path
# for demand_object in self.demand_objects:
# traffic = demand_object.traffic
# if demand_object.path != 'Unrouted':
# # Find each demands path list, determine the ECMP split, and find
# # the traffic per path
# demand_object_paths = demand_object.path
# num_demand_paths = float(len(demand_object_paths))
# ecmp_split = 1/num_demand_paths
# traffic_per_demand_path = traffic * ecmp_split
# # Add the traffic per path to each interface the demand touches.
# # Not sure if there's a way to optimize this since
# # we have to do a lookup to modify the traffic attribute
# for demand_object_path in demand_object_paths:
# for demand_path_interface in demand_object_path:
# # Get the interface's existing traffic and add the portion
# # of the demand's traffic
# existing_traffic = demand_path_interface.traffic
# existing_traffic = existing_traffic + traffic_per_demand_path
# demand_path_interface.traffic = existing_traffic
def _update_interface_utilization(self):
"""
Updates each interface's utilization; returns Model object with
updated interface utilization.
"""
        # In the model, if an interface has failed, set its traffic attribute
        # to 'Down'; otherwise, initialize the traffic to zero
for interface_object in self.interface_objects:
if interface_object.failed is True:
interface_object.traffic = 'Down'
else:
interface_object.traffic = 0.0
        # For each demand that is not Unrouted, add its traffic value to
        # each interface object in its path
# daceml/onnx/onnx_importer.py
import collections
import logging
from itertools import chain, repeat
from typing import Dict, Union, Tuple, Any, List, Optional, OrderedDict, Callable
import numpy as np
import torch
import onnx
from dace.codegen import compiled_sdfg
from onnx import numpy_helper
import dace
from dace import data as dt, dtypes, nodes, SDFG, SDFGState
from dace.frontend.python import parser
from dace.symbolic import pystr_to_symbolic
from dace.transformation import dataflow
from daceml import transformation
from daceml.onnx.shape_inference import shape_inference
from daceml.onnx.converters import convert_attribute_proto, onnx_tensor_type_to_typeclass, clean_onnx_name
from daceml.onnx.schema import ONNXParameterType
from daceml.onnx.nodes.onnx_op import get_onnx_node, has_onnx_node, ONNXOp
from daceml.util import utils, is_cuda
log = logging.getLogger(__name__)
numpy_to_torch_dtype_dict = {
    bool: torch.bool,  # np.bool was removed in NumPy 1.24; use the builtin
np.bool_: torch.bool,
np.uint8: torch.uint8,
np.int8: torch.int8,
np.int16: torch.int16,
np.int32: torch.int32,
np.int64: torch.int64,
np.float16: torch.float16,
np.float32: torch.float32,
np.float64: torch.float64,
np.complex64: torch.complex64,
np.complex128: torch.complex128
}
torch_to_numpy_dtype_dict = {
v: k
for k, v in numpy_to_torch_dtype_dict.items()
}
def _nested_HasField(obj, full_attr):
"""Performs a nested hasattr check, separating attr on dots."""
attrs = full_attr.split(".")
for attr in attrs:
if obj.HasField(attr):
obj = getattr(obj, attr)
else:
return False
return True
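# Example: _nested_HasField(value_info, "type.tensor_type.shape") returns True
# only if every protobuf field along the dotted path is present; it guards the
# shape lookup in ONNXModel._add_value_info below.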
class ONNXModel:
""" Loads an ONNX model into an SDFG.
:Example:
First download an ONNX model, such as
`efficientnet <http://spclstorage.inf.ethz.ch/~rauscho/efficientnet-lite4-11.onnx>`_.
.. testsetup::
import subprocess
model_path = os.path.join("..", "tests", "onnx_files", "efficientnet.onnx")
# Download model
if not os.path.exists(model_path):
subprocess.check_call([
"wget",
"http://spclstorage.inf.ethz.ch/~rauscho/efficientnet-lite4-11.onnx",
"--output-document={}".format(model_path),
"--no-verbose"
])
.. testcode::
import onnx
import os
import numpy as np
from daceml.onnx import ONNXModel
model_path = os.path.join("..", "tests", "onnx_files", "efficientnet.onnx")
model = onnx.load(model_path)
dace_model = ONNXModel("efficientnet", model)
test_input = np.random.rand(1, 3, 224, 224).astype(np.float32)
dace_model(test_input)
"""
def __init__(self,
name: str,
model: onnx.ModelProto,
infer_shapes: bool = True,
cuda: bool = False,
simplify: bool = False,
auto_optimize: bool = True,
fold_constants: bool = True,
parent_pytorch_module: Optional[torch.nn.Module] = None,
storage: Optional[dtypes.StorageType] = None,
save_transients: Optional[Dict[str, torch.Tensor]] = None,
auto_merge: bool = False):
"""
:param name: the name for the SDFG.
:param model: the model to import.
:param infer_shapes: whether to infer shapes for the model. If this is ``False``, the model must have
value infos (with shapes) for all arrays, including intermediate values.
:param cuda: if ``True``, the model will be executed on the GPU.
:param simplify: if ``True``, apply simplification transformations after all nodes have been expanded.
        :param auto_optimize: if ``True``, apply automatic optimizations before calling.
        :param fold_constants: if ``True``, apply constant-folding transformations after import.
:param parent_pytorch_module: when not None, the weight tensors are loaded from the parameters of this model
rather than the ONNX graph.
:param storage: the storage type of the parameters, inputs and outputs. If None, will be set according to
``cuda``.
:param save_transients: if not None, save transients to this dict (for debugging).
:param auto_merge: whether to automatically merge symbolic shapes in symbolic shape inference.
"""
for opset in model.opset_import:
if opset.domain == "" and opset.version != 12:
log.warning(
f"Expected the onnx model to be exported with opset 12, got {opset.version}. This model may fail "
f"to import as a result.")
self.do_auto_optimize = auto_optimize
if infer_shapes:
model = shape_inference.infer_shapes(model, auto_merge=auto_merge)
graph: onnx.GraphProto = model.graph
self.save_transients = save_transients
self.sdfg: SDFG = SDFG(name) #: the generated SDFG.
self.sdfg._parent_onnx_model = self
self.cuda = cuda
self.simplify = simplify
self.fold_constants = fold_constants
self.state: SDFGState = self.sdfg.add_state(
) #: the state containing the model computation.
# Add all values to the SDFG, check for unsupported ops
##########################################
self.value_infos = {}
self.inputs: List[str] = [] #: the inputs to the model
self.outputs: List[str] = [] #: the outputs of the model
if storage is None:
storage = dtypes.StorageType.GPU_Global if self.cuda else dtypes.StorageType.Default
for value, is_input in chain(zip(graph.input, repeat(True)),
zip(graph.output, repeat(False))):
if not value.HasField("name"):
raise ValueError("Got input or output without name")
if is_input:
self.inputs.append(value.name)
else:
self.outputs.append(value.name)
self.value_infos[value.name] = value
self._add_value_info(value, storage=storage)
for value in graph.value_info:
if not value.HasField("name"):
raise ValueError("Got input or output without name")
if value.name not in self.value_infos:
self.value_infos[value.name] = value
# add weights
self.weights: Dict[str, torch.Tensor] = {
} #: mapping from weight name to array
for init in graph.initializer:
self._add_constant_tensor(init, parent_pytorch_module, storage)
access_nodes = {}
self._idx_to_node = []
for i, node in enumerate(graph.node):
if not has_onnx_node(node.op_type):
raise ValueError("Unsupported ONNX operator: '{}'".format(
node.op_type))
# extract the op attributes
op_attributes = {
attribute_proto.name: convert_attribute_proto(attribute_proto)
for attribute_proto in node.attribute
}
if node.op_type == "Constant":
# Add constants to weights immediately
possible_values = [
"sparse_value", "value", "value_float", "value_floats",
"value_int", "value_ints", "value_string", "value_strings"
]
# do some manual validation here since the node validation will never run
if set(op_attributes).difference(possible_values):
raise ValueError(
f"Got unexpected attributes on Constant node "
f"{set(op_attributes).difference(possible_values)}")
if len(op_attributes) != 1:
raise ValueError(
"Expected Constant node to have exactly one of its attributes set"
)
if len(node.input) != 0 or len(node.output) != 1:
raise ValueError(
"Expected Constant node to have no inputs and exactly 1 output"
)
value_name = next(iter(op_attributes))
if node.output[0] not in self.value_infos:
raise ValueError(
"Could not find array with name '{}'".format(
node.output[0]))
self._add_value_info(self.value_infos[node.output[0]],
storage=storage)
self.sdfg.arrays[clean_onnx_name(
node.output[0])].transient = False
self.weights[node.output[0]] = torch.from_numpy(
op_attributes[value_name].copy())
continue
if node.HasField("name"):
node_name = clean_onnx_name(node.name)
else:
node_name = node.op_type + "_" + str(i)
# construct the dace node
op_node = get_onnx_node(node.op_type)(node_name, **op_attributes)
self.state.add_node(op_node)
self._idx_to_node.append(op_node)
for param_idx, (name, is_input) in chain(
enumerate(zip(node.input, repeat(True))),
enumerate(zip(node.output, repeat(False)))):
if clean_onnx_name(name) not in self.sdfg.arrays:
if name not in self.value_infos:
raise ValueError(
"Could not find array with name '{}'".format(name))
self._add_value_info(self.value_infos[name])
# get the access node
if name in access_nodes:
access = access_nodes[name]
else:
access = nodes.AccessNode(clean_onnx_name(name))
self.state.add_node(access)
access_nodes[name] = access
# get the connector name
params = op_node.schema.inputs if is_input else op_node.schema.outputs
params_len = len(params)
if param_idx >= params_len:
                    # this is a variadic parameter, so the last parameter of the schema must be variadic
if params[-1].param_type != ONNXParameterType.Variadic:
raise ValueError(
"Expected the last {i_or_o} parameter to be variadic,"
" since the {i_or_o} with idx {param_idx} has more parameters than the schema ({params_len})"
.format(i_or_o="input" if is_input else "output",
param_idx=param_idx,
params_len=params_len))
conn_name = params[-1].name + "__" + str(param_idx -
params_len + 1)
elif params[
param_idx].param_type == ONNXParameterType.Variadic:
# this is a variadic parameter, and it is within the range of params, so it must be the first
# instance of a variadic parameter
conn_name = params[param_idx].name + "__0"
else:
conn_name = params[param_idx].name
data_desc = self.sdfg.arrays[clean_onnx_name(name)]
# add the connector if required, and add an edge
if is_input:
if conn_name not in op_node.in_connectors:
assert op_node.add_in_connector(conn_name)
self.state.add_edge(
access, None, op_node, conn_name,
dace.Memlet.from_array(clean_onnx_name(name),
data_desc))
else:
if conn_name not in op_node.out_connectors:
assert op_node.add_out_connector(conn_name)
self.state.add_edge(
op_node, conn_name, access, None,
dace.Memlet.from_array(clean_onnx_name(name),
data_desc))
if self.fold_constants:
log.debug("Applying constant folding")
self.sdfg.apply_transformations_repeated([
transformation.ConstantFolding, dataflow.RedundantSecondArray
],
validate_all=True)
if self.cuda:
self.sdfg.apply_gpu_transformations()
def _add_constant_tensor(self, tensor: onnx.TensorProto, parent_pt_model,
storage: dtypes.StorageType):
if not tensor.HasField("name"):
raise ValueError("Got tensor without name")
if not tensor.HasField("data_type"):
raise ValueError("Initializer tensor '{}' has no type".format(
tensor.name))
if tensor.name in self.inputs:
# do not duplicate a weight if it is already an input
return
name = clean_onnx_name(tensor.name)
dtype = onnx_tensor_type_to_typeclass(tensor.data_type)
if len(tensor.dims) == 0:
# this is a scalar
self.sdfg.add_scalar(name, dtype, storage=storage)
else:
dims = [d for d in tensor.dims]
if name not in self.sdfg.arrays:
self.sdfg.add_array(name, dims, dtype, storage=storage)
else:
existing_arr = self.sdfg.arrays[name]
if existing_arr.dtype != dtype:
raise ValueError(
"Invalid ONNX model; found two values with name '{}', but different dtypes ({} and {})"
.format(name, existing_arr.dtype, dtype))
if tuple(existing_arr.shape) != tuple(dims):
raise ValueError(
"Invalid ONNX model; found two values with name '{}', but different dimensions ({} and {})"
.format(name, existing_arr.shape, dims))
weight_arr = numpy_helper.to_array(tensor)
if parent_pt_model is not None:
parent_parameters = dict(parent_pt_model.named_parameters())
if parent_pt_model is not None and tensor.name in parent_parameters:
self.weights[tensor.name] = parent_parameters[tensor.name].data
else:
# we need to copy here because the weight_arr tensor is not writable
self.weights[tensor.name] = torch.from_numpy(weight_arr.copy())
def _add_value_info(self,
value_info: onnx.ValueInfoProto,
storage=dtypes.StorageType.Default):
if not value_info.HasField("name"):
raise ValueError("Got value without name")
name = value_info.name
if not _nested_HasField(value_info, "type.tensor_type.shape"):
raise ValueError(
"Value '{}' does not have a shape in this graph."
" Please run shape inference before importing.".format(name))
tensor_type = value_info.type.tensor_type
if not tensor_type.HasField("elem_type"):
raise ValueError(
"Value '{}' does not have a type in this graph."
" Please run type inference before importing.".format(name))
shape = []
for d in tensor_type.shape.dim:
if d.HasField("dim_value"):
shape.append(d.dim_value)
elif d.HasField("dim_param"):
parsed = pystr_to_symbolic(d.dim_param)
for sym in parsed.free_symbols:
if clean_onnx_name(str(sym)) not in self.sdfg.symbols:
self.sdfg.add_symbol(clean_onnx_name(str(sym)),
stype=int)
parsed = parsed.subs(
sym, dace.symbol(clean_onnx_name(str(sym))))
shape.append(parsed)
else:
raise ValueError(
"Value '{}' does not have a shape in | |
# ome_zarr/reader.py
"""Reading logic for ome-zarr."""
import logging
from abc import ABC
from typing import Any, Dict, Iterator, List, Optional, Type, Union, cast
import dask.array as da
from vispy.color import Colormap
from .io import BaseZarrLocation
from .types import JSONDict
LOGGER = logging.getLogger("ome_zarr.reader")
class Node:
"""Container for a representation of the binary data somewhere in the data
hierarchy."""
def __init__(
self,
zarr: BaseZarrLocation,
root: Union["Node", "Reader", List[BaseZarrLocation]],
visibility: bool = True,
):
self.zarr = zarr
self.root = root
self.seen: List[BaseZarrLocation] = []
if isinstance(root, Node) or isinstance(root, Reader):
self.seen = root.seen
else:
self.seen = cast(List[BaseZarrLocation], root)
self.__visible = visibility
# Likely to be updated by specs
self.metadata: JSONDict = dict()
self.data: List[da.core.Array] = list()
self.specs: List[Spec] = []
self.pre_nodes: List[Node] = []
self.post_nodes: List[Node] = []
# TODO: this should be some form of plugin infra over subclasses
if Labels.matches(zarr):
self.specs.append(Labels(self))
if Label.matches(zarr):
self.specs.append(Label(self))
if Multiscales.matches(zarr):
self.specs.append(Multiscales(self))
if OMERO.matches(zarr):
self.specs.append(OMERO(self))
@property
def visible(self) -> bool:
"""True if this node should be displayed by default.
An invisible node may have been requested by the instrument, by the
user, or by the ome_zarr library after determining that this node
is lower priority, e.g. to prevent too many nodes from being shown
at once.
"""
return self.__visible
@visible.setter
def visible(self, visibility: bool) -> bool:
"""
Set the visibility for this node, returning the previous value.
A change of the visibility will propagate to all subnodes.
"""
old = self.__visible
if old != visibility:
self.__visible = visibility
for node in self.pre_nodes + self.post_nodes:
node.visible = visibility
return old
def load(self, spec_type: Type["Spec"]) -> Optional["Spec"]:
for spec in self.specs:
if isinstance(spec, spec_type):
return spec
return None
def add(
self,
zarr: BaseZarrLocation,
prepend: bool = False,
visibility: Optional[bool] = None,
) -> "Optional[Node]":
"""Create a child node if this location has not yet been seen.
Newly created nodes may be considered higher or lower priority than
the current node, and may be set to invisible if necessary.
:param zarr: Location in the node hierarchy to be added
:param prepend: Whether the newly created node should be given higher
priority than the current node, defaults to False
:param visibility: Allows setting the node (and therefore layer)
as deactivated for initial display or if None the value of the
current node will be propagated to the new node, defaults to None
:return: Newly created node if this is the first time it has been
encountered; None if the node has already been processed.
"""
if zarr in self.seen:
LOGGER.debug(f"already seen {zarr}; stopping recursion")
return None
if visibility is None:
visibility = self.visible
self.seen.append(zarr)
node = Node(zarr, self, visibility=visibility)
if prepend:
self.pre_nodes.append(node)
else:
self.post_nodes.append(node)
return node
def write_metadata(self, metadata: JSONDict) -> None:
for spec in self.specs:
metadata.update(self.zarr.root_attrs)
def __repr__(self) -> str:
suffix = ""
if not self.visible:
suffix += " (hidden)"
return f"{self.zarr}{suffix}"
class Spec(ABC):
"""Base class for specifications that can be implemented by groups or arrays within
the hierarchy.
Multiple subclasses may apply.
"""
@staticmethod
def matches(zarr: BaseZarrLocation) -> bool:
raise NotImplementedError()
def __init__(self, node: Node) -> None:
self.node = node
self.zarr = node.zarr
LOGGER.debug(f"treating {self.zarr} as {self.__class__.__name__}")
for k, v in self.zarr.root_attrs.items():
LOGGER.info("root_attr: %s", k)
LOGGER.debug(v)
def lookup(self, key: str, default: Any) -> Any:
return self.zarr.root_attrs.get(key, default)
class Labels(Spec):
"""Relatively small specification for the well-known "labels" group which only
contains the name of subgroups which should be loaded as labeled images."""
@staticmethod
def matches(zarr: BaseZarrLocation) -> bool:
"""Does the Zarr Image group also include a /labels sub-group?"""
# TODO: also check for "labels" entry and perhaps version?
return bool("labels" in zarr.root_attrs)
def __init__(self, node: Node) -> None:
super().__init__(node)
label_names = self.lookup("labels", [])
for name in label_names:
child_zarr = self.zarr.create(name)
if child_zarr.exists():
node.add(child_zarr)
class Label(Spec):
"""An additional aspect to a multiscale image is that it can be a labeled image, in
which each discrete pixel value represents a separate object."""
@staticmethod
def matches(zarr: BaseZarrLocation) -> bool:
"""If label-specific metadata is present, then return true."""
return bool("image-label" in zarr.root_attrs)
def __init__(self, node: Node) -> None:
super().__init__(node)
image_label = self.lookup("image-label", {})
image = image_label.get("source", {}).get("image", None)
parent_zarr = None
if image:
# This is an ome mask, load the image
parent_zarr = self.zarr.create(image)
if parent_zarr.exists():
LOGGER.debug(f"delegating to parent image: {parent_zarr}")
node.add(parent_zarr, prepend=True, visibility=False)
else:
parent_zarr = None
if parent_zarr is None:
LOGGER.warn(f"no parent found for {self}: {image}")
# Metadata: TODO move to a class
colors: Dict[Union[int, bool], List[float]] = {}
color_list = image_label.get("colors", [])
if color_list:
for color in color_list:
try:
label_value = color["label-value"]
rgba = color.get("rgba", None)
if rgba:
rgba = [x / 255 for x in rgba]
if isinstance(label_value, bool) or isinstance(label_value, int):
colors[label_value] = rgba
else:
raise Exception("not bool or int")
except Exception as e:
LOGGER.error(f"invalid color - {color}: {e}")
# TODO: a metadata transform should be provided by specific impls.
name = self.zarr.basename()
node.metadata.update(
{
"visible": node.visible,
"name": name,
"color": colors,
"metadata": {"image": self.lookup("image", {}), "path": name},
}
)
class Multiscales(Spec):
@staticmethod
def matches(zarr: BaseZarrLocation) -> bool:
"""is multiscales metadata present?"""
if zarr.zgroup:
if "multiscales" in zarr.root_attrs:
return True
return False
def __init__(self, node: Node) -> None:
super().__init__(node)
try:
datasets = self.lookup("multiscales", [])[0]["datasets"]
datasets = [d["path"] for d in datasets]
self.datasets: List[str] = datasets
LOGGER.info("datasets %s", datasets)
except Exception as e:
LOGGER.error(f"failed to parse multiscale metadata: {e}")
return # EARLY EXIT
for resolution in self.datasets:
# data.shape is (t, c, z, y, x) by convention
data: da.core.Array = self.zarr.load(resolution)
chunk_sizes = [
str(c[0]) + (" (+ %s)" % c[-1] if c[-1] != c[0] else "")
for c in data.chunks
]
LOGGER.info("resolution: %s", resolution)
LOGGER.info(" - shape (t, c, z, y, x) = %s", data.shape)
LOGGER.info(" - chunks = %s", chunk_sizes)
LOGGER.info(" - dtype = %s", data.dtype)
node.data.append(data)
# Load possible node data
child_zarr = self.zarr.create("labels")
if child_zarr.exists():
node.add(child_zarr, visibility=False)
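# The lookup in __init__ above expects "multiscales" metadata shaped roughly
# like the following sketch (derived from the access pattern, not a full
# spec); only the "path" of each dataset entry is consumed:
#
#     {"multiscales": [{"datasets": [{"path": "0"}, {"path": "1"}]}]}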
class OMERO(Spec):
@staticmethod
def matches(zarr: BaseZarrLocation) -> bool:
return bool("omero" in zarr.root_attrs)
def __init__(self, node: Node) -> None:
super().__init__(node)
# TODO: start checking metadata version
self.image_data = self.lookup("omero", {})
try:
model = "unknown"
rdefs = self.image_data.get("rdefs", {})
if rdefs:
model = rdefs.get("model", "unset")
channels = self.image_data.get("channels", None)
if channels is None:
return # EARLY EXIT
try:
len(channels)
except Exception:
LOGGER.warn(f"error counting channels: {channels}")
return # EARLY EXIT
colormaps = []
contrast_limits: Optional[List[Optional[Any]]] = [None for x in channels]
names: List[str] = [("channel_%d" % idx) for idx, ch in enumerate(channels)]
visibles: List[bool] = [True for x in channels]
for idx, ch in enumerate(channels):
# 'FF0000' -> [1, 0, 0]
color = ch.get("color", None)
if color is not None:
rgb = [(int(color[i : i + 2], 16) / 255) for i in range(0, 6, 2)]
# TODO: make this value an enumeration
if model == "greyscale":
rgb = [1, 1, 1]
colormaps.append(Colormap([[0, 0, 0], rgb]))
label = ch.get("label", None)
if label is not None:
names[idx] = label
visible = ch.get("active", None)
if visible is not None:
visibles[idx] = visible and node.visible
window = ch.get("window", None)
if window is not None:
start = window.get("start", None)
end = window.get("end", None)
if start is None or end is None:
# Disable contrast limits settings if one is missing
contrast_limits = None
elif contrast_limits is not None:
contrast_limits[idx] = [start, end]
node.metadata["name"] = names
node.metadata["visible"] = visibles
node.metadata["contrast_limits"] = contrast_limits
node.metadata["colormap"] = colormaps
except Exception as e:
LOGGER.error(f"failed to parse metadata: {e}")
class Reader:
"""Parses the given Zarr instance into a collection of Nodes properly ordered
depending on context.
Depending on the starting point, metadata may be followed up or down the Zarr
hierarchy.
"""
def __init__(self, zarr: BaseZarrLocation) -> None:
assert zarr.exists()
self.zarr = zarr
self.seen: List[BaseZarrLocation] = [zarr]
def __call__(self) -> Iterator[Node]:
node = Node(self.zarr, self)
if node.specs: # Something has matched
LOGGER.debug(f"treating {self.zarr} as ome-zarr")
yield from self.descend(node)
# TODO: API thoughts for the Spec type
# - ask for recursion or not
# - ask for "provides data", "overrides data"
elif self.zarr.zarray: # Nothing has matched
LOGGER.debug(f"treating {self.zarr} as raw zarr")
node.data.append(self.zarr.load())
yield node
else:
LOGGER.debug(f"ignoring {self.zarr}")
| |
new = self.__newInstance(newArr, self.__q)
for ri in new.__arr:
ri._Num__q = new.__q
openUnit()
return new
def __rmul__(self, obj):
closeUnit()
if type(obj) == int or type(obj) == float:
new = self.__newInstance([n.__rmul__(obj) for n in self.__arr], self.__q, dv=self.__gd_valid)
elif type(obj) == Const:
new = self.__newInstance([n.__rmul__(obj) for n in self.__arr], obj._Const__q * self.__q, dv=self.__gd_valid)
elif type(obj) == Num:
new = self.__newInstance([n.__rmul__(obj) for n in self.__arr], obj._Num__q * self.__q)
elif type(obj) == NumItem:
if len(self.__arr) != len(obj.__arr):
raise itemNotSameLengthException('The two arrays in an array operation must contain the same number of elements!')
newArr = []
for i in range(len(self.__arr)):
newArr.append(self.__arr[i].__rmul__(obj.__arr[i]))
new = self.__newInstance(newArr, obj.__q * self.__q)
elif type(obj) == list and (type(obj[0]) == int or type(obj[0]) == float):
if len(self.__arr) != len(obj):
raise itemNotSameLengthException('The plain-number list and the array in an array operation must contain the same number of elements!')
newArr = []
for i in range(len(self.__arr)):
newArr.append(self.__arr[i].__rmul__(obj[i]))
new = self.__newInstance(newArr, self.__q)
for ri in new.__arr:
ri._Num__q = new.__q
openUnit()
return new
def __truediv__(self, obj):
closeUnit()
if type(obj) == int or type(obj) == float:
new = self.__newInstance([n.__truediv__(obj) for n in self.__arr], self.__q, dv=self.__gd_valid)
elif type(obj) == Const:
new = self.__newInstance([n.__truediv__(obj) for n in self.__arr], self.__q / obj._Const__q, dv=self.__gd_valid)
elif type(obj) == Num:
new = self.__newInstance([n.__truediv__(obj) for n in self.__arr], self.__q / obj._Num__q)
elif type(obj) == NumItem:
if len(self.__arr) != len(obj.__arr):
raise itemNotSameLengthException('The two arrays in an array operation must contain the same number of elements!')
newArr = []
for i in range(len(self.__arr)):
newArr.append(self.__arr[i].__truediv__(obj.__arr[i]))
new = self.__newInstance(newArr, self.__q / obj.__q)
elif type(obj) == list and (type(obj[0]) == int or type(obj[0]) == float):
if len(self.__arr) != len(obj):
raise itemNotSameLengthException('The plain-number list and the array in an array operation must contain the same number of elements!')
newArr = []
for i in range(len(self.__arr)):
newArr.append(self.__arr[i].__truediv__(obj[i]))
new = self.__newInstance(newArr, self.__q)
for ri in new.__arr:
ri._Num__q = new.__q
openUnit()
return new
def __rtruediv__(self, obj):
closeUnit()
if type(obj) == int or type(obj) == float:
new = self.__newInstance([n.__rtruediv__(obj) for n in self.__arr], 1/self.__q)
elif type(obj) == Const:
new = self.__newInstance([n.__rtruediv__(obj) for n in self.__arr], obj._Const__q / self.__q)
elif type(obj) == Num:
new = self.__newInstance([n.__rtruediv__(obj) for n in self.__arr], obj._Num__q / self.__q)
elif type(obj) == NumItem:
if len(self.__arr) != len(obj.__arr):
raise itemNotSameLengthException('The two arrays in an array operation must contain the same number of elements!')
newArr = []
for i in range(len(self.__arr)):
newArr.append(self.__arr[i].__rtruediv__(obj.__arr[i]))
new = self.__newInstance(newArr, obj.__q / self.__q)
elif type(obj) == list and (type(obj[0]) == int or type(obj[0]) == float):
if len(self.__arr) != len(obj):
raise itemNotSameLengthException('The plain-number list and the array in an array operation must contain the same number of elements!')
newArr = []
for i in range(len(self.__arr)):
newArr.append(self.__arr[i].__rtruediv__(obj[i]))
new = self.__newInstance(newArr, 1/self.__q)
for ri in new.__arr:
ri._Num__q = new.__q
openUnit()
return new
def __pow__(self, b):
closeUnit()
if type(b) == int or type(b) == float:
new = self.__newInstance([n.__pow__(b) for n in self.__arr], self.__q**b, dv=self.__gd_valid)
elif type(b) == Const:
new = self.__newInstance([n.__pow__(b) for n in self.__arr], self.__q**b.value(), dv=self.__gd_valid)
elif type(b) == list and (type(b[0]) == int or type(b[0]) == float):
if len(self.__arr) != len(b):
raise itemNotSameLengthException('The plain-number list and the array in an array operation must contain the same number of elements!')
newArr = []
for i in range(len(self.__arr)):
newArr.append(self.__arr[i].__pow__(b[i]))
new = self.__newInstance(newArr)
for ri in new.__arr:
ri._Num__q = new.__q
openUnit()
return new
def __rpow__(self, a):
closeUnit()
if type(a) == int or type(a) == float or type(a) == Const:
new = self.__newInstance([n.__rpow__(a) for n in self.__arr])
elif type(a) == list and (type(a[0]) == int or type(a[0]) == float):
if len(self.__arr) != len(a):
raise itemNotSameLengthException('The plain-number list and the array in an array operation must contain the same number of elements!')
newArr = []
for i in range(len(self.__arr)):
newArr.append(self.__arr[i].__rpow__(a[i]))
new = self.__newInstance(newArr)
for ri in new.__arr:
ri._Num__q = new.__q
openUnit()
return new
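# A brief usage sketch of the elementwise operators above (hypothetical
# values; relies on NumItem parsing a space-separated string, as in the
# sort() example below):
#
#     a = NumItem('1.52 1.55 1.56')
#     b = 2 * a                 # __rmul__: each element doubled, unit kept
#     c = a / [1.0, 2.0, 3.0]   # elementwise division by a plain list
#     d = a ** 2                # __pow__: elementwise square, unit squared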
def resetUnit(self, unit=None):
'''Reset the unit of this NumItem array
[Parameters]
unit (optional, str): the new unit. Defaults to unit=None, i.e. no unit.'''
if unit == None:
self.__q = 1
else:
self.__q = Quantity(1., unit) if type(unit) == str else unit
self.__qUpdate()
def setMu(self, mu):
'''Set the true value μ, used in error analysis
[Parameters]
mu (str or Num): the true value to set.'''
if type(mu) == str:
self.__mu = Num(mu)
elif type(mu) == Num:
self.__mu = mu
def setSym(self, sym):
'''Set the symbol of this array
[Parameters]
sym (str): the symbol to set.'''
self.__sym = '{%s}' % sym
def setMuSym(self, muSym):
'''Set the symbol of the true value
[Parameters]
muSym (str): the true-value symbol to set.'''
self.__muSym = muSym
def sym(self):
'''Get the symbol of the array
[Returns]
str: the symbol of the array.
'''
return self.__sym
def muSym(self):
'''Get the symbol of the true value
[Returns]
str: the symbol of the true value.
'''
return self.__muSym
def __abs__(self):
return self.__newInstance([n.__abs__() for n in self.__arr], self.__q, dv=self.__gd_valid)
def __neg__(self):
return self.__newInstance([n.__neg__() for n in self.__arr], self.__q, dv=self.__gd_valid)
def __next__(self):
if self.__index >= len(self.__arr):
self.__index = 0
raise StopIteration
else:
result = self.__arr[self.__index]
self.__index += 1
return result
def setIsRelative(self, isRelative):
'''Set whether the values are relative ratios (percentage form)
[Parameters]
isRelative (bool): whether the values are relative ratios.'''
[n.setIsRelative(isRelative) for n in self.__arr]
self.__isRelative = isRelative
def toFloatList(self):
'''Convert the current array into a list of floats
[Returns]
list<float>: list of the values in float form.'''
return [float(n._Num__value) for n in self.__arr]
def toIntList(self):
'''Convert the current array into a list of ints
[Returns]
list<int>: list of the values in int form.'''
return [int(n._Num__value) for n in self.__arr]
def toNumpyArray(self):
'''Convert the current array into a numpy array
[Returns]
numpy.ndarray: the newly created numpy array.'''
return numpy.array([float(n._Num__value) for n in self.__arr])
def fix(self):
'''Round the numbers to their significant digits
[Returns]
NumItem: the rounded NumItem array.'''
return self.__newInstance([n.fix() for n in self.__arr], self.__q, sym=self.__sym, dv=self.__gd_valid, qIsUpdate=False)
def sort(self, rev=False):
'''Get the sorted array
[Parameters]
rev (bool): whether to sort in descending order; False for ascending, True for descending. Defaults to rev=False.
[Returns]
NumItem: the sorted array
[Example]
>>> h = NumItem('1.52 1.55 1.56 1.51 1.55 1.53')
>>> h.sort()
[1.51, 1.52, 1.53, 1.55, 1.55, 1.56]
>>> h.sort(rev=True)
[1.56, 1.55, 1.55, 1.53, 1.52, 1.51]'''
s = sorted(self.__arr, reverse=rev)
return self.__newInstance(s, self.__q, sym=self.__sym, mu=self.__mu, isRelative=self.__isRelative, dv=self.__gd_valid, qIsUpdate=False)
def setSciBound(self, bound):
'''Set the boundary condition for showing the values of this array in scientific notation
[Parameters]
bound (int): the exponent boundary for scientific notation; a value is shown in scientific notation when its magnitude is at most 10**(-bound) or at least 10**bound, and in plain notation otherwise. When no boundary has been set, bound defaults to 3.
[Example]
>>> d = NumItem('15023 15029 15017 15020')
>>> d
[1.5023e+04, 1.5029e+04, 1.5017e+04, 1.5020e+04]
>>> d.setSciBound(5)
>>> d
[15023, 15029, 15017, 15020]'''
[n.setSciBound(bound) for n in self.__arr]
def isum(self, process=False, needValue=False):
'''Sum of the array
[Parameters]
1. process (optional, bool): whether to produce the calculation process. Defaults to process=False.
2. needValue (optional, bool): when the calculation process is produced, whether to also return the result. Defaults to needValue=False.
[Returns]
(1) When process is False, returns the sum as a Num.
(2) When process is True and needValue is False, returns the calculation process as LaTeX.
(3) When process is True and needValue is True, returns a tuple of the Num sum and the LaTeX calculation process.'''
s = Num(None)
s._Num__value = sum([n._Num__value for n in self.__arr])
s._Num__d_front = max([n._Num__d_front for n in self.__arr]) # take the larger digit count before the decimal point
s._Num__d_behind = min([n._Num__d_behind for n in self.__arr if n._Num__value != 0]) # take the smaller digit count after the decimal point
s._Num__d_valid = self.__gd_valid
s._Num__q = self.__q
s = s.fix()
if process:
latex = LaTeX()
sciDigit = self.__sciDigit()
if sciDigit == 0:
sumExpr = '+'.join([n.dlatex(2) for n in self.__arr])
latex.add(r'\sum\limits_{i=1}^n %s_{i}=%s=%s' % (self.__sym, sumExpr, s.latex()))
else:
d_arr = self * 10**(-sciDigit)
sumExpr = '+'.join([n.dlatex(2) for n in d_arr])
if len([x for x in self.__arr if x < 0]) == 0:
latex.add(r'\sum\limits_{i=1}^n %s_{i}=\left(%s\right)\times 10^{%d}=%s' % (self.__sym, sumExpr, sciDigit, s.latex()))
else:
latex.add(r'\sum\limits_{i=1}^n %s_{i}=\left[%s\right]\times 10^{%d}=%s' % (self.__sym, sumExpr, sciDigit, s.latex()))
if needValue:
return s, latex
else:
return latex
return s
def mean(self, process=False, needValue=False, dec=False):
'''Sample mean
[Parameters]
1. process (optional, bool): whether to produce the calculation process. Defaults to process=False.
2. needValue (optional, bool): when the calculation process is produced, whether to also return the result. Defaults to needValue=False.
3. dec (optional, bool): whether to give the sample mean in plain numeric form (int or float). When dec is True, the mean is given as a plain number; when False, as a Num value. Note that process has no effect when dec=True. Defaults to dec=False.
[Returns]
(1) When dec is True, returns the sample mean as a plain float;
(2) When dec is False:
A. When process is False, returns the sample mean as a Num.
B. When process is True and needValue is False, returns the calculation process as LaTeX.
C. When process is True and needValue is True, returns a tuple of the Num sample mean and the LaTeX calculation process.
'''
x = Num(None)
x._Num__value = sum([n._Num__value for n in self.__arr]) / len(self.__arr)
x._Num__d_front = max([ti._Num__d_front for ti in self.__arr]) # take the larger digit count before the decimal point
x._Num__d_behind = min([ti._Num__d_behind for ti in self.__arr if ti._Num__value != 0]) # take the smaller digit count after the decimal point
x._Num__d_valid = self.__gd_valid
x._Num__q = self.__q
result = x
if dec:
return result._Num__value
else:
if process:
latex = LaTeX()
sciDigit = self.__sciDigit()
if sciDigit == 0:
sumExpr = '+'.join([n.dlatex(2) for n in self.__arr])
if len([x for x in self.__arr if x < 0]) == 0:
latex.add(r'\overline{%s}=\cfrac{1}{n}\sum\limits_{i=1}^n %s_{i}=\cfrac{1}{%d}\left(%s\right)=%s' % (self.__sym, self.__sym, len(self.__arr), sumExpr, result.latex()))
else:
latex.add(r'\overline{%s}=\cfrac{1}{n}\sum\limits_{i=1}^n %s_{i}=\cfrac{1}{%d}\left[%s\right]=%s' % (self.__sym, self.__sym, len(self.__arr), sumExpr, result.latex()))
else:
d_arr = self * 10**(-sciDigit)
sumExpr = '+'.join([n.dlatex(2) for n in d_arr])
if len([x for x in self.__arr if x < 0]) == 0:
latex.add(r'\overline{%s}=\cfrac{1}{n}\sum\limits_{i=1}^n %s_{i}=\cfrac{1}{%d}\left(%s\right)\times 10^{%d}=%s' % (self.__sym, self.__sym, len(self.__arr), sumExpr, sciDigit, result.latex()))
else:
latex.add(r'\overline{%s}=\cfrac{1}{n}\sum\limits_{i=1}^n %s_{i}=\cfrac{1}{%d}\left[%s\right]\times 10^{%d}=%s' % (self.__sym, self.__sym, len(self.__arr), sumExpr, sciDigit, result.latex()))
if needValue:
return result, latex
else:
return latex
return result
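# A short usage sketch for mean() (hypothetical data; LaTeX here is the
# process-description class used throughout this module):
#
#     t = NumItem('1.52 1.55 1.56 1.51')
#     m = t.mean()                       # Num result
#     m_dec = t.mean(dec=True)           # plain float, process is ignored
#     m, steps = t.mean(process=True, needValue=True)   # (Num, LaTeX)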
def mid(self, process=False, needValue=False):
'''Median
[Parameters]
1. process (optional, bool): whether to produce the calculation process. Defaults to process=False.
2. needValue (optional, bool): when the calculation process is produced, whether to also return the result. Defaults to needValue=False.
[Returns]
(1) When process is False, returns the median as a Num.
(2) When process is True and needValue is False, returns the calculation process as LaTeX.
(3) When process is True and needValue is True, returns a tuple of the Num median and the LaTeX calculation process.'''
s = sorted(self.__arr)
n = len(s)
if n % 2 == 1: # odd number of values: take the middle one (e.g. 5 elements -> index 2)
res = s[n//2]
else: # even number of values: average the middle two (e.g. 6 elements -> indexes 2 and 3)
res = (s[n//2-1]+s[n//2]) / 2
if process:
latex = LaTeX(r'\text{Sorting the values in ascending order gives }%s=%s' % (self.__sym, NumItem(s, self.__q).latex()))
if n % 2 == 1:
latex.add(r'\text{The median is }%s_{%d}=%s' % (self.__sym, n//2+1, res.latex()))
else:
latex.add(r'\text{The median is }\cfrac{%s_{%d}+%s_{%d}}{2}=\cfrac{%s+%s}{2}=%s' % (self.__sym, n//2, self.__sym, n//2+1, s[n//2-1].dlatex(), s[n//2].dlatex(), res.latex()))
if needValue:
return res, latex
else:
return latex
return res
def devi(self, process=False, needValue=False):
'''偏差(deviation)
【参数说明】
1.process(可选,bool):是否获得计算过程。默认process=False。
2.needValue(可选,bool):当获得计算过程时,是否返回计算结果。默认needValue=False。
【返回值】
①process为False时,返回值为NumItem类型的数组中各数值与样本均值的偏差组成的数组。
②process为True且needValue为False时,返回值为LaTeX类型的计算过程。
③process为True且needValue为True时,返回值为NumItem类型的数组中各数值与样本均值的偏差组成的数组和LaTeX类型的计算过程组成的元组。'''
mean = self.mean()
result = (self - mean).fix()
result.setSym('d')
if process:
mean, latex = self.mean(process=True, needValue=True)
meanExpr = mean.dlatex(2)
latex = LaTeX(r'\text{From the formula }d_{i}=%s_{i}-\overline{%s}\text{, we get}' % (self.__sym, self.__sym))
for i in range(len(self.__arr)):
latex.add(r'd_{%d}=%s-%s=%s' % (i+1, self.__arr[i].dlatex(), meanExpr, result[i].latex()))
if needValue:
return result, latex
else:
return latex
return result
def staDevi(self, process=False, processWithMean=True, needValue=False, dec=False, remainOneMoreDigit=False):
'''Sample standard deviation
[Parameters]
1. process (optional, bool): whether to produce the calculation process. Defaults to process=False.
2. processWithMean (optional, bool): when the calculation process is produced, whether to show the calculation of the mean as well. Effective only when process=True. Defaults to processWithMean=True.
3. needValue (optional, bool): when the calculation process is produced, whether to also return the result. Defaults to needValue=False.
4. dec (optional, bool): whether to give the standard deviation in plain numeric form (int or float). When dec is True, it is given as a plain number; when False, as a Num value. Note that all other parameters have no effect when dec=True. Defaults to dec=False.
5. remainOneMoreDigit (optional, bool): whether to keep one extra significant digit in the result. Defaults to remainOneMoreDigit=False.
[Returns]
(1) When dec is True, returns the standard deviation as a plain float;
(2) When dec is False:
A. When process is False, returns the standard deviation as a Num.
B. When process is True and needValue is False, returns the calculation process as LaTeX.
C. When process is True and needValue is True, returns a tuple of the Num standard deviation and the LaTeX calculation process.'''
if dec:
mean = self.mean(dec=True)
dsum = sum([(ni._Num__value - mean)**2 for ni in self.__arr])
return sqrt(dsum / (len(self.__arr) - 1))
else:
latex = LaTeX()
if process and processWithMean:
mean, lsub = self.mean(process=True, needValue=True)
latex.add(lsub)
else:
mean = self.mean()
dsum = sum([(ni._Num__value - mean._Num__value)**2 for ni in self.__arr])
res = sqrt(dsum / (len(self.__arr) - 1))
| |
'''
Author: <NAME>
Date: 3/15/19
Summary:
- Contains computation methods that board.py uses to
manage valid move seeks and piece placement.
- Methods use Numba with jit decorator that precompiles
types and makes runtime faster than normal python.
'''
from numba import jit
import numpy as np
import math
# def dummy_jit(*args, **kwargs):
# def dumdum(f):
# return f
# return dumdum
#
# jit = dummy_jit
#### METHODS FOR check_shifted() ####
@jit("UniTuple(int64, 2)(UniTuple(int64, 2), UniTuple(int64, 2), double)", nopython=True) # "int(int64, ...)"
def rotate_by_deg(index, offset_point, angle):
''' Rotates each point on piece around the index by the given angle
'''
ox, oy = index
px, py = offset_point
new_x = ox + math.cos(angle) * (px - ox) - math.sin(angle) * (py - oy)
new_y = oy + math.sin(angle) * (px - ox) + math.cos(angle) * (py - oy)
return int(round(new_x, 1)), int(round(new_y, 1))
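# A worked example of the rotation above (pure math, no board state): with
# index (0, 0), offset point (1, 0) and angle pi/2 (90 degrees), cos = 0 and
# sin = 1, so new_x = 0 + 0*1 - 1*0 = 0 and new_y = 0 + 1*1 + 0*0 = 1:
# the point (1, 0) maps to (0, 1) around the origin.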
@jit("UniTuple(int64, 2)(UniTuple(int64, 2), int64, int64)", nopython=True)
def flip_piece_x(index, x, y):
''' Takes the difference between index x and point x, then applies reverse
difference to the index point. y stays the same
'''
return index[0] - (index[0] - x) * -1, y
@jit("UniTuple(int64, 2)(UniTuple(int64, 2), int64, int64)", nopython=True)
def flip_piece_y(index, x, y):
''' Takes the difference between index y and point y, then applies reverse
difference to the index point. x stays the same
'''
return x, index[1] + (y - index[1]) * -1
@jit("UniTuple(int64, 2)(UniTuple(int64, 2), int64, int64, unicode_type)", nopython=True)
def rotate_piece(index, x_offset, y_offset, piece_orientation):
''' Description: Orients piece around the index point
Parameters:
index: int tuple that specifies the index coordinate on the board (the coord the piece will rotate around)
offset: int tuple that specifies the offset from the index coord for the current cell
piece_orientation: string specifying what new orientation you want the point at
Returns:
2 ints x and y that are the new rotated piece coords
'''
piece_orientation = piece_orientation[:-1] # Takes out last character specifying the shift id (not needed in this method)
x_offset += index[0] # calculates the actual x coord on board
y_offset += index[1] # calculates the actual y coord on board
if piece_orientation == "north":
return rotate_by_deg(index, (x_offset, y_offset), math.radians(270))
elif piece_orientation == "northwest":
new_x, new_y = rotate_by_deg(index, (x_offset, y_offset), math.radians(270))
return flip_piece_x(index, new_x, new_y)
elif piece_orientation == "south":
return rotate_by_deg(index, (x_offset, y_offset), math.radians(90))
elif piece_orientation == "southeast":
new_x, new_y = rotate_by_deg(index, (x_offset, y_offset), math.radians(90))
return flip_piece_x(index, new_x, new_y)
elif piece_orientation == "west":
return rotate_by_deg(index, (x_offset, y_offset), math.radians(180))
elif piece_orientation == "southwest":
new_x, new_y = rotate_by_deg(index, (x_offset, y_offset), math.radians(180))
return flip_piece_y(index, new_x, new_y)
elif piece_orientation == "northeast":
new_x, new_y = rotate_by_deg(index, (x_offset, y_offset), math.radians(0))
return flip_piece_y(index, new_x, new_y)
else: # Default orientation (East)
return rotate_by_deg(index, (x_offset, y_offset), math.radians(0))
@jit("boolean(int64[:, ::1], int64, int64, int64)", nopython=True)
def is_valid_adjacents(board_contents, y, x, player_color):
''' Description: Invalid coord if left, right, bottom, or top cell is the same color as the current player.
Parameters:
board_contents: 20 by 20 numpy matrix representing the current state of the board
x: int x coord of the cell
y: int y coord of the cell
player_color: int representing current player color
Returns:
bool indicating whether the cell is a valid adjacent
'''
valid_adjacent = True
# Excludes top board edge from top cell check
if y != 0:
if board_contents[y - 1][x] == player_color:
valid_adjacent = False
# Excludes left board edge from left cell check
if x != 0:
if board_contents[y][x - 1] == player_color:
valid_adjacent = False
# Excludes bottom board edge from bottom cell check
if y != 19:
if board_contents[y + 1][x] == player_color:
valid_adjacent = False
# Excludes right board edge from right cell check
if x != 19:
if board_contents[y][x + 1] == player_color:
valid_adjacent = False
return valid_adjacent
@jit("boolean(int64[:, ::1], int64, int64, int64)", nopython=True)
def is_valid_cell(board_contents, x, y, player_color):
''' Description: If the cell x, y is empty, has no adjacent cells that are the same color,
and is not out of bounds of the 20x20 board, then the cell is valid
to put a part of a piece on it.
Parameters:
board_contents: 20 by 20 numpy matrix representing the current state of the board
x: int x coord of the cell
y: int y coord of the cell
player_color: int representing current player color
Returns:
bool indicating whether the cell is a valid cell
'''
# Out of bounds check
if x < 0 or x >= 20 or y < 0 or y >= 20:
return False
# Checks if cell is empty and a valid adjacent
if (board_contents[y][x] == 0 and is_valid_adjacents(board_contents, y, x, player_color)):
return True
else:
return False
@jit("int64[:](int64[:, ::1], int64, UniTuple(int64, 2), unicode_type, int64[:, :, ::1])", nopython=True)
def check_shifted(board_contents, player_color, index, orientation, shifted_offsets):
''' Description: Shifts the entire piece N times, where N is the number of cells the piece takes up.
All shifted offsets are checked for the current orientation to see whether
the shifted set of offsets is a valid move.
Parameters:
board_contents: 20 by 20 numpy matrix representing the current state of the board
player_color: int representing current player color
index: int tuple that specifies the index coordinate on the board (the coord the piece will rotate around)
orientation: string specifying which orientation is being checked
shifted_offsets: list of a list of tuples where each element in the main list represents a different set of coords
for a shifted piece.
Returns:
list of ints giving the ids of the shifted offset sets at which the piece
can be placed
'''
shifted_ids = np.zeros(shifted_offsets.shape[0], np.int64)
num_items = 0
for shifted_id in range(shifted_offsets.shape[0]): # Shift piece N times where N is the number of cells in the piece
valid_placement = True
for offset_id in range(shifted_offsets.shape[1]):
offset = shifted_offsets[shifted_id, offset_id, :]
if offset[0] == 0 and offset[1] == 0: # No need to rotate the coord since it's the index and there is no offset
if not is_valid_cell(board_contents, index[0], index[1], player_color):
valid_placement = False
else:
new_piece = rotate_piece(index, offset[0], offset[1], orientation)
new_x = new_piece[0]
new_y = new_piece[1]
if not is_valid_cell(board_contents, new_x, new_y, player_color):
valid_placement = False
if valid_placement:
shifted_ids[num_items] = shifted_id
num_items += 1
return shifted_ids[:num_items]
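# A minimal invocation sketch (hypothetical piece: a 1x2 domino with offsets
# (0, 0) and (1, 0); dtypes/contiguity must match the jit signatures above,
# and the trailing "!" is the shift-id character that rotate_piece strips):
#
#     board = np.zeros((20, 20), np.int64)
#     domino = np.array([[[0, 0], [1, 0]],     # original indexing
#                        [[0, 0], [-1, 0]]],   # shifted: cell 2 is the index
#                       np.int64)
#     ids = check_shifted(board, 1, (0, 0), "east!", domino)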
#### METHODS FOR get_all_shifted_offsets() ####
@jit("int64[:, ::1](int64[:, ::1], unicode_type)", nopython=True)
def rotate_default_piece(offsets, orientation):
''' Description: Rotates the initial default piece orientation for shifting.
Parameters:
offsets: numpy array of tuples indicated all corresponding offsets to a specific piece type
orientation: string indicating the orientation to rotate the offset pieces
Returns:
numpy list of all offsets for given orientation
'''
orientation_offsets_to_shift = np.zeros((len(offsets), 2), np.int64)
for index in range(len(offsets)):
if offsets[index][0] == 0 and offsets[index][1] == 0:
orientation_offsets_to_shift[index, :] = (0, 0)
else:
new_coord = rotate_piece((0, 0), offsets[index][0], offsets[index][1], orientation + "!") # adding dummy character to end since rotate ignores last character of orientation
orientation_offsets_to_shift[index, :] = (new_coord[0], new_coord[1])
return orientation_offsets_to_shift
@jit("int64[:, ::1](int64[:, ::1], int64)", nopython=True)
def shift_offsets(offsets, offset_id):
''' Description: Shifts the offsets so that the offset that corresponds to the offset_id is the new index
Parameters:
offsets: numpy array of tuples containing the coords of offsets needing to be shifted
offset_id: identifies which coord becomes the new index in the list of offsets
Returns:
numpy array of tuples containing the newly shifted offsets
'''
shifted_offsets = np.zeros((len(offsets), 2), np.int64)
if offset_id == 0: # No need to shift the offsets for the default piece shape defined in the global space
return offsets
new_origin_y_diff = offsets[offset_id][0]
new_origin_x_diff = offsets[offset_id][1]
for index in range(len(offsets)):
shifted_offsets[index, :] = (offsets[index][0] - new_origin_y_diff, offsets[index][1] - new_origin_x_diff)
return shifted_offsets
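# A worked example of the shift above: for offsets [(0, 0), (0, 1), (0, 2)]
# and offset_id=1, the entry (0, 1) becomes the new index, giving
# [(0, -1), (0, 0), (0, 1)] -- every cell re-expressed relative to it.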
@jit("int64[:, :, ::1](int64[:, ::1], unicode_type)", nopython=True)
def get_all_shifted_offsets(offsets, orientation):
''' Description: Compiles a list of all shifted offsets for a piece at a specific orientation.
Returns a numpy array, which is a list of a list of tuples which each contain
a shifted offset.
Parameters:
offsets: numpy array of tuples containing the coords of offsets needing to be shifted
orientation: string specifying the orientation that the shifts should take place
Returns:
list of all shifted orientations a piece can make | |
from http import HTTPStatus
from django.db.models import F, Q, Case, When, Sum, Count
from django.contrib.auth import get_user_model
from django.contrib.auth.models import AnonymousUser
from django.test import TestCase, RequestFactory, override_settings
from django.test import TransactionTestCase
from django.urls import reverse
from .. import views
from ..models import Specification, Order, Category, Rate
User = get_user_model()
class HomePageViewTests(TestCase):
fixtures = ['example_shop_data.json']
@classmethod
def setUpTestData(cls):
username, passwd = 'test', '<PASSWORD>'
user = User.objects.create_user(
username=username, password=passwd,
)
cls.cart_order = Order.objects.create(
user=user, status=Order.CART,
)
cls.username = username
cls.passwd = passwd
cls.user = user
def setUp(self):
self.factory = RequestFactory()
self.specs = Specification.objects.filter(
available_qty__gt=0,
).order_by('-id')
home_url = reverse('shop:home')
self.request = self.factory.get(home_url)
self.url = home_url
def test_context_data(self):
"""Context data for all users include anonymous one"""
n = views.HomePageView.number_of_specs
self.assertGreaterEqual(
len(self.specs), n,
msg=f'There are fewer than {n} specifications in the test db.',
)
self.request.user = AnonymousUser()
response = views.HomePageView.as_view()(self.request)
self.assertIn('form', response.context_data)
self.assertIn('form_rating', response.context_data)
self.assertIn('form_login', response.context_data)
self.assertIn('form_sign_up', response.context_data)
self.assertNotIn('cart', response.context_data)
self.assertIn('spec_list', response.context_data)
spec_list = list(
response.context_data['spec_list'].values_list('id', flat=True)
)
self.assertEqual(len(spec_list), n)
self.assertEqual(spec_list[0], self.specs[0].id)
def test_cart_context_data(self):
"""
Context data contains the user cart with the quantity and
spec id as the dict key.
A user cart is the instance of Order model with a cart status.
First check if there are no items in the cart, then add item.
"""
spec = self.specs.first()
self.assertIsNotNone(spec, msg='No specification')
self.request.user = self.user
response = views.HomePageView.as_view()(self.request)
self.assertIn('cart', response.context_data)
self.assertFalse(response.context_data['cart'])
defaults = {'quantity': spec.pre_packing, 'price': spec.price}
self.cart_order.specs.add(spec, through_defaults=defaults)
response = views.HomePageView.as_view()(self.request)
self.assertIsInstance(response.context_data['cart'], dict)
self.assertIn((spec.id, defaults['quantity']),
response.context_data['cart'].items())
def test_get_home_page(self):
"""Display home page on GET."""
response = self.client.get(self.url, follow=True)
self.assertEqual(response.status_code, HTTPStatus.OK)
def test_add_item_to_cart(self):
"""Logged-in user add items to his cart."""
n = 2
spec_list = list(self.specs[:n])
self.assertGreaterEqual(
len(spec_list), n, msg=f'Fewer than {n} specs in test db.',
)
self.client.force_login(self.user)
response = self.client.get(self.url, secure=True)
self.assertEqual(response.status_code, HTTPStatus.OK)
num_in_cart = len(response.context['cart'])
for spec in spec_list:
num_in_cart += 1
data = {'specification': spec.id, 'quantity': spec.pre_packing}
response = self.client.post(
self.url, data=data, secure=True, follow=True,
)
self.assertEqual(response.status_code, HTTPStatus.OK)
self.assertIn(spec.id, response.context['cart'])
self.assertEqual(
len(response.context['cart']), num_in_cart,
msg=f'The number of items is not equal to {num_in_cart}.'
)
class SearchViewTests(TestCase):
fixtures = ['example_shop_data.json']
def setUp(self):
self.url = reverse('shop:search')
def test_parent_category_search_result(self):
"""
Returns all products from the parent category whose name
was specified in the user search query.
"""
category = Category.objects.filter(
category__isnull=True,
).first()
self.assertIsNotNone(category, msg='No category in test db')
category_names = list(
category.categories.order_by().values_list('name', flat=True)
) + [category.name]
response = self.client.get(
self.url, {'q': category_names[-1]}, secure=True,
)
self.assertEqual(response.status_code, HTTPStatus.OK)
for spec in response.context[views.SearchView.context_object_name]:
self.assertIn(spec.category.name, category_names)
def test_category_search_result(self):
"""
Returns all products from the category whose name
was specified in the user search query.
"""
spec = Specification.objects.filter(
available_qty__gt=0,
).select_related('category').last()
self.assertIsNotNone(spec, msg='No spec in test db')
category = spec.category
response = self.client.get(
self.url, {'q': category.name}, secure=True,
)
self.assertEqual(response.status_code, HTTPStatus.OK)
for spec in response.context[views.SearchView.context_object_name]:
self.assertEqual(spec.category.name, category.name)
def test_category_and_product_search_result(self):
"""
Returns the product that was specified in the user search query.
"""
spec = Specification.objects.filter(
available_qty__gt=0,
).last()
self.assertIsNotNone(spec, msg='No spec in test db')
product = spec.content_object
q = (f'{product.category.name} {product.name} '
f'{product.marking}')
response = self.client.get(
self.url, {'q': q}, secure=True,
)
self.assertEqual(response.status_code, HTTPStatus.OK)
spec_list = list(
response.context[views.SearchView.context_object_name]
)
self.assertTrue(
spec_list, msg="The search didn't find any products.",
)
self.assertEqual(
spec_list[0].object_id, product.id,
msg=(f'"{product}" should comes first on the list '
f'as best matching the query "{q}". '
f'Results: {", ".join(map(str, spec_list))}'),
)
def test_product_search_result(self):
"""
Returns the products that best match the user search query
without specifying a category.
"""
spec = Specification.objects.filter(
available_qty__gt=0,
).last()
self.assertIsNotNone(spec, msg='No spec in test db')
product = spec.content_object
q = f'{product.name} {product.marking} {spec.tag}'
response = self.client.get(
self.url, {'q': q}, secure=True,
)
self.assertEqual(response.status_code, HTTPStatus.OK)
spec_list = list(
response.context[views.SearchView.context_object_name]
)
self.assertTrue(
spec_list, msg="The search didn't find any products.",
)
self.assertEqual(
spec_list[0].id, spec.id,
msg=(f'"{spec}" should comes first on the list '
f'as best matching the query "{q}". '
f'Results: {", ".join(map(str, spec_list))}'),
)
class CategorySpecListTests(TestCase):
fixtures = ['example_shop_data.json']
def setUp(self):
self.category_qs = Category.objects.filter(
category__isnull=True,
)
def test_get_category_page(self):
"""Display the same content type products from a category."""
category = self.category_qs.exclude(
Q(categories__content_type_id__lt=F('content_type_id')) |
Q(categories__content_type_id__gt=F('content_type_id')),
).first()
kwargs = {'category': str(category.name).lower()}
response = self.client.get(
reverse('shop:category', kwargs=kwargs),
follow=True,
)
self.assertEqual(response.status_code, HTTPStatus.OK)
def test_context_data(self):
"""
Context data for a page with a product list from a category.
Context contains different content type products.
"""
factory = RequestFactory()
category = self.category_qs.filter(
Q(categories__content_type_id__lt=F('content_type_id')) |
Q(categories__content_type_id__gt=F('content_type_id')),
).first()
self.assertIsNotNone(
category, msg='the test data needs a category with '
'different content type products.'
)
kwargs = {'category': str(category.name).lower()}
request = factory.get(reverse('shop:category', kwargs=kwargs))
request.user = AnonymousUser()
view = views.CategorySpecList.as_view(paginate_by=None)
response = view(request, **kwargs)
self.assertIn('category', response.context_data)
self.assertEqual(response.context_data['category'], category)
self.assertIn('spec_list', response.context_data)
n = Specification.objects.filter(
available_qty__gt=0,
category__category_id=category.id,
).count()
spec_list = list(response.context_data['spec_list'])
self.assertEqual(len(spec_list), n)
class SubcategorySpecListTests(TestCase):
fixtures = ['example_shop_data.json']
def setUp(self):
self.category_qs = Category.objects.filter(
category__isnull=False,
).select_related('category')
def test_get_subcategory_page(self):
"""Display a product list from a subcategory."""
category = self.category_qs.first()
kwargs = {'category': str(category.category.name).lower(),
'subcategory': str(category.name).lower()}
response = self.client.get(
reverse('shop:subcategory', kwargs=kwargs),
follow=True,
)
self.assertEqual(response.status_code, HTTPStatus.OK)
self.assertIn('category', response.context)
self.assertEqual(response.context['category'], category)
class SpecificationDetailTests(TestCase):
fixtures = ['example_shop_data.json']
@classmethod
def setUpTestData(cls):
username, passwd = '<PASSWORD>', '<PASSWORD>'
cls.username = username
cls.passwd = passwd
cls.user = User.objects.create_user(
username=username, password=passwd,
)
def setUp(self):
self.factory = RequestFactory()
spec = Specification.objects.select_related(
'category__category',
).first()
kwargs = {
'category': str(spec.category.category.name).lower(),
'subcategory': str(spec.category.name).lower(),
'pk': str(spec.id),
}
self.url = reverse('shop:spec_detail', kwargs=kwargs)
self.url_kwargs = kwargs
self.spec = spec
def test_get_template_names(self):
"""
Method get_template_names() adds a product model name to
a template name and returns a list of template names.
"""
spec = self.spec
kwargs = self.url_kwargs
self.assertIsNotNone(spec, msg='No specification')
request = self.factory.get(self.url)
request.user = self.user
response = views.SpecificationDetail.as_view()(request, **kwargs)
t_name = f'shop/spec_detail/spec_{spec.content_type.model}.html'
self.assertEqual(response.template_name[0], t_name)
def test_context_data_with_rating_list(self):
"""
Context data contains queryset with ratings from users.
"""
spec = self.spec
kwargs = self.url_kwargs
rating_obj = Rate.objects.create(
point=Rate.PointValue.one, review='test',
user=self.user, object_id=spec.object_id,
content_type_id=spec.content_type_id,
)
request = self.factory.get(self.url)
request.user = self.user
response = views.SpecificationDetail.as_view()(request, **kwargs)
self.assertIn('rating_list', response.context_data)
self.assertEqual(
rating_obj, response.context_data['rating_list'].first(),
)
def test_get_spec_detail(self):
"""Display specification and product details on GET."""
spec = self.spec
response = self.client.get(self.url, follow=True)
self.assertEqual(response.status_code, HTTPStatus.OK)
self.assertIn('spec', response.context_data)
self.assertEqual(response.context_data['spec'].id, spec.id)
self.assertTrue(
hasattr(response.context_data['spec'], 'product'),
msg='Product was not prefetched to spec attribute.',
)
def test_rate_and_review_product(self):
"""Logged-in user rate and review product on detail page."""
spec = self.spec
data = {'point': Rate.PointValue.one, 'review': 'test',
'content_type': spec.content_type_id,
'object_id': spec.object_id}
self.assertFalse(
Rate.objects.filter(user=self.user, **data).exists(),
msg='The rating is already in the database.',
)
self.client.force_login(self.user)
response = self.client.post(
self.url, data=data, secure=True, follow=True,
)
self.assertEqual(response.status_code, HTTPStatus.OK)
self.assertTrue(
Rate.objects.filter(user=self.user, **data).exists(),
msg='The rating was not created in the database.',
)
def test_add_item_to_cart(self):
"""Logged-in user add product to cart on detail page."""
spec = self.spec
data = {'specification': spec.id,
'quantity': spec.pre_packing}
item = Order.specs.through.objects.filter(
order__user=self.user, order__status=Order.CART, **data,
)
self.assertFalse(
item.exists(), msg='The item is already in cart.',
)
self.client.force_login(self.user)
response = self.client.post(
self.url, data=data, secure=True, follow=True,
)
self.assertEqual(response.status_code, HTTPStatus.OK)
self.assertTrue(
item.exists(), msg='The item was not created in the database.',
)
class CartViewTests(TestCase):
fixtures = ['example_shop_data.json']
@classmethod
def setUpTestData(cls):
username, passwd = '<PASSWORD>', '<PASSWORD>'
user = User.objects.create_user(
username=username, password=passwd,
)
cls.cart_order = Order.objects.create(
user=user, status=Order.CART,
)
cls.user = user
def setUp(self):
self.specs = Specification.objects.filter(
available_qty__gt=0,
)
self.url = reverse('shop:cart')
self.items = Order.specs.through.objects.filter(
order_id=self.cart_order.id,
)
def test_get_cart_page(self):
"""Display cart items with order cost."""
spec = self.specs.first()
self.assertIsNotNone(spec, msg='No specification in test db.')
item = Order.specs.through.objects.create(
order=self.cart_order, specification=spec,
quantity=spec.pre_packing, price=spec.price,
)
cart_aggr = self.cart_order.specs.annotate(
best_price=Case(
When(sale_price__gt=0, then=F('sale_price')),
When(discount__gt=0, then=F('discount_price')),
default=F('price'),
),
).aggregate(
order_cost=Sum('best_price'), item_count=Count('id'),
)
self.client.force_login(self.user)
response = self.client.get(self.url, secure=True, follow=True)
self.assertEqual(response.status_code, HTTPStatus.OK)
context = response.context
self.assertIn(item, context['cart'])
self.assertEqual(len(context['cart']), cart_aggr['item_count'])
self.assertEqual(context['num_in_cart'], cart_aggr['item_count'])
self.assertEqual(context['order'], self.cart_order)
self.assertEqual(context['order_cost'], cart_aggr['order_cost'])
class CartItemFormViewTests(TestCase):
fixtures = ['example_shop_data.json']
@classmethod
def setUpTestData(cls):
username, passwd = '<PASSWORD>', '<PASSWORD>'
user = User.objects.create_user(
username=username, password=passwd,
)
cls.cart_order = Order.objects.create(
user=user, status=Order.CART,
)
cls.user = user
def setUp(self):
self.specs = Specification.objects.filter(
available_qty__gt=0,
).order_by('-id')
self.url = reverse('shop:add_to_cart')
self.items = Order.specs.through.objects.filter(
order_id=self.cart_order.id,
)
@override_settings(SECURE_SSL_REDIRECT=False)
def test_add_to_cart_for_anonymous_user(self):
"""
Only a logged-in user can add products to his cart,
anonymous users will be redirected to the login page.
"""
spec = self.specs[0]
data = {'specification': spec.id,
'quantity': spec.pre_packing}
response = self.client.post(self.url, data=data)
self.assertRedirects(
response, f'{reverse("shop:login")}?next={self.url}'
)
def test_add_item_to_cart(self):
"""Logged-in user add items to his cart."""
n = 2
spec_list = list(self.specs[:n])
self.assertGreaterEqual(
len(spec_list), n, msg=f'Fewer than {n} specs in test db.',
)
self.client.force_login(self.user)
for n, spec in enumerate(spec_list, 1):
data = {'specification': spec.id,
'quantity': spec.pre_packing}
item_qs = self.items.filter(specification=spec)
self.assertFalse(
item_qs.exists(), msg='The item is already in cart.',
| |
'+str(ic))
connect('button_press_event',self.closewin)
show()
def histDialog(self):
"histogramDialog(self) - dialog to setup histogram plot"
import Tkinter
if self.histFrame != None: self.histDone()
top=Toplevel()
self.histFrame=top
fm = Frame(top,borderwidth=0)
top.title('Histogram Dialog...')
self.histVar = [StringVar(),IntVar(),IntVar()]
Label(fm,text='Curve # [1-'+str(self.nc)+']:').grid(row=1,column=1,sticky=W)
asply = tuple(range(1,self.nc+1))
self.histVar[0] = Pmw.ComboBox(fm,label_text='Pick:',
labelpos=W, listbox_width=5,dropdown=1,
scrolledlist_items=asply)
self.histVar[0].grid(row=1,column=2,sticky=W)
self.histVar[0].selectitem(0)
Label(fm,text='Number of bins:').grid(row=2,column=1,sticky=W)
Entry(fm,width=10,textvariable=self.histVar[1]).grid(row=2,column=2,sticky=W)
Label(fm,text='Horizontal:').grid(row=3,column=1,sticky=W)
Checkbutton(fm,variable=self.histVar[2],state=NORMAL).grid(row=3,column=2,sticky=W)
self.histVar[1].set(20)
Tkinter.Button(fm,text='Accept',command=self.histRun).grid(row=4,column=1,stick=W)
Tkinter.Button(fm,text='Close',command=self.histDone).grid(row=4,column=2,stick=W)
fm.pack(fill=BOTH)
def histRun(self):
"histRun(self) - do histogram plot for selected curve"
ic = string.atoi(self.histVar[0].get())
if ic > 0 and ic <= self.nc:
nbin = self.histVar[1].get()
hz = self.histVar[2].get()
self.fig = self.fig+1
figure(self.fig,figsize=(5.5,4))
y = self.y
if hz:
n,bins,patches = hist(y[ic-1],nbin,orientation='horizontal')
xlabel('Occurrence')
else:
n,bins,patches = hist(y[ic-1], nbin)
ylabel('Occurrence')
title('Curve # '+str(ic)+': Histogram for bin='+str(nbin))
connect('button_press_event',self.closewin)
show()
print n
def histDone(self):
"histDone(self) - close histogram dialog"
self.histFrame.destroy()
self.histFrame = None
def setupXrange(self,title=None):
"setupXrange(self) - dialog to reset X axis data range"
import Tkinter
top=Toplevel()
self.setXFrame=top
fm = Frame(top,borderwidth=0)
if title == None: ntitle='Set New Data XRange...'
else: ntitle = title
top.title(ntitle)
self.xVar = [StringVar(),StringVar()]
Label(fm,text='Plot Start Coordinate X[0]:').grid(row=1,column=1,sticky=W)
Entry(fm,width=20,textvariable=self.xVar[0]).grid(row=1,column=2,sticky=W)
sz = len(self.x)
Label(fm,text='Plot Stop Coordinate X['+str(sz-1)+']:').grid(row=2,column=1,sticky=W)
Entry(fm,width=20,textvariable=self.xVar[1]).grid(row=2,column=2,sticky=W)
self.xVar[0].set(0)
self.xVar[1].set(sz-1)
Tkinter.Button(fm,text='Close',command=self.setupXrangeDone).grid(row=4,column=1,stick=W)
Tkinter.Button(fm,text='Accept',command=self.setupXrangeRun).grid(row=4,column=2,stick=W)
Tkinter.Button(fm,text='Reset',command=self.setupXrangeReset).grid(row=2,column=3,stick=W)
# if title != None:
# Tkinter.Button(fm,text='Functions Fit...',command=self.otherfit).grid(row=4,column=3,stick=W)
fm.pack(fill=BOTH)
def setupXrangeReset(self):
"setupXrangeReset(self) - set X range value"
self.xVar[0].set(str(self.xcord[0]))
self.xVar[1].set(str(self.xcord[self.NPT-1]))
def setupXrangeDone(self):
"setupXrangeDone(self) - close X range dialog"
self.setXFrame.destroy()
def setupXrangeRun(self):
"setupXrangeRun(self) - accept and setup X range"
x1 = string.atof(self.xVar[0].get())
x2 = string.atof(self.xVar[1].get())
dx = (x2-x1)/(self.NPT-1)
x = arange(x1,x2+.001,dx)
y = self.y
self.initfields(x,y)
def fittingNumDialog(self):
"fittingNumDialog(self) - dialog to enter curve # for fitting"
import Tkinter
if self.fitFrame != None: self.fittingDone()
top=Toplevel()
self.fitFrame=top
fm = Frame(top,borderwidth=0)
top.title('Least Square Fitting Dialog...')
self.fitVar = [IntVar(),IntVar(),StringVar(),StringVar(),StringVar(),StringVar()]
Label(fm,text='Curve # to be fitted').grid(row=1,column=1,sticky=W)
asply = tuple(range(1,self.nc+1))
self.fitVar[0] = Pmw.ComboBox(fm,label_text='[1-'+str(self.nc)+'] Pick:',
labelpos=W, listbox_width=5,dropdown=1,
scrolledlist_items=asply)
self.fitVar[0].grid(row=1,column=2,sticky=W)
self.fitVar[0].selectitem(0)
Label(fm,text='Polynomial order #:').grid(row=2,column=1,sticky=W)
Entry(fm,width=10,textvariable=self.fitVar[1]).grid(row=2,column=2,sticky=W)
Tkinter.Button(fm,text='Polynomial Fit...',command=self.fittingRun).grid(row=4,column=1,stick=W)
# Tkinter.Button(fm,text='Functions Fit...',command=self.otherfit).grid(row=4,column=2,stick=W)
Tkinter.Button(fm,text='Close',command=self.fittingDone).grid(row=4,column=3,stick=W)
# Tkinter.Button(fm,text='Help...',command=self.fittingHelp).grid(row=5,column=1,stick=W)
# Tkinter.Button(fm,text='Try New Fit Xrange...',command=self.otherxfit).grid(row=5,column=2,stick=W)
Label(fm,text='Output Title:').grid(row=15,column=1,sticky=W)
Entry(fm,width=40,textvariable=self.fitVar[2]).grid(row=15,column=2,sticky=W)
Label(fm,text='Output Xlabel:').grid(row=16,column=1,sticky=W)
Entry(fm,width=40,textvariable=self.fitVar[3]).grid(row=16,column=2,sticky=W)
Label(fm,text='Output Ylabel:').grid(row=17,column=1,sticky=W)
Entry(fm,width=40,textvariable=self.fitVar[4]).grid(row=17,column=2,sticky=W)
Label(fm,text='Output Fitting Coeffs:').grid(row=18,column=1,sticky=W)
Entry(fm,width=40,textvariable=self.fitVar[5]).grid(row=18,column=2,sticky=W)
# self.fitVar[0].set(1)
self.fitVar[1].set(2)
self.fitVar[2].set('Fitting Result Curve #')
self.fitVar[3].set('Polynomial Power')
self.fitVar[4].set('Polynomial Regression')
fm.pack(fill=BOTH)
def fittingDone(self):
"fittingDone(self) - close fitting dialog"
self.fitFrame.destroy()
self.fitFrame = None
def fittingHelp(self):
'fittingHelp(self) - help on fitting dialog'
text = 'Polynomial Fit... - use curve # and order # to do a polynomial fit\n-->Functions Fit... - use default X values in various fitting functions\n --> Try New Fit Xrange... - if the fit failed with the default X values, use the Xrange dialog to try another X range'
self.message(nm=text,info='Fitting Info')
def otherxfit(self):
'otherxfit(self) - try fit with different X range'
if self.setXFrame != None: self.setXFrame.destroy()
self.setupXrange(title='Try New Fitting X Range')
self.xVar[0].set('-'+self.xVar[1].get())
def otherfit(self):
'otherfit(self) - pop up Least Square fit dialog'
id = string.atoi(self.fitVar[0].get())
if id > 0 and id <= self.nc:
x = self.x
y = self.y[id-1]
x1 = string.atof(self.xmin.get())
x2 = string.atof(self.xmax.get())
i1 = 0
for i in range(self.NPT):
if x[i] <= x1:
i1 = i
else:
break
for i in range(i1,self.NPT):
if x[i] <= x2:
i2 = i
else:
break
data = []
for k in range(i1,i2+1):
data.append( (x[k],y[k]) )
Fit = FitDialog(self.fitFrame)
Fit.x = x
Fit.y = y
Fit.data = data
Fit.legd = 0
Fit.Wtitle = 'Curve # '+str(id)
Fit.createDialog(self.fitFrame)
def fittingRun(self):
"fittingRun(self) - accept power and curve # to do polynomial fit"
id = string.atoi(self.fitVar[0].get())
pow = self.fitVar[1].get()
if id > 0 and id <= self.nc:
x = self.x
y = self.y[id-1]
x1 = string.atof(self.xmin.get())
x2 = string.atof(self.xmax.get())
i1 = 0
for i in range(self.NPT):
if x[i] <= x1:
i1 = i
else:
break
for i in range(i1,self.NPT):
if x[i] <= x2:
i2 = i
else:
break
x = x[i1:i2+1]
y = y[i1:i2+1]
# linear polynomial fit
coeffs = polyfit(x,y,pow)
self.fitVar[5].set(str(coeffs))
z = polyval(coeffs,x)
self.fig=self.fig+1
figure(self.fig,figsize=(5.5,4))
plot(x,y,'b+', x, z ,'-k',linewidth=1)
tit = self.fitVar[2].get() +' ' + str(id)
title(tit)
xtit = self.fitVar[3].get() + ' '+str(pow)
xlabel(xtit)
ytit = self.fitVar[4].get()
ylabel(ytit)
gl = self.toggleGridVar.get()
grid(gl)
connect('button_press_event',self.closewin)
show()
def closewin(self,event):
'closewin(self,event) - right mouse button to close plot window'
if event.button == 3: close()
def closeall(self):
'closeall(self) - close all plot windows'
close('all')
self.fig = 0
def printer(self):
'printer(self) - dialog to set up printer'
from tv import setupPrinter
root=self.interior()
dialog = setupPrinter(root)
def Print(self):
'Print(self) - save plot to plot1d.png and send to PS printer'
from plot2d import printPicture
savefig('plot1d.png')
ptr = self.SH['printer']
printPicture('plot1d.png',ptr)
def doneLeg(self):
'doneLeg(self) - close setup legend dialog'
self.legFrame.destroy()
self.legFrame = None
def toggleLeg(self):
"toggleLeg(self) - get default legend position"
self.legloc = self.legVar.get()
self.doneLeg()
try:
if self.legOn: legend(self.labels,loc=legends[self.legloc])
except:
pass
def pickLegpos(self):
"pickLegpos(self) - dialog to pick legend position"
import Tkinter
if self.legFrame != None: self.doneLeg()
top=Toplevel()
self.legFrame=top
fm = Frame(top,borderwidth=0)
var = IntVar()
for i in range(len(legends)):
Radiobutton(fm,text=legends[i],value=i,variable=var,
command=self.toggleLeg,
indicatoron=1).pack(anchor=W)
var.set(0)
self.legVar= var
fm.pack(fill=BOTH)
def toggleLegend(self):
"toggleLegend(self) - set Legend on or off"
self.legOn = self.toggleLegVar.get()
def toggleSym(self):
"toggleSym(self) - set symbols on or off"
self.symOn = self.toggleSymVar.get()
def toggleGrid(self):
"toggleGrid(self) - set grid line on or off"
gl = self.toggleGridVar.get()
grid(gl)
def toggleSty(self):
"toggleSty(self) - set line style on or off"
self.styOn = self.toggleStyVar.get()
def getlegpos(self):
"getlegpos(self) - get and set new legposition"
locx = self.locx.get()
locy = self.locy.get()
self.legFrame.destroy()
self.legFrame = None
try:
loc = (string.atof(locx),string.atof(locy))
if self.legOn: legend(self.labels, loc=loc)
except:
pass
def setlegpos(self):
"setlegpos(self) - dialog to set legend position"
import Tkinter
if self.legFrame != None: return
top=Toplevel()
top.title('Enter Legend Location')
self.legFrame=top
fm = Frame(top,borderwidth=0)
self.locx,self.locy = StringVar(), StringVar()
Label(fm,text='ENTER LEGEND LOCATION').grid(row=0,column=1,sticky=W)
Label(fm,text='Normalized X loc[0-1]:').grid(row=1,column=1,sticky=W)
Entry(fm,width=5,textvariable=self.locx).grid(row=1,column=2,sticky=W)
self.locx.set(0.8)
Label(fm,text='Normalized Y loc[0-1]:').grid(row=2,column=1,sticky=W)
Entry(fm,width=5,textvariable=self.locy).grid(row=2,column=2,sticky=W)
self.locy.set(0.8)
fm.pack(fill=BOTH)
Tkinter.Button(top,text='OK',command=self.getlegpos).pack()
def getsymbols(self):
"getsymbols(self) - get and set new symbols"
self.spp = string.atoi(self.sppVar.get())
if self.spp < 1: self.spp = 1
for i in range(len(symbols)):
symbols[i] = self.sym[i].get()
def getsymbolClose(self):
'getsymbolClose(self) - close symbol dialog'
self.SymFrame.destroy()
def setsymbols(self):
"setsymbols(self) - dialog to modify and set new symbols"
import Tkinter
top=Toplevel()
top.title('Symbol Definition')
self.SymFrame=top
sym=[]
for i in range(len(symbols)):
sym.append(StringVar())
fm = Frame(top,borderwidth=0)
for i in range(len(symbols)):
Label(fm,text='symbol for line '+str(i+1)).grid(row=i,column=1,sticky=W)
Entry(fm,width=1,textvariable=sym[i]).grid(row=i,column=2,sticky=W)
sym[i].set(symbols[i])
self.sym = sym
Label(fm,text='DataSteps/symbol').grid(row=20,column=1,sticky=W)
self.sppVar = StringVar()
Entry(fm,width=5,textvariable=self.sppVar).grid(row=20,column=2,sticky=W)
self.sppVar.set(str(self.spp))
fm.pack(fill=BOTH)
fm1 = Frame(top,borderwidth=1)
Tkinter.Button(fm1,text=' OK ',command=self.getsymbols).pack(side=LEFT)
Tkinter.Button(fm1,text='Cancel',command=self.getsymbolClose).pack(side=LEFT)
fm1.pack(fill=BOTH)
def helpinfo(self):
"helpinfo(self) - display plot1d_help.txt with scrolled text"
fname = os.environ['PYTHONSTARTUP']+os.sep+'plot1d_help.txt'
top = Toplevel()
st = Pmw.ScrolledText(top,borderframe=1,labelpos=N,
label_text=fname,usehullsize=1,
hull_width=600,hull_height=400,
text_padx=10,text_pady=10,
text_wrap='none')
st.importfile(fname)
st.pack(fill=BOTH, expand=1, padx=1, pady=1)
def getlegend(self):
"getlegend(self) - dialog to set legends for plot at most 85"
from plotAscii import GetLegends,loadpvs
V = 85*['']
for i in range(self.nc):
V[i] = 'D_'+str(i+1)
file='pvs'
fd = open(file,'w')
fd.write(str(V))
fd.close()
top = self.form
GetLegends(top)
self.pvs = loadpvs()
def displayFile(self):
"displayFile(self) - display picked text file"
if self.fname != '': xdisplayfile(self.fname)
def rowdata(self):
"rowdata(self) - extract x,y vectors from row oriented text array"
try:
data = self.data
nc = len(data)
NPT = len(data[0])
self.NPT = NPT
try:
xid = int(self.xrid.get())
yid = int(self.yrid.get())
except ValueError:
self.message(nm='Row data array - X row #:\nonly single integer # allowed')
return
if xid < 0 or xid >= nc:
x = range(NPT)
y = data[0:nc]
else:
x = data[xid]
y = []
for i in range(nc):
if i >= yid:
y.append(data[i])
self.initfields(x,y)
except AttributeError:
self.message()
return
def columndata(self):
"columndata(self) - extract x,y vectors from column oriented text array"
try:
from plotAscii import transposeA
data = self.data
NPT = len(data)
self.NPT = NPT
NC = len(data[0])
if NC <= 1:
print 'bad file'
return
self.W[0].setentry(self.fname)
da = transposeA(data)
try:
xid = int(self.xcid.get())
yid = int(self.ycid.get())
except ValueError:
self.message(nm='Column: X col #:\nonly single integer # allowed')
return
if xid < 0:
x = range(NPT)
y = da[0:NC]
else:
x = da[xid]
y=[]
for i in range(NC):
if i >= yid:
y.append(da[i])
self.initfields(x,y)
self.xcord = x
except AttributeError:
self.message()
return
def pickMDA(self):
'pickMDA(self) - dialog to pick MDA file and load 1D array into memory'
import tkFileDialog
from readMDA import *
fname = tkFileDialog.askopenfilename( initialdir=self.mdapath,
filetypes=[("MDA File", '.mda'),
("All Files","*")])
if fname == (): return
(self.mdapath, fn) = os.path.split(fname)
self.mdafile = fn # fname
self.W[0].setentry(fname)
d = readMDA(fname,maxdim=1)
try:
if d[1].nd > 0:
# print '1D data found'
self.W[1].setentry(d[1].p[0].fieldName)
x = d[1].p[0].data
self.NPT = len(x)
data = []
labels = []
for i in range(d[1].nd):
data.append(array(d[1].d[i].data))
labels.append(d[1].d[i].name)
self.pvs = labels
self.initfields(x,data)
self.xcord = x
except IndexError:
pass
def pickFile(self):
"pickFile(self) - dialog to pick a text data file"
from plotAscii import readArray
import tkFileDialog
fname = tkFileDialog.askopenfilename(initialdir=self.txtpath,
initialfile='*.txt')
if fname == (): return
self.fname = fname
data = readArray(fname)
self.data = data
self.columndata()
def initfields(self,x,y):
"initfields(self,x,y) - initialize X,Y ranges fields from x,y vectors"
self.x = x
self.y = y
self.nc = len(y)
xmax = max(x)
xmin = min(x)
self.Rg[0] = xmin
self.Rg[1] = xmax
ymin,ymax = minmax(y)
self.Rg[2] = ymin
self.Rg[3] = ymax
self.xmin.setentry(str(xmin))
self.xmax.setentry(str(xmax))
self.ymin.setentry(str(ymin))
self.ymax.setentry(str(ymax))
self.createCB()
def createCB(self):
"createCB(self) - update CheckButtons to reflect the defined y vectors"
nc = self.nc # 85
checkD=[]
var =[]
for i in range(nc):
di = str(i+1)
var.append(IntVar())
if i < 2:
var[i].set(1)
if i > 9:
ii = i % 10
ij = i / 10
checkD.append((di,ij,ii,NORMAL))
else:
checkD.append((di,0,i,NORMAL))
self.var = var
if self.CBframe != -1: self.CBframe.destroy()
frame = Frame(self.form,borderwidth=0)
for i in range(nc):
Checkbutton(frame,text=checkD[i][0],state=checkD[i][3], anchor=W,
variable=var[i]).grid(row=checkD[i][1],column=checkD[i][2],sticky=W)
frame.pack()
self.CBframe = frame
# self.getCB()
def getCB(self):
"getCB(self) - get the state of all checked buttons"
value=[]
for i in range(self.nc):
value.append(self.var[i].get())
return value
def CBallon(self):
"CBallon(self) - select all check buttons for Y vectors"
if self.nc > 1:
for i in range(self.nc):
self.var[i].set(1)
def CBalloff(self):
"CBalloff(self) - unselect all check buttons for Y vectors"
if self.nc > 1:
for i in range(self.nc):
self.var[i].set(0)
def settitle(self):
"settitle(self) - update the title of plot figure"
title(self.W[0].get())
def setxlabel(self):
"setxlabel(self) - update the xlabel of plot figure"
xlabel(self.W[1].get())
def setylabel(self):
"setylabel(self) - update the ylabel of plot figure"
ylabel(self.W[2].get())
def createFields(self):
"createFields(self) - create modifiable control fields for plot"
self.form = self.interior()
self.W = [StringVar(),StringVar(),StringVar()]
self.W[0] = Pmw.EntryField(self.form,labelpos=W,value=self.title,
label_text = 'Title', command=self.settitle)
self.W[1] = Pmw.EntryField(self.form,labelpos=W,value=self.xlabel,
label_text = 'Xlabel', command=self.setxlabel)
self.W[2] = Pmw.EntryField(self.form,labelpos=W,value=self.ylabel,
label_text = 'Ylabel', command=self.setylabel)
self.W[0].pack(fill=X)
self.W[1].pack(fill=X)
self.W[2].pack(fill=X)
frame = Frame(self.form,borderwidth=0)
self.xmin = Pmw.EntryField(frame,labelpos=W,value=self.Rg[0],
label_text = 'Xrange Xmin:', command=self.setxlimit,
# validate = {'validator':'real','min':0,'max':100,'minstrict':0}
)
self.xmax = Pmw.EntryField(frame,labelpos=W,value=self.Rg[1],
label_text = ' Xmax:', command=self.setxlimit
)
self.xmin.grid(row=1,column=1,sticky=W)
self.xmax.grid(row=1,column=2,sticky=W)
self.ymin = Pmw.EntryField(frame,labelpos=W,value=self.Rg[2],
label_text = 'Yrange Ymin:', command=self.setylimit,)
self.ymax = Pmw.EntryField(frame,labelpos=W,value=self.Rg[3],
label_text = ' Ymax:', command=self.setylimit)
self.ymin.grid(row=2,column=1,sticky=W)
self.ymax.grid(row=2,column=2,sticky=W)
self.xcid = Pmw.EntryField(frame,labelpos=W,value='0',
command=self.setxcid,
label_text = 'Column Data: X col #:')
self.ycid = Pmw.EntryField(frame,labelpos=W,value='1',
label_text = ' Start Y col #:')
self.xrid = Pmw.EntryField(frame,labelpos=W,value='-1',
command=self.setxrid,
label_text = ' Row data: X row #:')
self.yrid = Pmw.EntryField(frame,labelpos=W,value='0',
label_text = ' Start Y row #:')
self.xcid.grid(row=3,column=1,sticky=W)
self.ycid.grid(row=4,column=1,sticky=W)
self.xrid.grid(row=5,column=1,sticky=W)
self.yrid.grid(row=6,column=1,sticky=W)
frame.pack()
def startup(self):
"startup(self) - initialize variables at object plot1d creation"
from plotAscii import readST,loadpvs,initSH
self.CBframe = -1
self.nc = -1
self.fig = 0
self.symOn = 0
self.legOn = 1
self.spp = 1
self.styOn = 0
self.legloc = 0
self.pvs = loadpvs()
self.linestyles = linestyles
self.colors = | |
CalcLines(dbase, display_repl, _gui._locale, _gui._nd)
self.__blank_father = None
self.__blank_mother = None
self.__blank_father = \
self.__calc_l.calc_lines(None, None, self.disp_father)
self.__blank_mother = \
self.__calc_l.calc_lines(None, None, self.disp_mother)
self.__blank_marriage = \
self.__calc_l.calc_lines(None, None, self.disp_marr)
def calc_person(self, index, indi_handle, fams_handle):
working_lines = ""
if index[1] % 2 == 0 or (index[1] == 1 and self.center_use == 0):
if indi_handle == fams_handle is None:
working_lines = self.__calc_l.calc_lines(
None, None, self.disp_father)
else:
working_lines = self.disp_father
else:
if indi_handle == fams_handle is None:
working_lines = self.__calc_l.calc_lines(
None, None, self.disp_mother)
else:
working_lines = self.disp_mother
if indi_handle == fams_handle is None:
return working_lines
else:
return self.__calc_l.calc_lines(indi_handle, fams_handle,
working_lines)
def calc_marriage(self, indi_handle, fams_handle):
if indi_handle == fams_handle is None:
return self.__blank_marriage
else:
return self.__calc_l.calc_lines(indi_handle, fams_handle,
self.disp_marr)
#------------------------------------------------------------------------
#
# Class MakeAncestorTree
#
#------------------------------------------------------------------------
class MakeAncestorTree(AscendPerson):
"""
The main procedure: use recursion to build the tree based on a person.
The order in which people are inserted into Persons is important;
this class makes sure that the order is done correctly.
"""
def __init__(self, dbase, canvas):
_gui = GuiConnect()
max_pedigree = _gui.get_val('maxpedigree')
fill_out = _gui.get_val('fill_out')
AscendPerson.__init__(self, dbase, max_pedigree, fill_out)
self.database = dbase
self.canvas = canvas
self.left_to_right = False
self.bold_direct = _gui.get_val('bolddirect')
self.inlc_marr = _gui.get_val('inc_marr')
self.inc_sib = self.left_to_right and _gui.get_val('show_parents')
self.compress_tree = _gui.get_val('compress_tree')
self.center_family = None
self.lines = [None] * (max_pedigree + 1)
self.max_generation = 0
self.center_boxes = [None] * 4
self.calc_items = CalcItems(self.database)
def get_center_boxes(self):
return self.center_boxes
def add_person(self, index, indi_handle, fams_handle):
""" Makes a person box and add that person into the Canvas. """
#print str(index) + " add_person " + str(indi_handle)
myself = PersonBox((index[0] - 1,) + index[1:], False)
if self.bold_direct:
myself.set_bold()
if index[LVL_GEN] == 1: # Center Person
self.center_family = fams_handle
if index[LVL_GEN] > self.max_generation:
self.max_generation = index[LVL_GEN]
myself.text = self.calc_items.calc_person(index,
indi_handle, fams_handle)
# myself.text[0] = myself.text[0] + ' ' + repr(index) # for debugging
if indi_handle is not None: # None is legal for an empty box
myself.add_mark(self.database,
self.database.get_person_from_handle(indi_handle))
self.canvas.add_box(myself)
#make the lines
indx = index[LVL_GEN]
self.lines[indx] = myself
if indx > 1:
if self.left_to_right:
if self.lines[indx - 1].line_to is None:
line = LineBase(self.lines[indx - 1])
self.lines[indx - 1].line_to = line
self.canvas.add_line(line)
else:
line = self.lines[indx - 1].line_to
line.add_to(myself)
else:
line = LineBase(myself)
line.add_to(self.lines[indx - 1])
self.canvas.add_line(line)
# Save for merging with descendant tree
if indx == 1 and self.center_boxes[3] is None:
# Center person
self.center_boxes[3] = myself
elif indx == 2:
# Mother or father of center person
if self.center_boxes[0] is None:
self.center_boxes[0] = myself
else:
self.center_boxes[2] = myself
return myself
def add_person_again(self, index, indi_handle, fams_handle):
self.add_person(index, indi_handle, fams_handle)
def add_marriage(self, index, indi_handle, fams_handle):
""" Makes a marriage box and add that person into the Canvas. """
if not self.inlc_marr:
return
indx = index[LVL_GEN]
myself = FamilyBox((indx - 1,) + index[1:], False)
if indx == 2 and self.center_boxes[1] is None:
# Family (parents) of center person
self.center_boxes[1] = myself
#calculate the text.
myself.text = self.calc_items.calc_marriage(indi_handle, fams_handle)
self.canvas.add_box(myself)
def y_index(self, x_level, index):
""" Calculate the column or generation that this person is in.
x_level -> 0 to max_gen-1
index -> 1 to (self.max_generation**2)-1
"""
#Calculate which row in the column of people.
tmp_y = index - (2**x_level)
#Calculate which row in the table (yes table) of people.
delta = (2**self.max_generation) // (2**(x_level))
return int((delta / 2) + (tmp_y * delta)) - 1
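# Added note: a worked example of the formula above (hypothetical values).
# With self.max_generation = 3, the table has 2**3 = 8 rows (0..7):
#   x_level=0, index=1 (center): delta=8 -> row 8/2 + 0*8 - 1 = 3
#   x_level=1, index=2 (father): delta=4 -> row 4/2 + 0*4 - 1 = 1
#   x_level=1, index=3 (mother): delta=4 -> row 4/2 + 1*4 - 1 = 5
#   x_level=2, index=4..7:       delta=2 -> rows 0, 2, 4, 6
# so each generation is spread evenly between its parents' rows.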
def do_y_indx(self):
''' Make the y_index for all boxes
first from a formula, then remove blank areas around the edges,
then compress the tree if desired
'''
boxes = boxes_in_ancestor_tree(self.canvas)
min_y = self.y_index(boxes[0].level[LVL_GEN],
boxes[0].level[LVL_INDX])
for box in boxes:
if "fam" in box.boxstr:
box.level = box.level + \
(self.y_index(box.level[LVL_GEN] - 1,
int(box.level[LVL_INDX] / 2)),)
else:
box.level = box.level + \
(self.y_index(box.level[LVL_GEN], box.level[LVL_INDX]),)
min_y = min(min_y, box.level[LVL_Y])
#print (str(box.level))
boxes = boxes_in_ancestor_tree(self.canvas)
#if a last father (of fathers) does not have a father/parents
#Then there could be a gap. Remove this gap
if min_y > 0:
for box in boxes:
box.level = box.level[:LVL_Y] + (box.level[LVL_Y] - min_y,)
#Now that we have y_index, lets see if we need to squish the tree
self.canvas.boxes.sort() # Sort them on the y_index
if not self.compress_tree:
return
#boxes are already in top down [LVL_Y] form so lets
#set the box in the correct y level depending on compress_tree
y_level = 0
current_y = boxes[0].level[LVL_Y]
for box in boxes:
y_index = box.level[LVL_Y]
if y_index > current_y:
current_y = y_index
y_level += 1
box.level = box.level[:LVL_Y] + (y_level,)
def do_sibs(self):
if not self.inc_sib or self.center_family is None:
return
family = self.database.get_family_from_handle(self.center_family)
mykids = [kid.ref for kid in family.get_child_ref_list()]
if len(mykids) == 1: # No other siblings. Don't do anything.
return
# The first person is the center person, and he/she has our information
center = self.canvas.boxes.pop(self.canvas.boxes.index(self.lines[1]))
line = center.line_to
level = center.level[LVL_Y]
move = level - (len(mykids) // 2) + ((len(mykids) + 1) % 2)
if move < 0:
# More kids than parents; we ran off the top of the page. Move them all down.
for box in boxes_in_ancestor_tree(self.canvas):
box.level = (box.level[0], box.level[1], box.level[2] - move)
move = 0
line.start = []
rrr = -1 # if len(mykids)%2 == 1 else 0
for kid in mykids:
rrr += 1
mee = self.add_person((1, 1, move + rrr), kid, self.center_family)
line.add_from(mee)
#mee.level = (0, 1, level - (len(mykids)//2)+rrr)
mee.line_to = line
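# Added note: a worked example of the 'move' arithmetic above (hypothetical
# values). With the center person at row (level) 3 and 3 kids:
#   move = 3 - 3//2 + (3+1)%2 = 3 - 1 + 0 = 2
# so the siblings land on rows 2, 3 and 4, centered on the old row.
# With 4 kids: move = 3 - 2 + 1 = 2, giving rows 2, 3, 4 and 5.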
def start(self, person_id):
""" go ahead and make it happen """
center = self.database.get_person_from_gramps_id(person_id)
if center is None:
raise ReportError(
_("Person %s is not in the Database") % person_id)
center_h = center.get_handle()
#Step 1. Get the people
self.recurse(center_h)
#Step 2. Calculate the y_index for everyone
self.do_y_indx()
#Step 3. Siblings of the center person
self.do_sibs()
#------------------------------------------------------------------------
#
# Transform Classes
#
#------------------------------------------------------------------------
#------------------------------------------------------------------------
# Class lr_Transform
#------------------------------------------------------------------------
class LRTransform:
"""
set up all of the boxes on the canvas for a left/right report
"""
def __init__(self, canvas, max_generations):
self.canvas = canvas
self.max_generations = max_generations
self.left_to_right = False
self.width = 0
self.rept_opts = canvas.report_opts
self.x_offset = self.rept_opts.col_width + self.rept_opts.max_box_width
self.y_offset = (self.rept_opts.littleoffset * 2 +
self.canvas.title.height)
def _place(self, box):
""" put the box in it's correct spot """
#1. cm_x
if self.left_to_right:
dx = box.level[LVL_GEN]
else:
dx = self.max_generations - box.level[LVL_GEN]
box.x_cm = self.rept_opts.littleoffset
box.x_cm += dx * self.x_offset
if box.x_cm - self.x_offset > self.width:
self.width = box.x_cm - self.x_offset
#2. cm_y
box.y_cm = self.rept_opts.max_box_height + self.rept_opts.box_pgap
box.y_cm *= box.level[LVL_Y]
box.y_cm += self.y_offset
#if box.height < self.rept_opts.max_box_height:
# box.y_cm += ((self.rept_opts.max_box_height - box.height) /2)
def place(self):
""" Step through boxes so they can be put in the right spot """
#prime the pump
boxes = boxes_in_ancestor_tree(self.canvas)
self.__last_y_level = boxes[0].level[LVL_Y]
#go
for box in boxes:
self._place(box)
#------------------------------------------------------------------------
#
# class MakeAncReport
#
#------------------------------------------------------------------------
class MakeAncReport:
def __init__(self, dbase, canvas, inlc_marr, compress_tree):
self.database = dbase
self.canvas = canvas
self.inlc_marr = inlc_marr
self.compress_tree = compress_tree
self.mother_ht = self.father_ht = 0
self.max_generations = 0
self.width = 0
def get_height_width(self, box):
"""
obtain width information for each level (x)
obtain height information for each item
"""
self.canvas.set_box_height_width(box)
if box.width > self.canvas.report_opts.max_box_width:
self.canvas.report_opts.max_box_width = box.width # + box.shadow
if box.level[LVL_Y] > 0:
if box.level[LVL_INDX] % 2 == 0 and box.height > self.father_ht:
self.father_ht = box.height
elif box.level[LVL_INDX] % 2 == 1 and box.height > self.mother_ht:
self.mother_ht = box.height
if box.level[LVL_GEN] > self.max_generations:
self.max_generations = box.level[LVL_GEN]
def get_generations(self):
return self.max_generations
def get_width(self):
return self.width
def start(self):
# __gui = GUIConnect()
# 1.
#set the sizes for each box and get the max_generations.
self.father_ht = 0.0
self.mother_ht = 0.0
boxes = boxes_in_ancestor_tree(self.canvas)
for box in boxes:
self.get_height_width(box)
if self.compress_tree and not self.inlc_marr:
self.canvas.report_opts.max_box_height = \
min(self.father_ht, self.mother_ht)
else:
self.canvas.report_opts.max_box_height = \
max(self.father_ht, self.mother_ht)
#At this point we know everything we need to make the report.
#Size of each column of people - self.rept_opt.box_width
#size of each column (or row) of lines - self.rept_opt.col_width
#size of each row - self.rept_opt.box_height
#go ahead and set it now.
for box in boxes:
box.width = self.canvas.report_opts.max_box_width
# 2.
#setup the transform class to move around the boxes on the canvas
transform = LRTransform(self.canvas, self.max_generations)
transform.place()
self.width = transform.width
# -----------------------------------------------------------------------
#
# PART 2. DESCENDANTS
#
# -----------------------------------------------------------------------
#------------------------------------------------------------------------
#
# Class RecurseDown
#
#------------------------------------------------------------------------
class RecurseDown:
"""
The main | |
# -*- coding: utf-8 -*-
__all__ = [
"is_token_expired",
"SpotifyClientCredentials",
"SpotifyOAuth",
"SpotifyOauthError",
"SpotifyStateError",
"SpotifyImplicitGrant",
"SpotifyPKCE"
]
import base64
import json
import logging
import os
import time
import warnings
import webbrowser
import requests
from spotipy.util import CLIENT_CREDS_ENV_VARS, get_host_port
from spotipy.exceptions import SpotifyException
# Workaround to support both python 2 & 3
import six
import six.moves.urllib.parse as urllibparse
from six.moves.BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler
from six.moves.urllib_parse import urlparse, parse_qsl
logger = logging.getLogger(__name__)
class SpotifyOauthError(Exception):
""" Error during Auth Code or Implicit Grant flow """
def __init__(self, message, error=None, error_description=None, *args, **kwargs):
self.error = error
self.error_description = error_description
self.__dict__.update(kwargs)
super(SpotifyOauthError, self).__init__(message, *args, **kwargs)
class SpotifyStateError(SpotifyOauthError):
""" The state sent and state recieved were different """
def __init__(self, local_state=None, remote_state=None, message=None,
error=None, error_description=None, *args, **kwargs):
if not message:
message = ("Expected " + local_state + " but recieved "
+ remote_state)
super(SpotifyStateError, self).__init__(message, error,
error_description, *args,
**kwargs)
def _make_authorization_headers(client_id, client_secret):
auth_header = base64.b64encode(
six.text_type(client_id + ":" + client_secret).encode("ascii")
)
return {"Authorization": "Basic %s" % auth_header.decode("ascii")}
def is_token_expired(token_info):
now = int(time.time())
return token_info["expires_at"] - now < 60
def _ensure_value(value, env_key):
env_val = CLIENT_CREDS_ENV_VARS[env_key]
_val = value or os.getenv(env_val)
if _val is None:
msg = "No %s. Pass it or set a %s environment variable." % (
env_key,
env_val,
)
raise SpotifyOauthError(msg)
return _val
def _get_cache_path(cache_path, username):
if cache_path:
return cache_path
cache_path = ".cache"
if username:
cache_path += "-" + str(username)
return cache_path
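# Added note (hypothetical username): an explicit cache_path wins, otherwise
# the username is appended to the default ".cache" file name.
#
#     >>> _get_cache_path(None, "alice")
#     '.cache-alice'
#     >>> _get_cache_path("/tmp/tok", "alice")
#     '/tmp/tok'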
class SpotifyAuthBase(object):
def __init__(self, requests_session):
if isinstance(requests_session, requests.Session):
self._session = requests_session
else:
if requests_session: # Build a new session.
self._session = requests.Session()
else: # Use the Requests API module as a "session".
from requests import api
self._session = api
@property
def client_id(self):
return self._client_id
@client_id.setter
def client_id(self, val):
self._client_id = _ensure_value(val, "client_id")
@property
def client_secret(self):
return self._client_secret
@client_secret.setter
def client_secret(self, val):
self._client_secret = _ensure_value(val, "client_secret")
@property
def redirect_uri(self):
return self._redirect_uri
@redirect_uri.setter
def redirect_uri(self, val):
self._redirect_uri = _ensure_value(val, "redirect_uri")
@staticmethod
def _get_user_input(prompt):
try:
return raw_input(prompt)
except NameError:
return input(prompt)
def __del__(self):
"""Make sure the connection (pool) gets closed"""
if isinstance(self._session, requests.Session):
self._session.close()
class SpotifyClientCredentials(SpotifyAuthBase):
OAUTH_TOKEN_URL = "https://accounts.spotify.com/api/token"
def __init__(self,
client_id=None,
client_secret=None,
proxies=None,
requests_session=True,
requests_timeout=None):
"""
You can either provide a client_id and client_secret to the
constructor or set SPOTIPY_CLIENT_ID and SPOTIPY_CLIENT_SECRET
environment variables
"""
super(SpotifyClientCredentials, self).__init__(requests_session)
self.client_id = client_id
self.client_secret = client_secret
self.token_info = None
self.proxies = proxies
self.requests_timeout = requests_timeout
def get_access_token(self, as_dict=True):
"""
If a valid access token is in memory, returns it
Else fetches a new token and returns it
Parameters:
- as_dict - a boolean indicating if returning the access token
as a token_info dictionary, otherwise it will be returned
as a string.
"""
if as_dict:
warnings.warn(
"You're using 'as_dict = True'."
"get_access_token will return the token string directly in future "
"versions. Please adjust your code accordingly, or use "
"get_cached_token instead.",
DeprecationWarning,
stacklevel=2,
)
if self.token_info and not self.is_token_expired(self.token_info):
return self.token_info if as_dict else self.token_info["access_token"]
token_info = self._request_access_token()
token_info = self._add_custom_values_to_token_info(token_info)
self.token_info = token_info
return self.token_info["access_token"]
def _request_access_token(self):
"""Gets client credentials access token """
payload = {"grant_type": "client_credentials"}
headers = _make_authorization_headers(
self.client_id, self.client_secret
)
response = self._session.post(
self.OAUTH_TOKEN_URL,
data=payload,
headers=headers,
verify=True,
proxies=self.proxies,
timeout=self.requests_timeout,
)
if response.status_code != 200:
error_payload = response.json()
raise SpotifyOauthError(
'error: {0}, error_description: {1}'.format(
error_payload['error'], error_payload['error_description']),
error=error_payload['error'],
error_description=error_payload['error_description'])
token_info = response.json()
return token_info
def is_token_expired(self, token_info):
return is_token_expired(token_info)
def _add_custom_values_to_token_info(self, token_info):
"""
Store some values that aren't directly provided by a Web API
response.
"""
token_info["expires_at"] = int(time.time()) + token_info["expires_in"]
return token_info
class SpotifyOAuth(SpotifyAuthBase):
"""
Implements Authorization Code Flow for Spotify's OAuth implementation.
"""
OAUTH_AUTHORIZE_URL = "https://accounts.spotify.com/authorize"
OAUTH_TOKEN_URL = "https://accounts.spotify.com/api/token"
def __init__(
self,
client_id=None,
client_secret=None,
redirect_uri=None,
state=None,
scope=None,
cache_path=None,
username=None,
proxies=None,
show_dialog=False,
requests_session=True,
requests_timeout=None
):
"""
Creates a SpotifyOAuth object
Parameters:
* client_id: Must be supplied or set as environment variable
* client_secret: Must be supplied or set as environment variable
* redirect_uri: Must be supplied or set as environment variable
* state: May be supplied, no verification is performed
* scope: May be supplied, intuitively converted to proper format
* cache_path: May be supplied, will otherwise be generated
(takes precedence over `username`)
* username: May be supplied or set as environment variable
(will set `cache_path` to `.cache-{username}`)
* proxies: Proxy for the requests library to route through
* show_dialog: Interpreted as boolean
* requests_timeout: Tell Requests to stop waiting for a response after a given number
of seconds
"""
super(SpotifyOAuth, self).__init__(requests_session)
self.client_id = client_id
self.client_secret = client_secret
self.redirect_uri = redirect_uri
self.state = state
self.username = username or os.getenv(
CLIENT_CREDS_ENV_VARS["client_username"]
)
self.cache_path = _get_cache_path(cache_path, self.username)
self.scope = self._normalize_scope(scope)
self.proxies = proxies
self.requests_timeout = requests_timeout
self.show_dialog = show_dialog
def get_cached_token(self):
""" Gets a cached auth token
"""
token_info = None
try:
f = open(self.cache_path)
token_info_string = f.read()
f.close()
token_info = json.loads(token_info_string)
# if scopes don't match, then bail
if "scope" not in token_info or not self._is_scope_subset(
self.scope, token_info["scope"]
):
return None
if self.is_token_expired(token_info):
token_info = self.refresh_access_token(
token_info["refresh_token"]
)
except IOError:
logger.warning("Couldn't read cache at: %s", self.cache_path)
return token_info
def _save_token_info(self, token_info):
if self.cache_path:
try:
f = open(self.cache_path, "w")
f.write(json.dumps(token_info))
f.close()
except IOError:
logger.warning('Couldn\'t write token to cache at: %s',
self.cache_path)
def _is_scope_subset(self, needle_scope, haystack_scope):
needle_scope = set(needle_scope.split()) if needle_scope else set()
haystack_scope = (
set(haystack_scope.split()) if haystack_scope else set()
)
return needle_scope <= haystack_scope
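# Added note (hypothetical scopes, `oauth` being some SpotifyOAuth
# instance): a cached token is reusable only if the requested scopes are a
# subset of the scopes it was granted.
#
#     >>> oauth._is_scope_subset("user-library-read",
#     ...                        "user-library-read playlist-read-private")
#     True
#     >>> oauth._is_scope_subset("user-library-read playlist-read-private",
#     ...                        "user-library-read")
#     False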
def is_token_expired(self, token_info):
return is_token_expired(token_info)
def get_authorize_url(self, state=None):
""" Gets the URL to use to authorize this app
"""
payload = {
"client_id": self.client_id,
"response_type": "code",
"redirect_uri": self.redirect_uri,
}
if self.scope:
payload["scope"] = self.scope
if state is None:
state = self.state
if state is not None:
payload["state"] = state
if self.show_dialog:
payload["show_dialog"] = True
urlparams = urllibparse.urlencode(payload)
return "%s?%s" % (self.OAUTH_AUTHORIZE_URL, urlparams)
def parse_response_code(self, url):
""" Parse the response code in the given response url
Parameters:
- url - the response url
"""
_, code = self.parse_auth_response_url(url)
if code is None:
return url
else:
return code
@staticmethod
def parse_auth_response_url(url):
query_s = urlparse(url).query
form = dict(parse_qsl(query_s))
if "error" in form:
raise SpotifyOauthError("Received error from auth server: "
"{}".format(form["error"]),
error=form["error"])
return tuple(form.get(param) for param in ["state", "code"])
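# Added note (hypothetical redirect URL): the helper returns (state, code).
#
#     >>> SpotifyOAuth.parse_auth_response_url(
#     ...     "http://localhost:8080/?state=xyz&code=AQBexample")
#     ('xyz', 'AQBexample')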
def _make_authorization_headers(self):
return _make_authorization_headers(self.client_id, self.client_secret)
def _open_auth_url(self):
auth_url = self.get_authorize_url()
try:
webbrowser.open(auth_url)
logger.info("Opened %s in your browser", auth_url)
except webbrowser.Error:
logger.error("Please navigate here: %s", auth_url)
def _get_auth_response_interactive(self, open_browser=True):
if open_browser:
self._open_auth_url()
prompt = "Enter the URL you were redirected to: "
else:
url = self.get_authorize_url()
prompt = (
"Go to the following URL: {}\n"
"Enter the URL you were redirected to: ".format(url)
)
response = self._get_user_input(prompt)
state, code = SpotifyOAuth.parse_auth_response_url(response)
if self.state is not None and self.state != state:
raise SpotifyStateError(self.state, state)
return code
def _get_auth_response_local_server(self, redirect_port):
server = start_local_http_server(redirect_port)
self._open_auth_url()
server.handle_request()
if self.state is not None and server.state != self.state:
raise SpotifyStateError(self.state, server.state)
if server.auth_code is not None:
return server.auth_code
elif server.error is not None:
raise SpotifyOauthError("Received error from OAuth server: {}".format(server.error))
else:
raise SpotifyOauthError("Server listening on localhost has not been accessed")
def get_auth_response(self, open_browser=True):
logger.info('User authentication requires interaction with your '
'web browser. Once you enter your credentials and '
'give authorization, you will be redirected to '
'a url. Paste the url you were redirected to in order to '
'complete the authorization.')
redirect_info = urlparse(self.redirect_uri)
redirect_host, redirect_port = get_host_port(redirect_info.netloc)
if (
open_browser
and redirect_host in ("127.0.0.1", "localhost")
and redirect_info.scheme == "http"
):
# Only start a local http server if a port is specified
if redirect_port:
return self._get_auth_response_local_server(redirect_port)
else:
logger.warning('Using `%s` as redirect URI without a port. '
'Specify a port (e.g. `%s:8080`) to allow '
'automatic retrieval of authentication code '
'instead of having to copy and paste '
'the URL your browser is redirected to.',
redirect_host, redirect_host)
return self._get_auth_response_interactive(open_browser=open_browser)
def get_authorization_code(self, response=None):
if response:
return self.parse_response_code(response)
return self.get_auth_response()
def get_access_token(self, code=None, as_dict=True, check_cache=True):
""" Gets the access token for the app given the code
Parameters:
- code - the response code
- as_dict - a boolean indicating if returning the access token
as a token_info dictionary, otherwise it will be returned
as a string.
"""
if as_dict:
warnings.warn(
"You're using 'as_dict = True'."
"get_access_token will return the token string directly in future "
"versions. Please adjust your code accordingly, or use | |
# encoding: utf-8
# module _ast
# from (built-in)
# by generator 1.147
# no doc
# no imports
# Variables with simple values
PyCF_ONLY_AST = 1024
__version__ = '82160'
# no functions
# classes
class AST(object):
# no doc
def __delattr__(self, name): # real signature unknown; restored from __doc__
""" x.__delattr__('name') <==> del x.name """
pass
def __getattribute__(self, name): # real signature unknown; restored from __doc__
""" x.__getattribute__('name') <==> x.name """
pass
def __init__(self, *args, **kwargs): # real signature unknown
pass
@staticmethod # known case of __new__
def __new__(S, *more): # real signature unknown; restored from __doc__
""" T.__new__(S, ...) -> a new object with type S, a subtype of T """
pass
def __reduce__(self, *args, **kwargs): # real signature unknown
pass
def __setattr__(self, name, value): # real signature unknown; restored from __doc__
""" x.__setattr__('name', value) <==> x.name = value """
pass
_attributes = ()
_fields = ()
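# Added note (assumes a CPython runtime): compiling with PyCF_ONLY_AST
# yields nodes whose _fields name their child attributes.
#
#     >>> import _ast
#     >>> tree = compile("a + 1", "<s>", "exec", _ast.PyCF_ONLY_AST)
#     >>> tree.body[0].value._fields
#     ('left', 'op', 'right')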
class operator(AST):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
__weakref__ = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""list of weak references to the object (if defined)"""
_attributes = ()
_fields = ()
__dict__ = None # (!) real value is "dict_proxy({'__module__': '_ast', '_attributes': (), '__dict__': <attribute '__dict__' of 'operator' objects>, '_fields': (), '__weakref__': <attribute '__weakref__' of 'operator' objects>, '__doc__': None})"
class Add(operator):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
_fields = ()
class alias(AST):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
__weakref__ = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""list of weak references to the object (if defined)"""
_fields = (
'name',
'asname',
)
__dict__ = None # (!) real value is "dict_proxy({'__dict__': <attribute '__dict__' of 'alias' objects>, '__module__': '_ast', '_fields': ('name', 'asname'), '__weakref__': <attribute '__weakref__' of 'alias' objects>, '__doc__': None})"
class boolop(AST):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
__weakref__ = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""list of weak references to the object (if defined)"""
_attributes = ()
_fields = ()
__dict__ = None # (!) real value is "dict_proxy({'__module__': '_ast', '_attributes': (), '__dict__': <attribute '__dict__' of 'boolop' objects>, '_fields': (), '__weakref__': <attribute '__weakref__' of 'boolop' objects>, '__doc__': None})"
class And(boolop):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
_fields = ()
class arguments(AST):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
__weakref__ = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""list of weak references to the object (if defined)"""
_fields = (
'args',
'vararg',
'kwarg',
'defaults',
)
__dict__ = None # (!) real value is "dict_proxy({'__dict__': <attribute '__dict__' of 'arguments' objects>, '__module__': '_ast', '_fields': ('args', 'vararg', 'kwarg', 'defaults'), '__weakref__': <attribute '__weakref__' of 'arguments' objects>, '__doc__': None})"
class stmt(AST):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
__weakref__ = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""list of weak references to the object (if defined)"""
_attributes = (
'lineno',
'col_offset',
)
_fields = ()
__dict__ = None # (!) real value is "dict_proxy({'__module__': '_ast', '_attributes': ('lineno', 'col_offset'), '__dict__': <attribute '__dict__' of 'stmt' objects>, '_fields': (), '__weakref__': <attribute '__weakref__' of 'stmt' objects>, '__doc__': None})"
class Assert(stmt):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
_fields = (
'test',
'msg',
)
class Assign(stmt):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
_fields = (
'targets',
'value',
)
class expr(AST):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
__weakref__ = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""list of weak references to the object (if defined)"""
_attributes = (
'lineno',
'col_offset',
)
_fields = ()
__dict__ = None # (!) real value is "dict_proxy({'__module__': '_ast', '_attributes': ('lineno', 'col_offset'), '__dict__': <attribute '__dict__' of 'expr' objects>, '_fields': (), '__weakref__': <attribute '__weakref__' of 'expr' objects>, '__doc__': None})"
class Attribute(expr):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
_fields = (
'value',
'attr',
'ctx',
)
class AugAssign(stmt):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
_fields = (
'target',
'op',
'value',
)
class expr_context(AST):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
__weakref__ = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""list of weak references to the object (if defined)"""
_attributes = ()
_fields = ()
__dict__ = None # (!) real value is "dict_proxy({'__module__': '_ast', '_attributes': (), '__dict__': <attribute '__dict__' of 'expr_context' objects>, '_fields': (), '__weakref__': <attribute '__weakref__' of 'expr_context' objects>, '__doc__': None})"
class AugLoad(expr_context):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
_fields = ()
class AugStore(expr_context):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
_fields = ()
class BinOp(expr):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
_fields = (
'left',
'op',
'right',
)
class BitAnd(operator):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
_fields = ()
class BitOr(operator):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
_fields = ()
class BitXor(operator):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
_fields = ()
class BoolOp(expr):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
_fields = (
'op',
'values',
)
class Break(stmt):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
_fields = ()
class Call(expr):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
_fields = (
'func',
'args',
'keywords',
'starargs',
'kwargs',
)
class ClassDef(stmt):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
_fields = (
'name',
'bases',
'body',
'decorator_list',
)
class cmpop(AST):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
__weakref__ = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""list of weak references to the object (if defined)"""
_attributes = ()
_fields = ()
__dict__ = None # (!) real value is "dict_proxy({'__module__': '_ast', '_attributes': (), '__dict__': <attribute '__dict__' of 'cmpop' objects>, '_fields': (), '__weakref__': <attribute '__weakref__' of 'cmpop' objects>, '__doc__': None})"
class Compare(expr):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
_fields = (
'left',
'ops',
'comparators',
)
class comprehension(AST):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
__weakref__ = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""list of weak references to the object (if defined)"""
_fields = (
'target',
'iter',
'ifs',
)
__dict__ = None # (!) real value is "dict_proxy({'__dict__': <attribute '__dict__' of 'comprehension' objects>, '__module__': '_ast', '_fields': ('target', 'iter', 'ifs'), '__weakref__': <attribute '__weakref__' of 'comprehension' objects>, '__doc__': None})"
class Continue(stmt):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
_fields = ()
class Del(expr_context):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
_fields = ()
class Delete(stmt):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
_fields = (
'targets',
)
class Dict(expr):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
_fields = (
'keys',
'values',
)
class DictComp(expr):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
_fields = (
'key',
'value',
'generators',
)
class Div(operator):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
_fields = ()
class slice(AST):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
__weakref__ = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""list of weak references to the object (if defined)"""
_attributes = ()
_fields = ()
__dict__ = None # (!) real value is "dict_proxy({'__module__': '_ast', '_attributes': (), '__dict__': <attribute '__dict__' of 'slice' objects>, '_fields': (), '__weakref__': <attribute '__weakref__' of 'slice' objects>, '__doc__': None})"
class Ellipsis(slice):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
_fields = ()
class Eq(cmpop):
# | |
import collections
from Bio.Alphabet import IUPAC
from Bio.Seq import Seq
from Bio import motifs
from contextlib import contextmanager
from pyspark import SparkContext, SparkConf
from tral.sequence import sequence
import re
import pandas as pd
import os
import argparse
import datetime
import warnings
import sys
SPARK_MASTER='local[*]'
SPARK_APP_NAME='dsSeqModel'
VERSION=0.1
# RUN with: spark-submit dsSeqModel.py -i <input> -o <output prefix> -s <sample name for vcf>
# this is from streamFastqTransform.py and expects that to output:
# barcode seq1 seq1_quality seq2 seq2_quality read_type read_name
# where barcode combined from seq1 and seq2
# This script uses direct matches - this should be the advantage of using duplex sequencing
# If there is a high error rate for some reason, it will produce few results
# If the person has a mutation in the 12 bp surrounding a CODIS locus, matching will fail if you use the reference genome
# It is always best to create a CODIS_surrounding.tsv for an individual person based on sequencing their genome
parser = argparse.ArgumentParser(description="Create alleles from Duplex Sequencing CODIS FASTQ files")
parser.add_argument('-i', '-input',
help="Input tab delimited TXT file of Duplex Sequencing information", required=True)
parser.add_argument('-o', '-output',
help="Output prefix for TXT and VCF files", required=True)
parser.add_argument('-s', '-sample', help="Sample Name for VCF file", required=True)
parser.add_argument('-nr', '-num_reads', help="Number of reads to count as a Duplex Consensus Sequence",
type=int, default=3)
parser.add_argument('-nd', '-num_duplex', help="Number of Duplex Consensus Sequences required to clan",
type=int, default=1)
parser.add_argument('-na', '-num_duplex_alleles', help="Number of Duplex Consensus Sequences required to support a CODIS allele report",
type=int, default=1)
parser.add_argument('-mc', '-match_count', help="Number of matches required for a sequence to match a CODIS allele",
type=int, default=4)
parser.add_argument('-c', '-codis', help="CODIS information for matching loci to genome",
default="CODIS_surrounding.tsv")
parser.add_argument('-mp', '-min_partitions', help="Minimum number of partitions to split the text file",
type=int, default=20)
parser.add_argument('-t', '-tmp', help="Tmp directory to store files in", default="/tmp")
args = vars(parser.parse_args())
if not os.path.isfile(args['c']):
raise IOError("Codis file not found: {}".format(args['c']))
if (args['nr'] <= 1):
warnings.warn("WARNING: Keeping all reads with args['nr'] set to {}.".format(args['nr']), Warning)
if (args['nd'] < 1):
warnings.warn("WARNING: Invalid option for number of duplex sequences required.", Warning)
sys.exit()
# test args= {'i': 'CODIS1_spark_input.txt', 's': 'CODIS1', 't': '/40TB_2/workspace/tmp', 'c': 'CODIS_surrounding.tsv', 'o': 'CODIS1_output', 'nd': 1, 'nr': 3, 'mp': 20}
@contextmanager
def spark_manager():
conf = SparkConf().setMaster(SPARK_MASTER).setAppName(SPARK_APP_NAME)
conf.set("spark.local.dir", args['t'])
#conf.set("spark.local.dir", "/40TB_2/workspace/tmp")
conf.set("spark.driver.memory", "5g")
spark_context = SparkContext(conf=conf)
try:
yield spark_context
finally:
spark_context.stop()
with spark_manager() as context:
dnaRDD = context.textFile(args['i'], minPartitions=args['mp'])
#dnaArray is
#['NNNNAAATGCNAGGCGGCCA', 'TTTGGGGGCATCTCTTATACTCATGAAATCAACAGAGGCTTGCATGTATCTATCTGTCTGTCTGTCTATCTATCTATCTATCTATCTATCTATCTATCTATCTATCTATCTATGAGACAGGGTCTTGCTCTGTCACCAAGATTG', 'E/EEAAEEEEEEEEEEEEAEEAEEAA<EEAAE<AAE<AEEAAAEEAAAAA<AAAA<A<EAEEE6AAE/<6<E<<AAAAA<EA6<E<<A<A<6AEAEEEAEA<6AAEEE</6AEAE<E/E//<EEAE/AEEE/E<A/6/EEEE//', 'ba', '1', 'NS500773:56:H7LK3AFXX:1:11101:15840:1018']
# 0 is barcode
# 1 is sequence
# 2 is sequence quality
# 3 is read type (ab or ba indicating whether the barcode sequence was in seq1/seq2 or seq2/seq1)
# 4 is sequence file (this sequence came from seq1 or seq2)
# 5 is read name
dnaArray = dnaRDD.map(lambda line: line.split('\t'))
def merge_two_dicts(x,y):
z = x.copy()
z.update(y)
return z
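# Added note (hypothetical dicts): y's entries win on key collisions, so
# merge_two_dicts({"ab-1": [1]}, {"ba-2": [2]}) returns a dict with both
# the "ab-1" and "ba-2" entries, without mutating either input.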
# seqBarcodes is
# seqBarcodes.take(1)[0][0] is the barcode and read orientation
# seqBarcodes.take(1)[0][1] is the sequence
seqBarcodes = dnaArray.map(lambda x: ((x[0], x[3]+'-'+x[4]), [x[1]])).reduceByKey(lambda x,y: x+y)
# seqCombinedBarcodes is the sequenced by barcode and read orientation
# seqCombinedBarcodes[0] = the barcode
# seqCombinedBarcodes[1] = a dictionary of sequences by read orientation ex: 'ab-1': [list of seqs]
seqCombinedBarcodes = seqBarcodes.map(lambda x: [x[0][0], {x[0][1]: x[1]}]).reduceByKey(lambda x,y: merge_two_dicts(x,y))
# seqCombinedBarcodesFiltered only keeps barcodes that have ab-1, ab-2, ba-1, and ba-2 entries above the cutoff for number of reads (args['nr'])
seqCombinedBarcodesFiltered = seqCombinedBarcodes.filter(lambda x: len(x[1].get("ab-1", [])) > args['nr'] and len(x[1].get("ab-2",[])) > args['nr'] and len(x[1].get("ba-1",[])) > args['nr'] and len(x[1].get("ba-2",[])) > args['nr'])
# create consensus sequences for seq using BioPython Seq module
def create_consensus(seqArray):
seqDNA = [Seq(xs, IUPAC.ambiguous_dna) for xs in seqArray]
seqMotif = motifs.create(seqDNA)
seqCons = str(seqMotif.consensus)
return seqCons
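# Added note (hypothetical reads): the consensus takes the majority base
# at each position.
#
#     >>> create_consensus(["ACGT", "ACGA", "ACGT"])
#     'ACGT'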
# combine ab:1 and ba:2 together; combine ab:2 and ba:1 together
def overall_consensus(combined):
barcode, groupedSeqs = combined
forward = []
reverse = []
forward.extend(groupedSeqs["ab-1"])
reverse.extend(groupedSeqs["ab-2"])
if len(forward) != len(reverse):
warnings.warn("WARNING: Nonmatching read counts in overall_consensus for barcode {}.".format(barcode), Warning)
forward.extend(groupedSeqs["ba-2"])
reverse.extend(groupedSeqs["ba-1"])
if len(forward) != len(reverse):
warnings.warn("WARNING: Nonmatching read counts in overall_consensus for barcode {}.".format(barcode), Warning)
for_cons = create_consensus(forward)
if (for_cons != forward[0]):
print("ERROR: forward doesn't match consensus")
print(for_cons)
print(forward[0])
rev_cons = create_consensus(reverse)
if (rev_cons != reverse[0]):
print("ERROR: reverse doesn't match consensus")
print(rev_cons)
print(reverse[0])
return [barcode, for_cons, rev_cons, len(forward)]
barcodeConsensus = seqCombinedBarcodesFiltered.map(overall_consensus)
# break the parts of the sequence up so that we can combine them all together by consensus
consensusSeqFor = barcodeConsensus.map(lambda x: [x[1], [[x[0], x[3]]]])
consensusSeqRev = barcodeConsensus.map(lambda x: [x[2], [[x[0], x[3]]]])
consensusSeqComb = consensusSeqFor.union(consensusSeqRev)
##############################################################################################
# COMBINE BY CONSENSUS RATHER THAN BARCODE
# consensusSeqComb
# [['AATATTGGTAATTAAATGTTTACTATAGACTATTTAGTGAGATAAAAAAAAACTATCAATCTGTCTATCTATCTATCTATCTATCTATCTATCTATCTATCGTTAGTTCGTTCTAAACTATGACAAGTGTTCTATCATACCCTT', ['TACGTTTCAGAATTTGCGAT', 310]]]
# create
#[('ACCAGCCTGGGGAAAATAGCAGGACTCCATCTCTACGAAAAATTTGAAAATTAGCCCGGCATGGTGGTACATGCCTGTAGTCCTAGCTATTCAGGAGGCTGAGGCAGGAGGACTGCTTGAGCCCAGGAGTTCGAGGCTGCAGTG', ['TGTAAAACCAAAACTGATAT', 10])]
consBySequence = consensusSeqComb.reduceByKey(lambda x,y: x+y)
###############################################################################################
# SUM READ COUNTS
# now that we have all of the sequences matched together, create summary counts
# consBySequence is [sequence, [list of [barcode, count]]]
def count_reads(group):
dcs_count = 0
dcs_max_reads = 0
dcs_sum_reads = 0
# a group is [barcode, count]
for g in group:
if (g[1] > dcs_max_reads):
dcs_max_reads = g[1]
dcs_count += 1
dcs_sum_reads += g[1]
return [dcs_count, dcs_max_reads, dcs_sum_reads]
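# Added note (hypothetical barcode groups): two DCS entries with 10 and 4
# supporting reads give [dcs_count, dcs_max_reads, dcs_sum_reads]:
#
#     >>> count_reads([["AAACGT", 10], ["CCGTAA", 4]])
#     [2, 10, 14]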
consCounted = consBySequence.map(lambda x: [x[0], count_reads(x[1])])
# Filter for number of DCS required
consFiltered = consCounted.filter(lambda x: x[1][0] >= args['nd'])
###############################################################################################
# MATCH VERSUS CODIS LOCATIONS
# load codis information
codisPickleFile = "codis_regex.p"
if os.path.isfile(codisPickleFile):
codis = pd.read_pickle(codisPickleFile)
else:
codis = pd.read_csv(args['c'], sep="\t")
regexer = lambda x: re.compile(x)
rev_comp = lambda x: str(Seq(x).reverse_complement())
codis["FULL LEFT COMP"] = codis["FULL LEFT"].apply(rev_comp).apply(regexer)
codis["FULL RIGHT COMP"] = codis["FULL RIGHT"].apply(rev_comp).apply(regexer)
codis["ALT LEFT COMP"] = codis["ALT LEFT"].apply(rev_comp).apply(regexer)
codis["ALT RIGHT COMP"] = codis["ALT RIGHT"].apply(rev_comp).apply(regexer)
codis["FULL LEFT"] = codis["FULL LEFT"].apply(regexer)
codis["FULL RIGHT"] = codis["FULL RIGHT"].apply(regexer)
codis["ALT LEFT"] = codis["ALT LEFT"].apply(regexer)
codis["ALT RIGHT"] = codis["ALT RIGHT"].apply(regexer)
codis.to_pickle("codis_regex.p")
# match vs codis
def match_codis(codis,seq):
matches = []
for i,row in codis.iterrows():
match_count = 0
for reg in row[["FULL LEFT","FULL RIGHT", "ALT LEFT","ALT RIGHT","FULL LEFT COMP","FULL RIGHT COMP", "ALT LEFT COMP", "ALT RIGHT COMP"]]:
print("matching CODIS {} reg {} against seq {}".format(row["ID"], reg, seq))
if reg.search(seq):
match_count = match_count +1
print("match count {}".format(match_count))
if match_count >= args['mc']:
matches.append(row["ID"])
if len(matches) == 0:
return ''
elif len(matches) == 1:
return matches[0]
else:
return 'multiple'
# codis_matched is the CODIS ID, the second element is the array of tuples of sequence results
codisCompared = consFiltered.map(lambda x: [match_codis(codis,x[0]), [x]]).reduceByKey(lambda x,y: x+y)
#codisUnmatched = codisCompared.filter(lambda x: x[0] == '')
#codisMultiple = codisCompared.filter(lambda x: x[0] == 'multiple')
codisMatched = codisCompared.filter(lambda x: x[0] != '' and x[0] != 'multiple')
if (codisMatched.count() == 0):
print("No results matching CODIS with the num_reads restriction {} and the num_dcs restriction {}".format(args['nr'], args['nd']))
context.stop()
sys.exit(1)
# codis matched is the CODIS loci it matched via sequence primer comparison
# the second entry is an array of sequence information
# each sequence is: [sequence, [ dcs_count, max # reads in all dcs, sum of reads in all dcs]]
###############################################################################################
# DETECT REPEATS
# use tral = uses TRF
def get_repeats(cr):
match = cr[0]
motif = codis.loc[codis["ID"] == match,"motif"].item()
rc_motif = str(Seq(motif,IUPAC.ambiguous_dna).reverse_complement())
group = cr[1]
repeats = []
for g in group:
info = [(i.n, i.repeat_region_length, tuple(i.msaD)) for i in sequence.Sequence(g[0], g[0]).detect(denovo=True,detection={"detectors":["TRF"]}).repeats]
if len(info) > 0:
g.append(info)
repeats.append(g)
# currently if we match against the motif, TRF identifies several repeats with - in the center A-TAG for CSF1PO
# CSF1PO is detected buggy...
return repeats
codisRepeats = codisMatched.map(lambda x: [x[0], get_repeats(x)])
crDir = 'codis_repeats_'+datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
codisRepeats.saveAsPickleFile(crDir) # we want to save this so that we can avoid re-running the repeat identification software
codisRepeats = context.pickleFile(crDir)
# codisRepeats is [codis allele, [list of matches [sequence, [num dcs, max dcs_read, sum_dcs_read], [repeat count, repeat length, repeat seq]]]]
###############################################################################################
# COLLAPSE ALLELES
# make a key out of the number of repeats to combine alleles together
# ['CTGGGCTCTTCGTCTTCCGAGTGTTTCTATTTTTAGACCGTTTGGTGTTTGGATAGATAGATAGATAGATAGATATATAAACAAATACTGTTTTGTCTTTCAATGATATCTATCTATCTATCTATCTATCTATCTATCTATATA', [1, 3302, 4037], [(9, 35, ('TATC', 'TATC', 'TATC', 'TATC', 'TATC', 'TATC', 'TATC', 'TATC', 'TAT-'))]]
def allele_key(codis):
groups = []
for g in codis[1]:
# create an individual entry for the codis and the sequence entry
c = [codis[0]] + g
alleles = ''
# if it matches > 1 allele we want to discard it
if len(g[2]) == 1:
alleles = [(codis[0], g[2][0]), [c]]
else:
alleles = [(codis[0], ()), [c]]
groups.append(alleles)
return groups
#'codis_repeats_20161217-170655'
# codisAlleles is [(CODIS ALLELE, (repeat count, repeat length, repeat sequence)), [list of [sequence, [dcs num, dcs read count max, dcs read count sum], | |
# ... these two elements are a distance of stride apart in x_pad)
# x_col[x,x,i,x,x,x]: stride = stride (the convolution stride) * base_stride (x_col[x,x,1,1,1,1] -> x_col[x,x,2,1,1,1] are a distance of stride apart in x_pad)
# x_col[x,i,x,x,x,x]: stride = stride (the convolution stride) * (W_in+2*pad) * base_stride (x_col[x,1,1,1,1,1] -> x_col[x,2,1,1,1,1] are a distance of stride apart in x_pad)
# x_col[i,x,x,x,x,x]: stride = C * (H_in+2*pad) * (W_in+2*pad) * base_stride (x_col[1,1,1,1,1,1] -> x_col[2,1,1,1,1,1] are a distance of stride apart in x_pad)
base_stride = x_pad.strides[-1]
strides = np.array([
C * (H_in+2*self.pad) * (W_in+2*self.pad) * base_stride,
self.stride * (W_in+2*self.pad) * base_stride,
self.stride * base_stride,
(H_in+2*self.pad) * (W_in+2*self.pad) * base_stride,
(W_in+2*self.pad) * base_stride,
base_stride
])
# Extract the convolution windows and flatten them
x_col = np.lib.stride_tricks.as_strided(x_pad, shape=(N, H_out, W_out, C, H_filter, W_filter), strides=strides)
x_col = x_col.reshape((N, H_out, W_out, -1))
# Flatten the convolution kernels
filter_col = self.filter_weight.reshape((F, -1))
# im2col convolution (now just a plain matrix multiply plus bias)
out = x_col @ filter_col.T + self.bias.reshape((1,1,1,-1))
# Move the last dimension to the front (channel-first layout)
out = np.transpose(out, axes=(0, 3, 1, 2))
# Save intermediate values for the backward pass
self.cache = (x_col, filter_col, x.shape)
return out
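# Added note: a shape walk-through of the as_strided trick above, assuming
# a tiny hypothetical input: N=1, C=1, H_in=W_in=4, a 2x2 filter, stride=2,
# pad=0. Then H_out = W_out = 2 and
#   x_col (strided view): (1, 2, 2, 1, 2, 2)  # N, H_out, W_out, C, Hf, Wf
#   x_col (reshaped):     (1, 2, 2, 4)        # each window flattened
#   filter_col:           (F, 4)
#   x_col @ filter_col.T: (1, 2, 2, F) -> transposed to (1, F, 2, 2)
# No data is copied until the reshape; the strided view only reinterprets
# the memory layout of x_pad.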
def backward_col2im(self, dout):
''' Convolution backward pass, accelerated with col2im '''
x_col, filter_col, x_shape = self.cache
N, F, H_out, W_out = dout.shape
_, _, H_filter, W_filter = self.filter_weight.shape
_, C, H_in, W_in = x_shape
# Compute db
db = np.sum(dout, (0,2,3))
# Reverse of the earlier "move the last dimension to the front" transpose
# dout: N x F x H_out x W_out -> N x H_out x W_out x F
dout = np.transpose(dout, axes=(0,2,3,1))
# Reverse of the im2col convolution above (since the im2col convolution is just a fully-connected forward pass, this is the same as a fully-connected backward pass)
dfilter_col = dout.reshape((-1, F)).T @ x_col.reshape((-1, x_col.shape[-1]))
# Compute dw
dw = dfilter_col.reshape(self.filter_weight.shape)
# Compute dx_col
dx_col = (dout @ filter_col).reshape((N, H_out, W_out, C, H_filter, W_filter))
dx_pad = np.zeros((N, C, H_in+2*self.pad, W_in+2*self.pad))
for index_y in range(H_out):
for index_x in range(W_out):
# Scatter the gradient values in dx_col back to the matching positions
dx_pad[:, :, self.stride*index_y:self.stride*index_y+H_filter, self.stride*index_x:self.stride*index_x+W_filter] += dx_col[:, index_y, index_x, :, :, :]
# Compute dx by stripping the zero padding (a slice of pad:-pad would be empty when pad == 0, so handle that case separately)
dx = dx_pad[:, :, self.pad:-self.pad, self.pad:-self.pad] if self.pad > 0 else dx_pad
# Update the parameters
self.filter_weight = self.optimizer.optim(self.filter_weight, dw, add_reg=True)
self.bias = self.optimizer.optim(self.bias, db)
return dx
def get_weights(self):
'''
Get the layer parameters
'''
return {"weights":self.filter_weight, "bias":self.bias}
def set_weights(self, weights_dict):
'''
Load layer parameters
'''
assert weights_dict["weights"].shape == self.filter_weight.shape, self.name + " 层权重参数大小输入不匹配,导入参数失败!应为 " + str(self.filter_weight.shape) + " 实际为 " + str(weights_dict["weights"].shape)
assert weights_dict["bias"].shape == self.bias.shape, self.name + " 层偏置参数大小输入不匹配,导入参数失败!应为 " + str(self.bias.shape) + " 实际为 " + str(weights_dict["bias"].shape)
self.filter_weight = weights_dict["weights"]
self.bias = weights_dict["bias"]
print("%s 层参数导入成功" % self.name)
class MaxPool2D(Layer):
'''
Max pooling layer
Args:
kernel_size: kernel height/width (square by default)
stride: stride
pad: zero padding (not implemented yet)
name: layer name
'''
def __init__(self, kernel_size=3, stride=1, pad=0, name="pool_layer"):
super(MaxPool2D, self).__init__(name=name)
self.kernel_size = kernel_size
self.stride = stride
# pad is not implemented yet
self.pad = pad
def forward(self, x):
if x is None:
print("(%s)\n\tMaxPool Layer -> kernal_size=%d\tstride=%d\tpad=%d" % (
self.name, self.kernel_size, self.stride, self.pad))
return None
pool_height = self.kernel_size
pool_width = self.kernel_size
stride = self.stride
N, C, H, W = x.shape
H_out = int(1 + (H - pool_height) / stride)
W_out = int(1 + (W - pool_width) / stride)
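        # e.g. H = W = 8 with kernel_size = 2 and stride = 2 gives H_out = W_out = 4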
out = np.zeros((N, C, H_out, W_out))
for (index_y,index_x) in zip(*np.where(np.ones((H_out,W_out)))):
out[:,:,index_y,index_x] = np.max(x[:,:,stride*index_y:stride*index_y+pool_height,stride*index_x:stride*index_x+pool_width],(2,3))
self.cache = x
return out
def backward(self, dout):
N, C, H_out, W_out = dout.shape
x = self.cache
pool_height = self.kernel_size
pool_width = self.kernel_size
stride = self.stride
N, C, H, W = x.shape
dx = np.zeros_like(x)
for (index_y,index_x) in zip(*np.where(np.ones((H_out,W_out)))):
y_start = stride*index_y
y_end = stride*index_y+pool_height
x_start = stride*index_x
            x_end = stride*index_x+pool_width  # was pool_height; the kernel is square here, but use the correct variable
mask = (x[:,:,y_start:y_end,x_start:x_end] == np.max(x[:,:,y_start:y_end,x_start:x_end], (2,3)).reshape((N, C, 1, 1))).astype(np.float64)
dx[:,:,y_start:y_end,x_start:x_end] += dout[:,:,index_y,index_x].reshape((N, C, 1, 1)) * mask
return dx
class Flatten(Layer):
    ''' Flatten layer '''
def __init__(self, name="flatten_layer"):
super(Flatten, self).__init__(name=name)
def forward(self, x):
if x is None:
print("(%s)\n\tFlatten Layer" % (self.name))
return None
out = np.reshape(x, (np.shape(x)[0], -1))
self.cache = np.shape(x)
return out
def backward(self, dout):
shape = self.cache
dx = np.reshape(dout, shape)
return dx
class Softmax(Layer):
    ''' Softmax layer '''
def __init__(self, name="softmax_layer"):
super(Softmax, self).__init__(name=name)
def forward(self, x):
# x: batch_size x class_score
if x is None:
print("(%s)\n\tSoftmax Layer" % (self.name))
return None
        # subtract the per-row max for numerical stability (softmax is shift-invariant)
        x = x - np.max(x, axis=1, keepdims=True)
        out = np.exp(x) / np.reshape(np.sum(np.exp(x), 1), (x.shape[0], 1))
self.cache = out
return out
def backward(self, dout):
        # for each row of out, the softmax Jacobian is diag(out) - out^T @ out, and dx = dout @ Jacobian
        # build a batched diagonal matrix
out = self.cache
diag = np.zeros((dout.shape[0],dout.shape[1],dout.shape[1]))
for i in range(diag.shape[0]):
diag[i, :, :] = np.diag(out[i])
        # gradient: dout reshaped to N x C x 1, multiplied by (diag - out[N x C x 1] @ out[N x 1 x C]) -> N x C x C (row i holds dy_i/dx, column j holds dy/dx_j), then summed over axis 1 -> N x C
dx = np.sum(dout.reshape(dout.shape[0], -1, 1) * (diag - out.reshape((out.shape[0], -1, 1)) @ out.reshape((out.shape[0], 1, -1))), 1)
return dx
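# A minimal finite-difference check for the Softmax backward above (an added sketch;
# it assumes numpy is imported as np at the top of this file, which is truncated here):
def _softmax_grad_check(eps=1e-6):
    rng = np.random.RandomState(0)
    x = rng.randn(2, 5)
    layer = Softmax()
    out = layer.forward(x)
    dout = rng.randn(*out.shape)
    dx = layer.backward(dout)
    # compare one analytic entry against a central difference of sum(out * dout)
    i, j = 0, 3
    x_p = x.copy(); x_p[i, j] += eps
    x_m = x.copy(); x_m[i, j] -= eps
    num = np.sum((Softmax().forward(x_p) - Softmax().forward(x_m)) * dout) / (2 * eps)
    assert abs(num - dx[i, j]) < 1e-5, (num, dx[i, j])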
class BatchNorm(Layer):
'''
    BatchNorm layer; expects input x of shape [N, D] (output of a linear layer) or [N, C, H, W] (output of a convolution layer)
    Args:
        in_channel: number of channels (C when the input is [N, C, H, W], D when it is [N, D])
        name: layer name
        optimizer: optimizer
        momentum: update momentum for running_mean and running_var, the statistics used for normalization at test time;
            during training, mean and var are computed from the current batch and the running statistics are updated as
            momentum * old_mean/var + (1-momentum) * new_mean/var; at test time running_mean/var are not updated
        spatial: if True, the input is treated as [N, C, H, W] (spatial BatchNorm over 4-D convolution outputs); if False,
            it is treated as [N, D] (BatchNorm over fully-connected outputs)
'''
def __init__(self, in_channel, name, optimizer, momentum=0.9, spatial=False):
super(BatchNorm, self).__init__(name=name)
self.gamma = np.ones(in_channel)
self.beta = np.zeros(in_channel)
self.momentum = momentum
        self.spatial = spatial # whether the input is 2-D or a 4-D image batch (the latter needs spatial batchnorm)
self.optimizer = optimizer
self.eps = 1e-5
        self.train = True # this layer behaves differently during training and evaluation
self.running_mean = np.zeros(in_channel)
self.running_var = np.zeros(in_channel)
def batchnorm_forward(self, x):
        ''' basic BatchNorm forward pass; input x has shape [N, D] '''
out = None
if self.train:
            # mean over the whole batch ((x[0,:] + x[1,:] + ...) / batch_size)
mean = np.mean(x, 0)
            # variance over the whole batch
variance = np.var(x, 0)
            # subtract the mean, divide by the standard deviation to get x_hat
x_hat = (x - mean.reshape((1,-1))) / (variance.reshape((1,-1)) + self.eps) ** 0.5
# out = x_hat * gamma + beta
out = self.gamma.reshape((1,-1)) * x_hat + self.beta.reshape((1,-1))
            # save tensors for the backward pass
self.cache = (x, self.gamma, self.beta, mean, variance, x_hat)
            # update running_mean and running_var, which are used in test mode
self.running_mean = self.momentum * self.running_mean + (1-self.momentum) * mean
self.running_var = self.momentum * self.running_var + (1-self.momentum) * variance
else:
            # forward pass in test mode
x_hat = (x - self.running_mean.reshape((1,-1))) / (self.running_var.reshape((1,-1)) + self.eps) ** 0.5
out = self.gamma.reshape((1,-1)) * x_hat + self.beta.reshape((1,-1))
return out
def batchnorm_backward(self, dout):
        ''' basic BatchNorm backward pass; input dout has shape [N, D] '''
x, gamma, beta, mean, variance, x_hat = self.cache
        # gradient of gamma
dgamma = np.sum(dout * x_hat, 0)
        # gradient of beta
dbeta = np.sum(dout, 0)
        # gradient with respect to x_hat
dx_hat = dout * gamma.reshape((1,-1))
        # direct path: contribution of x_hat's gradient to x
dxi_of_x_hat = dx_hat / (variance.reshape((1,-1)) + self.eps) ** 0.5
        # gradient with respect to the mean (through x_hat)
dmean_of_x_hat = np.sum(-dx_hat / (variance.reshape((1,-1)) + self.eps) ** 0.5, 0)
        # gradient with respect to the variance (through x_hat)
dvar_of_x_hat = np.sum(dx_hat * (-1/2) * ((variance.reshape((1,-1)) + self.eps) ** (-1.5)) * (x - mean.reshape((1, -1))), 0)
        # contribution of the mean path to x's gradient
dxi_of_mean = 1/x.shape[0] * dmean_of_x_hat * np.ones_like(x)
        # contribution of the variance path to x's gradient
dxi_of_var = 1/x.shape[0] * 2 * (x-mean.reshape((1,-1))) * dvar_of_x_hat.reshape((1,-1))
        # total gradient of x: the three contributions summed
dxi = dxi_of_x_hat + dxi_of_mean + dxi_of_var
        # final dx
dx = dxi
        # update the parameters
self.gamma = self.optimizer.optim(self.gamma, dgamma)
self.beta = self.optimizer.optim(self.beta, dbeta)
return dx
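    # Equivalently (standard BatchNorm gradient, Ioffe & Szegedy 2015), the three terms collapse to
    # dx = (gamma / sqrt(var + eps)) / N * (N*dout - sum(dout, 0) - x_hat * sum(dout * x_hat, 0));
    # the expanded code above implements the same quantity term by term.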
def forward(self, x):
        ''' forward pass '''
if x is None:
print("(%s)\n\tBatchNorm Layer -> in_channel=%d\tspatial=%d\t" % (
self.name, self.gamma.shape[0], self.spatial))
return None
        # choose the forward pass according to spatial
if self.spatial:
            assert len(x.shape) == 4 and x.shape[1] == self.gamma.shape[0], "input dimensions do not match the layer"
N, C, H, W = x.shape
            # transpose and reshape first: N x C x H x W -> N x H x W x C -> N*H*W x C, then reuse the [N, D] batchnorm
x_flat = np.transpose(x, (0,2,3,1)).reshape((-1,C))
            # run the [N, D] batchnorm function
out = self.batchnorm_forward(x_flat)
            # reshape the output back
out = np.transpose(out.reshape((N,H,W,C)), (0,3,1,2))
return out
else:
            assert len(x.shape) == 2 and x.shape[1] == self.gamma.shape[0], "input dimensions do not match the layer"
return self.batchnorm_forward(x)
def backward(self, dout):
        ''' backward pass '''
        # choose the backward pass according to spatial
if self.spatial:
            assert len(dout.shape) == 4 and dout.shape[1] == self.gamma.shape[0], "input dimensions do not match the layer"
N, C, H, W = dout.shape
dout = np.transpose(dout, (0,2,3,1)).reshape((-1,C))
            dx = self.batchnorm_backward(dout)
            # reshape the gradient back: N*H*W x C -> N x H x W x C -> N x C x H x W (mirror of the forward transform)
            dx = np.transpose(dx.reshape((N, H, W, C)), (0, 3, 1, 2))
            return dx
        else:
            assert len(dout.shape) == 2 and dout.shape[1] == self.gamma.shape[0], "input dimensions do not match the layer"
            return self.batchnorm_backward(dout)
| |
# Test the runpy module
import unittest
import os
import os.path
import sys
import re
import tempfile
from test.test_support import verbose, run_unittest, forget
from test.script_helper import (temp_dir, make_script, compile_script,
make_pkg, make_zip_script, make_zip_pkg)
from runpy import _run_code, _run_module_code, run_module, run_path
# Note: This module can't safely test _run_module_as_main as it
# runs its tests in the current process, which would mess with the
# real __main__ module (usually test.regrtest)
# See test_cmd_line_script for a test that executes that code path
# Set up the test code and expected results
class RunModuleCodeTest(unittest.TestCase):
"""Unit tests for runpy._run_code and runpy._run_module_code"""
expected_result = ["Top level assignment", "Lower level reference"]
test_source = (
"# Check basic code execution\n"
"result = ['Top level assignment']\n"
"def f():\n"
" result.append('Lower level reference')\n"
"f()\n"
"# Check the sys module\n"
"import sys\n"
"run_argv0 = sys.argv[0]\n"
"run_name_in_sys_modules = __name__ in sys.modules\n"
"if run_name_in_sys_modules:\n"
" module_in_sys_modules = globals() is sys.modules[__name__].__dict__\n"
"# Check nested operation\n"
"import runpy\n"
"nested = runpy._run_module_code('x=1\\n', mod_name='<run>')\n"
)
def test_run_code(self):
saved_argv0 = sys.argv[0]
d = _run_code(self.test_source, {})
self.assertEqual(d["result"], self.expected_result)
self.assertIs(d["__name__"], None)
self.assertIs(d["__file__"], None)
self.assertIs(d["__loader__"], None)
self.assertIs(d["__package__"], None)
self.assertIs(d["run_argv0"], saved_argv0)
self.assertNotIn("run_name", d)
self.assertIs(sys.argv[0], saved_argv0)
def test_run_module_code(self):
initial = object()
name = "<Nonsense>"
file = "Some other nonsense"
loader = "Now you're just being silly"
package = '' # Treat as a top level module
d1 = dict(initial=initial)
saved_argv0 = sys.argv[0]
d2 = _run_module_code(self.test_source,
d1,
name,
file,
loader,
package)
self.assertNotIn("result", d1)
self.assertIs(d2["initial"], initial)
self.assertEqual(d2["result"], self.expected_result)
self.assertEqual(d2["nested"]["x"], 1)
self.assertIs(d2["__name__"], name)
self.assertTrue(d2["run_name_in_sys_modules"])
self.assertTrue(d2["module_in_sys_modules"])
self.assertIs(d2["__file__"], file)
self.assertIs(d2["run_argv0"], file)
self.assertIs(d2["__loader__"], loader)
self.assertIs(d2["__package__"], package)
self.assertIs(sys.argv[0], saved_argv0)
self.assertNotIn(name, sys.modules)
class RunModuleTest(unittest.TestCase):
"""Unit tests for runpy.run_module"""
def expect_import_error(self, mod_name):
try:
run_module(mod_name)
except ImportError:
pass
else:
self.fail("Expected import error for " + mod_name)
def test_invalid_names(self):
# Builtin module
self.expect_import_error("sys")
# Non-existent modules
self.expect_import_error("sys.imp.eric")
self.expect_import_error("os.path.half")
self.expect_import_error("a.bee")
self.expect_import_error(".howard")
self.expect_import_error("..eaten")
# Package without __main__.py
self.expect_import_error("multiprocessing")
def test_library_module(self):
run_module("runpy")
def _add_pkg_dir(self, pkg_dir):
os.mkdir(pkg_dir)
pkg_fname = os.path.join(pkg_dir, "__init__"+os.extsep+"py")
pkg_file = open(pkg_fname, "w")
pkg_file.close()
return pkg_fname
def _make_pkg(self, source, depth, mod_base="runpy_test"):
pkg_name = "__runpy_pkg__"
test_fname = mod_base+os.extsep+"py"
pkg_dir = sub_dir = tempfile.mkdtemp()
if verbose: print " Package tree in:", sub_dir
sys.path.insert(0, pkg_dir)
if verbose: print " Updated sys.path:", sys.path[0]
for i in range(depth):
sub_dir = os.path.join(sub_dir, pkg_name)
pkg_fname = self._add_pkg_dir(sub_dir)
if verbose: print " Next level in:", sub_dir
if verbose: print " Created:", pkg_fname
mod_fname = os.path.join(sub_dir, test_fname)
mod_file = open(mod_fname, "w")
mod_file.write(source)
mod_file.close()
if verbose: print " Created:", mod_fname
mod_name = (pkg_name+".")*depth + mod_base
return pkg_dir, mod_fname, mod_name
def _del_pkg(self, top, depth, mod_name):
for entry in list(sys.modules):
if entry.startswith("__runpy_pkg__"):
del sys.modules[entry]
if verbose: print " Removed sys.modules entries"
del sys.path[0]
if verbose: print " Removed sys.path entry"
for root, dirs, files in os.walk(top, topdown=False):
for name in files:
try:
os.remove(os.path.join(root, name))
except OSError, ex:
if verbose: print ex # Persist with cleaning up
for name in dirs:
fullname = os.path.join(root, name)
try:
os.rmdir(fullname)
except OSError, ex:
if verbose: print ex # Persist with cleaning up
try:
os.rmdir(top)
if verbose: print " Removed package tree"
except OSError, ex:
if verbose: print ex # Persist with cleaning up
def _check_module(self, depth):
pkg_dir, mod_fname, mod_name = (
self._make_pkg("x=1\n", depth))
forget(mod_name)
try:
if verbose: print "Running from source:", mod_name
d1 = run_module(mod_name) # Read from source
self.assertIn("x", d1)
self.assertTrue(d1["x"] == 1)
del d1 # Ensure __loader__ entry doesn't keep file open
__import__(mod_name)
os.remove(mod_fname)
if verbose: print "Running from compiled:", mod_name
d2 = run_module(mod_name) # Read from bytecode
self.assertIn("x", d2)
self.assertTrue(d2["x"] == 1)
del d2 # Ensure __loader__ entry doesn't keep file open
finally:
self._del_pkg(pkg_dir, depth, mod_name)
if verbose: print "Module executed successfully"
def _check_package(self, depth):
pkg_dir, mod_fname, mod_name = (
self._make_pkg("x=1\n", depth, "__main__"))
pkg_name, _, _ = mod_name.rpartition(".")
forget(mod_name)
try:
if verbose: print "Running from source:", pkg_name
d1 = run_module(pkg_name) # Read from source
self.assertIn("x", d1)
self.assertTrue(d1["x"] == 1)
del d1 # Ensure __loader__ entry doesn't keep file open
__import__(mod_name)
os.remove(mod_fname)
if verbose: print "Running from compiled:", pkg_name
d2 = run_module(pkg_name) # Read from bytecode
self.assertIn("x", d2)
self.assertTrue(d2["x"] == 1)
del d2 # Ensure __loader__ entry doesn't keep file open
finally:
self._del_pkg(pkg_dir, depth, pkg_name)
if verbose: print "Package executed successfully"
def _add_relative_modules(self, base_dir, source, depth):
if depth <= 1:
raise ValueError("Relative module test needs depth > 1")
pkg_name = "__runpy_pkg__"
module_dir = base_dir
for i in range(depth):
parent_dir = module_dir
module_dir = os.path.join(module_dir, pkg_name)
# Add sibling module
sibling_fname = os.path.join(module_dir, "sibling"+os.extsep+"py")
sibling_file = open(sibling_fname, "w")
sibling_file.close()
if verbose: print " Added sibling module:", sibling_fname
# Add nephew module
uncle_dir = os.path.join(parent_dir, "uncle")
self._add_pkg_dir(uncle_dir)
if verbose: print " Added uncle package:", uncle_dir
cousin_dir = os.path.join(uncle_dir, "cousin")
self._add_pkg_dir(cousin_dir)
if verbose: print " Added cousin package:", cousin_dir
nephew_fname = os.path.join(cousin_dir, "nephew"+os.extsep+"py")
nephew_file = open(nephew_fname, "w")
nephew_file.close()
if verbose: print " Added nephew module:", nephew_fname
def _check_relative_imports(self, depth, run_name=None):
contents = r"""\
from __future__ import absolute_import
from . import sibling
from ..uncle.cousin import nephew
"""
pkg_dir, mod_fname, mod_name = (
self._make_pkg(contents, depth))
try:
self._add_relative_modules(pkg_dir, contents, depth)
pkg_name = mod_name.rpartition('.')[0]
if verbose: print "Running from source:", mod_name
d1 = run_module(mod_name, run_name=run_name) # Read from source
self.assertIn("__package__", d1)
self.assertTrue(d1["__package__"] == pkg_name)
self.assertIn("sibling", d1)
self.assertIn("nephew", d1)
del d1 # Ensure __loader__ entry doesn't keep file open
__import__(mod_name)
os.remove(mod_fname)
if verbose: print "Running from compiled:", mod_name
d2 = run_module(mod_name, run_name=run_name) # Read from bytecode
self.assertIn("__package__", d2)
self.assertTrue(d2["__package__"] == pkg_name)
self.assertIn("sibling", d2)
self.assertIn("nephew", d2)
del d2 # Ensure __loader__ entry doesn't keep file open
finally:
self._del_pkg(pkg_dir, depth, mod_name)
if verbose: print "Module executed successfully"
def test_run_module(self):
for depth in range(4):
if verbose: print "Testing package depth:", depth
self._check_module(depth)
def test_run_package(self):
for depth in range(1, 4):
if verbose: print "Testing package depth:", depth
self._check_package(depth)
def test_explicit_relative_import(self):
for depth in range(2, 5):
if verbose: print "Testing relative imports at depth:", depth
self._check_relative_imports(depth)
def test_main_relative_import(self):
for depth in range(2, 5):
if verbose: print "Testing main relative imports at depth:", depth
self._check_relative_imports(depth, "__main__")
class RunPathTest(unittest.TestCase):
"""Unit tests for runpy.run_path"""
# Based on corresponding tests in test_cmd_line_script
test_source = """\
# Script may be run with optimisation enabled, so don't rely on assert
# statements being executed
def assertEqual(lhs, rhs):
if lhs != rhs:
raise AssertionError('%r != %r' % (lhs, rhs))
def assertIs(lhs, rhs):
if lhs is not rhs:
raise AssertionError('%r is not %r' % (lhs, rhs))
# Check basic code execution
result = ['Top level assignment']
def f():
result.append('Lower level reference')
f()
assertEqual(result, ['Top level assignment', 'Lower level reference'])
# Check the sys module
import sys
assertIs(globals(), sys.modules[__name__].__dict__)
argv0 = sys.argv[0]
"""
def _make_test_script(self, script_dir, script_basename, source=None):
if source is None:
source = self.test_source
return make_script(script_dir, script_basename, source)
def _check_script(self, script_name, expected_name, expected_file,
expected_argv0, expected_package):
result = run_path(script_name)
self.assertEqual(result["__name__"], expected_name)
self.assertEqual(result["__file__"], expected_file)
self.assertIn("argv0", result)
self.assertEqual(result["argv0"], expected_argv0)
self.assertEqual(result["__package__"], expected_package)
def _check_import_error(self, script_name, msg):
msg = re.escape(msg)
self.assertRaisesRegexp(ImportError, msg, run_path, script_name)
def test_basic_script(self):
with temp_dir() as script_dir:
mod_name = 'script'
script_name = self._make_test_script(script_dir, mod_name)
self._check_script(script_name, "<run_path>", script_name,
script_name, None)
def test_script_compiled(self):
with temp_dir() as script_dir:
mod_name = 'script'
script_name = self._make_test_script(script_dir, mod_name)
compiled_name = compile_script(script_name)
os.remove(script_name)
self._check_script(compiled_name, "<run_path>", compiled_name,
compiled_name, None)
def test_directory(self):
with temp_dir() as script_dir:
mod_name = '__main__'
script_name = self._make_test_script(script_dir, mod_name)
self._check_script(script_dir, "<run_path>", script_name,
script_dir, '')
def test_directory_compiled(self):
with temp_dir() as script_dir:
mod_name = '__main__'
script_name = self._make_test_script(script_dir, mod_name)
compiled_name = compile_script(script_name)
os.remove(script_name)
self._check_script(script_dir, "<run_path>", compiled_name,
script_dir, '')
def test_directory_error(self):
with temp_dir() as script_dir:
mod_name = 'not_main'
script_name = self._make_test_script(script_dir, mod_name)
msg = "can't find '__main__' module in %r" % script_dir
self._check_import_error(script_dir, msg)
def test_zipfile(self):
with temp_dir() as script_dir:
mod_name = '__main__'
script_name = self._make_test_script(script_dir, mod_name)
zip_name, fname = make_zip_script(script_dir, 'test_zip', script_name)
self._check_script(zip_name, "<run_path>", fname, zip_name, '')
def test_zipfile_compiled(self):
with temp_dir() as script_dir:
mod_name = '__main__'
script_name = self._make_test_script(script_dir, mod_name)
compiled_name = compile_script(script_name)
zip_name, fname = make_zip_script(script_dir, 'test_zip', compiled_name)
self._check_script(zip_name, "<run_path>", fname, zip_name, '')
def test_zipfile_error(self):
with temp_dir() as script_dir:
mod_name = 'not_main'
script_name = self._make_test_script(script_dir, mod_name)
zip_name, fname = make_zip_script(script_dir, 'test_zip', script_name)
msg = "can't find '__main__' module in %r" % zip_name
self._check_import_error(zip_name, msg)
def test_main_recursion_error(self):
with temp_dir() as script_dir, temp_dir() as dummy_dir:
mod_name = '__main__'
source = ("import runpy\n"
"runpy.run_path(%r)\n") % dummy_dir
script_name = self._make_test_script(script_dir, mod_name, source)
zip_name, fname = make_zip_script(script_dir, 'test_zip', script_name)
msg = "recursion depth exceeded"
self.assertRaisesRegexp(RuntimeError, msg, run_path, zip_name)
def test_main():
    run_unittest(RunModuleCodeTest, RunModuleTest, RunPathTest)

if __name__ == "__main__":
    test_main()
| |
# victorrsouzas/TeoriaDosGrafos
import time
import networkx as nx
import matplotlib.pyplot as plt
import sys
import numpy as np
import glob
buffer = 0
d = 1
e = 1
G = nx.DiGraph() # Directed
G2 = nx.Graph() # Undirected
contaAresta = 0
def menu_Grafos():
print("""
-----------------------------
Teoria Grafos
-----------------------------
""")
time.sleep(1)
while d == 1:
print("""
-----------------------------
SISTEMA GRAFOS
-----------------------------
VOCÊ DESEJA INICIAR UM GRAFO?
- SIM
- NÃO
-----------------------------""")
a = input(" Opção: ").upper()
if a == "SIM":
grafos()
elif a == "NÃO" or a == "NAO":
sys.exit()
else:
print("\nRESPONDA APENAS SIM OU NÃO")
def menu_Tipo_Grafo():
print("""
-----------------------------
ADICIONAR GRAFO
-----------------------------
""")
print("""
Escolha a opção do grafo:
(1) Direcionado
(2) Não Direcionado
(3) Sair
""")
def menu_Tipo():
print("""
-----------------------------
TIPO GRAFO
-----------------------------
""")
print("""
(1) Valorado
(2) Não Valorado
(3) Sair
""")
def menu_Visualização(peso, buffer):
global opcao
global contaAresta
global G
if (peso == 1 and buffer == 1) or (peso == 1 and buffer == 2):
print("""
-----------------------------
VISUALIZAÇÃO
-----------------------------
""")
print("""
(1) Lista de arestas
(2) Lista de graus
(3) Lista de vertices
(4) Tamanho do grafo
(5) Matriz de adjacências
(6) Verificar vertices adjacentes
(7) Plot do Grafo
(8) Algoritmo de Dijkstra
(9) Algoritmo de Bellman-Ford
(10) Sair
""")
opcao = int(input(" Opção: "))
print("\n")
if (peso == 2 and buffer == 1) or (peso == 2 and buffer == 2):
print("""
-----------------------------
VISUALIZAÇÃO
-----------------------------
""")
print("""
(1) Lista de arestas
(2) Lista de graus
(3) Lista de vertices
(4) Tamanho do grafo
(5) Matriz de adjacências
(6) Verificar vertices adjacentes
(7) Plot do Grafo
(8) Sair
""")
opcao = int(input(" Opção: "))
print("\n")
def menu_Opcoes(peso, buffer):
global opcao
global contaAresta
global G
if (peso == 1 and buffer == 1) or (peso == 1 and buffer == 2):
print("""
-----------------------------
OPÇÕES
-----------------------------
""")
print("""
(1) Incluir Vertices e Arestas
(2) Alterar Peso da Aresta
(3) Remover Vertices
(4) Visualizar o grafo e os dados
(5) Importar arquivo .csv
(6) Sair
""")
opcao = int(input(" Opção: "))
print("\n")
if (peso == 2 and buffer == 1) or (peso == 2 and buffer == 2):
print("""
-----------------------------
OPÇÕES
-----------------------------
""")
print("""
(1) Incluir Vertices
(2) Remover Vertices
(3) Visualizar o grafo e os dados
(4) Importar arquivo .csv
(5) Sair
""")
opcao = int(input(" Opção: "))
print("\n")
def grafos():
while d == 1:
menu_Tipo_Grafo()
try:
buffer = int(input(" Opção: "))
            # IF DIRECTED
if buffer == 1:
menu_Tipo()
peso = int(input(" O seu grafo direcionado vai ser:"))
                # IF DIRECTED AND WEIGHTED
if peso == 1:
opcoes(peso, buffer)
                # IF DIRECTED AND UNWEIGHTED
elif peso == 2:
opcoes(peso, buffer)
                # EXIT THE DIRECTED OPTION: WEIGHTED OR UNWEIGHTED
elif peso == 3:
menu_Grafos()
            # IF UNDIRECTED
elif buffer == 2:
menu_Tipo()
peso = int(
input(" O seu grafo não direcionado vai ser: "))
                # IF UNDIRECTED AND WEIGHTED
if peso == 1:
opcoes(peso, buffer)
                # IF UNDIRECTED AND UNWEIGHTED
elif peso == 2:
opcoes(peso, buffer)
                # EXIT
elif peso == 3:
menu_Grafos()
elif buffer == 3:
menu_Grafos()
        except ValueError as err:
            print(f" Erro no tipo da entrada: {err}")
break
def opcoes(peso, buffer):
global opcao
global contaAresta
global G
menu_Opcoes(peso, buffer)
try:
if (peso == 1 and buffer == 1) or (peso == 1 and buffer == 2):
            # ADD VERTICES AND EDGES
if opcao == 1:
if (peso == 1 and buffer == 1) or (peso == 1 and buffer == 2):
incluir_Vertice_ArestaValorado(peso, buffer)
if (peso == 2 and buffer == 1) or (peso == 2 and buffer == 2):
incluir_Vertice_ArestaNValorado(peso, buffer)
return opcoes(peso, buffer)
            # CHANGE THE EDGE WEIGHT
elif opcao == 2:
alterarPeso(peso, buffer)
return opcoes(peso,buffer)
            # REMOVE VERTEX
elif opcao == 3:
removerVertice()
return opcoes(peso, buffer)
            # VIEW THE GRAPH AND ITS DATA
elif opcao == 4:
opcoes_Visualização(peso, buffer)
return opcoes(peso, buffer)
            # IMPORT .CSV DATA
elif opcao == 5:
import_csv(peso, buffer)
return opcoes(peso, buffer)
            # EXIT
elif opcao == 6:
export_csv(peso, buffer)
return grafos()
if (peso == 2 and buffer == 1) or (peso == 2 and buffer == 2):
            # ADD VERTICES
if opcao == 1:
if (peso == 1 and buffer == 1) or (peso == 1 and buffer == 2):
incluir_Vertice_ArestaValorado(peso, buffer)
if (peso == 2 and buffer == 1) or (peso == 2 and buffer == 2):
incluir_Vertice_ArestaNValorado(peso, buffer)
return opcoes(peso, buffer)
            # REMOVE VERTEX
elif opcao == 2:
removerVertice()
return opcoes(peso, buffer)
            # VIEW THE GRAPH AND ITS DATA
elif opcao == 3:
opcoes_Visualização(peso, buffer)
return opcoes(peso, buffer)
            # IMPORT .CSV DATA
elif opcao == 4:
import_csv(peso, buffer)
return opcoes(peso, buffer)
            # EXIT
elif opcao == 5:
export_csv(peso, buffer)
return grafos()
    except ValueError as err:
        print(f"Erro no tipo da entrada: {err}")
def export_csv(peso, buffer):
    global contaAresta  # without this, "contaAresta = 0" below would only create a local variable
if (peso == 1 and buffer == 1) or (peso == 1 and buffer == 2):
x = input(
" Você deseja exportar o grafo em .CSV [S/N]? ").upper()
if x == "S":
y = str(input(" Digite o nome do arquivo:"))
nx.write_weighted_edgelist(
                G, # graph
                y, # file name
                delimiter=",", # separator
                encoding='utf-8' # encoding
)
G.clear()
G2.clear()
contaAresta = 0
if x == "N":
G.clear()
G2.clear()
contaAresta = 0
if (peso == 2 and buffer == 1) or (peso == 2 and buffer == 2):
x = input(
" Você deseja exportar o grafo em .CSV [S/N]? ").upper()
if x == "S":
y = str(input(" Digite o nome do arquivo:"))
nx.write_weighted_edgelist(
                G2, # graph
                y, # file name
                delimiter=",", # separator
                encoding='utf-8' # encoding
)
G.clear()
G2.clear()
contaAresta = 0
if x == "N":
G.clear()
G2.clear()
contaAresta = 0
def import_csv(peso, buffer):
if (peso == 1 and buffer == 1) or (peso == 1 and buffer == 2):
print("""
-----------------------------
LISTA DE ARQUIVOS
-----------------------------
""")
for f in glob.glob('*.*'):
print(f" {f}")
print("\n")
x = str(input(" Digite o nome do arquivo:"))
if x in glob.glob('*.*'):
graph = nx.read_weighted_edgelist(x, delimiter=',', create_using=G,nodetype=str,encoding='utf-8')
else:
print("\n Arquivo inexistente")
if (peso == 2 and buffer == 1) or (peso == 2 and buffer == 2):
print("""
-----------------------------
LISTA DE ARQUIVOS
-----------------------------
""")
for f in glob.glob('*.*'):
print(f" {f}")
print("\n")
x = str(input(" Digite o nome do arquivo:"))
if x in glob.glob('*.*'):
graph = nx.read_weighted_edgelist(x, delimiter=',', create_using=G2,nodetype=str,encoding='utf-8')
else:
print("\n Arquivo inexistente")
def incluir_Vertice_ArestaValorado(op, buff):
global contaAresta
global verticeInput1
global verticeInput2
global opcao
global G
try:
while e == 1:
if op == 1 and buff == 1:
verticeInput1 = input(" Digite o vertice de saida: ")
verticeInput2 = input(" Digite o vertice de chegada: ")
if op == 1 and buff == 2:
verticeInput1 = input(" Digite o vertice: ")
verticeInput2 = input(" Digite o vertice: ")
if ((verticeInput1 != verticeInput2) and (verticeInput2 != None) and (verticeInput1 != None)):
if verticeInput2 != "":
valor = int(input(" Digite o peso da aresta: "))
contaAresta += 1
G.add_node(verticeInput1)
G2.add_node(verticeInput1)
else:
valor = 0
contaAresta += 1
G.add_node(verticeInput1)
G2.add_node(verticeInput1)
if verticeInput2 != "":
G.add_node(verticeInput2)
G2.add_node(verticeInput2)
if op == 1 and buff == 1:
G.add_edge(verticeInput1, verticeInput2, weight=valor)
G2.add_edge(verticeInput1, verticeInput2)
if op == 1 and buff == 2:
G.add_edge(verticeInput1, verticeInput2)
G2.add_edge(verticeInput1, verticeInput2, weight=valor)
else:
print(" Operação não válida, tente novamente")
x = input(
"\n Você deseja continuar adicionando vértices[S/N]? ").upper()
if x == "S":
continue
if x == "N":
break
    except ValueError as err:
        print(f" Erro no tipo da entrada: {err}")
def incluir_Vertice_ArestaNValorado(op, buff):
global contaAresta
global verticeInput1
global verticeInput2
try:
while e == 1:
if op == 2 and buff == 1:
verticeInput1 = input(" Digite o vertice de saida: ")
verticeInput2 = input(" Digite o vertice de chegada: ")
if op == 2 and buff == 2:
verticeInput1 = input(" Digite o vertice: ")
verticeInput2 = input(" Digite o vertice: ")
if ((verticeInput1 != verticeInput2) and (verticeInput2 != None) and (verticeInput1 != None)):
contaAresta += 1
if op == 2 and buff == 1:
if verticeInput2 != "":
G.add_node(verticeInput1)
else:
G.add_node(verticeInput1)
if verticeInput2 != "":
G.add_node(verticeInput2)
G.add_edge(verticeInput1,
verticeInput2)
if op == 2 and buff == 2:
                    if verticeInput2
| |
from PIL import ImageTk, Image, ImageGrab, EpsImagePlugin, ImageFont, ImageDraw
from tkinter import ttk, font
from tkinter.filedialog import askopenfilename, asksaveasfilename
from tkinter.messagebox import showinfo
import tkinter as tk, numpy as np, webbrowser, serial, time, math, cv2, os
from threading import Thread
from languages import *
EpsImagePlugin.gs_windows_binary = r'C:\Program Files\gs\gs9.53.3\bin\gswin64c'
# ------------------------------
# lang selected -> start app
def selectLang(lang_selected):
    # update every widget's text with the selected language
    # translations live in the "gorlu_languages.py" file
global lang
lang = lang_selected
tk.Label(lang_frame, text=loading_[lang], **text_config).grid(row = 3)
lang_bar = ttk.Progressbar(lang_frame, length = 450, value = 0)
lang_bar.grid(row = 3, column = 1, columnspan = 2)
text_widget = {info_name: 'info_name', info_text_1: 'info_text_1', info_text_2: 'info_text_2',
config_text_0: 'config_text_0', config_text_1: 'config_text_1', config_text_2: 'config_text_2', config_save_code: 'config_save_code',
calibration_setting_text_0: 'calibration_setting_text_0', calibration_setting_text_1: 'calibration_setting_text_1', calibration_setting_text_2: 'calibration_setting_text_2', calibration_setting_text_3: 'calibration_setting_text_3', calibration_setting_text_4: 'calibration_setting_text_4', calibration_setting_text_5: 'calibration_setting_text_5', calibration_setting_text_6: 'calibration_setting_text_6', calibration_setting_load: 'calibration_setting_load',
printimg_setting_text_0: 'printimg_setting_text_0', printimg_setting_selection: 'printimg_setting_selection', printimg_setting_text_1: 'printimg_setting_text_1', printimg_setting_filling: 'printimg_setting_filling', printimg_setting_go: 'printimg_setting_go',
printhand_setting_text_0: 'printhand_setting_text_0', printhand_setting_text_1: 'printhand_setting_text_1', printhand_setting_text_2: 'printhand_setting_text_2', printhand_setting_text_3: 'printhand_setting_text_3', printhand_setting_button_0: 'printhand_setting_button_0', printhand_setting_button_1: 'printhand_setting_button_1',
printtext_setting_text_0: 'printtext_setting_text_0', printtext_setting_text_1: 'printtext_setting_text_1', printtext_setting_text_2: 'printtext_setting_text_2', printtext_setting_text_3: 'printtext_setting_text_3', printtext_setting_text_4: 'printtext_setting_text_4', printtext_setting_text_5: 'printtext_setting_text_5', printtext_setting_button: 'printtext_setting_button'}
for i in text_widget.keys():
i.config(text=dictionary_[text_widget[i]][lang])
calibration_setting_direction_X.config(value=(direction_[lang]))
calibration_setting_direction_Y.config(value=(direction_[lang]))
printtext_setting_align_o.config(value=align_o[lang])
printtext_setting_align_v.config(value=align_v[lang])
printtext_setting_align_o.set(align_o[lang][1])
printtext_setting_align_v.set(align_v[lang][1])
    # search for installed fonts and offer them as options on the "printtext" page
permitted_font = []
tot_font = len(font.families())
cont = 0
for i in font.families():
try:
ImageFont.truetype(i, size=12)
permitted_font.append(i)
except:
pass
cont += 1
lang_bar['value'] = (round(cont*100/tot_font))
windows.update()
printtext_setting_character.config(value=permitted_font)
printtext_setting_character.set(permitted_font[0])
printimg_setting_quality.config(state='disabled')
printimg_setting_filling.config(state='disabled')
windows.title(windows_[lang])
menubar.add_cascade(label=menu_[lang][0][0], menu=info_menu)
menubar.add_cascade(label=menu_[lang][1][0], menu=config_menu)
menubar.add_command(label=menu_[lang][2], command=lambda: openPage(2))
menubar.add_command(label=menu_[lang][3], command=lambda: openPage(3))
menubar.add_command(label=menu_[lang][4], command=lambda: openPage(4))
menubar.add_command(label=menu_[lang][5], command=lambda: openPage(5))
info_menu.add_command(label=menu_[lang][0][1], command=lambda: openPage(0))
info_menu.add_separator()
info_menu.add_command(label=menu_[lang][0][2], command=lambda: launchLink(0))
info_menu.add_command(label=menu_[lang][0][3], command=lambda: launchLink(1))
config_menu.add_command(label=menu_[lang][1][1], command=lambda: openPage(1))
config_menu.add_separator()
config_menu.add_command(label=menu_[lang][1][2], command=lambda: launchLink(2))
windows.config(menu=menubar)
lang_frame.destroy()
loadData()
openPage(0)
# windows generic references
def loadData():
    # read initial values from "sources\\data\\Printer_calibration.txt" to populate the "calibration page"
    # if reading the file fails, fall back to default values
global setting_data
setting_data = []
try:
setting_file = open(setting_file_, 'r')
setting_file.readline()
for i in range(4):
line = setting_file.readline()
setting_data.append(int(line[-4:-1]))
setting_file.close()
except:
setting_data = [155, 129, 1, 1]
servo_max.set(setting_data[0])
servo_min.set(setting_data[1])
calibration_setting_direction_X.set(
direction_[lang][0] if setting_data[2] == 1 else direction_[lang][1])
calibration_setting_direction_Y.set(
direction_[lang][0] if setting_data[3] == 1 else direction_[lang][1])
def openPage(page):
# forget all frames then pack the selected one
page_dictionary = [info_frame, config_frame, calibration_frame, printimg_frame, printhand_frame, printtext_frame, lang_frame]
for i in range(len(page_dictionary)):
page_dictionary[i].pack_forget()
pack_config = {'fill': tk.BOTH, 'expand': True}
page_dictionary[page].pack(**pack_config)
def launchLink(link):
    # open a web browser at the selected link
link_dictionary = ['https://github.com/Bocchio01/Arduino_CNC_plotter',
'https://bocchio.altervista.org/2021/Gorlu la stampante/',
'https://learn.adafruit.com/adafruit-motor-shield/library-install']
webbrowser.open_new(link_dictionary[link])
# config references
def setConfig(event):
    # open a serial connection on the selected COM port
    # if it fails, show an error message
global ser, calib_check
calib_check = False
try:
ser.close()
except:
pass
try:
ser = serial.Serial(config_COM.get(), 9600, timeout=1)
showinfo(title=setConfig_[lang][0], message=setConfig_[lang][1])
except:
showinfo(title=setConfig_[lang][0], message=setConfig_[lang][2])
def saveCode():
    # ask for a file path and save the Arduino code
filepath = asksaveasfilename(title=saveCode_[lang][0], initialfile=saveCode_[lang][1], defaultextension="txt", filetypes=[("Text Files", "*.txt"), ("All Files", "*.*")])
if not filepath:
return
with open(filepath, 'w') as output_file:
output_file.write(config_arduino_code.get(1.0, tk.END))
# calibration references
def setCalibr(event):
global calib_check
# get data from "calibration page", saves them in "sources\\data\\Printer_calibration.txt"
# by 'sendData()' loads on Arduino board servo position for pen up/down
setting_data[0] = servo_max.get()
setting_data[1] = servo_min.get()
if ((setting_data[0] < 180) and (setting_data[1] < 180) and (setting_data[0] > 0) and (setting_data[1] > 0)):
setting_data[2] = (1 if calibration_setting_direction_X.get() == direction_[lang][0] else -1)
setting_data[3] = (1 if calibration_setting_direction_Y.get() == direction_[lang][0] else -1)
setting_file = open(setting_file_, 'w')
t = 0
setting_file.write(setCalibr_[lang][0] + '\n')
for i in range(4):
t = (t + 2 if i == 2 else t + 1)
setting_file.write(dictionary_['calibration_setting_text_' + str(t)][lang] + '\t ' + str(setting_data[i]) + '\n')
setting_file.close()
try:
ser.open()
except:
try:
ser.close()
ser.open()
except:
showinfo(title=error_msg[lang][0], message=error_msg[lang][1])
return
time.sleep(2)
ser.write((str(setting_data[0]) + ' ' + str(setting_data[1])).encode('utf-8'))
calib_check = True
for j in range(1, 5):
for i in range(1, -1, -1):
s = ('D' if ((i+j) % 2) == 0 else 'U')
dataSend(s, setting_data[2] * j * 30 * i, setting_data[3] * j * 30 * i)
time.sleep(0.5)
else:
showinfo(title=error_msg[lang][0], message=setCalibr_[lang][1])
# printimg references
def openImg():
    # ask for a file path and load the selected image into 'img_global'
    # then call 'elabImg()' to display Canny(img_global) on the 'printimg' page
filepath = askopenfilename(title=openImg_[lang], filetypes=[("File img", "*.jpg *.png *.jpeg"), ("JPG", "*.jpg"), ("PNG", "*.png"), ("JPEG", "*.jpeg"), ("All Files", "*.*")])
if not filepath:
return
else:
global img_global
img_global = cv2.imread(filepath)
printimg_setting_filling.config(state='normal')
printimg_setting_quality.config(state='normal')
elabImg(0)
printimg_setting_quality.set(100)
def elabImg(print_go):
    # works on 'img_work' = 'img_global'
    # delta_Y/X are the differences between 'dim_visualizer' and the dimensions of 'img_work'
    # 'filling.get()' off -> 'quality' is used as the threshold parameter for 'cv2.Canny()'
    # 'filling.get()' on -> 'quality' is used as the parameter for 'cv2.threshold()' on a grayscale version of img_work
    # 'print_go' False -> display 'img_work' on the 'printimg' page
    # 'print_go' True -> return ('img_work', 'delta_X', 'delta_Y')
printimg_setting_go['state']=tk.NORMAL
img_work = img_global
quality = printimg_setting_quality.get()
height, width = img_work.shape[:2]
if (width > height):
delta_Y = dim_visualizer - round(height*dim_visualizer/width)
delta_X = 0
else:
delta_Y = 0
delta_X = dim_visualizer - round(width*dim_visualizer/height)
img_work = cv2.resize(img_work, (dim_visualizer-delta_X, dim_visualizer-delta_Y))
if (filling.get() == 0):
img_work = cv2.bitwise_not(cv2.Canny(img_work, quality, quality))
else:
img_work = cv2.threshold(cv2.cvtColor(img_work, cv2.COLOR_BGR2GRAY), quality / 2, 255, cv2.THRESH_BINARY)[1]
if (print_go != 1):
img_to_display = ImageTk.PhotoImage(image=Image.fromarray(img_work))
printimg_visualizer.configure(image=img_to_display)
printimg_visualizer.image = img_to_display
else:
return(img_work, delta_X, delta_Y)
def stopPrint():
    # global flag that terminates the print process
global user_stop
user_stop = True
dataSend('U', 0, 0)
# new version
def startPrintImg():
    # ask 'elabImg()' for the initial parameters
    # while 'img_to_print' is not yet all white, repeatedly look for the black pixel nearest to the previous one found
    # 2-D array slicing is used to locate the black pixel positions
    # 'dataSend()' sends the pen position and coordinates to the serial port
global user_stop
user_stop = False
sub_windows = tk.Toplevel(bg = bg_general, borderwidth=2, relief='solid')
sub_windows.geometry('300x200')
sub_label = tk.Label(sub_windows, **text_config)
sub_progressbar = ttk.Progressbar(sub_windows, length = 200, value = 0)
sub_label.pack(pady = 20)
sub_progressbar.pack()
tk.Button(sub_windows, text=sub_windows_[lang][0], command = stopPrint, **button_config).pack(pady = 20)
sub_windows.protocol('WM_DELETE_WINDOW', stopPrint)
img_to_print, delta_X, delta_Y = elabImg(1)
black_pixel = np.sum(img_to_print == 0)
X, Y, cont = 0, 0, 0
while not img_to_print.all():
i, cont = 0, cont + 1
while True:
i += 1
try:
y = np.where(img_to_print[max(0, Y-i): Y+i+1, max(0, X-i): X+i+1] == 0)[0][0]
x = np.where(img_to_print[max(0, Y-i): Y+i+1, max(0, X-i): X+i+1] == 0)[1][0]
X, Y = x + max(0, X-i), y + max(0, Y-i)
img_to_print[Y][X] = 255
break
except:
pass
if user_stop == False:
if dataSend(('U' if (i > 2) else 'D'), round(setting_data[2]*(X + delta_X/2)*250/dim_visualizer), round(setting_data[3]*(Y + delta_Y/2)*250/dim_visualizer)) == False:
user_stop = True
break
time.sleep(0.01)
sub_label['text'] = (sub_windows_[lang][1] % (cont, black_pixel, str(sub_progressbar['value'])+' %' ))
sub_progressbar['value'] = (round(cont*100/black_pixel))
sub_windows.update()
try:
dataSend('U', 0, 0)
        sub_windows.after(2000, sub_windows.destroy)  # pass the method itself; calling it inline would destroy the window immediately
except:
pass
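# A toy illustration (added) of the expanding-window nearest-black-pixel search used above;
# the array and coordinates here are illustrative only:
#   img = np.array([[255, 255], [255, 0]], dtype=np.uint8)
#   X = Y = 0
#   for i in range(1, 3):
#       win = img[max(0, Y-i):Y+i+1, max(0, X-i):X+i+1]
#       ys, xs = np.where(win == 0)
#       if ys.size:
#           Y, X = ys[0] + max(0, Y-i), xs[0] + max(0, X-i)
#           break
#   # -> (Y, X) == (1, 1), the black pixel nearest to the starting point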
# printhand references
def savePosn(event):
    # get the coordinates of the current mouse position
    # if the new position is far from the previous one, lift the pen
    # 'dataSend' sends the pen position and current coordinates to the board
global lastx, lasty
try:
if ((abs(event.x - lastx) > 15) or (abs(event.y - lasty) > 15)):
s = 'U'
else:
s = 'D'
except:
s = 'U'
lastx, lasty = event.x, event.y
if ((lastx > 0) and (lastx < dim_visualizer) and (lasty > 0) and (lasty < dim_visualizer)):
dataSend(s, round(setting_data[2]*event.x/2), round(setting_data[3]*event.y/2))
def addLine(event):
# draw a line on the whiteboard and call 'savePosn()' to get next mouse position
printhand_visualizer_board.create_line(lastx, lasty, event.x, event.y)
savePosn(event)
def saveCanvas():
    # create a PostScript file and ask for a file path to save the canvas whiteboard
    # then delete the temporary PostScript file
printhand_visualizer_board.postscript(file="sources\\data\\Lavagna__work.eps")
board_work = Image.open("sources\\data\\Lavagna__work.eps")
filepath = asksaveasfilename(title=saveCanvas_[lang][0], initialfile=saveCanvas_[lang][1], defaultextension="png", filetypes=[("PNG", "*.png"), ("JPG", "*.jpg"), ("JPEG", "*.jpeg"), ("All Files", "*.*")])
if filepath:
board_work.save(filepath)
else:
pass
board_work.close()
os.remove("sources\\data\\Lavagna__work.eps")
# printtext references
def fontChanged(event):
    # when any text parameter changes, re-render the whole text into an image file (using PIL functions)
    # then place the rendered 'text image' on the 'text page'
# text_image is declared as a global variable
global text_image
text_to_print = printtext_setting_entry.get('1.0', 'end-1c')
text_image = Image.new('RGB', (500, 500), (255,255,255))
draw = ImageDraw.Draw(text_image)
font_ = ImageFont.truetype(printtext_setting_character.get(), size=int(printtext_setting_dimension.get()))
w, h = draw.textsize(text_to_print, font=font_)
horizontal = {align_o[lang][0]: (1, 'left'), align_o[lang][1]: ((500-w)/2, 'center'), align_o[lang][2]: ((500-w-1), 'right')}
vertical = {align_v[lang][0]: 1, align_v[lang][1]: (500-h)/2, align_v[lang][2]: (500-h-1)}
X, align = horizontal[printtext_setting_align_o.get()]
Y = vertical[printtext_setting_align_v.get()]
| |
def PDE_solve_fortran(niter, func):
    # (reconstructed header; sz and h are assumed to come from an earlier, truncated cell)
    phi = np.zeros((sz, sz), order='F', dtype=np.float64)
# apply boundary values
phi[0, :] = lower_bndy; phi[:, 0] = lower_bndy
phi[-1, :] = upper_bndy; phi[:, -1] = upper_bndy
# apply source term
rho = np.zeros((sz, sz), order='F', dtype=np.float64)
rho[sz//2, sz//2] = -100/h**2
# iterate PDE steps to get solution
for _ in range(niter):
func(rho, phi, h, sz, sz)
return phi
# + {"slideshow": {"slide_type": "subslide"}, "hideCode": false, "hidePrompt": false, "cell_type": "markdown"}
# Now test the timings.
# + {"slideshow": {"slide_type": "fragment"}, "hideCode": false, "hidePrompt": false}
# %timeit -r 2 -n 100 phi_soln_f = PDE_solve_fortran(n_iter, phi_iteration_fortran)
# %timeit -r 2 -n 100 phi_soln_fb = PDE_solve_fortran(n_iter, phi_iteration_broadcast)
# %timeit -r 2 -n 100 phi_soln_fomp = PDE_solve_fortran(n_iter, phi_iteration_fortran_omp)
# + {"slideshow": {"slide_type": "fragment"}, "hideCode": false, "hidePrompt": false, "cell_type": "markdown"}
# Side note, look how easy it is to pass in the functions (after all, they are just objects) compared to MATLAB where you'd have to make handles or some anonymous function that wraps up the calls.
# + {"slideshow": {"slide_type": "subslide"}, "hideCode": false, "hidePrompt": false, "cell_type": "markdown"}
# Just to show that the answer is the same as before, the improvement isn't because the code is wrong.
# + {"slideshow": {"slide_type": "fragment"}, "hideCode": false, "hidePrompt": false}
import matplotlib.pyplot as plt
phi_soln_f = PDE_solve_fortran(n_iter, phi_iteration_fortran)
plt.figure()
plt.imshow(phi_soln_f, origin='lower')
# + {"slideshow": {"slide_type": "slide"}, "hideCode": false, "hidePrompt": false, "cell_type": "markdown"}
# #### C++
# + {"slideshow": {"slide_type": "fragment"}, "hideCode": false, "hidePrompt": false, "cell_type": "markdown"}
# Most major python libraries have some level of C++ code in the background (e.g. numpy, scipy, and even Python itself — not to mention the bytecode interpreter). As such, the interface between Python and C++ has lots of capabilities — but also lots of information to get lost in. Since this is about algorithm development in Python, not trying to build up Django from scratch, I'm just going to go into interfacing with numpy.
# * [Here's](https://docs.scipy.org/doc/numpy-1.16.1/reference/c-api.html) a more in-depth look at all the numpy C-API functionalities.
# * The full-on Python information for its C-API can be found [here](https://docs.python.org/3/c-api/index.html).
# + {"slideshow": {"slide_type": "subslide"}, "hideCode": false, "hidePrompt": false, "cell_type": "markdown"}
# The basics:
# * Use a C-structure (usually named Py{Name}Object) that is binary-compatible with the PyObject structure itself but holds the additional information needed for that particular object
# * Pointers to PyTypeObject can safely be cast to PyObject pointers, whereas the inverse is safe only if the object is known to be an array.
# + {"slideshow": {"slide_type": "subslide"}, "hideCode": false, "hidePrompt": false, "cell_type": "markdown"}
# In this case, we're interested in the [PyArrayObject](https://docs.scipy.org/doc/numpy-1.16.1/reference/c-api.types-and-structures.html)
# ```C++
# typedef struct PyArrayObject
# {
# PyObject_HEAD // formality
# char *data; // the data bytes
# int nd; // array dimensionality
# npy_intp *dimensions; // the shape of the dimensions
# npy_intp *strides; // the byte strides in each dimension
# PyObject *base; // manages memory if a "copy" of another array
# PyArray_Descr *descr; // struct for memory and data types (endian, bool, int, etc.)
# int flags; // Flags indicating how the memory pointed to by data (C-style, F-style, contiguous, etc.)
# PyObject *weakreflist;
# } PyArrayObject;
# ```
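# For a 2-D array, element (i, j) lives at byte address data + i*strides[0] + j*strides[1];
# this is the same byte-stride picture numpy exposes on the Python side via arr.strides.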
# + {"slideshow": {"slide_type": "subslide"}, "cell_type": "markdown"}
# Note: all the C++ code below needs more and better error checking; if you develop code like this you should add it, but it is omitted here to avoid cluttering the concepts.
# + {"slideshow": {"slide_type": "subslide"}, "hideCode": false, "hidePrompt": false, "cell_type": "markdown"}
# So, let's dive in and see how things work. The first piece of code to develop will be reading in a value from python
#
# ```C++
# PyObject*
# pde_solve_cpp(PyObject *self, PyObject *args)
# {
# // read in n_iter
# int n_iter;
# if (!PyArg_ParseTuple(args, "i", &n_iter))
# {
# std::cerr << "Bad input parameters. Put in just n_iter." << std::endl;
# return NULL;
# }
# ...
# }
# ```
# + {"slideshow": {"slide_type": "subslide"}, "hideCode": false, "hidePrompt": false, "cell_type": "markdown"}
# This first bit just says we will return a pointer to a PyObject*, which is really just to say that we're going to give Python all the bits that go into making a Python object. The first argument is a dummy argument and not used. The second argument contains all the input. In this case, it will contain the number of finite element iterations.
# ```C++
# PyObject*
# pde_solve_cpp(PyObject *self, PyObject *args)
# {
# ...
# }
# ```
# + {"slideshow": {"slide_type": "subslide"}, "hideCode": false, "hidePrompt": false, "cell_type": "markdown"}
# The next piece parses the expected values from the passed arguments. In this case, we are expecting an integer, so read the args and put it into the n_iter memory reference. [This](https://docs.python.org/3/c-api/arg.html) goes over what do with more arguments, including keywords.
# ```C++
# PyObject*
# pde_solve_cpp(PyObject *self, PyObject *args)
# {
# // read in n_iter
# int n_iter;
# if (!PyArg_ParseTuple(args, "i", &n_iter))
# {
# std::cerr << "Bad input parameters. Put in just n_iter." << std::endl;
# return NULL;
# }
# ...
# }
# ```
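# As a further hedged mini-example (not from the original), parsing an int followed by a double
# would use the format string "id":
# ```C++
# int n;
# double tol;
# if (!PyArg_ParseTuple(args, "id", &n, &tol))
# {
#     return NULL;
# }
# ```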
# + {"slideshow": {"slide_type": "subslide"}, "hideCode": false, "hidePrompt": false, "cell_type": "markdown"}
# Now, just like in the Python version, initialize the variables and boundary values.
# ```C++
# PyObject*
# pde_solve_cpp(PyObject *self, PyObject *args)
# {
# ...
# // problem size
# constexpr int sz = 101;
# const int rows = sz;
# const int cols = sz;
# vector<double> phi(rows * cols, 0.0); // initialize with 0s
# // fill last row with 100.0 BC
# std::fill(phi.begin() + (rows - 1)*cols,
# phi.end(), 100.0);
# // make last column 100.0 BC
# for (int i = 0; i < rows; ++i)
# {
# phi[i * cols + cols - 1] = 100.0;
# }
# double h = 1 / (static_cast<double>(sz) - 1); // step size
# // initialize source term and apply BC
# vector<double> rho(rows * cols, 0.0);
# rho[rows/2 * cols + cols/2] = -100.0 / (h * h);
# ...
# }
# ```
# + {"slideshow": {"slide_type": "subslide"}, "hideCode": false, "hidePrompt": false, "cell_type": "markdown"}
# Then run the finite element iterations.
# ```C++
# pde_solve_cpp(PyObject *self, PyObject *args)
# {
# ...
# for(auto i = 0; i < n_iter; ++i)
# {
# phi_iteration_cpp(phi, rho, h, rows, cols);
# }
# ...
# }
# ```
# + {"slideshow": {"slide_type": "subslide"}, "hideCode": false, "hidePrompt": false, "cell_type": "markdown"}
# Where the finite_element iteration is the same old thing, though using linear indexing instead of 2D array indexing. (In linear indexing arr[i, j] -> arr[i*cols + j], based on the row-major memory order shown earlier.)
# ```C++
# void
# phi_iteration_cpp(vector<double>& phi,
# const vector<double>& rho,
# const double h,
# const int rows,
# const int cols)
# {
# // copy the current state up front; reserve() followed by std::copy into an
# // empty vector would write past its (zero) size, which is undefined behavior
# vector<double> phi_now(phi);
# double finite_elem;
#
# // do update equation
# for(int i=1; i < rows - 1; ++i)
# {
# for(int j=1; j < cols - 1; ++j)
# {
# finite_elem =
# (-h*h * rho[i * cols + j] +
# phi_now[(i - 1) * cols + j] +
# phi_now[i * cols + (j - 1)] +
# phi_now[(i + 1) * cols + j] +
# phi_now[i * cols + (j + 1)]);
# phi[i * cols + j] = 0.25 * finite_elem;
# }
# }
# }
# ```
# + {"slideshow": {"slide_type": "subslide"}, "hideCode": false, "hidePrompt": false, "cell_type": "markdown"}
# Lastly, in the pde_solve_cpp function, we need to send the calculated phi vector off to numpy. Essentially, this makes an array telling numpy what dimensions to use (the rows and cols), then converts the phi vector into a Python object and returns it. Pretty simple. (Will go into the vector_to_2Dnparray function next.)
# ```C++
# PyObject*
# pde_solve_cpp(PyObject *self, PyObject *args)
# {
# ...
# // point to array for np to make Python object
# npy_intp dims[2]{rows, cols};
# PyObject* phi_np = vector_to_2Dnparray(phi, dims, NPY_DOUBLE);
# return phi_np;
# }
# ```
# + {"slideshow": {"slide_type": "subslide"}, "hideCode": false, "hidePrompt": false, "cell_type": "markdown"}
# Converting the vector is actually pretty easy. One thing to mention is that the vector data needs to be copied into the numpy array because C++ will deallocate the vector memory once the vector goes out of scope. If I had used arrays, I wouldn't have to copy, but I would have to tell numpy that it has the job of freeing the memory.
# ```C++
# template<typename T>
# static PyObject* vector_to_2Dnparray(const vector<T>& vec, npy_intp* dims, int type_num)
# {
# // note assumes row-major order, can either handle column-major at numpy level or with different API calls
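# // a plausible completion using documented numpy C-API calls (PyArray_SimpleNew allocates
# // the array, then the vector's bytes are copied in); not necessarily the author's exact code:
# PyObject* arr = PyArray_SimpleNew(2, dims, type_num);
# std::memcpy(PyArray_DATA(reinterpret_cast<PyArrayObject*>(arr)),
#             vec.data(), vec.size() * sizeof(T));
# return arr;
# }
# ```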
| |
        feature_map_for_pool_layer = (
            feature_map_for_conv_layer[0] // receptive_field_for_pool_layer[0],
            feature_map_for_conv_layer[1] // receptive_field_for_pool_layer[1]
        )
if (feature_map_for_pool_layer[0] <= 0) or (feature_map_for_pool_layer[1] <= 0):
raise ValueError('Pooling layer 1: ({0}, {1}) is wrong size of feature map!'.format(
feature_map_for_pool_layer[0], feature_map_for_pool_layer[1]
))
l_pool = lasagne.layers.Pool2DLayer(
l_conv,
pool_size=receptive_field_for_pool_layer,
name='l_pool_1'
)
input_size = feature_map_for_pool_layer
for ind in range(len(conv_layers) - 1):
if conv_layers[ind + 1][0] <= 0:
raise ValueError('Convolution layer {0}: {1} is wrong number of feature maps!'.format(
ind + 2, conv_layers[ind + 1][0]
))
receptive_field_for_conv_layer = (
conv_layers[ind + 1][1][0] if conv_layers[ind + 1][1][0] > 0 else input_size[0],
conv_layers[ind + 1][1][1] if conv_layers[ind + 1][1][1] > 0 else input_size[1]
)
if (receptive_field_for_conv_layer[0] <= 0) or (receptive_field_for_conv_layer[1] <= 0):
raise ValueError('Convolution layer {0}: ({1}, {2}) is wrong size of receptive field!'.format(
ind + 2, receptive_field_for_conv_layer[0], receptive_field_for_conv_layer[1]
))
feature_map_for_conv_layer = (
input_size[0] + 1 - receptive_field_for_conv_layer[0],
input_size[1] + 1 - receptive_field_for_conv_layer[1]
)
if (feature_map_for_conv_layer[0] <= 0) or (feature_map_for_conv_layer[1] <= 0):
raise ValueError('Convolution layer {0}: ({1}, {2}) is wrong size of feature map!'.format(
ind + 2, feature_map_for_conv_layer[0], feature_map_for_conv_layer[1]
))
if self.batch_norm:
l_conv = lasagne.layers.batch_norm(
lasagne.layers.Conv2DLayer(
l_pool, num_filters=conv_layers[ind + 1][0],
filter_size=receptive_field_for_conv_layer,
nonlinearity=lasagne.nonlinearities.rectify,
W=lasagne.init.HeUniform(gain='relu') if trainable_shared_params is None
else trainable_shared_params[(ind + 1) * 3],
name='l_conv_{0}'.format(ind + 2)
)
)
else:
l_conv = lasagne.layers.Conv2DLayer(
l_pool, num_filters=conv_layers[ind + 1][0],
filter_size=receptive_field_for_conv_layer,
nonlinearity=lasagne.nonlinearities.rectify,
W=lasagne.init.HeUniform(gain='relu') if trainable_shared_params is None
else trainable_shared_params[(ind + 1) * 2],
b=lasagne.init.Constant(0.0) if trainable_shared_params is None
else trainable_shared_params[(ind + 1) * 2 + 1],
name='l_conv_{0}'.format(ind + 2)
)
receptive_field_for_pool_layer = (
pooling_layers[ind + 1][0] if pooling_layers[ind + 1][0] > 0 else 1,
pooling_layers[ind + 1][1] if pooling_layers[ind + 1][1] > 0 else 1
)
feature_map_for_pool_layer = (
feature_map_for_conv_layer[0] // receptive_field_for_pool_layer[0],
feature_map_for_conv_layer[1] // receptive_field_for_pool_layer[1]
)
if (feature_map_for_pool_layer[0] <= 0) or (feature_map_for_pool_layer[1] <= 0):
raise ValueError('Pooling layer {0}: ({1}, {2}) is wrong size of feature map!'.format(
ind + 2, feature_map_for_pool_layer[0], feature_map_for_pool_layer[1]
))
l_pool = lasagne.layers.Pool2DLayer(
l_conv,
pool_size=receptive_field_for_pool_layer,
name='l_pool_{0}'.format(ind + 2)
)
input_size = feature_map_for_pool_layer
layer_ind = len(conv_layers) * 3 if self.batch_norm else len(conv_layers) * 2
l_in_drop = lasagne.layers.DropoutLayer(l_pool, p=self.dropout)
if self.batch_norm:
l_hid_old = lasagne.layers.batch_norm(
lasagne.layers.DenseLayer(
l_in_drop, num_units=dense_layers[0],
nonlinearity=lasagne.nonlinearities.rectify,
W=lasagne.init.HeUniform(gain='relu') if trainable_shared_params is None
else trainable_shared_params[layer_ind],
name='l_dense_1'
)
)
layer_ind += 3
else:
l_hid_old = lasagne.layers.DenseLayer(
l_in_drop, num_units=dense_layers[0],
nonlinearity=lasagne.nonlinearities.rectify,
W=lasagne.init.HeUniform(gain='relu') if trainable_shared_params is None
else trainable_shared_params[layer_ind],
b=lasagne.init.Constant(0.0) if trainable_shared_params is None
else trainable_shared_params[layer_ind + 1],
name='l_dense_1'
)
layer_ind += 2
last_real_layer = l_hid_old
l_hid_old_drop = lasagne.layers.DropoutLayer(l_hid_old, p=self.dropout)
for ind in range(len(dense_layers) - 1):
if self.batch_norm:
l_hid_new = lasagne.layers.batch_norm(
lasagne.layers.DenseLayer(
l_hid_old_drop, num_units=dense_layers[ind + 1],
nonlinearity=lasagne.nonlinearities.rectify,
W=lasagne.init.HeUniform(gain='relu') if trainable_shared_params is None
else trainable_shared_params[layer_ind],
name='l_dense_{0}'.format(ind + 2)
)
)
layer_ind += 3
else:
l_hid_new = lasagne.layers.DenseLayer(
l_hid_old_drop, num_units=dense_layers[ind + 1],
nonlinearity=lasagne.nonlinearities.rectify,
W=lasagne.init.HeUniform(gain='relu') if trainable_shared_params is None
else trainable_shared_params[layer_ind],
b=lasagne.init.Constant(0.0) if trainable_shared_params is None
else trainable_shared_params[layer_ind + 1],
name='l_dense_{0}'.format(ind + 2)
)
layer_ind += 2
last_real_layer = l_hid_new
l_hid_new_drop = lasagne.layers.DropoutLayer(l_hid_new, p=self.dropout)
l_hid_old_drop = l_hid_new_drop
last_layer = l_hid_old_drop
if number_of_classes > 2:
cnn_output = lasagne.layers.DenseLayer(
last_layer, num_units=number_of_classes,
nonlinearity=lasagne.nonlinearities.softmax,
W=lasagne.init.GlorotUniform() if trainable_shared_params is None else trainable_shared_params[-2],
b=lasagne.init.Constant(0.0) if trainable_shared_params is None else trainable_shared_params[-1],
name='l_cnn'
)
else:
cnn_output = lasagne.layers.DenseLayer(
last_layer, num_units=1,
nonlinearity=lasagne.nonlinearities.sigmoid,
W=lasagne.init.GlorotUniform() if trainable_shared_params is None else trainable_shared_params[-2],
b=lasagne.init.Constant(0.0) if trainable_shared_params is None else trainable_shared_params[-1],
name='l_cnn'
)
return cnn_output, last_real_layer
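# Note on the layout assumed for `trainable_shared_params` above (a sketch of the
# indexing convention, not part of the original code): without batch normalization
# every conv/dense layer consumes two shared variables (W, b); with batch
# normalization it consumes three trainable ones (W plus the batch-norm beta and
# gamma), which is why `layer_ind` advances by 2 or 3 per layer. The output layer
# always takes the last two entries. E.g., for two conv and two dense layers with
# batch_norm=False the expected list is hypothetically:
# [W_conv1, b_conv1, W_conv2, b_conv2, W_dense1, b_dense1, W_dense2, b_dense2, W_out, b_out]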
def build_loss(self, number_of_classes, target_var, cnn, deterministic):
""" Построить вычислительный граф для функции потерь и классификационной функции средствами Theano/Lasagne.
:param number_of_classes: Число распознаваемых классов.
:param target_var: Символьная переменная Theano, задающая желаемые метки классов для обучения/тестирования.
:param cnn: вычислительный граф Theano для всей нейросети.
:param deterministic: булевый флаг, определяющий режим работы (True - тестирование, False - обучение).
:return 2-элементный кортеж: построенные графы для функции потерь и для классификационной функции.
"""
if number_of_classes > 2:
prediction = lasagne.layers.get_output(cnn)
loss = lasagne.objectives.categorical_crossentropy(prediction, target_var)
output_prediction = lasagne.layers.get_output(cnn, deterministic=deterministic)
else:
prediction = lasagne.layers.get_output(cnn)
loss = lasagne.objectives.binary_crossentropy(prediction, target_var)
output_prediction = T.flatten(lasagne.layers.get_output(cnn, deterministic=deterministic))
loss = loss.sum()
return loss, output_prediction
def dump_all(self):
""" Выполнить сериализацию нейросети в словарь (dict).
Метод выгружает значения всех параметров нейросети в словарь, ключами которого являются названия параметров,
а значениями - соответственно, значения. В сериализации участвуют абсолютно все параметры, кроме random_state,
т.е. и управляющие параметры, задаваемые, например, в конструкторе, и настраиваемые параметры, значения которых
устанавливаются по итогам обучения (веса нейросети, распознаваемые классы и прочее).
При сериализации выполняется копирование (а не передача по ссылка) всех составных структур данных.
:return: словарь для всех параметров нейросети.
"""
try:
check_is_fitted(self, ['cnn_', 'predict_fn_', 'n_iter_', 'input_size_', 'loss_value_', 'classes_list_'])
is_trained = True
except Exception:
is_trained = False
params = self.get_params(True)
if is_trained:
params['weights_and_biases'] = lasagne.layers.get_all_param_values(self.cnn_)
params['loss_value_'] = self.loss_value_
params['n_iter_'] = self.n_iter_
params['input_size_'] = self.input_size_
params['classes_list_'] = copy.copy(self.classes_list_)
return params
def load_all(self, new_params):
""" Выполнить десериализацию нейросети из словаря (dict).
Метод проверяет корректность всех параметров нейросети, заданных во входном словаре, и в случае успешной
проверки переносит эти значения в нейросеть (в случае неудачи бросает исключение ValueError). В десериализации
участвуют абсолютно все параметры, кроме random_state, т.е. и управляющие параметры, задаваемые, например, в
конструкторе, и настраиваемые параметры, значения которых устанавливаются по итогам обучения (веса нейросети,
распознаваемые классы и прочее).
При десериализации выполняется копирование (а не передача по ссылка) всех составных структур данных.
:param new_params: словарь (dict) со всеми параметрами нейросети для десериализации.
:return: self
"""
if not isinstance(new_params, dict):
raise ValueError('`new_params` is wrong! Expected {0}.'.format(type({0: 1})))
self.check_params(**new_params)
expected_param_keys = {'layers', 'dropout', 'learning_rate', 'max_epochs_number', 'validation_fraction',
'epochs_before_stopping', 'beta1', 'beta2', 'epsilon', 'batch_size', 'verbose',
'batch_norm', 'warm_start', 'eval_metric'}
params_after_training = {'weights_and_biases', 'loss_value_', 'n_iter_', 'input_size_', 'classes_list_'}
is_fitted = len(set(new_params.keys())) > len(expected_param_keys)
if is_fitted:
if set(new_params.keys()) != (expected_param_keys | params_after_training):
raise ValueError('`new_params` does not contain the expected keys!')
self.layers = copy.deepcopy(new_params['layers'])
self.dropout = new_params['dropout']
self.learning_rate = new_params['learning_rate']
self.max_epochs_number = new_params['max_epochs_number']
self.validation_fraction = new_params['validation_fraction']
self.beta1 = new_params['beta1']
self.beta2 = new_params['beta2']
self.epsilon = new_params['epsilon']
self.verbose = new_params['verbose']
self.epochs_before_stopping = new_params['epochs_before_stopping']
self.batch_size = new_params['batch_size']
self.batch_norm = new_params['batch_norm']
self.warm_start = new_params['warm_start']
self.eval_metric = new_params['eval_metric']
if getattr(self, 'random_state', None) is None:
self.random_state = None
if is_fitted:
if not isinstance(new_params['loss_value_'], float):
raise ValueError('`new_params` is wrong! Generalization loss `loss_value_` must be '
'a floating-point number!')
if not isinstance(new_params['n_iter_'], int):
raise ValueError('`new_params` is wrong! Number of iterations `n_iter_` must be a positive integer!')
if new_params['n_iter_'] <= 0:
raise ValueError('`new_params` is wrong! Number of iterations `n_iter_` must be a positive integer!')
if (not isinstance(new_params['input_size_'], tuple)) and (not isinstance(new_params['input_size_'], list)):
raise ValueError('`new_params` is wrong! All input data sizes `input_size_` must be list or tuple!')
if len(new_params['input_size_']) != 3:
raise ValueError('`new_params` is wrong! All input data sizes `input_size_` must be 3-D sequence!')
for cur in new_params['input_size_']:
if not isinstance(cur, int):
raise ValueError('`new_params` is wrong! Each input data size `input_size_` must be '
'positive integer number!')
if cur <= 0:
raise ValueError('`new_params` is wrong! Each input data size `input_size_` must be '
'positive integer number!')
if (not isinstance(new_params['classes_list_'], list)) and \
(not isinstance(new_params['classes_list_'], tuple)):
raise ValueError('`new_params` is wrong! The classes list `classes_list_` must be list or tuple!')
if len(new_params['classes_list_']) < 2:
raise ValueError('`new_params` is wrong! The classes list `classes_list_` must consist from '
'two or more classes!')
self.random_state = check_random_state(self.random_state)
self.loss_value_ = new_params['loss_value_']
self.n_iter_ = new_params['n_iter_']
self.classes_list_ = copy.copy(new_params['classes_list_'])
self.input_size_ = copy.copy(new_params['input_size_'])
cnn_input_var = T.tensor4('inputs')
target_var = T.ivector('targets')
self.cnn_, _ = self.build_cnn(self.input_size_, len(self.classes_list_), cnn_input_var)
_, test_prediction = self.build_loss(len(self.classes_list_), target_var, self.cnn_, True)
lasagne.layers.set_all_param_values(self.cnn_, new_params['weights_and_biases'])
self.predict_fn_ = theano.function([cnn_input_var], test_prediction, allow_input_downcast=True)
return self
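# A minimal round-trip sketch of the dump_all()/load_all() API above (hypothetical
# usage, assuming `clf` is a trained instance of this classifier class):
#
#     state = clf.dump_all()                        # plain dict of params + weights
#     clf_restored = type(clf)().load_all(state)    # rebuilds the Theano graph
#     assert clf_restored.n_iter_ == state['n_iter_']
#
# Because dump_all() returns only plain Python/numpy objects, the dict itself can
# be persisted with pickle without serializing compiled Theano functions.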
def __iterate_minibatches(self, inputs, targets, indices, shuffle=False):
""" Итерироваться "минибатчами" по датасету - входным примерам inputs и соответствующим меткам классов targets.
:param inputs: Входные примеры X.
:param targets: Метки классов y.
:param indices: Индексы интересных нам примеров, участвующих в итерировании.
:param shuffle: Булевый флажок, указывающий, итерироваться случайно или всё же последовательно.
:return Итератор (каждый элемент: "минибатч" из batch_size входных примеров и соответствующих им меток классов).
"""
for indices_in_batch in iterate(indices, self.batch_size, shuffle, self.random_state if shuffle else None):
yield inputs[indices_in_batch], targets[indices_in_batch]
def __iterate_minibatches_for_prediction(self, inputs):
""" Итерироваться "минибатчами" по входным примерам inputs.
:param inputs: Входные примеры X.
:return Итератор (каждый элемент: "минибатч" из batch_size входных примеров).
"""
for indices_in_batch in iterate(numpy.arange(0, inputs.shape[0], 1, numpy.int32), self.batch_size, False, None):
yield inputs[indices_in_batch]
def __copy__(self):
cls = self.__class__
result = cls.__new__(cls)
result.load_all(self.dump_all())
return result
| |
"""
Cityscape Database
"""
from __future__ import print_function
import cv2
import os
import numpy as np
import multiprocessing
from six.moves import cPickle
from multiprocessing.dummy import Pool
from imdb import IMDB
from ..processing.bbox_transform import bbox_overlaps
from ..core.helper import get_scale_factor
class Cityscape(IMDB):
def __init__(self, image_set, root_path, dataset_path, load_memory=False, use_mask=False, panoptic=False):
"""
fill basic information to initialize imdb
:param image_set: train or val or trainval or test
:param root_path: 'cache' and 'rpn_data'
:param dataset_path: data and results
:return: imdb object
"""
super(Cityscape, self).__init__('cityscape', image_set, root_path, dataset_path)
self.image_set = image_set
self.root_path = root_path
self.data_path = dataset_path
self.load_memory = load_memory
self.use_mask = use_mask
self.panoptic = panoptic
self.classes = ['__background__', 'person', 'rider', 'car', 'truck', 'bus', 'train', 'motorcycle', 'bicycle']
self.class_id = [0, 24, 25, 26, 27, 28, 31, 32, 33]
self.num_classes = len(self.classes)
self.image_set_index = self.load_image_set_index()
self.num_images = len(self.image_set_index)
print('num_images %d' % self.num_images)
def load_image_set_index(self):
"""
find out which indexes correspond to given image set (train or val)
:return:
"""
image_set_index_file = os.path.join(self.data_path, 'imglists', self.image_set + '.lst')
assert os.path.exists(image_set_index_file), 'Path does not exist: {}'.format(image_set_index_file)
image_set_index = []
with open(image_set_index_file, 'r') as f:
for line in f:
if len(line) > 1:
label = line.strip().split('\t')
image_set_index.append(label[1])
return image_set_index
def image_path_from_index(self, index):
"""
given image index, find out full path
:param index: index of a specific image
:return: full path of this image
"""
image_file = os.path.join(self.data_path, index)
assert os.path.exists(image_file), 'Path does not exist: {}'.format(image_file)
return image_file
def gt_roidb(self):
"""
return ground truth image regions database
:return: imdb[image_index]['boxes', 'gt_classes', 'ins_id', 'ins_seg', 'gt_overlaps', 'flipped']
"""
cache_file = os.path.join(self.cache_path, self.name + '_gt_roidb.pkl')
if os.path.exists(cache_file):
with open(cache_file, 'rb') as f:
roidb = cPickle.load(f)
print('{} gt roidb loaded from {}'.format(self.name, cache_file))
return roidb
gt_roidb = self.load_cityscape_annotations()
with open(cache_file, 'wb') as f:
cPickle.dump(gt_roidb, f, cPickle.HIGHEST_PROTOCOL)
return gt_roidb
def load_from_seg(self, ins_seg_gt_path):
ins_seg_gt_path = os.path.join(self.data_path, ins_seg_gt_path)
print(ins_seg_gt_path)
assert os.path.exists(ins_seg_gt_path), 'Path does not exist: {}'.format(ins_seg_gt_path)
ins_seg_mask = cv2.imread(ins_seg_gt_path, -1)
boxes = []
gt_classes = []
ins_id = []
gt_overlaps = []
for c in range(1, len(self.class_id)):
px = np.where((ins_seg_mask >= self.class_id[c] * 1000) & (ins_seg_mask < (self.class_id[c] + 1) * 1000))
if len(px[0]) == 0:
continue
ids = np.unique(ins_seg_mask[px])
for id in ids:
px = np.where(ins_seg_mask == id)
x_min = np.min(px[1])
y_min = np.min(px[0])
x_max = np.max(px[1])
y_max = np.max(px[0])
if x_max - x_min <= 1 or y_max - y_min <= 1:
continue
boxes.append([x_min, y_min, x_max, y_max])
gt_classes.append(c)
ins_id.append(id % 1000)
overlaps = np.zeros(self.num_classes)
overlaps[c] = 1
gt_overlaps.append(overlaps)
return np.asarray(boxes), np.asarray(gt_classes), np.asarray(ins_id), ins_seg_mask, np.asarray(gt_overlaps)
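# Cityscapes instanceIds convention relied on above: pixels of an instance of a
# class with label `class_id` are stored as class_id * 1000 + instance_index, so
# the second car instance (car has id 26) is encoded as 26001. Decoding:
#
#     class_id = pixel_id // 1000   # 26001 // 1000 -> 26 (car)
#     instance = pixel_id % 1000    # 26001 % 1000  -> 1
#
# which is exactly what the `ins_id.append(id % 1000)` line above does.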
def load_cityscape_annotations(self):
"""
for a given index, load image and bounding boxes info from a single image list
:return: list of record['boxes', 'gt_classes', 'ins_id', 'ins_seg', 'gt_overlaps', 'flipped']
"""
imglist_file = os.path.join(self.data_path, 'imglists', self.image_set + '.lst')
assert os.path.exists(imglist_file), 'Path does not exist: {}'.format(imglist_file)
imgfiles_list = []
with open(imglist_file, 'r') as f:
for line in f:
file_list = dict()
label = line.strip().split('\t')
file_list['img_id'] = label[0]
file_list['img_path'] = label[1]
file_list['ins_seg_path'] = label[2].replace('labelTrainIds', 'instanceIds')
if self.panoptic:
file_list['sem_seg_path'] = label[2]
imgfiles_list.append(file_list)
assert len(imgfiles_list) == self.num_images, 'number of boxes matrix must match number of images'
pool = Pool(2 * multiprocessing.cpu_count())
roidb = pool.map(self.process_image_list, imgfiles_list)
return roidb
def process_image_list(self, img_rec):
roi_rec = dict()
if self.load_memory:
im_path = os.path.join(self.data_path, img_rec['img_path'])
print('loading %s to memory' % im_path)
im_array = cv2.imread(im_path)
roi_rec['image'] = im_array
size = im_array.shape
else:
roi_rec['image'] = os.path.join(self.data_path, img_rec['img_path'])
size = cv2.imread(roi_rec['image']).shape
roi_rec['height'] = size[0]
roi_rec['width'] = size[1]
boxes, gt_classes, ins_id, ins_seg_mask, gt_overlaps = self.load_from_seg(img_rec['ins_seg_path'])
if boxes.size == 0:
total_num_objs = 0
boxes = np.zeros((total_num_objs, 4), dtype=np.uint16)
gt_overlaps = np.zeros((total_num_objs, self.num_classes), dtype=np.float32)
gt_classes = np.zeros((total_num_objs,), dtype=np.int32)
roi_rec['boxes'] = boxes
roi_rec['gt_classes'] = gt_classes
roi_rec['ins_id'] = ins_id
if self.load_memory:
roi_rec['ins_seg'] = ins_seg_mask
if self.panoptic:
roi_rec['sem_seg'] = cv2.imread(img_rec['sem_seg_path'], -1)
else:
roi_rec['ins_seg'] = os.path.join(self.data_path, img_rec['ins_seg_path'])
if self.panoptic:
roi_rec['sem_seg'] = os.path.join(self.data_path, img_rec['sem_seg_path'])
roi_rec['max_classes'] = gt_overlaps.argmax(axis=1) # this field is not for gt bbox
roi_rec['max_overlaps'] = gt_overlaps.max(axis=1) # this field is not for gt bbox
roi_rec['flipped'] = False
return roi_rec
def append_flipped_images(self, roidb):
"""
append flipped images to an roidb
flip boxes coordinates, images will be actually flipped when loading into network
:param roidb: [image_index]['boxes', 'gt_classes', 'gt_overlaps', 'flipped']
:return: roidb: [image_index]['boxes', 'gt_classes', 'gt_overlaps', 'flipped']
"""
print('append flipped images to roidb')
assert self.num_images == len(roidb)
for i in range(self.num_images):
roi_rec = roidb[i]
boxes = roi_rec['boxes'].copy()
if boxes.shape[0] != 0:
oldx1 = boxes[:, 0].copy()
oldx2 = boxes[:, 2].copy()
boxes[:, 0] = roi_rec['width'] - oldx2 - 1
boxes[:, 2] = roi_rec['width'] - oldx1 - 1
assert (boxes[:, 2] >= boxes[:, 0]).all(), \
'img_name %s, width %d\n' % (roi_rec['image'], roi_rec['width']) + \
np.array_str(roi_rec['boxes'], precision=3, suppress_small=True)
entry = {'image': roi_rec['image'],
'height': roi_rec['height'],
'width': roi_rec['width'],
'boxes': boxes,
'gt_classes': roidb[i]['gt_classes'],
'max_classes': roidb[i]['max_classes'],
'max_overlaps': roidb[i]['max_overlaps'],
'ins_seg': roidb[i]['ins_seg'],
'ins_id': roidb[i]['ins_id'],
'flipped': True}
if self.panoptic:
entry.update({'sem_seg': roidb[i]['sem_seg']})
roidb.append(entry)
self.image_set_index *= 2
return roidb
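# Worked example of the flip transform above: for an image of width 2048 and a box
# [x1, y1, x2, y2] = [10, 20, 110, 220], the horizontally flipped box is
# [2048 - 110 - 1, 20, 2048 - 10 - 1, 220] = [1937, 20, 2037, 220]; the `- 1`
# accounts for 0-based pixel coordinates, and x2 >= x1 is preserved.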
def evaluate_maskfcn(self, results_pack):
for result_rec in results_pack['results_list']:
image_path = result_rec['image']
im_info = result_rec['im_info']
detections = result_rec['boxes']
seg_masks = result_rec['masks']
filename = image_path.split("/")[-1]
filename = filename.replace('.png', '')
result_path = 'eval_ins/'
print('writing results for: %s' % filename)
result_txt = os.path.join(result_path, filename)
result_txt = result_txt + '.txt'
count = 0
f = open(result_txt, 'w')
for j, labelID in enumerate(self.class_id):
if labelID == 0:
continue
dets = detections[j]
masks = seg_masks[j]
for i in range(len(dets)):
bbox = dets[i, :4]
score = dets[i, -1]
bbox = list(map(int, bbox))  # list() keeps indexing valid on Python 3, where map() is lazy
mask_image = np.zeros((int(im_info[0, 0]), int(im_info[0, 1])))
mask = masks[i, :, :]
mask_scale_factor = get_scale_factor(max(bbox[2] - bbox[0], bbox[3] - bbox[1]))
mask = cv2.resize(mask, dsize=None, fx=mask_scale_factor, fy=mask_scale_factor)
mask = mask[0:(bbox[3]-bbox[1]), 0:(bbox[2]-bbox[0])]
mask[mask > 0.5] = 200
mask[mask <= 0.5] = 0
mask_image[bbox[1]: bbox[3], bbox[0]: bbox[2]] = mask
cv2.imwrite(os.path.join(result_path, filename) + '_' + str(count) + '.png', mask_image)
f.write('{:s} {:s} {:.8f}\n'.format(filename + '_' + str(count) + '.png', str(labelID), score))
count += 1
f.flush()
f.close()
def evaluate_mask(self, results_pack):
for result_rec in results_pack['results_list']:
image_path = result_rec['image']
im_info = result_rec['im_info']
detections = result_rec['boxes']
seg_masks = result_rec['masks']
filename = image_path.split("/")[-1]
filename = filename.replace('.png', '')
result_path = 'eval_ins/'
print('writing results for: %s' % filename)
result_txt = os.path.join(result_path, filename)
result_txt = result_txt + '.txt'
count = 0
f = open(result_txt, 'w')
for j, labelID in enumerate(self.class_id):
if labelID == 0:
continue
dets = detections[j]
masks = seg_masks[j]
for i in range(len(dets)):
bbox = dets[i, :4]
score = dets[i, -1]
bbox = list(map(int, bbox))  # list() keeps indexing valid on Python 3, where map() is lazy
mask_image = np.zeros((int(im_info[0, 0]), int(im_info[0, 1])))
mask = masks[i, :, :]
mask = cv2.resize(mask, (bbox[2] - bbox[0], (bbox[3] - bbox[1])), interpolation=cv2.INTER_LINEAR)
mask[mask > 0.5] = 200
mask[mask <= 0.5] = 0
mask_image[bbox[1]: bbox[3], bbox[0]: bbox[2]] = mask
cv2.imwrite(os.path.join(result_path, filename) + '_' + str(count) + '.png', mask_image)
f.write('{:s} {:s} {:.8f}\n'.format(filename + '_' + str(count) + '.png', str(labelID), score))
count += 1
f.flush()
f.close()
def evaluate_detections(self, results_pack):
"""
:param results_pack: {
all_boxes: [class_id][image_id][box_id]
all_masks: [class_id][image_id][box_id]
results_list: #img {
image:
im_info:
boxes:
masks:
}
}
:return:
"""
all_boxes_and_scores = results_pack["all_boxes"]
all_boxes = [[None for _ in range(self.num_images)] for _ in range(self.num_classes)]
all_scores = [[None for _ in range(self.num_images)] for _ in range(self.num_classes)]
for i in range(1, self.num_classes):
for j in range(self.num_images):
if all_boxes_and_scores[i][j] is not None:
all_boxes[i][j] = all_boxes_and_scores[i][j][:, :4]
all_scores[i][j] = all_boxes_and_scores[i][j][:, 4]
gt_roidb = self.gt_roidb()
gt_boxes = [roirec["boxes"] for roirec in gt_roidb]
gt_classes = [roirec["gt_classes"] for roirec in gt_roidb]
all_gt_boxes = [[None for _ in range(self.num_images)] for _ in range(self.num_classes)]
for i in range(self.num_images):
for cls in range(1, self.num_classes):
all_gt_boxes[cls][i] = gt_boxes[i][np.where(gt_classes[i] == cls)[0]]
class_names = self.classes
# the following codes are dataset-agnostic
# generalized evaluation of detection takes 4 inputs
# all_boxes, all_scores, all_gt_boxes, class names(not mandatory)
from rcnn.core.helper import single_class_ap
threshs = [0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95]
aps = np.empty(shape=(len(threshs), len(all_gt_boxes) - 1))
for i_thrs, thresh in enumerate(threshs):
for i_cls, cls in enumerate(range(1, len(all_gt_boxes))):
aps[i_thrs, i_cls] = single_class_ap(all_gt_boxes[cls], all_boxes[cls], all_scores[cls], thresh)
for i_ap, (ap50, ap) in enumerate(zip(aps[0], aps.mean(axis=0))):
print("%-15s: AP50=%.3f, AP=%.3f" % (class_names[i_ap + 1], ap50, ap))
print("%-15s: AP50=%.3f, AP=%.3f" % ("average", aps[0].mean(), aps.mean()))
def evaluate_segmentation(self, results_pack):
"""
:param results_pack: {
all_boxes: [class_id][image_id][box_id]
all_masks: [class_id][image_id][box_id]
results_list: #img {
image:
im_info:
boxes:
masks:
seg:
}
}
:return:
"""
result_path = 'eval_seg/'
trainId2id = [7, 8, 11, 12, 13, 17, 19, 20, 21, 22, 23, 24, 25, | |
"video_gold": 3600
},
"68": {
"level": 68,
"exp": 42000,
"cost": 49000,
"order_good_min": 3,
"order_good_max": 20,
"order_refresh_min": 600,
"order_refresh_max": 900,
"worker_gold": 6.79999999999999,
"video_gold": 3650
},
"69": {
"level": 69,
"exp": 43000,
"cost": 50000,
"order_good_min": 3,
"order_good_max": 20,
"order_refresh_min": 600,
"order_refresh_max": 900,
"worker_gold": 6.89999999999999,
"video_gold": 3700
},
"70": {
"level": 70,
"exp": 44000,
"cost": 51000,
"order_good_min": 3,
"order_good_max": 25,
"order_refresh_min": 600,
"order_refresh_max": 900,
"worker_gold": 6.99999999999999,
"video_gold": 3750
},
"71": {
"level": 71,
"exp": 45000,
"cost": 52000,
"order_good_min": 3,
"order_good_max": 25,
"order_refresh_min": 600,
"order_refresh_max": 900,
"worker_gold": 7.09999999999999,
"video_gold": 3800
},
"72": {
"level": 72,
"exp": 46000,
"cost": 53000,
"order_good_min": 3,
"order_good_max": 25,
"order_refresh_min": 600,
"order_refresh_max": 900,
"worker_gold": 7.19999999999999,
"video_gold": 3850
},
"73": {
"level": 73,
"exp": 47000,
"cost": 54000,
"order_good_min": 3,
"order_good_max": 25,
"order_refresh_min": 600,
"order_refresh_max": 900,
"worker_gold": 7.29999999999999,
"video_gold": 3900
},
"74": {
"level": 74,
"exp": 48000,
"cost": 55000,
"order_good_min": 3,
"order_good_max": 25,
"order_refresh_min": 600,
"order_refresh_max": 900,
"worker_gold": 7.39999999999999,
"video_gold": 3950
},
"75": {
"level": 75,
"exp": 49000,
"cost": 56000,
"order_good_min": 3,
"order_good_max": 25,
"order_refresh_min": 600,
"order_refresh_max": 900,
"worker_gold": 7.49999999999999,
"video_gold": 4000
},
"76": {
"level": 76,
"exp": 50000,
"cost": 57000,
"order_good_min": 3,
"order_good_max": 25,
"order_refresh_min": 600,
"order_refresh_max": 900,
"worker_gold": 7.59999999999999,
"video_gold": 4050
},
"77": {
"level": 77,
"exp": 51000,
"cost": 58000,
"order_good_min": 3,
"order_good_max": 25,
"order_refresh_min": 600,
"order_refresh_max": 900,
"worker_gold": 7.69999999999999,
"video_gold": 4100
},
"78": {
"level": 78,
"exp": 52000,
"cost": 59000,
"order_good_min": 3,
"order_good_max": 25,
"order_refresh_min": 600,
"order_refresh_max": 900,
"worker_gold": 7.79999999999999,
"video_gold": 4150
},
"79": {
"level": 79,
"exp": 53000,
"cost": 60000,
"order_good_min": 3,
"order_good_max": 25,
"order_refresh_min": 600,
"order_refresh_max": 900,
"worker_gold": 7.89999999999999,
"video_gold": 4200
},
"80": {
"level": 80,
"exp": 54000,
"cost": 61000,
"order_good_min": 3,
"order_good_max": 25,
"order_refresh_min": 600,
"order_refresh_max": 900,
"worker_gold": 7.99999999999999,
"video_gold": 4250
},
"81": {
"level": 81,
"exp": 55000,
"cost": 62000,
"order_good_min": 3,
"order_good_max": 25,
"order_refresh_min": 600,
"order_refresh_max": 900,
"worker_gold": 8.09999999999999,
"video_gold": 4300
},
"82": {
"level": 82,
"exp": 56000,
"cost": 63000,
"order_good_min": 3,
"order_good_max": 25,
"order_refresh_min": 600,
"order_refresh_max": 900,
"worker_gold": 8.19999999999999,
"video_gold": 4350
},
"83": {
"level": 83,
"exp": 57000,
"cost": 64000,
"order_good_min": 3,
"order_good_max": 25,
"order_refresh_min": 600,
"order_refresh_max": 900,
"worker_gold": 8.29999999999999,
"video_gold": 4400
},
"84": {
"level": 84,
"exp": 58000,
"cost": 65000,
"order_good_min": 3,
"order_good_max": 25,
"order_refresh_min": 600,
"order_refresh_max": 900,
"worker_gold": 8.39999999999999,
"video_gold": 4450
},
"85": {
"level": 85,
"exp": 59000,
"cost": 66000,
"order_good_min": 3,
"order_good_max": 25,
"order_refresh_min": 600,
"order_refresh_max": 900,
"worker_gold": 8.49999999999999,
"video_gold": 4500
},
"86": {
"level": 86,
"exp": 60000,
"cost": 67000,
"order_good_min": 3,
"order_good_max": 25,
"order_refresh_min": 600,
"order_refresh_max": 900,
"worker_gold": 8.59999999999999,
"video_gold": 4550
},
"87": {
"level": 87,
"exp": 61000,
"cost": 68000,
"order_good_min": 3,
"order_good_max": 25,
"order_refresh_min": 600,
"order_refresh_max": 900,
"worker_gold": 8.69999999999999,
"video_gold": 4600
},
"88": {
"level": 88,
"exp": 62000,
"cost": 69000,
"order_good_min": 3,
"order_good_max": 25,
"order_refresh_min": 600,
"order_refresh_max": 900,
"worker_gold": 8.79999999999998,
"video_gold": 4650
},
"89": {
"level": 89,
"exp": 63000,
"cost": 70000,
"order_good_min": 3,
"order_good_max": 25,
"order_refresh_min": 600,
"order_refresh_max": 900,
"worker_gold": 8.89999999999998,
"video_gold": 4700
},
"90": {
"level": 90,
"exp": 64000,
"cost": 71000,
"order_good_min": 3,
"order_good_max": 30,
"order_refresh_min": 600,
"order_refresh_max": 900,
"worker_gold": 8.99999999999998,
"video_gold": 4750
},
"91": {
"level": 91,
"exp": 65000,
"cost": 72000,
"order_good_min": 3,
"order_good_max": 30,
"order_refresh_min": 600,
"order_refresh_max": 900,
"worker_gold": 9.09999999999998,
"video_gold": 4800
},
"92": {
"level": 92,
"exp": 66000,
"cost": 73000,
"order_good_min": 3,
"order_good_max": 30,
"order_refresh_min": 600,
"order_refresh_max": 900,
"worker_gold": 9.19999999999998,
"video_gold": 4850
},
"93": {
"level": 93,
"exp": 67000,
"cost": 74000,
"order_good_min": 3,
"order_good_max": 30,
"order_refresh_min": 600,
"order_refresh_max": 900,
"worker_gold": 9.29999999999998,
"video_gold": 4900
},
"94": {
"level": 94,
"exp": 68000,
"cost": 75000,
"order_good_min": 3,
"order_good_max": 30,
"order_refresh_min": 600,
"order_refresh_max": 900,
"worker_gold": 9.39999999999998,
"video_gold": 4950
},
"95": {
"level": 95,
"exp": 69000,
"cost": 76000,
"order_good_min": 3,
"order_good_max": 30,
"order_refresh_min": 600,
"order_refresh_max": 900,
"worker_gold": 9.49999999999998,
"video_gold": 5000
},
"96": {
"level": 96,
"exp": 70000,
"cost": 77000,
"order_good_min": 3,
"order_good_max": 30,
"order_refresh_min": 600,
"order_refresh_max": 900,
"worker_gold": 9.59999999999998,
"video_gold": 5050
},
"97": {
"level": 97,
"exp": 71000,
"cost": 78000,
"order_good_min": 3,
"order_good_max": 30,
"order_refresh_min": 600,
"order_refresh_max": 900,
"worker_gold": 9.69999999999998,
"video_gold": 5100
},
"98": {
"level": 98,
"exp": 72000,
"cost": 79000,
"order_good_min": 3,
"order_good_max": 30,
"order_refresh_min": 600,
"order_refresh_max": 900,
"worker_gold": 9.79999999999998,
"video_gold": 5150
},
"99": {
"level": 99,
"exp": 73000,
"cost": 80000,
"order_good_min": 3,
"order_good_max": 30,
"order_refresh_min": 600,
"order_refresh_max": 900,
"worker_gold": 9.89999999999998,
"video_gold": 5200
},
"100": {
"level": 100,
"exp": 74000,
"cost": 81000,
"order_good_min": 3,
"order_good_max": 30,
"order_refresh_min": 600,
"order_refresh_max": 900,
"worker_gold": 9.99999999999998,
"video_gold": 5250
},
"101": {
"level": 101,
"exp": 75000,
"cost": 82000,
"order_good_min": 3,
"order_good_max": 30,
"order_refresh_min": 600,
"order_refresh_max": 900,
"worker_gold": 10.05,
"video_gold": 5300
},
"102": {
"level": 102,
"exp": 76000,
"cost": 83000,
"order_good_min": 3,
"order_good_max": 30,
"order_refresh_min": 600,
"order_refresh_max": 900,
"worker_gold": 10.1,
"video_gold": 5350
},
"103": {
"level": 103,
"exp": 77000,
"cost": 84000,
"order_good_min": 3,
"order_good_max": 30,
"order_refresh_min": 600,
"order_refresh_max": 900,
"worker_gold": 10.15,
"video_gold": 5400
},
"104": {
"level": 104,
"exp": 78000,
"cost": 85000,
"order_good_min": 3,
"order_good_max": 30,
"order_refresh_min": 600,
"order_refresh_max": 900,
"worker_gold": 10.2,
"video_gold": 5450
},
"105": {
"level": 105,
"exp": 79000,
"cost": 86000,
"order_good_min": 3,
"order_good_max": 30,
"order_refresh_min": 600,
"order_refresh_max": 900,
"worker_gold": 10.25,
"video_gold": 5500
},
"106": {
"level": 106,
"exp": 80000,
"cost": 87000,
"order_good_min": 3,
"order_good_max": 30,
"order_refresh_min": 600,
"order_refresh_max": 900,
"worker_gold": 10.3,
"video_gold": 5550
},
"107": {
"level": 107,
"exp": 81000,
"cost": 88000,
"order_good_min": 3,
"order_good_max": 30,
"order_refresh_min": 600,
"order_refresh_max": 900,
"worker_gold": 10.35,
"video_gold": 5600
},
"108": {
"level": 108,
"exp": 82000,
"cost": 89000,
"order_good_min": 3,
"order_good_max": 30,
"order_refresh_min": 600,
"order_refresh_max": 900,
"worker_gold": 10.4,
"video_gold": 5650
},
"109": {
"level": 109,
"exp": 83000,
"cost": 90000,
"order_good_min": 3,
"order_good_max": 30,
"order_refresh_min": 600,
"order_refresh_max": 900,
"worker_gold": 10.45,
"video_gold": 5700
},
"110": {
"level": 110,
"exp": 84000,
"cost": 91000,
"order_good_min": 3,
"order_good_max": 30,
"order_refresh_min": 600,
"order_refresh_max": 900,
"worker_gold": 10.5,
"video_gold": 5750
},
"111": {
"level": 111,
"exp": 85000,
"cost": 92000,
"order_good_min": 3,
"order_good_max": 30,
"order_refresh_min": 600,
"order_refresh_max": 900,
"worker_gold": 10.55,
"video_gold": 5800
},
"112": {
"level": 112,
"exp": 86000,
"cost": 93000,
"order_good_min": 3,
"order_good_max": 30,
"order_refresh_min": 600,
"order_refresh_max": 900,
"worker_gold": 10.6,
"video_gold": 5850
},
"113": {
"level": 113,
"exp": 87000,
"cost": 94000,
"order_good_min": 3,
"order_good_max": 30,
"order_refresh_min": 600,
"order_refresh_max": 900,
"worker_gold": 10.65,
"video_gold": 5900
},
"114": {
"level": 114,
"exp": 88000,
"cost": 95000,
"order_good_min": 3,
"order_good_max": 30,
"order_refresh_min": 600,
"order_refresh_max": 900,
"worker_gold": 10.7,
"video_gold": 5950
},
"115": {
"level": 115,
"exp": 89000,
"cost": 96000,
"order_good_min": 3,
"order_good_max": 30,
"order_refresh_min": 600,
"order_refresh_max": 900,
"worker_gold": 10.75,
"video_gold": 6000
},
"116": {
"level": 116,
"exp": 90000,
"cost": 97000,
"order_good_min": 3,
"order_good_max": 30,
"order_refresh_min": 600,
"order_refresh_max": 900,
"worker_gold": 10.8,
"video_gold": 6050
},
"117": {
"level": 117,
"exp": 91000,
"cost": 98000,
"order_good_min": 3,
"order_good_max": 30,
"order_refresh_min": 600,
"order_refresh_max": 900,
"worker_gold": 10.85,
"video_gold": 6100
},
"118": {
"level": 118,
"exp": 92000,
"cost": 99000,
"order_good_min": 3,
"order_good_max": 30,
"order_refresh_min": 600,
"order_refresh_max": 900,
"worker_gold": 10.9,
"video_gold": 6150
},
"119": {
"level": 119,
"exp": 93000,
"cost": 100000,
"order_good_min": 3,
"order_good_max": 30,
"order_refresh_min": 600,
"order_refresh_max": 900,
"worker_gold": 10.95,
"video_gold": 6200
},
"120": {
"level": 120,
"exp": 94000,
"cost": 102000,
"order_good_min": 3,
"order_good_max": 30,
"order_refresh_min": 600,
"order_refresh_max": 900,
"worker_gold": 11,
"video_gold": 6250
},
"121": {
"level": 121,
"exp": 95000,
"cost": 104000,
"order_good_min": 3,
"order_good_max": 30,
"order_refresh_min": 600,
"order_refresh_max": 900,
"worker_gold": 11.05,
"video_gold": 6300
},
"122": {
"level": 122,
"exp": 96000,
"cost": 106000,
"order_good_min": 3,
"order_good_max": 30,
"order_refresh_min": 600,
"order_refresh_max": 900,
"worker_gold": 11.1,
"video_gold": 6350
},
"123": {
"level": 123,
"exp": 97000,
"cost": 108000,
"order_good_min": 3,
"order_good_max": 30,
"order_refresh_min": 600,
"order_refresh_max": 900,
"worker_gold": 11.15,
"video_gold": 6400
},
"124": {
"level": 124,
"exp": 98000,
"cost": 110000,
"order_good_min": 3,
"order_good_max": 30,
"order_refresh_min": 600,
"order_refresh_max": 900,
"worker_gold": 11.2,
"video_gold": 6450
},
"125": {
"level": 125,
"exp": 99000,
"cost": 112000,
"order_good_min": 3,
"order_good_max": 30,
"order_refresh_min": 600,
"order_refresh_max": 900,
"worker_gold": 11.25,
"video_gold": 6500
},
"126": {
"level": 126,
"exp": 100000,
"cost": 114000,
"order_good_min": 3,
"order_good_max": 30,
"order_refresh_min": 600,
"order_refresh_max": 900,
"worker_gold": 11.3,
"video_gold": 6550
},
"127": {
"level": 127,
"exp": 101000,
"cost": 116000,
"order_good_min": 3,
"order_good_max": 30,
"order_refresh_min": 600,
"order_refresh_max": 900,
"worker_gold": 11.35,
"video_gold": 6600
},
"128": {
"level": 128,
"exp": 102000,
"cost": 118000,
"order_good_min": 3,
"order_good_max": 30,
"order_refresh_min": 600,
"order_refresh_max": 900,
"worker_gold": | |
# -*- coding: utf-8 -*-
import matplotlib.pyplot as plt
import numpy as np
from scipy.constants import pi
from scipy.special import binom
import warnings
from .utilities import _initialize_figure, _format_axes
class _BasePipe(object):
"""
Template for pipe classes.
Pipe classes inherit from this class.
Attributes
----------
borehole : Borehole object
Borehole class object of the borehole containing the U-Tube.
nPipes : int
Number of U-Tubes, equals to 1.
nInlets : int
Total number of pipe inlets, equals to 1.
nOutlets : int
Total number of pipe outlets, equals to 1.
Notes
-----
The expected array shapes of input parameters and outputs are documented
for each class method. `nInlets` and `nOutlets` are the number of inlets
and outlets to the borehole, and both correspond to the number of
independent parallel pipes. `nSegments` is the number of discretized
segments along the borehole. `nPipes` is the number of pipes (i.e. the
number of U-tubes) in the borehole. `nDepths` is the number of depths at
which temperatures are evaluated.
"""
def __init__(self, borehole):
self.b = borehole
self.nPipes = 1
self.nInlets = 1
self.nOutlets = 1
def get_temperature(self, z, T_f_in, T_b, m_flow_borehole, cp_f):
"""
Returns the fluid temperatures of the borehole at a depth (z).
Parameters
----------
z : float or (nDepths,) array
Depths (in meters) to evaluate the fluid temperatures.
T_f_in : float or (nInlets,) array
Inlet fluid temperatures (in Celsius).
T_b : float or (nSegments,) array
Borehole wall temperatures (in Celsius).
m_flow_borehole : float or (nInlets,) array
Inlet mass flow rates (in kg/s) into the borehole.
cp_f : float or (nInlets,) array
Fluid specific isobaric heat capacity (in J/kg.degC).
Returns
-------
T_f : (2*nPipes,) or (nDepths, 2*nPipes,) array
Fluid temperature (in Celsius) in each pipe. The returned shape
depends on the type of the parameter `z`.
"""
T_b = np.atleast_1d(T_b)
nSegments = len(T_b)
z_all = np.atleast_1d(z).flatten()
AB = list(zip(*[self.coefficients_temperature(
zi, m_flow_borehole, cp_f, nSegments) for zi in z_all]))
a_in = np.stack(AB[0], axis=-1)
a_b = np.stack(AB[1], axis=-1)
T_f = np.einsum('ijk,j->ki', a_in, np.atleast_1d(T_f_in)) \
+ np.einsum('ijk,j->ki', a_b, T_b)
# Return 1d array if z was supplied as scalar
if np.isscalar(z):
T_f = T_f.flatten()
return T_f
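# A small usage sketch (hypothetical values) of the shape convention documented in
# get_temperature() above, where `pipe` is an instance of a concrete subclass that
# implements coefficients_temperature():
#
#     T_f = pipe.get_temperature(50., 5., 1., 0.25, 4000.)
#     # scalar z -> T_f.shape == (2*nPipes,)
#     T_f = pipe.get_temperature(np.linspace(0., 150., 30), 5., 1., 0.25, 4000.)
#     # (30,) z  -> T_f.shape == (30, 2*nPipes)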
def get_inlet_temperature(self, Q_f, T_b, m_flow_borehole, cp_f):
"""
Returns the inlet fluid temperatures of the borehole required for given heat extraction rates.
Parameters
----------
Q_f : float or (nInlets,) array
Heat extraction from the fluid circuits (in Watts).
T_b : float or (nSegments,) array
Borehole wall temperatures (in Celsius).
m_flow_borehole : float or (nInlets,) array
Inlet mass flow rates (in kg/s) into the borehole.
cp_f : float or (nInlets,) array
Fluid specific isobaric heat capacity (in J/kg.degC).
Returns
-------
T_in : float or (nOutlets,) array
Inlet fluid temperatures (in Celsius) into each inlet pipe. The
returned type corresponds to the type of the parameter `Q_f`.
"""
T_b = np.atleast_1d(T_b)
nSegments = len(T_b)
# Build coefficient matrices
a_qf, a_b = self.coefficients_inlet_temperature(
m_flow_borehole, cp_f, nSegments)
# Evaluate inlet temperatures
T_f_in = a_qf @ np.atleast_1d(Q_f) + a_b @ T_b
# Return float if Qf was supplied as scalar
if np.isscalar(Q_f) and not np.isscalar(T_f_in):
T_f_in = T_f_in.item()
return T_f_in
def get_outlet_temperature(self, T_f_in, T_b, m_flow_borehole, cp_f):
"""
Returns the outlet fluid temperatures of the borehole.
Parameters
----------
T_f_in : float or (nInlets,) array
Inlet fluid temperatures (in Celsius).
T_b : float or (nSegments,) array
Borehole wall temperatures (in Celsius).
m_flow_borehole : float or (nInlets,) array
Inlet mass flow rates (in kg/s) into the borehole.
cp_f : float or (nInlets,) array
Fluid specific isobaric heat capacity (in J/kg.degC).
Returns
-------
T_f_out : float or (nOutlets,) array
Outlet fluid temperatures (in Celsius) from each outlet pipe. The
returned type corresponds to the type of the parameter `T_f_in`.
"""
T_b = np.atleast_1d(T_b)
nSegments = len(T_b)
# Build coefficient matrices
a_in, a_b = self.coefficients_outlet_temperature(
m_flow_borehole, cp_f, nSegments)
# Evaluate outlet temperatures
T_f_out = a_in @ np.atleast_1d(T_f_in) + a_b @ T_b
# Return float if Tin was supplied as scalar
if np.isscalar(T_f_in) and not np.isscalar(T_f_out):
T_f_out = T_f_out.item()
return T_f_out
def get_borehole_heat_extraction_rate(
self, T_f_in, T_b, m_flow_borehole, cp_f):
"""
Returns the heat extraction rates of the borehole.
Parameters
----------
T_f_in : float or (nInlets,) array
Inlet fluid temperatures (in Celsius).
T_b : float or (nSegments,) array
Borehole wall temperatures (in Celsius).
m_flow_borehole : float or (nInlets,) array
Inlet mass flow rates (in kg/s) into the borehole.
cp_f : float or (nInlets,) array
Fluid specific isobaric heat capacity (in J/kg.degC).
Returns
-------
Q_b : float or (nSegments,) array
Heat extraction rates along each borehole segment (in Watts). The
returned type corresponds to the type of the parameter `T_b`.
"""
T_b_is_scalar = np.isscalar(T_b)
T_b = np.atleast_1d(T_b)
nSegments = len(T_b)
a_in, a_b = self.coefficients_borehole_heat_extraction_rate(
m_flow_borehole, cp_f, nSegments)
Q_b = a_in @ np.atleast_1d(T_f_in) + a_b @ T_b
# Return float if T_b was supplied as scalar (the scalar check must happen
# before np.atleast_1d replaces T_b with an array, or it is always False)
if T_b_is_scalar and not np.isscalar(Q_b):
Q_b = Q_b.item()
return Q_b
def get_fluid_heat_extraction_rate(
self, T_f_in, T_b, m_flow_borehole, cp_f):
"""
Returns the heat extraction rates of the borehole.
Parameters
----------
T_f_in : float or (nInlets,) array
Inlet fluid temperatures (in Celsius).
T_b : float or (nSegments,) array
Borehole wall temperatures (in Celsius).
m_flow_borehole : float or (nInlets,) array
Inlet mass flow rates (in kg/s) into the borehole.
cp_f : float or (nInlets,) array
Fluid specific isobaric heat capacity (in J/kg.degC).
Returns
-------
Q_f : float or (nOutlets,) array
Heat extraction rates from each fluid circuit (in Watts). The
returned type corresponds to the type of the parameter `T_f_in`.
"""
T_b = np.atleast_1d(T_b)
nSegments = len(T_b)
a_in, a_b = self.coefficients_fluid_heat_extraction_rate(
m_flow_borehole, cp_f, nSegments)
Q_f = a_in @ np.atleast_1d(T_f_in) + a_b @ T_b
# Return float if T_f_in was supplied as scalar
if np.isscalar(T_f_in) and not np.isscalar(Q_f):
Q_f = Q_f.item()
return Q_f
def get_total_heat_extraction_rate(
self, T_f_in, T_b, m_flow_borehole, cp_f):
"""
Returns the total heat extraction rate of the borehole.
Parameters
----------
T_f_in : float or (nInlets,) array
Inlet fluid temperatures (in Celsius).
T_b : float or (nSegments,) array
Borehole wall temperatures (in Celsius).
m_flow_borehole : float or (nInlets,) array
Inlet mass flow rates (in kg/s) into the borehole.
cp_f : float or (nInlets,) array
Fluid specific isobaric heat capacity (in J/kg.degC).
Returns
-------
Q_t : float
Total net heat extraction rate of the borehole (in Watts).
"""
Q_f = self.get_fluid_heat_extraction_rate(
T_f_in, T_b, m_flow_borehole, cp_f)
Q_t = np.sum(Q_f)
return Q_t
def coefficients_inlet_temperature(self, m_flow_borehole, cp_f, nSegments):
"""
Build coefficient matrices to evaluate inlet fluid temperature.
Returns coefficients for the relation:
.. math::
\\mathbf{T_{f,in}} = \\mathbf{a_{q,f}} \\mathbf{Q_{f}}
+ \\mathbf{a_{b}} \\mathbf{T_b}
Parameters
----------
m_flow_borehole : float or (nInlets,) array
Inlet mass flow rates (in kg/s) into the borehole.
cp_f : float or (nInlets,) array
Fluid specific isobaric heat capacity (in J/kg.degC).
nSegments : int
Number of borehole segments.
Returns
-------
a_qf : (nOutlets, nInlets,) array
Array of coefficients for inlet fluid temperature.
a_b : (nOutlets, nSegments,) array
Array of coefficients for borehole wall temperatures.
"""
# method_id for coefficients_inlet_temperature is 3
method_id = 3
# Check if stored coefficients are available
if self._check_coefficients(
m_flow_borehole, cp_f, nSegments, method_id):
a_qf, a_b = self._get_stored_coefficients(method_id)
else:
# Coefficient matrices for fluid heat extraction rates:
# [Q_{f}] = [b_in]*[T_{f,in}] + [b_b]*[T_{b}]
b_in, b_b = self.coefficients_fluid_heat_extraction_rate(
m_flow_borehole, cp_f, nSegments)
b_in_m1 = np.linalg.inv(b_in)
# Coefficient matrices for inlet fluid temperature:
# [T_{f,in}] = [a_qf]*[Q_{f}] + [a_b]*[T_{b}]
a_qf = b_in_m1
a_b = -b_in_m1 @ b_b
# Store coefficients
self._set_stored_coefficients(
m_flow_borehole, cp_f, nSegments, (a_qf, a_b), method_id)
return a_qf, a_b
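# The inversion above is plain linear algebra on the heat-extraction relation:
# from Q_f = b_in @ T_f_in + b_b @ T_b it follows that
# T_f_in = inv(b_in) @ Q_f - inv(b_in) @ b_b @ T_b, i.e. a_qf = inv(b_in) and
# a_b = -inv(b_in) @ b_b, assuming b_in is square and non-singular
# (nInlets == nOutlets).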
def coefficients_outlet_temperature(
self, m_flow_borehole, cp_f, nSegments):
"""
Build coefficient matrices to evaluate outlet fluid temperature.
Returns coefficients for the relation:
.. math::
\\mathbf{T_{f,out}} = \\mathbf{a_{in}} \\mathbf{T_{f,in}}
+ \\mathbf{a_{b}} \\mathbf{T_b}
Parameters
----------
m_flow_borehole : float or (nInlets,) array
Inlet mass flow rates (in kg/s) into the borehole.
cp_f : float or (nInlets,) array
Fluid specific isobaric heat capacity (in J/kg.degC).
nSegments : int
Number of borehole segments.
Returns
-------
a_in : (nOutlets, nInlets,) array
Array of coefficients for inlet fluid temperature.
a_b : (nOutlets, nSegments,) array
Array of coefficients for borehole wall temperatures.
"""
# method_id for coefficients_outlet_temperature is | |
from typing import Tuple
from NewDeclarationInQueue.processfiles.customprocess.formulars.davere import DAvere
from NewDeclarationInQueue.processfiles.customprocess.search_text_line_parameter import SearchTextLineParameter
from NewDeclarationInQueue.processfiles.customprocess.table_config_detail import TableConfigDetail
from NewDeclarationInQueue.processfiles.tableobjects.art import Art
from NewDeclarationInQueue.processfiles.tableobjects.building import Building
from NewDeclarationInQueue.processfiles.tableobjects.debt import Debt
from NewDeclarationInQueue.processfiles.tableobjects.finance import Finance
from NewDeclarationInQueue.processfiles.tableobjects.gift import Gift
from NewDeclarationInQueue.processfiles.tableobjects.income import Income
from NewDeclarationInQueue.processfiles.tableobjects.investment import Investment
from NewDeclarationInQueue.processfiles.tableobjects.mobile import Mobile
from NewDeclarationInQueue.processfiles.tableobjects.parcel import Parcel
from NewDeclarationInQueue.processfiles.tableobjects.transport import Transport
#from NewDeclarationInQueue.processfiles.customprocess.text_with_special_ch import TextWithSpecialCharacters
from NewDeclarationInQueue.processfiles.process_messages import ProcessMessages
class Davere01(DAvere):
"""
Class for a specific form of the Wealth Declaration
"""
def __init__(self, no_of_pages: int):
self.no_of_pages = no_of_pages
def get_parcels(self, config: TableConfigDetail, data: dict, n_page: int, json: dict, message: ProcessMessages) -> Tuple[dict, ProcessMessages, int]:
"""
Get the info from the table of the specific object
Args:
data (dict): table info from the Form Recognizer service
n_page (int): page number where the parcel table is
json (dict): output JSON info
message (ProcessMessages): processing message collector
Returns:
Tuple[dict, ProcessMessages, int]: response JSON for the specific object, processing messages
and the page number where the table ends
"""
tables, message, end_page_no = self.find_table_in_document_between_lines(data['ocr_form_response'], \
n_page, self.no_of_pages, config.upper, config.lower, config.header, message)
message, result = self.extract_table_info_to_json('parcels', tables, lambda x: Parcel(), message)
if message.has_errors() or result is not None:
json['parcels'] = result
return json, message, (end_page_no if end_page_no > 0 else n_page)
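# Note on the pattern shared by the get_* extractors in this class: each one
# (i) locates a table between the configured `upper` and `lower` anchor lines,
# starting at page n_page, (ii) maps every table row to a domain object through
# the factory lambda passed to extract_table_info_to_json, and (iii) returns
# end_page_no as the next page cursor, falling back to the original n_page when
# no table end was found (end_page_no <= 0).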
def get_buildings(self, config: TableConfigDetail, data: dict, n_page: int, json: dict, message: ProcessMessages) -> Tuple[dict, ProcessMessages, int]:
"""
Get the info from the table of the specific object
Args:
data (dict): table info from the Form Recognizer service
n_page (int): page number where the buildings table is
json (dict): output JSON info
message (ProcessMessages): processing message collector
Returns:
Tuple[dict, ProcessMessages, int]: response JSON for the specific object, processing messages
and the page number where the table ends
"""
tables, message, end_page_no = self.find_table_in_document_between_lines(data['ocr_form_response'], \
n_page, self.no_of_pages, config.upper, config.lower, config.header, message)
message, result = self.extract_table_info_to_json('buildings', tables, lambda x: Building(), message)
if message.has_errors() or result is not None:
json['buildings'] = result
return json, message, (end_page_no if end_page_no > 0 else n_page)
def get_transport(self, config: TableConfigDetail, data: dict, n_page: int, json: dict, message: ProcessMessages) -> Tuple[dict, ProcessMessages, int]:
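"""
Get the info from the table of the specific object
Args:
data (dict): table info from the Form Recognizer service
n_page (int): page number where the transport table is
json (dict): output JSON info
message (ProcessMessages): processing message collector
Returns:
Tuple[dict, ProcessMessages, int]: response JSON for the specific object, processing messages
and the page number where the table ends
"""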
tables, message, end_page_no = self.find_table_in_document_between_lines(data['ocr_form_response'], \
n_page, self.no_of_pages, config.upper, config.lower, config.header, message)
message, result = self.extract_table_info_to_json('transport', tables, lambda x: Transport(), message)
if message.has_errors() or result is not None:
json['transport'] = result
return json, message, (end_page_no if end_page_no > 0 else n_page)
def get_art(self, config: TableConfigDetail, data: dict, n_page: int, json: dict, message: ProcessMessages) -> Tuple[dict, ProcessMessages, int]:
"""
Get the info from the table of the specific object
Args:
data (dict): table info from the Form Recognizer service
n_page (int): page number where the art table is
json (dict): output JSON info
message (ProcessMessages): processing message collector
Returns:
Tuple[dict, ProcessMessages, int]: response JSON for the specific object, processing messages
and the page number where the table ends
"""
tables, message, end_page_no = self.find_table_in_document_between_lines(data['ocr_form_response'], \
n_page, self.no_of_pages, config.upper, config.lower, config.header, message)
message, result = self.extract_table_info_to_json('art', tables, lambda x: Art(), message)
if message.has_errors() or result is not None:
json['art'] = result
return json, message, (end_page_no if end_page_no > 0 else n_page)
def get_mobile(self, config: TableConfigDetail, data: dict, n_page: int, json: dict, message: ProcessMessages) -> Tuple[dict, ProcessMessages, int]:
"""
Get the info from the table of the specific object
Args:
data (dict): table info from the Form Recognizer service
n_page (int): page number where the mobile table is
json (dict): output JSON info
message (ProcessMessages): processing message collector
Returns:
Tuple[dict, ProcessMessages, int]: response JSON for the specific object, processing messages
and the page number where the table ends
"""
tables, message, end_page_no = self.find_table_in_document_between_lines(data['ocr_form_response'], \
n_page, self.no_of_pages, config.upper, config.lower, config.header, message)
message, result = self.extract_table_info_to_json('mobile', tables, lambda x: Mobile(), message)
if message.has_errors() or result is not None:
json['mobile'] = result
return json, message, (end_page_no if end_page_no > 0 else n_page)
def get_finances(self, config: TableConfigDetail, data: dict, n_page: int, json: dict, message: ProcessMessages) -> Tuple[dict, ProcessMessages, int]:
"""
Get the info from the table of the specific object
Args:
data (dict): table info from the Form Recognizer service
n_page (int): page number where the finances table is
json (dict): output JSON info
message (ProcessMessages): processing message collector
Returns:
Tuple[dict, ProcessMessages, int]: response JSON for the specific object, processing messages
and the page number where the table ends
"""
tables, message, end_page_no = self.find_table_in_document_between_lines(data['ocr_form_response'], \
n_page, self.no_of_pages, config.upper, config.lower, config.header, message)
message, result = self.extract_table_info_to_json('finances', tables, lambda x: Finance(), message)
if message.has_errors() or result is not None:
json['finance'] = result
return json, message, (end_page_no if end_page_no > 0 else n_page)
def get_investments(self, config: TableConfigDetail, data: dict, n_page: int, json: dict, message: ProcessMessages) -> Tuple[dict, ProcessMessages, int]:
"""
Get the info from the table of the specific object
Args:
data (dict): table info from the Form Recognizer service
n_page (int): page number where the investments table is
json (dict): output JSON info
message (ProcessMessages): processing message collector
Returns:
Tuple[dict, ProcessMessages, int]: response JSON for the specific object, processing messages
and the page number where the table ends
"""
tables, message, end_page_no = self.find_table_in_document_between_lines(data['ocr_form_response'], \
n_page, self.no_of_pages, config.upper, config.lower, config.header, message)
message, result = self.extract_table_info_to_json('investments', tables, lambda x: Investment(), message)
if message.has_errors() or result is not None:
json['investment'] = result
return json, message, (end_page_no if end_page_no > 0 else n_page)
def get_extra_finance_info(self, data: dict, n_page: int, json: dict, message: ProcessMessages) -> Tuple[dict, ProcessMessages, int]:
"""
Get text from a specific section
Args:
data (dict): text info from the Form Recognizer service
n_page (int): page number where the section is
json (dict): output JSON info
message (ProcessMessages): processing message collector
Returns:
Tuple[dict, ProcessMessages, int]: response JSON for the specific object, processing messages
and the page number where the table ends
"""
lines, end_page_no = self.find_lines_in_document_between_lines(data['ocr_form_response'], \
n_page, self.no_of_pages, \
'3. Alte active producatoare de venituri nete,', None, False, \
'NOTA:', None, False)
result = self.extract_lines_info_to_json(lines)
if result is not None and len(result) > 0:
json['finance_extra_info'] = result
return json, message, (end_page_no if end_page_no > 0 else n_page)
def get_debt(self, config: TableConfigDetail, data: dict, n_page: int, json: dict, message: ProcessMessages) -> Tuple[dict, ProcessMessages, int]:
"""
Get the info from the table of the specific object
Args:
data (dict): table info from the Form Recognizer service
n_page (int): page number where the debt table is
json (dict): output JSON info
message (ProcessMessages): processing message collector
Returns:
Tuple[dict, ProcessMessages, int]: response JSON for the specific object, processing messages
and the page number where the table ends
"""
tables, message, end_page_no = self.find_table_in_document_between_lines(data['ocr_form_response'], \
n_page, self.no_of_pages, config.upper, config.lower, config.header, message)
message, result = self.extract_table_info_to_json('debt', tables, lambda x: Debt(), message)
if message.has_errors() or result is not None:
json['debt'] = result
return json, message, (end_page_no if end_page_no > 0 else n_page)
def get_gift(self, config: TableConfigDetail, data: dict, n_page: int, json: dict, message: ProcessMessages) -> Tuple[dict, ProcessMessages, int]:
"""
Get the info from the table of the specific object
Args:
data (dict): table info from the Form Recognizer service
n_page (int): page number where the gift table is
json (dict): output JSON info
message (ProcessMessages): processing message collector
Returns:
Tuple[dict, ProcessMessages, int]: response JSON for the specific object, processing messages
and the page number where the table ends
"""
tables, message, end_page_no = self.find_table_in_document_between_lines(data['ocr_form_response'], \
n_page, self.no_of_pages, config.upper, config.lower, config.header, message)
message, result = self.extract_table_info_one_level_to_json(tables, \
config.first_level, lambda x: Gift(), message)
if message.has_errors() or result is not None:
json['gift'] = result
return json, message, (end_page_no if end_page_no > 0 else n_page)
def get_income(self, config: TableConfigDetail, data: dict, n_page: int, json: dict, message: ProcessMessages) -> Tuple[dict, ProcessMessages, int]:
"""
Get the info from the table of the specific object
Args:
data (dict): table info from the Form Recognizer service
n_page (int): page number where the parcel | |
Type of action to broadcast. Choose one, depending on what the user is about to receive: typing for text messages, upload_photo for photos, record_video or upload_video for videos, record_voice or upload_voice for voice notes, upload_document for general files, choose_sticker for stickers, find_location for location data, record_video_note or upload_video_note for video notes.
**Returns:**
- A `tuple`, on success a `bool` as first member and a botApiResponse object as second member
"""
data = {
"chat_id": chat_id,
"action": action,
}
return self.response(self.sendRequest("sendChatAction", data), bool)
def getUserProfilePhotos(self, user_id: int, offset: int = None, limit: int = None):
"""Use this method to get a list of profile pictures for a user. Returns a UserProfilePhotos object. [See Telegram API](https://core.telegram.org/bots/api#getuserprofilephotos)
- - - - -
**Args**:
- `user_id` :`int` Unique identifier of the target user
- `offset` :`int` Sequential number of the first photo to be returned. By default, all photos are returned.
- `limit` :`int` Limits the number of photos to be retrieved. Values between 1-100 are accepted. Defaults to 100.
**Returns:**
- A `tuple`, on success a `types.UserProfilePhotos` as first member and a botApiResponse object as second member
"""
data = {
"user_id": user_id,
"offset": offset,
"limit": limit,
}
return self.response(self.sendRequest("getUserProfilePhotos", data), types.UserProfilePhotos)
def getFile(self, file_id: str):
"""Use this method to get basic info about a file and prepare it for downloading. For the moment, bots can download files of up to 20MB in size. On success, a File object is returned. The file can then be downloaded via the link https://api.telegram.org/file/bot<token>/<file_path>, where <file_path> is taken from the response. It is guaranteed that the link will be valid for at least 1 hour. When the link expires, a new one can be requested by calling getFile again. [See Telegram API](https://core.telegram.org/bots/api#getfile)
- - - - -
**Args**:
- `file_id` :`str` File identifier to get info about
**Returns:**
- A `tuple`, on success a `types.File` as first member and a botApiResponse object as second member
"""
data = {
"file_id": file_id,
}
return self.response(self.sendRequest("getFile", data), types.File)
def banChatMember(self, chat_id: Union[int, str], user_id: int, until_date: int = None, revoke_messages: bool = None):
"""Use this method to ban a user in a group, a supergroup or a channel. In the case of supergroups and channels, the user will not be able to return to the chat on their own using invite links, etc., unless unbanned first. The bot must be an administrator in the chat for this to work and must have the appropriate administrator rights. Returns True on success. [See Telegram API](https://core.telegram.org/bots/api#banchatmember)
- - - - -
**Args**:
- `chat_id` :`Union[int,str,]` Unique identifier for the target group or username of the target supergroup or channel (in the format @channelusername)
- `user_id` :`int` Unique identifier of the target user
- `until_date` :`int` Date when the user will be unbanned, unix time. If user is banned for more than 366 days or less than 30 seconds from the current time they are considered to be banned forever. Applied for supergroups and channels only.
- `revoke_messages` :`bool` Pass True to delete all messages from the chat for the user that is being removed. If False, the user will be able to see messages in the group that were sent before the user was removed. Always True for supergroups and channels.
**Returns:**
- A `tuple`, on success a `bool` as first member and a botApiResponse object as second member
"""
data = {
"chat_id": chat_id,
"user_id": user_id,
"until_date": until_date,
"revoke_messages": revoke_messages,
}
return self.response(self.sendRequest("banChatMember", data), bool)
def unbanChatMember(self, chat_id: Union[int, str], user_id: int, only_if_banned: bool = None):
"""Use this method to unban a previously banned user in a supergroup or channel. The user will not return to the group or channel automatically, but will be able to join via link, etc. The bot must be an administrator for this to work. By default, this method guarantees that after the call the user is not a member of the chat, but will be able to join it. So if the user is a member of the chat they will also be removed from the chat. If you don't want this, use the parameter only_if_banned. Returns True on success. [See Telegram API](https://core.telegram.org/bots/api#unbanchatmember)
- - - - -
**Args**:
- `chat_id` :`Union[int,str,]` Unique identifier for the target group or username of the target supergroup or channel (in the format @username)
- `user_id` :`int` Unique identifier of the target user
- `only_if_banned` :`bool` Do nothing if the user is not banned
**Returns:**
- A `tuple`, on success a `bool` as first member and a botApiResponse object as second member
"""
data = {
"chat_id": chat_id,
"user_id": user_id,
"only_if_banned": only_if_banned,
}
return self.response(self.sendRequest("unbanChatMember", data), bool)
def restrictChatMember(self, chat_id: Union[int, str, ], user_id: int, permissions: types.ChatPermissions, until_date: int = None):
"""Use this method to restrict a user in a supergroup. The bot must be an administrator in the supergroup for this to work and must have the appropriate administrator rights. Pass True for all permissions to lift restrictions from a user. Returns True on success. [See Telegram API](https://core.telegram.org/bots/api#restrictchatmember)
- - - - -
**Args**:
- `chat_id` :`Union[int,str,]` Unique identifier for the target chat or username of the target supergroup (in the format @supergroupusername)
- `user_id` :`int` Unique identifier of the target user
- `permissions` :`types.ChatPermissions` A JSON-serialized object for new user permissions
- `until_date` :`int` Date when restrictions will be lifted for the user, unix time. If user is restricted for more than 366 days or less than 30 seconds from the current time, they are considered to be restricted forever
**Returns:**
- A `tuple`, on success a `bool` as first member and a botApiResponse object as second member
"""
data = {
"chat_id": chat_id,
"user_id": user_id,
"permissions": helper.toDict(permissions, True),
"until_date": until_date,
}
return self.response(self.sendRequest("restrictChatMember", data), bool)
def promoteChatMember(self, chat_id: Union[int, str, ], user_id: int, is_anonymous: bool = None, can_manage_chat: bool = None, can_post_messages: bool = None, can_edit_messages: bool = None, can_delete_messages: bool = None, can_manage_voice_chats: bool = None, can_restrict_members: bool = None, can_promote_members: bool = None, can_change_info: bool = None, can_invite_users: bool = None, can_pin_messages: bool = None):
"""Use this method to promote or demote a user in a supergroup or a channel. The bot must be an administrator in the chat for this to work and must have the appropriate administrator rights. Pass False for all boolean parameters to demote a user. Returns True on success. [See Telegram API](https://core.telegram.org/bots/api#promotechatmember)
- - - - -
**Args**:
- `chat_id` :`Union[int,str,]` Unique identifier for the target chat or username of the target channel (in the format @channelusername)
- `user_id` :`int` Unique identifier of the target user
- `is_anonymous` :`bool` Pass True, if the administrator's presence in the chat is hidden
- `can_manage_chat` :`bool` Pass True, if the administrator can access the chat event log, chat statistics, message statistics in channels, see channel members, see anonymous administrators in supergroups and ignore slow mode. Implied by any other administrator privilege
- `can_post_messages` :`bool` Pass True, if the administrator can create channel posts, channels only
- `can_edit_messages` :`bool` Pass True, if the administrator can edit messages of other users and can pin messages, channels only
- `can_delete_messages` :`bool` Pass True, if the administrator can delete messages of other users
- `can_manage_voice_chats` :`bool` Pass True, if the administrator can manage voice chats
- `can_restrict_members` :`bool` Pass True, if the administrator can restrict, ban or unban chat members
- `can_promote_members` :`bool` Pass True, if the administrator can add new administrators with a subset of their own privileges or demote administrators that he has promoted, directly or indirectly (promoted by administrators that were appointed by him)
- `can_change_info` :`bool` Pass True, if the administrator can change chat title, photo and other settings
- `can_invite_users` :`bool` Pass True, if the administrator can invite new users to the chat
import math
import os
import itertools
from tqdm import tqdm
import IPython
import numpy as np
import sklearn.cluster
import cv2
from PIL import Image  # used by mask_to_image below (may also arrive via the wildcard imports)
from moviepy.editor import ImageSequenceClip  # used by save_mask_video below
from ..models import basnet
from ..utils.downloads import *
from ..utils.console import *
from ..utils import EasyDict
from ..image import *
from .canvas import *
default_color_labels = [
[255,0,0], [0, 255, 0], [0,0,255],
[255,255,0], [255,0,255], [255,255,0],
[0,0,0], [255,255,255], [64,0,192],
[192,64,0], [0,192,64], [192,0,64],
[64,192,0], [0,64,192]
]
def mask_to_image(mask):
mask = (255.0 * mask).astype(np.uint8)
mask_pil = Image.fromarray(mask).convert('RGB')
return mask_pil
def image_to_mask(img):
mask = np.array(img).astype(np.float32) / 255.0
return mask
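# Hedged sketch: the two helpers above are inverses up to uint8 quantization,
# so a float mask survives the PIL round trip to within 1/255 per pixel.
#   m = np.random.rand(64, 64)
#   m2 = image_to_mask(mask_to_image(m))[:, :, 0]  # RGB back to one channel
#   assert np.abs(m2 - m).max() <= 1.0 / 255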
def generate_mask_frames(masks, flatten_blend=False, merge=True):
masks = masks if isinstance(masks, list) else [masks]
color_labels = default_color_labels
frames = []
for mask in masks:
h, w, nc = mask.shape
mask_arr = np.zeros((h, w * nc))
for c in range(nc):
mask_arr[:, c * w:(c+1)*w] = mask[:, :, c]
if flatten_blend:
mask_arr = 0.5*(mask_arr>0.0)+0.5*(mask_arr==1.0)
if merge:
mask_sum = np.sum(mask, axis=2)
mask_norm = mask / mask_sum[:, :, np.newaxis]
mask_frame = np.sum(
[[mask_norm[:, :, c] * clr
for clr in color_labels[c%len(color_labels)]]
for c in range(nc)],
axis=0).transpose((1,2,0))
else:
mask_frame = 255 * mask_arr
frames.append(mask_frame)
return frames
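# Hedged sketch: with merge=True a 2-channel mask is normalized per pixel and
# coloured from default_color_labels (channel 0 red, channel 1 green), so
# overlapping channels blend instead of saturating.
#   m = np.random.rand(128, 128)
#   frames = generate_mask_frames(np.stack([m, 1.0 - m], axis=2))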
def view_mask(masks, flatten_blend=False, merge=True, animate=True, fps=30):
masks = masks if isinstance(masks, list) else [masks]
masks = [get_mask(m, t=0) if isinstance(m, dict) else m for m in masks]
animate = animate if len(masks)>1 else False
frames = generate_mask_frames(masks, flatten_blend, merge)
if animate:
return frames_to_movie(frames, fps=fps)
else:
for frame in frames:
display(frame)
def save_mask_video(filename, masks, flatten_blend=False, merge=True, fps=30):
frames = generate_mask_frames(masks, flatten_blend, merge)
clip = ImageSequenceClip(frames, fps=fps)
folder = os.path.dirname(filename)
if folder and not os.path.isdir(folder):
os.makedirs(folder)
clip.write_videofile(filename, fps=fps)
IPython.display.clear_output()
def mask_arcs(size, num_channels, center, radius, period, t, blend=0.0, inwards=False, reverse=False):
blend += 1e-8 # epsilon: keeps the blend ramps finite and avoids a zero division when blend == 0
(w, h), (ctr_x, ctr_y), n = size, center, num_channels
rad = radius * n
mask = np.zeros((h, w, n))
pts = np.array([[[i/(h-1.0),j/(w-1.0)] for j in range(w)] for i in range(h)])
ctr = np.array([[[ctr_y, ctr_x] for j in range(w)] for i in range(h)])
pts -= ctr
dist = (pts[:,:,0]**2 + pts[:,:,1]**2)**0.5
pct = (float(-t if inwards else period+t) / (n * period)) % 1.0
d = (dist + rad * (1.0 - pct)) % rad
for c in range(0, n):
cidx = n-c-1 if (reverse != inwards) else c
x1, x2 = radius * (n-c-1), radius * (n-c)
x1b, x2b = x1 - d, d - x2
dm = np.maximum(0, np.maximum(d-x2, x1-d))  # distance outside the [x1, x2] band
mask[:, :, cidx] = np.clip(1.0-x1b/(blend*radius), 0, 1) * np.clip(1.0-x2b/(blend*radius), 0, 1) if blend > 0 else (dm <= 0) * (dist < radius)
return mask
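# Hedged usage sketch: an animated 3-band concentric ring mask; increasing t
# slides the bands outward, one full cycle every num_channels * period frames.
#   frames = [mask_arcs((256, 256), 3, (0.5, 0.5), radius=0.2, period=30,
#             t=t, blend=0.1) for t in range(90)]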
def mask_rects(size, num_channels, p1, p2, width, period, t, blend=0.0, reverse=False):
p2 = (p2[0] + 1e-8 if p2[0]==p1[0] else p2[0],
p2[1] + 1e-8 if p2[1]==p1[1] else p2[1]) # nudge p2 off p1 to avoid a degenerate zero-length segment
blend += 1e-8 # epsilon: keeps the blend ramps finite and avoids a zero division when blend == 0
(w, h), n = size, num_channels
mask = np.zeros((h, w, n))
length = ((p2[0]-p1[0])**2 + (p2[1]-p1[1])**2)**0.5
m1 = 1e8 if p2[0]==p1[0] else (p2[1] - p1[1]) / (p2[0]-p1[0])
b1 = p1[1] - m1 * p1[0]
m2 = -1.0 / (m1+1e-8) #9e8 if m1==0 else (1e-8 if m1==9e8 else -1.0 / m1)
pts = np.array([[[i/(h-1.0),j/(w-1.0)] for j in range(w)] for i in range(h)])
x1, y1 = pts[:,:,0], pts[:,:,1]
# foot of the perpendicular from each point onto the p1-p2 line
isect = np.zeros(pts.shape)
isect[:,:,0] = (y1 - m2 * x1 - b1) / (m1 - m2)
isect[:,:,1] = m2 * isect[:,:,0] + y1 - m2 * x1
inside = (isect[:,:,0] >= min(p1[0], p2[0])) * (isect[:,:,0] <= max(p1[0], p2[0]))
dist = ((isect[:,:,0]-p1[0])**2 + (isect[:,:,1]-p1[1])**2)**0.5
pts_from_isect = pts - isect
dst_from_isect = ((pts_from_isect[:,:,0])**2 + (pts_from_isect[:,:,1])**2)**0.5
offset = length - length * float(t)/(n*period)
dist = (dist + offset) % length
dist_diag = (dist * inside) / length
rad = 1.0 / n
for r in range(n):
ridx = n-r-1 if reverse else r
t1, t2 = rad * (n-r-1), rad * (n-r)
t1d, t2d = t1 - dist_diag, dist_diag - t2
val = np.clip(1.0-t1d/(blend*rad), 0, 1) * np.clip(1.0-t2d/(blend*rad), 0, 1) if blend > 0 else (dist_diag >= t1)*(dist_diag<t2)
val = val.astype(np.float32)
val *= (dst_from_isect <= width/2.0)  # keep only points within half the rectangle width of the centre line
mask[:, :, ridx] = val
return mask
def mask_identity(size, num_channels):
w, h = size
mask = np.ones((h, w, num_channels))
return mask
def mask_interpolation(size, num_channels, period, t, blend=0.0, reverse=False, cross_fade=True):
(w, h), n = size, num_channels
mask = np.zeros((h, w, n))
idx1 = int(math.floor(t / period) % n) if period > 0 else 0
idx2 = int((idx1 + 1) % n)
if reverse:
idx1 = n-idx1-1
idx2 = n-idx2-1
pct = float(t % period) / period if period > 0 else 0
progress = min(1.0, float(1.0 - pct) / blend) if blend > 0 else (1.0 - pct)
t2 = 1.0 - progress # * progress
t1 = 1.0 - t2 if cross_fade else 1.0
mask[:, :, idx1] = t1
mask[:, :, idx2] = t2
return mask
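# Hedged sketch: with cross_fade=True (the default) the two active channel
# weights always sum to 1, giving a linear cross-fade between adjacent channels.
#   m = mask_interpolation((128, 128), 4, period=30, t=15)
#   assert np.allclose(m.sum(axis=2), 1.0)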
def mask_image_manual(size, num_channels, image, thresholds, blur_k, n_dilations):
(w, h), n = size, num_channels
assert len(thresholds) == n, 'Number of thresholds doesn\'t match number of channels in mask'
mask = np.zeros((h, w, n))
img = load_image(image) if isinstance(image, str) else image
img = resize(img, size)
img = np.array(img)[:, :, ::-1]
img = crop_to_aspect_ratio(img, float(w)/h)
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
#img = cv2.blur(img, (blur_k, blur_k))
cumulative = np.zeros(img.shape[:2]).astype('uint8')
for channel, thresh in enumerate(thresholds):
ret, img1 = cv2.threshold(img, thresh, 255, cv2.THRESH_BINARY_INV)
img1 -= cumulative
cumulative += img1
for d in range(n_dilations):
img1 = cv2.dilate(img1, (2, 2))
ih, iw = img.shape
img1 = cv2.blur(img1, (blur_k, blur_k))
img1 = cv2.resize(img1, (w, h))
mask[:,:,channel] += img1/255.
return mask
def mask_image_auto(size, num_channels, image, blur_k):
(w, h), n = size, num_channels
mask = np.zeros((h, w, n))
img = load_image(image) if isinstance(image, str) else image
img = resize(img, size)
img = np.array(img)[:, :, ::-1]
img = crop_to_aspect_ratio(img, float(w)/h)
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
#img = cv2.blur(img, (blur_k, blur_k))
mask_cumulative = 255 * img.shape[0] * img.shape[1] / num_channels
cumulative = np.zeros(img.shape[0:2]).astype(np.uint8)
thresh, thresholds = 0, []
for channel in range(n):
amt_mask = 0
while amt_mask < mask_cumulative and thresh<=256:
thresh += 1
ret, img1 = cv2.threshold(img, thresh, 255, cv2.THRESH_BINARY_INV)
#img1 = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)
#cumulative = cumulative.reshape((780,1035,1))
img1 -= cumulative
amt_mask = np.sum(img1)
#print(channel, thresh, amt_mask, mask_cumulative)
cumulative += img1
img1 = cv2.blur(img1, (blur_k, blur_k))
img1 = cv2.resize(img1, (w, h))
thresholds.append(thresh)
mask[:,:,channel] += img1/255.
return mask
def mask_image_kmeans(size, num_channels, image, blur_k, n_dilations, prev_mask=None):
(w, h), n = size, num_channels
mask = np.zeros((h, w, n))
img = load_image(image) if isinstance(image, str) else image
img = resize(img, size)
img = np.array(img)[:, :, ::-1]
img = cv2.blur(img, (blur_k, blur_k))
img = crop_to_aspect_ratio(img, float(w)/h)
#img = cv2.resize(img, (w, h), cv2.INTER_NEAREST) # CHANGE
img = np.array(resize(img, (w, h)))
pixels = np.asarray(img).reshape(h * w, 3)
clusters, assign, _ = sklearn.cluster.k_means(pixels, n, init='k-means++', random_state=3425)
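# When a previous mask is supplied, relabel the new clusters with the
# permutation that maximizes per-pixel agreement with the previous frame,
# keeping channel identities stable across frames; otherwise order the
# channels by descending cluster size.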
if prev_mask is not None:
prev_assign = np.argmax(prev_mask, 2).reshape(h * w)
assign_candidates, best_total = list(itertools.permutations(range(n))), -1
for ac in assign_candidates:
reassign = np.array([ac[a] for a in assign])
total = np.sum(reassign == prev_assign)
if total > best_total:
best_total = total
best_assign = reassign
assign = best_assign
else:
amts = [np.sum(assign==c) for c in range(n)]
order = list(reversed(sorted(range(len(amts)), key=lambda k: amts[k])))
reorder = [order.index(i) for i in range(n)]
assign = np.array([reorder[a] for a in assign])
for c in range(n):
channel_mask = (assign == c).astype(float).reshape((h, w))
for d in range(n_dilations):
channel_mask = cv2.dilate(channel_mask, (3, 3))
mask[:,:,c] = channel_mask
return mask
def mask_image_basnet(size, image):
(w, h), n = size, 2
mask = np.zeros((h, w, n))
img = load_image(image) if isinstance(image, str) else image
img = resize(img, size)
img = np.array(img)[:, :, ::-1]
img = crop_to_aspect_ratio(img, float(w)/h)
mask[:,:,0] = basnet.get_foreground(img)[:,:,0]/255.0
mask[:,:,1] = 1.0-mask[:,:,0]
return mask
def get_mask(mask, size=None, t=0):
m = EasyDict(mask)
if size is None and m.type != 'image':
size = m.size if 'size' in m else (512, 512)
m.num_channels = m.num_channels if 'num_channels' in m else 1
m.period = m.period if 'period' in
else:
plt.plot(h_min, c_min, 'k')
plt.plot(h_max, c_max, 'k')
plt.plot(h_cen, c_cen, 'g')
plt.ylabel('Complexity (C)')
plt.xlabel('Entropy (H)')
plt.plot(D.nsent[cnl], D.jscom[cnl], 'o')
for i, c in enumerate(cnl):
plt.annotate(D.clist[c], (D.nsent[c], D.jscom[c]))
plt.show()
def js_complexity(self, dnum=0, cnl=[0], d=6, bins=1, **kwargs):
self.Dlist[dnum].vkind = 'jscom'
cnum = len(self.Dlist[dnum].data) # number of cmp channels
nst = math.factorial(d) # number of possible states
bsize = int(1.0*len(self.Dlist[dnum].data[0,:])/bins)
print('For an accurate estimation of the probability, bsize {:g} should be considerably larger than nst {:g}'.format(bsize, nst))
# data dimension
self.Dlist[dnum].pi = np.zeros((cnum, nst))
self.Dlist[dnum].std = np.zeros((cnum, nst))
self.Dlist[dnum].val = np.zeros(cnum)
for i, c in enumerate(cnl):
x = self.Dlist[dnum].data[c,:]
self.Dlist[dnum].ax, self.Dlist[dnum].pi[c,:], self.Dlist[dnum].std[c,:] = st.bp_prob(x, d, bins)
self.Dlist[dnum].val[c] = st.js_complexity(self.Dlist[dnum].pi[c,:])
def ns_entropy(self, dnum=0, cnl=[0], d=6, bins=1, **kwargs):
self.Dlist[dnum].vkind = 'nsent'
cnum = len(self.Dlist[dnum].data) # number of cmp channels
nst = math.factorial(d) # number of possible states
bsize = int(1.0*len(self.Dlist[dnum].data[0,:])/bins)
print('For an accurate estimation of the probability, bsize {:g} should be considerably larger than nst {:g}'.format(bsize, nst))
# data dimension
self.Dlist[dnum].pi = np.zeros((cnum, nst))
self.Dlist[dnum].std = np.zeros((cnum, nst))
self.Dlist[dnum].val = np.zeros(cnum)
for i, c in enumerate(cnl):
x = self.Dlist[dnum].data[c,:]
self.Dlist[dnum].ax, self.Dlist[dnum].pi[c,:], self.Dlist[dnum].std[c,:] = st.bp_prob(x, d, bins)
self.Dlist[dnum].val[c] = st.ns_entropy(self.Dlist[dnum].pi[c,:])
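# Hedged usage sketch (A is an instance of this analysis class with Dlist
# loaded): the two calls put each channel on the complexity-entropy (C-H)
# plane using Bandt-Pompe probabilities of order d.
#   A.ns_entropy(dnum=0, cnl=[0], d=6)
#   A.js_complexity(dnum=0, cnl=[0], d=6)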
def intermittency(self, dnum=0, cnl=[0], bins=20, overlap=0.2, qstep=0.3, fitrange=[20.0,100.0], verbose=1, **kwargs):
# intermittency parameter from multi-fractal analysis [Carreras PoP 2000]
# this ranges from 0 (mono-fractal) to 1
# add D fitting later
if 'ylimits' in kwargs: ylimits = kwargs['ylimits']
if 'xlimits' in kwargs: xlimits = kwargs['xlimits']
self.Dlist[dnum].vkind = 'intermittency'
pshot = self.Dlist[dnum].shot
cnum = len(self.Dlist[dnum].data) # number of cmp channels
self.Dlist[dnum].intmit = np.zeros(cnum)
for i, c in enumerate(cnl):
t = self.Dlist[dnum].time
x = self.Dlist[dnum].data[c,:]
self.Dlist[dnum].intmit[c] = st.intermittency(t, x, bins, overlap, qstep, fitrange, verbose, **kwargs)
############################# default plot functions ###########################
def mplot(self, dnum=1, cnl=[0], type='time', fig=None, axs=None, show=1, **kwargs):
if 'ylimits' in kwargs: ylimits = kwargs['ylimits']
if 'xlimits' in kwargs: xlimits = kwargs['xlimits']
D = self.Dlist[dnum]
fig, axs = make_axes(len(cnl), ptype='mplot', fig=fig, axs=axs, type=type)
pshot = D.shot
for i, c in enumerate(cnl):
pname = D.clist[c]
# set data
if type == 'time':
pbase = D.time
pdata = D.data[c,:]
elif type == 'val':
vkind = D.vkind
if hasattr(D, 'rname'):
rname = D.rname[c]
else:
rname = ''
# set data
if vkind in ['skewness','kurtosis']:
pdata = D.data[c,:]
elif vkind == 'hurst':
pdata = D.ers[c,:]
elif vkind in ['jscom','nsent']:
pdata = D.pi[c,:]
else:
pdata = D.val[c,:].real
# set base
if vkind in ['correlation','corr_coef']:
pbase = D.ax*1e6
elif vkind in ['cross_power','coherence','cross_phase','bicoherence']:
pbase = D.ax/1000
elif vkind in ['skewness','kurtosis']:
pbase = D.time
else:
pbase = D.ax
if type == 'time':
axs[i].plot(pbase, pdata, label='#{:d}, {:s}, [{:g},{:g}]'.format(pshot, pname, D.time[0]*1000, D.time[-1]*1000)) # plot
elif type == 'val':
axs[i].plot(pbase, pdata, '-x', label='#{:d}, {:s}-{:s}, [{:g},{:g}]'.format(pshot, rname, pname, D.time[0]*1000, D.time[-1]*1000)) # plot
if show:
axs[i].legend(fontsize='x-small')
# aux plot
if type == 'val':
if vkind == 'coherence':
axs[i].axhline(y=1/np.sqrt(D.bins), color='r')
elif vkind == 'hurst':
axs[i].plot(pbase, D.fit[c,:], 'r')
elif vkind in ['correlation','corr_coef']:
hdata = signal.hilbert(pdata)
axs[i].plot(pbase, np.abs(hdata), '--')
# xy limits
if 'ylimits' in kwargs: # ylimits
axs[i].set_ylim([ylimits[0], ylimits[1]])
if 'xlimits' in kwargs: # xlimits
axs[i].set_xlim([xlimits[0], xlimits[1]])
else:
axs[i].set_xlim([pbase[0], pbase[-1]])
# title
chpos = '({:.1f}, {:.1f})'.format(D.rpos[c]*100, D.zpos[c]*100) # [cm]
if type == 'time':
axs[i].set_title('#{:d} \n {:s} {:s}'.format(pshot, pname, chpos), fontsize=8)
elif type == 'val':
if vkind in ['skewness','kurtosis','hurst','jscom','nsent']:
axs[i].set_title('#{:d} \n {:s} {:s} \n {:s} = {:g}'.format(pshot, pname, chpos, vkind, D.val[c]), fontsize=8)
else:
axs[i].set_title('#{:d} \n {:s}-{:s} \n {:s}'.format(pshot, rname, pname, chpos), fontsize=8)
# xy scale
if type == 'val':
if vkind in ['hurst']:
axs[i].set_xscale('log')
if vkind in ['cross_power','hurst','jscom','nsent']:
axs[i].set_yscale('log')
# xy label
if type == 'time':
axs[i].set_xlabel('Time [s]')
axs[i].set_ylabel('Signal')
elif type == 'val':
if vkind in ['cross_power','coherence','cross_phase','bicoherence']:
axs[i].set_xlabel('Frequency [kHz]')
axs[i].set_ylabel(vkind)
elif vkind == 'hurst':
axs[i].set_xlabel('Time lag [us]')
axs[i].set_ylabel('R/S')
elif vkind in ['jscom','nsent']:
axs[i].set_xlabel('order number')
axs[i].set_ylabel('BP probability')
elif vkind in ['correlation','corr_coef']:
axs[i].set_xlabel('Time lag [us]')
axs[i].set_ylabel(vkind)
else:
axs[i].set_xlabel('Time [s]')
axs[i].set_ylabel('Signal')
if show == 1:
# fig.tight_layout(w_pad=0.3, h_pad=0.3) # not working properly in OMFIT. :(
plt.show()
return fig, axs
def cplot(self, dnum, snum=0, frange=[0, 100], vlimits=None, fig=None, axs=None, show=1, **kwargs):
if 'ylimits' in kwargs: ylimits = kwargs['ylimits']
if 'xlimits' in kwargs: xlimits = kwargs['xlimits']
# calculate summed coherence image
# or cross power rms image
# or group velocity image
D = self.Dlist[dnum]
vkind = D.vkind
# sample plot data
if vkind in ['cross_power','coherence','cross_phase','bicoherence']:
# axis
sbase = D.ax/1000 # [kHz]
# fidx
idx = np.where((sbase >= frange[0])*(sbase <= frange[1]))
idx1 = int(idx[0][0])
idx2 = int(idx[0][-1]+1)
sdata = D.val[snum,:]
elif vkind == 'hurst':
sbase = D.ax
sdata = D.ers[snum,:]
elif vkind in ['jscom','nsent']:
sbase = D.ax
sdata = D.pi[snum,:]
else:
sbase = D.time
sdata = D.data[snum,:]
# calculate pdata
if vkind == 'cross_power': # rms
pdata = np.sqrt(np.sum(D.val[:,idx1:idx2], 1))
elif vkind == 'coherence': # summed coherence
pdata = np.sum(D.val[:,idx1:idx2], 1)
elif vkind == 'cross_phase': # phase velocity
cnum = len(D.val)
base = D.ax[idx1:idx2] # [Hz]
pdata = np.zeros(cnum)
phase_v = np.zeros((cnum, len(base)-1))
for c in range(cnum):
data = D.val[c,idx1:idx2]
# pfit = np.polyfit(base, data, 1)
# fitdata = np.polyval(pfit, base)
# chisq = np.sum((data - fitdata)**2)
# if c == snum:
# fbase = base/1000 # [kHz]
# fdata = fitdata
# pdata[c] = 2*np.pi*D.dist[c]/pfit[0]/1000.0 # [km/s]
phase_v[c,:] = 2*np.pi*D.dist[c]/(data[1:]/base[1:])/1000.0 # [km/s]
pdata[c] = np.mean(phase_v[c,:])
D.phase_v = phase_v
else:
pdata = D.val
# remove not finite values
pidx = np.isfinite(pdata)
pdata[~pidx] = 0
# save results
D.pdata = pdata
if show == 1:
# make axes
fig, axs = make_axes(len(D.clist), ptype='cplot', fig=fig, axs=axs)
# get position to plot
rpos = D.rpos[:]
zpos = D.zpos[:]
# sample plot
axs[0].plot(sbase, sdata) # ax1.hold(True)
# if vkind == 'cross_phase':
# axs[0].plot(fbase, fdata)
if vkind in ['cross_power','coherence','cross_phase']:
axs[0].axvline(x=sbase[idx1], color='g')
axs[0].axvline(x=sbase[idx2], color='g')
if vkind in ['hurst']:
axs[0].set_xscale('log')
if vkind in ['cross_power','hurst','jscom','nsent']:
axs[0].set_yscale('log')
if 'ylimits' in kwargs: # ylimits
axs[0].set_ylim([ylimits[0], ylimits[1]])
if 'xlimits' in kwargs: # xlimits
axs[0].set_xlim([xlimits[0], xlimits[1]])
else:
axs[0].set_xlim([sbase[0], sbase[-1]])
if vkind in ['cross_power','coherence','cross_phase','bicoherence']:
axs[0].set_xlabel('Frequency [kHz]')
axs[0].set_ylabel(vkind)
elif vkind == 'hurst':
axs[0].set_xlabel('Time lag [us]')
axs[0].set_ylabel('R/S')
elif vkind in ['jscom','nsent']:
axs[0].set_xlabel('order number')
axs[0].set_ylabel('BP probability')
else:
axs[0].set_xlabel('Time [s]')
axs[0].set_ylabel('Signal')
# pdata plot
if vlimits is None:
vlimits = [np.mean(pdata) - np.std(pdata), np.mean(pdata) + np.std(pdata)]
sc = axs[1].scatter(rpos, zpos, 250, pdata, marker='s', vmin=vlimits[0], vmax=vlimits[1], cmap=CM)
axs[1].set_aspect('equal')
axs[1].margins(0.01)
# color bar
fig.colorbar(sc, cax=axs[2])
axs[1].set_xlabel('R [m]')
axs[1].set_ylabel('z [m]')
if vkind == 'cross_power':
axs[1].set_title('RMS')
elif vkind == 'coherence':
axs[1].set_title('Coherence sum')
elif vkind == 'cross_phase':
axs[1].set_title('Phase velocity [km/s]')
else:
axs[1].set_title(vkind)
# fig.tight_layout(w_pad=0.3, h_pad=0.3) # not working properly in OMFIT. :(
plt.show()
def spec(self, dnum=0, cnl=[0], nfft=512, **kwargs):
if 'flimits' in kwargs: flimits = kwargs['flimits']
if 'xlimits' in kwargs: xlimits = kwargs['xlimits']
fs = self.Dlist[dnum].fs
nov = int(nfft*0.9)  # noverlap must be an integer for specgram
for c in cnl:
pshot = self.Dlist[dnum].shot
pname = self.Dlist[dnum].clist[c]
pbase = self.Dlist[dnum].time
pdata = self.Dlist[dnum].data[c,:]
pxx, freq, time, cax = plt.specgram(pdata, NFFT=nfft, Fs=fs, noverlap=nov,
xextent=[pbase[0], pbase[-1]], cmap=CM, detrend='mean') # spectrum
maxP = math.log(np.amax(pxx),10)*10
minP = math.log(np.amin(pxx),10)*10
dP = maxP - minP
plt.clim([minP+dP*0.55, maxP])
plt.colorbar(cax)
if 'flimits' in kwargs: # flimits
plt.ylim([flimits[0]*1000, flimits[1]*1000])
if 'xlimits' in kwargs: # xlimits
plt.xlim([xlimits[0], xlimits[1]])
else:
plt.xlim([pbase[0], pbase[-1]])
plt.title(pname, fontsize=10) # labeling
plt.xlabel('Time [s]')
plt.ylabel('Frequency [Hz]')
plt.show()
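# Hedged usage sketch (A is an instance with channel data loaded): a
# spectrogram of channel 0, restricted to 0-50 kHz (flimits are in kHz).
#   A.spec(dnum=0, cnl=[0], nfft=1024, flimits=[0, 50])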
def iplot(self, dnum, snum=0, c=None, type='time', vlimits=[-0.1, 0.1], istep=0.002, imethod='cubic', bcut=0.03, pmethod='contour', fig=None, axs=None, **kwargs):
# keyboard interactive image plot
D = self.Dlist[dnum]
if type == 'time':
pbase = D.time
elif type == 'val':
pbase = D.ax*1e+6
vkind = D.vkind
CM = plt.cm.get_cmap('RdYlBu_r')
if c is None:
c = int(input('automatic, or manual [0,1]: '))
tidx1 = 0 # starting index
if c == 0:
# make axes
fig, axs = make_axes(len(D.clist), ptype='iplot', fig=fig, axs=axs)
tstep = int(input('time step [idx]: ')) # jumping index # tstep = 10
for tidx in range(tidx1, len(pbase), tstep):
# take data and channel position
if type == 'time':
pdata = D.data[:,tidx]
psample = D.data[snum,:]
elif type == 'val':
pdata = D.val[:,tidx]
psample = D.val[snum,:]
rpos = D.rpos[:]
zpos = D.zpos[:]
# fill bad channel
pdata = ms.fill_bad_channel(pdata, rpos, zpos, D.good_channels, bcut)
# interpolation
if istep > 0:
ri, zi, pi = ms.interp_pdata(pdata, rpos, zpos, istep, imethod)
# plot
axs[0].cla()
axs[1].cla()
axs[2].cla()
plt.ion()
axs[0].plot(pbase,
'fastd-enabled': False, 'adx-enabled': False, 'rsi-enabled': False, 'trigger': 'macd_cross_signal'}, 'sell': {'sell-mfi-value': 98, 'sell-fastd-value': 72, 'sell-adx-value': 51, 'sell-rsi-value': 82, 'sell-mfi-enabled': True, 'sell-fastd-enabled': True, 'sell-adx-enabled': True, 'sell-rsi-enabled': True, 'sell-trigger': 'sell-macd_cross_signal'}, 'roi': {0: 0.6067528326109377, 263: 0.19647265040047726, 796: 0.04759065393663096, 1685: 0}, 'stoploss': {'stoploss': -0.05394588767607611}}, # noqa: E501
'results_metrics': {'total_trades': 621, 'wins': 320, 'draws': 0, 'losses': 301, 'profit_mean': -0.043883302093397747, 'profit_median': -0.012222, 'profit_total': -0.13639474, 'profit_total_abs': -272.515306, 'holding_avg': timedelta(minutes=1691.207729468599)}, # noqa: E501
'results_explanation': ' 621 trades. Avg profit -0.44%. Total profit -0.13639474 BTC (-272.52Σ%). Avg duration 1691.2 min.', # noqa: E501
'total_profit': -0.13639474,
'current_epoch': 3,
'is_initial_point': True,
'is_best': False
}, {
'loss': 100000,
'params_dict': {'mfi-value': 13, 'fastd-value': 35, 'adx-value': 39, 'rsi-value': 29, 'mfi-enabled': True, 'fastd-enabled': False, 'adx-enabled': False, 'rsi-enabled': True, 'trigger': 'macd_cross_signal', 'sell-mfi-value': 87, 'sell-fastd-value': 54, 'sell-adx-value': 63, 'sell-rsi-value': 93, 'sell-mfi-enabled': False, 'sell-fastd-enabled': True, 'sell-adx-enabled': True, 'sell-rsi-enabled': True, 'sell-trigger': 'sell-bb_upper', 'roi_t1': 1402, 'roi_t2': 676, 'roi_t3': 215, 'roi_p1': 0.06264755784937427, 'roi_p2': 0.14258587851894644, 'roi_p3': 0.20671291201040828, 'stoploss': -0.11818343570194478}, # noqa: E501
'params_details': {'buy': {'mfi-value': 13, 'fastd-value': 35, 'adx-value': 39, 'rsi-value': 29, 'mfi-enabled': True, 'fastd-enabled': False, 'adx-enabled': False, 'rsi-enabled': True, 'trigger': 'macd_cross_signal'}, 'sell': {'sell-mfi-value': 87, 'sell-fastd-value': 54, 'sell-adx-value': 63, 'sell-rsi-value': 93, 'sell-mfi-enabled': False, 'sell-fastd-enabled': True, 'sell-adx-enabled': True, 'sell-rsi-enabled': True, 'sell-trigger': 'sell-bb_upper'}, 'roi': {0: 0.411946348378729, 215: 0.2052334363683207, 891: 0.06264755784937427, 2293: 0}, 'stoploss': {'stoploss': -0.11818343570194478}}, # noqa: E501
'results_metrics': {'total_trades': 0, 'wins': 0, 'draws': 0, 'losses': 0, 'profit_mean': None, 'profit_median': None, 'profit_total': 0, 'profit': 0.0, 'holding_avg': timedelta()}, # noqa: E501
'results_explanation': ' 0 trades. Avg profit nan%. Total profit 0.00000000 BTC ( 0.00Σ%). Avg duration nan min.', # noqa: E501
'total_profit': 0, 'current_epoch': 4, 'is_initial_point': True, 'is_best': False
}, {
'loss': 0.22195522184191518,
'params_dict': {'mfi-value': 17, 'fastd-value': 21, 'adx-value': 38, 'rsi-value': 33, 'mfi-enabled': True, 'fastd-enabled': False, 'adx-enabled': True, 'rsi-enabled': False, 'trigger': 'macd_cross_signal', 'sell-mfi-value': 87, 'sell-fastd-value': 82, 'sell-adx-value': 78, 'sell-rsi-value': 69, 'sell-mfi-enabled': True, 'sell-fastd-enabled': False, 'sell-adx-enabled': True, 'sell-rsi-enabled': False, 'sell-trigger': 'sell-macd_cross_signal', 'roi_t1': 1269, 'roi_t2': 601, 'roi_t3': 444, 'roi_p1': 0.07280999507931168, 'roi_p2': 0.08946698095898986, 'roi_p3': 0.1454876733325284, 'stoploss': -0.18181041180901014}, # noqa: E501
'params_details': {'buy': {'mfi-value': 17, 'fastd-value': 21, 'adx-value': 38, 'rsi-value': 33, 'mfi-enabled': True, 'fastd-enabled': False, 'adx-enabled': True, 'rsi-enabled': False, 'trigger': 'macd_cross_signal'}, 'sell': {'sell-mfi-value': 87, 'sell-fastd-value': 82, 'sell-adx-value': 78, 'sell-rsi-value': 69, 'sell-mfi-enabled': True, 'sell-fastd-enabled': False, 'sell-adx-enabled': True, 'sell-rsi-enabled': False, 'sell-trigger': 'sell-macd_cross_signal'}, 'roi': {0: 0.3077646493708299, 444: 0.16227697603830155, 1045: 0.07280999507931168, 2314: 0}, 'stoploss': {'stoploss': -0.18181041180901014}}, # noqa: E501
'results_metrics': {'total_trades': 14, 'wins': 6, 'draws': 0, 'losses': 8, 'profit_mean': -0.003539515, 'profit_median': -0.012222, 'profit_total': -0.002480140000000001, 'profit_total_abs': -4.955321, 'holding_avg': timedelta(minutes=3402.8571428571427)}, # noqa: E501
'results_explanation': ' 14 trades. Avg profit -0.35%. Total profit -0.00248014 BTC ( -4.96Σ%). Avg duration 3402.9 min.', # noqa: E501
'total_profit': -0.002480140000000001,
'current_epoch': 5,
'is_initial_point': True,
'is_best': True
}, {
'loss': 0.545315889154162,
'params_dict': {'mfi-value': 22, 'fastd-value': 43, 'adx-value': 46, 'rsi-value': 20, 'mfi-enabled': False, 'fastd-enabled': False, 'adx-enabled': True, 'rsi-enabled': True, 'trigger': 'bb_lower', 'sell-mfi-value': 87, 'sell-fastd-value': 65, 'sell-adx-value': 94, 'sell-rsi-value': 63, 'sell-mfi-enabled': False, 'sell-fastd-enabled': True, 'sell-adx-enabled': True, 'sell-rsi-enabled': True, 'sell-trigger': 'sell-macd_cross_signal', 'roi_t1': 319, 'roi_t2': 556, 'roi_t3': 216, 'roi_p1': 0.06251955472249589, 'roi_p2': 0.11659519602202795, 'roi_p3': 0.0953744132197762, 'stoploss': -0.024551752215582423}, # noqa: E501
'params_details': {'buy': {'mfi-value': 22, 'fastd-value': 43, 'adx-value': 46, 'rsi-value': 20, 'mfi-enabled': False, 'fastd-enabled': False, 'adx-enabled': True, 'rsi-enabled': True, 'trigger': 'bb_lower'}, 'sell': {'sell-mfi-value': 87, 'sell-fastd-value': 65, 'sell-adx-value': 94, 'sell-rsi-value': 63, 'sell-mfi-enabled': False, 'sell-fastd-enabled': True, 'sell-adx-enabled': True, 'sell-rsi-enabled': True, 'sell-trigger': 'sell-macd_cross_signal'}, 'roi': {0: 0.2744891639643, 216: 0.17911475074452382, 772: 0.06251955472249589, 1091: 0}, 'stoploss': {'stoploss': -0.024551752215582423}}, # noqa: E501
'results_metrics': {'total_trades': 39, 'wins': 20, 'draws': 0, 'losses': 19, 'profit_mean': -0.0021400679487179478, 'profit_median': -0.012222, 'profit_total': -0.0041773, 'profit_total_abs': -8.346264999999997, 'holding_avg': timedelta(minutes=636.9230769230769)}, # noqa: E501
'results_explanation': ' 39 trades. Avg profit -0.21%. Total profit -0.00417730 BTC ( -8.35Σ%). Avg duration 636.9 min.', # noqa: E501
'total_profit': -0.0041773,
'current_epoch': 6,
'is_initial_point': True,
'is_best': False
}, {
'loss': 4.713497421432944,
'params_dict': {'mfi-value': 13, 'fastd-value': 41, 'adx-value': 21, 'rsi-value': 29, 'mfi-enabled': False, 'fastd-enabled': True, 'adx-enabled': False, 'rsi-enabled': False, 'trigger': 'bb_lower', 'sell-mfi-value': 99, 'sell-fastd-value': 60, 'sell-adx-value': 81, 'sell-rsi-value': 69, 'sell-mfi-enabled': True, 'sell-fastd-enabled': True, 'sell-adx-enabled': True, 'sell-rsi-enabled': False, 'sell-trigger': 'sell-macd_cross_signal', 'roi_t1': 771, 'roi_t2': 620, 'roi_t3': 145, 'roi_p1': 0.0586919200378493, 'roi_p2': 0.04984118697312542, 'roi_p3': 0.37521058680247044, 'stoploss': -0.14613268022709905}, # noqa: E501
'params_details': {
'buy': {'mfi-value': 13, 'fastd-value': 41, 'adx-value': 21, 'rsi-value': 29, 'mfi-enabled': False, 'fastd-enabled': True, 'adx-enabled': False, 'rsi-enabled': False, 'trigger': 'bb_lower'}, 'sell': {'sell-mfi-value': 99, 'sell-fastd-value': 60, 'sell-adx-value': 81, 'sell-rsi-value': 69, 'sell-mfi-enabled': True, 'sell-fastd-enabled': True, 'sell-adx-enabled': True, 'sell-rsi-enabled': False, 'sell-trigger': 'sell-macd_cross_signal'}, 'roi': {0: 0.4837436938134452, 145: 0.10853310701097472, 765: 0.0586919200378493, 1536: 0}, # noqa: E501
'stoploss': {'stoploss': -0.14613268022709905}}, # noqa: E501
'results_metrics': {'total_trades': 318, 'wins': 100, 'draws': 0, 'losses': 218, 'profit_mean': -0.0039833954716981146, 'profit_median': -0.012222, 'profit_total': -0.06339929, 'profit_total_abs': -126.67197600000004, 'holding_avg': timedelta(minutes=3140.377358490566)}, # noqa: E501
'results_explanation': ' 318 trades. Avg profit -0.40%. Total profit -0.06339929 BTC (-126.67Σ%). Avg duration 3140.4 min.', # noqa: E501
'total_profit': -0.06339929,
'current_epoch': 7,
'is_initial_point': True,
'is_best': False
}, {
'loss': 20.0, # noqa: E501
'params_dict': {'mfi-value': 24, 'fastd-value': 43, 'adx-value': 33, 'rsi-value': 20, 'mfi-enabled': False, 'fastd-enabled': True, 'adx-enabled': True, 'rsi-enabled': True, 'trigger': 'sar_reversal', 'sell-mfi-value': 89, 'sell-fastd-value': 74, 'sell-adx-value': 70, 'sell-rsi-value': 70, 'sell-mfi-enabled': False, 'sell-fastd-enabled': False, 'sell-adx-enabled': False, 'sell-rsi-enabled': True, 'sell-trigger': 'sell-sar_reversal', 'roi_t1': 1149, 'roi_t2': 375, 'roi_t3': 289, 'roi_p1': 0.05571820757172588, 'roi_p2': 0.0606240398618907, 'roi_p3': 0.1729012220156157, 'stoploss': -0.1588514289110401}, # noqa: E501
'params_details': {'buy': {'mfi-value': 24, 'fastd-value': 43, 'adx-value': 33, 'rsi-value': 20, 'mfi-enabled': False, 'fastd-enabled': True, 'adx-enabled': True, 'rsi-enabled': True, 'trigger': 'sar_reversal'}, 'sell': {'sell-mfi-value': 89, 'sell-fastd-value': 74, 'sell-adx-value': 70, 'sell-rsi-value': 70, 'sell-mfi-enabled': False, 'sell-fastd-enabled': False, 'sell-adx-enabled': False, 'sell-rsi-enabled': True, 'sell-trigger': 'sell-sar_reversal'}, 'roi': {0: 0.2892434694492323, 289: 0.11634224743361658, 664: 0.05571820757172588, 1813: 0}, 'stoploss': {'stoploss': -0.1588514289110401}}, # noqa: E501
'results_metrics': {'total_trades': 1, 'wins': 0, 'draws': 1, 'losses': 0, 'profit_mean': 0.0, 'profit_median': 0.0, 'profit_total': 0.0, 'profit_total_abs': 0.0, 'holding_avg': timedelta(minutes=5340.0)}, # noqa: E501
'results_explanation': ' 1 trades. Avg profit 0.00%. Total profit 0.00000000 BTC ( 0.00Σ%). Avg duration 5340.0 min.', # noqa: E501
'total_profit': 0.0,
'current_epoch': 8,
'is_initial_point': True,
'is_best': False
}, {
'loss': 2.4731817780991223,
'params_dict': {'mfi-value': 22, 'fastd-value': 20, 'adx-value': 29, 'rsi-value': 40, 'mfi-enabled': False, 'fastd-enabled': False, 'adx-enabled': False, 'rsi-enabled': False, 'trigger': 'sar_reversal', 'sell-mfi-value': 97, 'sell-fastd-value': 65, 'sell-adx-value': 81, 'sell-rsi-value': 64, 'sell-mfi-enabled': True, 'sell-fastd-enabled': True, 'sell-adx-enabled': True, 'sell-rsi-enabled': True, 'sell-trigger': 'sell-bb_upper', 'roi_t1': 1012, 'roi_t2': 584, 'roi_t3': 422, 'roi_p1': 0.036764323603472565, 'roi_p2': 0.10335480573205287, 'roi_p3': 0.10322347377503042, 'stoploss': -0.2780610808108503}, # noqa: E501
'params_details': {'buy': {'mfi-value': 22, 'fastd-value': 20, 'adx-value': 29, 'rsi-value': 40, 'mfi-enabled': False, 'fastd-enabled': False, 'adx-enabled': False, 'rsi-enabled': False, 'trigger': 'sar_reversal'}, 'sell': {'sell-mfi-value': 97, 'sell-fastd-value': 65, 'sell-adx-value': 81, 'sell-rsi-value': 64, 'sell-mfi-enabled': True, 'sell-fastd-enabled': True, 'sell-adx-enabled': True, 'sell-rsi-enabled': True, 'sell-trigger': 'sell-bb_upper'}, 'roi': {0: 0.2433426031105559, 422: 0.14011912933552545, 1006: 0.036764323603472565, 2018: 0}, 'stoploss': {'stoploss': -0.2780610808108503}}, # noqa: E501
'results_metrics': {'total_trades': 229, 'wins': 150, 'draws': 0, 'losses': 79, 'profit_mean': -0.0038433433624454144, 'profit_median': -0.012222, 'profit_total': -0.044050070000000004, 'profit_total_abs': -88.01256299999999, 'holding_avg': timedelta(minutes=6505.676855895196)}, # noqa: E501
'results_explanation': ' 229 trades. Avg profit -0.38%. Total profit -0.04405007 BTC ( -88.01Σ%). Avg duration 6505.7 min.', # noqa: E501
'total_profit': -0.044050070000000004, # noqa: E501
'current_epoch': 9,
'is_initial_point': True,
'is_best': False
}, {
'loss': -0.2604606005845212, # noqa: E501
'params_dict': {'mfi-value': 23, 'fastd-value': 24, 'adx-value': 22, 'rsi-value': 24, 'mfi-enabled': False, 'fastd-enabled': False, 'adx-enabled': False, 'rsi-enabled': True, 'trigger': 'macd_cross_signal', 'sell-mfi-value': 97, 'sell-fastd-value': 70, 'sell-adx-value': 64, 'sell-rsi-value': 80, 'sell-mfi-enabled': False, 'sell-fastd-enabled': True, 'sell-adx-enabled': True, 'sell-rsi-enabled': True, 'sell-trigger': 'sell-sar_reversal', 'roi_t1': 792, 'roi_t2': 464, 'roi_t3': 215, 'roi_p1': 0.04594053535385903, 'roi_p2': 0.09623192684243963, 'roi_p3': 0.04428219070850663, 'stoploss': -0.16992287161634415}, # noqa: E501
'params_details': {'buy': {'mfi-value': 23, 'fastd-value': 24, 'adx-value': 22, 'rsi-value': 24, 'mfi-enabled': False, 'fastd-enabled': False, 'adx-enabled': False, 'rsi-enabled': True, 'trigger': 'macd_cross_signal'}, 'sell': {'sell-mfi-value': 97, 'sell-fastd-value': 70, 'sell-adx-value': 64, 'sell-rsi-value': 80, 'sell-mfi-enabled': False, 'sell-fastd-enabled': True, 'sell-adx-enabled': True, 'sell-rsi-enabled': True, 'sell-trigger': 'sell-sar_reversal'}, 'roi': {0: 0.18645465290480528, 215: 0.14217246219629864, 679: 0.04594053535385903, 1471: 0}, 'stoploss': {'stoploss': -0.16992287161634415}}, # noqa: E501
'results_metrics': {'total_trades': 4, 'wins': 0, 'draws': 0, 'losses': 4, 'profit_mean': 0.001080385, 'profit_median': -0.012222, 'profit_total': 0.00021629, 'profit_total_abs': 0.432154, 'holding_avg': timedelta(minutes=2850.0)}, # noqa: E501
'results_explanation': ' 4 trades. Avg profit 0.11%. Total profit 0.00021629 BTC ( 0.43Σ%). Avg duration 2850.0 min.', # noqa: E501
'total_profit': 0.00021629,
'current_epoch': 10,
'is_initial_point': True,
'is_best': True
}, {
'loss': 4.876465945994304, # noqa: E501
'params_dict': {'mfi-value': 20, 'fastd-value': 32, 'adx-value': 49, 'rsi-value': 23, 'mfi-enabled': True, 'fastd-enabled': True, 'adx-enabled': False, 'rsi-enabled': False, 'trigger': 'bb_lower', 'sell-mfi-value': 75, 'sell-fastd-value': 56, 'sell-adx-value': 61, 'sell-rsi-value': 62, 'sell-mfi-enabled': False, 'sell-fastd-enabled': False, 'sell-adx-enabled': True, 'sell-rsi-enabled': True, 'sell-trigger':
# (C) Copyright 2021- ECMWF.
#
# This software is licensed under the terms of the Apache Licence Version 2.0
# which can be obtained at http://www.apache.org/licenses/LICENSE-2.0.
# In applying this licence, ECMWF does not waive the privileges and immunities
# granted to it by virtue of its status as an intergovernmental organisation
# nor does it submit to any jurisdiction.
#
import os
import Magics.macro as magics
from . import data, macro
from .action import action
ARROW_STYLES = ["angle", "triangle", "triangle2", "triangle3"]
class GeoMap:
"""
Class for designing and plotting geospatial maps.
Parameters
----------
area_name : str, optional
The name of an area (e.g. 'europe') with a pre-configured projection
and extent - see examples for a sample of valid area names.
projection : str, optional
The name of the map projection to use for this map. See examples for
a sample of valid projections.
extent : tuple, optional
A four-element list/tuple containing the latitude and longitude
extents to use in the map. These must be provided in the order: lower-
left latitude, lower-left longitude, upper-right latitude, upper-right
longitude.
"""
def __init__(self, *args, **kwargs):
self._sources = []
self.queue = []
self._map(*args, **kwargs)
self._show_legend = False
def register(self, item):
if item.__class__.__name__ == "page":
self.queue = [self.queue[0]] + [item] + self.queue[1:]
else:
self.queue.append(item)
def _map(self, *args, **kwargs):
extent = kwargs.pop("extent", None)
if extent is not None:
(
kwargs["lower_left_lat"],
kwargs["lower_left_lon"],
kwargs["upper_right_lat"],
kwargs["upper_right_lon"],
) = extent
self.__map(*args, **kwargs)
@action(
macro.mmap,
{
"page_id_line": False,
"subpage_map_area_name": {"subpage_map_library_area": True},
"subpage_expand_mode": True,
"subpage_clipping": True,
},
area_name="subpage_map_area_name",
projection="subpage_map_projection",
lower_left_lat="subpage_lower_left_latitude",
lower_left_lon="subpage_lower_left_longitude",
upper_right_lat="subpage_upper_right_latitude",
upper_right_lon="subpage_upper_right_longitude",
)
def __map(self, *args, **kwargs):
pass
@action(
macro.page,
{
"page_id_line": True,
"page_id_line_logo_name": {"page_id_line_logo_plot": True},
"page_id_line_logo_plot": False,
"page_id_line_system_plot": False,
"page_id_line_date_plot": False,
"page_id_line_magics": False,
"page_id_line_errors_plot": False,
"page_id_line_colour": "charcoal",
},
text="page_id_line_user_text",
font="page_id_line_font",
font_style="page_id_line_font_style",
font_size="page_id_line_height",
font_colour="page_id_line_colour",
logo="page_id_line_logo_name",
datestamp="page_id_line_date_plot",
)
def footer(self, *args, **kwargs):
"""
Add a footer to the bottom of the plot, containing text and/or a logo.
Parameters
----------
text : str, optional
A string of text to include in the footer.
font : str, default='sansserif'
The name of the font to use for footer text.
font_style : str, default='normal'
Style options for the footer font, e.g. `'bold'`.
font_size : float, optional
The font size to use, in cm.
font_colour : str, default='charcoal'
Either a hexadecimal colour or a named colour to use for the
footer font.
logo : bool or str, optional
The name of an organisation or project whose logo should be added
to the footer. Must be one of `'ecmwf'`, `'c3s'` or `'cams'` - or
`False` if no logo should be included (default).
datestamp : bool, optional
If `True`, the date and time at which the map was generated will
be included in the footer text.
"""
pass
@action(
macro.mcoast,
{
"map_rivers": True,
"map_coastline": True,
"map_coastline_thickness": 0,
"map_grid": False,
"map_label": False,
},
resolution="map_coastline_resolution",
line_colour="map_rivers_colour",
line_style="map_rivers_style",
line_thickness="map_rivers_thickness",
)
def rivers(self, *args, **kwargs):
"""
Add rivers to the map.
Parameters
----------
resolution : str, default='low'
The resolution of the rivers to be included in the map. Must be
one of `'low'`, `'medium'` or `'high'`.
line_colour : str, default='blue'
Either a hexadecimal colour or a named colour to use for the
river lines.
line_style : str, default='solid'
One of `'solid'`, `'dash'`, `'dot'`, `'chain_dash'`, or
`'chain_dot'`.
line_thickness : float, default=1.0
The thickness of the river lines.
"""
pass
@action(
macro.mcoast,
{
"map_coastline": True,
"map_grid": False,
"map_label": False,
"map_coastline_land_shade_colour": {
"map_coastline_land_shade": True,
},
"map_coastline_sea_shade_colour": {
"map_coastline_sea_shade": True,
},
},
resolution="map_coastline_resolution",
line_colour="map_coastline_colour",
line_style="map_coastline_style",
line_thickness="map_coastline_thickness",
land_colour="map_coastline_land_shade_colour",
ocean_colour="map_coastline_sea_shade_colour",
)
def coastlines(self, *args, **kwargs):
"""
Add coastlines to the map.
Parameters
----------
resolution : str, default='low'
The resolution of the coastlines to be included in the map. Must
be one of `'low'`, `'medium'` or `'high'`.
line_colour : str, default='black'
Either a hexadecimal colour or a named colour to use for the
coastlines.
line_style : str, default='solid'
One of `'solid'`, `'dash'`, `'dot'`, `'chain_dash'`, or
`'chain_dot'`.
line_thickness : float, default=1.0
The thickness of the coastlines.
land_colour : str, optional
Either a hexadecimal colour or a named colour to use for the fill
colour of areas within coastline polygons (i.e. land).
ocean_colour : str, optional
Either a hexadecimal colour or a named colour to use for the fill
colour of areas outside coastlines (i.e. oceans).
"""
pass
@action(
macro.mcoast,
{
"map_coastline": False,
"map_grid": True,
},
lat_frequency="map_grid_latitude_increment",
lon_frequency="map_grid_longitude_increment",
lat_reference="map_grid_latitude_reference",
lon_reference="map_grid_longitude_reference",
line_colour="map_grid_colour",
line_style="map_grid_line_style",
line_thickness="map_grid_thickness",
labels="map_label",
label_font="map_label_font_style",
label_font_size="map_label_height",
label_font_colour="map_label_colour",
label_latitude_frequency="map_label_latitude_frequency",
label_longitude_frequency="map_label_longitude_frequency",
label_top_edge="map_label_top",
label_bottom_edge="map_label_bottom",
label_left_edge="map_label_left",
label_right_edge="map_label_right",
)
def gridlines(self, *args, **kwargs):
"""
Add gridlines to the map.
Parameters
----------
lat_frequency : float, default=10
The interval in degrees of latitude between each latitude grid
line.
lon_frequency : float, default=20
The interval in degrees of longitude between each longitude grid
line.
lat_reference : float, default=0
The reference/starting latitude from which to begin drawing
latitude lines at a frequency given by `lat_frequency`.
lon_reference : float, default=0
The reference/starting longitude from which to begin drawing
longitude lines at a frequency given by `lon_frequency`.
line_colour : str, default='blue'
Either a hexadecimal colour or a named colour to use for the
grid lines.
line_style : str, default='solid'
One of `'solid'`, `'dash'`, `'dot'`, `'chain_dash'`, or
`'chain_dot'`.
line_thickness : float, default=1.0
The thickness of the grid lines.
labels : bool, optional
If `True`, gridlines will be given latitude and longitude labels.
label_font : str, default='sansserif'
The name of the font to use for gridline label text.
label_font_size : float, optional
The font size to use, in cm.
label_font_colour : str, default='charcoal'
Either a hexadecimal colour or a named colour to use for the
gridline label font.
label_latitude_frequency : int, optional
The frequency at which to label latitude gridlines. A frequency of
1 means every latitude gridline will be labelled.
label_longitude_frequency : int, optional
The frequency at which to label longitude gridlines. A frequency
of 1 means every longitude gridline will be labelled.
label_top_edge : bool, default=True
If `True`, labels will be drawn where gridlines intersect the top
edge/border of the map.
label_bottom_edge : bool, default=True
If `True`, labels will be drawn where gridlines intersect the
bottom edge/border of the map.
label_left_edge : bool, default=True
If `True`, labels will be drawn where gridlines intersect the
left edge/border of the map.
label_right_edge : bool, default=True
If `True`, labels will be drawn where gridlines intersect the
right edge/border of the map.
"""
pass
@action(
macro.mtext,
{
"text_colour": "charcoal",
},
text="text_lines",
text_colour="text_colour",
)
def title(self, text, **kwargs):
"""
Add a title above the map.
"""
pass
def _input(self, source, **kwargs):
source = data.detect_source(source, self)
self._sources.append(source)
source.get(**kwargs)
def _vector_input(self, *args, wind_mode="uv", **kwargs):
source = data.detect_vector_source(*args, wind_mode=wind_mode, geomap=self)
self._sources.append(source)
source.get(**kwargs)
def contour_lines(self, source, *args, style=None, **kwargs):
"""
Plot line contours on a map.
"""
self._input(source)
self._contour_lines(*args, style=style, **kwargs)
def contour_shaded(self, source, *args, style=None, **kwargs):
"""
Plot filled contours on a map.
"""
self._input(source)
self._contour_shaded(*args, style=style, **kwargs)
def arrows(
self,
*,
u=None,
v=None,
speed=None,
direction=None,
shaded=False,
style=None,
**kwargs,
):
"""
Plot arrows on a map.
"""
if u is not None and v is not None:
self._vector_input(u, v, wind_mode="uv")
elif speed is not None and direction is not None:
self._vector_input(speed, direction, wind_mode="sd")
else:
raise TypeError("arrows() requires u and v OR speed and direction")
method = self._arrows if not shaded else self._arrows_shaded
return self._vector(method, style=style, **kwargs)
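# Note: arrow_head (parsed in _vector below) appears to encode "<style>-<angle>",
# e.g. "triangle-30": the style indexes ARROW_STYLES and the angle in degrees is
# divided by 90 to give the Magics head ratio; a bare style defaults to 45.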
def _vector(self, plotter, *args, **kwargs):
arrow_head = kwargs.pop("arrow_head", None)
if arrow_head is not None:
if "-" in arrow_head:
style, angle = arrow_head.split("-")
else:
style, angle = arrow_head, 45
kwargs["_arrow_shape"] = ARROW_STYLES.index(style)
kwargs["_arrow_ratio"] = int(angle) / 90
plotter(*args, **kwargs)
@action(
macro.mcont,
{
"legend": True,
"contour": False,
"contour_shade": True,
"contour_shade_method": "area_fill",
"contour_interval": {
"contour_level_selection_type": "interval",
},
"contour_level_list": {
"contour_level_selection_type": "level_list",
},
"contour_label_text": {
"contour_label_type": "text",
},
"contour_shade_palette_name": {
"contour_shade_colour_method": "palette",
},
},
levels="contour_level_list",
interval="contour_interval",
interval_reference="contour_reference_level",
dynamic_levels="contour_level_count",
fill_pattern="contour_shade_method",
dot_size="contour_shade_dot_size",
dot_max_density="contour_shade_max_level_density",
dot_min_density="contour_shade_min_level_density",
hatch_index="contour_shade_hatch_index",
hatch_thickness="contour_shade_hatch_thickness",
hatch_density="contour_shade_hatch_density",
shade_type="contour_shade_technique",
contour_method="contour_method",
legend="legend",
)
def _contour_shaded(self, *args, **kwargs):
pass
@action(
macro.mcont,
{
"legend": False,
"contour": True,
"contour_shade": False,
"contour_interval": {
"contour_level_selection_type": "interval",
},
"contour_level_list": {
"contour_level_selection_type": "level_list",
},
"contour_label_text": {
"contour_label_type": "text",
},
},
line_style=["contour_line_style", "contour_highlight_style"],
line_colour=["contour_line_colour", "contour_highlight_colour"],
line_thickness=["contour_line_thickness", "contour_highlight_thickness"],
highlight="contour_highlight",
highlight_colour="contour_highlight_colour",
highlight_style="contour_highlight_style",
highlight_thickness="contour_highlight_thickness",
highlight_frequency="contour_highlight_frequency",
levels="contour_level_list",
interval="contour_interval",
interval_reference="contour_reference_level",
dynamic_levels="contour_level_count",
labels="contour_label",
label_text="contour_label_text",
label_size="contour_label_height",
label_blanks="contour_label_blanking",
label_quality="contour_label_quality",
label_font="contour_label_font",
label_font_style="contour_label_font_style",
label_colour="contour_label_colour",
label_frequency="contour_label_frequency",
| |
mag, dir):
for id in self.mGidSensor[whichfoot]['force_vec']:
self.mFeetCanvas.delete(id)
self.mGidSensor[whichfoot]['force_vec'] = []
if self.mDataClass == BpDataClassCooked:
self._forcevector(whichfoot, mag, dir)
#--
def FeetCanvasDrawCenterOfMass(self, whichfoot, sensors):
for id in self.mGidSensor[whichfoot]['com']:
self.mFeetCanvas.delete(id)
self.mGidSensor[whichfoot]['com'] = []
self._centerofmass(whichfoot, sensors)
#--
def FeetCanvasRefresh(self):
""" Refresh the Feet canvas. """
self.FeetCanvasClearFeet()
self.FeetCanvasDrawFeet()
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Gui Control Panel
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
#--
def CtlPanelInit(self, parent, row, column):
""" Create the Feet Control Panel.
Parameters:
parent - parent to this frame
row - grid row in parent
column - grid column in parent
Return Value:
None
"""
# the frame
cpframe = tk.Frame(parent, relief=tk.RAISED, borderwidth=1)
cpframe.grid(row=row, column=column, padx=3, ipadx=3, ipady=3,
sticky=tk.N+tk.W+tk.E)
self.mCtlPanelFrame = cpframe
row = 0
column = 0
# control panel title
w = tk.Label(cpframe, text='vKHR2 BrainPack Feet Control Panel',
fg=gt.ColorGreen1)
w.grid(row=row, column=column)
row += 1
column = 0
# subframe
subframe = tk.Frame(cpframe, relief=tk.FLAT, borderwidth=0)
subframe.grid(row=row, column=column, padx=1, pady=1, ipadx=3, ipady=3,
sticky=tk.N+tk.W+tk.E)
subrow = 0
subcol = 0
# auto/manual visual feedback
autoFiles = []
for n in [0, 1, 2, 3]:
filename = gut.GetFusionImageFileName('RalfWalking%d.gif' % n)
if filename:
autoFiles += [filename]
manFile = gut.GetFusionImageFileName('RalfWalkingMan.gif')
discFile = gut.GetFusionImageFileName('SerDisc.gif')
w = gut.ActiveImageWidget(subframe,
activesets={'disconnected':[discFile],
'automatic':autoFiles,
'manual':[manFile]},
activetag='disconnected',
period=0.25)
w.grid(row=subrow, column=subcol, sticky=tk.W)
self.mWidgetActiveImage = w
# read button
w = tk.Button(subframe, text='Read', fg=gt.ColorBlack, width=8,
command=self.CbRead)
GuiToolTip.GuiToolTip(w, text="Read the robot's current position.")
self.mButtonRead = w
subcol += 1
# column spacer
tk.Label(subframe, width=1, text=' ').grid(row=subrow, column=subcol)
# automatic/manual updates button
w = tk.Button(subframe, width=8, command=self.CbAutoMan)
self.mTtAutoManManText = "Go to Manual Feet Updates"
self.mTtAutoManAutoText = "Go to Automatic Feet Updates"
w.grid(row=subrow, column=subcol, sticky=tk.W)
self.mTtAutoMan = GuiToolTip.GuiToolTip(w, 'no tip')
self.mButtonAutoMan = w
self.CtlPanelCfgAutoMan()
subcol += 1
# draw read button
self.mButtonRead.grid(row=subrow, column=subcol, sticky=tk.W)
subcol += 1
# column spacer
tk.Label(subframe, width=2, text=' ').grid(row=subrow, column=subcol)
subcol += 1
# raw/cooked data button
w = tk.Button(subframe, fg=gt.ColorBlack, width=8, command=self.CbDataClass)
w.grid(row=subrow, column=subcol)
self.mTtDataClass = GuiToolTip.GuiToolTip(w, text="")
self.mButtonDataClass = w
subcol += 1
# column spacer
tk.Label(subframe, width=2, text=' ').grid(row=subrow, column=subcol)
subcol += 1
# clear button
w = tk.Button(subframe, text='Clear', fg=gt.ColorBlack, width=8,
command=self.CbFeetClear)
w.grid(row=subrow, column=subcol, sticky=tk.E)
GuiToolTip.GuiToolTip(w, text="Clear the feet data and visualization")
subcol += 1
# column spacer
tk.Label(subframe, width=2, text=' ').grid(row=subrow, column=subcol)
subcol += 1
# connect/disconnect button
w = tk.Button(subframe, text='Connect', width=8, fg=gt.ColorBttnGo,
command=self.CbConn)
w.grid(row=subrow, column=subcol, sticky=tk.W)
self.mTtConn= GuiToolTip.GuiToolTip(w,
text="Connect to BotSense IP Proxy Server.")
self.mButtonConn= w
subcol += 1
# close button
w = tk.Button(subframe, text='Close', fg=gt.ColorBttnStop, width=8,
activeforeground=gt.ColorBttnStop, command=self.CbClose)
w.grid(row=subrow, column=subcol, sticky=tk.W)
GuiToolTip.GuiToolTip(w, text="Close this window.")
row += 1
column = 0
# status bar width
self.update_idletasks()
sbwidth = gut.geometry(cpframe)[gut.W] - 4
if self.mFeetCanvasWidth > sbwidth:
sbwidth = self.mFeetCanvasWidth
# Status Bar
self.mStatusBar = GuiStatusBar.GuiStatusBar(cpframe,
[
{'tag': 'run_time',
'prefix': 'state:',
'max_width': 11,
'val': 'disconnected',
'tooltip': "Visualizer's run-time state."
},
{'tag': 'proxy_addr',
'prefix': 'proxy-addr:',
'max_width': 32,
'val': '',
'tooltip': "BotSense Proxy's IP address."
},
{'tag': 'proxy_port',
'prefix': 'proxy-port:',
'max_width': 5,
'val': 0,
'fmt': '%d',
'tooltip': "BotSense Proxy's TCP port."
},
{'tag': 'i2c_dev_name',
'prefix': 'proxied-device:',
'max_width': 12,
'val': '',
'tooltip': "Proxied feet I2C device name."
},
{'tag': 'foot_left_i2c',
'prefix': 'left-foot I'+gt.UniSuperscript['2']+'C:',
'max_width': 4,
'val': 0,
'fmt': '0x%02x',
'tooltip': "Left foot proxied I2C address."
},
{'tag': 'foot_right_i2c',
'prefix': 'right-foot I'+gt.UniSuperscript['2']+'C:',
'max_width': 4,
'val': 0,
'fmt': '0x%02x',
'tooltip': "Right foot proxied I2C address."
},
{'tag': 'data_class',
'prefix': 'data-class:',
'max_width': 6,
'val': 'raw',
'tooltip': "Foot data are raw values or calibrated (cooked)."
},
],
initWidth=sbwidth,
maxRows=2)
self.mStatusBar.grid(row=row, column=column, pady=3, sticky=tk.W)
# set control panel initial state
if self.mDataClass == BpDataClassRaw:
self.CtlPanelStateRaw()
else:
self.CtlPanelStateCooked()
#--
def CtlPanelCfgAutoMan(self):
""" Place control panel into automatic/manual update configuration. """
w = self.mButtonAutoMan
if self.mIsAutoMode == True:
w['text'] = 'Manual'
w['fg'] = gt.ColorBttnStop
w['activeforeground'] = gt.ColorBttnStop
self.mTtAutoMan.newtip(self.mTtAutoManManText)
self.mButtonRead['state'] = tk.DISABLED
else:
w['text'] = 'Auto'
w['fg'] = gt.ColorBttnGo
w['activeforeground'] = gt.ColorBttnGo
self.mTtAutoMan.newtip(self.mTtAutoManAutoText)
self.mButtonRead['state'] = tk.NORMAL
#--
def CtlPanelStateRaw(self):
""" Set Control Panel widgets' states to raw data class. """
self.mButtonDataClass['text'] = 'Cooked'
self.mTtDataClass.newtip("Read feet calibrated sensor data")
#--
def CtlPanelStateCooked(self):
""" Set Control Panel widgets' states to cooked data class. """
self.mButtonDataClass['text'] = 'Raw'
self.mTtDataClass.newtip("Read feet raw sensor data")
#--
def CtlPanelStateConnected(self):
""" Set Control Panel widgets' states to connected. """
self.mButtonConn['state'] = tk.NORMAL
self.mButtonConn['text'] = 'Disconnect'
self.mButtonConn['fg'] = gt.ColorBttnStop
self.mButtonConn['activeforeground'] = gt.ColorBttnStop
self.mTtConn.newtip("Disconnect to BotSense IP Proxy Server.")
#--
def CtlPanelStateDisconnected(self):
""" Set Control Panel widgets' states to disconnected. """
self.mButtonConn['state'] = tk.NORMAL
self.mButtonConn['text'] = 'Connect'
self.mButtonConn['fg'] = gt.ColorBttnGo
self.mButtonConn['activeforeground'] = gt.ColorBttnGo
self.mTtConn.newtip("Connect to BotSense IP Proxy Server.")
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Gui Window Callbacks
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
#--
def CbAutoMan(self):
""" Automatic/Manual Viz canvas updates toggle callback. """
self.mIsAutoMode = not self.mIsAutoMode
self.CtlPanelCfgAutoMan()
self.WinQueueRequest('cfg', run_time="update")
#--
def CbRead(self):
""" Read feet callback. """
if self.mTestGui:
self.TestGuiSensorDataManual()
else:
self.BsSensorDataPull()
#--
def CbDataClass(self):
""" Raw/Cooked data class callback. """
if self.mDataClass == BpDataClassRaw:
newClass = BpDataClassCooked
self.CtlPanelStateCooked()
self.WinQueueRequest('cfg', data_class="cooked")
else:
newClass = BpDataClassRaw
self.CtlPanelStateRaw()
self.WinQueueRequest('cfg', data_class="raw")
self.mDataClass = newClass
self.FeetCanvasRefresh()
#--
def CbFeetClear(self):
""" Clear the viz's current set of data callback. """
self.WinQueueRequest('clear')
#--
def CbLogData(self):
""" Dump waypoints to file callback. """
ltime = time.localtime()
initialfile = 'wp_%d%02d%02d_%02d%02d%02d.py' % \
(ltime[0], ltime[1], ltime[2], ltime[3], ltime[4], ltime[5])
dlg = GuiDlgSaveAs.GuiDlgSaveAs(self, self._cbSave,
title='Save vKHR2 Waypoints As',
filetypes=[('Python data files', '*.py', 'TEXT'),
('Text files', '*.txt', 'TEXT'),
('All files', '*')],
initialfile=initialfile,
defaultextension='.py')
#--
def _cbSave(self, filename):
""" Save As callback to actually save the waypoints.
Parameters:
filename - file name to write the data
"""
fp = open(filename, 'w')
print('#', file=fp)
print('# vKHR2 Feet Waypoints', file=fp)
ltime = time.localtime()
print('# %d.%02d.%02d %02d:%02d:%02d' % \
(ltime[0], ltime[1], ltime[2], ltime[3], ltime[4], ltime[5]), file=fp)
print('#\n', file=fp)
print('# waypoint feet: (x(mm), y(mm), theta(radians))', file=fp)
print('vKHR2 = [', end='', file=fp)
n = 0
sep = ''
for p in self.mBotFeet:
if n > 0:
sep = ','
if n % 2 == 0:
sep += '\n '
else:
sep += ' '
print('%s(%.2f, %.2f, %.7f)' % (sep, p[X], p[Y], p[THETA]),
end='', file=fp)
n += 1
print('\n]', file=fp)
fp.close()
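# Example of the file layout _cbSave produces (hypothetical waypoint values;
# the real numbers come from self.mBotFeet at save time):
#
#   #
#   # vKHR2 Feet Waypoints
#   # 2009.06.01 12:00:00
#   #
#
#   # waypoint feet: (x(mm), y(mm), theta(radians))
#   vKHR2 = [(10.00, 5.00, 0.0000000), (12.50, 5.00, 0.0314159),
#     (15.00, 5.25, 0.0628319)
#   ]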
#--
def CbConn(self):
""" Connect to bsproxy window callback. """
self.mButtonConn['state'] = tk.DISABLED
if self.mBsClient.mBsIsConn:
self.WinQueueRequest('disconnect')
else:
self.WinQueueRequest('connect')
self.WinQueueRequest('cfg', run_time="update")
#--
def CbClose(self):
""" Close window callback. """
self.IVCancel()
self.destroy()
#--
def CbCfgBsProxy(self):
""" Configure bsproxy callback. """
keys = <KEY>.GetSettingNames()
last = {}
for k in keys:
if k == 'proxy_addr':
last[k] = self.mBsClient.mBsProxyAddr
elif k == 'proxy_port':
last[k] = self.mBsClient.mBsProxyPort
elif k == 'i2c_dev_name':
last[k] = self.mI2CDevName
elif k == 'bpfoot_left':
last[k] = {
'enable':self.mBpFootState['bpfoot_left'],
'i2c_addr': self.mBsClient.mBsProxiedDev['bpfoot_left']['i2c']}
elif k == 'bpfoot_right':
last[k] = {
'enable':self.mBpFootState['bpfoot_right'],
'i2c_addr': self.mBsClient.mBsProxiedDev['bpfoot_right']['i2c']}
dlg = GuiDlgKHR2Proxy.GuiDlgKHR2Proxy(self, lastSettings=last)
if dlg.result:
for k,v in dlg.result.items():
if k == 'proxy_addr' and v:
self.mBsClient.SetProxyAddr(v)
elif k == 'proxy_port' and v:
self.mBsClient.SetProxyPort(v)
elif k == 'i2c_dev_name':
self.mI2CDevName = v
elif k == 'bpfoot_left':
self.mBpFootState['bpfoot_left'] = v['enable']
self.mBsClient.mBsProxiedDev['bpfoot_left']['i2c'] = v['i2c_addr']
elif k == 'bpfoot_right':
self.mBpFootState['bpfoot_right'] = v['enable']
self.mBsClient.mBsProxiedDev['bpfoot_right']['i2c'] = v['i2c_addr']
self.FeetCanvasRefresh()
self.WinQueueRequest('cfg',
proxy_addr=self.mBsClient.mBsProxyAddr,
proxy_port=self.mBsClient.mBsProxyPort,
foot_left_i2c=self.mBsClient.mBsProxiedDev['bpfoot_left']['i2c'],
foot_right_i2c=self.mBsClient.mBsProxiedDev['bpfoot_right']['i2c'],
i2c_dev_name=self.mI2CDevName)
print("Left foot enabled = ", self.mBpFootState['bpfoot_left'])
print("Right foot enabled = ", self.mBpFootState['bpfoot_right'])
#--
def CbI2CScan(self):
""" Scan the proxied I2C devices callback. """
if self.mBsClient.mBsIsConn:
for proxdev,proxdata in self.mBsClient.mBsProxiedDev.items():
handle
# Repository: SUNET/ni
# -*- coding: utf-8 -*-
__author__ = 'ffuentes'
import graphene
import norduniclient as nc
import apps.noclook.vakt.utils as sriutils
from apps.noclook import activitylog, helpers
from apps.noclook.forms import *
from apps.noclook.models import Role as RoleModel, \
DEFAULT_ROLES, DEFAULT_ROLE_KEY
from apps.noclook.schema.types import *
from django.test import RequestFactory
from django_comments.forms import CommentForm
from django_comments.models import Comment
from graphene import Field
from graphene_django.forms.mutation import DjangoModelFormMutation, BaseDjangoFormMutation
from django.core.exceptions import ObjectDoesNotExist
from binascii import Error as BinasciiError
import logging
logger = logging.getLogger(__name__)
class NIGroupMutationFactory(NIMutationFactory):
class NIMetaClass:
create_form = NewGroupForm
update_form = EditGroupForm
request_path = '/'
graphql_type = Group
class Meta:
abstract = False
class NIProcedureMutationFactory(NIMutationFactory):
class NIMetaClass:
create_form = NewProcedureForm
update_form = EditProcedureForm
request_path = '/'
graphql_type = Procedure
class Meta:
abstract = False
def empty_processor(request, form, nodehandler, relation_name):
pass
def process_works_for(request, form, nodehandler, relation_name):
if relation_name in form.cleaned_data and 'role' in form.cleaned_data and \
form.cleaned_data[relation_name] and form.cleaned_data['role']:
organization_nh = NodeHandle.objects.get(pk=form.cleaned_data[relation_name])
role_handle_id = form.cleaned_data['role']
role = RoleModel.objects.get(handle_id=role_handle_id)
helpers.set_works_for(request.user, nodehandler, organization_nh.handle_id, role.name)
def process_member_of(request, form, nodehandler, relation_name):
if relation_name in form.cleaned_data and form.cleaned_data[relation_name]:
group_nh = NodeHandle.objects.get(pk=form.cleaned_data[relation_name])
helpers.set_member_of(request.user, nodehandler, group_nh.handle_id)
def process_has_phone(request, form, nodehandler, relation_name):
if relation_name in form.cleaned_data and form.cleaned_data[relation_name]:
contact_id = form.cleaned_data[relation_name]
helpers.add_phone_contact(request.user, nodehandler, contact_id)
def process_has_email(request, form, nodehandler, relation_name):
if relation_name in form.cleaned_data and form.cleaned_data[relation_name]:
contact_id = form.cleaned_data[relation_name]
helpers.add_email_contact(request.user, nodehandler, contact_id)
def process_has_address(request, form, nodehandler, relation_name):
if relation_name in form.cleaned_data and form.cleaned_data[relation_name]:
organization_id = form.cleaned_data[relation_name]
helpers.add_address_organization(request.user, nodehandler, organization_id)
class NIPhoneMutationFactory(NIMutationFactory):
class NIMetaClass:
form = PhoneForm
request_path = '/'
graphql_type = Phone
relations_processors = {
'contact': process_has_phone,
}
property_update = ['name', 'type']
class Meta:
abstract = False
class NIEmailMutationFactory(NIMutationFactory):
class NIMetaClass:
form = EmailForm
request_path = '/'
graphql_type = Email
relations_processors = {
'contact': process_has_email,
}
property_update = ['name', 'type']
class Meta:
abstract = False
class NIAddressMutationFactory(NIMutationFactory):
class NIMetaClass:
form = AddressForm
request_path = '/'
graphql_type = Address
relations_processors = {
'organization': process_has_address,
}
property_update = ['name', 'phone', 'street', 'postal_code', \
'postal_area', 'floor', 'room']
class Meta:
abstract = False
def delete_outgoing_nodes(nodehandler, relation_name, user):
node = nodehandler.get_node()
relations = node.get_outgoing_relations()
for relname, link_nodes in relations.items():
if relname == relation_name:
for link_node in link_nodes:
link_node = link_node['node']
helpers.delete_node(user, link_node.handle_id)
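# Shape of the structure walked above, inferred from this function's usage
# (not documented in this module):
#
#   node.get_outgoing_relations() -> {'Has_email': [{'node': <node>, ...}, ...],
#                                     'Has_phone': [{'node': <node>, ...}, ...]}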
class NIContactMutationFactory(NIMutationFactory):
class NIMetaClass:
form = MailPhoneContactForm
request_path = '/'
graphql_type = Contact
relations_processors = {
'relationship_works_for': process_works_for,
'relationship_member_of': process_member_of,
}
subentity_processors = {
'email': {
'form': EmailForm,
'type_slug': 'email',
'meta_type': 'Logical',
'fields': {
'id': 'email_id',
'name': 'email',
'type': 'email_type',
},
'link_method': 'add_email',
},
'phone': {
'form': PhoneForm,
'type_slug': 'phone',
'meta_type': 'Logical',
'fields': {
'id': 'phone_id',
'name': 'phone',
'type': 'phone_type',
},
'link_method': 'add_phone',
},
}
delete_nodes = {
'Has_email': delete_outgoing_nodes,
'Has_phone': delete_outgoing_nodes,
}
property_update = [
'first_name', 'last_name', 'contact_type', 'name', 'title',
'pgp_fingerprint', 'notes'
]
relay_extra_ids = ['role', ]
class Meta:
abstract = False
class CreateOrganization(CreateNIMutation):
@classmethod
def do_request(cls, request, **kwargs):
form_class = kwargs.get('form_class')
nimetaclass = getattr(cls, 'NIMetaClass')
graphql_type = getattr(nimetaclass, 'graphql_type')
nimetatype = getattr(graphql_type, 'NIMetaType')
node_type = getattr(nimetatype, 'ni_type').lower()
node_meta_type = getattr(nimetatype, 'ni_metatype').capitalize()
context_method = getattr(nimetatype, 'context_method')
has_error = False
context = context_method()
# check it can write on this context
authorized = sriutils.authorize_create_resource(request.user, context)
if not authorized:
raise GraphQLAuthException()
# Get needed data from node
if request.POST:
# replace relay ids for handle_id in contacts if present
post_data = request.POST.copy()
for field, roledict in DEFAULT_ROLES.items():
if field in post_data:
handle_id = post_data.get(field)
handle_id = relay.Node.from_global_id(handle_id)[1]
post_data.pop(field)
post_data.update({field: handle_id})
relay_extra_ids = ('relationship_parent_of', 'relationship_uses_a')
for field in relay_extra_ids:
handle_id = post_data.get(field)
if handle_id:
try:
handle_id = relay.Node.from_global_id(handle_id)[1]
post_data.pop(field)
post_data.update({field: handle_id})
except BinasciiError:
pass # the id is already in handle_id format
form = form_class(post_data)
form.strict_validation = True
if form.is_valid():
try:
nh = helpers.form_to_generic_node_handle(request, form,
node_type, node_meta_type, context)
except UniqueNodeError:
has_error = True
return has_error, [ErrorType(field="_", messages=["A {} with that name already exists.".format(node_type)])]
# Generic node update
# use property keys to avoid inserting contacts as a string property of the node
property_keys = [
'name', 'description', 'organization_id', 'type', 'incident_management_info',
'affiliation_customer', 'affiliation_end_customer', 'affiliation_provider',
'affiliation_partner', 'affiliation_host_user', 'affiliation_site_owner',
'website', 'organization_number'
]
helpers.form_update_node(request.user, nh.handle_id, form, property_keys)
nh_reload, organization = helpers.get_nh_node(nh.handle_id)
# add default context
NodeHandleContext(nodehandle=nh, context=context).save()
# specific role setting
for field, roledict in DEFAULT_ROLES.items():
if field in form.cleaned_data:
contact_id = form.cleaned_data[field]
role = RoleModel.objects.get(slug=field)
set_contact = helpers.get_contact_for_orgrole(organization.handle_id, role)
if contact_id:
if set_contact:
if set_contact.handle_id != contact_id:
helpers.unlink_contact_with_role_from_org(request.user, organization, role)
helpers.link_contact_role_for_organization(request.user, organization, contact_id, role)
else:
helpers.link_contact_role_for_organization(request.user, organization, contact_id, role)
elif set_contact:
helpers.unlink_contact_and_role_from_org(request.user, organization, set_contact.handle_id, role)
# Set child organizations
if form.cleaned_data['relationship_parent_of']:
organization_nh = NodeHandle.objects.get(handle_id=form.cleaned_data['relationship_parent_of'])
helpers.set_parent_of(request.user, organization, organization_nh.handle_id)
if form.cleaned_data['relationship_uses_a']:
procedure_nh = NodeHandle.objects.get(handle_id=form.cleaned_data['relationship_uses_a'])
helpers.set_uses_a(request.user, organization, procedure_nh.handle_id)
return has_error, { graphql_type.__name__.lower(): nh }
else:
# get the errors and return them
has_error = True
errordict = cls.format_error_array(form.errors)
return has_error, errordict
else:
# no POST data was supplied; report that instead of referencing an unbound form
has_error = True
errordict = [ErrorType(field="_", messages=["No input data supplied"])]
return has_error, errordict
class NIMetaClass:
django_form = EditOrganizationForm
request_path = '/'
graphql_type = Organization
is_create = True
relations_processors = {
'relationship_parent_of': empty_processor,
'relationship_uses_a': empty_processor,
}
class UpdateOrganization(UpdateNIMutation):
@classmethod
def do_request(cls, request, **kwargs):
form_class = kwargs.get('form_class')
nimetaclass = getattr(cls, 'NIMetaClass')
graphql_type = getattr(nimetaclass, 'graphql_type')
nimetatype = getattr(graphql_type, 'NIMetaType')
node_type = getattr(nimetatype, 'ni_type').lower()
node_meta_type = getattr(nimetatype, 'ni_metatype').capitalize()
id = request.POST.get('id')
has_error = False
# check authorization
handle_id = relay.Node.from_global_id(id)[1]
authorized = sriutils.authorice_write_resource(request.user, handle_id)
if not authorized:
raise GraphQLAuthException()
# Get needed data from node
nh, organization = helpers.get_nh_node(handle_id)
relations = organization.get_relations()
out_relations = organization.get_outgoing_relations()
if request.POST:
# set handle_id into POST data and remove relay id
post_data = request.POST.copy()
post_data.pop('id')
post_data.update({'handle_id': handle_id})
# replace relay ids for handle_id in contacts if present
for field, roledict in DEFAULT_ROLES.items():
if field in post_data:
handle_id = post_data.get(field)
handle_id = relay.Node.from_global_id(handle_id)[1]
post_data.pop(field)
post_data.update({field: handle_id})
relay_extra_ids = ('relationship_parent_of', 'relationship_uses_a')
for field in relay_extra_ids:
handle_id = post_data.get(field)
if handle_id:
handle_id = relay.Node.from_global_id(handle_id)[1]
post_data.pop(field)
post_data.update({field: handle_id})
form = form_class(post_data)
form.strict_validation = True
if form.is_valid():
# Generic node update
# use property keys to avoid inserting contacts as a string property of the node
property_keys = [
'name', 'description', 'organization_id', 'type', 'incident_management_info',
'affiliation_customer', 'affiliation_end_customer', 'affiliation_provider',
'affiliation_partner', 'affiliation_host_user', 'affiliation_site_owner',
'website', 'organization_number'
]
helpers.form_update_node(request.user, organization.handle_id, form, property_keys)
# specific role setting
for field, roledict in DEFAULT_ROLES.items():
if field in form.cleaned_data:
contact_id = form.cleaned_data[field]
role = RoleModel.objects.get(slug=field)
set_contact = helpers.get_contact_for_orgrole(organization.handle_id, role)
if contact_id:
if set_contact:
if set_contact.handle_id != contact_id:
helpers.unlink_contact_with_role_from_org(request.user, organization, role)
helpers.link_contact_role_for_organization(request.user, organization, contact_id, role)
else:
helpers.link_contact_role_for_organization(request.user, organization, contact_id, role)
elif set_contact:
helpers.unlink_contact_and_role_from_org(request.user, organization, set_contact.handle_id, role)
# Set child organizations
if form.cleaned_data['relationship_parent_of']:
organization_nh = NodeHandle.objects.get(handle_id=form.cleaned_data['relationship_parent_of'])
helpers.set_parent_of(request.user, organization, organization_nh.handle_id)
if form.cleaned_data['relationship_uses_a']:
procedure_nh = NodeHandle.objects.get(handle_id=form.cleaned_data['relationship_uses_a'])
helpers.set_uses_a(request.user, organization, procedure_nh.handle_id)
return has_error, { graphql_type.__name__.lower(): nh }
else:
# get the errors and return them
has_error = True
errordict = cls.format_error_array(form.errors)
return has_error, errordict
else:
# no POST data was supplied; report that instead of referencing an unbound form
has_error = True
errordict = [ErrorType(field="_", messages=["No input data supplied"])]
return has_error, errordict
class NIMetaClass:
django_form = EditOrganizationForm
request_path = '/'
graphql_type = Organization
class NIOrganizationMutationFactory(NIMutationFactory):
class NIMetaClass:
create_form = NewOrganizationForm
update_form = EditOrganizationForm
request_path = '/'
graphql_type = Organization
# create_include or create_exclude
delete_nodes = {
'Has_address': delete_outgoing_nodes,
}
manual_create = CreateOrganization
manual_update = UpdateOrganization
class Meta:
abstract = False
## add the create and update manual mutations for Organization
## before its composite mutation is built, and after its factory is built
Organization.set_create_mutation(CreateOrganization)
Organization.set_update_mutation(UpdateOrganization)
class CreateRole(DjangoModelFormMutation):
@classmethod
def mutate_and_get_payload(cls, root, info, **input):
context = sriutils.get_community_context()
# check it can write on this context
authorized = sriutils.authorize_create_resource(info.context.user, context)
if not authorized:
raise GraphQLAuthException()
form = cls.get_form(root, info, **input)
if form.is_valid():
return cls.perform_mutate(form, info)
else:
errors = [
ErrorType(field=key, messages=value)
for key, value in form.errors.items()
]
return cls(errors=errors)
class Meta:
form_class = NewRoleForm
class UpdateRole(DjangoModelFormMutation):
class Input:
id = graphene.ID(required=True)
@classmethod
def get_form_kwargs(cls, root, info, **input):
context = sriutils.get_community_context()
# check it can write on this context
authorized = sriutils.authorize_create_resource(info.context.user, context)
if not authorized:
raise GraphQLAuthException()
kwargs = {"data": input}
id = input.pop("id", None)
handle_id = relay.Node.from_global_id(id)[1]
if handle_id:
instance = cls._meta.model._default_manager.get(pk=handle_id)
kwargs["instance"] = instance
return kwargs
class Meta:
form_class = EditRoleForm
class DeleteRole(relay.ClientIDMutation):
class Input:
id = graphene.ID(required=True)
success = graphene.Boolean(required=True)
id = graphene.ID(required=True)
@classmethod
def mutate_and_get_payload(cls, root, info, **input):
id = input.get("id", None)
handle_id = relay.Node.from_global_id(id)[1]
success = False
context = sriutils.get_community_context()
# check it can write on this context
authorized = sriutils.authorize_create_resource(info.context.user, context)
if not authorized:
raise GraphQLAuthException()
try:
role = RoleModel.objects.get(handle_id=handle_id)
role.delete()
success = True
except ObjectDoesNotExist:
success = False
return DeleteRole(success=success, id=id)
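# A minimal sketch of exercising DeleteRole from a test. The schema object and
# the mutation field name (`delete_role`) are assumptions, not taken from this
# module:
#
#   from graphene.test import Client
#   client = Client(schema)
#   client.execute(
#       'mutation { delete_role(input: {id: "Um9sZTox"}) { success id } }',
#       context_value=request)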
## Necessary for the CompositeContactMutation
class
'geologic',
'geology',
'geometry',
'gland',
'glass',
'glucose',
'glycine',
'glycines',
'glycosylamine',
'glycosylamines',
'gold',
'growth',
'guild',
'gαs',
'haematopoiesis',
'health',
'heat',
'helix',
'hematological',
'hematopoiesis',
'hematoxylin',
'heredity',
'heterokont',
'heterokontae',
'heterokonts',
'heterolobosea',
'highly-digestible',
'histocompatibility',
'histology',
'histopathology',
'hormone',
'horse',
'human',
'humanism',
'humanities',
'humans',
'hushing',
'hydrogen',
'hydrolysis',
'hydroxyl',
'hypotheses',
'hypothesis',
'idarubicin',
'idea',
'il-15rα',
'illness',
'image',
'images',
'imidate',
'imide',
'imine',
'imines',
'immunology',
'immunotherapy',
'individual',
'inductor',
'inductors',
'infection',
'inflammation',
'informal',
'information',
'infrared',
'infrastructure',
'ingestion',
'injuries',
'injury',
'inorganic',
'insect',
'insects',
'instrument',
'instrumentalism',
'instrumentation',
'interact',
'interacting',
'interaction',
'interdisciplinarity',
'interleukin',
'interleukin-2',
'investigative',
'investment',
'ion',
'ions',
'iron',
'irritability',
'irritants',
'irritation',
'iso',
'isobutanol',
'isobutyl',
'isomer',
'isomerism',
'isomers',
'jaligner',
'kinase',
'knowledge',
'lame',
'lanthanide',
'lanthanides',
'large-scale',
'larva',
'larval',
'laser',
'lead',
'leads',
'leather',
'legume',
'legumin',
'lentil',
'lentils',
'levacetylmethadol',
'lice',
'life',
'ligand',
'ligase',
'light',
'linguistics',
'lipid',
'lipids',
'liquid',
'liver',
'lives',
'livestock',
'living',
'lobosa',
'lobose',
'louse',
'love',
'lustre',
'lymphocyte',
'lymphocytes',
'lysis',
'machine',
'macromolecule',
'macromolecules',
'mail',
'malignancy',
'mammal',
'mammals',
'maser',
'mass',
'material',
'materialism',
'mathematics',
'matter',
'measure',
'measurement',
'meat',
'mechanics',
'mechanisms',
'medal',
'medication',
'medicine',
'melanoma',
'membrane',
'metabolic',
'metabolism',
'metabolite',
'metabolites',
'metal',
'metallicity',
'methabol',
'methadol',
'methane',
'methanol',
'methionine',
'methodology',
'microfiltration',
'microorganism',
'microorganisms',
'microscope',
'microscopic',
'microscopy',
'microtubule',
'microtubules',
'milk',
'mineral',
'mining',
'mitochondria',
'mitochondrial',
'mitosis',
'mitoxantrone',
'mohism',
'mold',
'molds',
'molecule',
'molecules',
'monism',
'monitoring',
'monomer',
'monomers',
'monosaccharide',
'monosaccharides',
'morphology',
'motility',
'multi-subunit',
'muscle',
'mushroom',
'mushrooms',
'name',
'natural',
'nature',
'naturism',
'nbpf15',
'neoplasm',
'neoplasms',
'neutron',
'nitrate',
'nitrite',
'nitrogen',
'nuclease',
'nucleobase',
'nucleobases',
'nucleoside',
'nucleosides',
'nucleotide',
'nucleotides',
'nudism',
'nudist',
'nudity',
'numb',
'number',
'nutrient',
'nutrients',
'obesity',
'object',
'objects',
'observation',
'odor',
'odorless',
'officer',
'offspring',
'olfaction',
'opiod',
'opticks',
'optics',
'ore',
'organ',
'organelle',
'organelles',
'organisation',
'organised',
'organism',
'organisms',
'organization',
'organizing',
'oxidant',
'oxidation',
'oxide',
'oxides',
'oxidized',
'oxidizer',
'oxidoreductase',
'oxoacid',
'oxygen',
'oxymetholone',
'pain',
'painkiller',
'pancreas',
'pancreatitis',
'parasitic',
'parasitism',
'parotitis',
'part',
'particle',
'patent',
'pathogen',
'pathogens',
'pathology',
'pd-1',
'pd-l1',
'pea',
'peas',
'pediculosis',
'pelvis',
'peptide',
'peptides',
'peptidomimetic',
'peptidomimetics',
'peptoid',
'percolozoa',
'period',
'periodization',
'ph',
'pharmacodynamic',
'pharmacodynamics',
'pharmacology',
'pharmacy',
'phenomena',
'phenomenalism',
'phenomenon',
'phenotype',
'phenotypes',
'phosphate',
'phosphoprotein',
'phosphoproteins',
'phosphorus',
'phosphorylation',
'phosphotransferase',
'photon',
'photosynthesis',
'phototransduction',
'phylum',
'physics',
'physiology',
'planet',
'planets',
'plant',
'plants',
'planula',
'plastid',
'plastids',
'pleasure',
'ploidy',
'poison',
'polymer',
'polymerase',
'polymerization',
'polynucleotide',
'polyphyly',
'prediction',
'predictions',
'price',
'primate',
'process',
'processes',
'processivity',
'profession',
'program',
'programmer',
'programming',
'prokaryote',
'prokaryotes',
'prokaryotic',
'promotion',
'prostate',
'protein',
'proteins',
'proteobacteria',
'proteome',
'proteomics',
'proteopathy',
'proteostasis',
'proteotoxicity',
'proteus',
'protist',
'proton',
'protonated',
'protonation',
'protons',
'protozoa',
'protozoan',
'protozoans',
'pseudopod',
'pseudopodia',
'pseudopods',
'psychology',
'qmail',
'quantity',
'radiation',
'reaction',
'receptor',
'redox',
'repeated',
'report',
'reproduction',
'reptation',
'residue',
'resonance',
'resonant',
'response',
'result',
'retinoid',
'rhodopsin',
'ribonuclease',
'ribonucleases',
'ribonucleoprotein',
'rli-15',
'rna',
'roton',
'rumen',
'ruminant',
'ruminants',
'rush',
'sample',
'sandpaper',
'scale',
'scalp',
'schizophrenia',
'science',
'scientific',
'sediment',
'seed',
'sequence',
'sequencing',
'series',
'serine',
'sex',
'silurian',
'situation',
'skeleton',
'slide',
'societies',
'society',
'sociology',
'software',
'solute',
'solution',
'solvent',
'space',
'species',
'specimen',
'spectroscopy',
'sperm',
'spermatozoa',
'spermatozoon',
'stain',
'standard',
'standardization',
'star',
'starch',
'stars',
'state',
'stereochemistry',
'stereoisomerism',
'steroid',
'stomach',
'structuralism',
'structure',
'study',
'subphylum',
'substance',
'subtribe',
'sucrase',
'sucrose',
'sugar',
'sugaring',
'superagonist',
'symbiosis',
'symbol',
'symbols',
'system',
'systems',
'taxonomy',
'technology',
'temperature',
'temperatures',
'testosterone',
'thermal',
'thermodynamics',
'thing',
'tide',
'time',
'tissue',
'tobymac',
'topic',
'toxic',
'toxicant',
'toxicants',
'toxicity',
'toxin',
'toxins',
'trace',
'transaminase',
'transaminases',
'transamination',
'transcriptome',
'transduction',
'treatment',
'trna',
'troubleshoot',
'troubleshooting',
'tubulin',
'tumor',
'ultrasound',
'ungulate',
'unit',
'universe',
'universities',
'university',
'urosporidae',
'ustilaginomycetes',
'ustilaginomycotina',
'vaccination',
'vaccine',
'vacuolaria',
'vacuole',
'vahlkampfiidae',
'valrubicin',
'vampyrellidea',
'vannellidae',
'vannellina',
'variosea',
'varipodida',
'varisulca',
'vaucheriales',
'vault',
'velocity',
'vermamoebidae',
'verrucomicrobia',
'vertebra',
'vertebrate',
'vertebrates',
'vetches',
'vexilliferidae',
'vicia',
'virtue',
'virulence',
'virus',
'viruses',
'voromonadidae',
'voromonas',
'vorticella',
'was',
'wasp',
'wasps',
'water',
'water-insoluble',
'wavelength',
'wavelengths',
'wax',
'way',
'wool',
'world',
'xenacoelomorpha',
'xenoturbella',
'xenoturbellid',
'xenoturbellida',
'xenoturbellids',
'xylonomycetes',
'yeast',
'yeasts',
'zosterophyll',
'zosterophyllopsida',
'zygnematophyceae',
'zygocystinae',
'zygomycota',
'Α-methylfentanyl',
'Γ-oryzanol',
'α-methylfentanyl',
'γ-oryzanol'],
'gram-2': [ '4-aminobutyrate Transaminase',
'4-aminobutyrate transaminase',
'A Amoebozoa',
'A Diverse',
'A Parasites',
'Abbreviated Dialing',
'Abnormal Growth',
'Abnormal Posturing',
'Acetone–butanol–ethanol Fermentation',
'Acetyl Group',
'Acidic Compound',
'Acidic Oxide',
'Acidity Regulator',
'Active Site',
'Acyl Group',
'Adenylate Kinase',
'Adherence Medicine',
'Aerobic Respiration',
'Agricultural Robot',
'Alkyl Nitrite',
'Alkyl Nitrites',
'Alkylating Agent',
'Alternative Splicing',
'Alveolates Isolated',
'Amino Acid',
'Amoeboid Movement',
'An Important',
'Anabolic Steroid',
'Anaerobic Respiration',
'Analytical Technique',
'Ancient History',
'Anoxygenic Photosynthesis',
'Anti-Metastatic Activity',
'Anti-Pd1 Therapy',
'Anti-metastatic Activity',
'Anti-pd1 Therapy',
'Antibody-drug conjugate',
'Antigen-presenting Cell',
'Any Economy',
'Aspartate Kinase',
'Aspartate Transaminase',
'Atomic Mass',
'Atp Synthase',
'Auto Racing',
'B Cell',
'B16 Melanoma',
'Bacterial Display',
'Bacterial Transcription',
'Balantidium Coli',
'Base Pair',
'Basic Structural',
'Bechdel Test',
'Beef Aging',
'Bell-Shaped Ciliates',
'Bell-shaped Ciliates',
'Benign Cancer',
'<NAME>',
'Beta Sheet',
'Binding Site',
'Biological Cell',
'Biological Membrane',
'Biological Process',
'Biological System',
'Biological Warfare',
'Biopharmaceutical Drug',
'Blood Cell',
'Blood Sample',
'Body Fluid',
'Body Water',
'Bonding Electron',
'Both Proteins',
'Bow Down',
'Brand Drug',
'Brand Price',
'Breaking Down',
'Breast Cancer',
'British Firms',
'Cancer Cell',
'Cancer Detection',
'Cancer Immunotherapy',
'Cancer Model',
'Cancer Treatment',
'Carbohydrate Metabolism',
'Cartesian Coordinates',
'Catalytic Α',
'Catalytic α',
'Cdna Library',
'Cell Adhesion',
'Cell Biology',
'Cell Component',
'Cell Culture',
'Cell Cycle',
'Cell Membrane',
'Cell Nucleus',
'Cellular Compartment',
'Cellular Compartments',
'Chemical Bond',
'Chemical Compound',
'Chemical Conformation',
'Chemical Element',
'Chemical Elements',
'Chemical Energy',
'Chemical Formula',
'Chemical Reaction',
'Chemical Structure',
'Chemical Structures',
'Chemical Substance',
'Chemotherapeutic Agent',
'Chloroplast DNA',
'Cis-regulatory Element',
'Cis–trans Isomerism',
'Clinical Sample',
'Clinical Study',
'Colon Cancer',
'Colon Carcinoma',
'Colorectal Cancer',
'Commonly Prescribed',
'Compact Identifier',
'Computer Programming',
'Computer Science',
'Conformational Change',
'Conformational Isomerism',
'Connective Tissue',
'Conserved Sequence',
'Coordinate System',
'Coordination Complex',
'Crab-eating Macaque',
'Cribriform Morphology',
'Cross-coupling Reaction',
'Crude Lysate',
'Cryo-Electron Microscopy',
'Cryo-electron Microscopy',
'Cryoelectron Microscopy',
'Crystalline Silicon',
'Cytoskeletal Disruptor',
'Cytotoxic Activity',
'D Nits',
'Daytona Prototype',
'Designing Women',
'Diagram Commonly',
'Dichroic Glass',
'Dietary Fiber',
'Dihedral Angle',
'Dirigent Protein',
'Disposition Or',
'Dissociation Constant',
'Distributed Computing',
'Disulfide Bond',
'Dna Polymerase',
'Dna Repair',
'Dna Replication',
'Dna Sequence',
'Dna Sequencer',
'Dna Sequencing',
'Dna-binding Protein',
'Double Bond',
'Drug Development',
'Dual-polarization Interferometry',
'Ductal Morphology',
'Edible Legume',
'Eicosanoid Metabolism',
'Electric Charge',
'Electrically Neutral',
'Electromagnetic Radiation',
'Electron Crystallography',
'Elemental Analysis',
'Emulsion Polymerization',
'Endocrine System',
'Endoplasmic Reticulum',
'Enzyme Assay',
'Enzyme Catalysis',
'Enzyme Found',
'Enzyme Kinetics',
'Essential Amino',
'Ethanol Fermentation',
'Ethanol Metabolism',
'Eukaryotic Organelle',
'Eukaryotic Transcription',
'Eusocial Insects',
'Evolutionary Psychology',
'Excitatory Ability',
'Exocrine Gland',
'Experimental System',
'Extinct Plants',
'Fatty Acid',
'Fatty Acids',
'Flowering Plant',
'For Molecules',
'Four Basic',
'Fusion Power',
'Fusion Protein',
'Gene Structure',
'Generic Drug',
'Genetic Disorder',
'Genome Sequencing',
'Geologic Period',
'Geological Period',
'Gland Structure',
'Globular Protein',
'Globular Proteins',
'Glycosidic Bond',
'Grading System',
'Gram-Negative Proteobacteria',
'Gram-negative Bacteria',
'Gram-negative Proteobacteria',
'Green Algae',
'Hair Removal',
'Haploid Cell',
'Health Science',
'Healthy Cell',
'Hexapod Invertebrates',
'Histologic Stain',
'Histopathological Image',
'Hydroxyl Radical',
'Il-15 Receptor',
'Image Analysis',
'Immune Response',
'Immune System',
'In Vivo',
'Individual Entity',
'Infectious Disease',
'Inflammatory Chemokines',
'Inorganic Compound',
'Interdisciplinary Field',
'Interleukin 15',
'Interleukin 2',
'Interleukin-15 Receptor',
'Intermediate End',
'Investigative Journalism',
'Iso 6344',
'Iso 8601',
'Isobutyl Nitrite',
'Isoelectric Focusing',
'Large Biomolecules',
'Life-Sustaining Chemical',
'Life-sustaining Chemical',
'Lipid Bilayer',
'Lipid Layer',
'Lung Cancer',
'Lymph Node',
'Mail Server',
'Mainly Multicellular',
'Male Gamete',
'Malignant Cancer',
'Malignant Cell',
'Malignant Cells',
'Meat Glue',
'Medical Device',
'Medical Devices',
'Medical Image',
'Medical Study',
'Membrane-Bound Organelle',
'Mental Disorder',
'Mental Illness',
'Metabolic Disorder',
'Metabolic Pathway',
'Metabolic Pathways',
'Method In',
'Method in',
'Microscopic Organism',
'Microscopic Scale',
'Mitochondrial Dna',
'Molecular Cloning',
'Molecular Motor',
'Motor Protein',
'Mouse Model',
'Multicelluar Organism',
'Multicellular Organism',
'Muon-catalyzed Fusion',
'Native Metal',
'Natural Philosophy',
'Natural Science',
'Neuroendocrine Tumor',
'Noncoding Dna',
'Nudist Paradise',
'Object-oriented Programming',
'Optical Microscopy',
'Ordinal Data',
'Organ Only',
'Organic Compound',
'Organisation Todt',
'Organometallic Chemistry',
'Organometallic Compounds',
'Oxidation State',
'Oxidized Cellulose',
'Parasitic Alveolates',
'Parasitic Disease',
'Parasitic Diseases',
'Parasitic Twin',
'Patent Expiration',
'Pathogenic Bacteria',
'Patient Adherence',
'Patient Monitoring',
'Periodic Table',
'Pharmaceutical Drug',
'Pharmaceutical Formulation',
'Pharmaceutical Science',
'Pharmaceutical Sciences',
'Phosphorus Monoxide',
'Phosphoryl Group',
'Phylogenetic Tree',
'Physical Object',
'Physical Organization',
'Planet Earth',
'Polymeric Molecule',
'Post-transcriptional Modification',
'Preserved Remains',
'Price Decrease',
'Price Increase',
'Problem Solving',
'Product Promotion',
'Prokaryotic Microorganism',
'Prokaryotic Translation',
'Prostate Cancer',
'Prostate Cell',
'Prostate Histopathology',
'Prostate Pathology',
'Prostatectomy Specimen',
'Protective Response',
'Protein Domain',
'Protein Engineering',
'Protozoan Infection',
'Pyrococcus Furiosus',
'Quantitative Property',
'Quantitative Research',
'Quantum Mechanics',
'Quaternary Structure',
'Regulated Market',
'Reporter Gene',
'Reproductive System',
'Research Investment',
'Resonant Converter',
'Restriction Enzyme',
'Reverse Transcription',
'Ribbon Diagram',
'Ribonuclease A',
'Ribonuclease Inhibitor',
'Ribonucleoprotein Particle',
'Rna Molecule',
'Rna Splicing',
'Ruled Surface',
'Scanning Machine',
'Scientific Instrument',
'Scientific Method',
'Segregating Organisms',
'Selective Barrier',
'Sensory Transduction',
'Sentence Analysis',
'Sex Organ',
'Simplest Enol',
'Skin Cancer',
'Small Protein',
'Smallest Constituent',
'Soft Tissue',
'Solid Material',
'Specialized Subunit',
'Spherical Proteins',
'Steroid Hormone',
'Structural Formula',
'Subatomic Particle',
'Sulfur Oxoacid',
'Summarizes Commonly',
'Sushi Domain',
'Systematic Empirical',
'Systematic Enterprise',
'T Cell',
'Tabular Display',
'Taxonomic Rank',
'Temporary Arm-Like',
'Temporary
data = lisp_process_api_database_mapping()
data = json.dumps(data)
ipc = lisp_api_ipc(process, data)
lisp_ipc(ipc, lisp_socket, "lisp-core")
return
def lisp_process_api_map_cache(mc, data):
    # Unicast entries are gathered directly; (S,G) entries walk the
    # per-source cache.
    if (mc.group.is_null()): return(lisp_gather_map_cache_data(mc, data))
    if (mc.source_cache == None): return([True, data])
    data = mc.source_cache.walk_cache(lisp_gather_map_cache_data, data)
    return([True, data])
def lisp_gather_map_cache_data(mc, data):
    entry = {}
    entry["instance-id"] = str(mc.eid.instance_id)
    entry["eid-prefix"] = mc.eid.print_prefix_no_iid()
    if (mc.group.is_null() == False):
        entry["group-prefix"] = mc.group.print_prefix_no_iid()
    entry["uptime"] = lisp_print_elapsed(mc.uptime)
    entry["expires"] = lisp_print_elapsed(mc.uptime)
    entry["action"] = lisp_map_reply_action_string[mc.action]
    entry["ttl"] = "--" if mc.map_cache_ttl == None else str(mc.map_cache_ttl / 60)

    # Build the RLOC-set array for this map-cache entry.
    rloc_set = []
    for rloc in mc.rloc_set:
        r = {}
        if (rloc.rloc_exists()):
            r["address"] = rloc.rloc.print_address_no_iid()
        if (rloc.translated_port != 0):
            r["encap-port"] = str(rloc.translated_port)
        r["state"] = rloc.print_state()
        if (rloc.geo): r["geo"] = rloc.geo.print_geo()
        if (rloc.elp): r["elp"] = rloc.elp.print_elp(False)
        if (rloc.rle): r["rle"] = rloc.rle.print_rle(False)
        if (rloc.json): r["json"] = rloc.json.print_json(False)
        if (rloc.rloc_name): r["rloc-name"] = rloc.rloc_name
        stats = rloc.stats.get_stats(False, False)
        if (stats): r["stats"] = stats
        r["uptime"] = lisp_print_elapsed(rloc.uptime)
        r["upriority"] = str(rloc.priority)
        r["uweight"] = str(rloc.weight)
        r["mpriority"] = str(rloc.mpriority)
        r["mweight"] = str(rloc.mweight)
        last_reply = rloc.last_rloc_probe_reply
        if (last_reply):
            r["last-rloc-probe-reply"] = lisp_print_elapsed(last_reply)
            r["rloc-probe-rtt"] = str(rloc.rloc_probe_rtt)
        r["rloc-hop-count"] = rloc.rloc_probe_hops
        r["recent-rloc-hop-counts"] = rloc.recent_rloc_probe_hops
        r["recent-rloc-probe-rtts"] = [str(rtt) for rtt in rloc.recent_rloc_probe_rtts]
        rloc_set.append(r)
    entry["rloc-set"] = rloc_set

    data.append(entry)
    return([True, data])
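# Example shape of one entry appended above (hypothetical values):
#
#   {"instance-id": "0", "eid-prefix": "10.0.0.0/8", "uptime": "00:05:12",
#    "expires": "00:05:12", "action": "no-action", "ttl": "60",
#    "rloc-set": [{"address": "192.0.2.1", "state": "up-state",
#                  "upriority": "1", "uweight": "100", ...}]}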
def lisp_process_api_map_cache_entry(parms):
    iid = parms["instance-id"]
    iid = 0 if (iid == "") else int(iid)

    eid = lisp_address(LISP_AFI_NONE, "", 0, iid)
    eid.store_prefix(parms["eid-prefix"])
    dest = eid
    source = eid

    # An (S,G) lookup is done when a group prefix is supplied.
    group = lisp_address(LISP_AFI_NONE, "", 0, iid)
    if ("group-prefix" in parms):
        group.store_prefix(parms["group-prefix"])
        dest = group

    data = []
    mc = lisp_map_cache_lookup(source, dest)
    if (mc): status, data = lisp_process_api_map_cache(mc, data)
    return(data)
def
# Repository: cgarcia-UCO/AgentSurvival
'''
This class holds agents (previous class) that move around the labyrinth. Agents have methods to move forward
and to turn to either side.
DONE When an agent performs an action, the Labyrinth should check whether it has exhausted the number of moves in its turn,
to prevent cheating agents. That is handled from the agent; however, it is not handled
from object functions. For example, if eating an object were to consume a move. For that,
the eat function should somehow be decorated with the agent that invokes it, so that
the number of executed moves could be checked there as well. This is now done too.
DONE The labyrinth should be the one calling the agents, telling them it is their turn and that they must make a move (up to three per turn, for example).
DONE Perhaps the solution is to define the Agents inside the Laberinth class, and have create_agent receive the movement function.
That way, all the agents' information is "protected" inside the labyrinth. This does not prevent students from defining their functions
inside classes they create themselves, but it "protects" their information inside the Laberinth.
TODO we could also define reaching a known target point (A*, or breadth/depth-first search without knowing the map)
TODO one type of object could be a box that has to be opened by solving a riddle or with another device (blue key for blue box). For the riddle case, the box could require the agent to provide a function that computes something such as the factorial of any number or the multiplication of matrices. When trying to open it (by calling the box's function that receives the riddle-solving function), the box runs test cases and opens or not depending on whether it is correct
TODO other objects could be maps, guns, spectacles, wall-building material, wall-demolition material, hearing aids, invisibility blankets, seeds for planting gardens...
'''
import asyncio
import ctypes
import signal
from datetime import datetime
import time
from string import ascii_lowercase
from abc import ABC, abstractmethod
import matplotlib.pyplot as plt
import matplotlib.transforms
import numpy as np
from matplotlib import image as mpimg
from matplotlib.image import NonUniformImage
from matplotlib.offsetbox import AnnotationBbox, OffsetImage
from v002.Agent import Agent, create_agent
from v002.Enviroment import Orientation, OrientationException, Enviroment, TooMuchMovesPerTurn
import v002.Enviroment
from scipy import ndimage
import threading
# class thread_with_exception(threading.Thread):
# def __init__(self, f, maxTime, intervalTime, parent_thread_id):
# threading.Thread.__init__(self)
# self.f = f
# self.maxTime = maxTime
# self.intervalTime = intervalTime
# self.parent_thread_id = parent_thread_id
#
# def run(self):
# try:
# self.f()
# except Exception as e:
# print( str(e))
# pass
#
# def get_id(self):
#
# # returns id of the respective thread
# if hasattr(self, '_thread_id'):
# return self._thread_id
# for id, thread in threading._active.items():
# if thread is self:
# return id
#
# def raise_exception(self):
# thread_id = self.get_id()
# res = ctypes.pythonapi.PyThreadState_SetAsyncExc(ctypes.c_long(thread_id),
# ctypes.py_object(SystemExit))
# if res > 1:
# ctypes.pythonapi.PyThreadState_SetAsyncExc(thread_id, 0)
# print('Exception raise failure')
try:
from IPython import get_ipython
if get_ipython().__class__.__name__ != 'NoneType':
from IPython import display
i_am_in_interatcive = True
import pylab as pl
else:
import matplotlib.pyplot as pl
i_am_in_interatcive = False
except:
import matplotlib.pyplot as pl
i_am_in_interatcive = False
class Time_out(Exception):
pass
class BlockingPrinter():
def __init__(self):
self._my_semphore = threading.Semaphore()
def print(self, *args):
# self._my_semphore.acquire()
# print(args, flush=True)
# self._my_semphore.release()
pass
blocking_printer = BlockingPrinter()
# def sleeper(maxTime, intervalTime, notify_thread_id):
#
# try:
# print('Sleeper', flush=True)
# now = datetime.now()
# init_time = now
#
# while (now - init_time).seconds < maxTime:
# time.sleep(intervalTime)
# now = datetime.now()
#
# print('****Killing father', flush=True)
# # raise Time_out()
# ctypes.pythonapi.PyThreadState_SetAsyncExc(ctypes.c_long(notify_thread_id),
# ctypes.py_object(Time_out))
# except Exception as e:
# print("TIMER has been killed:", e.__str__(), ':', e, flush=True)
# pass
#
# def protect_inf_loop_v4(f, maxTime, intervalTime):
# import signal
# def handler(signum, frame):
# raise Time_out('end of time')
#
# signal.signal(signal.SIGALRM, handler)
# try:
# # signal.alarm(maxTime)
# signal.setitimer(signal.ITIMER_REAL, maxTime)
# f()
#
#         finally: # I put this here to make sure the scheduled alarm is removed before leaving this function. Previously this was not the case if an exception other than Time_out was raised, for example the too_much_moves one
# signal.alarm(0)
#
# def protect_inf_loop(f, maxTime, intervalTime):
# my_id = threading.current_thread().ident
# t1 = threading.Thread(target=sleeper, args=(maxTime, intervalTime, my_id))
# try:
# t1.start()
# f()
# if t1.is_alive():
# ctypes.pythonapi.PyThreadState_SetAsyncExc(ctypes.c_long(t1.ident),
# ctypes.py_object(SystemExit))
# t1.join()
#
# for _ in range(3):
# time.sleep(intervalTime)
# finally:
# ctypes.pythonapi.PyThreadState_SetAsyncExc(ctypes.c_long(t1.ident),
# ctypes.py_object(SystemExit))
# t1.join()
class Enviroment_with_agents(Enviroment):
class _Object(ABC):
def __init__(self, pos_x, pos_y, environment):
self._pos_x = pos_x
self._pos_y = pos_y
self._environment = environment
@abstractmethod
def _get_info(self):
pass
@abstractmethod
def plot(self):
pass
def _notify_time_iteration(self):
pass
class __Food(_Object):
def __init__(self, pos_x, pos_y, period, environment):
super().__init__(pos_x, pos_y, environment)
self._period = period
self._current_nutrients = period + 1
self.__nutrients = period - 1
self.__my_avatar = pl.imread("images/PixelTomato.bmp")
self.__my_avatar_2 = pl.imread("images/PixelNoTomato.bmp")
self.__is_active = True
def is_active(self):
if self.__is_active and self._current_nutrients > 0:
return True
elif self.__is_active:
self.__is_active = False
# Either there are no nutrients left or it is inactive; return False
return False
def plot(self):
if self.is_active():
# pl.plot(self._pos_x + 0.5, self._pos_y + 0.5, 'go', markersize=3)
pl.gca().imshow(self.__my_avatar,
extent=[self._pos_x + 0.2, self._pos_x + 0.8,
self._pos_y + 0.2, self._pos_y + 0.8])
else:
pl.gca().imshow(self.__my_avatar_2,
extent=[self._pos_x + 0.2, self._pos_x + 0.8,
self._pos_y + 0.2, self._pos_y + 0.8])
def _eat(self, agent):
hidden_agent = self._environment._Enviroment_with_agents__get_hidden_agent(agent, self)
hidden_agent._check_and_increase_moves_per_turn() # This line should stop this function with an exception if too many moves have been consumed
position = hidden_agent._get_position()
num_moves = hidden_agent._get_num_moves()
if position[1] == self._pos_x and \
position[0] == self._pos_y:
# and num_moves < self._environment._max_moves_per_turn:
if self.is_active():
self._current_nutrients -= 1
hidden_agent._increase_life(1)#self.__nutrients)
hidden_agent._send_message({'type': 'life_bonus', 'amount': 1,#self.__nutrients,
'Description': 'You have been given ' +
str(1) + #str(self.__nutrients) +
' life points, because you have eaten food'})
return 1
else:
hidden_agent._send_message({'type': 'life_bonus', 'amount': 0,
'Description': 'You have been given ' + str(0) + ' life points, because you have eaten food'})
return 0
def _notify_time_iteration(self):
if not self.is_active():
self._current_nutrients += 1
if self._current_nutrients >= self.__nutrients:
self.__is_active = True
def _get_info(self):
if self.is_active():
return {'type': 'food type 1',
'Description': 'This is a piece of food from a fixed source of food.'
' You eat the food and 1) you get life points, and '
'2) in case you empty it, there will not be food for a number of epochs. '
'To eat it, you have to '
'invoke the function in the field eat_function with yourself as argument:'
'<this_dictionary>[\'eat_function\'](self). You\'d be sent a message '
'about the life_bonus in '
'case you do it right, You would not, otherwise. In addition,'
'this function returns 1 in case of success, or 0 in case there is not more food',
'eat_function': self._eat}
else:
return None
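# A minimal sketch (agent-side, hypothetical names) of consuming this object
# once the environment hands the agent the dictionary built by _get_info():
#
#   info = perceived_object  # the dictionary returned by _get_info()
#   if info is not None and info['type'] == 'food type 1':
#       gained = info['eat_function'](self)  # 1 on success, 0 if the source is empty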
class __Hidden_Agent:
def __init__(self, name, laberinth, pos_x, pos_y, orientation, life, cmap, color):
self.__position = [pos_x, pos_y]
self.__orientation = orientation
self.__path = [tuple([self.__position[0]+0.5,self.__position[1]+0.5])]
self.__laberinth = laberinth
self.__name = name
self.__num_moves = 0
self.__my_avatar = {}
self.__my_avatar[Orientation.DOWN] = pl.imread("images/face1_borders.bmp")#avatar1.bmp")
self.__my_avatar[Orientation.UP] = ndimage.rotate(self.__my_avatar[Orientation.DOWN],180)
self.__my_avatar[Orientation.LEFT] = ndimage.rotate(self.__my_avatar[Orientation.UP],90)
self.__my_avatar[Orientation.RIGHT] = ndimage.rotate(self.__my_avatar[Orientation.UP],270)
self._life = life
self._should_stop = False
self._messages = []
self._cmap = cmap
self._color = color
def _send_message(self,message):
self._messages.append(message)
def _get_position(self):
return self.__position
def _get_num_moves(self):
return self.__num_moves
def _consuming_move(f):
def inner(self):
f(self)
self._life -= 1
self._send_message({'type':'consuming move', 'Description': 'You have applied a move which consumes life, for instance moving forward',
'amount': 1})
return inner
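# Usage sketch, assumed from the decorator's shape: a life-consuming action
# would be declared as
#
#   @_consuming_move
#   def _move_forward(self):
#       ...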
def _and_plot(f):
def inner(self):
f(self)
if self.__laberinth._plot_run == 'always':
blocking_printer.print('_and_plot is going to check the semaphore')#, flush=True)
# I added a semaphore because raising exceptions into matplotlib gives me trouble.
# Now, raising a Time_out exception will acquire the semaphore before sending it,
# which avoids the problem. I also made the acquire non-blocking: if whoever
# sends the exception has already taken the semaphore (i.e., is about to raise
# it), the matplotlib drawing is cancelled.
if self.__laberinth.semaphore_for_raising_Exception.acquire(blocking=False):
blocking_printer.print('_and_plot got the semaphore and is going to plot')#, flush=True)
self.__laberinth.plot(clear=True)
blocking_printer.print('_and_plot plotted and is going to release the semaphore')#, flush=True)
self.__laberinth.semaphore_for_raising_Exception.release()
blocking_printer.print('_and_plot released the semaphore')#, flush=True)
else:
blocking_printer.print('_and_plot did not get the semaphore')#, flush=True)
return inner
def _check_and_increase_moves_per_turn(self):
if self.__num_moves < self.__laberinth._max_moves_per_turn:
self.__num_moves += 1
else:
self._send_message(
{'type': 'too much moves2', 'Description': 'You have tried to do more moves than allowed per turn'})
# print("Too much moves per turn")
raise TooMuchMovesPerTurn()
def _protected_move(f):
def inner(self):
if self.__num_moves < self.__laberinth._max_moves_per_turn:
self.__num_moves += 1
f(self)
# self._life -= 1
else:
self._send_message({'type':'too much moves', 'Description': 'You have tried to | |
import json
import time
from configparser import ConfigParser
from datetime import datetime
from captcha.helpers import captcha_image_url
from captcha.models import CaptchaStore
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.backends import ModelBackend
from django.contrib.auth.hashers import make_password
from django.core import serializers
from django.core.mail import send_mail
from django.db import transaction
from django.db.models import Q
from django.http import Http404, HttpResponse, JsonResponse, HttpResponseRedirect
from django.shortcuts import render, redirect, reverse, get_object_or_404
# Create your views here.
from django.contrib.auth.views import method_decorator,login_required
from django.views.decorators.csrf import csrf_exempt
from django.views.decorators.http import require_http_methods
from django.views.generic.base import View
from django_filters.rest_framework import DjangoFilterBackend
from pytz import unicode
from rest_framework import filters
from rest_framework.authentication import SessionAuthentication
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.settings import api_settings
from rest_framework_jwt.authentication import JSONWebTokenAuthentication
from apps.article.models import Article, Category_Article
from apps.article.serializers import ArticleSerializer
from apps.article.tasks import send_register_email
from apps.article.views import StandardResultsSetPagination
from apps.uitls.EmailToken import token_confirm
from apps.uitls.jsonserializable import DateEncoder
from apps.uitls.permissions import IsOwnerOrReadOnly, IsOwnerOrReadOnlyInfo
from apps.user.filter import CategoryFilter
from apps.user.models import User, Follows, VerifyCode, UserMessage, OAuthQQ
from apps.user.serializers import UserSerializer, UserMessageSerializer, FollowsSerializer, FollowsSerializerAdd, \
FollowsOthesSerializer
from website import settings
from .forms import CaptchaTestForm, LoginForms, Follow_Forms, RegisterForm, ModifyForm, EmailForm, InfoForm
from rest_framework import viewsets, mixins, status, permissions
def test(request):
form = CaptchaTestForm()
return render(request,'test.html',{'form':form})
def captcha_refresh(request):
print('=========')
""" Return json with new captcha for ajax refresh request """
if not request.is_ajax():
# Only accept AJAX submissions
raise Http404
new_key = CaptchaStore.generate_key()
to_json_response = {
'key': new_key,
'image_url': captcha_image_url(new_key),
}
print(to_json_response)
return HttpResponse(json.dumps(to_json_response), content_type='application/json')
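# Example JSON handed back to the AJAX caller (hypothetical key; the URL is
# built by django-simple-captcha's captcha_image_url helper):
#
#   {"key": "d2f8c3...", "image_url": "/captcha/image/d2f8c3.../"}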
def yan(request):
cs = CaptchaStore.objects.filter(response=request.POST['response'], hashkey=request.POST['hashkey'])
print(cs)
if cs:
return JsonResponse({"valid":True})
else:
return JsonResponse({'valid':False})
class CustomBackend(ModelBackend):
"""进行手机登录验证"""
def authenticate(self, request, username=None, password=None, **kwargs):
try:
user = User.objects.get(Q(email=username))
#user = User.objects.get(Q(email=username) | Q(username=username))
if user.check_password(password):
return user
except Exception as e:
return None
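# For this backend to be consulted by authenticate(), it must be registered in
# settings.py; the dotted path below is an assumption about this project's
# layout:
#
#   AUTHENTICATION_BACKENDS = (
#       'apps.user.views.CustomBackend',
#       'django.contrib.auth.backends.ModelBackend',
#   )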
def login_view(request):
if request.method == 'GET':
next = request.GET.get('next')
if next:
return render(request,'pc/logoin.html',{'next':next})
else:
return render(request, 'pc/logoin.html')
if request.method == 'POST':
form = LoginForms(request.POST)
next = request.GET.get('next')
if form.is_valid():
telephone = form.cleaned_data.get('telephone')
password = form.cleaned_data.get('password')
remember = form.cleaned_data.get('remember')
user = authenticate(request,username=telephone,password=password)
if user:
if user.is_active:
login(request,user)
if remember:
request.session.set_expiry(None)
else:
request.session.set_expiry(0)
if next:
return HttpResponseRedirect(next)
#return JsonResponse({"code": 200, "message": "", "data": {''}})
#return restful.result()
else:
return redirect(reverse('home'))
else:
return render(request, 'pc/logoin.html', {'next': next,'error':'此账号暂未激活,请先激活'})
#return JsonResponse({"code": 401, "message": "此账号暂未激活,请联系管理员", "data": {}})
#return restful.unauth(message='此账号暂无权限,请联系管理员')
else:
return render(request, 'pc/logoin.html', {'next': next, 'error': '账号或者密码错误'})
#return JsonResponse({"code": 400, "message": "账号或者密码错误", "data": {}})
#return restful.params_error(message="手机号码或者密码错误")
else:
errors = form.get_errors()
return render(request, 'pc/logoin.html', {'next': next, 'error': errors})
# return JsonResponse({"code":400,"message":"","data":errors})
#return restful.params_error(message=errors)
def logout_view(request):
logout(request)
return redirect('home')
class Register(View):
"""
注册
"""
def get(self,request):
return render(request,'pc/register.html')
def post(self, request):
form = RegisterForm(request.POST)
if form.is_valid():
username = form.cleaned_data.get('username','')
email = form.cleaned_data.get('email', '')
password = form.cleaned_data.get('password', '')
users = User()
users.username = username
users.password = make_password(password)
users.email = email
users.is_active = False
users.save()
token = token_confirm.generate_validate_token(username)
# message = "\n".join([u'{0},欢迎加入我的博客'.format(username), u'请访问该链接,完成用户验证,该链接1个小时内有效',
# '/'.join([settings.DOMAIN, 'activate', token])])
#send_mail(u'注册用户验证信息', message, settings.EMAIL_HOST_USER, [email], fail_silently=False)
send_register_email.delay(email=email,username=username,token=token,send_type="register")
return JsonResponse({'valid':True,'status':200, 'message': u"请登录到注册邮箱中验证用户,有效期为1个小时"})
return JsonResponse({'status':400,'data':form.errors,'valid':False})
def active_user(request, token):
# Activate the account referenced by the emailed verification token
try:
username = token_confirm.confirm_validate_token(token)
except Exception:
username = token_confirm.remove_validate_token(token)
users = User.objects.filter(username=username)
for user in users:
if not user.is_active:
user.delete()
return render(request, 'pc/message.html', {'message': u'Sorry, the verification link has expired; please <a href=\"' + str(settings.DOMAIN) + u'/register\">register</a> again'})
else:
return render(request, 'pc/message.html', {'message': u'This account has already been verified; please <a href=\"' + str(settings.DOMAIN) + u'/register\">register</a> again'})
try:
user = User.objects.get(username=username)
except User.DoesNotExist:
return render(request, 'pc/message.html', {'message': u'Sorry, the user being verified does not exist; please <a href=\"/register\">register</a> again'})
user.is_active = True
user.save()
msg = UserMessage()
msg.user=user
msg.to_user =User.objects.get(is_superuser=True)
msg.message = 'Welcome to the site. If you have any questions while using it, please contact the administrator'
msg.has_read = False
msg.is_supper = True
msg.save()
message = u'Verification succeeded; please <a href=\"' + str(settings.DOMAIN) + u'/login\">log in</a>'
return render(request, 'pc/message.html', {'message':message})
@method_decorator(login_required(login_url='/login'),name='dispatch')
class ResetUserView(View):
"""更换邮箱发送验证码"""
def post(self,request):
email = request.POST.get('email')
username = request.POST.get('username')
if email and username:
if User.objects.filter(email=email):
return JsonResponse({'status':400,'message':'This email is already in use'})
send_register_email.delay(email=email, username=username,send_type='update_email')
return JsonResponse({'status': 200, 'message': u"Verification code sent; valid for 30 minutes"})
return JsonResponse({'status':400,'message':'Username and email must not be empty'})
@method_decorator(login_required(login_url='/login'),name='dispatch')
class EmailView(View):
"""更换邮箱"""
def post(self,request):
forms = EmailForm(request.POST)
if forms.is_valid():
email = forms.cleaned_data.get('email')
username = forms.cleaned_data.get('username')
code = forms.cleaned_data.get('code')
end_time = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time() - 1800))
items = VerifyCode.objects.filter(send_time__lt=end_time)
for item in items:
item.delete()
existed = VerifyCode.objects.filter(code__icontains=code,email=email,send_type='update_email')
if existed:
user=request.user
user.email=email
user.save()
return JsonResponse({'status': 200, 'message': 'Email updated; please log in again'})
else:
return JsonResponse({'status': 400, 'message': 'Verification code has expired or is incorrect'})
return JsonResponse({'status':400,'message':'Validation failed; please check and resubmit'})
@method_decorator(login_required(login_url='/login'),name='dispatch')
class Modify(View):
"""密码修改"""
def post(self,request):
forms = ModifyForm(request.POST)
if forms.is_valid():
pwd1 = forms.cleaned_data.get('password')
pwd2 = forms.cleaned_data.get('password1')
email = forms.cleaned_data.get('email')
if pwd1 != pwd2:
return JsonResponse({'status':400,"email":email,"message":"Passwords do not match"})
is_user = User.objects.filter(email=email)
if is_user:
User.objects.filter(email=email).update(password=make_password(pwd2))
return JsonResponse({'status':200,"email":email,"message":"Password changed successfully"})
return JsonResponse({'status': 400, "email": email, "message": 'Email does not exist'})
else:
email = request.POST.get('email')
return JsonResponse({'status':400,"email":email, "message":'Validation failed; please check and resubmit'})
class Retrieve(View):
"""忘记密码"""
def get(self,request):
return render(request,'pc/retrieve.html')
def post(self,request):
forms = ModifyForm(request.POST)
if forms.is_valid():
pwd1 = forms.cleaned_data.get('password')
pwd2 = forms.cleaned_data.get('password1')
email = forms.cleaned_data.get('email')
captcha = request.POST.get('captcha','')
if captcha:
end_time = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time() - 1800))
if VerifyCode.objects.filter(email=email,code__icontains=captcha):
items = VerifyCode.objects.filter(email=email,code__icontains=captcha,send_time__lt=end_time)
for item in items:
item.delete()
existed = VerifyCode.objects.filter(code__icontains=captcha, email=email, send_type='forget')
if existed:
if pwd1 != pwd2:
return JsonResponse({'status': 400, "email": email, "message": "Passwords do not match"})
is_user = User.objects.filter(email=email)
if is_user:
User.objects.filter(email=email).update(password=make_password(pwd2))
# for item in existed: whether to delete the code after a successful reset, to keep it unique within 30 minutes
# item.delete()
return JsonResponse({'status': 200, "email": email, "message": "Password changed successfully"})
else:
return JsonResponse({'status': 400, "email": email, "message": "Verification code has expired"})
else:
return JsonResponse({'status': 400, "email": email, "message": "Incorrect verification code"})
return JsonResponse({'status': 400, "email": email, "message": 'Email does not exist'})
else:
email = request.POST.get('email')
return JsonResponse({'status':400,"email":email, "message":'Validation failed; please check and resubmit'})
class RetrieveEmail(View):
"""更换邮箱发送验证码"""
def post(self,request):
email = request.POST.get('email')
if email:
if User.objects.filter(email=email):
send_register_email.delay(email=email, send_type='forget')
return JsonResponse({'status': 200, 'message': u"Verification code sent; valid for 30 minutes"})
return JsonResponse({'status': 400, 'message': u"Email does not exist"})
return JsonResponse({'status':400,'message':'Email must not be empty'})
class Author(View):
#@method_decorator(login_required(login_url='/login'))
def post(self,request):
if request.user.is_authenticated:
forms = Follow_Forms(request.POST)
username = request.POST.get('username')
if forms.is_valid():
follow = Follows()
if request.POST.get('follow') == str(username):
return JsonResponse({'status': 201, 'message': 'You cannot follow yourself'})
else:
cun = Follows.objects.filter(follow=forms.cleaned_data.get('follow'),fan=username)
if cun:
cun.delete()
return JsonResponse({'status': 200, 'message': 'Unfollowed'})
follow.follow = forms.cleaned_data.get('follow')
follow.fan_id = request.user.id
follow.save()
return JsonResponse({'status':200,'message':'Followed successfully'})
else:
return JsonResponse({'status':400,'message':'Invalid form data'})
else:
return JsonResponse({'status': 403, 'message': 'Please log in first'})
"""个人中心"""
@method_decorator(login_required(login_url='/login'),name='dispatch')
class Person(View):
def get(self,request):
category = Category_Article.objects.all()
count = User.objects.filter(follow__fan__id=request.user.id)
floow = User.objects.filter(fan__follow_id=request.user.id)
return render(request,'pc/person/index.html',{'category':category,'count':count,'floow':floow})
class PersonDetaile(View):
"""个人中心(他人)"""
def get(self,request,article_id):
category = Category_Article.objects.all()
count = User.objects.filter(follow__fan__id=article_id)
floow = User.objects.filter(fan__follow_id=article_id)
user = User.objects.get(id=article_id)
is_active = Follows.objects.filter(follow=article_id, fan=request.user.id).exists()
if article_id ==request.user.id:
return redirect(reverse('user:person'))
return render(request, 'pc/person/indexOthers.html', {'category':category, 'count':count, 'floow':floow, 'user':user,'is_active':is_active})
@login_required(login_url='/login')
def Profile(request):
"""
Connections
:param request:
:return:
"""
count = User.objects.filter(follow__fan__id=request.user.id)
floow = User.objects.filter(fan__follow_id=request.user.id)
user = User.objects.get(id=request.user.id)
return render(request, 'pc/person/profile.html',{'count':count,'floow':floow,'user':user})
def ProfileOthers(request,article_id):
"""
Connections
:param request:
:return:
"""
category = Category_Article.objects.all()
count = User.objects.filter(follow__fan__id=article_id)
floow = User.objects.filter(fan__follow_id=article_id)
user = User.objects.get(id=article_id)
is_active = Follows.objects.filter(follow=article_id, fan=request.user.id).exists()
return render(request, 'pc/person/profileOthers.html',{'category':category, 'count':count, 'floow':floow, 'user':user,'is_active':is_active})
@csrf_exempt
def Guan(request):
"""Unfollow"""
if request.method == 'POST':
if request.user.id is not None:
forms = Follow_Forms(request.POST)
if forms.is_valid():
follow_id = forms.cleaned_data.get('follow','')
user = request.POST.get('user','')
Follows.objects.filter(follow_id=follow_id,fan_id=user).delete()
return JsonResponse({'message':'ok','data':200})
else:
return JsonResponse({'message':'Not logged in'})
return HttpResponse()
@login_required(login_url='/login')
def Info(request):
"""
Edit profile
:param request:
:return:
"""
count = User.objects.filter(follow__fan__id=request.user.id)
floow = User.objects.filter(fan__follow_id=request.user.id)
user = User.objects.get(id=request.user.id)
if request.method == 'POST':
forms = InfoForm(request.POST)
if forms.is_valid():
username = forms.cleaned_data.get('username')
info = request.POST.get('info')
position = request.POST.get('position')
file = request.FILES.get('file')
user = request.user
user.username=username
user.info=info
user.position=position
if file:
user.user_imag=file
user.save()
return JsonResponse({'status':200,'message':'Profile updated successfully'})
return JsonResponse({'status':400,'message':'Submission failed'})
return render(request,'pc/person/info.html',{'count':count,'floow':floow,'user':user})
class InfoOthers(View):
def get(self,request,article_id):
category = Category_Article.objects.all()
count = User.objects.filter(follow__fan__id=article_id)
floow = User.objects.filter(fan__follow_id=article_id)
user = User.objects.get(id=article_id)
is_active = Follows.objects.filter(follow=article_id, fan=request.user.id).exists()
return render(request,'pc/person/infoOthers.html',{'category':category, 'count':count, 'floow':floow, 'user':user,'is_active':is_active})
@login_required(login_url='login/')
def get_message(request):
"""
Get the count of unread messages
:param request:
:return:
"""
count = UserMessage.objects.filter(user=request.user,has_read=False).count()
return JsonResponse({"status":200,'count':count})
@login_required(login_url='login/')
def message(request):
"""
Messages
:param request:
:return:
"""
data = []
if request.method=='POST':
type = request.POST.get('id','')
if type:
UserMessage.objects.filter(id=type).update(has_read=True)
return JsonResponse({'status': 200, 'message': 'ok'})
# type = request.POST.get('type','')
# if type == 'unread':
# mssage = UserMessage.objects.filter(user_id=request.user.id,has_read=False)
# for i in mssage:
# json_list = {}
# json_list['id'] = i.id
# json_list['message'] = i.message
# json_list['has_read'] = i.has_read
# json_list['is_supper'] = i.is_supper
# json_list['ids'] = i.ids
# json_list['add_time'] = i.add_time
# data.append(json_list)
# return HttpResponse(json.dumps({'status':200,'message':data},cls=DateEncoder))
# elif type == 'read':
# mssage = UserMessage.objects.filter(user_id=request.user.id, has_read=True)
# for i in mssage:
# json_list = {}
# json_list['id'] = i.id
# json_list['message'] = i.message
# json_list['has_read'] = i.has_read
# json_list['is_supper'] = i.is_supper
# json_list['ids'] = i.ids
# json_list['add_time'] = i.add_time
# data.append(json_list)
# return HttpResponse(json.dumps({'status': 200, 'message': data}, cls=DateEncoder))
return render(request,'pc/person/message.html')
"""drf"""
class PersonApiabstohr(viewsets.ReadOnlyModelViewSet):
queryset = Article.objects.filter(is_show=True)
serializer_class = ArticleSerializer
filter_backends = (DjangoFilterBackend,)
filter_class = CategoryFilter
authentication_classes = [JSONWebTokenAuthentication]
pagination_class = StandardResultsSetPagination
class PersonApi(PersonApiabstohr):
"""
Personal center
"""
permission_classes = (IsAuthenticated, IsOwnerOrReadOnly) # deny access to anonymous users
authentication_classes = [JSONWebTokenAuthentication,SessionAuthentication]
# def list(self, request, *args, **kwargs):
# queryset = Article_add.objects.filter(authors_id=self.request.user.id).order_by('-add_time')
# serializer = ArticleSerializer(queryset, many=True)
#
# page = self.paginate_queryset(queryset)
# if page is not None:
# serializer = self.get_serializer(page, many=True)
# return self.get_paginated_response(serializer.data)
# return Response(serializer.data)
# def get_queryset(self):
# return Article_add.objects.filter(authors_id=self.request.user.id)
def get_queryset(self):
"""
This view should return a list of all the purchases
for the currently authenticated user.
"""
user = self.request.user
#User.objects.filter()
# user_id = self.request.query_params.get('pk')
# if user_id:
# return Article_add.objects.filter(authors_id=user_id).filter(is_show=True).order_by('-add_time')
# else:
return Article.objects.filter(authors_id=self.request.user.id).filter(is_show=True).order_by(
'-add_time')
class PersonOthers(PersonApiabstohr):
"""
| |
"""Obtains a meter.
Obtains meter either from cache or from Ceilometer meter list
joined with statically defined meter info like label and description.
:Parameters:
- `meter_name`: A meter name we want to fetch.
"""
meter_candidates = self._cached_meters.get(meter_name, None)
if not meter_candidates:
meter_candidates = [m for m in self._ceilometer_meter_list
if m["name"] == meter_name]
if meter_candidates:
meter_info = self._all_meters_info.get(meter_name, None)
if meter_info:
label = meter_info["label"]
description = meter_info["description"]
meter_category = meter_info["type"]
else:
label = ""
description = ""
meter_category = "Other"
for meter in meter_candidates:
meter["label"] = label
meter["description"] = description
meter["category"] = meter_category
if meter["project_id"] in self.tenant_map.keys():
meter["slice"] = self.tenant_map[meter["project_id"]]["slice"]
meter["service"] = self.tenant_map[meter["project_id"]]["service"]
else:
meter["slice"] = meter["project_id"]
meter["service"] = "Other"
if meter["resource_id"] in self.resource_map.keys():
meter["resource_name"] = self.resource_map[meter["resource_id"]]
self._cached_meters[meter_name] = meter_candidates
return meter_candidates
def _get_nova_meters_info(self):
"""Returns additional info for each meter.
That will be used for augmenting the Ceilometer meter.
"""
# TODO(lsmola) Until Ceilometer provides the information
# below, I need to define it statically here. I will be joining this
# to the info that I am able to obtain from Ceilometer meters; hopefully
# some day it will all be supported.
meters_info = datastructures.SortedDict([
("instance", {
'type': _("Nova"),
'label': '',
'description': _("Existence of instance"),
}),
("instance:<type>", {
'type': _("Nova"),
'label': '',
'description': _("Existence of instance <type> "
"(openstack types)"),
}),
("memory", {
'type': _("Nova"),
'label': '',
'description': _("Volume of RAM"),
}),
("memory.usage", {
'type': _("Nova"),
'label': '',
'description': _("Volume of RAM used"),
}),
("cpu", {
'type': _("Nova"),
'label': '',
'description': _("CPU time used"),
}),
("cpu_util", {
'type': _("Nova"),
'label': '',
'description': _("Average CPU utilization"),
}),
("vcpus", {
'type': _("Nova"),
'label': '',
'description': _("Number of VCPUs"),
}),
("disk.read.requests", {
'type': _("Nova"),
'label': '',
'description': _("Number of read requests"),
}),
("disk.write.requests", {
'type': _("Nova"),
'label': '',
'description': _("Number of write requests"),
}),
("disk.read.bytes", {
'type': _("Nova"),
'label': '',
'description': _("Volume of reads"),
}),
("disk.write.bytes", {
'type': _("Nova"),
'label': '',
'description': _("Volume of writes"),
}),
("disk.read.requests.rate", {
'type': _("Nova"),
'label': '',
'description': _("Average rate of read requests"),
}),
("disk.write.requests.rate", {
'type': _("Nova"),
'label': '',
'description': _("Average rate of write requests"),
}),
("disk.read.bytes.rate", {
'type': _("Nova"),
'label': '',
'description': _("Average rate of reads"),
}),
("disk.write.bytes.rate", {
'type': _("Nova"),
'label': '',
'description': _("Average volume of writes"),
}),
("disk.root.size", {
'type': _("Nova"),
'label': '',
'description': _("Size of root disk"),
}),
("disk.ephemeral.size", {
'type': _("Nova"),
'label': '',
'description': _("Size of ephemeral disk"),
}),
("network.incoming.bytes", {
'type': _("Nova"),
'label': '',
'description': _("Number of incoming bytes "
"on the network for a VM interface"),
}),
("network.outgoing.bytes", {
'type': _("Nova"),
'label': '',
'description': _("Number of outgoing bytes "
"on the network for a VM interface"),
}),
("network.incoming.packets", {
'type': _("Nova"),
'label': '',
'description': _("Number of incoming "
"packets for a VM interface"),
}),
("network.outgoing.packets", {
'type': _("Nova"),
'label': '',
'description': _("Number of outgoing "
"packets for a VM interface"),
}),
("network.incoming.bytes.rate", {
'type': _("Nova"),
'label': '',
'description': _("Average rate per sec of incoming "
"bytes on a VM network interface"),
}),
("network.outgoing.bytes.rate", {
'type': _("Nova"),
'label': '',
'description': _("Average rate per sec of outgoing "
"bytes on a VM network interface"),
}),
("network.incoming.packets.rate", {
'type': _("Nova"),
'label': '',
'description': _("Average rate per sec of incoming "
"packets on a VM network interface"),
}),
("network.outgoing.packets.rate", {
'type': _("Nova"),
'label': '',
'description': _("Average rate per sec of outgoing "
"packets on a VM network interface"),
}),
])
# Adding flavor based meters into meters_info dict
# TODO(lsmola) this kind of meter will be probably deprecated
# https://bugs.launchpad.net/ceilometer/+bug/1208365 . Delete it then.
#for flavor in get_flavor_names(self._request):
# name = 'instance:%s' % flavor
# meters_info[name] = dict(meters_info["instance:<type>"])
# meters_info[name]['description'] = (
# _('Duration of instance type %s (openstack flavor)') %
# flavor)
# TODO(lsmola) allow to set specific in local_settings. For all meters
# because users can have their own agents and meters.
return meters_info
def _get_neutron_meters_info(self):
"""Returns additional info for each meter.
That will be used for augmenting the Ceilometer meter.
"""
# TODO(lsmola) Until Ceilometer provides the information
# below, I need to define it statically here. I will be joining this
# to the info that I am able to obtain from Ceilometer meters; hopefully
# some day it will all be supported.
return datastructures.SortedDict([
('network', {
'type': _("Neutron"),
'label': '',
'description': _("Existence of network"),
}),
('network.create', {
'type': _("Neutron"),
'label': '',
'description': _("Creation requests for this network"),
}),
('network.update', {
'type': _("Neutron"),
'label': '',
'description': _("Update requests for this network"),
}),
('subnet', {
'type': _("Neutron"),
'label': '',
'description': _("Existence of subnet"),
}),
('subnet.create', {
'type': _("Neutron"),
'label': '',
'description': _("Creation requests for this subnet"),
}),
('subnet.update', {
'type': _("Neutron"),
'label': '',
'description': _("Update requests for this subnet"),
}),
('port', {
'type': _("Neutron"),
'label': '',
'description': _("Existence of port"),
}),
('port.create', {
'type': _("Neutron"),
'label': '',
'description': _("Creation requests for this port"),
}),
('port.update', {
'type': _("Neutron"),
'label': '',
'description': _("Update requests for this port"),
}),
('router', {
'type': _("Neutron"),
'label': '',
'description': _("Existence of router"),
}),
('router.create', {
'type': _("Neutron"),
'label': '',
'description': _("Creation requests for this router"),
}),
('router.update', {
'type': _("Neutron"),
'label': '',
'description': _("Update requests for this router"),
}),
('ip.floating', {
'type': _("Neutron"),
'label': '',
'description': _("Existence of floating ip"),
}),
('ip.floating.create', {
'type': _("Neutron"),
'label': '',
'description': _("Creation requests for this floating ip"),
}),
('ip.floating.update', {
'type': _("Neutron"),
'label': '',
'description': _("Update requests for this floating ip"),
}),
])
def _get_glance_meters_info(self):
"""Returns additional info for each meter.
That will be used for augmenting the Ceilometer meter.
"""
# TODO(lsmola) Until Ceilometer provides the information
# below, I need to define it statically here. I will be joining this
# to the info that I am able to obtain from Ceilometer meters; hopefully
# some day it will all be supported.
return datastructures.SortedDict([
('image', {
'type': _("Glance"),
'label': '',
'description': _("Image existence check"),
}),
('image.size', {
'type': _("Glance"),
'label': '',
'description': _("Uploaded image size"),
}),
('image.update', {
'type': _("Glance"),
'label': '',
'description': _("Number of image updates"),
}),
('image.upload', {
'type': _("Glance"),
'label': '',
'description': _("Number of image uploads"),
}),
('image.delete', {
'type': _("Glance"),
'label': '',
'description': _("Number of image deletions"),
}),
('image.download', {
'type': _("Glance"),
'label': '',
'description': _("Image is downloaded"),
}),
('image.serve', {
'type': _("Glance"),
'label': '',
'description': _("Image is served out"),
}),
])
def _get_cinder_meters_info(self):
"""Returns additional info for each meter.
That will be used for augmenting the Ceilometer meter.
"""
# TODO(lsmola) Until Ceilometer provides the information
# below, I need to define it statically here. I will be joining this
# to the info that I am able to obtain from Ceilometer meters; hopefully
# some day it will all be supported.
return datastructures.SortedDict([
('volume', {
'type': _("Cinder"),
'label': '',
'description': _("Existence of volume"),
}),
('volume.size', {
'type': _("Cinder"),
'label': '',
'description': _("Size of volume"),
}),
])
def _get_swift_meters_info(self):
"""Returns additional info for each meter.
That will be used for augmenting the Ceilometer meter.
"""
# TODO(lsmola) Until Ceilometer provides the information
# below, I need to define it statically here. I will be joining this
# to the info that I am able to obtain from Ceilometer meters; hopefully
# some day it will all be supported.
return datastructures.SortedDict([
('storage.objects', {
'type': _("Swift"),
'label': '',
'description': _("Number of objects"),
}),
('storage.objects.size', {
'type': _("Swift"),
'label': '',
'description': _("Total size of stored objects"),
}),
('storage.objects.containers', {
'type': _("Swift"),
'label': '',
'description': _("Number of containers"),
}),
('storage.objects.incoming.bytes', {
'type': _("Swift"),
'label': '',
'description': _("Number of incoming bytes"),
}),
('storage.objects.outgoing.bytes', {
'type': _("Swift"),
'label': '',
'description': _("Number of outgoing bytes"),
}),
('storage.api.request', {
'type': _("Swift"),
'label': '',
'description': _("Number of API requests against swift"),
}),
])
def _get_kwapi_meters_info(self):
"""Returns additional info for each meter.
That will be used for augmenting the Ceilometer meter.
"""
# TODO(lsmola) Unless the Ceilometer | |
eps_sp = params['epss']
eps_t = params['epst']
# Conditions:
# gyroradius / lengthscale < eps_sp
# and
# gyroperiod / timescale < eps_t
if self.field.static:
return self.cycrad() / self.field.lengthscale(self.trajectory[-1,:4]) < eps_sp
else:
return self.cycrad() / self.field.lengthscale(self.trajectory[-1,:4]) < eps_sp \
and self.cycper() / self.field.timescale(self.trajectory[-1,:4]) < eps_t
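# Hedged example: with rapt.params["epss"] = rapt.params["epst"] = 0.1, a
# particle whose gyroradius is 5% of the field's length scale and whose
# gyroperiod is 2% of its time scale would be judged adiabatic here.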
def _TaoChanBrizardEOM(self, t, Y):
# Phase-space preserving guiding-center EOM,
# valid with nonzero electric fields and/or time-varying fields.
# Also suitable for use with static, purely magnetic fields.
# Reference: Tao, X., A. A. Chan, and A. J. Brizard (2007),
# Hamiltonian theory of adiabatic motion of relativistic charged particles,
# Phys. Plasmas, 14, 092107, doi:10.1063/1.2773702
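# Summary of the equations as implemented below (a hedged paraphrase of the
# reference, written in this method's variable names):
#   dX/dt    = [ ppar*Bstar/(gamma*mass) + Estar x unitb ] / Bstarpar
#   dppar/dt = charge * (Estar . Bstar) / Bstarpar
# with Bstar = B + (ppar/charge)*curl(unitb) and
#      Estar = E - (ppar*db/dt + mu*gradB/gamma) / charge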
tpos = np.concatenate(([t],Y[:3]))
ppar = Y[3]
B = self.field.B(tpos)
Bmag = np.sqrt(np.dot(B,B))
unitb = B / Bmag
gamma = np.sqrt(1 + 2*self.mu*Bmag/(self.mass*c*c) + (ppar/(self.mass*c))**2)
cb = self.field.curlb(tpos)
Bstar = B + ppar * cb / self.charge
Bstarpar = np.dot(Bstar,unitb)
E = self.field.E(tpos)
dbdt = self.field.dbdt(tpos)
gB = self.field.gradB(tpos)
Estar = E - (ppar*dbdt + self.mu * gB / gamma)/self.charge
retval = np.ones(4)
retval[:3] = (ppar * Bstar / (gamma*self.mass) + np.cross(Estar,unitb) ) / Bstarpar
retval[3] = self.charge*np.dot(Estar,Bstar) / Bstarpar
if params["enforce equatorial"]:
retval[2] = retval[3] = 0
return retval
def _BrizardChanEOM(self, t, Y):
# Phase-space preserving guiding-center EOM.
# Valid only under static magnetic fields.
# Special case of TaoChanBrizardEOM when E=0, dB/dt=0.
# Reference: A. J. Brizard and A. A. Chan,
# Nonlinear relativistic gyrokinetic Vlasov-Maxwell equations,
# Phys. Plasmas 6, 4548 (1999)
gamma = 1.0/np.sqrt(1 - (self.v/c)**2)
tpos = np.concatenate(([t],Y[:3]))
ppar = Y[3]
B = self.field.B(tpos)
Bmag = np.sqrt(np.dot(B,B))
unitb = B / Bmag
gB = self.field.gradB(tpos)
cb = self.field.curlb(tpos)
Bstar = B + ppar * cb / self.charge
Bstarpar = np.dot(Bstar, unitb)
retval = np.ones(4)
retval[:3] = (ppar * Bstar / (gamma*self.mass) + self.mu * np.cross(unitb, gB) / (self.charge * gamma) ) / Bstarpar
retval[3] = -self.mu * np.dot(Bstar, gB) / (gamma * Bstarpar)
if params["enforce equatorial"]:
retval[2] = retval[3] = 0
return retval
def _NorthropTellerEOM(self,t,Y):
gamma = 1.0/np.sqrt(1 - (self.v/c)**2)
gm = gamma * self.mass
tpos = np.concatenate(([t],Y[:3]))
ppar = Y[3]
Bvec = self.field.B(tpos)
B = np.sqrt(np.dot(Bvec,Bvec))
bdir = Bvec / B
gB = self.field.gradB(tpos)
retval = np.ones(4)
retval[:3] = (gm*self.v**2 + ppar**2/gm)/(2*self.charge*B**2) * np.cross(bdir,gB) + ppar*bdir/gm
retval[3] = -self.mu * np.dot(bdir, gB) / gamma
if params["enforce equatorial"]:
retval[2] = retval[3] = 0
return retval
def advance(self, delta, eom="TaoChanBrizardEOM"):
"""
Advance the particle position and parallel momentum for a given duration.
The trajectory is initialized at the latest state of the `GuidingCenter`
and integrated for an additional `delta` seconds. Uses the
`scipy.integrate.ode` class with `"dopri5"` solver.
This method can be called many times.
Parameters
----------
delta : float
The number of seconds to advance the trajectory.
eom : {'TaoChanBrizardEOM', 'BrizardChanEOM', 'NorthropTellerEOM'}
The equation of motion that is solved.
Raises
-------
NonAdiabatic
Only if the `check_adiabaticity` attribute is set to True.
Notes
-----
Assuming that the magnetic moment is a constant of motion, and averaging
out the gyrophase, reduces the number of dynamic variables to 4.
Despite their prevalence in textbooks, Northrop-Teller equations are
not accurate enough. The equations by Tao, Chan & Brizard are designed
to preserve the phase-space volume, and give more accurate results than
Northrop-Teller equations.
Neither Northrop-Teller (as implemented here) nor the Brizard-Chan
equations are suitable for use with electric fields or with time-varying
magnetic fields; the Tao-Chan-Brizard equations must be used instead.
Tao-Chan-Brizard EOM (the default setting) reduces to Brizard-Chan EOM
under static conditions, with little computational overhead.
The explicit Runge-Kutta method of order 4(5) due to Dormand & Prince
with stepsize control is used to solve for the motion. The relative
tolerance `rtol` and the absolute tolerance `atol` of the solver can be
set with `rapt.params["solvertolerances"] = (rtol, atol)`
"""
# Set resolution of the trajectory
if params["GCtimestep"] != 0:
dt = params["GCtimestep"]
else:
dt = self.bounceperiod()/params["bounceresolution"]
t0 = self.trajectory[-1,0]
rtol, atol = params["solvertolerances"]
deriv = eval("self._"+eom)
r = ode(deriv).set_integrator("dopri5",rtol=rtol, atol=atol)
r.set_initial_value(self.trajectory[-1,1:], self.trajectory[-1,0])
while r.successful() and r.t < t0+delta:
r.integrate(r.t+dt)
nextpt = np.hstack(([r.t],r.y))
self.trajectory = np.vstack((self.trajectory,nextpt))
self.tcur = r.t
if self.check_adiabaticity and not self.isadiabatic():
raise NonAdiabatic
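# Hedged usage sketch (the constructor arguments are assumptions about the
# surrounding rapt package, not confirmed API):
#   gc = GuidingCenter(...)                          # built elsewhere
#   rapt.params["solvertolerances"] = (1e-8, 1e-12)  # (rtol, atol)
#   gc.advance(10.0, eom="BrizardChanEOM")           # static fields only
#   t, z = gc.gett(), gc.getz()                      # trajectory arrays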
def gett(self):
"""Return a 1-d array of time values along the trajectory."""
return self.trajectory[:,0]
def getx(self):
"""Return a 1-d array of x-coordinate values along the trajectory."""
return self.trajectory[:,1]
def gety(self):
"""Return a 1-d array of y-coordinate values along the trajectory."""
return self.trajectory[:,2]
def getz(self):
"""Return a 1-d array of z-coordinate values along the trajectory."""
return self.trajectory[:,3]
def getpp(self):
"""Return a 1-d array of parallel momentum values along the trajectory."""
return self.trajectory[:,4]
def getr(self):
"""Return a 1-d array of radial distance along the trajectory."""
return np.sqrt( self.getx()**2 + self.gety()**2 + self.getz()**2 )
def gettheta(self):
"""Return a 1-d array of azimuthal angle coordinate (radians) along the
trajectory."""
return np.arctan2( self.gety(), self.getx() )
def getphi(self): # Polar angle
"""Return a 1-d array of polar angle coordinate (radians) along the
trajectory."""
return np.arccos( self.getz() / self.getr() )
def getB(self):
"""Return a 1-d array of total magnetic field strength values along the
trajectory."""
out = np.zeros(len(self.trajectory))
for i, row in enumerate(self.trajectory):
out[i] = self.field.magB(row[:4])
return out
def getp(self):
"""Return a 1-d array of total momentum values along the trajectory."""
two_m_mu = 2*self.mass*self.mu
mc = self.mass * c
gamma = self.getgamma()
B = self.getB()
pp = self.trajectory[:,4]
assert B.shape[0] == pp.shape[0] == gamma.shape[0]
res = np.zeros(gamma.shape[0])
# Try to vectorize this later.
for i,g in enumerate(gamma):
if g-1 < 1e-6: #nonrelativistic
res[i] = np.sqrt(two_m_mu*B[i] + pp[i]**2)
else: # relativistic
res[i] = mc*np.sqrt(g**2-1)
return res
def getv(self):
"""Return a 1-d array of particle speed values along the guiding-center trajectories."""
mom = self.getp()
gamma = self.getgamma()
assert mom.shape[0] == gamma.shape[0]
return mom/gamma
def cycrad(self):
"""Return the cyclotron radius value at the final position."""
t, r, pp = self.trajectory[-1, 0], self.trajectory[-1, 1:4], self.trajectory[-1, 4]
# The advancer stores parallel momentum at last element. Convert to parallel speed.
Bmag = self.field.magB(self.trajectory[-1,:4])
gamma = np.sqrt(1 + 2*self.mu*Bmag/(self.mass*c*c) + (pp/self.mass/c)**2)
if gamma-1 < 1e-6: # nonrelativistic
vp = pp/self.mass
v = np.sqrt(2*self.mu*Bmag/self.mass + vp**2)
else:
vp = pp / self.mass / gamma # parallel speed
v = c * np.sqrt(1-1/gamma**2)
return ru.cyclotron_radius2(t, r, vp, v, self.field, self.mass, self.charge)
def cycper(self):
"""Return the cyclotron period value at the final position."""
t, r, pp = self.trajectory[-1, 0], self.trajectory[-1, 1:4], self.trajectory[-1, 4]
Bmag = self.field.magB(self.trajectory[-1,:4])
gamma = np.sqrt(1 + 2*self.mu*Bmag/(self.mass*c*c) + (pp/(self.mass*c))**2)
if gamma - 1 < 1e-6: # nonrelativistic
vp = pp/self.mass
v = np.sqrt(2*self.mu*Bmag/self.mass + vp**2)
else:
v = c * np.sqrt(1-1/gamma**2)
return ru.cyclotron_period2(t, r, v, self.field, self.mass, self.charge)
def getgamma(self):
"""Return a 1-d array of relativistic factor values along the trajectory.
The relativistic factor is defined as :math:`\gamma = \sqrt{1 + 2\mu B / (mc^2) + (p_{||}/mc)^2}`
"""
mc = self.mass*c
mc2 = mc*c
pp = self.trajectory[:,4]
B = self.getB()
return np.sqrt(1 + 2*self.mu*B/mc2 + (pp/mc)**2)
def getBm(self):
"""Returns an array of mirror field values at each trajectory point.
Defined using the conservation of magnetic moment.
"""
mc = self.mass*c
n = self.trajectory.shape[0]
gamma = self.getgamma()
B = self.getB()
pp = self.trajectory[:,4]
assert B.shape[0] == pp.shape[0] == gamma.shape[0] == n
res = np.zeros(n)
# Try to vectorize this later.
for i,g in enumerate(gamma):
if g-1 < 1e-6: #nonrelativistic
res[i] = (B[i] + 0.5*pp[i]**2/(self.mu*self.mass))
else: # relativistic
res[i] = B[i]/(1 - (pp[i]/mc)**2/((g-1)*(g+1)))
return res
def getke(self):
"""Return a 1-d array of kinetic energy values (Joule) along the
trajectory."""
mc = self.mass*c
mc2 = mc*c
n = self.trajectory.shape[0]
gamma = self.getgamma()
B = self.getB()
pp = self.trajectory[:,4]
assert gamma.shape[0] == B.shape[0] == pp.shape[0] == n
res = np.zeros(n)
# Try to vectorize this later.
for i,g in enumerate(gamma):
if g-1 < 1e-6: #nonrelativistic
res[i] = self.mu*B[i] + 0.5*pp[i]**2/self.mass
else: # relativistic
res[i] = (g-1)*mc2
return res
| |
0.10318303432144482,
0.1328119530274221,
0.1758696096352922,
0.25000788081411496,
0.31184480110955365,
0.35624810835927434,
0.46272121272437644,
0.5265482261571047,
0.5435536167799155,
0.5461021554512147,
0.5463637065698664,
0.622818968965978,
0.7144836403223405,
0.7803053944901569,
0.816938634686397,
0.8695491790736293,
0.9073272921465351,
0.9146474203236065,
0.930416538465369,
0.9530632354405895,
0.9693251747347461,
0.9702771387444298,
0.9730679590738663,
0.9792875643693388,
0.9888367029744792,
1.0],
'KOR': [0.0034404893541562544,
0.006765967464638198,
0.008969692868956164,
0.009967688908325334,
0.010310102685023668,
0.018240438031941062,
0.029291680988189666,
0.03699185793754822,
0.040568693911937126,
0.06510481233647766,
0.09929688543944976,
0.12312090362460991,
0.1800561794679972,
0.259397788441807,
0.34747544276006015,
0.41560256672047485,
0.5383423392908551,
0.5936252093683527,
0.6046917869106097,
0.6059379032990859,
0.6060323689897882,
0.7009702708390034,
0.7864916887697865,
0.8526415912136966,
0.8783212294559235,
0.918047080724972,
0.9487746019650396,
0.9526300327811269,
0.9615764363324512,
0.9754163313309868,
0.9861213421474357,
0.9864904260629377,
0.9876499416649218,
0.990409751427689,
0.9948987519031116,
1.0],
'MEX': [0.000823881939028403,
0.0019013844108284159,
0.0028170919893686255,
0.0033488336224711326,
0.003582754787619614,
0.00616985264933648,
0.010791950218249491,
0.014920859729692173,
0.01737975687173743,
0.02770260012305063,
0.04614534610580261,
0.06262021020591586,
0.0935122845939909,
0.1487039277245854,
0.21033554327630247,
0.2718150021788897,
0.38192577325761745,
0.43122834571016155,
0.4410396520948341,
0.442137915777727,
0.44222065770016633,
0.5520595863842004,
0.6504212940809434,
0.7485401655691618,
0.7779014180168283,
0.836478948299807,
0.8949118618849696,
0.8992940710061933,
0.912408241866021,
0.9385718308357679,
0.9646708269177378,
0.9650877176636061,
0.9667766262266213,
0.9719600297234545,
0.9828300216189908,
1.0],
'NGA': [0.0019618079981748608,
0.003910403376517813,
0.0051827079407294925,
0.005748371550083366,
0.005938621544132785,
0.01151296337080064,
0.01911035463917691,
0.024287679234581493,
0.026639775194831624,
0.046423414110336333,
0.07338696428845855,
0.09176156769972313,
0.14442146651503743,
0.21619278372042655,
0.30963919011993735,
0.39255077052011705,
0.5199109027323835,
0.568820244463767,
0.5771679595158994,
0.5779693901541003,
0.5780208017688734,
0.6910227913429755,
0.7778137333497667,
0.8548201634893966,
0.8770400307171516,
0.9164697825742544,
0.9514543600151736,
0.9542986804488683,
0.9618696650830102,
0.9753045808895846,
0.9872248905621059,
0.9874549542817542,
0.9882821441586094,
0.9905320354827083,
0.9946989597687592,
1.0],
'PAN': [0.008149031017802979,
0.013986362469478944,
0.016936134574588962,
0.017951694899323068,
0.018216254700629608,
0.034419488555399835,
0.05153062629208639,
0.060565583056524555,
0.06374598500646815,
0.10713317190435762,
0.1529514411258801,
0.17714422642210814,
0.2642772026439289,
0.3562924368189315,
0.47294985095581815,
0.5510428581225867,
0.6742368513213138,
0.7228223793694206,
0.7313384999814792,
0.7321781517496274,
0.732233533108198,
0.8147022682927634,
0.8797506840858633,
0.9232955066614503,
0.9403981350604526,
0.9632958770002261,
0.978624122861285,
0.9808724473945426,
0.9853876792429761,
0.9914328678099982,
0.9954796483561106,
0.9956666738645693,
0.9961742015087849,
0.9972163255849006,
0.9986742073615619,
1.0],
'PER': [0.0006786373781487426,
0.0015460942377132987,
0.0022537217936055977,
0.0026471961366107853,
0.0028127794972539655,
0.005160162133526416,
0.009165846402152985,
0.012583590533847241,
0.014527657043069074,
0.024459390163947683,
0.041407367246062976,
0.055867780332077045,
0.08738351660153515,
0.1411634530456503,
0.20783470572077722,
0.2783559192387733,
0.3921268843484369,
0.43801319236936004,
0.4462385023126464,
0.4470678639719709,
0.4471238633248029,
0.567464582911137,
0.6645366490762682,
0.7672141844455443,
0.7933150367089034,
0.8485311516493298,
0.9069357454601302,
0.9104447595142598,
0.9215796903616554,
0.9451355350260306,
0.9700516223128216,
0.9703504980646905,
0.971632428317736,
0.9757937060593154,
0.9850012508781514,
1.0],
'POL': [0.0015418659020656322,
0.003703824530732281,
0.00582318185941402,
0.007258066157260647,
0.007997345383525737,
0.01136016214475608,
0.018445237684089205,
0.025908965112950155,
0.031150709003382956,
0.042118955820712935,
0.0652278144807374,
0.08957169356473231,
0.1164024236017121,
0.17293174487757776,
0.21668763320422488,
0.25236645671396946,
0.34455517709238676,
0.40410562837540637,
0.42120223066617873,
0.4239631714234716,
0.42427013155791754,
0.4994413875821644,
0.596557004360013,
0.6757456823543919,
0.7175677099083807,
0.7857715927220097,
0.8413854630066367,
0.8503906160744878,
0.8724191895492786,
0.9083436612140376,
0.9376366988650909,
0.9389067575958409,
0.9431416997858133,
0.9538877803150962,
0.9727203067374939,
1.0],
'SEN': [0.002135642655974979,
0.004254594279384774,
0.005645464685662099,
0.006267488731528276,
0.006477979672178944,
0.012351390067748515,
0.020408569473791466,
0.02593501285242303,
0.028462082180126327,
0.048866328574973986,
0.07685699545777913,
0.0960558773322404,
0.14921918998646536,
0.22214893931877042,
0.31449369794328114,
0.3946951942353059,
0.5213742848036506,
0.5713970173247227,
0.5801760642066186,
0.5810427264000255,
0.5810999643043299,
0.6911208551900361,
0.7780104211288841,
0.8534740782357909,
0.876347905828178,
0.9160797807317593,
0.9505869456777619,
0.9535977328941146,
0.9614423461682772,
0.9750684519005656,
0.9869027355636646,
0.9871535175372027,
0.9880364611869862,
0.990388667369913,
0.9946581246172272,
1.0],
'SRB': [0.001594659862233226,
0.003529886278221783,
0.005110894467664114,
0.00599581369504894,
0.006371463271092955,
0.010487663253027935,
0.017596083536387336,
0.02373398246581712,
0.027267238497970855,
0.04156886440123148,
0.06626687909972187,
0.08759284351009616,
0.12486080383293237,
0.18922024366601542,
0.25396350554030966,
0.31020068516877136,
0.4220082547567139,
0.47758062120428296,
0.4898568240647056,
0.49138225218745785,
0.49151054298776,
0.5886286398014045,
0.6851709845587199,
0.7690294283055229,
0.8010194302963461,
0.8565936301803674,
0.9048663951154103,
0.910166454016742,
0.9239776378765079,
0.9479709309271859,
0.9688119434450465,
0.9693754363991872,
0.9713666360411124,
0.9767030668221697,
0.9865014435136619,
1.0],
'SWE': [0.0006821164594566296,
0.0016473364381152965,
0.0025383223750536617,
0.003101365124848921,
0.0033711260615171385,
0.005496232027516679,
0.0096362080375773,
0.013668807267474055,
0.016287478890923992,
0.024866890256645225,
0.04158067197050023,
0.057860955504550726,
0.08383835400605771,
0.13444562174738162,
0.18688319227833428,
0.2398080223576265,
0.34196306191674675,
0.3912577506842435,
0.40182976989117786,
0.40310514602215236,
0.4032091087476661,
0.5063133917200908,
0.6058188798811559,
0.7062489913366972,
0.7382597603973124,
0.8028761984239562,
0.8680930642753328,
0.8732419729174041,
0.8888322326614877,
0.9203024872706562,
0.952065169272851,
0.9525955811067236,
0.9547723366472328,
0.9615452150939918,
0.9759717781618154,
1.0],
'SWI': [0.0006250465068037326,
0.0014720536234548217,
0.0022090150439624616,
0.002646811179814862,
0.002843774055560762,
0.00495431678128849,
0.00880866561027365,
0.012328140652025484,
0.014470604505904787,
0.023361090199849225,
0.03959721406782192,
0.05442270995921655,
0.0825105336185233,
0.1338055246700131,
0.19296433519136788,
0.25526472743165335,
0.36330266917884824,
0.4101410454604615,
0.41916599625179846,
0.4201441573363479,
0.42021539558143145,
0.5339906065494358,
0.6326419887704912,
0.7365321744124005,
0.7650448153509776,
0.8250983796003319,
0.8883410407557893,
0.8924614629687825,
0.9054791648699587,
0.9328971581447777,
0.9617711622095845,
0.9621499644969084,
0.9637692683767213,
0.9690116592883399,
0.9805994308273742,
1.0],
'TUN': [0.0033121414582473613,
0.0064433905439675,
0.008457040745891323,
0.00934093568031249,
0.009634736475223525,
0.017542510368192064,
0.028210324436020912,
0.03540591819768355,
0.03864160573739688,
0.06353791476034411,
0.09712375057542318,
0.11977787907209604,
0.17856429083810746,
0.25786884846996655,
0.3504084616080092,
0.42324485136610795,
0.5480834469338497,
0.6015755097443671,
0.611762533043305,
0.6128537918957471,
0.6129322967549581,
0.7111906781690004,
0.7953961033639292,
0.8616727924624158,
0.885726902080405,
0.9235920809807405,
0.9533951332642085,
0.9568307800065344,
0.9649432011646575,
0.9777135045907909,
0.9877647979357016,
0.9880767550488468,
0.9890732715567353,
0.9914837863922169,
0.9954638070051662,
1.0]},
'BEL': {'COL': [0.004092405236910547,
0.007896016433350453,
0.0103400511858081,
0.011413332496202216,
0.011770426092187395,
0.020814507300706338,
0.03303664692750755,
0.04129512524127572,
0.04501528478461361,
0.07208237408742506,
0.10866074364457251,
0.13337668381255116,
0.19413138955062137,
0.27623508581293227,
0.36714830341149873,
0.43516947999128386,
0.5580292834133054,
0.6135066070052928,
0.6246402852926334,
0.625897137932618,
0.6259926716588555,
0.7179162511325051,
0.8009324048646761,
0.8630450063229664,
0.8880356265024131,
0.9254315395053048,
0.953411123430757,
0.9571726268784367,
0.9656156859949848,
0.9782498624153196,
0.9877027403128158,
0.9880638024599616,
0.9891610721031157,
0.9916875009595058,
0.99566303715611,
1.0],
'ENG': [0.0026299338000984143,
0.005294145815598845,
0.007117765491296413,
0.007970314607894194,
0.008272207078726416,
0.01480076006253717,
0.024186951522497238,
0.030934280550958506,
0.034167856825391314,
0.055505322020191566,
0.0861824926099486,
0.10823499010206836,
0.16053839738868111,
0.2357357337690102,
0.3212081298202509,
0.3910461276270402,
0.5139309806515766,
0.5679871090382992,
0.5785555031957083,
0.5797177425957513,
0.5798036900870089,
0.6802107512260261,
0.7685473827029825,
0.8407256971104594,
0.8666314464079502,
0.9089657144571757,
0.9435563034164626,
0.9473548852473733,
0.9566661492986472,
0.9718822908468836,
0.9843151336027655,
0.9846697760040161,
0.9858462874662313,
0.9888025935712083,
0.9938762644350562,
1.0],
'JPN': [0.05029762616732798,
0.07994073781183947,
0.09556066736734517,
0.10126181812303953,
0.10284731679233948,
0.14456676594914017,
0.19210300117968102,
0.21918501814735902,
0.22947100509998702,
0.29731622523431833,
0.37462084954639835,
0.4186623083860892,
0.5014110141116095,
0.5956970610248135,
0.6629810768645068,
0.6903358176294727,
0.7670009910403188,
0.8207169891612248,
0.8374443204947967,
0.8403743513631374,
0.8407299235196581,
0.8718986330293788,
0.9155757882999455,
0.9333330110944962,
0.9537348170780925,
0.970323782729253,
0.9770681312834116,
0.9818330213594006,
0.9876446096471502,
0.9923700853507069,
0.9942912572850622,
0.9950257251498995,
0.9962491473565258,
0.9978018912212394,
0.9991669503313048,
1.0],
'PAN': [0.08373235641770715,
0.1146477831955161,
0.12490948988928315,
0.127238550695288,
0.1276391376734761,
0.19941891857662716,
0.24958075975528354,
0.2671080514157843,
0.27119091527848443,
0.38001252692666565,
0.4560602987590517,
0.4826325187118671,
0.6063666296206788,
0.6928356985102107,
0.7866291400304578,
0.8221779858887164,
0.8877236264289998,
0.917937202215441,
0.9241270208536257,
0.9248403274632934,
0.924895734625263,
0.9497383231312446,
0.9726409425692392,
0.9813213116329532,
0.9883593594460908,
0.9936943695021675,
0.9957164026748685,
0.9967978099163878,
0.998027409088217,
0.9989594749281802,
0.9993127391259486,
0.9994188405473133,
0.9995822320908373,
0.9997729066363532,
0.9999250569118475,
1.0],
'POL': [0.02694350187481301,
0.047287338021113005,
0.06032528050357386,
0.06612988353637128,
0.06810137121109601,
0.09418083960805403,
0.13054170151831887,
0.15588946280572988,
0.16766969331601914,
0.21582963789152335,
0.282975833805554,
0.3297845631912042,
0.3964858919782445,
0.489483102210476,
0.5510704829155029,
0.5795032407861592,
0.6653704145776154,
0.7302003157823588,
0.7519544113730636,
0.7560605086325621,
0.756600827213402,
0.7962427235483563,
0.8561021578230984,
0.8837371827506854,
0.9138665173097279,
0.9416858551193429,
0.9545290786337701,
0.9621116494680034,
0.9726134794581165,
0.9823101408809429,
0.9867867531874387,
0.9880557389623232,
0.9904606438183907,
0.9939375832323878,
0.9974296585257816,
1.0],
'SEN': [0.0298442138417624,
0.04607235088085045,
0.05306916734154027,
0.055131994465400895,
0.055592872115727485,
0.09321687205657506,
0.12737167663374058,
0.1428744232521013,
0.14756551831546416,
0.22156836213367045,
0.2887476273032204,
0.3192400613494536,
0.4284074990579346,
0.5275089371555622,
0.634869941507516,
0.6876621728606053,
0.7851237433638548,
0.8301055403134362,
0.839332479922627,
0.8403971157627421,
0.8404799222628221,
0.888404341460465,
0.9326418082310853,
0.9543945348782135,
0.968005921995994,
0.9813920765331233,
0.9879744010141394,
0.9900684388602794,
0.9931575194189842,
0.9961954838364717,
0.9976893308933108,
0.9978950600949552,
0.99830609946244,
0.9989284446846252,
0.9995727768872601,
1.0],
'TUN': [0.04291831857441963,
0.06468366570884883,
0.0738717170388148,
0.07652960986476974,
0.07711278436756752,
0.12299843309763313,
0.16396147729082303,
0.18224574520478823,
0.18768665679936217,
0.26947865752045186,
0.34249602641278937,
0.37508806544445594,
0.48443485851227375,
0.5820509463477223,
0.6795072668420169,
0.7229366813580701,
0.8099379044811886,
0.853509829806827,
0.8632083533623016,
0.8644226562449463,
0.8645256028098253,
0.9032959171748,
0.942129790578433,
0.9594353114185004,
0.972401156964939,
0.983957084336943,
0.9891067468463699,
0.99127126374875,
0.9941649817129729,
0.9967440340786557,
0.9978933359780068,
0.9981253359005201,
0.9985459812473086,
0.9991244652439277,
0.9996696238173369,
1.0]},
'BRA': {'BEL': [0.019689757590574505,
0.0326746941769816,
0.03930546659883568,
0.04162664562377806,
0.042243054363022614,
0.06917811019691952,
0.09828777524655452,
0.11401769621882796,
0.119684323056159,
0.17701059639326175,
0.23896512233438938,
0.2724433432368599,
0.36394960282063954,
0.4628436456938153,
0.560220701106377,
0.6120329539727477,
0.7172717730069432,
0.770710913456386,
0.7827712782456577,
0.7843023091279884,
0.7844340183197586,
0.8404293478155667,
0.8972969999785392,
0.9275550639680635,
0.9468062520680217,
0.9672925446877191,
0.978192863751683,
0.9814513791744569,
0.9866527387043309,
0.9921878034907727,
0.9951328933287417,
0.9954873506525111,
0.9962548870351293,
0.9975156969715039,
0.998935475160312,
1.0],
'COL': [0.008859322207029232,
0.014131524364975948,
0.016303075136886225,
0.016909748739629325,
0.017037758207978454,
0.03600393969214552,
0.05218147145101072,
0.059080923879734216,
0.061042592879503296,
0.11275593421511416,
0.15686572102234636,
0.17567782290870862,
0.2814293288586235,
0.371631900603534,
0.515803349361107,
0.6140781041750086,
0.7370516290748038,
0.775521549083552,
0.7808702513859754,
0.7812885612414758,
0.7813102456075751,
0.8651353934594918,
0.9175815884467853,
0.953331641579463,
0.9642695022697672,
0.9791811312613815,
0.9893456728239133,
0.9904862398041249,
0.9928186490563398,
0.9959984318685737,
0.9981659371473701,
0.9982403652862512,
0.9984454556378016,
0.9988722858813355,
0.9994748070592421,
1.0],
'CRC': [0.047266084833247984,
0.06174670887559639,
0.06523086521900495,
0.0657979828887677,
0.06586762662470119,
0.1283916224656355,
0.15937944195819995,
0.16705845338404268,
0.16832706402835024,
0.2853099230373165,
0.3432883626737778,
0.35765584989009364,
0.5218126962078737,
0.6031712634866729,
0.7567406007456408,
0.8285729424211892,
0.9046841832107306,
0.9248454394060561,
0.9272190193115672,
0.9273762048733801,
0.9273830672127071,
0.9629842409322024,
0.9818451707177975,
0.9906674055262523,
0.993998144705247,
0.9971140635339416,
0.9985715403319212,
0.9988656356815301,
0.9992783266134968,
0.999664400507681,
0.9998449872860968,
0.9998611324279961,
0.9998916108719899,
0.9999350195993373,
0.9999768436895939,
1.0],
'ENG': [0.005939419483608911,
0.009798666398048578,
0.011491292972698746,
0.011994602016626645,
0.012107615745161709,
0.026397705527977686,
0.03936522786311937,
0.04524890802426888,
0.04702862051644847,
0.08957929466642794,
0.1281918454482093,
0.1457112985198636,
0.2407365869261141,
0.32696715430107376,
0.4684424754654107,
0.5737579490316852,
0.7021395347986262,
0.741264439729293,
0.7465637742671659,
0.7469675236809769,
0.7469878943565978,
0.8425562761136489,
0.9008061229010238,
0.9441678212459204,
0.9560024349831231,
0.9736220161220788,
0.9867381873130462,
0.9879404057739808,
0.9906252317669828,
0.9946224479396553,
0.9975980108202144,
0.9976743530335668,
0.9979040217386356,
0.9984257709320148,
0.9992293650339447,
1.0],
'GER': [0.006820898046071782,
0.01228118312386284,
0.015369241515898512,
0.016561751252784654,
0.01691052110080905,
0.030460448752552993,
0.04654769417673408,
0.05609754093567696,
0.05987691483156295,
0.09685124304911821,
0.1407492702355202,
0.16680838859893551,
0.24247858675107556,
0.33231855344737093,
0.43556097712203057,
0.5059915978056664,
0.6285668801635583,
0.6818984414194423,
0.6922113976561765,
0.6933331696127418,
0.6934151745619699,
0.777034416629969,
0.8497985929750697,
0.8994373550373443,
0.9205434434935369,
0.9493400342217382,
0.9689846888740838,
0.972045721048817,
0.97831030846334,
0.9868575463957256,
0.9926883594596038,
0.9929708389802306,
0.9937531113745386,
0.9953938130067866,
0.9977434861845609,
1.0],
'JPN': [0.08898666737520174,
0.12215555057325339,
0.13345494842417172,
0.13609051006504008,
0.13655661789458137,
0.2090824490439459,
0.26124135868972054,
0.279997102762706,
0.2844933360124723,
0.39194644856301586,
0.469224255654099,
0.49701246637716023,
0.6164130773093625,
0.7022832504403267,
0.7907342690758709,
0.8234961730417886,
0.8871081112433201,
0.9179860379807666,
0.9246475853189231,
0.9254559822035375,
0.9255222915629449,
0.9490838993611729,
0.9719580207911853,
0.980430503993246,
0.9878327355624389,
0.9933162495692731,
0.9953473212523241,
0.9965450291593054,
0.9978759078320346,
0.9988618121198282,
0.9992269871296194,
0.9993511490673874,
0.999538164456638,
0.9997517449334227,
0.9999187536674785,
1.0],
'KOR': [0.07626731973350379,
0.10098950638662454,
0.10789800752992372,
0.10921117645765557,
0.10939991790133961,
0.18435642832913518,
0.22798516715730732,
0.2406823087456316,
0.24314578288132144,
0.3627709325676053,
0.43239923280673315,
0.4526629491193323,
0.5958477407651215,
0.6791890255077988,
0.7934452924796085,
0.8390314695878158,
0.9055347900164906,
0.9297893552629957,
0.9337208811644591,
0.9340793499254199,
0.934101141485655,
0.9606347594979906,
0.9799890222673588,
0.9877110218896717,
0.9924168443569987,
0.9961719198923875,
0.9976701268874852,
0.9982422170093845,
0.9989269777400636,
0.9994733919503787,
0.9996914013044915,
0.9997352325402088,
0.9998060521422588,
0.9998925754591841,
0.9999644793927283,
1.0],
'MEX': [0.028293137255475297,
0.04151783504284316,
0.046256908061685545,
0.04741192908410217,
0.04762477985858162,
0.08798836684054502,
0.1181086971643748,
0.12934697362407127,
0.1321424024131251,
0.2152183028375183,
0.27721164262296544,
0.30034214231132905,
0.42858158559484466,
0.5242771042802765,
0.6562476254515195,
0.7241527021279093,
0.8226324455774205,
0.8583376547626627,
0.8640911762181936,
0.864612681148867,
0.8646441894009084,
0.9153166764886345,
0.9520607151717024,
0.970967261436958,
0.9798486384019726,
0.9889884156559957,
0.9936912634566362,
0.9947646192936884,
0.9964214963697784,
0.9981265796549954,
0.9990039256728387,
0.9990856536074236,
0.9992559402044217,
0.9995242146258829,
0.9998116676697889,
1.0],
'PAN': [0.12989147546644578,
0.15953976351607108,
0.1658500731412527,
0.16676137842562141,
0.16686079132537518,
0.2711112916414207,
0.3170944777839385,
0.3272356924611211,
0.3287267336257468,
0.472718204543847,
0.5362304811285076,
0.5502375929777253,
0.699399061994438,
0.7651917399604274,
0.8682031368501713,
0.9037731401463555,
0.9492097780869102,
0.963719813763253,
0.9657792495438297,
0.9659436679955667,
0.9659523750533926,
0.9816417202143257,
0.9916623981668379,
0.99512255460159,
0.9972559341998242,
0.9987292531069043,
0.9992379925276236,
0.9994650883654366,
0.9997003380546008,
0.9998628022570415,
0.9999189014110265,
0.9999340426802916,
0.9999551825823354,
0.9999774776755295,
0.9999934319766757,
1.0],
'POL': [0.052894856194550456,
0.07847468045682426,
0.08909280967835997,
0.09211678160644195,
0.09277032368826399,
0.14401474426100985,
0.1891100767482794,
0.20895212969188257,
0.21477249650594105,
0.3009868202432716,
0.37685583292106867,
0.41023837792740003,
0.5190245082184082,
0.6147568174268282,
0.7062684644531243,
0.7447585804222444,
0.825289262650375,
0.8674117051063233,
0.8772039709407063,
0.8784844585223018,
0.8785981251530233,
0.9124696092619523,
0.9479032998951491,
0.9628068330081938,
0.975162815874207,
0.9855567543264053,
0.9899284808705254,
0.9920827909333076,
0.9948011217746081,
0.9970878005247792,
0.9980495855339913,
0.9982914830515016,
0.9987057625707981,
0.9992441830484454,
0.9997242800275655,
1.0],
'SEN': [0.052094126032209996,
0.06991871843959836,
0.074846673493342,
0.075771128074654,
0.07590212888378352,
0.1384905733563384,
0.1743522482736759,
0.18462618732676045,
0.18658842740449996,
0.29874460443924006,
0.36300739918811603,
0.38141791788677404,
0.5321527079073659,
0.618520120997844,
0.7535757046388238,
0.8140793570143438,
0.891462961561792,
0.9162061878698072,
0.9197224494960267,
0.9200035286008466,
0.9200184329903915,
0.9546855721258581,
0.9768550452098975,
0.9867867641115821,
0.9915125221741227,
0.9957467139834276,
0.9976435927530051,
0.9981472761103183,
0.9988242125672194,
0.9994307351109676,
0.99970245160099,
0.9997360779284215,
0.999796989036187,
0.9998803347204464,
0.9999577169868669,
1.0],
'SRB': [0.04719370407946285,
0.06708165461334302,
0.0739399423339882,
0.07555198308686051,
0.07583874175586533,
0.1297861726722977,
0.16869885095478923,
0.18273285132695027,
0.18610712718662442,
0.28279200274097627,
0.3525315138412767,
0.37768332471460625,
0.5076426898154353,
0.6013833346998801,
0.7178402709102468,
0.7700189576135217,
0.8540202013342544,
0.8878281092773794,
0.8938755046034704,
0.8944839770957346,
0.8945249709637574,
0.9321618410499841,
0.9624571981847609,
0.9760310736441311,
0.9841597044417236,
0.9914437923374932,
0.994707437703759,
0.9957979453737894,
0.9972637556530307,
0.9985772721822448,
0.999165795027579,
0.9992584704758832,
0.9994268434161322,
0.9996583482798627,
0.9998753097372896,
1.0],
'SWE': [0.02583734936305206,
0.038965524112777664,
0.04407926505706997,
0.045436109045824416,
0.04570849594625279,
0.08253501702213116,
0.11250052558657658,
0.1246918985060512,
0.12799857966008643,
0.20468817090082353,
0.26709000892383894,
0.2924780004729592,
0.41225499609019467,
0.5097167840390634,
0.6344319154928729,
0.6993603432489242,
0.8008402612676512,
0.840492283182381,
0.8473782995276098,
0.848050955941699,
0.8480948945236916,
0.9009267477957162,
0.9422135332925575,
0.9637080066428428,
0.9744628735844801,
0.9856611394742076,
0.9914911120632476,
0.9928918891101937,
0.9950796810060469,
0.9973576706356821,
0.9985436236421268,
0.9986590122448481,
0.998902503466234,
0.9992912520441093,
0.9997140288326495,
1.0],
'SWI': [0.022871405017435724,
0.03406172586648111,
0.03816601787827149,
0.03918919200267599,
0.039382005260761244,
0.07479429956087408,
0.1018061700162803,
0.11210825803288926,
0.11472767474888883,
0.1916733528970417,
0.250366152426686,
0.2727510660406399,
0.3981447499863087,
0.49379282966059734,
0.630023794603525,
0.7040262292768793,
0.8079407948192094,
0.8444201247882652,
0.8501117299087093,
0.8506112410700144,
0.850640424136496,
0.9070881697087151,
0.9467202641315027,
0.968248934061421,
0.9775241961787079,
0.9876010825782179,
0.9930749785298686,
0.9941603424072684,
0.9959290937950546,
0.9978507113815247,
0.9988945590850882,
0.9989744518887538,
0.9991501192534249,
0.9994420975421141,
0.9997719544731963,
1.0],
'TUN': [0.07290237227800192,
0.09609793984658656,
0.10238522130657624,
0.10354330340890737,
0.10370453562228475,
0.17807532900849254,
0.2199804726896831,
0.23178645852757845,
0.23400386812395932,
0.3547802360936178,
0.42283317492376177,
0.44200581010593065,
0.5891092333294031,
0.6719964771720618,
0.7914426761696468,
0.8399371100255344,
0.9072405486909357,
0.930592470220985,
0.9341934897228371,
0.9345058464407957,
0.9345238745729223,
0.9618486633232037,
0.9808101409348898,
0.9885083860826354,
0.9928943624513734,
0.9964557221713516,
0.9979016126628606,
0.9984088724107844,
0.9990267055733264,
0.9995283786225538,
0.9997320548442102,
0.999768938698257,
0.9998295436775947,
0.9999048127110428,
0.9999683374543787,
1.0]},
'COL': {'JPN': [0.025474589801155134,
0.039410518852086476,
0.04531698143515412,
0.04702598859854193,
0.04740046724528122,
0.0822996421294951,
0.11333912730585158,
0.1271424570867091,
0.13123470522283998,
0.20314363388673515,
0.26709976272691205,
0.2955411989450757,
0.4066661905547228,
0.5055012644850981,
0.6199864796935617,
0.6789600027124392,
0.780783674824025,
0.8247358573948749,
0.8331678383879011,
0.8340777544230282,
0.8341437199782442,
0.8865950400915449,
0.9318762590830485,
0.955201480646862,
0.9682319044692268,
0.9816563453078925,
0.9885715314249212,
0.9904463920047822,
0.993343721586483,
DiscreteVoltageControlBranchAndGenState14BusEnvTestCase(
unittest.TestCase):
"""Quick testing of DiscreteVoltageControlBranchAndGenState14BusEnv.
"""
@classmethod
def setUpClass(cls) -> None:
# Initialize the environment. Then, we'll use individual test
# methods to test various attributes, methods, etc.
# Define inputs to the constructor.
cls.num_scenarios = 1000
cls.max_load_factor = 1.2
cls.min_load_factor = 0.8
cls.min_load_pf = 0.8
cls.lead_pf_probability = 0.1
cls.load_on_probability = 0.8
cls.num_gen_voltage_bins = 5
cls.gen_voltage_range = (0.95, 1.05)
cls.seed = 18
cls.log_level = logging.INFO
cls.dtype = np.float32
cls.log_buffer = 10
cls.csv_logfile = 'log.csv'
# Ensure we remove the logfile if it was created by other
# test cases.
try:
os.remove(cls.csv_logfile)
except FileNotFoundError:
pass
cls.env = \
voltage_control_env.DiscreteVoltageControlBranchAndGenState14BusEnv(
pwb_path=PWB_14, num_scenarios=cls.num_scenarios,
max_load_factor=cls.max_load_factor,
min_load_factor=cls.min_load_factor,
min_load_pf=cls.min_load_pf,
lead_pf_probability=cls.lead_pf_probability,
load_on_probability=cls.load_on_probability,
num_gen_voltage_bins=cls.num_gen_voltage_bins,
gen_voltage_range=cls.gen_voltage_range,
seed=cls.seed,
log_level=logging.INFO,
dtype=cls.dtype,
log_buffer=cls.log_buffer,
csv_logfile=cls.csv_logfile
)
# noinspection PyUnresolvedReferences
@classmethod
def tearDownClass(cls) -> None:
cls.env.close()
def setUp(self) -> None:
self.env.scenario_idx = 0
self.env.reset()
def test_observation_shape(self):
# 14 buses, 4 "openable" lines, 5 generators.
self.assertEqual((14+4+5,), self.env.observation_space.shape)
def test_observation_bounds(self):
# All lower bounds should be 0.
np.testing.assert_array_equal(0, self.env.observation_space.low)
# Upper bounds corresponding to buses should be 2.
np.testing.assert_array_equal(2, self.env.observation_space.high[0:14])
# Remaining upper bounds correspond to line and
# generator states, which should be 1.
np.testing.assert_array_equal(1, self.env.observation_space.high[14:])
def test_num_obs(self):
# 14 buses, 4 lines to open, 5 generators.
self.assertEqual(14+4+5, self.env.num_obs)
def test_get_observation_and_get_observation_failed_pf(self):
# Get a copy of the line observation DataFrame.
branch_obs = self.env.branch_obs_data.copy(deep=True)
# Convert to multi-index to make selecting buses a bit
# simpler.
branch_obs.set_index(['BusNum', 'BusNum:1'], inplace=True)
# Close all lines.
branch_obs['LineStatus'] = 'Closed'
# Open the first and last eligible lines.
for idx in [0, len(self.env.LINES_TO_OPEN) - 1]:
l_t = self.env.LINES_TO_OPEN[idx]
branch_obs.loc[(l_t[0], l_t[1]), 'LineStatus'] = 'Open'
# Get a copy of the generator observation DataFrame.
gen_obs = self.env.gen_obs_data.copy(deep=True)
# Close all generators.
gen_obs['GenStatus'] = 'Closed'
# Open the first and last generators.
gen_obs.loc[0, 'GenStatus'] = 'Open'
gen_obs.loc[gen_obs.shape[0] - 1, 'GenStatus'] = 'Open'
# Patch the existing DataFrames, call _get_observation.
with patch.object(self.env, 'branch_obs_data',
branch_obs.reset_index()):
with patch.object(self.env, 'gen_obs_data', gen_obs):
obs = self.env._get_observation()
obs_failed = self.env._get_observation_failed_pf()
# Ensure we get back the correct number of observations.
self.assertEqual(14+4+5, obs.shape[0])
self.assertEqual(14 + 4 + 5, obs_failed.shape[0])
# The first 14 values correspond to bus voltages.
np.testing.assert_array_less(0, obs[0:14])
np.testing.assert_array_equal(0, obs_failed[0:14])
# The remaining correspond to generator and line states.
line_arr = obs[14:18]
line_arr_failed = obs_failed[14:18]
gen_arr = obs[18:]
gen_arr_failed = obs_failed[18:]
# Gen states and line states should be the same for the normal
# and failed cases.
np.testing.assert_array_equal(line_arr, line_arr_failed)
np.testing.assert_array_equal(gen_arr, gen_arr_failed)
# Create array for expected line states.
line_expected = np.ones(len(self.env.LINES_TO_OPEN),
dtype=self.env.dtype)
line_expected[0] = 0
line_expected[-1] = 0
# Create array for expected gen states.
gen_expected = np.ones(self.env.num_gens, dtype=self.env.dtype)
gen_expected[0] = 0
gen_expected[-1] = 0
# Test.
np.testing.assert_array_equal(line_expected, line_arr)
np.testing.assert_array_equal(gen_expected, gen_arr)
# noinspection DuplicatedCode
class DiscreteVoltageControlBranchAndGenStateClippedReward14BusEnvTestCase(
unittest.TestCase):
"""Quick testing of
DiscreteVoltageControlBranchAndGenStateClippedReward14BusEnv.
Specifically, we'll be testing the
_compute_reward_volt_change_clipped and
_compute_reward_failed_power_flow_clipped methods which it uses.
"""
@classmethod
def setUpClass(cls) -> None:
# Initialize the environment. Then, we'll use individual test
# methods to test various attributes, methods, etc.
# Define inputs to the constructor.
cls.num_scenarios = 1000
cls.max_load_factor = 1.4
cls.min_load_factor = 0.6
cls.min_load_pf = 0.8
cls.lead_pf_probability = 0.1
cls.load_on_probability = 0.9
cls.num_gen_voltage_bins = 5
cls.gen_voltage_range = (0.95, 1.05)
cls.seed = 18
cls.log_level = logging.INFO
cls.dtype = np.float32
cls.log_buffer = 1000
cls.csv_logfile = 'log.csv'
cls.truncate_voltages = True
cls.scale_voltage_obs = True
# Ensure we remove the logfile if it was created by other
# test cases.
try:
os.remove(cls.csv_logfile)
except FileNotFoundError:
pass
cls.env = \
voltage_control_env.DiscreteVoltageControlEnv(
pwb_path=PWB_14, num_scenarios=cls.num_scenarios,
max_load_factor=cls.max_load_factor,
min_load_factor=cls.min_load_factor,
min_load_pf=cls.min_load_pf,
lead_pf_probability=cls.lead_pf_probability,
load_on_probability=cls.load_on_probability,
num_gen_voltage_bins=cls.num_gen_voltage_bins,
gen_voltage_range=cls.gen_voltage_range,
seed=cls.seed,
log_level=logging.INFO,
dtype=cls.dtype,
log_buffer=cls.log_buffer,
csv_logfile=cls.csv_logfile,
truncate_voltages=True,
scale_voltage_obs=True,
clipped_reward=True
)
# noinspection PyUnresolvedReferences
@classmethod
def tearDownClass(cls) -> None:
cls.env.close()
def setUp(self) -> None:
# Replace the voltage DataFrames for each run. It'll simply be
# 6 voltage measurements initialized to 1.
self.env.bus_obs_data = pd.DataFrame({'BusPUVolt': np.ones(6)})
self.env.bus_obs_data_prev = pd.DataFrame({'BusPUVolt': np.ones(6)})
def test_all_in(self):
self.assertEqual(1.0, self.env._compute_reward())
def test_fail(self):
self.assertEqual(-1.0, self.env._compute_reward_failed_pf())
def test_two_voltages_move_in(self):
# Move two from out to in.
self.env.bus_obs_data_prev.iloc[0]['BusPUVolt'] = 0.94
self.env.bus_obs_data_prev.iloc[-1]['BusPUVolt'] = 1.06
# Ensure at least one stays out of bounds.
self.env.bus_obs_data_prev.iloc[3]['BusPUVolt'] = 0.949
self.env.bus_obs_data.iloc[3]['BusPUVolt'] = 0.94
self.assertEqual(0.75, self.env._compute_reward())
def test_no_op_action_reward(self):
with patch.object(self.env, 'last_action', new=0):
self.assertEqual(0.0, self.env._compute_reward())
def test_one_voltage_moves_in(self):
# Move one bus from out to in.
self.env.bus_obs_data_prev.iloc[4]['BusPUVolt'] = 0.949
self.env.bus_obs_data.iloc[4]['BusPUVolt'] = 0.95000001
# Ensure at least one stays out of bounds.
self.env.bus_obs_data_prev.iloc[3]['BusPUVolt'] = 0.949
self.env.bus_obs_data.iloc[3]['BusPUVolt'] = 0.94
self.assertEqual(0.5, self.env._compute_reward())
def test_three_in_one_out(self):
# Move three from out to in.
self.env.bus_obs_data_prev.iloc[0]['BusPUVolt'] = 0.94
self.env.bus_obs_data_prev.iloc[-1]['BusPUVolt'] = 1.06
self.env.bus_obs_data_prev.iloc[2]['BusPUVolt'] = 1.1
# Move one from in to out.
self.env.bus_obs_data.iloc[3]['BusPUVolt'] = 0.94
self.assertEqual(0.75, self.env._compute_reward())
def test_two_move_out(self):
# Move two buses out of range.
self.env.bus_obs_data.iloc[2]['BusPUVolt'] = 1.08
self.env.bus_obs_data.iloc[3]['BusPUVolt'] = 0.9
self.assertEqual(-0.75, self.env._compute_reward())
def test_one_moves_out_net(self):
# Move two buses out of range.
self.env.bus_obs_data.iloc[1]['BusPUVolt'] = 1.06
self.env.bus_obs_data.iloc[0]['BusPUVolt'] = 0.92
# Move one in range.
self.env.bus_obs_data_prev.iloc[4]['BusPUVolt'] = 0.93
self.assertEqual(-0.5, self.env._compute_reward())
def test_bad_move_right_direction(self):
self.env.bus_obs_data_prev.iloc[-1]['BusPUVolt'] = 0.9
self.env.bus_obs_data.iloc[-1]['BusPUVolt'] = 0.91
self.assertEqual(0.25, self.env._compute_reward())
def test_bad_move_wrong_direction(self):
self.env.bus_obs_data_prev.iloc[3]['BusPUVolt'] = 1.051
self.env.bus_obs_data.iloc[3]['BusPUVolt'] = 1.06
self.assertEqual(-0.25, self.env._compute_reward())
@unittest.skip("Don't worry about over/undershoot for now.")
def test_overshoot(self):
self.env.bus_obs_data_prev.iloc[4]['BusPUVolt'] = 0.9
self.env.bus_obs_data.iloc[4]['BusPUVolt'] = 1.06
# The agent decreased the distance from the band.
self.assertEqual(0.15, self.env._compute_reward())
def test_do_nothing(self):
self.env.bus_obs_data_prev.iloc[4]['BusPUVolt'] = 0.9
self.env.bus_obs_data.iloc[4]['BusPUVolt'] = 0.9
self.assertEqual(-0.1, self.env._compute_reward())
# noinspection DuplicatedCode
class IL200BusShuntsTestCase(unittest.TestCase):
"""Test case for shunts with the 200 bus case and the
DiscreteVoltageControlGenAndShuntNoContingenciesEnv.
"""
@classmethod
def setUpClass(cls) -> None:
# Initialize the environment. Then, we'll use individual test
# methods to test various attributes, methods, etc.
# Define inputs to the constructor.
cls.num_scenarios = 1000
cls.max_load_factor = 1.4
cls.min_load_factor = 0.6
cls.min_load_pf = 0.8
cls.lead_pf_probability = 0.1
cls.load_on_probability = 0.9
cls.shunt_closed_probability = 0.5
cls.num_gen_voltage_bins = 5
cls.gen_voltage_range = (0.95, 1.05)
cls.seed = 18
cls.log_level = logging.INFO
cls.dtype = np.float32
cls.log_buffer = 10
cls.csv_logfile = 'log.csv'
cls.env = \
voltage_control_env.DiscreteVoltageControlGenAndShuntNoContingenciesEnv(
pwb_path=PWB_200, num_scenarios=cls.num_scenarios,
max_load_factor=cls.max_load_factor,
min_load_factor=cls.min_load_factor,
min_load_pf=cls.min_load_pf,
lead_pf_probability=cls.lead_pf_probability,
load_on_probability=cls.load_on_probability,
shunt_closed_probability=cls.shunt_closed_probability,
num_gen_voltage_bins=cls.num_gen_voltage_bins,
gen_voltage_range=cls.gen_voltage_range,
seed=cls.seed,
log_level=logging.INFO,
dtype=cls.dtype,
log_buffer=cls.log_buffer,
csv_logfile=cls.csv_logfile
)
def setUp(self) -> None:
self.env.scenario_idx = 0
self.env.reset()
# noinspection PyUnresolvedReferences
@classmethod
def tearDownClass(cls) -> None:
cls.env.close()
def test_branches_to_open_none(self):
self.assertIsNone(self.env.branches_to_open)
def _shunt_action_helper(self, shunt_idx, start_state, finish_state):
"""The last action should toggle the last shunt. Ensure that
shunt is open first. shunt_idx should be negative only.
"""
# Grab the shunt at the given index.
last_shunt = self.env.shunt_init_data.iloc[shunt_idx].copy()
# Put it in the starting state.
last_shunt['SSStatus'] = start_state
self.env.saw.ChangeParametersSingleElement(
ObjectType='shunt',
ParamList=last_shunt.index.tolist(),
Values=last_shunt.tolist()
)
# Update observations.
self.env._rotate_and_get_observation_frames()
# Helper to pull this shunt.
def get_shunt():
s = self.env.saw.GetParametersSingleElement(
ObjectType='shunt',
ParamList=['BusNum', 'ShuntID', 'SSStatus'],
Values=[last_shunt['BusNum'], last_shunt['ShuntID'], 0])
return s
# Confirm shunt changed state in PowerWorld.
shunt_out = get_shunt()
self.assertEqual(start_state, shunt_out['SSStatus'])
# Now, take the action which should toggle this shunt.
self.env._take_action(self.env.action_space.n + shunt_idx)
# Confirm it's now in the finish state.
shunt_out = get_shunt()
self.assertEqual(finish_state, shunt_out['SSStatus'])
def test_take_action_last_shunt_open_to_closed(self):
"""Toggle the last shunt from open to closed."""
self._shunt_action_helper(
shunt_idx=-1, start_state='Open', finish_state='Closed')
def test_take_action_first_shunt_closed_to_open(self):
"""Toggle the first shunt from closed to open."""
# Hard-code the fact that there are 4 shunts in this case.
self._shunt_action_helper(
shunt_idx=-4, start_state='Closed', finish_state='Open'
)
def test_observation_space(self):
"""Ensure the observation space is the correct size via
hard-coding.
"""
# 200 buses, 49 generators, 4 shunts.
n = 200 + 49 + 4
self.assertEqual(
(n,), self.env.observation_space.shape
)
# Lower bound should be 0.
np.testing.assert_array_equal(
np.zeros(n, dtype=self.env.dtype), self.env.observation_space.low
)
# Voltage cap at 2.
np.testing.assert_array_equal(
np.ones(200, dtype=self.dtype) + 1,
self.env.observation_space.high[0:200])
# All else at 1 (gen and shunt states)
np.testing.assert_array_equal(
np.ones(49+4, dtype=self.dtype),
self.env.observation_space.high[200:]
)
def test_action_1_puts_gen_at_min(self):
"""Ensure action 1 puts the first generator at the lowest
set point.
"""
# Get the set point.
gen_data = self.env.gen_obs_data.iloc[0]
initial_v = gen_data['GenVoltSet']
# Ensure we don't start at the minimum.
self.assertNotAlmostEqual(initial_v, self.gen_voltage_range[0])
# Take action 1 (0 is no-op).
self.env._take_action(1)
# Pull voltage for this generator.
gen = self.env.saw.GetParametersSingleElement(
ObjectType='gen',
ParamList=self.env.gen_key_fields + ['GenVoltSet'],
Values=gen_data[self.env.gen_key_fields].tolist() + [0]
)
# Ensure its voltage set point is at the minimum.
self.assertAlmostEqual(self.gen_voltage_range[0], gen['GenVoltSet'])
def test_last_gen_action_puts_gen_at_max(self):
"""Ensure the last possible gen action puts the last generator
at the highest set point.
"""
# Get the starting set point.
gen_data = self.env.gen_obs_data.iloc[-1]
initial_v = gen_data['GenVoltSet']
# Ensure we don't start at the maximum.
self.assertNotAlmostEqual(initial_v, self.gen_voltage_range[1])
# Take the last generator action. No need for -1 because action
# 0 is no-op.
self.env._take_action(self.env.num_gens * self.num_gen_voltage_bins)
inv = False if 'inv' not in kw else kw.pop('inv')
to = 'to' in kw and kw.pop('to')
pattern_objs = []
for pattern in patterns:
pattern_objs.append(re.compile(pattern, **kw))
for data in prev:
match = None
for pattern_obj in pattern_objs:
match = pattern_obj.match(data)
if match is not None:
break
if bool(inv) ^ (match is not None):
if to is dict:
yield match.groupdict()
elif to is tuple:
yield tuple(match.groups())
elif to is list:
yield list(match.groups())
else:
yield match
@pipe.func
def resplit(prev, pattern, *args, **kw):
"""The resplit pipe split previous pipe input by regular expression.
Use 'maxsplit' keyword argument to limit the number of split.
:param prev: The previous iterator of pipe.
:type prev: Pipe
:param pattern: The pattern which used to split string.
:type pattern: str|unicode
"""
maxsplit = 0 if 'maxsplit' not in kw else kw.pop('maxsplit')
pattern_obj = re.compile(pattern, *args, **kw)
for s in prev:
yield pattern_obj.split(s, maxsplit=maxsplit)
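# A minimal usage sketch (hypothetical data; assumes the framework's result()
# helper, shown in the sh docstring below, collects pipe output into a list):
#
#   rows = result(['a,b,c', 'd,e'] | resplit(','))
#   # rows == [['a', 'b', 'c'], ['d', 'e']]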
@pipe.func
def sub(prev, pattern, repl, *args, **kw):
"""sub pipe is a wrapper of re.sub method.
:param prev: The previous iterator of pipe.
:type prev: Pipe
:param pattern: The pattern string.
:type pattern: str|unicode
:param repl: The replacement; see the repl argument of re.sub.
:type repl: str|unicode|callable
"""
count = 0 if 'count' not in kw else kw.pop('count')
pattern_obj = re.compile(pattern, *args, **kw)
for s in prev:
yield pattern_obj.sub(repl, s, count=count)
@pipe.func
def subn(prev, pattern, repl, *args, **kw):
"""subn pipe is a wrapper of re.subn method.
:param prev: The previous iterator of pipe.
:type prev: Pipe
:param pattern: The pattern string.
:type pattern: str|unicode
:param repl: The replacement; see the repl argument of re.subn.
:type repl: str|unicode|callable
"""
count = 0 if 'count' not in kw else kw.pop('count')
pattern_obj = re.compile(pattern, *args, **kw)
for s in prev:
yield pattern_obj.subn(repl, s, count=count)
@pipe.func
def wildcard(prev, *patterns, **kw):
"""wildcard pipe greps data passed from previous generator
according to given regular expression.
:param prev: The previous iterator of pipe.
:type prev: Pipe
:param patterns: The wildcard strings used to filter data. When more than one pattern is specified, the data is passed if it matches any pattern.
:type patterns: str|unicode
:param inv: If true, invert the match condition.
:type inv: boolean
:returns: generator
"""
import fnmatch
inv = 'inv' in kw and kw.pop('inv')
pattern_objs = []
for pattern in patterns:
pattern_objs.append(re.compile(fnmatch.translate(pattern), **kw))
for data in prev:
is_match = False
for pattern_obj in pattern_objs:
if pattern_obj.match(data):
is_match = True
break
if bool(inv) ^ is_match:
yield data
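# A minimal usage sketch (hypothetical filenames; list-to-pipe chaining as in
# the sh docstring's example below):
#
#   py_files = result(['a.py', 'b.txt', 'c.py'] | wildcard('*.py'))
#   # py_files == ['a.py', 'c.py']
#   others = result(['a.py', 'b.txt', 'c.py'] | wildcard('*.py', inv=True))
#   # others == ['b.txt']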
@pipe.func
def stdout(prev, endl='\n', thru=False):
"""This pipe read data from previous iterator and write it to stdout.
:param prev: The previous iterator of pipe.
:type prev: Pipe
:param endl: The end-of-line symbol for each output.
:type endl: str
:param thru: If true, data will be passed to the next generator. If false,
data will be dropped.
:type thru: bool
:returns: generator
"""
for i in prev:
sys.stdout.write(str(i) + endl)
if thru:
yield i
@pipe.func
def stderr(prev, endl='\n', thru=False):
"""This pipe read data from previous iterator and write it to stderr.
:param prev: The previous iterator of pipe.
:type prev: Pipe
:param endl: The end-of-line symbol for each output.
:type endl: str
:param thru: If true, data will be passed to the next generator. If false,
data will be dropped.
:type thru: bool
:returns: generator
"""
for i in prev:
sys.stderr.write(str(i) + endl)
if thru:
yield i
@pipe.func
def readline(prev, filename=None, mode='r', trim=None, start=1, end=sys.maxsize):
"""This pipe get filenames or file object from previous pipe and read the
content of file. Then, send the content of file line by line to next pipe.
The start and end parameters are used to limit the range of reading from file.
:param prev: The previous iterator of pipe.
:type prev: Pipe
:param filename: The files to be read. If None, use previous pipe input as filenames.
:type filename: None|str|unicode|list|tuple
:param mode: The mode to open file. default is 'r'
:type mode: str
:param trim: The function used to trim each line before sending it to the next pipe.
:type trim: function object.
:param start: If start is specified, only lines numbered greater than or equal to start will be sent.
:type start: integer
:param end: The last line number to read.
:type end: integer
:returns: generator
"""
if trim is None:
trim = lambda s: s.rstrip()
if prev is None:
if filename is None:
raise Exception('No input available for readline.')
elif is_str_type(filename):
file_list = [filename, ]
else:
file_list = filename
else:
file_list = prev
for fn in file_list:
if isinstance(fn, file_type):
fd = fn
else:
fd = open(fn, mode)
try:
if start <= 1 and end == sys.maxsize:
for line in fd:
yield trim(line)
else:
for line_no, line in enumerate(fd, 1):
if line_no < start:
continue
yield trim(line)
if line_no >= end:
break
finally:
if fd != fn:
fd.close()
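# A minimal usage sketch (hypothetical file name; result() as in the sh
# docstring below):
#
#   header = result(readline('data.csv', end=1))   # first line only
#   body = result(readline('data.csv', start=2))   # line 2 onwards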
@pipe.func
def fileobj(prev, file_handle, endl='', thru=False):
"""This pipe read/write data from/to file object which specified by
file_handle.
:param prev: The previous iterator of pipe.
:type prev: Pipe
:param file_handle: The file object to read or write.
:type file_handle: file object
:param endl: The end-of-line symbol for each output.
:type endl: str
:param thru: If true, data will passed to next generator. If false, data
will be dropped.
:type thru: bool
:returns: generator
"""
if prev is not None:
for i in prev:
file_handle.write(str(i)+endl)
if thru:
yield i
else:
for data in file_handle:
yield data
@pipe.func
def sh(prev, *args, **kw):
"""sh pipe executes shell command specified by args. If previous pipe exists,
read data from it and write it to stdin of shell process. The stdout of
shell process will be passed to next pipe object line by line.
Optional keyword arguments:
- trim: Pass a function into the sh pipe to trim the output from the shell process.
The default trim function is str.rstrip, so any trailing whitespace in each
output line of the shell process will be removed.
- endl: Append the specified end-of-line symbol to each input line from the previous pipe.
- returncode: Set the expected return code. If the return code of the process does not
equal this value, a subprocess.CalledProcessError will be raised.
- decode: The codec used to decode the output of the shell.
For example:
py_files = result(sh('ls') | strip | wildcard('*.py'))
:param prev: The previous iterator of pipe.
:type prev: Pipe
:param args: The command line arguments. They will be joined by a space character.
:type args: list of string.
:param kw: arguments for subprocess.Popen.
:type kw: dictionary of options.
:returns: generator
"""
endl = '\n' if 'endl' not in kw else kw.pop('endl')
returncode = None if 'returncode' not in kw else kw.pop('returncode')
if PY3:
decode = functools.partial(codecs.decode, encoding=locale.getdefaultlocale()[1]) if 'decode' not in kw else kw.pop('decode')
else:
decode = (lambda ln: codecs.decode(ln, locale.getdefaultlocale()[1])) if 'decode' not in kw else kw.pop('decode')
trim = (lambda s: s.rstrip()) if 'trim' not in kw else kw.pop('trim')
cmdline = ' '.join(args)
if not cmdline:
if prev is not None:
for i in prev:
yield i
else:
while True:
yield None
process = subprocess.Popen(cmdline, shell=True,
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
**kw)
if prev is not None:
stdin_buffer = StringIO()
for i in prev:
stdin_buffer.write(i)
if endl:
stdin_buffer.write(endl)
if PY3:
process.stdin.write(stdin_buffer.getvalue().encode('utf-8'))
else:
process.stdin.write(stdin_buffer.getvalue())
process.stdin.flush()
process.stdin.close()
stdin_buffer.close()
for line in process.stdout:
yield trim(decode(line))
process.wait()
if returncode is not None and returncode != process.returncode:
raise subprocess.CalledProcessError(returncode=process.returncode, cmd=cmdline)
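# Another hedged sketch: feeding the previous pipe into the command's stdin
# (assumes a POSIX shell providing wc):
#
#   n_lines = result(['one', 'two'] | sh('wc -l'))   # e.g. ['2'] on Linux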
@pipe.func
def execmd(prev, *args, **kw):
"""execmd pipe executes shell command specified by previous pipe.
For example:
py_files = result(readline("dir_list.txt", trim=str.strip) | fmt("ls {}") | execmd )
:param prev: The previous iterator of pipe.
:type prev: Pipe
:param kw: arguments for subprocess.Popen.
:type kw: dictionary of options.
:returns: generator
"""
returncode = None if 'returncode' not in kw else kw.pop('returncode')
if PY3:
decode = functools.partial(codecs.decode, encoding=locale.getdefaultlocale()[1]) if 'decode' not in kw else kw.pop('decode')
else:
decode = (lambda ln: codecs.decode(ln, locale.getdefaultlocale()[1])) if 'decode' not in kw else kw.pop('decode')
trim = (lambda s: s.rstrip()) if 'trim' not in kw else kw.pop('trim')
for cmdline in prev:
process = subprocess.Popen(cmdline, shell=True,
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
**kw)
for line in process.stdout:
yield trim(decode(line))
process.wait()
if returncode is not None and returncode != process.returncode:
raise subprocess.CalledProcessError(returncode=process.returncode, cmd=cmdline)
MOL 2 8.447 13.786 6.178 1.00 0.00 H1- \n',
'ATOM 514 H3 MOL 2 7.524 11.986 6.711 1.00 0.00 H1- \n',
'ATOM 515 H4 MOL 2 8.102 10.674 4.848 1.00 0.00 H1- \n',
'ATOM 516 H5 MOL 2 6.609 10.669 4.357 1.00 0.00 H1- \n',
'ATOM 517 H6 MOL 2 7.090 8.640 5.466 1.00 0.00 H1- \n',
'ATOM 518 H7 MOL 2 7.485 9.434 6.763 1.00 0.00 H1- \n',
'ATOM 519 N1 MOL 2 7.104 0.690 17.485 1.00 0.00 N3- \n',
'ATOM 520 C1 MOL 2 7.531 3.021 15.867 1.00 0.00 C \n',
'ATOM 521 C2 MOL 2 8.296 2.685 16.973 1.00 0.00 C \n',
'ATOM 522 C3 MOL 2 7.973 1.515 17.788 1.00 0.00 C \n',
'ATOM 523 C4 MOL 2 6.907 24.339 18.352 1.00 0.00 C \n',
'ATOM 524 C5 MOL 2 7.177 23.079 17.544 1.00 0.00 C \n',
'ATOM 525 C6 MOL 2 6.904 21.839 18.384 1.00 0.00 C \n',
'ATOM 526 H1 MOL 2 6.795 2.495 15.649 1.00 0.00 H1- \n',
'ATOM 527 H2 MOL 2 8.447 1.386 18.578 1.00 0.00 H1- \n',
'ATOM 528 H3 MOL 2 7.524 24.386 19.111 1.00 0.00 H1- \n',
'ATOM 529 H4 MOL 2 8.102 23.074 17.248 1.00 0.00 H1- \n',
'ATOM 530 H5 MOL 2 6.609 23.069 16.757 1.00 0.00 H1- \n',
'ATOM 531 H6 MOL 2 7.090 21.040 17.866 1.00 0.00 H1- \n',
'ATOM 532 H7 MOL 2 7.485 21.834 19.163 1.00 0.00 H1- \n',
'ATOM 533 N1 MOL 2 19.504 13.090 17.485 1.00 0.00 N3- \n',
'ATOM 534 C1 MOL 2 19.931 15.421 15.867 1.00 0.00 C \n',
'ATOM 535 C2 MOL 2 20.696 15.085 16.973 1.00 0.00 C \n',
'ATOM 536 C3 MOL 2 20.373 13.915 17.788 1.00 0.00 C \n',
'ATOM 537 C4 MOL 2 19.307 11.939 18.352 1.00 0.00 C \n',
'ATOM 538 C5 MOL 2 19.577 10.679 17.544 1.00 0.00 C \n',
'ATOM 539 C6 MOL 2 19.304 9.439 18.384 1.00 0.00 C \n',
'ATOM 540 H1 MOL 2 19.195 14.895 15.649 1.00 0.00 H1- \n',
'ATOM 541 H2 MOL 2 20.847 13.786 18.578 1.00 0.00 H1- \n',
'ATOM 542 H3 MOL 2 19.924 11.986 19.111 1.00 0.00 H1- \n',
'ATOM 543 H4 MOL 2 20.502 10.674 17.248 1.00 0.00 H1- \n',
'ATOM 544 H5 MOL 2 19.009 10.669 16.757 1.00 0.00 H1- \n',
'ATOM 545 H6 MOL 2 19.490 8.640 17.866 1.00 0.00 H1- \n',
'ATOM 546 H7 MOL 2 19.885 9.434 19.163 1.00 0.00 H1- \n',
'ATOM 547 N1 MOL 2 19.504 0.690 5.085 1.00 0.00 N3- \n',
'ATOM 548 C1 MOL 2 19.931 3.021 3.467 1.00 0.00 C \n',
'ATOM 549 C2 MOL 2 20.696 2.685 4.573 1.00 0.00 C \n',
'ATOM 550 C3 MOL 2 20.373 1.515 5.388 1.00 0.00 C \n',
'ATOM 551 C4 MOL 2 19.307 24.339 5.952 1.00 0.00 C \n',
'ATOM 552 C5 MOL 2 19.577 23.079 5.144 1.00 0.00 C \n',
'ATOM 553 C6 MOL 2 19.304 21.839 5.984 1.00 0.00 C \n',
'ATOM 554 H1 MOL 2 19.195 2.495 3.249 1.00 0.00 H1- \n',
'ATOM 555 H2 MOL 2 20.847 1.386 6.178 1.00 0.00 H1- \n',
'ATOM 556 H3 MOL 2 19.924 24.386 6.711 1.00 0.00 H1- \n',
'ATOM 557 H4 MOL 2 20.502 23.074 4.848 1.00 0.00 H1- \n',
'ATOM 558 H5 MOL 2 19.009 23.069 4.357 1.00 0.00 H1- \n',
'ATOM 559 H6 MOL 2 19.490 21.040 5.466 1.00 0.00 H1- \n',
'ATOM 560 H7 MOL 2 19.885 21.834 6.763 1.00 0.00 H1- \n',
'ATOM 561 N1 MOL 2 17.696 24.110 17.485 1.00 0.00 N3- \n',
'ATOM 562 C1 MOL 2 17.269 21.779 15.867 1.00 0.00 C \n',
'ATOM 563 C2 MOL 2 16.504 22.115 16.973 1.00 0.00 C \n',
'ATOM 564 C3 MOL 2 16.827 23.285 17.788 1.00 0.00 C \n',
'ATOM 565 C4 MOL 2 17.893 0.461 18.352 1.00 0.00 C \n',
'ATOM 566 C5 MOL 2 17.623 1.721 17.544 1.00 0.00 C \n',
'ATOM 567 C6 MOL 2 17.896 2.961 18.384 1.00 0.00 C \n',
'ATOM 568 H1 MOL 2 18.005 22.305 15.649 1.00 0.00 H1- \n',
'ATOM 569 H2 MOL 2 16.353 23.414 18.578 1.00 0.00 H1- \n',
'ATOM 570 H3 MOL 2 17.276 0.414 19.111 1.00 0.00 H1- \n',
'ATOM 571 H4 MOL 2 16.698 1.726 17.248 1.00 0.00 H1- \n',
'ATOM 572 H5 MOL 2 18.191 1.731 16.757 1.00 0.00 H1- \n',
'ATOM 573 H6 MOL 2 17.710 3.760 17.866 1.00 0.00 H1- \n',
'ATOM 574 H7 MOL 2 17.315 2.966 19.163 1.00 0.00 H1- \n',
'ATOM 575 N1 MOL 2 17.696 11.710 5.085 1.00 0.00 N3- \n',
'ATOM 576 C1 MOL 2 17.269 9.379 3.467 1.00 0.00 C \n',
'ATOM 577 C2 MOL 2 16.504 9.715 4.573 1.00 0.00 C \n',
'ATOM 578 C3 MOL 2 16.827 10.885 5.388 1.00 0.00 C \n',
'ATOM 579 C4 MOL 2 17.893 12.861 5.952 1.00 0.00 C \n',
'ATOM 580 C5 MOL 2 17.623 14.121 5.144 1.00 0.00 C \n',
'ATOM 581 C6 MOL 2 17.896 15.361 5.984 1.00 0.00 C \n',
'ATOM 582 H1 MOL 2 18.005 9.905 3.249 1.00 0.00 H1- \n',
'ATOM 583 H2 MOL 2 16.353 11.014 6.178 1.00 0.00 H1- \n',
'ATOM 584 H3 MOL 2 17.276 12.814 6.711 1.00 0.00 H1- \n',
'ATOM 585 H4 MOL 2 16.698 14.126 4.848 1.00 0.00 H1- \n',
'ATOM 586 H5 MOL 2 18.191 14.131 4.357 1.00 0.00 H1- \n',
'ATOM 587 H6 MOL 2 17.710 16.160 5.466 1.00 0.00 H1- \n',
'ATOM 588 H7 MOL 2 17.315 15.366 6.763 1.00 0.00 H1- \n',
'ATOM 589 N1 MOL 2 5.296 24.110 5.085 1.00 0.00 N3- \n',
'ATOM 590 C1 MOL 2 4.869 21.779 3.467 1.00 0.00 C \n',
'ATOM 591 C2 MOL 2 4.104 22.115 4.573 1.00 0.00 C \n',
'ATOM 592 C3 MOL 2 4.427 23.285 5.388 1.00 0.00 C \n',
'ATOM 593 C4 MOL 2 5.493 0.461 5.952 1.00 0.00 C \n',
'ATOM 594 C5 MOL 2 5.223 1.721 5.144 1.00 0.00 C \n',
'ATOM 595 C6 MOL 2 5.496 2.961 5.984 1.00 0.00 C \n',
'ATOM 596 H1 MOL 2 5.605 22.305 3.249 1.00 0.00 H1- \n',
'ATOM 597 H2 MOL 2 3.953 23.414 6.178 1.00 0.00 H1- \n',
'ATOM 598 H3 MOL 2 4.876 0.414 6.711 1.00 0.00 H1- \n',
'ATOM 599 H4 MOL 2 4.298 1.726 4.848 1.00 0.00 H1- \n',
'ATOM 600 H5 MOL 2 5.791 1.731 4.357 1.00 0.00 H1- \n',
'ATOM 601 H6 MOL 2 5.310 3.760 5.466 1.00 0.00 H1- \n',
'ATOM 602 H7 MOL 2 4.915 2.966 6.763 1.00 0.00 H1- \n',
'ATOM 603 N1 MOL 2 5.296 11.710 17.485 1.00 0.00 N3- \n',
'ATOM 604 C1 MOL 2 4.869 9.379 15.867 1.00 0.00 C \n',
'ATOM 605 C2 MOL 2 4.104 9.715 16.973 1.00 0.00 C \n',
'ATOM 606 C3 MOL 2 4.427 10.885 17.788 1.00 0.00 C \n',
'ATOM 607 C4 MOL 2 5.493 12.861 18.352 1.00 0.00 C \n',
'ATOM 608 C5 MOL 2 5.223 14.121 17.544 1.00 0.00 C \n',
'ATOM 609 C6 MOL 2 5.496 15.361 18.384 1.00 0.00 C \n',
'ATOM 610 H1 MOL 2 5.605 9.905 15.649 1.00 0.00 H1- \n',
'ATOM 611 H2 MOL 2 3.953 11.014 18.578 1.00 0.00 H1- \n',
'ATOM 612 H3 MOL 2 4.876 12.814 19.111 1.00 0.00 H1- \n',
'ATOM 613 H4 MOL 2 4.298 14.126 17.248 1.00 0.00 H1- \n',
'ATOM 614 H5 MOL 2 5.791 14.131 16.757 1.00 0.00 H1- \n',
'ATOM 615 H6 MOL 2 5.310 16.160 17.866 1.00 0.00 H1- \n',
'ATOM 616 H7 MOL 2 4.915 15.366 19.163 1.00 0.00 H1- \n',
'ATOM 617 N1 MOL 2 19.504 11.710 7.315 1.00 0.00 N3- \n',
'ATOM 618 C1 MOL 2 19.931 9.379 8.933 1.00 0.00 C \n',
'ATOM 619 C2 MOL 2 20.696 9.715 7.827 1.00 0.00 C \n',
# xamox/SimpleCV
from SimpleCV.base import *
from SimpleCV.Color import *
from SimpleCV.Features.Features import Feature, FeatureSet
class TrackSet(FeatureSet):
"""
**SUMMARY**
TrackSet is a class extended from FeatureSet which is a class
extended from Python's list. So, TrackSet has all the properties
of a list as well as all the properties of FeatureSet.
In general, functions dealing with attributes will return
numpy arrays.
This class is specifically made for Tracking.
**EXAMPLE**
>>> image = Image("/path/to/image.png")
>>> ts = image.track("camshift", img1=image, bb=bb) # ts is the track set
>>> ts.draw()
>>> ts.x()
"""
def append(self, f):
"""
**SUMMARY**
This is a substitute function for append. This is used in
Image.track(). To get z, vel, etc., I have to use this.
This sets a few parameters up and appends the Tracking object to
the TrackSet list.
Users are discouraged from using this function directly.
**RETURNS**
Nothing.
**EXAMPLE**
>>> while True:
... img1 = cam.getImage()
... ts = img1.track("camshift", ts1, img, bb)
... img = img1
>>> ts.append(CAMShift(img,bb,ellipse))
"""
list.append(self,f)
ts = self
f.sizeRatio = float(ts[-1].area)/float(ts[0].area)
f.vel = self.__pixelVelocity()
f.rt_vel = self.__pixelVelocityRealTime()
def areaRatio(self):
"""
**SUMMARY**
Returns a numpy array of the areaRatio of each feature,
where areaRatio is the ratio of the size of the current bounding box
to the size of the initial bounding box.
**RETURNS**
A numpy array.
**EXAMPLE**
>>> while True:
... img1 = cam.getImage()
... ts = img1.track("camshift", ts1, img, bb)
... img = img1
>>> print ts.areaRatio()
"""
return np.array([f.areaRatio for f in self])
def drawPath(self, color=Color.GREEN, thickness=2):
"""
**SUMMARY**
Draw the complete path traced by the center of the object on current frame
**PARAMETERS**
* *color* - The color to draw the object. Either a BGR tuple or a member of the :py:class:`Color` class.
* *thickness* - Thickness of the tracing path.
**RETURNS**
Nada. Nothing. Zilch.
**EXAMPLE**
>>> while True:
... img1 = cam.getImage()
... ts = img1.track("camshift", ts1, img, bb)
... ts.drawPath() # For continuous tracing
... img = img1
>>> ts.drawPath() # draw the path at the end of tracking
"""
ts = self
img = self[-1].image
for i in range(len(ts)-1):
img.drawLine((ts[i].center),(ts[i+1].center), color=color, thickness=thickness)
def draw(self, color=Color.GREEN, rad=1, thickness=1):
"""
**SUMMARY**
Draw the center of the object on the current frame.
**PARAMETERS**
* *color* - The color to draw the object. Either a BGR tuple or a member of the :py:class:`Color` class.
* *rad* - Radius of the circle to be plotted on the center of the object.
* *thickness* - Thickness of the boundary of the center circle.
**RETURNS**
Nada. Nothing. Zilch.
**EXAMPLE**
>>> while True:
... img1 = cam.getImage()
... ts = img1.track("camshift", ts1, img, bb)
... ts.draw() # For continuous tracking of the center
... img = img1
"""
f = self[-1]
f.image.drawCircle(f.center, rad, color, thickness)
def drawBB(self, color=Color.GREEN, thickness=3):
"""
**SUMMARY**
Draw the bounding box over the object on the current frame.
**PARAMETERS**
* *color* - The color to draw the object. Either a BGR tuple or a member of the :py:class:`Color` class.
* *thickness* - Thickness of the boundary of the bounding box.
**RETURNS**
Nada. Nothing. Zilch.
**EXAMPLE**
>>> while True:
... img1 = cam.getImage()
... ts = img1.track("camshift", ts1, img, bb)
... ts.drawBB() # For continuous bounding box
... img = img1
"""
f = self[-1]
f.image.drawRectangle(f.bb_x, f.bb_y, f.w, f.h, color, thickness)
def trackLength(self):
"""
**SUMMARY**
Get total number of tracked frames.
**PARAMETERS**
No Parameters required.
**RETURNS**
* *int* * - Number of tracked image frames
**EXAMPLE**
>>> while True:
... img1 = cam.getImage()
... ts = img1.track("camshift", ts1, img, bb)
... img = img1
>>> print ts.trackLength()
"""
return len(self)
def trackImages(self):
"""
**SUMMARY**
Get all the tracked images in a list
**PARAMETERS**
No Parameters required.
**RETURNS**
* *list* * - A list of all the tracked SimpleCV.ImageClass.Image
**EXAMPLE**
>>> while True:
... img1 = cam.getImage()
... ts = img1.track("camshift", ts1, img, bb)
... img = img1
>>> imgset = ts.trackImages()
"""
return [f.image for f in self]
def BBTrack(self):
"""
**SUMMARY**
Get all the bounding box in a list
**PARAMETERS**
No Parameters required.
**RETURNS**
* *list* * - All the bounding box co-ordinates in a list
**EXAMPLE**
>>> while True:
... img1 = cam.getImage()
... ts = img1.track("camshift", ts1, img, bb)
... img = img1
>>> print ts.BBTrack()
"""
return [f.bb for f in self]
def __pixelVelocity(self):
"""
**SUMMARY**
Get Pixel Velocity of the tracked object in pixel/frame.
**PARAMETERS**
No Parameters required.
**RETURNS**
* *tuple* * - (Velocity of x, Velocity of y)
"""
ts = self
if len(ts) < 2:
return (0,0)
dx = ts[-1].x - ts[-2].x
dy = ts[-1].y - ts[-2].y
return (dx, dy)
def pixelVelocity(self):
"""
**SUMMARY**
Get each Pixel Velocity of the tracked object in pixel/frames.
**PARAMETERS**
No Parameters required.
**RETURNS**
* *numpy array* * - array of pixel velocity tuple.
>>> while True:
... img1 = cam.getImage()
... ts = img1.track("camshift", ts1, img, bb)
... img = img1
>>> print ts.pixelVelocity()
"""
return np.array([f.vel for f in self])
def __pixelVelocityRealTime(self):
"""
**SUMMARY**
Get each Pixel Velocity of the tracked object in pixel/second.
**PARAMETERS**
No Parameters required.
**RETURNS**
* *tuple* * - velocity tuple
"""
ts = self
if len(ts) < 2:
return (0,0)
dx = ts[-1].x - ts[-2].x
dy = ts[-1].y - ts[-2].y
dt = ts[-1].time - ts[-2].time
return (float(dx)/dt, float(dy)/dt)
def pixelVelocityRealTime(self):
"""
**SUMMARY**
Get each Pixel Velocity of the tracked object in pixel/second.
**PARAMETERS**
No Parameters required.
**RETURNS**
* *numpy array* * - array of pixel velocity tuple.
>>> while True:
... img1 = cam.getImage()
... ts = img1.track("camshift", ts1, img, bb)
... img = img1
>>> print ts.pixelVelocityRealTime()
"""
return np.array([f.rt_vel for f in self])
def showCoordinates(self, pos=None, color=Color.GREEN, size=None):
"""
**SUMMARY**
Show the co-ordinates of the object in text on the current frame.
**PARAMETERS**
* *pos* - A tuple consisting of x, y values. where to put to the text
* *color* - The color to draw the object. Either a BGR tuple or a member of the :py:class:`Color` class.
* *size* - Fontsize of the text
**RETURNS**
Nada. Nothing. Zilch.
**EXAMPLE**
>>> while True:
... img1 = cam.getImage()
... ts = img1.track("camshift", ts1, img, bb)
... ts.showCoordinates() # For continuous coordinate display
... img = img1
"""
ts = self
f = ts[-1]
img = f.image
if not pos:
imgsize = img.size()
pos = (imgsize[0]-120, 10)
if not size:
size = 16
text = "x = %d y = %d" % (f.x, f.y)
img.drawText(text, pos[0], pos[1], color, size)
def showSizeRatio(self, pos=None, color=Color.GREEN, size=None):
"""
**SUMMARY**
Show the sizeRatio of the object in text on the current frame.
**PARAMETERS**
* *pos* - A tuple consisting of x, y values. where to put to the text
* *color* - The color to draw the object. Either a BGR tuple or a member of the :py:class:`Color` class.
* *size* - Fontsize of the text
**RETURNS**
Nada. Nothing. Zilch.
**EXAMPLE**
>>> while True:
... img1 = cam.getImage()
... ts = img1.track("camshift", ts1, img, bb)
... ts.showSizeRatio() # For continuous size ratio display
... img = img1
"""
ts = self
f = ts[-1]
img = f.image
if not pos:
imgsize = img.size()
pos = (imgsize[0]-120, 30)
if not size:
size = 16
text = "size = %f" % (f.sizeRatio)
img.drawText(text, pos[0], pos[1], color, size)
def showPixelVelocity(self, pos=None, color=Color.GREEN, size=None):
"""
**SUMMARY**
Show the Pixel Velocity (pixel/frame) of the object in text on the current frame.
**PARAMETERS**
* *pos* - A tuple consisting of x, y values. where to put to the text
* *color* - The color to draw the object. Either a BGR tuple or a member of the :py:class:`Color` class.
# tools/webdriver/webdriver/client.py
from . import error
from . import protocol
from . import transport
from six import string_types
from six.moves.urllib import parse as urlparse
def command(func):
def inner(self, *args, **kwargs):
if hasattr(self, "session"):
session = self.session
else:
session = self
if session.session_id is None:
session.start()
return func(self, *args, **kwargs)
inner.__name__ = func.__name__
inner.__doc__ = func.__doc__
return inner
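# The @command decorator lazily establishes a WebDriver session: each
# decorated method first ensures session.start() has been called, so callers
# never have to start the session explicitly before issuing a command.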
class Timeouts(object):
def __init__(self, session):
self.session = session
def _get(self, key=None):
timeouts = self.session.send_session_command("GET", "timeouts")
if key is not None:
return timeouts[key]
return timeouts
def _set(self, key, secs):
body = {key: secs * 1000}
self.session.send_session_command("POST", "timeouts", body)
return None
@property
def script(self):
return self._get("script")
@script.setter
def script(self, secs):
return self._set("script", secs)
@property
def page_load(self):
return self._get("pageLoad")
@page_load.setter
def page_load(self, secs):
return self._set("pageLoad", secs)
@property
def implicit(self):
return self._get("implicit")
@implicit.setter
def implicit(self, secs):
return self._set("implicit", secs)
def __str__(self):
name = "%s.%s" % (self.__module__, self.__class__.__name__)
return "<%s script=%d, load=%d, implicit=%d>" % \
(name, self.script, self.page_load, self.implicit)
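# A hedged usage sketch (assumes a Session exposing a Timeouts instance as
# session.timeouts): the setters take seconds, and _set converts them to the
# milliseconds the wire protocol expects.
#
#   session.timeouts.script = 30       # sends {"script": 30000} to POST /timeouts
#   session.timeouts.page_load = 120   # sends {"pageLoad": 120000}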
class ActionSequence(object):
"""API for creating and performing action sequences.
Each action method adds one or more actions to a queue. When perform()
is called, the queued actions fire in order.
May be chained together as in::
ActionSequence(session, "key", id) \
.key_down("a") \
.key_up("a") \
.perform()
"""
def __init__(self, session, action_type, input_id, pointer_params=None):
"""Represents a sequence of actions of one type for one input source.
:param session: WebDriver session.
:param action_type: Action type; may be "none", "key", or "pointer".
:param input_id: ID of input source.
:param pointer_params: Optional dictionary of pointer parameters.
"""
self.session = session
self._id = input_id
self._type = action_type
self._actions = []
self._pointer_params = pointer_params
@property
def dict(self):
d = {
"type": self._type,
"id": self._id,
"actions": self._actions,
}
if self._pointer_params is not None:
d["parameters"] = self._pointer_params
return d
@command
def perform(self):
"""Perform all queued actions."""
self.session.actions.perform([self.dict])
def _key_action(self, subtype, value):
self._actions.append({"type": subtype, "value": value})
def _pointer_action(self, subtype, button=None, x=None, y=None, duration=None, origin=None, width=None,
height=None, pressure=None, tangential_pressure=None, tilt_x=None,
tilt_y=None, twist=None, altitude_angle=None, azimuth_angle=None):
action = {
"type": subtype
}
if button is not None:
action["button"] = button
if x is not None:
action["x"] = x
if y is not None:
action["y"] = y
if duration is not None:
action["duration"] = duration
if origin is not None:
action["origin"] = origin
if width is not None:
action["width"] = width
if height is not None:
action["height"] = height
if pressure is not None:
action["pressure"] = pressure
if tangential_pressure is not None:
action["tangentialPressure"] = tangential_pressure
if tilt_x is not None:
action["tiltX"] = tilt_x
if tilt_y is not None:
action["tiltY"] = tilt_y
if twist is not None:
action["twist"] = twist
if altitude_angle is not None:
action["altitudeAngle"] = altitude_angle
if azimuth_angle is not None:
action["azimuthAngle"] = azimuth_angle
self._actions.append(action)
def pause(self, duration):
self._actions.append({"type": "pause", "duration": duration})
return self
def pointer_move(self, x, y, duration=None, origin=None, width=None, height=None,
pressure=None, tangential_pressure=None, tilt_x=None, tilt_y=None,
twist=None, altitude_angle=None, azimuth_angle=None):
"""Queue a pointerMove action.
:param x: Destination x-axis coordinate of pointer in CSS pixels.
:param y: Destination y-axis coordinate of pointer in CSS pixels.
:param duration: Number of milliseconds over which to distribute the
move. If None, remote end defaults to 0.
:param origin: Origin of coordinates, either "viewport", "pointer" or
an Element. If None, remote end defaults to "viewport".
"""
self._pointer_action("pointerMove", x=x, y=y, duration=duration, origin=origin,
width=width, height=height, pressure=pressure,
tangential_pressure=tangential_pressure, tilt_x=tilt_x, tilt_y=tilt_y,
twist=twist, altitude_angle=altitude_angle, azimuth_angle=azimuth_angle)
return self
def pointer_up(self, button=0):
"""Queue a pointerUp action for `button`.
:param button: Pointer button to perform action with.
Default: 0, which represents main device button.
"""
self._pointer_action("pointerUp", button=button)
return self
def pointer_down(self, button=0, width=None, height=None, pressure=None,
tangential_pressure=None, tilt_x=None, tilt_y=None,
twist=None, altitude_angle=None, azimuth_angle=None):
"""Queue a pointerDown action for `button`.
:param button: Pointer button to perform action with.
Default: 0, which represents main device button.
"""
self._pointer_action("pointerDown", button=button, width=width, height=height,
pressure=pressure, tangential_pressure=tangential_pressure,
tilt_x=tilt_x, tilt_y=tilt_y, twist=twist, altitude_angle=altitude_angle,
azimuth_angle=azimuth_angle)
return self
def click(self, element=None, button=0):
"""Queue a click with the specified button.
If an element is given, move the pointer to that element first,
otherwise click current pointer coordinates.
:param element: Optional element to click.
:param button: Integer representing pointer button to perform action
with. Default: 0, which represents main device button.
"""
if element:
self.pointer_move(0, 0, origin=element)
return self.pointer_down(button).pointer_up(button)
def key_up(self, value):
"""Queue a keyUp action for `value`.
:param value: Character to perform key action with.
"""
self._key_action("keyUp", value)
return self
def key_down(self, value):
"""Queue a keyDown action for `value`.
:param value: Character to perform key action with.
"""
self._key_action("keyDown", value)
return self
def send_keys(self, keys):
"""Queue a keyDown and keyUp action for each character in `keys`.
:param keys: String of keys to perform key actions with.
"""
for c in keys:
self.key_down(c)
self.key_up(c)
return self
def scroll(self, x, y, delta_x, delta_y, duration=None, origin=None):
"""Queue a scroll action.
:param x: Destination x-axis coordinate of pointer in CSS pixels.
:param y: Destination y-axis coordinate of pointer in CSS pixels.
:param delta_x: scroll delta on x-axis in CSS pixels.
:param delta_y: scroll delta on y-axis in CSS pixels.
:param duration: Number of milliseconds over which to distribute the
scroll. If None, remote end defaults to 0.
:param origin: Origin of coordinates, either "viewport" or an Element.
If None, remote end defaults to "viewport".
"""
action = {
"type": "scroll",
"x": x,
"y": y,
"deltaX": delta_x,
"deltaY": delta_y
}
if duration is not None:
action["duration"] = duration
if origin is not None:
action["origin"] = origin
self._actions.append(action)
return self
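# A hedged sketch of a pointer sequence equivalent to click(element),
# assuming a live session and an element handle obtained elsewhere:
#
#   ActionSequence(session, "pointer", "pointer1") \
#       .pointer_move(0, 0, origin=element) \
#       .pointer_down(0) \
#       .pointer_up(0) \
#       .perform()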
class Actions(object):
def __init__(self, session):
self.session = session
@command
def perform(self, actions=None):
"""Performs actions by tick from each action sequence in `actions`.
:param actions: List of input source action sequences. A single action
sequence may be created with the help of
``ActionSequence.dict``.
"""
body = {"actions": [] if actions is None else actions}
actions = self.session.send_session_command("POST", "actions", body)
return actions
@command
def release(self):
return self.session.send_session_command("DELETE", "actions")
def sequence(self, *args, **kwargs):
"""Return an empty ActionSequence of the designated type.
See ActionSequence for parameter list.
"""
return ActionSequence(self.session, *args, **kwargs)
class Window(object):
identifier = "window-fcc6-11e5-b4f8-330a88ab9d7f"
def __init__(self, session):
self.session = session
@command
def close(self):
handles = self.session.send_session_command("DELETE", "window")
if handles is not None and len(handles) == 0:
# With no more open top-level browsing contexts, the session is closed.
self.session.session_id = None
return handles
@property
@command
def rect(self):
return self.session.send_session_command("GET", "window/rect")
@property
@command
def size(self):
"""Gets the window size as a tuple of `(width, height)`."""
rect = self.rect
return (rect["width"], rect["height"])
@size.setter
@command
def size(self, new_size):
"""Set window size by passing a tuple of `(width, height)`."""
width, height = new_size
body = {"width": width, "height": height}
self.session.send_session_command("POST", "window/rect", body)
@property
@command
def position(self):
"""Gets the window position as a tuple of `(x, y)`."""
rect = self.rect
return (rect["x"], rect["y"])
@position.setter
@command
def position(self, new_position):
"""Set window position by passing a tuple of `(x, y)`."""
x, y = new_position
body = {"x": x, "y": y}
self.session.send_session_command("POST", "window/rect", body)
@command
def maximize(self):
return self.session.send_session_command("POST", "window/maximize")
@command
def minimize(self):
return self.session.send_session_command("POST", "window/minimize")
@command
def fullscreen(self):
return self.session.send_session_command("POST", "window/fullscreen")
@classmethod
def from_json(cls, json, session):
uuid = json[Window.identifier]
return cls(uuid, session)
class Frame(object):
identifier = "frame-075b-4da1-b6ba-e579c2d3230a"
def __init__(self, session):
self.session = session
@classmethod
def from_json(cls, json, session):
uuid = json[Frame.identifier]
return cls(uuid, session)
class Find(object):
def __init__(self, session):
self.session = session
@command
def css(self, element_selector, all=True):
elements = self._find_element("css selector", element_selector, all)
return elements
def _find_element(self, strategy, selector, all):
route = "elements" if all else "element"
body = {"using": strategy,
"value": selector}
return self.session.send_session_command("POST", route, body)
class Cookies(object):
def __init__(self, session):
self.session = session
def __getitem__(self, name):
self.session.send_session_command("GET", "cookie/%s" % name, {})
def __setitem__(self, name, value):
cookie = {"name": name,
"value": None}
if isinstance(value, string_types):
cookie["value"] = value
elif hasattr(value, "value"):
cookie["value"] = value.value
self.session.send_session_command("POST", "cookie/%s" % name, {})
class UserPrompt(object):
def __init__(self, session):
self.session = session
@command
def dismiss(self):
self.session.send_session_command("POST", "alert/dismiss")
@command
def accept(self):
self.session.send_session_command("POST", "alert/accept")
@property
@command
def text(self):
return self.session.send_session_command("GET", "alert/text")
@text.setter
@command
def text(self, value):
body = {"text": value}
self.session.send_session_command("POST", "alert/text", body=body)
class Session(object):
def __init__(self,
host,
port,
url_prefix="/",
capabilities=None,
extension=None):
self.transport = transport.HTTPWireProtocol(host, port, url_prefix)
self.requested_capabilities = capabilities
self.capabilities = None
self.session_id = None
overlap.overlap_count = overlap.overlap_count + overlap_count
n = overlap.overlap.get ( field_id, 0 )
overlap.overlap [ field_id ] = n + overlap_count
# Return overlap
return [ x0, y0, x1, y1, w0, h0 ]
# No overlap
return None
# Update the overlap fields
def update_overlap_fields () :
# Load the overlap results if available
if os.path.isfile ( overlap_tiles_fname ) :
print ( "Reading overlapped tiles" )
# Load tile overlap offsets
oin = open ( overlap_tiles_fname )
# Header
oin.readline ()
# Loop
for line in oin :
fields = line.strip ().split ( "," )
in_train = int ( fields [ 0 ] )
tile_id = int ( fields [ 1 ] )
right = int ( fields [ 2 ] )
train = int ( fields [ 3 ] )
overlap_id = int ( fields [ 4 ] )
x0 = int ( fields [ 5 ] )
y0 = int ( fields [ 6 ] )
x1 = int ( fields [ 7 ] )
y1 = int ( fields [ 8 ] )
w = int ( fields [ 9 ] )
h = int ( fields [ 10 ] )
overlap = [ x0, y0, x1, y1, w, h ]
if in_train > 0 :
tile = train_tiles [ tile_id ]
else :
tile = test_tiles [ tile_id ]
if right > 0 :
if train > 0 :
tile.right_train_id = overlap_id
tile.right_train_overlap = overlap
else :
tile.right_test_id = overlap_id
tile.right_test_overlap = overlap
else :
if train > 0 :
tile.bottom_train_id = overlap_id
tile.bottom_train_overlap = overlap
else :
tile.bottom_test_id = overlap_id
tile.bottom_test_overlap = overlap
oin.close ()
# Read from file
print ( "Reading overlapped fields" )
fin = open ( overlap_fields_fname )
# Header
fin.readline ()
# Loop
for line in fin :
fields = line.strip ().split ( "," )
field_id = int ( fields [ 0 ] )
pixels = int ( fields [ 1 ] )
overlap_id = int ( fields [ 2 ] )
overlap_count = int ( fields [ 3 ] )
field = field_dict [ field_id ]
field.border_pixels = pixels
if overlap_id > 0 and overlap_count > 0 :
if field.overlap_count == 0 :
field.overlap_count = overlap_count
field.overlap = { }
else :
field.overlap_count = field.overlap_count + overlap_count
field.overlap [ overlap_id ] = overlap_count
fin.close ()
else :
print ( "Updating overlapped fields" )
for tiles in [ train_tiles, test_tiles ] :
for tile_id, tile in tiles.items () :
raster = load_raster ( tile.field_id_file )
# Overlap on right from train
overlap_id = tile_to_right ( tile.bbox, train_tiles )
if overlap_id is not None :
overlap = update_overlap_tiles ( True, tile, raster, train_tiles [ overlap_id ] )
if overlap is not None :
tile.right_train_id = overlap_id
tile.right_train_overlap = overlap
# Overlap on right from test
overlap_id = tile_to_right ( tile.bbox, test_tiles )
if overlap_id is not None :
overlap = update_overlap_tiles ( True, tile, raster, test_tiles [ overlap_id ] )
if overlap is not None :
tile.right_test_id = overlap_id
tile.right_test_overlap = overlap
# Overlap on bottom from train
overlap_id = tile_to_bottom ( tile.bbox, train_tiles )
if overlap_id is not None :
overlap = update_overlap_tiles ( False, tile, raster, train_tiles [ overlap_id ] )
if overlap is not None :
tile.bottom_train_id = overlap_id
tile.bottom_train_overlap = overlap
# Overlap on bottom from test
overlap_id = tile_to_bottom ( tile.bbox, test_tiles )
if overlap_id is not None :
overlap = update_overlap_tiles ( False, tile, raster, test_tiles [ overlap_id ] )
if overlap is not None :
tile.bottom_test_id = overlap_id
tile.bottom_test_overlap = overlap
# Now we can store
fout = open ( overlap_tiles_fname, "w" )
fout.write ( "in train,tile_id,right,train,overlap_id,x0,y0,x1,y1,w,h\n" )
in_train = 1
for dict in [ train_tiles, test_tiles ] :
for tile_id, tile in dict.items () :
if tile.right_train_id is not None :
fout.write ( f"{in_train},{tile_id},1,1,{tile.right_train_id}" )
for i in tile.right_train_overlap :
fout.write ( "," )
fout.write ( str ( i ) )
fout.write ( "\n" )
if tile.right_test_id is not None :
fout.write ( f"{in_train},{tile_id},1,0,{tile.right_test_id}" )
for i in tile.right_test_overlap :
fout.write ( "," )
fout.write ( str ( i ) )
fout.write ( "\n" )
if tile.bottom_train_id is not None :
fout.write ( f"{in_train},{tile_id},0,1,{tile.bottom_train_id}" )
for i in tile.bottom_train_overlap :
fout.write ( "," )
fout.write ( str ( i ) )
fout.write ( "\n" )
if tile.bottom_test_id is not None :
fout.write ( f"{in_train},{tile_id},0,0,{tile.bottom_test_id}" )
for i in tile.bottom_test_overlap :
fout.write ( "," )
fout.write ( str ( i ) )
fout.write ( "\n" )
in_train = 0
fout.close ()
# Overlap fields
fout = open ( overlap_fields_fname, "w" )
fout.write ( "field_id,border pixels,overlap_id,overlap_count\n" )
for field_id, field in field_dict.items () :
if field.overlap_count > 0 :
for overlap_id, overlap_count in field.overlap.items () :
fout.write ( f"{field_id},{field.border_pixels},{overlap_id},{overlap_count}\n" )
elif field.border_pixels > 0 :
fout.write ( f"{field_id},{field.border_pixels},0,0\n" )
fout.close ()
# Given an image, fill the holes in it
# and return the outside mask, boundary
# and inside mask and estimated
# perimeter length
# Note the boundary is 2 pixels wide
def outside_boundary_inside_perimeter ( img ) :
x0 = 0
y0 = 0
if img [ y0, x0 ] :
x0 = img.shape [ 1 ] - 1
y0 = img.shape [ 0 ] - 1
outside = flood ( img, (y0, x0) )
inside = ~outside
boundary = find_boundaries ( inside )
return outside, boundary, inside, perimeter ( inside )
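# A minimal sketch of what the helper returns (assumes flood, find_boundaries
# and perimeter come from the skimage imports in the truncated head of this
# file):
#
#   blob = np.zeros ( ( 8, 8 ), dtype = bool )
#   blob [ 2:6, 2:6 ] = True
#   outside, boundary, inside, perim = outside_boundary_inside_perimeter ( blob )
#   # outside is True off the blob, inside is the hole-filled blob,
#   # boundary marks its (2 pixel wide) edge, perim estimates its length.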
# Update the overlap features
def update_overlap_features () :
# Load the overlap results if available
if not os.path.isfile ( overlap_features_fname ) :
print ( "Creating overlapped features" )
fout = open ( overlap_features_fname, "w" )
fout.write ( "field_id,p1,p2,p3,p4,p5,p6,p7,p8,p9\n" )
for field_id, field in field_dict.items () :
if field.label == 0 :
if field.overlap_count > 0 :
total_count = 0
label_counts = [ 0, 0, 0, 0, 0, 0, 0, 0, 0 ]
for overlap_id, overlap_count in field.overlap.items () :
overlap = field_dict [ overlap_id ]
if overlap.label > 0 :
total_count = total_count + overlap_count
label_counts [ overlap.label - 1 ] = label_counts [ overlap.label - 1 ] + overlap_count
if total_count > 0 :
fout.write ( str ( field_id ) )
for label_count in label_counts :
fout.write ( "," )
fout.write ( str ( label_count / total_count ) )
fout.write ( "\n" )
fout.close ()
# Given the border of an image, return the goodness of fit of a circle
def fit_circle ( boundary ) :
total = np.count_nonzero ( boundary )
if total > 10 :
xy = np.zeros ( [ total, 2 ], dtype = int )
i = 0
for x in range ( boundary.shape [ 1 ] ) :
for y in range ( boundary.shape [ 0 ] ) :
if boundary [ y, x ] :
xy [ i, 0 ] = x
xy [ i, 1 ] = y
i = i + 1
# Fit circle model
circle = CircleModel ()
if circle.estimate ( xy ) :
return 1 / (1 + circle.residuals ( xy ).std ())
return 0.0
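# A hedged sketch of how fit_circle is meant to be read: a near-circular
# boundary yields a small residual spread and a score close to 1, while an
# irregular boundary scores closer to 0. The threshold below is illustrative
# only:
#
#   score = fit_circle ( boundary )
#   looks_circular = score > 0.5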
# Update the field lookup array
def update_field_lookup () :
row = 0
for field_id in sorted ( field_dict.keys () ) :
field_lookup [ row, 0 ] = field_id
field_lookup [ field_id, 1 ] = row
row = row + 1
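# field_lookup thus maps both ways: column 0 of row r holds the r-th sorted
# field_id, and column 1 of row field_id holds that field's row index.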
# Update name indices
def update_name_indices () :
for i, date_name in enumerate ( date_names ) :
date_name_index [ date_name ] = i
for i, band_name in enumerate ( band_names ) :
band_name_index [ band_name ] = i
for i, s2_band_name in enumerate ( s2_band_names ) :
s2_band_name_index [ s2_band_name ] = i
# Given a date, return its name
def date_name ( date ) :
month = int ( date [ 5 :7 ] )
day = int ( date [ 8 :10 ] )
if day <= 15 :
return str ( month ) + 'a'
else :
return str ( month ) + 'b'
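# For example, with the ISO timestamps parsed above:
#   date_name ( "2017-07-04T00:00:00Z" )   # -> '7a' (day <= 15)
#   date_name ( "2017-07-20T00:00:00Z" )   # -> '7b' (day > 15)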
# Given a tile, update the data support with observations from its images
def calc_tile ( tile ) :
current_path = os.path.dirname ( tile.tile_file )
tile_assets = load_json_file ( tile.tile_file )
field_ids = load_raster ( tile.field_id_file ).flatten ()
for link in tile_assets [ "links" ] :
if link [ "rel" ] == "source" :
inner_link = link [ "href" ]
inner_path = current_path + "/" + inner_link
asset_dir = os.path.dirname ( inner_path )
inner_item = load_json_file ( inner_path )
# Get the date and index
datetime = inner_item [ "properties" ] [ "datetime" ]
dname = date_name ( datetime )
dindex = date_name_index [ dname ]
# Observations for this date
obs = { }
# Read observations from assets
for asset_key in inner_item [ "assets" ] :
asset_value = inner_item [ "assets" ] [ asset_key ]
asset_link = asset_value [ "href" ]
asset_path = asset_dir + "/" + asset_link
obs [ asset_key ] = load_raster ( asset_path ).flatten ()
# Apply cloud mask, if any
clm = obs.get ( "CLM", None )
if clm is None :
# No cloud mask, this is s1
mask = None
else :
# Cloud mask so this must be s2
mask = clm == 0
# Calculate composite bands
# clm is None for s1 items, so take the pixel count from any loaded band
# rather than from the cloud mask.
n = next ( iter ( obs.values () ) ).shape [ 0 ]
ndvi = np.zeros ( n, dtype = float )
evi = np.zeros ( n, dtype = float )
savi = np.zeros ( n, dtype = float )
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# pylint: disable=line-too-long
import json
import requests
import azure.cli.command_modules.appconfig._azconfig.keyvalue_iterable as iterable
import azure.cli.command_modules.appconfig._azconfig.exceptions as exceptions
import azure.cli.command_modules.appconfig._azconfig.constants as constants
import azure.cli.command_modules.appconfig._azconfig.utils as utils
import azure.cli.command_modules.appconfig._azconfig.models as models
import azure.cli.command_modules.appconfig._azconfig.mapper as mapper
import azure.cli.command_modules.appconfig._azconfig.request_message as request_message
import azure.cli.command_modules.appconfig._azconfig.request_handler as handler
class AzconfigClient(object):
"""Represents an azconfig client.
Provides a client-side logical representation of the Azure config service.
This client is used to configure and execute requests against the
service.
The service client encapsulates the endpoint and credentials used to access
the Azure config service.
"""
def __init__(self, connection_string, client_options=None):
"""
:param string connection_params:
Contains 'endpoint', 'id' and 'secret', where id and secret are credentials
used to create the client.
:param ClientOptions client_options:
Optional parameter to customize AzconfigClient
"""
self.connection_string = connection_string
self._request_session = requests.Session()
self._client_options = models.ClientOptions(
) if client_options is None else client_options
self._default_headers = {
constants.HttpHeaders.UserAgent: self._client_options.user_agent
}
self._request_handler = handler.RequestHandler(
connection_string, self._client_options)
def add_keyvalue(self, keyvalue, modify_options=None):
""" Adds a new key-value to a configuration store.
:param KeyValue keyvalue:
The key-value to add.
:param dict custom_headers:
Headers that will be added to the request
:param ModifyKeyValueOptions modify_options:
Optional parameter to set keyvalue modification options
:return:
The key-value that was added to the configuration store.
:rtype:
KeyValue
:raises ValueError: If the keyvalue entry already exists.
"""
if modify_options is None:
modify_options = models.ModifyKeyValueOptions()
key, label = utils.encode_key_and_label(
keyvalue.key, keyvalue.label)
body_content = {
"content_type": keyvalue.content_type,
"value": keyvalue.value,
"tags": keyvalue.tags
}
return self.__write_key(key,
label,
body_content,
modify_options,
if_match_etag=None,
if_none_match_etag='*')
def set_keyvalue(self, keyvalue, modify_options=None):
""" Sets a key-value's properties within a configuration store.
If the key-value does not exist it will be created.
:param KeyValue keyvalue:
The key-value to set.
:param ModifyKeyValueOptions modify_options:
Optional parameter to set keyvalue modification options
:return:
The key-value that was set in the configuration store.
:rtype:
KeyValue
"""
if modify_options is None:
modify_options = models.ModifyKeyValueOptions()
key, label = utils.encode_key_and_label(keyvalue.key, keyvalue.label)
body_content = {
"content_type": keyvalue.content_type,
"value": keyvalue.value,
"tags": keyvalue.tags
}
return self.__write_key(key,
label,
body_content,
modify_options)
def update_keyvalue(self, keyvalue, modify_options=None):
""" Updates a key-value that was retrieved from a configuration store.
The ETag property is used to ensure that no external changes to the key-value have
occurred in the configuration store since the key-value was retrieved.
:param KeyValue keyvalue:
The key-value to update.
:param ModifyKeyValueOptions modify_options:
Optional parameter to set keyvalue modification options
:return:
The updated key-value.
:rtype:
KeyValue
:raises ValueError: If the keyvalue entry has been modified and etag mismatches.
"""
if modify_options is None:
modify_options = models.ModifyKeyValueOptions()
if keyvalue.etag is None:
raise ValueError("Etag of the keyvalue cannot be null")
key, label = utils.encode_key_and_label(
keyvalue.key, keyvalue.label)
body_content = {
"content_type": keyvalue.content_type,
"value": keyvalue.value,
"tags": keyvalue.tags
}
return self.__write_key(key,
label,
body_content,
modify_options,
keyvalue.etag)
def delete_keyvalue_by_key_label(self, key, label=None, modify_options=None):
""" Deletes a key-value from a configuration store.
:param str key:
The key of the key-value that should be deleted.
:param str label:
The label of the key-value that should be deleted.
:param ModifyKeyValueOptions modify_options:
Optional parameter to set keyvalue modification options
:return:
The deleted key-value if found, otherwise null.
:rtype:
KeyValue
"""
if modify_options is None:
modify_options = models.ModifyKeyValueOptions()
key, label = utils.encode_key_and_label(key, label)
query_url = '/kv/{}?label={}'.format(key,
'' if label is None else label)
endpoint = utils.get_endpoint_from_connection_string(
self.connection_string)
url = 'https://{}{}'.format(endpoint, query_url)
custom_headers = self.__configure_request_ids(modify_options)
custom_headers.update(self._default_headers)
headers = utils.generate_request_header(method=constants.HttpMethods.Delete,
custom_headers=custom_headers,
datetime_=None,
if_match_etag=None)
response = self._request_handler.execute(request_message.RequestMessage(
constants.HttpMethods.Delete, headers, url, ''), self._request_session)
if response.status_code == constants.StatusCodes.OK:
return mapper.map_json_to_keyvalue(response.json())
if response.status_code == constants.StatusCodes.NO_CONTENT:
return None
raise exceptions.HTTPException(response.status_code, response.reason,
response.headers, response.content)
def delete_keyvalue(self, keyvalue, modify_options=None):
""" Deletes a key-value from a configuration store.
The ETag property is used to ensure that no external changes to the key-value have occurred in the configuration store since the key-value was retrieved.
:param str keyvalue:
The key-value to delete.
:param ModifyKeyValueOptions modify_options:
Optional parameter to set keyvalue modification options
:return:
The deleted key-value.
:rtype:
KeyValue
:raises ValueError: If the key-value entry has been modified and etag mismatches.
"""
if modify_options is None:
modify_options = models.ModifyKeyValueOptions()
if keyvalue.etag is None:
raise ValueError("Etag of the keyvalue cannot be null")
key, label = utils.encode_key_and_label(keyvalue.key, keyvalue.label)
query_url = '/kv/{}?label={}'.format(key,
'' if label is None else label)
endpoint = utils.get_endpoint_from_connection_string(
self.connection_string)
url = 'https://{}{}'.format(endpoint, query_url)
custom_headers = self.__configure_request_ids(modify_options)
custom_headers.update(self._default_headers)
headers = utils.generate_request_header(method=constants.HttpMethods.Delete,
custom_headers=custom_headers,
datetime_=None,
if_match_etag=keyvalue.etag)
response = self._request_handler.execute(request_message.RequestMessage(
constants.HttpMethods.Delete, headers, url, ''), self._request_session)
if response.status_code == constants.StatusCodes.OK:
return mapper.map_json_to_keyvalue(response.json())
if response.status_code == constants.StatusCodes.PRECONDITION_FAILED:
raise ValueError('The keyvalue entry has been modified.')
raise exceptions.HTTPException(response.status_code, response.reason,
response.headers, response.content)
def get_keyvalue(self, key, query_options=None):
"""Retrieves a key-value with the specified key, taking into account the constraints of the key-value query options.
:param str key:
The key of the key-value to retrieved.
:param QueryKeyValueOptions query_options:
Parameters used to modify which key-value is retrieved.
:return:
The key-value if found, otherwise null
:rtype:
KeyValue
"""
if query_options is None:
query_options = models.QueryKeyValueOptions()
return self.__query_key(key, query_options)
def get_keyvalues(self, query_options=None):
"""Returns an iterable object which allows the caller to iterate and retrieve key-values.
:param QueryKeyValueCollectionOptions query_options:
Parameters used to modify the set of key-values that are retrieved.
:return:
An iterable of key-values if found, otherwise an empty one.
:rtype: KeyValueIterable
"""
if query_options is None:
query_options = models.QueryKeyValueCollectionOptions()
return iterable.KeyValueIterable(self, query_options, self.__query_keys)
def read_keyvalue_revisions(self, query_options=None):
"""Returns an iterable object which allows the caller to asynchronously iterate and retrieve revisions.
:param QueryKeyValueCollectionOptions query_options:
Parameters used to modify the set of revisions that are retrieved.
:return:
An iterable of key-value revisions if found, otherwise an empty one.
:rtype: KeyValueIterable
"""
if query_options is None:
query_options = models.QueryKeyValueCollectionOptions()
return iterable.KeyValueIterable(self, query_options, self.__list_revision)
def lock_keyvalue(self, keyvalue, modify_options=None):
"""Locks a key-value within a configuration store.
:param KeyValue keyvalue:
The key-value to be locked.
:param ModifyKeyValueOptions modify_options:
Optional parameter to set keyvalue modification options
:return:
The locked key-value if its ETag matches the ETag in the configuration store, otherwise null.
:rtype: KeyValue
"""
if modify_options is None:
modify_options = models.ModifyKeyValueOptions()
key, label = utils.encode_key_and_label(keyvalue.key, keyvalue.label)
query_url = '/locks/{}'.format(key)
query_url += '?label={}'.format('' if label is None else label)
endpoint = utils.get_endpoint_from_connection_string(
self.connection_string)
url = 'https://{}{}'.format(endpoint, query_url)
custom_headers = self.__configure_request_ids(modify_options)
custom_headers.update(self._default_headers)
headers = utils.generate_request_header(method=constants.HttpMethods.Put,
custom_headers=custom_headers,
datetime_=None,
if_match_etag=keyvalue.etag)
response = self._request_handler.execute(request_message.RequestMessage(
constants.HttpMethods.Put, headers, url, ''), self._request_session)
if response.status_code == constants.StatusCodes.OK:
return mapper.map_json_to_keyvalue(response.json())
if response.status_code == constants.StatusCodes.NO_CONTENT:
return None
raise exceptions.HTTPException(response.status_code, response.reason,
response.headers, response.content)
def unlock_keyvalue(self, keyvalue, modify_options=None):
"""Unlocks a key-value within a configuration store.
:param KeyValue keyvalue:
The key-value to be unlocked.
:param ModifyKeyValueOptions modify_options:
Optional parameter to set keyvalue modification options
:return:
The unlocked key-value if its ETag matches the ETag in the configuration store, otherwise null.
:rtype: KeyValue
"""
if modify_options is None:
modify_options = models.ModifyKeyValueOptions()
key, label = utils.encode_key_and_label(keyvalue.key, keyvalue.label)
query_url = '/locks/{}'.format(key)
query_url += '?label={}'.format('' if label is None else label)
endpoint = utils.get_endpoint_from_connection_string(
self.connection_string)
url = 'https://{}{}'.format(endpoint, query_url)
custom_headers = self.__configure_request_ids(modify_options)
custom_headers.update(self._default_headers)
headers = utils.generate_request_header(method=constants.HttpMethods.Delete,
custom_headers=custom_headers,
datetime_=None,
if_match_etag=keyvalue.etag)
response = self._request_handler.execute(request_message.RequestMessage(
constants.HttpMethods.Delete, headers, url, ''), self._request_session)
if response.status_code == constants.StatusCodes.OK:
return mapper.map_json_to_keyvalue(response.json())
if response.status_code == constants.StatusCodes.NO_CONTENT:
return None
raise exceptions.HTTPException(response.status_code, response.reason,
response.headers, response.content)
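# Hedged usage sketch (not part of the client): a typical add -> get -> update
# -> delete round trip. The connection string is a placeholder and the
# models.KeyValue constructor shown here is an assumption about this internal
# module, not a documented public API.
def _demo_azconfig_roundtrip():
    client = AzconfigClient("Endpoint=https://example.azconfig.io;Id=...;Secret=...")
    kv = models.KeyValue("color", value="blue")  # assumed constructor signature
    client.add_keyvalue(kv)                      # raises if the key already exists
    fetched = client.get_keyvalue("color")       # returns None when absent
    fetched.value = "green"
    client.update_keyvalue(fetched)              # guarded by the fetched etag
    client.delete_keyvalue_by_key_label("color")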
# pylint: disable=too-many-arguments
def __write_key(self,
key,
label,
body_content,
modify_options,
if_match_etag=None,
if_none_match_etag=None):
query_url = '/kv/{}?label={}'.format(key,
'' if label is None else label)
endpoint = utils.get_endpoint_from_connection_string(
self.connection_string)
url = 'https://{}{}'.format(endpoint, query_url)
custom_headers = self.__configure_request_ids(modify_options)
custom_headers.update(self._default_headers)
headers = utils.generate_request_header(method=constants.HttpMethods.Put,
custom_headers=custom_headers,
datetime_=None,
if_match_etag=if_match_etag,
if_none_match_etag=if_none_match_etag)
response = self._request_handler.execute(request_message.RequestMessage(
constants.HttpMethods.Put, headers, url, json.dumps(body_content)), self._request_session)
if response.status_code == constants.StatusCodes.OK:
return mapper.map_json_to_keyvalue(response.json())
raise exceptions.HTTPException(response.status_code, response.reason,
response.headers, response.content)
def __list_revision(self, query_option, continuation_link):
key, label = utils.encode_key_and_label(
query_option.key_filter, query_option.label_filter)
query_datetime = query_option.query_datetime
query_fields = self.__construct_query_fields_to_string(
query_option.fields)
if continuation_link is None:
query_url = '/revisions?key={}'.format('*' if key is None else key)
query_url += '&label={}'.format('*'if label is None else label)
query_url += '&fields={}'.format(query_fields)
else:
query_url = self.__parse_link_header(continuation_link)
if query_url is None:
return [], None
endpoint = utils.get_endpoint_from_connection_string(
self.connection_string)
url = 'https://{}{}'.format(endpoint, query_url)
custom_headers = self.__configure_request_ids(query_option)
custom_headers.update(self._default_headers)
headers = utils.generate_request_header(method=constants.HttpMethods.Get,
custom_headers=custom_headers,
datetime_=query_datetime)
response = self._request_handler.execute(request_message.RequestMessage(
constants.HttpMethods.Get, headers, url, ''), self._request_session)
if response.status_code == constants.StatusCodes.OK:
if constants.HttpHeaders.Link in response.headers:
return mapper.map_json_to_keyvalues(response.json()['items']), response.headers[constants.HttpHeaders.Link]
return mapper.map_json_to_keyvalues(response.json()['items']), None
raise exceptions.HTTPException(response.status_code, response.reason,
response.headers, response.content)
raise ValueError('Feature names size does not match the x input feature size')
# save into data frames
self.importance = pd.DataFrame({'feat_idx':feat_idx,
'feature':self.feat_names.iloc[feat_idx] if self.feat_names is not None else None,
'imp_val':feat_imp})
#--------------------------------------------------------------------
# Feature selection methods
class selectMixedDT(_featSelect_base):
# univariate feature selection with mixed data type, based on statistical test
# for y as categorical
# numeric features are selected based on ANOVA
# categorical features are selected based on chi-squared
# for y as numeric
# numeric features are selected based on Pearson's correlation (regression)
# categorical features are selected based on ANOVA
# results sorted based on qval in ascending order
def __init__(self, alpha=0.05):
# alpha is the corrected p-value threshold (correction via Benjamini-Hochberg FDR)
super().__init__()
self.alpha = alpha
def fit(self, x, y, feat_idx_numeric, feat_idx_categorical, y_categorical, feat_names=None):
# check length
self.feat_names = feat_names
if self.feat_names is not None:
if len(self.feat_names) != x.shape[1]:
raise ValueError('Feature names size does not match the x input feature size')
# feature selection scoring calculations
feat_labels_num = self.feat_names.iloc[feat_idx_numeric] if self.feat_names is not None else None
feat_labels_cat = self.feat_names.iloc[feat_idx_categorical] if self.feat_names is not None else None
x_num = x[:,feat_idx_numeric]
x_cat = x[:,feat_idx_categorical]
# univariate feature selection
if y_categorical:
sf_cat = SelectKBest(chi2, k=5).fit(x_cat, y) #for categorical y, categorical x; chi-squared
sf_num = SelectKBest(f_classif, k=5).fit(x_num, y) #for categorical y, numeric x; ANOVA
else:
sf_cat = SelectKBest(f_classif, k=5).fit(x_cat, y) #for numeric y, categorical x; ANOVA
sf_num = SelectKBest(f_regression, k=5).fit(x_num, y) #for numeric y, numeric x; linear regression
# get the p-value, and perform multiple hypothesis correction
df1 = pd.DataFrame({'feat_idx': feat_idx_numeric,
'feature':feat_labels_num,
'pval': sf_num.pvalues_})
df2 = pd.DataFrame({'feat_idx': feat_idx_categorical,
'feature':feat_labels_cat,
'pval': sf_cat.pvalues_})
df_pvals = pd.concat([df1,df2], axis=0)
df_pvals['qval'] = np.nan
df_pvals['reject'] = np.nan
pval_notnan = ~df_pvals.pval.isnull()
reject, pval_cor = multipletests(df_pvals.pval[pval_notnan], alpha=self.alpha, method='fdr_bh')[:2]
df_pvals.loc[pval_notnan, 'qval'] = pval_cor
df_pvals.loc[pval_notnan, 'reject'] = reject
# store the results in the importance data frame
df_pvals.sort_values('qval', ascending=True, inplace=True)
self.importance = df_pvals
self.importance['imp_val'] = df_pvals.qval
self.importance_sel = self.importance.loc[self.importance.reject==True,:]
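# Standalone sketch of the statistics selectMixedDT relies on: ANOVA F-test
# p-values from SelectKBest followed by Benjamini-Hochberg FDR correction.
# Assumes the module-level imports used above (np, pd, SelectKBest, f_classif,
# multipletests) are in scope; the data is synthetic.
def _demo_anova_fdr():
    rng = np.random.default_rng(0)
    y = rng.integers(0, 2, size=200)
    x = rng.normal(size=(200, 5))
    x[:, 0] += y  # make feature 0 informative
    sf = SelectKBest(f_classif, k='all').fit(x, y)
    reject, qvals = multipletests(sf.pvalues_, alpha=0.05, method='fdr_bh')[:2]
    print(pd.DataFrame({'pval': sf.pvalues_, 'qval': qvals, 'reject': reject}))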
class selectUnivariate(_featSelect_base):
# univariate feature selection, based on univariate model and its performance metric
# results sorted based on score_test in descending order
def __init__(self, dm_model, threshold=0, sort=False):
super().__init__()
self.dm_model = dm_model
self.threshold = threshold
self.sort = sort
def fit(self, x_train, y_train, x_test, y_test, feat_names=None, target_name=None):
self.feat_names = feat_names
# check length
if x_train.shape[1] != x_test.shape[1]:
raise ValueError('Feature sizes for train and test sets do not match')
if self.feat_names is not None:
if(len(self.feat_names) != x_train.shape[1]):
raise ValueError('Feature names size does not match the x input feature size and/or feature index size')
# run model for each feature
df_res_sel = pd.DataFrame()
for idx in range(0, x_train.shape[1]):
x_tr, x_te = self.transform_set(x_train, x_test, feat_idx=idx)
self.dm_model.fit(x_tr, y_train, x_te, y_test)
df_res_sp = self.dm_model.evaluate({'train': {'x':x_tr, 'y':y_train},
'test': {'x':x_te, 'y':y_test}},
'univariate', self.feat_names.iloc[idx], target_name)
df_res_sp['feat_idx'] = idx
df_res_sp['feat_id'] = self.feat_names.index[idx]
df_res_sel = df_res_sel.append(df_res_sp, sort=False)
df_res_sel.set_index('feat_id',inplace=True,drop=True)
if self.sort:
df_res_sel.sort_values('score_test', ascending=False, inplace=True)
self.importance = df_res_sel
self.importance_sel = df_res_sel.loc[df_res_sel.score_test>self.threshold,:]
class selectKFeat(_selectImpFeat):
def __init__(self, dm_model, k=10, feat_names=None):
# get the n most important features, based on feature importance
super().__init__(dm_model, feat_names)
self.importance_sel = self.importance.iloc[0:k,:].copy()
class selectQuantile(_selectImpFeat):
def __init__(self, dm_model, threshold=None, feat_names=None):
# get the top quantile of features, based on feature importance
super().__init__(dm_model, feat_names)
df = self.importance.copy()
df.sort_values('imp_val', ascending=False, inplace=True)
df = df.loc[df.imp_val >= df.imp_val.quantile(q=threshold),:] #keep features at or above the requested quantile
df = df.loc[df.imp_val > 0, :] #remove any negative or zero importance
self.importance_sel = df
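# Illustration of the quantile filter above on a toy importance table: with
# threshold=0.75 only features at or above the 75th percentile of imp_val
# (and strictly positive) survive.
def _demo_quantile_filter():
    df = pd.DataFrame({'feature': list('abcd'), 'imp_val': [0.9, 0.4, 0.1, 0.0]})
    kept = df.loc[df.imp_val >= df.imp_val.quantile(q=0.75), :]
    kept = kept.loc[kept.imp_val > 0, :]
    print(kept)  # only feature 'a' survives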
######################################################################
# Model pipelines
######################################################################
def model_univariate(data, dm_model, feat_labels, target_name, df_res, y_categorical, data_null, perm=100):
# based on simple pairwise statistical test
# y_categorical is True if y holds categorical values
# approach- univariate as a pre-filter, use machine learning to reprioritize
# classification: works
# regression: maybe; works for elastic net
x_train, y_train = data['train'].values()
x_test, y_test = data['test'].values()
#-------
# full model
dm_model.fit(x_train, y_train, x_test, y_test)
df_res_sp = dm_model.evaluate(data, 'all', 'all', target_name, data_null, perm)
df_res = df_res.append(df_res_sp, sort=False)
# feature selection - univariate, filter by q-value
sf = selectMixedDT(alpha=0.05)
if y_categorical:
sf.fit(x_train,y_train,
np.where(feat_labels.source.isin(['RNA-seq','CN']))[0],
np.where(feat_labels.source.isin(['CERES','Mut','Lineage']))[0],
y_categorical,
feat_labels.name)
else:
sf.fit(x_train,y_train,
np.where(feat_labels.source.isin(['CERES','RNA-seq','CN']))[0],
np.where(feat_labels.source.isin(['Mut','Lineage']))[0],
y_categorical,
feat_labels.name)
x_tr, x_te = sf.transform_set(x_train,x_test)
feat_names_sel = sf.importance_sel.feature
if len(feat_names_sel) < 1: return df_res, None
# reduced model
dm_model.fit(x_tr, y_train, x_te, y_test)
data['train']['x'] = x_tr
data['test']['x'] = x_te
data_null['test']['x'] = x_te
df_res_sp = dm_model.evaluate(data, 'topfeat', 'topfeat', target_name, data_null, perm)
df_res = df_res.append(df_res_sp, sort=False)
return df_res, sf
def model_infer_iter(data, dm_model, feat_labels, target_name, df_res, y_categorical, data_null, perm=100):
# iterative inference
x_train, y_train = data['train'].values()
x_test, y_test = data['test'].values()
#-------
# full model
dm_model.fit(x_train, y_train, x_test, y_test)
df_res_sp = dm_model.evaluate(data, 'all', 'all', target_name, data_null, perm)
df_res = df_res.append(df_res_sp, sort=False)
# round 1
sf = selectQuantile(dm_model, threshold=0.75, feat_names=feat_labels.name)
feat_names_sel = sf.importance_sel.feature
if len(feat_names_sel) < 1: return df_res, None
x_tr, x_te = sf.transform_set(x_train, x_test)
dm_model.fit(x_tr, y_train, x_te, y_test)
# round 2
sf = selectQuantile(dm_model, threshold=0.75, feat_names=feat_names_sel)
feat_names_sel = sf.importance_sel.feature
if len(feat_names_sel) < 1: return df_res, None
x_tr, x_te = sf.transform_set(x_tr, x_te)
dm_model.fit(x_tr, y_train, x_te, y_test)
# round 3
sf = selectQuantile(dm_model, threshold=0.75, feat_names=feat_names_sel)
feat_names_sel = sf.importance_sel.feature
if len(feat_names_sel) < 1: return df_res, None
x_tr, x_te = sf.transform_set(x_tr, x_te)
# reduced model
dm_model.fit(x_tr, y_train, x_te, y_test)
data['train']['x'] = x_tr
data['test']['x'] = x_te
data_null['test']['x'] = x_te
df_res_sp = dm_model.evaluate(data, 'topfeat', 'topfeat', target_name, data_null, perm)
df_res = df_res.append(df_res_sp, sort=False)
return df_res, sf
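# Back-of-the-envelope for the three quantile rounds above: each round with
# threshold=0.75 keeps roughly the top 25% of features, so three rounds retain
# about 0.25**3 ~ 1.6% of the original feature set (before the positivity filter).
def _demo_iterative_shrink():
    n_features = 10000
    print(int(n_features * 0.25 ** 3))  # ~156 features survive three rounds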
def model_infer_iter_ens(data, dm_model, feat_labels, target_name, df_res, y_categorical, data_null, perm=100):
# Deprecated (replaced by model_infer_ens_custom). Kept here for compatibility reasons.
# iterative inference, ensemble (random forest) methods, with Boruta feature selection
# works on ensemble methods as boruta requires _feature_importance
x_train, y_train = data['train'].values()
x_test, y_test = data['test'].values()
#-------
# full model
dm_model.fit(x_train, y_train, x_test, y_test)
df_res_sp = dm_model.evaluate(data, 'all', 'all', target_name, data_null, perm)
df_res = df_res.append(df_res_sp, sort=False)
# round 1
sf = selectQuantile(dm_model, threshold=0.75, feat_names=feat_labels.name)
feat_names_sel = sf.importance_sel.feature
if len(feat_names_sel) < 1: return df_res, None
x_tr, x_te = sf.transform_set(x_train, x_test)
dm_model.fit(x_tr, y_train, x_te, y_test)
# round 2
sf = selectQuantile(dm_model, threshold=0.75, feat_names=feat_names_sel)
feat_names_sel = sf.importance_sel.feature
if len(feat_names_sel) < 1: return df_res, None
x_tr, x_te = sf.transform_set(x_tr, x_te)
dm_model.fit(x_tr, y_train, x_te, y_test)
# round 3
sf = selectQuantile(dm_model, threshold=0.75, feat_names=feat_names_sel)
feat_names_sel = sf.importance_sel.feature
if len(feat_names_sel) < 1: return df_res, None
x_tr, x_te = sf.transform_set(x_tr, x_te)
# boruta feature selection
dm_model.model.set_params(max_depth=7)
feat_selector = BorutaPy(dm_model.model, n_estimators='auto', verbose=0)
feat_selector.fit(x_tr, y_train)
feat_names_sel = feat_names_sel[feat_selector.support_]
if len(feat_names_sel) < 1: return df_res, None
x_tr = feat_selector.transform(x_tr)
x_te = feat_selector.transform(x_te)
sf = _featSelect_base()
sf.importance_sel = pd.DataFrame(feat_names_sel.copy())
# reduced model
dm_model.fit(x_tr, y_train, x_te, y_test)
data['train']['x'] = x_tr
data['test']['x'] = x_te
data_null['test']['x'] = x_te
df_res_sp = dm_model.evaluate(data, 'topfeat', 'topfeat', target_name, data_null, perm)
df_res = df_res.append(df_res_sp, sort=False)
return df_res, sf
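# Hedged, isolated sketch of the Boruta step used above. Assumes the `boruta`
# package (imported as BorutaPy at module level, as used above) and
# scikit-learn are installed; BorutaPy needs an estimator exposing
# feature_importances_, hence the random forest.
def _demo_boruta():
    from sklearn.ensemble import RandomForestClassifier
    rng = np.random.default_rng(0)
    y = rng.integers(0, 2, size=300)
    x = rng.normal(size=(300, 10))
    x[:, 0] += 2 * y  # one clearly informative feature
    rf = RandomForestClassifier(n_estimators=100, max_depth=7, random_state=0)
    selector = BorutaPy(rf, n_estimators='auto', verbose=0, random_state=0)
    selector.fit(x, y)
    print('kept columns:', np.where(selector.support_)[0])
    return selector.transform(x)  # keeps only confirmed features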
def model_infer(data, dm_model, feat_labels, target_name, df_res, y_categorical, data_null, perm=100):
# simple inference
x_train, y_train = data['train'].values()
x_test, y_test = data['test'].values()
#-------
# full model
dm_model.fit(x_train, y_train, x_test, y_test)
df_res_sp = dm_model.evaluate(data, 'all', 'all', target_name, data_null, perm)
df_res = df_res.append(df_res_sp, sort=False)
# quantile
sf = selectQuantile(dm_model, threshold=0.75, feat_names=feat_labels.name)
feat_names_sel = sf.importance_sel.feature
if len(feat_names_sel) < 1: return df_res, None
x_tr, x_te = sf.transform_set(x_train, x_test)
# reduced model
dm_model.fit(x_tr, y_train, x_te, y_test)
data['train']['x'] = x_tr
data['test']['x'] = x_te
data_null['test']['x'] = x_te
df_res_sp = dm_model.evaluate(data, 'topfeat', 'topfeat', target_name, data_null, perm)
df_res = df_res.append(df_res_sp, sort=False)
return df_res, sf
def model_infer_ens_custom(data, dm_model, feat_labels, target_name, df_res, y_categorical, data_null, perm=100,
sf_iterThresholds=[0.75, 0.75, 0.75], sf_topK = None):
# ensemble (random forest) methods, with Boruta feature selection
# works on ensemble methods as boruta requires _feature_importance
# with custom feature selection (either iterative or topK features)
# this technically is generic enough that we don't need model_infer_iter_ens, but that method
# is kept for backward compatibility
x_train, y_train = data['train'].values()
x_test, y_test = data['test'].values()
# -------
# full model
dm_model.fit(x_train, y_train, x_test, y_test)
df_res_sp = dm_model.evaluate(data, 'all', 'all', target_name, data_null, perm)
df_res = df_res.append(df_res_sp, sort=False)
# set up feat_names_sel values and copy train sets, for use in iterative selection next
x_tr = x_train.copy()
x_te = x_test.copy()
feat_names_sel = feat_labels.name
feat_names_sel.name = 'feature' # 'feature' is used as the column name in sf.importance_sel for features. Conform to the same naming.
for threshold in sf_iterThresholds:
sf = selectQuantile(dm_model, threshold=threshold, feat_names=feat_names_sel)
feat_names_sel = sf.importance_sel.feature
if len(feat_names_sel) < 1: return df_res, None
x_tr, x_te = sf.transform_set(x_tr, x_te)
dm_model.fit(x_tr, y_train, x_te, y_test)
if sf_topK:
sf = selectKFeat(dm_model, k=sf_topK, feat_names=feat_names_sel)
feat_names_sel = sf.importance_sel.feature
if len(feat_names_sel) < 1: return df_res, None
started.
When iterating over this object, batches might contain overlapping trajectories.
Args:
transitions (:class:`TransitionBatch`): the transition data used to built
the iterator.
trajectory_indices (list(tuple(int, int))): a list of [start, end) indices for
trajectories.
batch_size (int): the batch size to use when iterating over the stored data.
sequence_length (int): the length of the sequences returned.
batches_per_loop (int): if given, specifies how many batches
to return (at most) over a full loop of the iterator.
rng (np.random.Generator, optional): a random number generator when sampling
batches. If ``None`` (default value), a new default generator will be used.
"""
def __init__(
self,
transitions: TransitionBatch,
trajectory_indices: List[Tuple[int, int]],
batch_size: int,
sequence_length: int,
batches_per_loop: int,
rng: Optional[np.random.Generator] = None,
):
self._sequence_length = sequence_length
self._valid_starts = self._get_indices_valid_starts(
trajectory_indices, sequence_length
)
self._batches_per_loop = batches_per_loop
if len(self._valid_starts) < 0.5 * len(trajectory_indices):
warnings.warn(
"More than 50% of the trajectories were discarded for being shorter "
"than the specified length."
)
# no need to pass transitions to super(), since it's only used by __getitem__,
# which this class replaces. Passing the set of possible starts allows us to
# use all the indexing machinery of the superclasses.
super().__init__(
self._valid_starts, # type: ignore
batch_size,
shuffle_each_epoch=True, # this is ignored
rng=rng,
)
self.transitions = transitions
@staticmethod
def _get_indices_valid_starts(
trajectory_indices: List[Tuple[int, int]],
sequence_length: int,
) -> np.ndarray:
# This is memory and time inefficient but it's only done once when creating the
# iterator. It's a good price to pay for now, since it simplifies things
# enormously and it's less error prone
valid_starts = []
for (start, end) in trajectory_indices:
if end - start < sequence_length:
continue
valid_starts.extend(list(range(start, end - sequence_length + 1)))
return np.array(valid_starts)
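# Worked example of _get_indices_valid_starts, re-implemented standalone since
# the enclosing class header is truncated in this snippet: trajectories [0, 5)
# and [5, 7) with sequence_length=3 yield valid starts [0, 1, 2]; the second
# trajectory (length 2) is too short and is discarded.
def _demo_valid_starts():
    trajectory_indices = [(0, 5), (5, 7)]
    sequence_length = 3
    valid_starts = []
    for start, end in trajectory_indices:
        if end - start < sequence_length:
            continue
        valid_starts.extend(range(start, end - sequence_length + 1))
    print(np.array(valid_starts))  # [0 1 2]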
def __iter__(self):
self._current_batch = 0
return self
def __next__(self):
if self._current_batch >= self._batches_per_loop:
raise StopIteration
self._current_batch += 1
indices = self._rng.choice(self.num_stored, size=self.batch_size, replace=True)
return self[indices]
def __len__(self):
return self._batches_per_loop
def __getitem__(self, item):
return _sequence_getitem_impl(
self.transitions,
self.batch_size,
self._sequence_length,
self._valid_starts,
item,
)
class ReplayBuffer:
"""A replay buffer with support for training/validation iterators and ensembles.
This buffer can be pushed to and sampled from as a typical replay buffer.
Args:
capacity (int): the maximum number of transitions that the buffer can store.
When the capacity is reached, the contents are overwritten in FIFO fashion.
obs_shape (Sequence of ints): the shape of the observations to store.
action_shape (Sequence of ints): the shape of the actions to store.
obs_type (type): the data type of the observations (defaults to np.float32).
action_type (type): the data type of the actions (defaults to np.float32).
reward_type (type): the data type of the rewards (defaults to np.float32).
rng (np.random.Generator, optional): a random number generator when sampling
batches. If None (default value), a new default generator will be used.
max_trajectory_length (int, optional): if given, indicates that trajectory
information should be stored and that trajectories will be at most this
number of steps. Defaults to ``None`` in which case no trajectory
information will be kept. The buffer will keep trajectory information
automatically using the done value when calling :meth:`add`.
.. warning::
When using ``max_trajectory_length`` it is the user's responsibility to ensure
that trajectories are stored continuously in the replay buffer.
"""
def __init__(
self,
capacity: int,
obs_shape: Sequence[int],
action_shape: Sequence[int],
obs_type: Type = np.float32,
action_type: Type = np.float32,
reward_type: Type = np.float32,
rng: Optional[np.random.Generator] = None,
max_trajectory_length: Optional[int] = None,
):
self.cur_idx = 0
self.capacity = capacity
self.num_stored = 0
self.trajectory_indices: Optional[List[Tuple[int, int]]] = None
if max_trajectory_length:
self.trajectory_indices = []
capacity += max_trajectory_length
# TODO replace all of these with a transition batch
self.obs = np.empty((capacity, *obs_shape), dtype=obs_type)
self.next_obs = np.empty((capacity, *obs_shape), dtype=obs_type)
self.action = np.empty((capacity, *action_shape), dtype=action_type)
self.reward = np.empty(capacity, dtype=reward_type)
self.done = np.empty(capacity, dtype=bool)
if rng is None:
self._rng = np.random.default_rng()
else:
self._rng = rng
self._start_last_trajectory = 0
@property
def stores_trajectories(self) -> bool:
return self.trajectory_indices is not None
@staticmethod
def _check_overlap(segment1: Tuple[int, int], segment2: Tuple[int, int]) -> bool:
s1, e1 = segment1
s2, e2 = segment2
return (s1 <= s2 < e1) or (s1 < e2 <= e1)
def remove_overlapping_trajectories(self, new_trajectory: Tuple[int, int]):
cnt = 0
for traj in self.trajectory_indices:
if self._check_overlap(new_trajectory, traj):
cnt += 1
else:
break
for _ in range(cnt):
self.trajectory_indices.pop(0)
def _trajectory_bookkeeping(self, done: bool):
self.cur_idx += 1
if self.num_stored < self.capacity:
self.num_stored += 1
if self.cur_idx >= self.capacity:
self.num_stored = max(self.num_stored, self.cur_idx)
if done:
self.close_trajectory()
else:
partial_trajectory = (self._start_last_trajectory, self.cur_idx + 1)
self.remove_overlapping_trajectories(partial_trajectory)
if self.cur_idx >= len(self.obs):
warnings.warn(
"The replay buffer was filled before current trajectory finished. "
"The history of the current partial trajectory will be discarded. "
"Make sure you set `max_trajectory_length` to the appropriate value "
"for your problem."
)
self._start_last_trajectory = 0
self.cur_idx = 0
self.num_stored = len(self.obs)
def close_trajectory(self):
new_trajectory = (self._start_last_trajectory, self.cur_idx)
self.remove_overlapping_trajectories(new_trajectory)
self.trajectory_indices.append(new_trajectory)
if self.cur_idx - self._start_last_trajectory > (len(self.obs) - self.capacity):
warnings.warn(
"A trajectory was saved with length longer than expected. "
"Unexpected behavior might occur."
)
if self.cur_idx >= self.capacity:
self.cur_idx = 0
self._start_last_trajectory = self.cur_idx
def add(
self,
obs: np.ndarray,
action: np.ndarray,
next_obs: np.ndarray,
reward: float,
done: bool,
):
"""Adds a transition (s, a, s', r, done) to the replay buffer.
Args:
obs (np.ndarray): the observation at time t.
action (np.ndarray): the action at time t.
next_obs (np.ndarray): the observation at time t + 1.
reward (float): the reward at time t + 1.
done (bool): a boolean indicating whether the episode ended or not.
"""
self.obs[self.cur_idx] = obs
self.next_obs[self.cur_idx] = next_obs
self.action[self.cur_idx] = action
self.reward[self.cur_idx] = reward
self.done[self.cur_idx] = done
if self.trajectory_indices is not None:
self._trajectory_bookkeeping(done)
else:
self.cur_idx = (self.cur_idx + 1) % self.capacity
self.num_stored = min(self.num_stored + 1, self.capacity)
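# Tiny demonstration of the FIFO arithmetic used by add() when no trajectory
# information is kept: the write cursor wraps modulo capacity while num_stored
# saturates at capacity.
def _demo_fifo_indices():
    capacity, cur_idx, num_stored = 3, 0, 0
    for _ in range(5):
        cur_idx = (cur_idx + 1) % capacity
        num_stored = min(num_stored + 1, capacity)
    print(cur_idx, num_stored)  # 2 3 -- writes 4 and 5 overwrote slots 0 and 1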
def sample(self, batch_size: int) -> TransitionBatch:
"""Samples a batch of transitions from the replay buffer.
Args:
batch_size (int): the number of samples required.
Returns:
(tuple): the sampled values of observations, actions, next observations, rewards
and done indicators, as numpy arrays, respectively. The i-th transition corresponds
to (obs[i], act[i], next_obs[i], rewards[i], dones[i]).
"""
indices = self._rng.choice(self.num_stored, size=batch_size)
return self._batch_from_indices(indices)
def sample_trajectory(self) -> Optional[TransitionBatch]:
"""Samples a full trajectory and returns it as a batch.
Returns:
(tuple): A tuple with observations, actions, next observations, rewards
and done indicators, as numpy arrays, respectively; these will correspond
to a full trajectory. The i-th transition corresponds
to (obs[i], act[i], next_obs[i], rewards[i], dones[i])."""
if self.trajectory_indices is None or len(self.trajectory_indices) == 0:
return None
idx = self._rng.choice(len(self.trajectory_indices))
indices = np.arange(
self.trajectory_indices[idx][0], self.trajectory_indices[idx][1]
)
return self._batch_from_indices(indices)
def _batch_from_indices(self, indices: Sized) -> TransitionBatch:
obs = self.obs[indices]
next_obs = self.next_obs[indices]
action = self.action[indices]
reward = self.reward[indices]
done = self.done[indices]
return TransitionBatch(obs, action, next_obs, reward, done)
def __len__(self):
return self.num_stored
def save(self, save_dir: Union[pathlib.Path, str]):
"""Saves the data in the replay buffer to a given directory.
Args:
save_dir (str): the directory to save the data to. File name will be
replay_buffer.npz.
"""
path = pathlib.Path(save_dir) / "replay_buffer.npz"
np.savez(
path,
obs=self.obs[: self.num_stored],
next_obs=self.next_obs[: self.num_stored],
action=self.action[: self.num_stored],
reward=self.reward[: self.num_stored],
done=self.done[: self.num_stored],
)
def load(self, load_dir: Union[pathlib.Path, str]):
"""Loads transition data from a given directory.
Args:
load_dir (str): the directory where the buffer is stored.
"""
path = pathlib.Path(load_dir) / "replay_buffer.npz"
data = np.load(path)
num_stored = len(data["obs"])
self.obs[:num_stored] = data["obs"]
self.next_obs[:num_stored] = data["next_obs"]
self.action[:num_stored] = data["action"]
self.reward[:num_stored] = data["reward"]
self.done[:num_stored] = data["done"]
self.num_stored = num_stored
self.cur_idx = self.num_stored % self.capacity
def get_all(self, shuffle: bool = False) -> TransitionBatch:
"""Returns all data stored in the replay buffer.
Args:
shuffle (int): set to ``True`` if the data returned should be in random order.
Defaults to ``False``.
"""
if shuffle:
permutation = self._rng.permutation(self.num_stored)
return self._batch_from_indices(permutation)
else:
return TransitionBatch(
self.obs[: self.num_stored],
self.action[: self.num_stored],
self.next_obs[: self.num_stored],
self.reward[: self.num_stored],
self.done[: self.num_stored],
)
def get_iterators(
self,
batch_size: int,
val_ratio: float,
train_ensemble: bool = False, # noqa
ensemble_size: Optional[int] = None,
shuffle_each_epoch: bool = True,
bootstrap_permutes: bool = False,
) -> Tuple[TransitionIterator, Optional[TransitionIterator]]:
"""Returns training/validation iterators for the data in the replay buffer.
.. deprecated:: v0.1.2
Use :func:`mbrl.util.common.get_basic_buffer_iterators`.
Args:
batch_size (int): the batch size for the iterators.
val_ratio (float): the proportion of the data to use for the validation set.
from __future__ import print_function, division, absolute_import
import time
import warnings
import sys
# unittest.TestCase.subTest() was only added in Python 3.4
if sys.version_info[0] < 3 or sys.version_info[1] < 4:
import unittest2 as unittest
else:
import unittest
# unittest.mock is not available in 2.7 (though unittest2 might contain it?)
try:
import unittest.mock as mock
except ImportError:
import mock
import matplotlib
matplotlib.use('Agg') # fix execution of tests involving matplotlib on travis
import numpy as np
import imgaug as ia
def main():
time_start = time.time()
test_Keypoint()
test_KeypointsOnImage()
time_end = time.time()
print("<%s> Finished without errors in %.4fs." % (__file__, time_end - time_start,))
def test_Keypoint():
eps = 1e-8
# -------------
# x/y/x_int/y_int
# -------------
kp = ia.Keypoint(y=1, x=2)
assert kp.y == 1
assert kp.x == 2
assert kp.y_int == 1
assert kp.x_int == 2
kp = ia.Keypoint(y=1.1, x=2.7)
assert 1.1 - eps < kp.y < 1.1 + eps
assert 2.7 - eps < kp.x < 2.7 + eps
assert kp.y_int == 1
assert kp.x_int == 3
# -------------
# project
# -------------
kp = ia.Keypoint(y=1, x=2)
kp2 = kp.project((10, 10), (10, 10))
assert kp2.y == 1
assert kp2.x == 2
kp2 = kp.project((10, 10), (20, 10))
assert kp2.y == 2
assert kp2.x == 2
kp2 = kp.project((10, 10), (10, 20))
assert kp2.y == 1
assert kp2.x == 4
kp2 = kp.project((10, 10), (20, 20))
assert kp2.y == 2
assert kp2.x == 4
# -------------
# shift
# -------------
kp = ia.Keypoint(y=1, x=2)
kp2 = kp.shift(y=1)
assert kp2.y == 2
assert kp2.x == 2
kp2 = kp.shift(y=-1)
assert kp2.y == 0
assert kp2.x == 2
kp2 = kp.shift(x=1)
assert kp2.y == 1
assert kp2.x == 3
kp2 = kp.shift(x=-1)
assert kp2.y == 1
assert kp2.x == 1
kp2 = kp.shift(y=1, x=2)
assert kp2.y == 2
assert kp2.x == 4
# -------------
# draw_on_image
# -------------
kp = ia.Keypoint(x=0, y=0)
image = np.zeros((5, 5, 3), dtype=np.uint8) + 10
image_kp = kp.draw_on_image(
image, color=(0, 255, 0), alpha=1, size=1, copy=True,
raise_if_out_of_image=False)
assert np.all(image_kp[0, 0, :] == [0, 255, 0])
assert np.all(image_kp[1:, :, :] == 10)
assert np.all(image_kp[:, 1:, :] == 10)
kp = ia.Keypoint(x=4, y=4)
image = np.zeros((5, 5, 3), dtype=np.uint8) + 10
image_kp = kp.draw_on_image(
image, color=(0, 255, 0), alpha=1, size=1, copy=True,
raise_if_out_of_image=False)
assert np.all(image_kp[4, 4, :] == [0, 255, 0])
assert np.all(image_kp[:4, :, :] == 10)
assert np.all(image_kp[:, :4, :] == 10)
kp = ia.Keypoint(x=4, y=4)
image = np.zeros((5, 5, 3), dtype=np.uint8) + 10
image_kp = kp.draw_on_image(
image, color=(0, 255, 0), alpha=1, size=5, copy=True,
raise_if_out_of_image=False)
assert np.all(image_kp[2:, 2:, :] == [0, 255, 0])
assert np.all(image_kp[:2, :, :] == 10)
assert np.all(image_kp[:, :2, :] == 10)
kp = ia.Keypoint(x=5, y=5)
image = np.zeros((5, 5, 3), dtype=np.uint8) + 10
image_kp = kp.draw_on_image(
image, color=(0, 255, 0), alpha=1, size=5, copy=True,
raise_if_out_of_image=False)
assert np.all(image_kp[3:, 3:, :] == [0, 255, 0])
assert np.all(image_kp[:3, :, :] == 10)
assert np.all(image_kp[:, :3, :] == 10)
kp = ia.Keypoint(x=0, y=0)
image = np.zeros((5, 5, 3), dtype=np.uint8) + 10
image_kp = kp.draw_on_image(
image, color=(0, 255, 0), alpha=1, size=5, copy=True,
raise_if_out_of_image=False)
assert np.all(image_kp[:3, :3, :] == [0, 255, 0])
assert np.all(image_kp[3:, :, :] == 10)
assert np.all(image_kp[:, 3:, :] == 10)
kp = ia.Keypoint(x=-1, y=-1)
image = np.zeros((5, 5, 3), dtype=np.uint8) + 10
image_kp = kp.draw_on_image(
image, color=(0, 255, 0), alpha=1, size=5, copy=True,
raise_if_out_of_image=False)
assert np.all(image_kp[:2, :2, :] == [0, 255, 0])
assert np.all(image_kp[2:, :, :] == 10)
assert np.all(image_kp[:, 2:, :] == 10)
kp = ia.Keypoint(x=0, y=0)
image = np.zeros((5, 5, 3), dtype=np.uint8) + 10
image_kp = kp.draw_on_image(
image, color=(0, 200, 0), alpha=0.5, size=1, copy=True,
raise_if_out_of_image=False)
assert np.all(image_kp[0, 0, :] == [0 + 5, 100 + 5, 0 + 5])
assert np.all(image_kp[1:, :, :] == 10)
assert np.all(image_kp[:, 1:, :] == 10)
# -------------
# generate_similar_points_manhattan
# -------------
kp = ia.Keypoint(y=4, x=5)
kps_manhatten = kp.generate_similar_points_manhattan(0, 1.0, return_array=False)
assert len(kps_manhatten) == 1
assert kps_manhatten[0].y == 4
assert kps_manhatten[0].x == 5
kps_manhatten = kp.generate_similar_points_manhattan(1, 1.0, return_array=False)
assert len(kps_manhatten) == 5
expected = [(4, 5), (3, 5), (4, 6), (5, 5), (4, 4)]
for y, x in expected:
assert any([np.allclose([y, x], [kp_manhatten.y, kp_manhatten.x]) for kp_manhatten in kps_manhatten])
kps_manhatten = kp.generate_similar_points_manhattan(1, 1.0, return_array=True)
assert kps_manhatten.shape == (5, 2)
expected = [(4, 5), (3, 5), (4, 6), (5, 5), (4, 4)]
for y, x in expected:
assert any([np.allclose([y, x], [kp_manhatten_y, kp_manhatten_x])
for kp_manhatten_x, kp_manhatten_y in kps_manhatten])
# -------------
# __repr__ / __str__
# -------------
kp = ia.Keypoint(y=1, x=2)
assert kp.__repr__() == kp.__str__() == "Keypoint(x=2.00000000, y=1.00000000)"
kp = ia.Keypoint(y=1.2, x=2.7)
assert kp.__repr__() == kp.__str__() == "Keypoint(x=2.70000000, y=1.20000000)"
def test_KeypointsOnImage():
eps = 1e-8
kps = [ia.Keypoint(x=1, y=2), ia.Keypoint(x=3, y=4)]
# height/width
kpi = ia.KeypointsOnImage(keypoints=kps, shape=(10, 20, 3))
assert kpi.height == 10
assert kpi.width == 20
# image instead of shape
kpi = ia.KeypointsOnImage(keypoints=kps, shape=np.zeros((10, 20, 3), dtype=np.uint8))
assert kpi.shape == (10, 20, 3)
# -------------
# on()
# -------------
kpi2 = kpi.on((10, 20, 3))
assert all([kp_i.x == kp_j.x and kp_i.y == kp_j.y for kp_i, kp_j in zip(kpi.keypoints, kpi2.keypoints)])
kpi2 = kpi.on((20, 40, 3))
assert kpi2.keypoints[0].x == 2
assert kpi2.keypoints[0].y == 4
assert kpi2.keypoints[1].x == 6
assert kpi2.keypoints[1].y == 8
kpi2 = kpi.on(np.zeros((20, 40, 3), dtype=np.uint8))
assert kpi2.keypoints[0].x == 2
assert kpi2.keypoints[0].y == 4
assert kpi2.keypoints[1].x == 6
assert kpi2.keypoints[1].y == 8
# -------------
# draw_on_image
# -------------
kpi = ia.KeypointsOnImage(keypoints=kps, shape=(5, 5, 3))
image = np.zeros((5, 5, 3), dtype=np.uint8) + 10
kps_mask = np.zeros(image.shape[0:2], dtype=bool)
kps_mask[2, 1] = 1
kps_mask[4, 3] = 1
image_kps = kpi.draw_on_image(image, color=[0, 255, 0], size=1, copy=True, raise_if_out_of_image=False)
assert np.all(image_kps[kps_mask] == [0, 255, 0])
assert np.all(image_kps[~kps_mask] == [10, 10, 10])
image_kps = kpi.draw_on_image(image, color=[0, 255, 0], alpha=0.5, size=1, copy=True, raise_if_out_of_image=False)
assert np.all(image_kps[kps_mask] == [int(0.5*10+0), int(0.5*10+0.5*255), int(10*0.5+0)])
assert np.all(image_kps[~kps_mask] == [10, 10, 10])
image_kps = kpi.draw_on_image(image, color=[0, 255, 0], size=3, copy=True, raise_if_out_of_image=False)
kps_mask_size3 = np.copy(kps_mask)
kps_mask_size3[2-1:2+1+1, 1-1:1+1+1] = 1
kps_mask_size3[4-1:4+1+1, 3-1:3+1+1] = 1
assert np.all(image_kps[kps_mask_size3] == [0, 255, 0])
assert np.all(image_kps[~kps_mask_size3] == [10, 10, 10])
image_kps = kpi.draw_on_image(image, color=[0, 0, 255], size=1, copy=True, raise_if_out_of_image=False)
assert np.all(image_kps[kps_mask] == [0, 0, 255])
assert np.all(image_kps[~kps_mask] == [10, 10, 10])
image_kps = kpi.draw_on_image(image, color=255, size=1, copy=True, raise_if_out_of_image=False)
assert np.all(image_kps[kps_mask] == [255, 255, 255])
assert np.all(image_kps[~kps_mask] == [10, 10, 10])
image2 = np.copy(image)
image_kps = kpi.draw_on_image(image2, color=[0, 255, 0], size=1, copy=False, raise_if_out_of_image=False)
assert np.all(image2 == image_kps)
assert np.all(image_kps[kps_mask] == [0, 255, 0])
assert np.all(image_kps[~kps_mask] == [10, 10, 10])
assert np.all(image2[kps_mask] == [0, 255, 0])
assert np.all(image2[~kps_mask] == [10, 10, 10])
kpi = ia.KeypointsOnImage(keypoints=kps + [ia.Keypoint(x=100, y=100)], shape=(5, 5, 3))
image = np.zeros((5, 5, 3), dtype=np.uint8) + 10
kps_mask = np.zeros(image.shape[0:2], dtype=bool)
kps_mask[2, 1] = 1
kps_mask[4, 3] = 1
image_kps = kpi.draw_on_image(image, color=[0, 255, 0], size=1, copy=True, raise_if_out_of_image=False)
assert np.all(image_kps[kps_mask] == [0, 255, 0])
assert np.all(image_kps[~kps_mask] == [10, 10, 10])
kpi = ia.KeypointsOnImage(keypoints=kps + [ia.Keypoint(x=100, y=100)], shape=(5, 5, 3))
image = np.zeros((5, 5, 3), dtype=np.uint8) + 10
got_exception = False
try:
image_kps = kpi.draw_on_image(image, color=[0, 255, 0], size=1, copy=True, raise_if_out_of_image=True)
assert np.all(image_kps[kps_mask] == [0, 255, 0])
assert np.all(image_kps[~kps_mask] == [10, 10, 10])
except Exception:
got_exception = True
assert got_exception
kpi = ia.KeypointsOnImage(keypoints=kps + [ia.Keypoint(x=5, y=5)], shape=(5, 5, 3))
image = np.zeros((5, 5, 3), dtype=np.uint8) + 10
kps_mask = np.zeros(image.shape[0:2], dtype=bool)
kps_mask[2, 1] = 1
kps_mask[4, 3] = 1
image_kps = kpi.draw_on_image(image, color=[0, 255, 0], size=1, copy=True, raise_if_out_of_image=False)
assert np.all(image_kps[kps_mask] == [0, 255, 0])
assert np.all(image_kps[~kps_mask] == [10, 10, 10])
got_exception = False
try:
image_kps = kpi.draw_on_image(image, color=[0, 255, 0], size=1, copy=True, raise_if_out_of_image=True)
assert np.all(image_kps[kps_mask] == [0, 255, 0])
assert np.all(image_kps[~kps_mask] == [10, 10, 10])
except Exception:
got_exception = True
assert got_exception
# -------------
# shift
# -------------
kpi = ia.KeypointsOnImage(keypoints=kps, shape=(5, 5, 3))
kpi2 = kpi.shift(x=0, y=0)
assert kpi2.keypoints[0].x == kpi.keypoints[0].x
assert kpi2.keypoints[0].y == kpi.keypoints[0].y
assert kpi2.keypoints[1].x == kpi.keypoints[1].x
assert kpi2.keypoints[1].y == kpi.keypoints[1].y
kpi2 = kpi.shift(x=1)
assert kpi2.keypoints[0].x == kpi.keypoints[0].x + 1
assert kpi2.keypoints[0].y == kpi.keypoints[0].y
assert kpi2.keypoints[1].x == kpi.keypoints[1].x + 1
assert kpi2.keypoints[1].y == kpi.keypoints[1].y
kpi2 = kpi.shift(x=-1)
assert kpi2.keypoints[0].x == kpi.keypoints[0].x - 1
assert kpi2.keypoints[0].y == kpi.keypoints[0].y
assert kpi2.keypoints[1].x == kpi.keypoints[1].x - 1
assert kpi2.keypoints[1].y == kpi.keypoints[1].y
kpi2 = kpi.shift(y=1)
assert kpi2.keypoints[0].y == kpi.keypoints[0].y + 1
array-like object where each element corresponds to a specific camera parameter,
corresponding to the element represented by each column coming from the :meth:`~CameraModel.compute_jacobian`
method. For a concrete example of the update vector and how it works, see the concrete camera model
implementations.
:param update_vec: delta updates to the model parameters
"""
pass
@abstractmethod
def pixels_to_unit(self, pixels: ARRAY_LIKE, temperature: Real = 0, image: int = 0) -> np.ndarray:
"""
This method converts pixel image locations to unit vectors expressed in the camera frame.
The pixel locations should be expressed as a shape (2,) or (2, n) array. They are converted
to unit vectors by first going through the inverse distortion model (see :meth:`undistort_pixels`) and then
being converted to unit vectors in the camera frame according to the definitions of the current model (also
including any misalignment terms).
:param pixels: The image points to be converted to unit vectors in the camera frame as a shape (2,) or (2, n)
array
:param temperature: The temperature to use for the undistortion
:param image: The image index that the pixels belong to (only important if there are multiple misalignments)
:return: The unit vectors corresponding to the image locations expressed in the camera frame as a shape (3,) or
(3, n) array.
"""
return np.zeros(3)
@abstractmethod
def undistort_pixels(self, pixels: ARRAY_LIKE, temperature: Real = 0) -> np.ndarray:
"""
This method computes undistorted pixel locations (gnomic/pinhole locations) for given distorted
pixel locations according to the current model.
The ``pixels`` input should be specified as a shape (2,) or (2, n) array of image locations with units of
pixels. The return will be an array of the same shape as ``pixels`` with units of pixels but with distortion
removed.
:param pixels: The image points to be converted to gnomic (pinhole) locations as a shape (2,) or (2, n) array
:param temperature: The temperature to use for the undistortion
:return: The undistorted (gnomic) locations corresponding to the distorted pixel locations as an array of
the same shape as ``pixels``
"""
return np.zeros(2)
def overwrite(self, model: 'CameraModel'):
"""
This method replaces self with the properties of ``model`` in place.
This method is primarily used in the calibration classes to maintain the link between the internal and external
camera models. Essentially, each instance variable in ``self`` is overwritten by the corresponding instance
variable in other.
This method operates by looping through the properties defined in :attr:`important_attributes` and copying the
value from ``model`` to ``self``.
:param model: The model to overwrite self with
:raises ValueError: When ``model`` is not the same type as ``self``
"""
# check to see if the other model is the same type of self
if not isinstance(model, self.__class__):
raise ValueError('Models must be of same type to overwrite')
# loop through each attribute in important_attributes and copy its value from model to self
for attribute in self.important_attributes:
setattr(self, attribute, getattr(model, attribute))
@abstractmethod
def distort_pixels(self, pixels: ARRAY_LIKE) -> np.ndarray:
"""
A method that takes gnomic pixel locations in units of pixels and applies the appropriate distortion to them.
This method is used in the :meth:`distortion_map` method to generate the distortion values for each pixel.
:param pixels: The pinhole location pixel locations the distortion is to be applied to
:return: The distorted pixel locations in units of pixels
"""
return np.zeros(2)
def distortion_map(self, shape: NONEARRAY = None, step: int = 1) \
-> Tuple[np.ndarray, np.ndarray, np.ndarray]:
"""
This method computes the value of the distortion model across an entire image for use in creating distortion
maps.
The ``shape`` and ``step`` inputs to this method specify the size of the image (shape) as well as the size of
the grid steps for computing the distortion values. The locations the distortion values are computed for are
generated by::
rows, cols = np.meshgrid(np.arange(0, shape[0], step), np.arange(0, shape[1], step), indexing='ij')
If shape is ``None`` then it is set to be ``(self.n_rows, self.n_cols)``.
The value of the distortion is then computed for each row and column location in ``rows`` and ``cols`` and then
returned, along with the ``rows`` and ``cols`` grids in units of pixels.
In general this method will be used like::
>>> import matplotlib.pyplot as plt
>>> from giant.camera_models import CameraModel
>>> inst = CameraModel(field_of_view=1) # you can't actually do this
>>> prows, pcols, dist = inst.distortion_map((1024, 1024), 100)
>>> plt.figure()
>>> cs = plt.contour(pcols, prows, np.linalg.norm(dist, axis=0).reshape(prows.shape))
>>> plt.clabel(cs, inline=True, fontsize=10)
>>> plt.figure()
>>> plt.quiver(pcols.flatten(), prows.flatten(), dist[0], dist[1])
to generate distortion maps of the current model.
:param shape: The size of the image or None
:param step: The size of the step to use in sampling the distortion field
:return: a tuple containing the ``rows`` grid, ``cols`` grid, and a (2, ``rows.size``) array containing the
distortion values in pixels (first row = x distortion values, seconds row=y distortion values)
"""
if shape is None:
shape = (self.n_rows, self.n_cols)
# get the pixels that we are calculating the distortion map for
row_labels = np.arange(0, shape[0], step)
col_labels = np.arange(0, shape[1], step)
rows, cols = np.meshgrid(row_labels, col_labels, indexing='ij')
pixels = np.array([cols.flatten().tolist(), rows.flatten().tolist()])
# distort the pixels, calculate the distortion, and return the results
return rows, cols, self.distort_pixels(pixels) - pixels
def undistort_image(self, image: np.ndarray, return_shape: Union[ReturnShape, str] = 'same') -> np.ndarray:
"""
This method takes in an entire image and warps it to remove the distortion specified by the current model.
The image should be input as a (n, m) array of gray-scale illumination values (DN values).
The warping is formed by
#. generating a grid of subscripts for each cell of the image (these are the distorted pixel locations)
#. computing the corresponding gnomic location of these points using the :meth:`undistort_pixels` method
#. re-sampling the undistorted image data to form a new image with distortion removed
In general you should avoid using this function because it is much more computationally expensive than
working with the nominal distorted image and then undistorting specific points for OpNav measurements.
If ``return_shape`` is ``'same'`` then the returned image is the same size as the input image (and the
undistorted image is either cropped or padded to fit this shape). If ``return_shape`` is ``'full'`` then the
returned image is the size of what the detector would need to be to capture the image from the camera if it
was a pinhole model.
:param image: The image to have the distortion removed from as a (n, m) array of gray-scale illumination values
:param return_shape: Specify whether to return the full undistorted image or the undistorted image set to the
same size as the original
:return: The undistorted image as an array of shape (n, m) illumination values
.. note:: The re-sampled image has NaN specified for anywhere that would be considered extrapolation in the
re-sampling process. This means that the undistorted image will generally look somewhat weird around
the edges.
"""
row_labels = np.arange(image.shape[0])
col_labels = np.arange(image.shape[1])
rows, cols = np.meshgrid(row_labels, col_labels, indexing='ij')
pixel_subs = np.array([cols.flatten().tolist(), rows.flatten().tolist()])
if hasattr(image, 'temperature'):
# noinspection PyTypeChecker
undistorted_subs = self.undistort_pixels(pixel_subs, temperature=image.temperature)
else:
undistorted_subs = self.undistort_pixels(pixel_subs)
# points = np.array([undistorted_subs[1, :].tolist(), undistorted_subs[0, :].tolist()]).T
points = undistorted_subs.T
if ReturnShape(return_shape) == ReturnShape.SAME:
new_subs = pixel_subs[::-1].T
shape = image.shape
else:
start = np.ceil(points.min(axis=0)).astype(int)
stop = np.floor(points.max(axis=0)).astype(int) + 1
new_c = np.arange(start[0], stop[0])
new_r = np.arange(start[1], stop[1])
gridded_r, gridded_c = np.meshgrid(new_r, new_c, indexing='ij')
new_subs = np.vstack([gridded_r.ravel(), gridded_c.ravel()])
shape = gridded_r.shape
return interp.griddata(points, image.flatten(), new_subs, fill_value=np.nan, method='linear').reshape(shape)
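# A hedged usage sketch (assumption: 'model' is a fitted CameraModel instance
# and 'img' is a 2-D numpy array of DN values; not part of the class API):
#     undistorted = model.undistort_image(img, return_shape='full')
# NaN-valued pixels in the result mark regions that would have required
# extrapolation during re-sampling.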
def copy(self) -> 'CameraModel':
"""
Returns a deep copy of this object, breaking all references with ``self``.
:return: A copy of self that is a separate object
"""
return copy.deepcopy(self)
# noinspection PyProtectedMember
def to_elem(self, elem: etree._Element, **kwargs) -> etree._Element:
"""
Stores this camera model in an :class:`lxml.etree.SubElement` object for storing in
#!/usr/bin/python
import sys, math, itertools
import numpy as np
from scipy.optimize import fmin_powell
import random
import argparse
from ast import literal_eval as make_tuple
# Used to call datgnom and outsource Dmax determination.
import general_scripts as gs
import saxscalc as sc
def read_xvgs_all(filelist):
out_list=[]
for i in range(len(filelist)):
block = np.array( gs.load_xydy(filelist[i]) )
print( " ...file read, shape: %s" % str(block.shape), file=sys.stderr )
out_list.append(block)
return out_list
def find_subrange(x, xmin, xmax):
return x[ (x>=xmin) & (x<=xmax) ]
def find_subrange_all( dataRaw, initBounds):
nPlots = len(dataRaw)
minAll = np.max( (initBounds[0], np.max([ d[0,0] for d in dataRaw ])) )
maxAll = np.min( (initBounds[1], np.min([ d[0,-1] for d in dataRaw ])) )
return find_subrange( dataRaw[0][0], minAll, maxAll )
def build_interpolated_block( xBasis, dataRaw ):
nVals = len(xBasis)
nPlots = len(dataRaw)
# Assume each file has the same number of y-values.
nCols = dataRaw[0].shape[0]
outBlock = np.zeros( (nPlots, nCols-1, nVals) )
for i in range(nPlots):
for j in range(1, nCols):
outBlock[i,j-1] = np.interp( xBasis, dataRaw[i][0], dataRaw[i][j])
return outBlock
def subtract_spectra(yA, yB):
# A-B
sh = yA.shape
out = np.zeros( sh )
out[0] = yA[0] - yB[0]
out[1] = np.sqrt( np.square(yA[1]) + np.square(yB[1]) )
return out
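# A minimal standalone sketch (not used by the script) illustrating the error
# propagation above: when subtracting spectra, the uncertainties add in
# quadrature, sigma_out = sqrt(sigma_A**2 + sigma_B**2).
def _example_subtract():
    yA = np.array([[10.0, 8.0], [3.0, 3.0]])  # row 0 = intensities, row 1 = sigmas
    yB = np.array([[ 4.0, 2.0], [4.0, 4.0]])
    diff = subtract_spectra(yA, yB)
    # diff[0] == [6.0, 6.0] and diff[1] == [5.0, 5.0], since sqrt(3**2 + 4**2) == 5
    return diff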
def intensityDiff(pos, *args):
y1 = args[0][0] ; y1sig = args[0][1]
y2 = args[1][0] ; y2sig = args[1][1]
fitMetric = args[2]
bUseWeights = args[3]
bNoConst = args[4]
stride = args[5]
numRounds = args[6]
if fitMetric == 'chi' or fitMetric == 'chi_red':
if bNoConst:
f = pos[0] ; c=0.0
else:
f, c = pos
if bUseWeights:
value=sc.chi_square(y1,f*y2+c, dx1=y1sig, dx2=f*y2sig)
else:
value=sc.chi_square(y1,f*y2+c)
elif fitMetric == 'log_chi':
if bNoConst:
f = pos[0] ; c=0.0
else:
f, c = pos
# Check for negative values first.
if np.any(f*y2+c < 0.0):
print( "= = WARNING: for values of f %g and c %g there exists invalid logs." % (f, c), file=sys.stderr )
return 1e20
if bUseWeights:
value=sc.log_chi_square(y1,f*y2+c, dx1=y1sig, dx2=f*y2sig)
else:
value=sc.log_chi_square(y1,f*y2+c)
elif fitMetric == 'chi_free':
if bNoConst:
f = pos[0] ; c=0.0
nParams=1
else:
f, c = pos
nParams=2
if bUseWeights:
value = sc.chi_square_free(y1, f*y2+c, dx1=y1sig, dx2=f*y2sig, stride=stride, nParams=nParams, nRounds=numRounds)
else:
value = sc.chi_square_free(y1, f*y2+c, stride=stride, nParams=nParams, nRounds = numRounds)
elif fitMetric == 'vr':
c = pos
value = sc.volatility_ratio(y1-c, y2+c, stride=stride )
# If the entire strip is masked return 1e99. This usually indicates some form of domain error in the underlying calculation.
if np.ma.is_masked(value) or np.isnan(value):
value=1e99
elif fitMetric == 'cormap' or fitMetric == 'cormap_matrix':
# Will need a two-stage eliminator, perhaps, but essentially we'll need a system that minimises the number
# of sequential runs.
f, c = pos
runs = sc.run_distribution( y1 > f*y2+c )
value = len(runs)
# prob = sc.cormap_value( y1, f*y2+c )
# value = 1.0 - sc.cormap_value( y1, f*y2+c )
else:
print( "= = = ERROR, metric not recognised! %s" % fitMetric, file=sys.stderr )
sys.exit(1)
return value
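# A hedged sketch (assumption: illustrative only, not part of the original
# workflow) of how intensityDiff is driven by Powell minimisation over the
# scale f and constant c. y1block and y2block are (2, n) arrays holding
# intensities and uncertainties, matching the unpacking inside intensityDiff.
def _example_fit(y1block, y2block, stride=1, numRounds=500):
    pos0 = np.array([1.0, 0.0])  # initial guesses for f and c
    extraArgs = (y1block, y2block, 'chi', True, False, stride, numRounds)
    return fmin_powell(intensityDiff, pos0, args=extraArgs, disp=False)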
def populationIntensityDiff(pos, *args):
yT = args[0][0,0] ; yTsig = args[0][0,1]
yP = args[0][1:,0] ; yPsig = args[0][1:,1]
fitMetric = args[1]
bUseWeights = args[2]
bNoConst = args[3]
stride = args[4]
numRounds = args[5]
if fitMetric == 'vr':
# = = Try to maintain symmetry.
c = pos[-1]
yT -= c
y2 = np.mean(yP, axis=0)+c ; y2sig = np.mean(yPsig, axis=0)
elif bNoConst:
f = pos ; c=0.0
y2 = np.mean(f[:,None]*yP, axis=0)+c ; y2sig = np.mean(f[:,None]*yPsig, axis=0)
else:
f = pos[:-1] ; c = pos[-1]
y2 = np.mean(f[:,None]*yP, axis=0)+c ; y2sig = np.mean(f[:,None]*yPsig, axis=0)
# Don't support chi_red just yet.
if fitMetric == 'chi':
if bUseWeights:
value=sc.chi_square(yT,y2, dx1=yTsig, dx2=y2sig)
else:
value=sc.chi_square(yT,y2)
elif fitMetric == 'log_chi':
# Check for negative values first.
if np.any(y2 < 0.0):
#print( "= = WARNING: for values of f %g and c %g there exists invalid logs." % (f, c), file=sys.stderr )
return 1e20
if bUseWeights:
value=sc.log_chi_square(yT,y2, dx1=yTsig, dx2=y2sig)
else:
value=sc.log_chi_square(yT,y2)
elif fitMetric == 'chi_free':
nParams=len(pos)
if bUseWeights:
value = sc.chi_square_free(yT, y2, dx1=yTsig, dx2=y2sig, stride=stride, nParams=nParams, nRounds=numRounds)
else:
value = sc.chi_square_free(yT, y2, stride=stride, nParams=nParams, nRounds = numRounds)
elif fitMetric == 'vr':
value = sc.volatility_ratio(yT, y2, stride=stride )
elif fitMetric == 'cormap' or fitMetric == 'cormap_matrix':
runs = sc.run_distribution( yT > y2 )
value = len(runs)
# prob = sc.cormap_value( yT, y2 )
# value = 1.0 - sc.cormap_value( yT, y2 )
else:
print( "= = = ERROR, metric not recognised! %s" % fitMetric, file=sys.stderr )
sys.exit(1)
print( value )
return value
#####################################
# MAIN PROGRAM ######################
random.seed()
#scriptname=os.path.basename(__file__)
parser = argparse.ArgumentParser(description="Fits a set of curves by minimising differences according to some popular metrics, "
"utilising Powell minimisation (sequential step-wise search over each variable) over one or two of the "
"variables: overall scaling *f* and constant subtraction *c*. "
"These two variables approximate uncertainties in sample concentration/beam intensities and basic buffer subtraction. "
"Note that there are some combinations of arguments that won't work well, like cormap with Powell minimisation.",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('files', metavar='N', type=str, nargs='*',
help="The list of files to be manipulated and fitted. For modes 0 and 1, the first file given acts as the reference point"
"by which other files are comparied with. The first file will also serve to define the set of independent-variable"
"(scattering-angle q) to act as the basis for interpolation.")
parser.add_argument('-mode', type=int, default=0, help="Determine which curves to fit with which. Mode 0: Fit all other files to the first. "
"Mode 1: Fit the first file to all others. Mode 2: Fit all files to each other (!). "
"Mode 3: Use population mode. Fit the other files as a population to the first. "
"(ToDo) Mode 4: Use population mode. Use the first again as the target, fit a combination of the others." )
parser.add_argument('-metric', type=str, default='chi', help="Determine the metric to use as the comparison. "
"Options are: chi | log_chi | chi_free | volatility | cormap | cormap_matrix. "
"chi_free is as defined by Rambo and Tainer, Nature, 2013. "
"Volatility is the volatility ratio as defined by Hura et al. (2013); can be given as 'vr' for short. "
"cormap is the correlation map as defined by Franke et al. (2015).")
parser.add_argument('-o', type=str, dest='outpref', default='fitted', help='Output prefix. Files will be written as <out>-fittedExp.xvg, etc. ')
parser.add_argument('-debug', dest='bDebug', action='store_true', help='Debug mode.')
parser.add_argument('-v', dest='bVerbose', action='store_true', help='Verbose mode.')
parser.add_argument('-diffspec', dest='bDiffSpec', action='store_true', help='Write the difference spectra after fitting.')
# = = = Parameters for all fitting modes.
parser.add_argument('-qmin', type=float, default=0.0, help='Minimum q to include in fitting.')
parser.add_argument('-qmax', type=float, default=10.0, help='Maximum q to include in fitting.')
parser.add_argument('-qrange', type=str, default='', help='Q-range to include in fitting, overrides above. Give as a 2-tuple, e.g. (0,3).')
# = = = Parameters for fitting modes.
parser.add_argument('-now', dest='bNoWeights', action='store_true', help='Do not weight each point by the errors in column 3.')
parser.add_argument('-noc', dest='bNoConst', action='store_true', help='Do not use constant background subtraction.')
parser.add_argument('-c0', type=float, default=np.nan, help='Start fitting with given background constant C0 instead of by estimation.')
parser.add_argument('-f0', type=float, default=np.nan, help='Start fitting with given scaling constant F0 instead of by estimation.')
# = = = Parameters for chi_free and/or vr
parser.add_argument('-Dmax', type=float, default=np.nan, help='Give the maximum molecular extent D_max, required to compute number of Shannon channels '
'as used in chi_free and volatility ratio computations. NB: can compute using other utilities, e.g. ATSAS datgnom.')
parser.add_argument('-nRounds', type=int, default=500, help='In Tainer\'s chi_free modelling, the number of replicates to determine median chi.')
#parser.add_argument('-xmult', type=str, help='X-value pre-multiplier for each input file.')
args = parser.parse_args()
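# Note (assumption, following Rambo & Tainer, 2013): Dmax enters the chi_free
# and volatility-ratio computations through the number of Shannon channels,
# commonly estimated as n_s = Dmax * (qmax - qmin) / pi, which sets the stride
# between statistically independent points in the fitted q-range.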
fitMode = args.mode
fitMetric = args.metric
outpref=args.outpref
Dmax=args.Dmax
numRounds=args.nRounds
nparams=2
fInit = args.f0
bEstimateF0 = np.isnan(fInit)
f0EstInterval = 5
cInit = args.c0
bEstimateC0 = np.isnan(cInit)
bVerbose = args.bVerbose
bUseWeights = not args.bNoWeights
bDebug = args.bDebug
bNoConst = args.bNoConst
bDiffSpec = args.bDiffSpec
if (args.qrange != ''):
tmp = make_tuple(args.qrange)
qmin=tmp[0] ; qmax=tmp[1]
else:
qmin=args.qmin ; qmax=args.qmax
# = = = Sanitise fitMetric and check for inconsistent given arguments.
fitMetric = fitMetric.lower()
if fitMetric == 'v_r' or fitMetric == 'volatrat' or fitMetric == 'volatility':
fitMetric = 'vr'
elif fitMetric == 'chifree':
fitMetric = 'chi_free'
elif fitMetric == 'log' or fitMetric == 'logchi':
fitMetric = 'log_chi'
elif fitMetric == 'chired' or fitMetric == 'chi_reduced':
fitMetric = 'chi_red'
if fitMetric == 'vr':
if bNoConst:
print( '= = = ERROR: The volatility ratio has only one free parameter (constant subtraction), so it cannot be used with the argument -noc.', file=sys.stderr )
sys.exit(1)
if np.isnan(Dmax):
print( '= = = ERROR: Volatility ratio requires the definition of Dmax in order to operate!', file=sys.stderr )
sys.exit(1)
if fitMetric == 'chi_free' or fitMetric == 'chi_red':
if np.isnan(Dmax):
print( '= =
buflen = space.len_w(w_value) + 1
elif typ == rwinreg.REG_MULTI_SZ:
if space.is_w(w_value, space.w_None):
buflen = 1
buf = lltype.malloc(rffi.CCHARP.TO, buflen, flavor='raw')
buf[0] = '\0'
elif space.isinstance_w(w_value, space.w_list):
strings = []
buflen = 0
# unwrap strings and compute total size
w_iter = space.iter(w_value)
while True:
try:
w_item = space.next(w_iter)
if space.isinstance_w(w_item, space.w_unicode):
w_item = space.call_method(w_item, 'encode',
space.newtext('mbcs'))
item = space.bytes_w(w_item)
strings.append(item)
buflen += len(item) + 1
except OperationError as e:
if not e.match(space, space.w_StopIteration):
raise # re-raise other app-level exceptions
break
buflen += 1
buf = lltype.malloc(rffi.CCHARP.TO, buflen, flavor='raw')
# Now copy data
buflen = 0
for string in strings:
for i in range(len(string)):
buf[buflen + i] = string[i]
buflen += len(string) + 1
buf[buflen - 1] = '\0'
buflen += 1
buf[buflen - 1] = '\0'
else: # REG_BINARY and ALL unknown data types.
if space.is_w(w_value, space.w_None):
buflen = 0
buf = lltype.malloc(rffi.CCHARP.TO, 1, flavor='raw')
buf[0] = '\0'
else:
try:
value = w_value.readbuf_w(space)
except BufferInterfaceNotFound:
raise oefmt(space.w_TypeError,
"Objects of type '%T' can not be used as binary "
"registry values", w_value)
else:
value = value.as_str()
buflen = len(value)
buf = rffi.str2charp(value)
if buf is not None:
return rffi.cast(rffi.CCHARP, buf), buflen
raise oefmt(space.w_ValueError,
"Could not convert the data to the specified type")
def convert_from_regdata(space, buf, buflen, typ):
if typ == rwinreg.REG_DWORD:
if not buflen:
return space.newint(0)
d = rffi.cast(rwin32.LPDWORD, buf)[0]
return space.newint(d)
elif typ == rwinreg.REG_SZ or typ == rwinreg.REG_EXPAND_SZ:
if not buflen:
s = ""
else:
# may or may not have a trailing NULL in the buffer.
buf = rffi.cast(rffi.CCHARP, buf)
if buf[buflen - 1] == '\x00':
buflen -= 1
s = rffi.charp2strn(buf, buflen)
w_s = space.newbytes(s)
return space.call_method(w_s, 'decode', space.newtext('mbcs'))
elif typ == rwinreg.REG_MULTI_SZ:
if not buflen:
return space.newlist([])
i = 0
l = []
while i < buflen and buf[i]:
s = []
while i < buflen and buf[i] != '\0':
s.append(buf[i])
i += 1
if len(s) == 0:
break
s = ''.join(s)
l.append(space.newtext(s))
i += 1
return space.newlist(l)
else: # REG_BINARY and all other types
return space.newbytes(rffi.charpsize2str(buf, buflen))
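# The inverse of the REG_MULTI_SZ packing, as a pure-Python sketch (assumption:
# illustration only). Like the parsing loop above, it stops at the first empty
# string, i.e. at the double-NUL terminator.
def _unpack_multi_sz(buf):
    out = []
    for part in buf.split(b'\x00'):
        if not part:
            break
        out.append(part.decode('mbcs'))
    return out
# _unpack_multi_sz(b'a\x00bc\x00\x00') == ['a', 'bc']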
@unwrap_spec(value_name="text", typ=int)
def SetValueEx(space, w_hkey, value_name, w_reserved, typ, w_value):
"""SetValueEx(key, value_name, reserved, type, value) - Stores data in the value field of an open registry key.
key is an already open key, or any one of the predefined HKEY_* constants.
value_name is a string containing the name of the value to set, or None
type is an integer that specifies the type of the data. This should be one of:
REG_BINARY -- Binary data in any form.
REG_DWORD -- A 32-bit number.
REG_DWORD_LITTLE_ENDIAN -- A 32-bit number in little-endian format.
REG_DWORD_BIG_ENDIAN -- A 32-bit number in big-endian format.
REG_EXPAND_SZ -- A null-terminated string that contains unexpanded references
to environment variables (for example, %PATH%).
REG_LINK -- A Unicode symbolic link.
REG_MULTI_SZ -- A sequence of null-terminated strings, terminated by
two null characters. Note that Python handles this
termination automatically.
REG_NONE -- No defined value type.
REG_RESOURCE_LIST -- A device-driver resource list.
REG_SZ -- A null-terminated string.
reserved can be anything - zero is always passed to the API.
value is a string that specifies the new value.
This method can also set additional value and type information for the
specified key. The key identified by the key parameter must have been
opened with KEY_SET_VALUE access.
To open the key, use the CreateKeyEx() or OpenKeyEx() methods.
Value lengths are limited by available memory. Long values (more than
2048 bytes) should be stored as files with the filenames stored in
the configuration registry. This helps the registry perform efficiently."""
hkey = hkey_w(w_hkey, space)
buf, buflen = convert_to_regdata(space, w_value, typ)
try:
ret = rwinreg.RegSetValueEx(hkey, value_name, 0, typ, buf, buflen)
finally:
lltype.free(buf, flavor='raw')
if ret != 0:
raiseWindowsError(space, ret, 'RegSetValueEx')
def QueryValueEx(space, w_hkey, w_subkey):
"""value,type_id = QueryValueEx(key, value_name) - Retrieves the type and data for a specified value name associated with an open registry key.
key is an already open key, or any one of the predefined HKEY_* constants.
value_name is a string indicating the value to query"""
hkey = hkey_w(w_hkey, space)
if space.is_w(w_subkey, space.w_None):
subkey = None
else:
subkey = space.text_w(w_subkey)
null_dword = lltype.nullptr(rwin32.LPDWORD.TO)
with lltype.scoped_alloc(rwin32.LPDWORD.TO, 1) as retDataSize:
ret = rwinreg.RegQueryValueEx(hkey, subkey, null_dword, null_dword,
None, retDataSize)
bufSize = intmask(retDataSize[0])
if ret == rwinreg.ERROR_MORE_DATA:
bufSize = 256
elif ret != 0:
raiseWindowsError(space, ret, 'RegQueryValueEx')
while True:
with lltype.scoped_alloc(rffi.CCHARP.TO, bufSize) as databuf:
with lltype.scoped_alloc(rwin32.LPDWORD.TO, 1) as retType:
ret = rwinreg.RegQueryValueEx(hkey, subkey, null_dword,
retType, databuf, retDataSize)
if ret == rwinreg.ERROR_MORE_DATA:
# Resize and retry
bufSize *= 2
retDataSize[0] = rffi.cast(rwin32.DWORD, bufSize)
continue
if ret != 0:
raiseWindowsError(space, ret, 'RegQueryValueEx')
length = intmask(retDataSize[0])
return space.newtuple([
convert_from_regdata(space, databuf,
length, retType[0]),
space.newint(intmask(retType[0])),
])
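# The loop above implements the standard Win32 grow-and-retry pattern:
# RegQueryValueEx reports ERROR_MORE_DATA when the supplied buffer is too
# small, so the buffer size is doubled and the call repeated until the data
# fits (assumption: paraphrasing the Win32 API contract).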
@unwrap_spec(subkey="text")
def CreateKey(space, w_hkey, subkey):
"""key = CreateKey(key, sub_key) - Creates or opens the specified key.
key is an already open key, or one of the predefined HKEY_* constants
sub_key is a string that names the key this method opens or creates.
If key is one of the predefined keys, sub_key may be None. In that case,
the handle returned is the same key handle passed in to the function.
If the key already exists, this function opens the existing key
The return value is the handle of the opened key.
If the function fails, an exception is raised."""
hkey = hkey_w(w_hkey, space)
with lltype.scoped_alloc(rwinreg.PHKEY.TO, 1) as rethkey:
ret = rwinreg.RegCreateKey(hkey, subkey, rethkey)
if ret != 0:
raiseWindowsError(space, ret, 'CreateKey')
return W_HKEY(space, rethkey[0])
@unwrap_spec(subkey="text", res=int, sam=rffi.r_uint)
def CreateKeyEx(space, w_hkey, subkey, res=0, sam=rwinreg.KEY_WRITE):
"""key = CreateKey(key, sub_key) - Creates or opens the specified key.
key is an already open key, or one of the predefined HKEY_* constants
sub_key is a string that names the key this method opens or creates.
If key is one of the predefined keys, sub_key may be None. In that case,
the handle returned is the same key handle passed in to the function.
If the key already exists, this function opens the existing key
The return value is the handle of the opened key.
If the function fails, an exception is raised."""
hkey = hkey_w(w_hkey, space)
with lltype.scoped_alloc(rwinreg.PHKEY.TO, 1) as rethkey:
ret = rwinreg.RegCreateKeyEx(hkey, subkey, res, None, 0,
sam, None, rethkey,
lltype.nullptr(rwin32.LPDWORD.TO))
if ret != 0:
raiseWindowsError(space, ret, 'CreateKeyEx')
return W_HKEY(space, rethkey[0])
@unwrap_spec(subkey="text")
def DeleteKey(space, w_hkey, subkey):
"""DeleteKey(key, sub_key) - Deletes the specified key.
key is an already open key, or any one of the predefined HKEY_* constants.
sub_key is a string that must be a subkey of the key identified by the key parameter.
This value must not be None, and the key may not have subkeys.
This method can not delete keys with subkeys.
If the method succeeds, the entire key, including all of its values,
is removed. If the method fails, an EnvironmentError exception is raised."""
hkey = hkey_w(w_hkey, space)
ret = rwinreg.RegDeleteKey(hkey, subkey)
if ret != 0:
raiseWindowsError(space, ret, 'RegDeleteKey')
@unwrap_spec(subkey="text")
def DeleteValue(space, w_hkey, subkey):
"""DeleteValue(key, value) - Removes a named value from a registry key.
key is an already open key, or any one of the predefined HKEY_* constants.
value is a string that identifies the value to remove."""
hkey = hkey_w(w_hkey, space)
ret = rwinreg.RegDeleteValue(hkey, subkey)
if ret != 0:
raiseWindowsError(space, ret, 'RegDeleteValue')
@unwrap_spec(subkey="text", res=int, sam=rffi.r_uint)
def OpenKey(space, w_hkey, subkey, res=0, sam=rwinreg.KEY_READ):
"""key = OpenKey(key, sub_key, res = 0, sam = KEY_READ) - Opens the specified key.
key is an already open key, or any one of the predefined HKEY_* constants.
sub_key is a string that identifies the sub_key to open
res is a reserved integer, and must be zero. Default is zero.
sam is an integer that specifies an access mask that describes the desired
security access for the key. Default is KEY_READ
The result is a new handle to the specified key
If the function fails, an EnvironmentError exception is raised."""
hkey = hkey_w(w_hkey, space)
with lltype.scoped_alloc(rwinreg.PHKEY.TO, 1) as rethkey:
ret = rwinreg.RegOpenKeyEx(hkey, subkey, res, sam, rethkey)
if ret != 0:
raiseWindowsError(space, ret, 'RegOpenKeyEx')
return W_HKEY(space, rethkey[0])
@unwrap_spec(index=int)
def EnumValue(space, w_hkey, index):
"""tuple = EnumValue(key, index) - Enumerates values of an open registry key.
key is an already open key, or any one of the predefined HKEY_* constants.
index is an integer that identifies the index of
be of type string.")
if feature_name not in self.all_features():
raise UnsatisfiedRequirments(f"'{feature_name}' doesn't exist in any features.")
self.__feature_value_representation = copy.deepcopy(feature_value_representation)
def set_feature_to_dummy_encoded(self,
feature_name,
dummy_encoded_list):
self.__dummy_encoded_features[feature_name] = dummy_encoded_list
for bool_feature in dummy_encoded_list:
self.__bool_features.add(bool_feature)
# --- Functions ---
def feature_types_dict(self):
feature_types = dict()
# -----
for feature_name in self.__string_features:
feature_types[feature_name] = "string"
for feature_name in self.__bool_features:
feature_types[feature_name] = "bool"
for feature_name in self.__integer_features:
feature_types[feature_name] = "integer"
for feature_name in self.__float_features:
feature_types[feature_name] = "float"
for feature_name in self.__datetime_features:
feature_types[feature_name] = "datetime"
for feature_name in self.__categorical_features:
feature_types[feature_name] = "categorical"
for feature_name in self.__null_only_features:
feature_types[feature_name] = "null_only"
return feature_types
def feature_types_dataframe(self):
features = list()
feature_types = list()
# -----
for feature_name in self.__string_features:
features.append(feature_name)
feature_types.append("string")
for feature_name in self.__bool_features:
features.append(feature_name)
feature_types.append("bool")
for feature_name in self.__integer_features:
features.append(feature_name)
feature_types.append("integer")
for feature_name in self.__float_features:
features.append(feature_name)
feature_types.append("float")
for feature_name in self.__datetime_features:
features.append(feature_name)
feature_types.append("datetime")
for feature_name in self.__categorical_features:
features.append(feature_name)
feature_types.append("category")
for feature_name in self.__null_only_features:
features.append(feature_name)
feature_types.append("null only")
dtypes_df = pd.DataFrame({'Data Types': feature_types})
dtypes_df.index = features
dtypes_df.index.name = "Features"
return dtypes_df
def remove_feature_from_dummy_encoded(self,
feature_name):
for bool_feature in self.__dummy_encoded_features[feature_name]:
self.remove_feature(bool_feature)
del self.__dummy_encoded_features[feature_name]
def remove_feature(self,
feature_name):
"""
Removes a feature from one of the feature sets.
Args:
feature_name:
The given feature name to remove.
"""
while True:
try:
self.__string_features.remove(feature_name)
break
except KeyError:
pass
try:
self.__categorical_features.remove(feature_name)
break
except KeyError:
pass
try:
self.__integer_features.remove(feature_name)
break
except KeyError:
pass
try:
self.__float_features.remove(feature_name)
break
except KeyError:
pass
try:
self.__datetime_features.remove(feature_name)
break
except KeyError:
pass
try:
self.__bool_features.remove(feature_name)
break
except KeyError:
pass
try:
self.__null_only_features.remove(feature_name)
break
except KeyError:
pass
raise KeyError(f"The feature {feature_name} doesn't exist inside any of DataFrameType's feature sets!!!")
def display_features(self,
display_dataframes=False,
notebook_mode=False):
"""
Displays the feature sets' info.
Args:
display_dataframes:
Creates a dataframe object to display feature information.
notebook_mode:
Determines if the dataframe can be displayed in a notebook
"""
# Do a simple print of feature set info.
if not display_dataframes:
# -----
if self.__string_features:
print("String Features: {0}\n".format(
self.__string_features))
if self.__categorical_features:
print("Categorical Features: {0}\n".format(
self.__categorical_features))
if self.__string_features or self.__categorical_features:
print("---------"*10)
# -----
if self.__bool_features:
print("Bool Features: {0}\n".format(
self.__bool_features))
print("---------" * 10)
if self.__datetime_features:
print("Datetime Features: {0}\n".format(
self.__datetime_features))
print("---------" * 10)
# -----
if self.__float_features | self.__integer_features:
print("Numerical Features: {0}\n".format(
self.__float_features | self.__integer_features))
if self.__integer_features:
print("Integer Features: {0}\n".format(
self.__integer_features))
if self.__float_features:
print("Float Features: {0}\n".format(
self.__float_features))
if self.__target_feature:
print("---------" * 10)
print("Target Feature: {0}\n".format(
self.__target_feature))
if self.__null_only_features:
print("---------" * 10)
print("Null Only Feature: {0}\n".format(
self.__null_only_features))
# Create dataframe object based on the feature sets.
else:
dtypes_df = self.feature_types_dataframe()
if notebook_mode:
display(dtypes_df)
else:
print(dtypes_df)
def fix_numeric_features(self,
df,
notebook_mode=False,
display_results=False):
"""
Attempts to move numerical features to the correct types by
following the given priority queue (illustrated in a comment sketch after this method):
1. Bool
2. Categorical
3. Float
4. Int
5. Do nothing
Args:
df: pd.Dataframe
Pandas Dataframe object to update to correct types.
notebook_mode: bool
Boolean value to determine if any notebook functions can be used here.
display_results: bool
Display the table in priority order with flags.
Note -
This will not actually update the given dataframe. This object is
an abstract representation of the dataframe.
"""
features_flag_types = dict()
for feature_name in df.columns:
try:
pd.to_numeric(df[feature_name])
except ValueError:
# Ignore all string features
if feature_name in self.string_features():
continue
# Features that must be these set types
if feature_name in self.categorical_features():
continue
if feature_name in self.bool_features():
continue
feature_values = set(pd.to_numeric(df[feature_name],
errors="coerce").dropna())
if not len(feature_values):
continue
# Get flags to push to the priority queue
flag_dict = dict()
flag_dict["Bool"] = self.__bool_check(feature_values)
numeric_flag, float_flag, int_flag, category_flag = \
self.__numeric_check(feature_values)
flag_dict["Numeric"] = numeric_flag
flag_dict["Float"] = float_flag
flag_dict["Integer"] = int_flag
flag_dict["Categorical"] = category_flag
# Pass the flag dictionary on to be processed later by the priority queue.
features_flag_types[feature_name] = flag_dict
# Iterate over the features and change types based on the priority queue
for feature_name, flag_dict in features_flag_types.items():
# -----
if flag_dict["Bool"]:
self.set_feature_to_bool(feature_name)
continue
# -----
elif flag_dict["Categorical"]:
self.set_feature_to_categorical(feature_name)
continue
# -----
elif flag_dict["Numeric"]:
if flag_dict["Float"]:
self.set_feature_to_float(feature_name)
continue
elif flag_dict["Integer"]:
self.set_feature_to_integer(feature_name)
continue
if display_results:
flag_df = pd.DataFrame.from_dict(features_flag_types,
orient='index')
if notebook_mode:
display(flag_df)
else:
print(flag_df)
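# Comment-only sketch of the priority resolution described in the docstring
# above (hypothetical flag values): given
#     {"Bool": False, "Categorical": True, "Numeric": True, "Float": True, "Integer": True}
# the feature is set to categorical, because the Bool check fails and
# Categorical outranks Float/Integer in the priority queue.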
def fix_string_features(self,
df,
notebook_mode=False):
"""
Iterates through all string features and moves features to given types.
May ask the user questions if it detects any conflicting string/numeric
types.
Args:
df: pd.Dataframe
Pandas dataframe object.
notebook_mode: bool
Will use the 'clear_output' notebook function if in notebook
mode.
"""
# Store types to convert
type_conflict_dict = dict()
# Currently this only performs
for feature_name in self.string_features():
# Float found
float_flag = False
# Keep track of all numeric features
numeric_count = 0
numeric_values = []
# Keep track of all string features
string_count = 0
string_values = []
# Keep track of all datetime features
datetime_count = 0
datetime_values = []
# Iterate through value counts
for val, count in df[feature_name].dropna().value_counts().iteritems():
numeric_check = False
try:
float(val)
numeric_check = True
except ValueError:
pass
# Numeric check
if isinstance(val, float) or isinstance(val, int) or numeric_check:
numeric_values.append(val)
numeric_count += count
if isinstance(val, float):
float_flag = True
if numeric_check and isinstance(val, str):
if len(val.split(".")) == 2:
float_flag = True
# String/Datetime check
elif isinstance(val, str):
datetime_found = False
try:
parser.parse(val)
datetime_values.append(val)
datetime_count += count
datetime_found = True
except Exception as e:
pass
if not datetime_found:
string_values.append(val)
string_count += count
# Must be a numeric type; find which type
if numeric_count != 0 and string_count == 0 and datetime_count == 0:
if float_flag:
type_conflict_dict[feature_name] = "float"
else:
if self.__bool_check(numeric_values):
type_conflict_dict[feature_name] = "bool"
elif self.__categorical_check(numeric_values):
type_conflict_dict[feature_name] = "category"
else:
type_conflict_dict[feature_name] = "integer"
# Must be a string type
elif numeric_count == 0 and string_count != 0 and datetime_count == 0:
if self.__bool_string_values_check(string_values):
type_conflict_dict[feature_name] = "bool"
else:
type_conflict_dict[feature_name] = "string"
# Must be a datetime
elif numeric_count == 0 and string_count == 0 and datetime_count != 0:
type_conflict_dict[feature_name] = "datetime"
# A conflict is found; have the user work it out.
else:
print("Type conflict found!")
print(f"Feature Name: '{feature_name}'")
print("---" * 10)
# -----
print("Numeric Value Info")
print(f"\tNumeric count: {numeric_count}")
print(f"\tNumeric percentage: {(numeric_count / (numeric_count + string_count + datetime_count)) * 100:.3f}%")
print(f"\tNumeric values: {numeric_values}\n")
# -----
print("String Value Info")
print(f"\tString count: {string_count}")
print(f"\tString percentage: {(string_count / (numeric_count + string_count + datetime_count)) * 100:.3f}%")
print(f"\tString values: {string_values}\n")
# -----
print("Datetime Value Info")
print(f"\tString count: {datetime_count}")
print(f"\tString percentage: {(datetime_count / (numeric_count + string_count + datetime_count)) * 100:.3f}%")
print(f"\tString values: {datetime_values}\n")
# Get user input for handling
print(
"You can use the first character of the option for input.\n")
user_input = input(
"\nMove feature to numeric or string and replace any "
"conflicts with nulls.\n* Numeric\n* String\n* Datetime\n"
"* Ignore\nInput: ")
user_input = user_input.lower()
# Clear the last user output if in notebook mode. (A clean notebook is a happy notebook :).)
if notebook_mode:
clear_output()
# -----
if not len(user_input):
print(f"Ignoring feature '{feature_name}")
# -----
elif user_input[0] == "s":
type_conflict_dict[feature_name] = "string"
# -----
elif user_input[0] == "n":
if float_flag:
type_conflict_dict[feature_name] = "float"
else:
numeric_values = set(pd.to_numeric(numeric_values,
errors="coerce").dropna())
if self.__bool_check(numeric_values):
type_conflict_dict[feature_name] = "bool"
elif self.__categorical_check(numeric_values):
type_conflict_dict[feature_name] = "category"
else:
type_conflict_dict[feature_name] = "integer"
# -----
elif user_input[0] == "d":
type_conflict_dict[feature_name] = "datetime"
# -----
else:
print(f"Ignoring feature '{feature_name}")
# Iterate on all features
for feature_name, feature_type in type_conflict_dict.items():
moved_feature = False
if feature_type == "string":
if feature_name not in self.__string_features:
self.set_feature_to_string(feature_name)
moved_feature = True
elif feature_type == "datetime":
if feature_name not in self.__datetime_features:
self.set_feature_to_datetime(feature_name)
moved_feature = True
elif feature_type == "integer":
if feature_name not in self.__integer_features:
self.set_feature_to_integer(feature_name)
moved_feature = True
elif feature_type == "category":
if feature_name not in self.__categorical_features:
self.set_feature_to_categorical(feature_name)
moved_feature = True
elif feature_type == "bool":
if feature_name not in self.__bool_features:
self.set_feature_to_bool(feature_name)
moved_feature = True
elif feature_type == "float":
if feature_name not in self.__float_features:
self.set_feature_to_float(feature_name)
moved_feature = True
else:
raise TypeError("An unknown type was passed!")
if moved_feature:
print(f"\nMoving feature '{feature_name}' to type {feature_type}.")
def fix_nan_features(self,
df):
"""
Attempts to get the data type of the feature if there were no nans
inside it.
Args:
df: pd.DataFrame
Pandas Dataframe object.
"""
nan_features = [feature for feature, nan_found in
df.isna().any().items() if nan_found]
# The features that are found to be floats should partially merge with
# features with nulls.
float_features =
'1'],
'open': ['-', '4', '3']})
def test_operator_ready(self):
"""test the method ready of Operator"""
op = qt.Operator()
print(f'operator is ready? "{op.ready}"')
def test_operator_add_strategy(self):
"""test adding strategies to Operator"""
op = qt.Operator('dma, all, urgent')
self.assertIsInstance(op, qt.Operator)
self.assertIsInstance(op.strategies[0], qt.TimingDMA)
self.assertIsInstance(op.strategies[1], qt.SelectingAll)
self.assertIsInstance(op.strategies[2], qt.RiconUrgent)
self.assertIsInstance(op[0], qt.TimingDMA)
self.assertIsInstance(op[1], qt.SelectingAll)
self.assertIsInstance(op[2], qt.RiconUrgent)
self.assertIsInstance(op['dma'], qt.TimingDMA)
self.assertIsInstance(op['all'], qt.SelectingAll)
self.assertIsInstance(op['urgent'], qt.RiconUrgent)
self.assertEqual(op.strategy_count, 3)
print(f'test adding strategies into existing op')
print('test adding strategy by string')
op.add_strategy('macd')
self.assertIsInstance(op.strategies[0], qt.TimingDMA)
self.assertIsInstance(op.strategies[3], qt.TimingMACD)
self.assertEqual(op.strategy_count, 4)
op.add_strategy('random')
self.assertIsInstance(op.strategies[0], qt.TimingDMA)
self.assertIsInstance(op.strategies[4], qt.SelectingRandom)
self.assertEqual(op.strategy_count, 5)
test_ls = TestLSStrategy()
op.add_strategy(test_ls)
self.assertIsInstance(op.strategies[0], qt.TimingDMA)
self.assertIsInstance(op.strategies[5], TestLSStrategy)
self.assertEqual(op.strategy_count, 6)
print('Test that different object instances are added to the operator')
op.add_strategy('dma')
self.assertIsInstance(op.strategies[0], qt.TimingDMA)
self.assertIsInstance(op.strategies[6], qt.TimingDMA)
self.assertIsNot(op.strategies[0], op.strategies[6])
def test_operator_add_strategies(self):
""" etst adding multiple strategies to Operator"""
op = qt.Operator('dma, all, urgent')
self.assertEqual(op.strategy_count, 3)
print('test adding multiple strategies -- adding strategy by list of strings')
op.add_strategies(['dma', 'macd'])
self.assertEqual(op.strategy_count, 5)
self.assertIsInstance(op.strategies[0], qt.TimingDMA)
self.assertIsInstance(op.strategies[3], qt.TimingDMA)
self.assertIsInstance(op.strategies[4], qt.TimingMACD)
print('test adding multiple strategies -- adding strategy by comma separated strings')
op.add_strategies('dma, macd')
self.assertEqual(op.strategy_count, 7)
self.assertIsInstance(op.strategies[0], qt.TimingDMA)
self.assertIsInstance(op.strategies[5], qt.TimingDMA)
self.assertIsInstance(op.strategies[6], qt.TimingMACD)
print('test adding multiple strategies -- adding strategy by list of strategies')
op.add_strategies([qt.TimingDMA(), qt.TimingMACD()])
self.assertEqual(op.strategy_count, 9)
self.assertIsInstance(op.strategies[0], qt.TimingDMA)
self.assertIsInstance(op.strategies[7], qt.TimingDMA)
self.assertIsInstance(op.strategies[8], qt.TimingMACD)
print('test adding multiple strategies -- adding strategy by list of strategy and str')
op.add_strategies(['DMA', qt.TimingMACD()])
self.assertEqual(op.strategy_count, 11)
self.assertIsInstance(op.strategies[0], qt.TimingDMA)
self.assertIsInstance(op.strategies[9], qt.TimingDMA)
self.assertIsInstance(op.strategies[10], qt.TimingMACD)
self.assertIsNot(op.strategies[0], op.strategies[9])
self.assertIs(type(op.strategies[0]), type(op.strategies[9]))
print('test adding fault data')
self.assertRaises(AssertionError, op.add_strategies, 123)
self.assertRaises(AssertionError, op.add_strategies, None)
def test_operator_remove_strategy(self):
""" test the remove_strategy method of Operator"""
op = qt.Operator('dma, all, urgent')
op.add_strategies(['dma', 'macd'])
op.add_strategies(['DMA', TestLSStrategy()])
self.assertEqual(op.strategy_count, 7)
print('test removing strategies from Operator')
op.remove_strategy('dma')
self.assertEqual(op.strategy_count, 6)
self.assertEqual(op.strategy_ids, ['all', 'urgent', 'dma_1', 'macd', 'dma_2', 'custom'])
self.assertEqual(op.strategies[0], op['all'])
self.assertEqual(op.strategies[1], op['urgent'])
self.assertEqual(op.strategies[2], op['dma_1'])
self.assertEqual(op.strategies[3], op['macd'])
self.assertEqual(op.strategies[4], op['dma_2'])
self.assertEqual(op.strategies[5], op['custom'])
op.remove_strategy('dma_1')
self.assertEqual(op.strategy_count, 5)
self.assertEqual(op.strategy_ids, ['all', 'urgent', 'macd', 'dma_2', 'custom'])
self.assertEqual(op.strategies[0], op['all'])
self.assertEqual(op.strategies[1], op['urgent'])
self.assertEqual(op.strategies[2], op['macd'])
self.assertEqual(op.strategies[3], op['dma_2'])
self.assertEqual(op.strategies[4], op['custom'])
def test_operator_clear_strategies(self):
""" test operator clear strategies"""
op = qt.Operator('dma, all, urgent')
op.add_strategies(['dma', 'macd'])
op.add_strategies(['DMA', TestLSStrategy()])
self.assertEqual(op.strategy_count, 7)
print('test removing strategies from Operator')
op.clear_strategies()
self.assertEqual(op.strategy_count, 0)
self.assertEqual(op.strategy_ids, [])
op.add_strategy('dma', pars=(12, 123, 25))
self.assertEqual(op.strategy_count, 1)
self.assertEqual(op.strategy_ids, ['dma'])
self.assertEqual(type(op.strategies[0]), TimingDMA)
self.assertEqual(op.strategies[0].pars, (12, 123, 25))
op.clear_strategies()
self.assertEqual(op.strategy_count, 0)
self.assertEqual(op.strategy_ids, [])
def test_operator_prepare_data(self):
"""test processes that related to prepare data"""
test_ls = TestLSStrategy()
test_sel = TestSelStrategy()
test_sig = TestSigStrategy()
self.op = qt.Operator(strategies=[test_ls, test_sel, test_sig])
too_early_cash = qt.CashPlan(dates='2016-01-01', amounts=10000)
early_cash = qt.CashPlan(dates='2016-07-01', amounts=10000)
on_spot_cash = qt.CashPlan(dates='2016-07-08', amounts=10000)
no_trade_cash = qt.CashPlan(dates='2016-07-08, 2016-07-30, 2016-08-11, 2016-09-03',
amounts=[10000, 10000, 10000, 10000])
# Calling prepare_data before the parameters of all strategies have been set raises an AssertionError
self.assertRaises(AssertionError,
self.op.prepare_data,
hist_data=self.hp1,
cash_plan=qt.CashPlan(dates='2016-07-08', amounts=10000))
late_cash = qt.CashPlan(dates='2016-12-31', amounts=10000)
multi_cash = qt.CashPlan(dates='2016-07-08, 2016-08-08', amounts=[10000, 10000])
self.op.set_parameter(stg_id='custom',
pars={'000300': (5, 10.),
'000400': (5, 10.),
'000500': (5, 6.)})
self.assertEqual(self.op.strategies[0].pars, {'000300': (5, 10.),
'000400': (5, 10.),
'000500': (5, 6.)})
self.op.set_parameter(stg_id='custom_1',
pars=())
self.assertEqual(self.op.strategies[1].pars, ()),
self.op.set_parameter(stg_id='custom_2',
pars=(0.2, 0.02, -0.02))
self.assertEqual(self.op.strategies[2].pars, (0.2, 0.02, -0.02)),
self.op.prepare_data(hist_data=self.hp1,
cash_plan=on_spot_cash)
self.assertIsInstance(self.op._op_history_data, dict)
self.assertEqual(len(self.op._op_history_data), 3)
# test if automatic strategy blenders are set
self.assertEqual(self.op.strategy_blenders,
{'close': ['+', '2', '+', '1', '0']})
tim_hist_data = self.op._op_history_data['custom']
sel_hist_data = self.op._op_history_data['custom_1']
ric_hist_data = self.op._op_history_data['custom_2']
print(f'in test_prepare_data in TestOperator:')
print('selecting history data:\n', sel_hist_data)
print('originally passed data in correct sequence:\n', self.test_data_3D[:, 3:, [2, 3, 0]])
print('difference is \n', sel_hist_data - self.test_data_3D[:, :, [2, 3, 0]])
self.assertTrue(np.allclose(sel_hist_data, self.test_data_3D[:, :, [2, 3, 0]], equal_nan=True))
self.assertTrue(np.allclose(tim_hist_data, self.test_data_3D, equal_nan=True))
self.assertTrue(np.allclose(ric_hist_data, self.test_data_3D[:, 3:, :], equal_nan=True))
# raises Value Error if empty history panel is given
empty_hp = qt.HistoryPanel()
correct_hp = qt.HistoryPanel(values=np.random.randint(10, size=(3, 50, 4)),
columns=self.types,
levels=self.shares,
rows=self.date_indices)
too_many_shares = qt.HistoryPanel(values=np.random.randint(10, size=(5, 50, 4)))
too_many_types = qt.HistoryPanel(values=np.random.randint(10, size=(3, 50, 5)))
# raises Error when history panel is empty
self.assertRaises(ValueError,
self.op.prepare_data,
empty_hp,
on_spot_cash)
# raises Error when first investment date is too early
self.assertRaises(AssertionError,
self.op.prepare_data,
correct_hp,
early_cash)
# raises Error when last investment date is too late
self.assertRaises(AssertionError,
self.op.prepare_data,
correct_hp,
late_cash)
# raises Error when some of the investment dates are on no-trade-days
self.assertRaises(ValueError,
self.op.prepare_data,
correct_hp,
no_trade_cash)
# raises Error when number of shares in history data does not fit
self.assertRaises(AssertionError,
self.op.prepare_data,
too_many_shares,
on_spot_cash)
# raises Error when too early cash investment date
self.assertRaises(AssertionError,
self.op.prepare_data,
correct_hp,
too_early_cash)
# raises Error when number of d_types in history data does not fit
self.assertRaises(AssertionError,
self.op.prepare_data,
too_many_types,
on_spot_cash)
# test the effect of data type sequence in strategy definition
def test_operator_generate(self):
""" Test signal generation process of operator objects
:return:
"""
# Use the custom strategies from the test module to create three types of trading strategies
test_ls = TestLSStrategy()
test_sel = TestSelStrategy()
test_sel2 = TestSelStrategyDiffTime()
test_sig = TestSigStrategy()
print('--Test PT type signal generation--')
# Test PT-type signal generation:
# Create an Operator object whose signal type is PT (proportion target signals)
# This Operator contains two strategies, an LS-Strategy and a Sel-Strategy, representing timing and stock-selection strategies
# Each strategy generates its own PT signal, and the two are then blended into a single output signal
self.op = qt.Operator(strategies=[test_ls, test_sel])
self.op.set_parameter(stg_id='custom',
pars={'000010': (5, 10.),
'000030': (5, 10.),
'000039': (5, 6.)})
self.op.set_parameter(stg_id=1,
pars=())
# self.a_to_sell.set_blender(blender='0+1+2')
self.op.prepare_data(hist_data=self.hp1,
cash_plan=qt.CashPlan(dates='2016-07-08', amounts=10000))
print('--test operator information in normal mode--')
self.op.info()
self.assertEqual(self.op.strategy_blenders,
{'close': ['+', '1', '0']})
self.op.set_blender(None, '0*1')
self.assertEqual(self.op.strategy_blenders,
{'close': ['*', '1', '0']})
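# Note: the assertions above suggest that qteasy stores blender expressions
# in reverse-Polish (postfix) order, so the infix string '0*1' becomes the
# token list ['*', '1', '0'] (an observation from these tests, not a
# documented guarantee).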
print('--test operation signal created in Proportional Target (PT) Mode--')
op_list = self.op.create_signal(hist_data=self.hp1)
self.assertTrue(isinstance(op_list, HistoryPanel))
backtest_price_types = op_list.htypes
self.assertEqual(backtest_price_types[0], 'close')
self.assertEqual(op_list.shape, (3, 45, 1))
reduced_op_list = op_list.values.squeeze().T
print(f'op_list created, it is a 3 share/45 days/1 htype array, to make comparison happen, \n'
f'it will be squeezed to a 2-d array to compare on share-wise:\n'
f'{reduced_op_list}')
target_op_values = np.array([[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0]])
self.assertTrue(np.allclose(target_op_values, reduced_op_list, equal_nan=True))
print('--Test two separate signal generation for different price types--')
# Test generation of two separate sets of PT-type signals:
# Add two SigStrategy strategies to the Operator object; the strategy types are the same but their parameters differ, and the backtest price type is "OPEN"
# The Operator should generate two sets of trading signals, one for each of the two price types "close" and "open"
# Two new strategy objects must be created here, otherwise duplicate object references would appear in op's strategies list and cause errors
test_ls = TestLSStrategy()
test_sel = TestSelStrategy()
self.op.add_strategies([test_ls, test_sel])
self.op.set_parameter(stg_id='custom_2',
price_type='open')
self.op.set_parameter(stg_id='custom_3',
price_type='open')
self.assertEqual(self.op['custom'].price_type, 'close')
self.assertEqual(self.op['custom_2'].price_type, 'open')
self.op.set_parameter(stg_id='custom_2',
pars={'000010': (5, 10.),
'000030': (5, 10.),
'000039': (5, 6.)})
self.op.set_parameter(stg_id='custom_3',
pars=())
self.op.set_blender(blender='0 or 1', price_type='open')
self.op.prepare_data(hist_data=self.hp1,
cash_plan=qt.CashPlan(dates='2016-07-08', amounts=10000))
print('--test how operator information is printed out--')
self.op.info()
self.assertEqual(self.op.strategy_blenders,
{'close': ['*', '1', '0'],
'open': ['or', '1', '0']})
print('--test operation signal created in Proportional Target (PT) Mode--')
op_list = self.op.create_signal(hist_data=self.hp1)
self.assertTrue(isinstance(op_list, HistoryPanel))
signal_close = op_list['close'].squeeze().T
signal_open = op_list['open'].squeeze().T
self.assertEqual(signal_close.shape, (45, 3))
self.assertEqual(signal_open.shape, (45, 3))
target_op_close = np.array([[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.0, 0.0],
[0.5, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.5, 0.0]])
target_op_open = np.array([[0.5, 0.5, 1.0],
[0.5, 0.5, 1.0],
[1.0, 0.5, 1.0],
[1.0, 0.5, 1.0],
[1.0, 0.5, 1.0],
[1.0, 0.5, 1.0],
[1.0, 0.5, 1.0],
[1.0, 0.5, 1.0],
[1.0, 0.5, 1.0],
[1.0, 0.5, 1.0],
[1.0, 0.5, 1.0],
[1.0, 0.5, 1.0],
[1.0, 1.0, 1.0],
[1.0, 1.0, 1.0],
[1.0, 1.0, 1.0],
[1.0, 1.0, 0.0],
[1.0, 1.0, 0.0],
[1.0, 1.0, 0.0],
[1.0, 0.5, 0.0],
[1.0, 0.5, 0.0],
[1.0, 1.0, 0.0],
[0.0, 1.0, 0.5],
[0.0, 1.0, 0.5],
[0.0, 1.0, 0.5],
[0.0, 1.0, 0.5],
[0.0, 1.0, 0.5],
[0.0, 1.0, 0.5],
[0.0, 1.0, 0.5],
[0.5, 1.0, 0.0],
[0.5, 1.0, 0.0],
[0.5, 1.0, 1.0],
[0.5, 1.0, 1.0],
[0.5, 1.0, 1.0],
[0.5, 1.0, 1.0],
[0.5, 1.0, 1.0],
[0.5, 1.0, 1.0],
[0.0, 1.0, 1.0],
[0.0, 1.0, 1.0],
[0.0, 1.0, 1.0],
[0.0, 1.0, 1.0],
[0.0, 1.0, 1.0],
[0.0,
<gh_stars>1-10
#!/usr/bin/env python
# -*- coding: utf-8 -*-
""" run.py
Code to run the PatchVAE on different datasets
Usage:
# Run with default arguments on mnist
python run.py
Basic VAE borrowed from
https://github.com/pytorch/examples/tree/master/vae
"""
__author__ = "<NAME>"
__email__ = "<EMAIL>"
__version__ = "0.1"
import sys
from collections import OrderedDict
import shutil
import numpy as np
import torch
import torch.nn as nn
from torchvision.utils import make_grid
from utils import Timer
from utils.torchsummary import summary
from utils.commons import data_loaders, load_vae_model, count_parameters, EdgeWeights
from loss import BetaVaeLoss, VaeConcreteLoss, BetaVaeConcreteLoss,\
BetaVaeConcretePartsLoss, BetaVaeConcretePartsEntropyLoss, DiscLoss
from model import Discriminator
import utils.commons as commons
from torch.utils.tensorboard import SummaryWriter
def train_vaegan(data_loader, model_d, model_v, opt_d, opt_v, d_loss_fn, v_loss_fn, writer):
model_v.train()
model_d.train()
fwd_clock = Timer()
bwd_clock = Timer()
num_batches = args.img_per_epoch // args.batch_size
data_iterator = iter(data_loader)
overall_losses = OrderedDict()
# for batch_idx, (x, _) in enumerate(data_loader):
for batch_idx in range(num_batches):
batch_losses = OrderedDict()
try:
x, _ = next(data_iterator)
except StopIteration:
data_iterator = iter(data_loader)
continue
x = x.to(args.device)
########################################################
# (1) Update D network: maximize log(D(x)) + log(1 - D(G(z)))
#######################################################
# train with real
model_d.zero_grad()
real_x = x
real_y = torch.ones(x.size(0)).cuda()
outputs = model_d(real_x)
err_d_real = d_loss_fn(outputs.squeeze(), real_y.squeeze())
err_d_real.backward()
batch_losses['err_d_real'] = err_d_real.item()
batch_losses['d_x'] = outputs.data.mean()
# train with fake
fake_y = torch.zeros(x.size(0)).cuda()
x_tilde, z_app_mean, z_app_var, z_vis_mean = model_v(x, args.temp)
# recon_x, _ = x_tilde
outputs = model_d(x_tilde.detach())
err_d_fake = d_loss_fn(outputs.squeeze(), fake_y.squeeze())
err_d_fake.backward()
batch_losses['err_d_fake'] = err_d_fake.item()
batch_losses['d_v1'] = outputs.data.mean()
opt_d.step()
###########################
# (2) Update G network: VAE
###########################
model_v.zero_grad()
loss, loss_dict = v_loss_fn(
x_tilde, x, z_app_mean, z_app_var, z_vis_mean,
categorical=args.categorical, py=args.py, beta_p=args.beta_p,
beta_a=args.beta_a, beta_v=args.beta_v,
beta_ea=args.beta_ea, beta_ew=args.beta_ew
)
loss.backward()
for loss_key, loss_value in loss_dict.items():
batch_losses[loss_key] = loss_value.item()
opt_v.step()
############################
# (3) Update G network: maximize log(D(G(z)))
###########################
x_tilde, z_app_mean, z_app_var, z_vis_mean = model_v(x, args.temp)
# recon_x, _ = x_tilde
outputs = model_d(x_tilde)
real_y.fill_(1)
err_g = d_loss_fn(outputs.squeeze(), real_y.squeeze())
err_g.backward()
batch_losses['err_g'] = err_g.item()
batch_losses['d_v2'] = outputs.data.mean()
opt_v.step()
# Logs
for loss_key, loss_value in batch_losses.items():
writer.add_scalar('loss/train/' + loss_key, loss_value, args.steps)
overall_losses[loss_key] = overall_losses[loss_key] + loss_value \
if loss_key in overall_losses else loss_value
args.steps += 1
if args.steps % 1000 == 1:
args.temp = max(args.temp * np.exp(-args.anneal * args.steps),
args.min_temp)
if batch_idx % args.log_interval != 0:
continue
logstr = '\t'.join(['{}: {:0.4f}'.format(k, v) for k, v in batch_losses.items()])
print('[{}/{} ({:0.0f}%)]\t{}'.format(batch_idx, num_batches,
100. * batch_idx / num_batches, logstr))
overall_losses = OrderedDict([(k, v / num_batches) for k, v in overall_losses.items()])
logstr = '\t'.join(['{}: {:0.4f}'.format(k, v) for k, v in overall_losses.items()])
print('[End of train epoch]\t# steps: {}\t# images: {}, temp: {:0.2f}'.format(
args.steps, num_batches * args.batch_size, args.temp))
print(logstr)
print('[End of train epoch]\t# calls: {}, Fwd: {:.3f} ms\tBwd: {:.3f} ms'.format(
fwd_clock.calls, 1000 * fwd_clock.average_time, 1000 * bwd_clock.average_time))
return overall_losses
def train(data_loader, model, optimizer, loss_function, writer):
model.train()
fwd_clock = Timer()
bwd_clock = Timer()
losses = OrderedDict()
losses['loss'] = 0
num_batches = args.img_per_epoch // args.batch_size
data_iterator = iter(data_loader)
for batch_idx in range(num_batches):
try:
x, _ = next(data_iterator)
x = x.to(args.device)
optimizer.zero_grad()
# Forward Pass
fwd_clock.tic()
x_tilde, z_app_mean, z_app_var, z_vis_mean = model(x, args.temp)
# Compute Loss
loss, loss_dict = loss_function(
x_tilde, x, z_app_mean, z_app_var, z_vis_mean,
categorical=args.categorical, py=args.py, beta_p=args.beta_p,
beta_a=args.beta_a, beta_v=args.beta_v,
beta_ea=args.beta_ea, beta_ew=args.beta_ew
)
fwd_clock.toc()
# Backprop
bwd_clock.tic()
loss.backward()
bwd_clock.toc()
# Update Adam
optimizer.step()
# Logs
losses['loss'] += loss.item()
writer.add_scalar('loss/train/loss', loss.item(), args.steps)
for loss_key, loss_value in loss_dict.items():
writer.add_scalar('loss/train/' + loss_key, loss_value.item(), args.steps)
losses[loss_key] = losses[loss_key] + loss_value.item() \
if loss_key in losses else loss_value.item()
args.steps += 1
if args.steps % 1000 == 1:
args.temp = max(args.temp * np.exp(-args.anneal * args.steps),
args.min_temp)
if batch_idx % args.log_interval != 0:
continue
logstr = '\t'.join(['{}: {:0.4f}'.format(k, v.item()) for k, v in loss_dict.items()])
print('[{}/{} ({:0.0f}%)]\t{}'.format(batch_idx, num_batches,
100. * batch_idx / num_batches, logstr))
except StopIteration:
data_iterator = iter(data_loader)
losses = OrderedDict([(k, v / num_batches) for k, v in losses.items()])
logstr = '\t'.join(['{}: {:0.4f}'.format(k, v) for k, v in losses.items()])
print('[End of train epoch]\t# steps: {}\t# images: {}, temp: {:0.2f}'.format(
args.steps, num_batches * args.batch_size, args.temp))
print(logstr)
print('[End of train epoch]\t# calls: {}, Fwd: {:.3f} ms\tBwd: {:.3f} ms'.format(
fwd_clock.calls, 1000 * fwd_clock.average_time, 1000 * bwd_clock.average_time))
return losses['loss']
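# A hedged sketch (assumption: mirrors the in-place update above) of the
# Gumbel-softmax temperature schedule: every 1000 steps the current
# temperature decays exponentially with the step count, floored at min_temp.
def _anneal_temp(temp, anneal, step, min_temp):
    return max(temp * np.exp(-anneal * step), min_temp)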
def test(data_loader, model, loss_function, writer):
model.eval()
losses = OrderedDict()
losses['loss'] = 0
data_iterator = iter(data_loader)
with torch.no_grad():
for batch_idx, (x, _) in enumerate(data_iterator):
x = x.to(args.device)
x_tilde, z_app_mean, z_app_var, z_vis_mean = model(x, args.temp)
loss, loss_dict = loss_function(
x_tilde, x, z_app_mean, z_app_var, z_vis_mean,
categorical=args.categorical, py=args.py, beta_p=args.beta_p,
beta_a=args.beta_a, beta_v=args.beta_v,
beta_ea=args.beta_ea, beta_ew=args.beta_ew
)
losses['loss'] += loss.item()
for loss_key, loss_value in loss_dict.items():
losses[loss_key] = losses[loss_key] + loss_value.item() \
if loss_key in losses else loss_value.item()
losses = OrderedDict([(k, v / (batch_idx+1)) for k, v in losses.items()])
logstr = '\t'.join(['{}: {:0.4f}'.format(k, v) for k, v in losses.items()])
print('[End of test epoch]')
print(logstr)
# Logs
for loss_key, loss_value in losses.items():
writer.add_scalar('loss/test/' + loss_key, loss_value, args.steps)
return losses['loss']
def plot_graph(height, width, channels, model, writer):
fake = torch.from_numpy(np.random.randn(args.batch_size,
channels, height, width).astype(np.float32))
fake = fake.to(args.device)
writer.add_graph(model, fake)
def main():
np.random.seed(args.seed)
torch.manual_seed(args.seed)
args.steps = 0
writer = SummaryWriter(args.log_dir)
save_filename = args.model_dir
train_loader, test_loader, (channels, height, width), num_classes, _ = \
data_loaders(args.dataset, data_folder=args.data_folder,
classify=False, size=args.size, inet=args.inet,
batch_size=args.batch_size, num_workers=args.workers)
# Fixed images for Tensorboard
fixed_images, _ = next(iter(test_loader))
fixed_images = fixed_images.to(args.device)
fixed_grid = make_grid(commons.unnorm(fixed_images).cpu().data, nrow=32, pad_value=1)
writer.add_image('original', fixed_grid, 0)
# build a VAE model
vae_model, _ = load_vae_model((channels, height, width),
args.arch,
encoder_arch=args.encoder_arch,
decoder_arch=args.decoder_arch,
hidden_size=args.hidden_size,
num_parts=args.num_parts,
base_depth=args.ngf,
independent=args.independent,
hard=args.hard,
categorical=args.categorical,
scale=args.scale,
device=args.device)
args.py = 1 / args.num_parts if args.py is None else args.py
if torch.cuda.device_count() > 1:
print("Let's use", torch.cuda.device_count(), "GPUs!")
# dim = 0 [30, xxx] -> [10, ...], [10, ...], [10, ...] on 3 GPUs
vae_model = nn.DataParallel(vae_model)
vae_model.to(args.device)
if args.pretrained is not None:
print("Loading pretrained model from %s" % args.pretrained)
pretrained_dict = torch.load(args.pretrained, map_location=args.device)
if type(pretrained_dict) == OrderedDict:
vae_model.load_state_dict(pretrained_dict)
elif 'vae_dict' in pretrained_dict:
vae_model.load_state_dict(pretrained_dict['vae_dict'])
else:
print('debug')
sys.exit(0)
# Generate samples only, no training
if args.evaluate:
with torch.no_grad():
# Reconstructions after current epoch
if torch.cuda.device_count() > 1:
reconstructions = vae_model.module.get_reconstructions(
fixed_images, temp=args.temp)
else:
reconstructions = vae_model.get_reconstructions(
fixed_images, temp=args.temp)
for key in reconstructions:
grid = make_grid(reconstructions[key].cpu(), nrow=32, pad_value=1)
writer.add_image(key, grid, 0)
# Random samples after current epoch
if torch.cuda.device_count() > 1:
random_samples = vae_model.module.get_random_samples(py=args.py)
else:
random_samples = vae_model.get_random_samples(py=args.py)
for key in random_samples:
grid = make_grid(random_samples[key].cpu(), nrow=32, pad_value=1)
writer.add_image(key, grid, 0)
sys.exit(0)
opt_v = torch.optim.Adam(vae_model.parameters(), lr=args.lr, betas=(0.5, 0.999))
recon_mask = None
if args.recon_mask == 'edge':
recon_mask = EdgeWeights(nc=channels, scale=args.scale)
if args.arch == 'vae':
loss_function = BetaVaeLoss(beta=args.beta_a, mask_nn=recon_mask)
elif args.arch == 'convvae':
loss_function = VaeConcreteLoss(
beta_v=args.beta_v,
py=args.py,
categorical=args.categorical,
mask_nn=recon_mask
)
elif args.arch == 'patchy':
if args.beta_p == 0. and args.beta_ea == 0. and args.beta_ew == 0.:
loss_function = BetaVaeConcreteLoss(
beta_a=args.beta_a,
beta_v=args.beta_v,
py=args.py,
categorical=args.categorical,
mask_nn=recon_mask
)
elif args.beta_ea == 0. and args.beta_ew == 0.:
loss_function = BetaVaeConcretePartsLoss(
beta_a=args.beta_a,
beta_v=args.beta_v,
beta_p=args.beta_p,
py=args.py,
categorical=args.categorical,
)
else:
loss_function = BetaVaeConcretePartsEntropyLoss(
beta_a=args.beta_a,
beta_v=args.beta_v,
beta_p=args.beta_p,
beta_ea=args.beta_ea,
beta_ew=args.beta_ew,
py=args.py,
categorical=args.categorical,
)
else:
print('Unknown model architecture: %s' % args.arch)
sys.exit(1)
if args.gan:
gan_model = Discriminator(height, nc=channels, ndf=args.ndf, scale=args.scale).to(args.device)
opt_d = torch.optim.Adam(gan_model.parameters(), lr=args.lr, betas=(0.5, 0.999))
d_loss_fn = DiscLoss(args.beta_g)
# test after seeing approx. every 50000 images
# num_epochs = (args.num_epochs * len(train_loader.dataset)) // 50000
for epoch in range(1, args.num_epochs + 1):
print("================== Epoch: {} ==================".format(epoch))
if args.gan:
train_loss = train_vaegan(train_loader, gan_model, vae_model, opt_d, opt_v, d_loss_fn, loss_function, writer)
else:
train_loss = train(train_loader, vae_model, opt_v, loss_function, writer)
test_loss = test(test_loader, vae_model, loss_function, writer)
if epoch == 1:
best_loss = test_loss
if epoch % args.save_interval != 0:
continue
# Save model
with torch.no_grad():
# Reconstructions after current epoch
if torch.cuda.device_count() > 1:
reconstructions = vae_model.module.get_reconstructions(
fixed_images, temp=args.temp)
else:
reconstructions = vae_model.get_reconstructions(
fixed_images, temp=args.temp)
for key in reconstructions:
grid = make_grid(reconstructions[key].cpu(), nrow=32, pad_value=1, normalize=True)
writer.add_image(key, grid, epoch)
# Random samples after current epoch
if torch.cuda.device_count() > 1:
random_samples = vae_model.module.get_random_samples(py=args.py)
else:
random_samples = vae_model.get_random_samples(py=args.py)
for key in random_samples:
grid = make_grid(random_samples[key].cpu(), nrow=32, pad_value=1, normalize=True)
writer.add_image(key, grid, epoch)
f = '{0}/model_{1}.pt'.format(save_filename, epoch)
save_state = {
'args': args,
'vae_dict': vae_model.state_dict(),
'loss': train_loss,
}
if args.gan:
save_state['disc_dict'] = gan_model.state_dict()
torch.save(save_state, f)
if test_loss < best_loss:
best_loss = test_loss
shutil.copyfile(f, '{0}/best.pt'.format(save_filename))
print("Model saved at: {0}/best.pt".format(save_filename))
print("# Parameters: {}".format(count_parameters(vae_model)))
if torch.cuda.device_count() > 1:
summary(vae_model.module, (channels, height, width))
else:
summary(vae_model, (channels, height, width))
if __name__ == '__main__':
import argparse
import os
parser = argparse.ArgumentParser(description='Patchy VAE')
# Dataset
parser.add_argument('--dataset', type=str, default='cifar100',
help='name of the dataset (default: cifar100)')
parser.add_argument('--data-folder', type=str, default='./data',
help='name of the data folder (default: | |
try:
from source.Multiprocessing import *
except ImportError:
from Multiprocessing import *
def print_citation_mantis():
paper_doi='https://doi.org/10.1093/gigascience/giab042'
separator='##########################################################################################################################'
res=f'{separator}\n# Thank you for using Mantis, please make sure you cite the respective paper {paper_doi} #\n{separator}'
print(res)
def print_version(user,project):
import requests
response = requests.get(f"https://api.github.com/repos/{user}/{project}/releases/latest")
json=response.json()
if 'name' in json:
print(f'{project}\'s latest release is:',json['name'])
else:
print('No release available')
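# Example call (illustrative; Mantis is assumed to live at github.com/PedroMTQ/mantis,
# per the citation DOI above):
# print_version('PedroMTQ', 'mantis')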
def run_mantis(input_path,
output_folder,
mantis_config=None,
evalue_threshold=None,
overlap_value=None,
minimum_consensus_overlap=None,
organism_details=None,
genetic_code=None,
domain_algorithm=None,
best_combo_formula=None,
sorting_type=None,
keep_files=False,
skip_consensus=False,
skip_managed_memory=False,
force_evalue=False,
no_consensus_expansion=False,
no_taxonomy=False,
no_unifunc=False,
kegg_matrix=False,
verbose_kegg_matrix=False,
output_gff=False,
verbose=True,
default_workers=None,
chunk_size=None,
time_limit=None,
hmmer_threads=None,
cores=None,
memory=None,
):
if evalue_threshold:
if evalue_threshold != 'dynamic': evalue_threshold = float(evalue_threshold)
if overlap_value: overlap_value = float(overlap_value)
if minimum_consensus_overlap: minimum_consensus_overlap = float(minimum_consensus_overlap)
if best_combo_formula: best_combo_formula = int(best_combo_formula)
if default_workers: default_workers = int(default_workers)
if chunk_size: chunk_size = int(chunk_size)
if time_limit: time_limit = int(time_limit)
if hmmer_threads: hmmer_threads = int(hmmer_threads)
if cores: cores = int(cores)
if memory: memory = int(memory)
if genetic_code: genetic_code = int(genetic_code)
mantis = MANTIS(
input_path=input_path,
output_folder=output_folder,
mantis_config=mantis_config,
evalue_threshold=evalue_threshold,
overlap_value=overlap_value,
minimum_consensus_overlap=minimum_consensus_overlap,
organism_details=organism_details,
genetic_code=genetic_code,
domain_algorithm=domain_algorithm,
best_combo_formula=best_combo_formula,
sorting_type=sorting_type,
keep_files=keep_files,
skip_consensus=skip_consensus,
skip_managed_memory=skip_managed_memory,
force_evalue=force_evalue,
no_consensus_expansion=no_consensus_expansion,
no_taxonomy=no_taxonomy,
no_unifunc=no_unifunc,
kegg_matrix=kegg_matrix,
verbose_kegg_matrix=verbose_kegg_matrix,
output_gff=output_gff,
verbose=verbose,
default_workers=default_workers,
chunk_size=chunk_size,
time_limit=time_limit,
hmmer_threads=hmmer_threads,
user_cores=cores,
user_memory=memory,
)
mantis.run_mantis()
def run_mantis_test(input_path,
output_folder,
mantis_config,
):
mantis = MANTIS(
input_path=input_path,
output_folder=output_folder,
mantis_config=mantis_config,
keep_files=True)
mantis.run_mantis_test()
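# Illustrative usage sketch of run_mantis (file paths here are hypothetical):
# annotate a single protein fasta with default settings:
# run_mantis(input_path='sample.faa', output_folder='mantis_out/')
# the same call with a dynamic e-value threshold and 4 cores:
# run_mantis(input_path='sample.faa', output_folder='mantis_out/',
#            evalue_threshold='dynamic', cores=4)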
class MANTIS(Multiprocessing):
def __init__(self,
input_path=None,
output_folder=None,
mantis_config=None,
evalue_threshold=None,
overlap_value=None,
minimum_consensus_overlap=None,
domain_algorithm=None,
sorting_type=None,
best_combo_formula=None,
organism_details={},
genetic_code=None,
redirect_verbose=None,
keep_files=False,
skip_consensus=False,
skip_managed_memory=False,
force_evalue=False,
no_consensus_expansion=False,
no_taxonomy=False,
no_unifunc=False,
kegg_matrix=False,
verbose_kegg_matrix=False,
output_gff=False,
verbose=True,
default_workers=None,
chunk_size=None,
time_limit=None,
hmmer_threads=None,
user_cores=None,
user_memory=None,
):
self.output_folder = add_slash(output_folder)
self.redirect_verbose = redirect_verbose
print('------------------------------------------', flush=True, file=self.redirect_verbose)
print_cyan('Setting up Mantis!', flush=True, file=self.redirect_verbose)
print('------------------------------------------', flush=True, file=self.redirect_verbose)
self.input_path = input_path
self.mantis_config = mantis_config
#Prediction parameters
self.evalue_threshold = evalue_threshold
self.default_evalue_threshold = 1e-3  # 1e-6 might be a better default
self.minimum_evalue_threshold=1e-2
self.force_evalue=force_evalue
if overlap_value: self.overlap_value = overlap_value
else: self.overlap_value = 0.1
if minimum_consensus_overlap: self.minimum_consensus_overlap=minimum_consensus_overlap
else: self.minimum_consensus_overlap = 0.7
if domain_algorithm: self.domain_algorithm = domain_algorithm
else: self.domain_algorithm = 'dfs'
if best_combo_formula: self.best_combo_formula = best_combo_formula
else: self.best_combo_formula = 1
if hmmer_threads: self.hmmer_threads = hmmer_threads
# 1 should be ideal if we are already using the maximum amount of cores with Mantis
else: self.hmmer_threads = 1
#the user can force the sorting type
if sorting_type: self.sorting_type = sorting_type
else:
#but we recommend using bitscore for dfs, evalue for bpo or heuristic
if self.domain_algorithm =='dfs': self.sorting_type='bitscore'
else: self.sorting_type='evalue'
self.organism_details = organism_details
self.genetic_code = genetic_code
#Execution parameters
self.skip_consensus = skip_consensus
self.skip_managed_memory = skip_managed_memory
self.no_consensus_expansion = no_consensus_expansion
self.no_unifunc = no_unifunc
self.kegg_matrix = kegg_matrix
self.verbose_kegg_matrix = verbose_kegg_matrix
self.output_gff = output_gff
if self.verbose_kegg_matrix: self.kegg_matrix=True
self.default_workers = default_workers
self.user_memory = user_memory
# chunk size is highly relevant in the execution time
self.chunk_size = chunk_size
if time_limit:
self.time_limit = time_limit
else:
self.time_limit = 60
# DIAMOND db size used for e-value scaling; we inflate the db size to avoid overly good e-values (i.e., exactly 0), which multiplicative scaling could not change
self.diamond_db_size=1e6
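# Illustrative example of the scaling above (assumed formula, based on the
# comment: e-values grow roughly linearly with database size):
#   evalue_scaled = evalue * (self.diamond_db_size / real_db_size)
# e.g., 1e-50 * (1e6 / 1e4) = 1e-48, whereas an e-value of exactly 0 stays 0
# under any multiplicative scaling, hence the inflated db size.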
print_cyan('Reading config file and setting up paths', flush=True, file=self.redirect_verbose)
Assembler.__init__(self, verbose=verbose, redirect_verbose=redirect_verbose,mantis_config=mantis_config,keep_files=keep_files,user_cores=user_cores,no_taxonomy=no_taxonomy)
datetime_str = str(datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
if self.input_path:
print_cyan(f'This MANTIS process started running at {datetime_str}',flush=True, file=self.redirect_verbose)
self.chunks_to_annotate = []
self.chunks_to_fasta = {}
self.fastas_to_annotate = []
self.print_available_hardware()
def print_available_hardware(self):
if self.user_cores:
print(f'Cores allocated: {self.user_cores}')
else:
print(f'Cores allocated: {ENVIRONMENT_CORES}')
if self.user_memory:
print(f'Memory allocated: {self.user_memory}')
else:
print(f'Memory allocated: {round(AVAILABLE_RAM, 2)}')
print(f'Workers per core: {WORKER_PER_CORE}')
def __str__(self):
if self.kegg_matrix and not self.verbose_kegg_matrix:
kegg_matrix_str='Generate KEGG modules matrix:\t' + str(self.kegg_matrix) + '\n'
elif self.kegg_matrix and self.verbose_kegg_matrix:
kegg_matrix_str='Generate KEGG modules matrix in verbose mode:\t' + str(self.verbose_kegg_matrix) + '\n'
else:
kegg_matrix_str=''
output_list = [
'Output folder:\t\t\t' + str(self.output_folder) + '\n' if self.output_folder else '',
'Mantis config:\t\t\t' + str(self.mantis_config) + '\n' if self.mantis_config else '',
'Target path:\t\t\t' + str(self.input_path) + '\n' if self.input_path else '',
'E-value threshold:\t\t' + str(self.evalue_threshold) + '\n' if self.evalue_threshold else '',
'E-value threshold:\t\t' + str(self.default_evalue_threshold) + '\n' if not self.evalue_threshold else '',
'Forcing e-value:\t\t' + str(self.force_evalue) + '\n' if self.force_evalue else '',
'Overlap value:\t\t\t' + str(self.overlap_value) + '\n' if self.overlap_value else '',
'Default workers:\t\t' + str(self.default_workers) + '\n' if self.default_workers else '',
'User cores:\t\t\t' + str(self.user_cores) + '\n' if self.user_cores else '',
'HMMER threads:\t\t\t' + str(self.hmmer_threads) + '\n' if self.hmmer_threads else '',
'Chunk size:\t\t\t' + str(self.chunk_size) + '\n' if self.chunk_size else '',
'Algorithm:\t\t\t' + str(self.domain_algorithm) + '\n' if self.domain_algorithm else '',
#'Formula:\t\t\t' + str(self.best_combo_formula) + '\n' if self.best_combo_formula else '',
#'Sorting type:\t\t\t' + str(self.sorting_type) + '\n' if self.sorting_type else '',
'Outputting GFF:\t\t\t' + str(self.output_gff) + '\n' if self.output_gff else '',
'Skip consensus:\t\t' + str(self.skip_consensus) + '\n' if self.skip_consensus else '',
'Skip memory management:\t\t' + str(self.skip_managed_memory) + '\n' if self.skip_managed_memory else '',
'Skip consensus expansion:\t' + str(self.no_consensus_expansion) + '\n' if self.no_consensus_expansion else '',
'Do not use taxonomy:\t' + str(not self.use_taxonomy) + '\n' if not self.use_taxonomy else '',
'Skip text similarity analysis:\t' + str(self.no_unifunc) + '\n' if self.no_unifunc else '',
kegg_matrix_str,
'------------------------------------------']
return 'User configuration:' + '\n' + '------------------------------------------' + '\n' + ''.join(output_list)
def generate_fastas_to_annotate(self):
if '.' not in self.input_path:
kill_switch(InvalidTargetFile,'Your file does not have an extension, so Mantis can\'t detect the file format.\nPlease provide a valid target file',flush=True, file=self.redirect_verbose)
else:
if os.path.isdir(self.input_path):
self.annotate_directory()
elif self.input_path.endswith('.tsv'):
self.annotate_multiple_samples()
elif self.input_path.split('.')[-2] in ['tar']:
self.annotate_compressed_sample()
elif self.input_path.endswith('.gz') or self.input_path.endswith('.zip'):
self.annotate_compressed_sample()
elif is_fasta(self.input_path):
self.annotate_one_sample()
else:
kill_switch(InvalidTargetFile,'Your file does not appear to be a fasta. If you want to annotate multiple samples, make sure your file has the <.tsv> extension.',flush=True, file=self.redirect_verbose)
if not self.fastas_to_annotate: kill_switch(NoValidFiles)
for file_path, output_path, organism_details, genetic_code, count_seqs_original_file,count_residues_original_file in self.fastas_to_annotate:
Path(output_path).mkdir(parents=True, exist_ok=True)
def annotate_multiple_samples(self):
try:
with open(self.input_path) as file:
line = file.readline()
if SPLITTER not in line:
line = file.readline()
while line:
line = line.strip('\n').split('\t')
if len(line) >= 2:
query_name = line[0]
line_path = line[1]
if len(line) == 4:
genetic_code = line[-1]
organism_details = ' '.join(line[2:-1])
if organism_details=='None': organism_details=''
elif len(line)== 3:
organism_details = ' '.join(line[2:])
if organism_details=='None': organism_details=''
genetic_code=None
else:
organism_details = None
genetic_code = None
count_seqs_original_file = get_seqs_count(line_path)
count_residues_original_file = count_residues(line_path)
if os.path.exists(line_path):
self.fastas_to_annotate.append([line_path, add_slash(self.output_folder + query_name),
organism_details,genetic_code,
count_seqs_original_file,count_residues_original_file])
else:
kill_switch(TargetFileNotFound,
flush=True, file=self.redirect_verbose)
line = file.readline()
except:
kill_switch(InvalidTargetFile,'If you want to annotate multiple samples, make sure your file is correctly formatted. Please see the examples in the <tests> folder.',
flush=True, file=self.redirect_verbose)
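# Expected TSV layout, as inferred from the parsing above (tab-separated
# columns; sample names and paths below are hypothetical):
#   query_name<TAB>fasta_path
#   query_name<TAB>fasta_path<TAB>organism_details
#   query_name<TAB>fasta_path<TAB>organism_details<TAB>genetic_code
# e.g.: sample_2<TAB>/data/sample_2.fna<TAB>Escherichia coli<TAB>11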
def annotate_directory(self):
try:
list_dir = os.listdir(self.input_path)
for file in list_dir:
if 'faa' in file.split('.')[-1]:
query_name = '.'.join(file.split('.')[0:-1])
query_path = self.input_path + file
count_seqs_original_file = get_seqs_count(query_path)
count_residues_original_file = count_residues(query_path)
self.fastas_to_annotate.append([query_path, add_slash(self.output_folder + query_name), None,None,
count_seqs_original_file,count_residues_original_file])
except:
kill_switch(InvalidTargetFile,'Something went wrong when annotating the provided directory!',flush=True, file=self.redirect_verbose)
def annotate_compressed_sample(self):
try:
uncompressed_path = self.output_folder + 'uncompressed_samples/'
Path(uncompressed_path).mkdir(parents=True, exist_ok=True)
uncompressing_function = uncompress_archive(source_filepath=self.input_path,
extract_path=uncompressed_path)
list_dir = os.listdir(uncompressed_path)
for file in list_dir:
if os.path.isdir(uncompressed_path + file):
sub_list_dir = os.listdir(uncompressed_path + file)
for sub_file in sub_list_dir:
if 'faa' in sub_file.split('.')[-1]:
query_name = '.'.join(sub_file.split('.')[0:-1])
query_path = add_slash(uncompressed_path + file) + sub_file
count_seqs_original_file = get_seqs_count(query_path)
count_residues_original_file = count_residues(query_path)
self.fastas_to_annotate.append(
[query_path, add_slash(self.output_folder + query_name), None,None,
count_seqs_original_file,count_residues_original_file])
if 'faa' in file.split('.')[-1]:
query_name = '.'.join(file.split('.')[0:-1])
query_path = uncompressed_path + file
count_seqs_original_file = get_seqs_count(query_path)
count_residues_original_file = count_residues(query_path)
self.fastas_to_annotate.append(
[query_path, add_slash(self.output_folder + query_name), None, None,
count_seqs_original_file,count_residues_original_file])
except:
kill_switch(InvalidTargetFile,'Something went wrong when annotating the provided compressed file!',flush=True, file=self.redirect_verbose)
def annotate_one_sample(self):
count_seqs_original_file = get_seqs_count(self.input_path)
count_residues_original_file = count_residues(self.input_path)
self.fastas_to_annotate.append(
[self.input_path, self.output_folder, self.organism_details,self.genetic_code,
count_seqs_original_file,count_residues_original_file])
def setup_organism_lineage(self, organism_details, stdout_file):
if not self.use_taxonomy: return []
if not organism_details:
print_cyan('No data provided for organism lineage!', flush=True, file=stdout_file)
return []
if re.match(r'\d+', organism_details):
print_cyan('Setting up organism lineage from provided NCBI taxon id', flush=True, file=stdout_file)
organism_lineage = self.fetch_ncbi_lineage(organism_details)
else:
print_cyan('Setting up organism lineage from provided taxon synonym or GTDB lineage', flush=True, file=stdout_file)
organism_details_dict = {'synonyms': organism_details}
ncbi_taxon_id = self.get_taxa_ncbi(organism_details)
organism_lineage = self.fetch_ncbi_lineage(ncbi_taxon_id)
return organism_lineage
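# Example (illustrative): organism_details='562' or
# organism_details='Escherichia coli' should both resolve to the NCBI lineage
# for E. coli (taxid 562), e.g.
# ['Bacteria', 'Proteobacteria', ..., 'Escherichia', 'Escherichia coli'].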
def generate_translated_sample(self):
ncbi_resources=add_slash(self.mantis_paths['resources']+'NCBI')
translation_tables = parse_translation_tables(ncbi_resources + 'gc.prt')
for i in range(len(self.fastas_to_annotate)):
file_path, output_path, organism_details,genetic_code, count_seqs_original_file , count_residues_original_file = self.fastas_to_annotate[i]
sample_type = check_sample_type(file_path)
if sample_type=='dna' or sample_type=='rna':
if not genetic_code:
genetic_code=11
translated_fasta_path = f'{output_path}translated_gc_{genetic_code}.fasta'
try:
write_translated_fasta(original_fasta_path=file_path, translated_fasta_path=translated_fasta_path, translation_table=translation_tables[genetic_code],sample_type=sample_type)
self.fastas_to_annotate[i][0] = translated_fasta_path
self.fastas_to_annotate[i][5] = count_residues(translated_fasta_path)
except Exception as e:
kill_switch(InvalidTranslation,file_path)
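# Note: translation table 11 (the default above) is the NCBI genetic code for
# bacteria, archaea and plant plastids; nucleotide samples are translated and
# written to e.g. <output_path>translated_gc_11.fasta before annotation.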
def generate_sample_lineage(self):
self.start_taxonomy_connection()
for i in range(len(self.fastas_to_annotate)):
file_path, output_path, organism_details,genetic_code, count_seqs_original_file,count_residues_original_file = self.fastas_to_annotate[i]
stdout_file = open(output_path + 'Mantis.out', 'a+')
organism_lineage = self.setup_organism_lineage(organism_details, stdout_file)
self.fastas_to_annotate[i][2] = organism_lineage
print('------------------------------------------', flush=True, file=stdout_file)
if organism_lineage:
print_cyan('Target file:\n' + file_path + '\n has the following taxonomy lineage: ' + ' > '.join(organism_lineage), flush=True, file=stdout_file)
else:
print_cyan('Target file:\n' + file_path + '\n has no organism lineage!', flush=True, file=stdout_file)
print('------------------------------------------', flush=True, file=stdout_file)
stdout_file.close()
| |
termination grace period. Other management of the container blocks until the hook completes or until the termination grace period is reached. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks
"""
if post_start is not None:
pulumi.set(__self__, "post_start", post_start)
if pre_stop is not None:
pulumi.set(__self__, "pre_stop", pre_stop)
@property
@pulumi.getter(name="postStart")
def post_start(self) -> Optional['outputs.SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersLifecyclePostStart']:
"""
PostStart is called immediately after a container is created. If the handler fails, the container is terminated and restarted according to its restart policy. Other management of the container blocks until the hook completes. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks
"""
return pulumi.get(self, "post_start")
@property
@pulumi.getter(name="preStop")
def pre_stop(self) -> Optional['outputs.SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersLifecyclePreStop']:
"""
PreStop is called immediately before a container is terminated due to an API request or management event such as liveness/startup probe failure, preemption, resource contention, etc. The handler is not called if the container crashes or exits. The reason for termination is passed to the handler. The Pod's termination grace period countdown begins before the PreStop hook is executed. Regardless of the outcome of the handler, the container will eventually terminate within the Pod's termination grace period. Other management of the container blocks until the hook completes or until the termination grace period is reached. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks
"""
return pulumi.get(self, "pre_stop")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersLifecyclePostStart(dict):
"""
PostStart is called immediately after a container is created. If the handler fails, the container is terminated and restarted according to its restart policy. Other management of the container blocks until the hook completes. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks
"""
def __init__(__self__, *,
exec_: Optional['outputs.SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersLifecyclePostStartExec'] = None,
http_get: Optional['outputs.SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersLifecyclePostStartHttpGet'] = None,
tcp_socket: Optional['outputs.SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersLifecyclePostStartTcpSocket'] = None):
"""
PostStart is called immediately after a container is created. If the handler fails, the container is terminated and restarted according to its restart policy. Other management of the container blocks until the hook completes. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks
:param 'SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersLifecyclePostStartExecArgs' exec_: One and only one of the following should be specified. Exec specifies the action to take.
:param 'SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersLifecyclePostStartHttpGetArgs' http_get: HTTPGet specifies the http request to perform.
:param 'SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersLifecyclePostStartTcpSocketArgs' tcp_socket: TCPSocket specifies an action involving a TCP port. TCP hooks not yet supported TODO: implement a realistic TCP lifecycle hook
"""
if exec_ is not None:
pulumi.set(__self__, "exec_", exec_)
if http_get is not None:
pulumi.set(__self__, "http_get", http_get)
if tcp_socket is not None:
pulumi.set(__self__, "tcp_socket", tcp_socket)
@property
@pulumi.getter(name="exec")
def exec_(self) -> Optional['outputs.SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersLifecyclePostStartExec']:
"""
One and only one of the following should be specified. Exec specifies the action to take.
"""
return pulumi.get(self, "exec_")
@property
@pulumi.getter(name="httpGet")
def http_get(self) -> Optional['outputs.SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersLifecyclePostStartHttpGet']:
"""
HTTPGet specifies the http request to perform.
"""
return pulumi.get(self, "http_get")
@property
@pulumi.getter(name="tcpSocket")
def tcp_socket(self) -> Optional['outputs.SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersLifecyclePostStartTcpSocket']:
"""
TCPSocket specifies an action involving a TCP port. TCP hooks not yet supported TODO: implement a realistic TCP lifecycle hook
"""
return pulumi.get(self, "tcp_socket")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersLifecyclePostStartExec(dict):
"""
One and only one of the following should be specified. Exec specifies the action to take.
"""
def __init__(__self__, *,
command: Optional[Sequence[str]] = None):
"""
One and only one of the following should be specified. Exec specifies the action to take.
:param Sequence[str] command: Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy.
"""
if command is not None:
pulumi.set(__self__, "command", command)
@property
@pulumi.getter
def command(self) -> Optional[Sequence[str]]:
"""
Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy.
"""
return pulumi.get(self, "command")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersLifecyclePostStartHttpGet(dict):
"""
HTTPGet specifies the http request to perform.
"""
def __init__(__self__, *,
port: Any,
host: Optional[str] = None,
http_headers: Optional[Sequence['outputs.SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersLifecyclePostStartHttpGetHttpHeaders']] = None,
path: Optional[str] = None,
scheme: Optional[str] = None):
"""
HTTPGet specifies the http request to perform.
:param Union[int, str] port: Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.
:param str host: Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead.
:param Sequence['SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersLifecyclePostStartHttpGetHttpHeadersArgs'] http_headers: Custom headers to set in the request. HTTP allows repeated headers.
:param str path: Path to access on the HTTP server.
:param str scheme: Scheme to use for connecting to the host. Defaults to HTTP.
"""
pulumi.set(__self__, "port", port)
if host is not None:
pulumi.set(__self__, "host", host)
if http_headers is not None:
pulumi.set(__self__, "http_headers", http_headers)
if path is not None:
pulumi.set(__self__, "path", path)
if scheme is not None:
pulumi.set(__self__, "scheme", scheme)
@property
@pulumi.getter
def port(self) -> Any:
"""
Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.
"""
return pulumi.get(self, "port")
@property
@pulumi.getter
def host(self) -> Optional[str]:
"""
Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead.
"""
return pulumi.get(self, "host")
@property
@pulumi.getter(name="httpHeaders")
def http_headers(self) -> Optional[Sequence['outputs.SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersLifecyclePostStartHttpGetHttpHeaders']]:
"""
Custom headers to set in the request. HTTP allows repeated headers.
"""
return pulumi.get(self, "http_headers")
@property
@pulumi.getter
def path(self) -> Optional[str]:
"""
Path to access on the HTTP server.
"""
return pulumi.get(self, "path")
@property
@pulumi.getter
def scheme(self) -> Optional[str]:
"""
Scheme to use for connecting to the host. Defaults to HTTP.
"""
return pulumi.get(self, "scheme")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersLifecyclePostStartHttpGetHttpHeaders(dict):
"""
HTTPHeader describes a custom header to be used in HTTP probes
"""
def __init__(__self__, *,
name: str,
value: str):
"""
HTTPHeader describes a custom header to be used in HTTP probes
:param str name: The header field name
:param str value: The header field value
"""
pulumi.set(__self__, "name", name)
pulumi.set(__self__, "value", value)
@property
@pulumi.getter
def name(self) -> str:
"""
The header field name
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def value(self) -> str:
"""
The header field value
"""
return pulumi.get(self, "value")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersLifecyclePostStartTcpSocket(dict):
"""
TCPSocket specifies an action involving a TCP port. TCP hooks not yet supported TODO: implement a realistic TCP lifecycle hook
"""
def __init__(__self__, *,
port: Any,
host: Optional[str] = None):
"""
TCPSocket specifies an action involving a TCP port. TCP hooks not yet supported TODO: implement a realistic TCP lifecycle hook
:param Union[int, str] port: Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.
:param str host: Optional: Host name to connect to, defaults to the pod IP.
"""
pulumi.set(__self__, "port", port)
if host is not None:
pulumi.set(__self__, "host", host)
@property
@pulumi.getter
def port(self) -> Any:
"""
Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.
"""
return pulumi.get(self, "port")
@property
@pulumi.getter
def host(self) -> Optional[str]:
"""
Optional: Host name to connect to, defaults to the pod IP.
"""
return pulumi.get(self, "host")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
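# Illustrative example (hypothetical values) of the lifecycle block these
# output types mirror, as it would appear in a SeldonDeployment container spec:
# lifecycle:
#   postStart:
#     exec:
#       command: ["/bin/sh", "-c", "echo started > /tmp/started"]
#   preStop:
#     httpGet:
#       path: /shutdown
#       port: 8000
#       scheme: HTTP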
@pulumi.output_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecContainersLifecyclePreStop(dict):
"""
PreStop is called immediately before a container is terminated due to an API request or management event such as liveness/startup probe failure, preemption, resource contention, etc. The handler is not called if the container crashes or exits. The reason for termination is passed to the handler. The Pod's | |
import os
import sys
import serial
class Autostar():
def __init__(self):
self.port = serial.Serial(
port='/dev/ttyAMA0',
baudrate = 9600,
parity=serial.PARITY_NONE,
stopbits=serial.STOPBITS_ONE,
bytesize=serial.EIGHTBITS,
timeout=1
)
# ACK - Alignment Query
def alignment_query(self):
self.port.write('\x06')  # send the ACK byte (0x06) itself, not the literal string '0x06'
response = self.port.readline()
assert response in ['A', 'L', 'P']
return response
# ACK <0x06> Query of alignment mounting mode. Returns:
# A If scope in AltAz Mode / L If scope in Land Mode / P If scope in Polar Mode
# A - Alignment Commands
def align_auto(self):
self.port.write(':Aa#')
response = self.port.readline()
return response
# :Aa# Start Telescope Automatic Alignment Sequence [LX200GPS only] Returns:
# 1: When complete (can take several minutes). 0: If scope not AzEl Mounted or align fails
def align_land(self):
self.port.write(':AL#')
response = self.port.readline()
return response
# :AL# Sets telescope to Land alignment mode Returns: nothing
def align_polar(self):
self.port.write(':AP#')
response = self.port.readline()
return response
# :AP# Sets telescope to Polar alignment mode Returns: nothing
def align_altaz(self):
self.port.write(':AA#')
response = self.port.readline()
return response
# :AA# Sets telescope the AltAz alignment mode Returns: nothing
# $B – Active Backlash Compensation
def antibacklash_alt(self, value):
self.port.write(':$BA{:02d}#'.format(value))  # dd must be zero-padded two digits
response = self.port.readline()
return response
# :$BAdd#
# Set Altitude/Dec Antibacklash
# Returns Nothing
def antibacklash_az(self, value):
self.port.write(':$BZ{:02d}#'.format(value))  # dd must be zero-padded two digits
response = self.port.readline()
return response
# :$BZdd#
# Set Azimuth/RA Antibacklash
# Returns Nothing
# B - Reticule/Accessory Control
def reticule_brightness_inc(self):
self.port.write(':B+#')
response = self.port.readline()
return response
# :B+# Increase reticule Brightness Return: Nothing
def reticule_brightness_dec(self):
self.port.write(':B-#')
response = self.port.readline()
return response
# :B-# Decrease Reticule Brightness Return: Nothing
def set_reticule_flash_rate(self, value):
assert value in [0,1,2,3,4,5,6,7,8,9]
self.port.write(':B{:1d}#'.format(value))
response = self.port.readline()
return response
# :B<n># Set Reticle flash rate to <n> (an ASCII expressed number) <n> Values of 0..3 for LX200 series
# <n> Values of 0..9 for Autostar and LX200GPS Return: Nothing
def set_reticule_flash_cycle(self, value):
assert value in range(16)  # 0 = On, 1..15 flash rate
self.port.write(':BD{:d}#'.format(value))
response = self.port.readline()
return response
# :BDn# Set Reticule Duty flash duty cycle to <n> (an ASCII expressed digit) [LX200 GPS Only]
# <n> Values: 0 = On, 1..15 flash rate
# Return: Nothing
# C - Sync Control
def sync_selenographic(self):
self.port.write(':CL#')
response = self.port.readline()
return response
# :CL# Synchronize the telescope with the current Selenographic coordinates.
def sync_object(self):
self.port.write(':CM#')
response = self.port.readline()
return response
# :CM# Synchronizes the telescope's position with the currently selected database object's coordinates. Returns:
# LX200's - a "#" terminated string with the name of the object that was synced. Autostars & LX200GPS - A static string: " M31 EX GAL MAG 3.5 SZ178.0'#"
# D - Distance Bars
def get_distance_bars(self):
self.port.write(':D#')
response = self.port.readline()
return response
# :D# Requests a string of bars indicating the distance to the current library object. Returns:
# LX200's – a string of bar characters indicating the distance.
# Autostars and LX200GPS – a string containing one bar until a slew is complete, then a null string is returned.
# f - Fan Command
def fan_on(self):
self.port.write(':f+#')
response = self.port.readline()
return response
# :f+#
# LX 16”– Turn on the tube exhaust fan
# LX200GPS – Turn on power to accessor panel
# Autostar & LX200 < 16” – Not Supported
# Returns: nothing
def fan_off(self):
self.port.write(':f-#')
response = self.port.readline()
return response
# :f-#
# LX 16”– Turn off tube exhaust fan
# LX200GPS - Turn off power to accessory panel
# Autostar & LX200 < 16” – Not Supported
# Returns: Nothing
def get_tube_temp(self):
self.port.write(':fT#')
response = self.port.readline()
return response
# :fT#
# LX200GPS – Return Optical Tube Assembly Temperature
# Returns <sdd.ddd># - a ‘#’ terminated signed ASCII real number indicating the Celsius ambient temperature.
# All others – Not supported
# F – Focuser Control
def focus_in(self):
self.port.write(':F+#')
response = self.port.readline()
return response
# :F+# Start Focuser moving inward (toward objective) Returns: None
def focus_out(self):
self.port.write(':F-#')
response = self.port.readline()
return response
# :F-# Start Focuser moving outward (away from objective) Returns: None
def focus_stop(self):
self.port.write(':FQ#')
response = self.port.readline()
return response
# :FQ# Halt Focuser Motion Returns: Nothing
def set_focus_fast(self):
self.port.write(':FF#')
response = self.port.readline()
return response
# :FF# Set Focus speed to fastest setting Returns: Nothing
def set_focus_slow(self):
self.port.write(':FS#')
response = self.port.readline()
return response
# :FS# Set Focus speed to slowest setting Returns: Nothing
def set_focus_speed(self, value):
assert value in [1,2,3,4]
self.port.write(':F{:1d}#'.format(value))
response = self.port.readline()
return response
# :F<n># Autostar & LX200GPS – set focuser speed to <n> where <n> is an ASCII digit 1..4 Returns: Nothing
# LX200 – Not Supported
# g – GPS/Magnetometer commands
# :g+# LX200GPS Only - Turn on GPS Returns: Nothing
# :g-# LX200GPS Only - Turn off GPS
# :gps# LX200GPS Only – Turns on NMEA GPS data stream.
# Returns: The next string from the GPS in standard NMEA format followed by a ‘#’ key
# :gT# Powers up the GPS and updates the system time from the GPS stream. The process may take several minutes to complete. During GPS update, normal handbox operations are interrupted. [LX200gps only]
# Returns: ‘0’ In the event that the user interrupts the process, or the GPS times out.
# Returns: ‘1’ After successful updates
# G – Get Telescope Information
def get_align0(self):
self.port.write(':G0#')
response = self.port.readline()
return response
# :G0# Get Alignment Menu Entry 0
# Returns: A ‘#’ Terminated ASCII string. [LX200 legacy command]
def get_align1(self):
self.port.write(':G1#')
response = self.port.readline()
return response
# :G1# Get Alignment Menu Entry 1
# Returns: A ‘#’ Terminated ASCII string. [LX200 legacy command]
def get_align2(self):
self.port.write(':G2#')
response = self.port.readline()
return response
# :G2# Get Alignment Menu Entry 2
# Returns: A ‘#’ Terminated ASCII string. [LX200 legacy command]
def get_tel_alt(self):
self.port.write(':GA#')
response = self.port.readline()
return response
# :GA# Get Telescope Altitude
# Returns: sDD*MM# or sDD*MM’SS#
# The current scope altitude. The returned format depends on the current precision setting.
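# Illustrative parsing sketch (hypothetical helper, low-precision sDD*MM# form only):
# def parse_alt(s):                       # e.g. "+45*30#"
#     deg, minutes = s.rstrip('#').split('*')
#     sign = -1 if deg.startswith('-') else 1
#     return sign * (abs(int(deg)) + int(minutes) / 60.0)   # -> 45.5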
def get_lt12(self):
self.port.write(':Ga#')
response = self.port.readline()
return response
# :Ga# Get Local Telescope Time In 12 Hour Format Returns: HH:MM:SS#
# The local time in 12-hour format
def get_mag_bright_lim(self):
self.port.write(':Gb#')
response = self.port.readline()
return response
# :Gb# Get Browse Brighter Magnitude Limit Returns: sMM.M#
# The magnitude of the brightest object to be returned from the telescope FIND/BROWSE command when searching for objects in the Deep Sky database.
def get_date(self):
self.port.write(':GC#')
response = self.port.readline()
return response
# :GC# Get current date. Returns: MM/DD/YY#
# The current local calendar date for the telescope.
def get_cal_format(self):
self.port.write(':Gc#')
response = self.port.readline()
return response
# :Gc# Get Calendar Format Returns: 12# or 24#
# Depending on the current telescope format setting.
def get_telescope_dec(self):
self.port.write(':GD#')
response = self.port.readline()
return response
# :GD# Get Telescope Declination.
# Returns: sDD*MM# or sDD*MM’SS#
# Depending upon the current precision setting for the telescope.
def get_obj_dec(self):
self.port.write(':Gd#')
response = self.port.readline()
return response
# :Gd# Get Currently Selected Object/Target Declination Returns: sDD*MM# or sDD*MM’SS#
# Depending upon the current precision setting for the telescope.
def get_field_diameter(self):
self.port.write(':GF#')
response = self.port.readline()
return response
# :GF# Get Find Field Diameter Returns: NNN#
# An ASCII integer expressing the diameter of the field search used in the IDENTIFY/FIND commands.
def get_mag_faint_lim(self):
self.port.write(':Gf#')
response = self.port.readline()
return response
# :Gf# Get Browse Faint Magnitude Limit Returns: sMM.M#
# The magnitude of the faintest object to be returned from the telescope FIND/BROWSE command.
def get_utc_offset(self):
self.port.write(':GG#')
response = self.port.readline()
return response
# :GG# Get UTC offset time Returns: sHH# or sHH.H#
# The number of decimal hours to add to local time to convert it to UTC. If the number is a whole number the sHH# form is returned, otherwise the longer form is returned. On Autostar and LX200GPS, the daylight savings setting in effect is factored into the returned value.
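# Example usage (illustrative sketch; assumes the handbox is attached to
# /dev/ttyAMA0 as configured in __init__):
# scope = Autostar()
# mode = scope.alignment_query()    # 'A', 'L' or 'P'
# print(scope.get_telescope_dec())  # e.g. "+45*30#"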
| |
'action': action,
'global': global_,
'metered': metered,
'resource_group': resource_group,
'updates': updates
}
data = {k: v for (k, v) in data.items() if v is not None}
data = json.dumps(data)
headers['content-type'] = 'application/json'
if 'headers' in kwargs:
headers.update(kwargs.get('headers'))
url = '/gateways/{0}/actions'.format(
*self.encode_path_vars(id))
request = self.prepare_request(method='POST',
url=url,
headers=headers,
params=params,
data=data)
response = self.send(request)
return response
def list_gateway_completion_notice(self,
id: str,
**kwargs
) -> DetailedResponse:
"""
Get completion notice.
Retrieve a Direct Link Dedicated gateway's completion notice.
:param str id: Direct Link Dedicated gateway identifier.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse with `BinaryIO` result
"""
if id is None:
raise ValueError('id must be provided')
headers = {}
sdk_headers = get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME,
service_version='V1',
operation_id='list_gateway_completion_notice')
headers.update(sdk_headers)
params = {
'version': self.version
}
if 'headers' in kwargs:
headers.update(kwargs.get('headers'))
url = '/gateways/{0}/completion_notice'.format(
*self.encode_path_vars(id))
request = self.prepare_request(method='GET',
url=url,
headers=headers,
params=params)
response = self.send(request)
return response
def create_gateway_completion_notice(self,
id: str,
*,
upload: BinaryIO = None,
upload_content_type: str = None,
**kwargs
) -> DetailedResponse:
"""
Create completion notice.
Upload a Direct Link Dedicated gateway completion notice.
:param str id: Direct Link Dedicated gateway identifier.
:param BinaryIO upload: (optional) Completion notice PDF file.
:param str upload_content_type: (optional) The content type of upload.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse
"""
if id is None:
raise ValueError('id must be provided')
headers = {}
sdk_headers = get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME,
service_version='V1',
operation_id='create_gateway_completion_notice')
headers.update(sdk_headers)
params = {
'version': self.version
}
form_data = []
if upload:
form_data.append(('upload', (None, upload, upload_content_type or 'application/octet-stream')))
if 'headers' in kwargs:
headers.update(kwargs.get('headers'))
url = '/gateways/{0}/completion_notice'.format(
*self.encode_path_vars(id))
request = self.prepare_request(method='PUT',
url=url,
headers=headers,
params=params,
files=form_data)
response = self.send(request)
return response
def list_gateway_letter_of_authorization(self,
id: str,
**kwargs
) -> DetailedResponse:
"""
Get letter of authorization.
Retrieve a Direct Link Dedicated gateway's Letter of Authorization.
:param str id: Direct Link Dedicated gateway identifier.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse with `BinaryIO` result
"""
if id is None:
raise ValueError('id must be provided')
headers = {}
sdk_headers = get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME,
service_version='V1',
operation_id='list_gateway_letter_of_authorization')
headers.update(sdk_headers)
params = {
'version': self.version
}
if 'headers' in kwargs:
headers.update(kwargs.get('headers'))
url = '/gateways/{0}/letter_of_authorization'.format(
*self.encode_path_vars(id))
request = self.prepare_request(method='GET',
url=url,
headers=headers,
params=params)
response = self.send(request)
return response
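# Illustrative usage sketch (assumes a configured client instance named
# <direct_link>; get_result() on a DetailedResponse with a BinaryIO result
# exposes the raw HTTP response, whose .content holds the PDF bytes):
# response = direct_link.list_gateway_letter_of_authorization(id=gateway_id)
# with open('loa.pdf', 'wb') as f:
#     f.write(response.get_result().content)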
#########################
# offeringInformation
#########################
def list_offering_type_locations(self,
offering_type: str,
**kwargs
) -> DetailedResponse:
"""
List available locations.
Retrieve the list of valid locations for the specified Direct Link offering.
:param str offering_type: The Direct Link offering type. Current supported
values are `"dedicated"` and `"connect"`.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse with `dict` result representing a `LocationCollection` object
"""
if offering_type is None:
raise ValueError('offering_type must be provided')
headers = {}
sdk_headers = get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME,
service_version='V1',
operation_id='list_offering_type_locations')
headers.update(sdk_headers)
params = {
'version': self.version
}
if 'headers' in kwargs:
headers.update(kwargs.get('headers'))
url = '/offering_types/{0}/locations'.format(
*self.encode_path_vars(offering_type))
request = self.prepare_request(method='GET',
url=url,
headers=headers,
params=params)
response = self.send(request)
return response
def list_offering_type_location_cross_connect_routers(self,
offering_type: str,
location_name: str,
**kwargs
) -> DetailedResponse:
"""
List routers.
Retrieve location specific cross connect router information. Only valid for
offering_type=dedicated locations.
:param str offering_type: The Direct Link offering type. Only value
`"dedicated"` is supported for this API.
:param str location_name: The name of the Direct Link location.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse with `dict` result representing a `LocationCrossConnectRouterCollection` object
"""
if offering_type is None:
raise ValueError('offering_type must be provided')
if location_name is None:
raise ValueError('location_name must be provided')
headers = {}
sdk_headers = get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME,
service_version='V1',
operation_id='list_offering_type_location_cross_connect_routers')
headers.update(sdk_headers)
params = {
'version': self.version
}
if 'headers' in kwargs:
headers.update(kwargs.get('headers'))
url = '/offering_types/{0}/locations/{1}/cross_connect_routers'.format(
*self.encode_path_vars(offering_type, location_name))
request = self.prepare_request(method='GET',
url=url,
headers=headers,
params=params)
response = self.send(request)
return response
def list_offering_type_speeds(self,
offering_type: str,
**kwargs
) -> DetailedResponse:
"""
List speed options.
List the available Direct Link speeds.
:param str offering_type: The Direct Link offering type. Current supported
values are `"dedicated"` and `"connect"`.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse with `dict` result representing a `OfferingSpeedCollection` object
"""
if offering_type is None:
raise ValueError('offering_type must be provided')
headers = {}
sdk_headers = get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME,
service_version='V1',
operation_id='list_offering_type_speeds')
headers.update(sdk_headers)
params = {
'version': self.version
}
if 'headers' in kwargs:
headers.update(kwargs.get('headers'))
url = '/offering_types/{0}/speeds'.format(
*self.encode_path_vars(offering_type))
request = self.prepare_request(method='GET',
url=url,
headers=headers,
params=params)
response = self.send(request)
return response
#########################
# ports
#########################
def list_ports(self,
*,
start: str = None,
limit: int = None,
location_name: str = None,
**kwargs
) -> DetailedResponse:
"""
List ports.
Retrieve list of available Direct Link connect ports. These ports can be used to
create Direct Link connect gateways.
:param str start: (optional) A server-supplied token determining which
resource to start the page on.
:param int limit: (optional) The number of resources to return on a page.
:param str location_name: (optional) Direct Link location short name.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse with `dict` result representing a `PortCollection` object
"""
headers = {}
sdk_headers = get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME,
service_version='V1',
operation_id='list_ports')
headers.update(sdk_headers)
params = {
'version': self.version,
'start': start,
'limit': limit,
'location_name': location_name
}
if 'headers' in kwargs:
headers.update(kwargs.get('headers'))
url = '/ports'
request = self.prepare_request(method='GET',
url=url,
headers=headers,
params=params)
response = self.send(request)
return response
def get_port(self,
id: str,
**kwargs
) -> DetailedResponse:
"""
Get port.
Retrieve Direct Link Connect provider port.
:param str id: The port identifier.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse with `dict` result representing a `Port` object
"""
if id is None:
raise ValueError('id must be provided')
headers = {}
sdk_headers = get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME,
service_version='V1',
operation_id='get_port')
headers.update(sdk_headers)
params = {
'version': self.version
}
if 'headers' in kwargs:
headers.update(kwargs.get('headers'))
url = '/ports/{0}'.format(
*self.encode_path_vars(id))
request = self.prepare_request(method='GET',
url=url,
headers=headers,
params=params)
response = self.send(request)
return response
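# Illustrative usage sketch (assumes a configured client instance named
# <direct_link>; the dict result is assumed to carry its items under 'ports',
# matching the PortCollection naming above):
# ports = direct_link.list_ports(limit=50).get_result()
# for port in ports.get('ports', []):
#     print(port.get('id'), port.get('location_name'))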
#########################
# virtualConnections
#########################
def list_gateway_virtual_connections(self,
gateway_id: str,
**kwargs
) -> DetailedResponse:
"""
List virtual connections.
List a gateway's virtual connections. For a gateway in another account that has
virtual connections connecting to networks in this account, only the virtual
connections that connect to this account are returned.
:param str gateway_id: Direct Link gateway identifier.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse with `dict` result representing a `GatewayVirtualConnectionCollection` object
"""
if gateway_id is None:
raise ValueError('gateway_id must be provided')
headers = {}
sdk_headers = get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME,
service_version='V1',
operation_id='list_gateway_virtual_connections')
headers.update(sdk_headers)
params = {
'version': self.version
}
if 'headers' in kwargs:
headers.update(kwargs.get('headers'))
url = '/gateways/{0}/virtual_connections'.format(
*self.encode_path_vars(gateway_id))
request = self.prepare_request(method='GET',
url=url,
headers=headers,
params=params)
response = self.send(request)
return response
def create_gateway_virtual_connection(self,
gateway_id: str,
name: str,
type: str,
*,
network_id: str = None,
**kwargs
) -> DetailedResponse:
"""
Create virtual connection.
Create a virtual connection to the specified network.
:param str gateway_id: Direct Link gateway identifier.
:param str name: The user-defined name for this virtual connection.
Virtual connection names are unique within a gateway. This is the name of
the virtual connection itself, the network being connected may have its own
name attribute.
:param str type: The type of virtual connection.
:param str network_id: (optional) Unique identifier of the target network.
For type=vpc virtual connections this is the CRN of the target VPC. This
field does not apply to type=classic connections.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse with `dict` result representing a `GatewayVirtualConnection` object
"""
if gateway_id is None:
raise ValueError('gateway_id must be provided')
if name is None:
raise ValueError('name must be provided')
if type is | |
"▁Bun": 7672,
"▁Missouri": 7673,
"▁Four": 7674,
"▁UV": 7675,
"▁SO": 7676,
"▁Zlatan": 7677,
"▁Larry": 7678,
"if": 7679,
"vor": 7680,
"XU": 7681,
"▁Security": 7682,
"-80": 7683,
"▁Baker": 7684,
"/2000": 7685,
"▁Alfred": 7686,
"▁Bug": 7687,
"ios": 7688,
"BL": 7689,
"board": 7690,
"yp": 7691,
"▁Kings": 7692,
"tico": 7693,
"▁British": 7694,
"650": 7695,
"▁mafi": 7696,
"KL": 7697,
"plo": 7698,
"▁30-40": 7699,
"yz": 7700,
"▁Quin": 7701,
"ike": 7702,
"ks": 7703,
"▁Toni": 7704,
"▁Gui": 7705,
"▁boot": 7706,
"▁Blatt": 7707,
"▁Santiago": 7708,
"▁Schu": 7709,
"MI": 7710,
"▁Vision": 7711,
"glo": 7712,
"▁Everest": 7713,
"▁anti": 7714,
"7,9": 7715,
"and": 7716,
"VO": 7717,
"888": 7718,
"▁1934": 7719,
"▁1-4": 7720,
"rid": 7721,
"▁Guatemala": 7722,
"/2003": 7723,
"▁Julie": 7724,
"▁Case": 7725,
"mel": 7726,
"▁Fat": 7727,
"▁Bis": 7728,
"istan": 7729,
"▁Manhattan": 7730,
"▁Range": 7731,
"▁MSN": 7732,
"▁Avengers": 7733,
"▁Pal": 7734,
"ase": 7735,
"▁1935": 7736,
"▁Hell": 7737,
"▁Hilton": 7738,
"́": 7739,
"▁Haut": 7740,
"▁Craig": 7741,
"▁nit": 7742,
"▁XD": 7743,
"heng": 7744,
"bre": 7745,
"20%": 7746,
"any": 7747,
"▁Albania": 7748,
"▁Fans": 7749,
"▁Indiana": 7750,
"▁Dom": 7751,
"▁Data": 7752,
"8,7": 7753,
"737": 7754,
"▁Silicon": 7755,
"port": 7756,
"▁Kel": 7757,
"ent": 7758,
"▁Rosa": 7759,
"rata": 7760,
"▁Spec": 7761,
"▁Award": 7762,
"▁Bosni": 7763,
"▁950": 7764,
"̣": 7765,
"kra": 7766,
"▁Gmail": 7767,
"▁testosteron": 7768,
"▁UFO": 7769,
"ksin": 7770,
"▁Hussein": 7771,
"▁Ob": 7772,
"▁Pala": 7773,
"quo": 7774,
"▁studio": 7775,
"▁Brook": 7776,
"chá": 7777,
"atti": 7778,
"COM": 7779,
"▁Style": 7780,
"▁Cin": 7781,
"▁Levi": 7782,
"▁Wikipedia": 7783,
"▁Skype": 7784,
"▁Oliver": 7785,
"▁Graham": 7786,
"▁sê": 7787,
"▁coca": 7788,
"hydrat": 7789,
"▁Turk": 7790,
"▁Kawasaki": 7791,
"▁Wy": 7792,
"bull": 7793,
"▁RO": 7794,
"▁nè": 7795,
"▁caro": 7796,
"▁English": 7797,
"▁Lip": 7798,
"feld": 7799,
"set": 7800,
"▁test": 7801,
"▁Vie": 7802,
"ela": 7803,
"bis": 7804,
"▁AR": 7805,
"quin": 7806,
"bom": 7807,
"▁Explorer": 7808,
"ij": 7809,
"▁Rossi": 7810,
"▁Lenin": 7811,
"▁Petr": 7812,
"our": 7813,
"▁Hydro": 7814,
"▁Wit": 7815,
"ean": 7816,
"▁(*)": 7817,
"▁March": 7818,
"TI": 7819,
"imi": 7820,
"▁143": 7821,
"2001": 7822,
"▁LP": 7823,
"▁Ong": 7824,
"▁2014)": 7825,
"▁ac": 7826,
"REA": 7827,
"▁Bir": 7828,
"tner": 7829,
"▁Jer": 7830,
"World": 7831,
"Ự": 7832,
"▁Hand": 7833,
"▁XP": 7834,
"▁2015-2016": 7835,
"▁1932": 7836,
"get": 7837,
"▁Rachel": 7838,
"▁Muller": 7839,
"▁sul": 7840,
"ib": 7841,
"▁Vidal": 7842,
"▁Foundation": 7843,
"▁Miranda": 7844,
"▁IN": 7845,
"My": 7846,
"▁153": 7847,
"TO": 7848,
"quez": 7849,
"▁click": 7850,
"put": 7851,
"bet": 7852,
"agan": 7853,
"▁menu": 7854,
"sama": 7855,
"onal": 7856,
"▁br": 7857,
"▁Laura": 7858,
"▁Bit": 7859,
"ance": 7860,
"▁Loe": 7861,
"uti": 7862,
"fera": 7863,
"▁Doha": 7864,
"▁310": 7865,
"Malaysia": 7866,
"▁Sean": 7867,
"▁Cross": 7868,
"-36": 7869,
"▁Challenge": 7870,
"▁Dark": 7871,
"▁Sheikh": 7872,
"▁212": 7873,
"▁147": 7874,
"▁Nathan": 7875,
"▁Mohammad": 7876,
"-2007": 7877,
"war": 7878,
"▁Masa": 7879,
"▁from": 7880,
"wr": 7881,
"\\": 7882,
"▁NY": 7883,
"dl": 7884,
"▁Novo": 7885,
"NCH": 7886,
"▁Heart": 7887,
"kel": 7888,
"lever": 7889,
"960": 7890,
"▁Ché": 7891,
"▁Happy": 7892,
"nak": 7893,
"etta": 7894,
"Pad": 7895,
"▁1933": 7896,
"DG": 7897,
"▁block": 7898,
"▁Lisbon": 7899,
"ee": 7900,
"▁1931": 7901,
"▁Premium": 7902,
"▁NC": 7903,
"-70": 7904,
"▁Eugen": 7905,
"shu": 7906,
"000": 7907,
"wick": 7908,
"▁Dio": 7909,
"LL": 7910,
"hel": 7911,
"▁sofa": 7912,
"▁Start": 7913,
"▁4.2": 7914,
"▁9-10": 7915,
"▁Fiorentina": 7916,
"▁Chip": 7917,
"lai": 7918,
"YP": 7919,
"▁See": 7920,
"eu": 7921,
"▁Sne": 7922,
"CI": 7923,
"esc": 7924,
"ball": 7925,
"▁Scan": 7926,
"▁Made": 7927,
"1995": 7928,
"Philip": 7929,
"▁FM": 7930,
"680": 7931,
"▁mail": 7932,
"zur": 7933,
"▁33%": 7934,
"▁Indo": 7935,
"▁Glo": 7936,
"▁158": 7937,
"▁Bul": 7938,
"▁Hamilton": 7939,
"▁8.1": 7940,
"▁Abbas": 7941,
"9,6": 7942,
"khar": 7943,
"▁Campbell": 7944,
"▁Dis": 7945,
"▁Wong": 7946,
"▁System": 7947,
"125": 7948,
"▁Sharp": 7949,
"▁162": 7950,
"break": 7951,
"▁Hoo": 7952,
"▁Alexandr": 7953,
"▁148": 7954,
"Cola": 7955,
"▁CL": 7956,
"▁Nicole": 7957,
"▁Fra": 7958,
"▁IQ": 7959,
"ậ": 7960,
"▁6-8": 7961,
"▁Hunt": 7962,
"▁Anth": 7963,
"▁(3)": 7964,
"Book": 7965,
"▁OP": 7966,
"▁Raj": 7967,
"▁iTunes": 7968,
"▁State": 7969,
"sar": 7970,
"▁off": 7971,
"▁Brooklyn": 7972,
"ambia": 7973,
"life": 7974,
"▁Berlusconi": 7975,
"▁Fort": 7976,
"▁Lou": 7977,
"Ợ": 7978,
"▁Charlotte": 7979,
"▁Plan": 7980,
"▁Linux": 7981,
"Ju": 7982,
"wol": 7983,
"2003": 7984,
"Mac": 7985,
"▁Bros": 7986,
"▁Sierra": 7987,
"379": 7988,
"▁Roberts": 7989,
"▁Sanders": 7990,
"▁Kabul": 7991,
"▁Aires": 7992,
"▁Horn": 7993,
"GG": 7994,
"▁Pier": 7995,
"▁Helen": 7996,
"kka": 7997,
"KK": 7998,
"ctus": 7999,
"nick": 8000,
"▁Champ": 8001,
"▁Valde": 8002,
"ión": 8003,
"Tam": 8004,
"ctic": 8005,
"▁Sak": 8006,
"tham": 8007,
"chel": 8008,
"▁Renault": 8009,
"▁par": 8010,
"hop": 8011,
"Tri": 8012,
"▁DM": 8013,
"chter": 8014,
"▁520": 8015,
"▁Stre": 8016,
"▁Sali": 8017,
"▁balla": 8018,
"▁oli": 8019,
"▁Rhy": 8020,
"11)": 8021,
"koha": 8022,
"Ò": 8023,
"▁Matteo": 8024,
"ech": 8025,
"▁Brit": 8026,
"pula": 8027,
"▁174": 8028,
"▁concert": 8029,
"▁Pot": 8030,
"thro": 8031,
"▁178": 8032,
"WE": 8033,
"▁430": 8034,
"▁Panasonic": 8035,
"▁Guy": 8036,
"▁Vor": 8037,
"▁Tara": 8038,
"▁Test": 8039,
"▁Hard": 8040,
"▁Sp": 8041,
"▁36.": 8042,
"▁Parker": 8043,
"▁How": 8044,
"ett": 8045,
"▁Nelson": 8046,
"▁Band": 8047,
"Fa": 8048,
"▁teaser": 8049,
"▁XV": 8050,
"▁Make": 8051,
"mit": 8052,
"Cycl": 8053,
"▁Mog": 8054,
"▁Gear": 8055,
"10)": 8056,
"▁hip": 8057,
"▁Dou": 8058,
"-150": 8059,
"▁PP": 8060,
"▁Hindu": 8061,
"zen": 8062,
"▁Kay": 8063,
"News": 8064,
"▁1917": 8065,
"▁Britney": 8066,
"▁Tah": 8067,
"+3": 8068,
"▁Bag": 8069,
"▁10-12": 8070,
"cat": 8071,
"oni": 8072,
"▁Pak": 8073,
"▁Design": 8074,
"AF": 8075,
"watch": 8076,
"▁Vic": 8077,
"2000": 8078,
"VAS": 8079,
"▁Kid": 8080,
"▁198": 8081,
"xia": 8082,
"▁Demi": 8083,
"▁gluco": 8084,
"▁corti": 8085,
"dine": 8086,
"▁KV": 8087,
"▁code": 8088,
"cán": 8089,
"▁Mode": 8090,
"▁Ag": 8091,
"▁pH": 8092,
"▁CBS": 8093,
"DR": 8094,
"▁Jam": 8095,
"Land": 8096,
"▁Muhammad": 8097,
"world": 8098,
"▁1925": 8099,
"hol": 8100,
"tura": 8101,
"▁Sto": 8102,
"760": 8103,
"▁Aaron": 8104,
"Par": 8105,
"asia": 8106,
"▁Rin": 8107,
"▁Sach": 8108,
"550": 8109,
"▁Buenos": 8110,
"od": 8111,
"▁Dic": 8112,
"▁1912": 8113,
"▁34.": 8114,
"hman": 8115,
"eva": 8116,
"dd": 8117,
"pot": 8118,
"▁Orlando": 8119,
"sland": 8120,
"▁Point": 8121,
"nôm": 8122,
"ets": 8123,
"264": 8124,
"▁Gerard": 8125,
"▁Limited": 8126,
"▁Epi": 8127,
"gin": 8128,
"-1)": 8129,
"▁Share": 8130,
"▁Wisconsin": 8131,
"rak": 8132,
"▁DK": 8133,
"▁Control": 8134,
"▁Sound": 8135,
"▁smart": 8136,
"vada": 8137,
"inka": 8138,
"▁Chun": 8139,
"▁Philipp": 8140,
"▁Gha": 8141,
"▁Scu": 8142,
"▁Pink": 8143,
"▁Lei": 8144,
"/01": 8145,
"▁Lung": 8146,
"▁Nice": 8147,
"▁cup": 8148,
"card": 8149,
"chip": 8150,
"enne": 8151,
"ison": 8152,
"1993": 8153,
"▁Interes": 8154,
"El": 8155,
"cket": 8156,
"▁Rak": 8157,
"kes": 8158,
"/09/": 8159,
"anum": 8160,
"tv": 8161,
"Ge": 8162,
"QQ": 8163,
"▁1.1": 8164,
"bs": 8165,
"far": 8166,
"▁149": 8167,
"▁Eye": 8168,
"▁1914": 8169,
"▁Falk": 8170,
"▁nylon": 8171,
"lé": 8172,
"/18": 8173,
"▁Knight": 8174,
"▁Lata": 8175,
"TY": 8176,
"▁Lion": 8177,
"▁Wann": 8178,
"▁Main": 8179,
"illy": 8180,
"1996": 8181,
"cut": 8182,
"▁Pia": 8183,
"▁BM": 8184,
"101": 8185,
"▁Seven": 8186,
"lee": 8187,
"▁Aqua": 8188,
"▁Peugeot": 8189,
"▁Corporation": 8190,
"▁Wind": 8191,
"–": 8192,
"▁Now": 8193,
"▁Report": 8194,
"kira": 8195,
"▁AM": 8196,
"▁box": 8197,
"9,9": 8198,
"Men": 8199,
"9,8": 8200,
"▁Spur": 8201,
"▁Papua": 8202,
"AA": 8203,
"▁159": 8204,
"ties": 8205,
"▁Sou": 8206,
"▁Karim": 8207,
"▁Stockholm": 8208,
"Cloud": 8209,
"▁Nintendo": 8210,
"▁gel": 8211,
"▁Hil": 8212,
"▁1922": 8213,
"▁Xbox": 8214,
"▁microSD": 8215,
"ow": 8216,
"1,7": 8217,
"▁Drive": 8218,
"▁Raja": 8219,
"icum": 8220,
"▁Catherine": 8221,
"▁comment": 8222,
"▁Sil": 8223,
"kura": 8224,
"▁Are": 8225,
"▁Hongkong": 8226,
"/07/": 8227,
"▁171": 8228,
"ding": 8229,
"▁Av": 8230,
"dom": 8231,
"▁Xiao": 8232,
"tron": 8233,
"hard": 8234,
"isha": 8235,
"NET": 8236,
"▁Vel": 8237,
"▁Rein": 8238,
"NJ": 8239,
"▁KP": 8240,
"▁Block": 8241,
"yle": 8242,
"rida": 8243,
"cant": 8244,
"llus": 8245,
"▁toilet": 8246,
"▁2012)": 8247,
"▁460": 8248,
"▁Sebastian": 8249,
"Cor": 8250,
"▁Spider": 8251,
"▁1500": 8252,
"ISA": 8253,
"▁Lima": 8254,
"▁1800": 8255,
"BRE": 8256,
"▁ne": 8257,
"▁Hend": 8258,
"▁Research": 8259,
"▁Visa": 8260,
"fol": 8261,
"bil": 8262,
"illo": 8263,
"▁Mans": 8264,
"1,8": 8265,
"▁Final": 8266,
"▁Joan": 8267,
"▁Haram": 8268,
"▁Ele": 8269,
"▁love": 8270,
"▁1918": 8271,
"dra": 8272,
"▁Hid": 8273,
"zin": 8274,
"CW": 8275,
"▁1928": 8276,
"hur": 8277,
"▁Cr": 8278,
"▁Brother": 8279,
"▁Nano": 8280,
"dow": 8281,
"vel": 8282,
"▁MI": 8283,
"ppa": 8284,
"▁oz": 8285,
"lá": 8286,
"▁Bry": 8287,
"polis": 8288,
"ym": 8289,
"▁Casa": 8290,
"▁Kati": 8291,
"▁Malta": 8292,
"mino": 8293,
"ddin": 8294,
"670": 8295,
"▁Pru": 8296,
"▁Technology": 8297,
"▁Fir": 8298,
"kova": 8299,
"cast": 8300,
"▁McDonald": 8301,
"▁blue": 8302,
"dale": 8303,
"bike": 8304,
"sine": 8305,
"7.0": 8306,
"eko": 8307,
"▁Ol": 8308,
"-33": 8309,
"▁Grace": 8310,
"▁Birmingham": 8311,
| |
import copy
import logging
import random
logging.getLogger('transformers').setLevel(level=logging.WARNING)
from nltk.tokenize import sent_tokenize
import torch
import tqdm
from transformers import BertForMaskedLM, BertTokenizer, AdamW, get_linear_schedule_with_warmup
from utils import (
BertInput,
Defaults,
batch_data,
mask_tokens_evenly,
mask_tokens_randomly,
get_input_tensors,
determine_correctness,
measure_relative,
measure_improve,
clean_text,
truncate_list_of_lists,
truncate_sentence_and_summary,
NOT_MASKED,
TOKEN_TYPE_A,
LABEL_IGNORE,
BERT_MAX_TOKENS,
P_TOKEN_REPLACE,
P_TOKEN_ORIGINAL,
TOKEN_REPLACE_RANGE,
)
class Blanc:
"""An abstract superclass containing shared functionality between BlancHelp and BlancTune.
measure ('relative' or 'improve') selects how the success of inference is measured.
Append '-counts' to also return the raw counts: 'relative-counts' or 'improve-counts'.
"""
def __init__(
self,
gap=Defaults.gap,
base=Defaults.base,
model_name=Defaults.model_name,
measure=Defaults.measure,
min_token_length_normal=Defaults.min_token_length_normal,
min_token_length_lead=Defaults.min_token_length_lead,
min_token_length_followup=Defaults.min_token_length_followup,
device=Defaults.device,
inference_batch_size=Defaults.inference_batch_size,
inference_mask_evenly=Defaults.inference_mask_evenly,
len_sent_allow_cut=Defaults.len_sent_allow_cut,
):
"""This class should not be instantiated directly: instead use BlancHelp or BlancTune"""
self.gap = gap
self.base = base
self.model_name = model_name
self.measure = measure
self.min_token_length_normal = min_token_length_normal
self.min_token_length_lead = min_token_length_lead
self.min_token_length_followup = min_token_length_followup
self.device = device
self.inference_batch_size = inference_batch_size
self.inference_mask_evenly = inference_mask_evenly
self.len_sent_allow_cut = len_sent_allow_cut
self.model_tokenizer = BertTokenizer.from_pretrained(model_name)
def eval_once(self, doc, summary):
"""Calculate the BLANC score for a single doc with a single summary.
Args:
doc (str): The input document
summary (str): The input summary for the input document
Returns:
score (float): The BLANC score for the input
"""
(doc_scores, total_unks) = self.eval_summaries_for_docs([doc], [[summary]])
((score,),) = doc_scores  # single doc, single summary
return score, total_unks
def eval_pairs(self, docs, summaries):
"""Calculate the BLANC score for multiple docs, each with a single summary
Args:
docs (List[str]): A list of input documents
summaries (List[str]): The input summary for each input document
Returns:
scores (List[float]): The BLANC scores for the inputs
"""
doc_summaries = [[summary] for summary in summaries]
full_scores, total_unks = self.eval_summaries_for_docs(docs, doc_summaries)
scores = [score for (score,) in full_scores]  # each doc has exactly one summary
return scores, total_unks
def eval_summaries_for_docs(self, docs, doc_summaries):
"""Calculate the BLANC score for multiple docs, each with multiple summaries
Args:
docs (List[str]): A list of input documents
doc_summaries (List[List[str]]): A list of summaries for every input document
Returns:
scores (List[List[float]]): A list of blanc scores corresponding to each summary for
each document
"""
raise NotImplementedError()
def get_inputs_for_sentence(self, sent_tokens, summary_tokens):
"""Used by subclasses to specify inference inputs corresponding to a sentence
Args:
sent_tokens (List[str]): list of tokens corresponding to sentence
summary_tokens (List[str]): list of tokens corresponding to a summary
Returns:
inputs (List[BertInput]): a list of masked token inputs to BERT
answers (List[Dict[int, str]]): a list of "answer" dicts, where each answer dict maps
token indices corresponding to masked tokens back to their original token.
"""
raise NotImplementedError()
def mask_and_infer(self, model, docs, doc_summaries, loading_bar=True, sep=None):
"""Run the given model on masked versions of the provided doc_summaries and collect model
output
Args:
model (BertForMaskedLM): a BERT for masked language modeling torch model
docs (List[str]): A list of input documents
doc_summaries (List[List[str]]): A list of summaries for every input document
loading_bar (bool): whether or not to use a tqdm loading bar to show progress
sep (str): Separator between the inference help (summary) and a sentence from the doc
Returns:
all_outputs (List[List[List[Dict[int, str]]]]): for each doc, for each summary for the
doc, for each input sequence for the summary, we have a dict mapping indices to
model predictions
all_answers (List[List[List[Dict[int, str]]]]): for each doc, for each summary for the
doc, for each input sequence for the summary, we have a dict mapping indices to
original tokens
"""
# Prepare inputs
all_inputs, all_answers = [], []
for doc, summaries in zip(docs, doc_summaries):
doc_inputs, doc_answers = [], []
for summary in summaries:
summary_inputs, summary_answers = self.get_inference_inputs(doc, summary, sep)
doc_inputs.append(summary_inputs)
doc_answers.append(summary_answers)
all_inputs.append(doc_inputs)
all_answers.append(doc_answers)
# Run inference in batches
inputs_per_summary_per_doc = [
[len(inputs) for inputs in summary_input] for summary_input in all_inputs
]
collapsed_inputs = sum(sum(all_inputs, []), [])
batched_inputs = batch_data(collapsed_inputs, self.inference_batch_size)
iterator = tqdm.tqdm(batched_inputs, disable=not loading_bar)
batched_outputs = [self.run_inference_batch(model, batch) for batch in iterator]
collapsed_outputs = sum(batched_outputs, [])
# Regroup outputs
i = 0
all_outputs = []
for inputs_per_summary in inputs_per_summary_per_doc:
doc_outputs = []
for num_inputs in inputs_per_summary:
doc_outputs.append(collapsed_outputs[i : i + num_inputs])
i += num_inputs
all_outputs.append(doc_outputs)
return all_outputs, all_answers
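# Regrouping sketch (hypothetical toy shapes): if two docs hold [2, 1] and
# [3] inputs respectively, `collapsed_inputs` has length 6 after the double
# `sum(..., [])` flattening, and the loop above rebuilds
# [[out0, out1], [out2]] and [[out3, out4, out5]] so that outputs line up
# with `all_answers` index-for-index.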
def get_inference_inputs(self, doc, summary=None, sep=None):
"""Get the inference inputs for a document, which possibly includes a summary
Args:
doc (str): an input document
summary (str): an optional input summary
sep (str): Separator between the inference help (summary) and a sentence from the doc
Returns:
summary_inputs (List[BertInput]): a list of BertInputs for inference
summary_answers (List[Dict[int, str]]): each dict maps token indices back to their
original token
"""
doc = clean_text(doc)
doc_sents = sent_tokenize(doc)
doc_sent_tokens = [self.model_tokenizer.tokenize(sent) for sent in doc_sents]
summary_sent_tokens = None
if summary:
summary = clean_text(summary)
summary_sents = sent_tokenize(summary)
summary_sent_tokens = [self.model_tokenizer.tokenize(sent) for sent in summary_sents]
if not summary_sent_tokens:
summary_sent_tokens = [[]]
len_sep = 0
if sep:
len_sep = len(sep)
summary_inputs, summary_answers = [], []
half_num_sents = len(doc_sent_tokens) / 2  # switch truncation side at the halfway point
truncate_bottom = True
for i_sent, sent_tokens in enumerate(doc_sent_tokens):
if i_sent > half_num_sents:
truncate_bottom = False
sent_tokens, summary_tokens = truncate_sentence_and_summary(
sent=sent_tokens,
summary=summary_sent_tokens,
len_sep=len_sep,
len_sent_allow_cut=self.len_sent_allow_cut,
truncate_bottom=truncate_bottom,
)
# now it is assured that everything fits into the allowed input size:
assert len(sent_tokens) + len(summary_tokens) + len_sep + 2 <= BERT_MAX_TOKENS
inputs, answers = self.get_inputs_for_sentence(sent_tokens, summary_tokens)
summary_inputs += inputs
summary_answers += answers
return summary_inputs, summary_answers
def assemble_inference_input(self, answers, sent_tokens, help_tokens=None, help_sep=None):
"""Given input tokens, assemble them into the tensors used by the model for inference
Args:
answers (Dict[int, str]): a mapping of input token indices to their original value
sent_tokens (List[str]): tokens corresponding to an input sentence
help_tokens (List[str]): tokens corresponding to an input summary or filler
help_sep (List[str]): tokens to put between the summary/filler and the sentence
Returns:
model_input (BertInput): an input to the BERT model
shifted_answers (Dict[int, str]): the input answers but with shifted indices that take
into account the summary/filler and starting CLS token
Raises:
ValueError: if the sentence itself is longer than the BERT_MAX_TOKENS limit, we raise
this error as opposed to truncating the sentence
"""
if not help_tokens:
help_tokens = []
if not help_sep:
help_sep = []
all_tokens = (
[self.model_tokenizer.cls_token]
+ help_tokens
+ help_sep
+ sent_tokens
+ [self.model_tokenizer.sep_token]
)
input_ids = self.model_tokenizer.convert_tokens_to_ids(all_tokens)
token_type_ids = [TOKEN_TYPE_A] * len(all_tokens)
attention_mask = [NOT_MASKED] * len(all_tokens)
offset = 1 + len(help_tokens) + len(help_sep)
shifted_answers = {}
for idx, token in answers.items():
shifted_answers[idx + offset] = token
model_input = BertInput(
input_ids=input_ids,
token_type_ids=token_type_ids,
attention_mask=attention_mask,
labels=None,
masked_idxs=list(shifted_answers.keys()),
)
return model_input, shifted_answers
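# Worked toy example (hypothetical tokens): with help_tokens=['a', 'dog'],
# help_sep=['-'] and sent_tokens=['it', '[MASK]', 'loudly'], the assembled
# sequence is ['[CLS]', 'a', 'dog', '-', 'it', '[MASK]', 'loudly', '[SEP]'];
# offset = 1 + 2 + 1 = 4, so an answer {1: 'barked'} is shifted to
# {5: 'barked'} to point at the same masked position.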
def run_inference_batch(self, model, batch):
"""Run an inference batch through the provided model
Args:
model (BertForMaskedLM): a BERT for masked language modeling torch model
batch (List[BertInput]): the input batch to run through the model
Returns:
all_predictions (List[Dict[int, str]]): predicted tokens for every masked token in
the inputs
"""
input_ids, attention_mask, token_type_ids, _ = get_input_tensors(
batch, device=self.device, tokenizer=self.model_tokenizer,
)
with torch.no_grad():
(model_output_batch,) = model(
input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids,
)
all_predictions = []
for model_input, model_output in zip(batch, model_output_batch):
predictions = {}
for idx in model_input.masked_idxs:
predicted_id = model_output[idx].argmax().item()
(predicted_token,) = self.model_tokenizer.convert_ids_to_tokens([predicted_id])
predictions[idx] = predicted_token
all_predictions.append(predictions)
return all_predictions
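# Note: model_output here is the (seq_len, vocab_size) logit matrix for a
# single input; the argmax over the vocab axis at each masked position is
# mapped back through the tokenizer to the predicted wordpiece string, which
# is later compared against the original token in the answers dict.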
def mask_input_tokens(self, tokens, is_finetune):
"""Given a list of tokens, produce maskings for them
Args:
tokens (List[str]): a sequence of wordpiece tokens
is_finetune (bool): whether or not these tokens are going to be used for finetuning
Returns:
masked_inputs (List[List[str]]): a list of token sequences, where each token sequence
contains some masked tokens.
all_answers (List[Dict[int, str]]): a list of "answer" dicts, where each answer dict maps
token indices corresponding to masked tokens back to their original token.
"""
if is_finetune:
even_masking = self.finetune_mask_evenly
else:
even_masking = self.inference_mask_evenly
min_token_lengths = (
self.min_token_length_normal,
self.min_token_length_lead,
self.min_token_length_followup,
)
if even_masking:
return mask_tokens_evenly(
tokens=tokens,
gap=self.gap,
min_token_lengths=min_token_lengths,
mask_token=self.model_tokenizer.mask_token,
)
else:
return mask_tokens_randomly(
tokens=tokens,
min_token_lengths=min_token_lengths,
mask_token=self.model_tokenizer.mask_token,
)
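# Masking sketch (behaviour as suggested by the BLANC paper; exact details
# live in utils.mask_tokens_evenly / mask_tokens_randomly): with e.g. gap=2,
# even masking yields ~gap copies of the token list, the i-th masking
# positions i, i+gap, i+2*gap, ... (subject to the min-token-length
# filters), so each eligible token is masked once across the copies; random
# masking instead masks each eligible token with a fixed probability.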
def judge_output(self, base_output, assisted_output, base_answers, assisted_answers):
"""Given a model's predicted tokens with and without assistance, as well as the correct
token predictions, produce the BLANC score
Args:
base_outputs (List[Dict[int, str]]): outputs without using "help" or "tune." Each list
represents a different input masking, and each dict maps indices to model
predictions.
assisted_outputs (List[Dict[int, str]]): outputs using "help" or "tune." Each list
represents a different input masking, and each dict maps indices to model
predictions.
base_answers (List[Dict[int, str]]): answers without using "help" or "tune." | |
fake_images=fake_images,
discriminator=self.Dis,
device=self.local_rank)
dis_acml_loss += self.LOSS.maxgp_lambda * maxgp_loss
# apply LeCam reg. for data-efficient training if self.LOSS.apply_lecam is set to True
if self.LOSS.apply_lecam:
if self.DDP:
real_adv_output = torch.cat(losses.GatherLayer.apply(real_dict["adv_output"]), dim=0)
fake_adv_output = torch.cat(losses.GatherLayer.apply(fake_dict["adv_output"]), dim=0)
else:
real_adv_output, fake_adv_output = real_dict["adv_output"], fake_dict["adv_output"]
self.lecam_ema.update(torch.mean(real_adv_output).item(), "D_real", current_step)
self.lecam_ema.update(torch.mean(fake_adv_output).item(), "D_fake", current_step)
if current_step > self.LOSS.lecam_ema_start_iter:
lecam_loss = losses.lecam_reg(real_adv_output, fake_adv_output, self.lecam_ema)
else:
lecam_loss = torch.tensor(0., device=self.local_rank)
dis_acml_loss += self.LOSS.lecam_lambda*lecam_loss
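# For reference, the LeCam regularizer ("Regularizing Generative
# Adversarial Networks under Limited Data") is commonly written as
#     R_LC = E[relu(D(x) - ema_D_fake)^2] + E[relu(ema_D_real - D(G(z)))^2]
# where the ema_* terms are the exponential moving averages tracked above;
# losses.lecam_reg is assumed to implement this (or a close variant).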
if self.LOSS.apply_r1_reg and not self.is_stylegan:
self.r1_penalty = losses.cal_r1_reg(adv_output=real_dict["adv_output"], images=real_images, device=self.local_rank)
dis_acml_loss += self.LOSS.r1_lambda*self.r1_penalty
elif self.LOSS.apply_r1_reg and self.LOSS.r1_place == "inside_loop" and \
(self.OPTIMIZATION.d_updates_per_step*current_step + step_index) % self.STYLEGAN.d_reg_interval == 0:
real_images.requires_grad_(True)
real_dict = self.Dis(self.AUG.series_augment(real_images), real_labels)
self.r1_penalty = losses.stylegan_cal_r1_reg(adv_output=real_dict["adv_output"],
images=real_images)
dis_acml_loss += self.STYLEGAN.d_reg_interval*self.LOSS.r1_lambda*self.r1_penalty
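# R1 sketch: in its standard stand-alone form (losses.cal_r1_reg /
# losses.stylegan_cal_r1_reg may differ in details such as scaling):
#     grads = torch.autograd.grad(real_dict["adv_output"].sum(), real_images,
#                                 create_graph=True)[0]
#     r1 = 0.5 * grads.pow(2).reshape(grads.size(0), -1).sum(1).mean()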
if self.AUG.apply_ada or self.AUG.apply_apa:
self.dis_sign_real += torch.tensor((real_dict["adv_output"].sign().sum().item(),
self.OPTIMIZATION.batch_size),
device=self.local_rank)
self.dis_logit_real += torch.tensor((real_dict["adv_output"].sum().item(),
self.OPTIMIZATION.batch_size),
device=self.local_rank)
# adjust gradients for applying the gradient accumulation trick
dis_acml_loss = dis_acml_loss / self.OPTIMIZATION.acml_steps
batch_counter += 1
# accumulate gradients of the discriminator
if self.RUN.mixed_precision and not self.is_stylegan:
self.scaler.scale(dis_acml_loss).backward()
else:
dis_acml_loss.backward()
# update the discriminator using the pre-defined optimizer
if self.RUN.mixed_precision and not self.is_stylegan:
self.scaler.step(self.OPTIMIZATION.d_optimizer)
self.scaler.update()
else:
self.OPTIMIZATION.d_optimizer.step()
if self.LOSS.apply_r1_reg and self.LOSS.r1_place == "outside_loop" and \
(self.OPTIMIZATION.d_updates_per_step*current_step + step_index) % self.STYLEGAN.d_reg_interval == 0:
self.OPTIMIZATION.d_optimizer.zero_grad()
for acml_index in range(self.OPTIMIZATION.acml_steps):
real_images = real_image_basket[batch_counter - acml_index - 1].to(self.local_rank, non_blocking=True)
real_labels = real_label_basket[batch_counter - acml_index - 1].to(self.local_rank, non_blocking=True)
# blur images for stylegan3-r
if self.MODEL.backbone == "stylegan3" and self.STYLEGAN.stylegan3_cfg == "stylegan3-r" and self.blur_init_sigma != "N/A":
blur_sigma = max(1 - (self.effective_batch_size * current_step) / (self.blur_fade_kimg * 1e3), 0) * self.blur_init_sigma
blur_size = np.floor(blur_sigma * 3)
if blur_size > 0:
f = torch.arange(-blur_size, blur_size + 1, device=real_images.device).div(blur_sigma).square().neg().exp2()
real_images = upfirdn2d.filter2d(real_images, f / f.sum())
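# (blur sketch: f is a discrete bell-shaped kernel 2^(-(x/sigma)^2) of
# radius ~3*sigma, normalized before filtering; blur_sigma fades linearly
# to 0 over the first blur_fade_kimg thousand images, after which this
# branch becomes a no-op)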
if self.AUG.apply_apa:
real_images = apa_aug.apply_apa_aug(real_images, fake_images.detach(), self.aa_p, self.local_rank)
real_images.requires_grad_(True)
real_dict = self.Dis(self.AUG.series_augment(real_images), real_labels)
self.r1_penalty = losses.stylegan_cal_r1_reg(adv_output=real_dict["adv_output"], images=real_images) + \
misc.enable_allreduce(real_dict)
self.r1_penalty *= self.STYLEGAN.d_reg_interval*self.LOSS.r1_lambda/self.OPTIMIZATION.acml_steps
self.r1_penalty.backward()
if self.AUG.apply_ada or self.AUG.apply_apa:
self.dis_sign_real += torch.tensor((real_dict["adv_output"].sign().sum().item(),
self.OPTIMIZATION.batch_size),
device=self.local_rank)
self.dis_logit_real += torch.tensor((real_dict["adv_output"].sum().item(),
self.OPTIMIZATION.batch_size),
device=self.local_rank)
self.OPTIMIZATION.d_optimizer.step()
# apply ada heuristics
if (self.AUG.apply_ada or self.AUG.apply_apa) and self.aa_target is not None and current_step % self.aa_interval == 0:
if self.DDP: dist.all_reduce(self.dis_sign_real, op=dist.ReduceOp.SUM, group=self.group)
heuristic = (self.dis_sign_real[0] / self.dis_sign_real[1]).item()
adjust = np.sign(heuristic - self.aa_target) * (self.dis_sign_real[1].item()) / (self.aa_kimg * 1000)
self.aa_p = min(torch.as_tensor(1.), max(self.aa_p + adjust, torch.as_tensor(0.)))
if self.AUG.apply_ada: self.AUG.series_augment.p.copy_(torch.as_tensor(self.aa_p))
self.dis_sign_real_log.copy_(self.dis_sign_real), self.dis_sign_fake_log.copy_(self.dis_sign_fake)
self.dis_logit_real_log.copy_(self.dis_logit_real), self.dis_logit_fake_log.copy_(self.dis_logit_fake)
self.dis_sign_real.mul_(0), self.dis_sign_fake.mul_(0)
self.dis_logit_real.mul_(0), self.dis_logit_fake.mul_(0)
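# ADA heuristic in brief: r_t = E[sign(D(real))] estimates discriminator
# overfitting; the augmentation probability aa_p is nudged by a fixed-size
# step so that r_t tracks aa_target over roughly aa_kimg thousand images,
# then clamped to [0, 1] before being copied into the augmentation pipeline.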
# clip weights to restrict the discriminator to satisfy 1-Lipschitz constraint
if self.LOSS.apply_wc:
for p in self.Dis.parameters():
p.data.clamp_(-self.LOSS.wc_bound, self.LOSS.wc_bound)
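# Weight clipping is the original WGAN recipe for (crudely) enforcing the
# 1-Lipschitz constraint; gradient-penalty variants (e.g. maxgp above) are
# the usual modern replacement, since hard clamping limits critic capacity.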
if self.RUN.empty_cache:
torch.cuda.empty_cache()
return real_cond_loss, dis_acml_loss
# -----------------------------------------------------------------------------
# train Generator
# -----------------------------------------------------------------------------
def train_generator(self, current_step):
# make the GAN trainable before starting training
misc.make_GAN_trainable(self.Gen, self.Gen_ema, self.Dis)
# toggle gradients of the generator and discriminator
misc.toggle_grad(model=self.Dis, grad=False, num_freeze_layers=-1, is_stylegan=self.is_stylegan)
misc.toggle_grad(model=self.Gen, grad=True, num_freeze_layers=-1, is_stylegan=self.is_stylegan)
if self.MODEL.info_type in ["discrete", "both"]:
misc.toggle_grad(getattr(misc.peel_model(self.Dis), self.MISC.info_params[0]), grad=True, num_freeze_layers=-1, is_stylegan=False)
if self.MODEL.info_type in ["continuous", "both"]:
misc.toggle_grad(getattr(misc.peel_model(self.Dis), self.MISC.info_params[1]), grad=True, num_freeze_layers=-1, is_stylegan=False)
misc.toggle_grad(getattr(misc.peel_model(self.Dis), self.MISC.info_params[2]), grad=True, num_freeze_layers=-1, is_stylegan=False)
self.Gen.apply(misc.track_bn_statistics)
for step_index in range(self.OPTIMIZATION.g_updates_per_step):
self.OPTIMIZATION.g_optimizer.zero_grad()
for acml_step in range(self.OPTIMIZATION.acml_steps):
with torch.cuda.amp.autocast() if self.RUN.mixed_precision and not self.is_stylegan else misc.dummy_context_mgr() as mpc:
# sample fake images and labels from p(G(z), y)
fake_images, fake_labels, fake_images_eps, trsp_cost, ws, info_discrete_c, info_conti_c = sample.generate_images(
z_prior=self.MODEL.z_prior,
truncation_factor=-1.0,
batch_size=self.OPTIMIZATION.batch_size,
z_dim=self.MODEL.z_dim,
num_classes=self.DATA.num_classes,
y_sampler="totally_random",
radius=self.LOSS.radius,
generator=self.Gen,
discriminator=self.Dis,
is_train=True,
LOSS=self.LOSS,
RUN=self.RUN,
MODEL=self.MODEL,
device=self.local_rank,
generator_mapping=self.Gen_mapping,
generator_synthesis=self.Gen_synthesis,
is_stylegan=self.is_stylegan,
style_mixing_p=self.cfgs.STYLEGAN.style_mixing_p,
stylegan_update_emas=False,
cal_trsp_cost=True if self.LOSS.apply_lo else False)
# blur images for stylegan3-r
if self.MODEL.backbone == "stylegan3" and self.STYLEGAN.stylegan3_cfg == "stylegan3-r" and self.blur_init_sigma != "N/A":
blur_sigma = max(1 - (self.effective_batch_size * current_step) / (self.blur_fade_kimg * 1e3), 0) * self.blur_init_sigma
blur_size = np.floor(blur_sigma * 3)
if blur_size > 0:
f = torch.arange(-blur_size, blur_size + 1, device=fake_images.device).div(blur_sigma).square().neg().exp2()
fake_images = upfirdn2d.filter2d(fake_images, f / f.sum())
# apply differentiable augmentations if "apply_diffaug" is True
fake_images_ = self.AUG.series_augment(fake_images)
# calculate adv_output, embed, proxy, and cls_output using the discriminator
fake_dict = self.Dis(fake_images_, fake_labels)
if self.AUG.apply_ada or self.AUG.apply_apa:
# accumulate discriminator output information for logging
self.dis_sign_fake += torch.tensor((fake_dict["adv_output"].sign().sum().item(),
self.OPTIMIZATION.batch_size),
device=self.local_rank)
self.dis_logit_fake += torch.tensor((fake_dict["adv_output"].sum().item(),
self.OPTIMIZATION.batch_size),
device=self.local_rank)
# apply top-k sampling to discard the bottom 1-k samples, which are 'in-between modes'
if self.LOSS.apply_topk:
fake_dict["adv_output"] = torch.topk(fake_dict["adv_output"], int(self.topk)).values
# calculate adversarial loss defined by "LOSS.adv_loss"
if self.LOSS.adv_loss == "UNET":
gen_acml_loss = self.LOSS.g_loss(fake_dict["adv_output"], fake_dict["decoder_output"], self.DDP)
elif self.LOSS.adv_loss == "MH":
gen_acml_loss = self.LOSS.mh_lambda * self.LOSS.g_loss(DDP=self.DDP, **fake_dict, )
else:
gen_acml_loss = self.LOSS.g_loss(fake_dict["adv_output"], DDP=self.DDP)
# calculate class conditioning loss defined by "MODEL.d_cond_mtd"
if self.MODEL.d_cond_mtd in self.MISC.classifier_based_GAN:
fake_cond_loss = self.cond_loss(**fake_dict)
gen_acml_loss += self.LOSS.cond_lambda * fake_cond_loss
if self.MODEL.aux_cls_type == "TAC":
tac_gen_loss = -self.cond_loss_mi(**fake_dict)
gen_acml_loss += self.LOSS.tac_gen_lambda * tac_gen_loss
elif self.MODEL.aux_cls_type == "ADC":
adc_fake_dict = self.Dis(fake_images_, fake_labels, adc_fake=self.adc_fake)
adc_fake_cond_loss = -self.cond_loss(**adc_fake_dict)
gen_acml_loss += self.LOSS.cond_lambda * adc_fake_cond_loss
# apply feature matching regularization to stabilize adversarial dynamics
if self.LOSS.apply_fm:
real_image_basket, real_label_basket = self.sample_data_basket()
real_images = real_image_basket[0].to(self.local_rank, non_blocking=True)
real_labels = real_label_basket[0].to(self.local_rank, non_blocking=True)
real_images_ = self.AUG.series_augment(real_images)
real_dict = self.Dis(real_images_, real_labels)
mean_match_loss = self.fm_loss(real_dict["h"].detach(), fake_dict["h"])
gen_acml_loss += self.LOSS.fm_lambda * mean_match_loss
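# Feature-matching sketch: self.fm_loss typically compares penultimate
# discriminator features, e.g. ||E[h_real] - E[h_fake]||^2, giving the
# generator a smoother target than raw adversarial logits (the exact form
# of self.fm_loss is defined elsewhere in this class).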
# add transport cost for latent optimization training
if self.LOSS.apply_lo:
gen_acml_loss += self.LOSS.lo_lambda * trsp_cost
# apply latent consistency regularization for generating diverse images
if self.LOSS.apply_zcr:
fake_zcr_loss = -1 * self.l2_loss(fake_images, fake_images_eps)
gen_acml_loss += self.LOSS.g_lambda * fake_zcr_loss
if self.MODEL.info_type in ["discrete", "both"]:
dim = self.MODEL.info_dim_discrete_c
self.info_discrete_loss = 0.0
for info_c in range(self.MODEL.info_num_discrete_c):
self.info_discrete_loss += self.ce_loss(
fake_dict["info_discrete_c_logits"][:, info_c*dim: dim*(info_c+1)],
info_discrete_c[:, info_c: info_c+1].squeeze())
gen_acml_loss += self.LOSS.infoGAN_loss_discrete_lambda*self.info_discrete_loss + misc.enable_allreduce(fake_dict)
if self.MODEL.info_type in ["continuous", "both"]:
self.info_conti_loss = losses.normal_nll_loss(info_conti_c, fake_dict["info_conti_mu"], fake_dict["info_conti_var"])
gen_acml_loss += self.LOSS.infoGAN_loss_conti_lambda*self.info_conti_loss + misc.enable_allreduce(fake_dict)
# adjust gradients for applying the gradient accumulation trick
gen_acml_loss = gen_acml_loss / self.OPTIMIZATION.acml_steps
# accumulate gradients of the generator
if self.RUN.mixed_precision and not self.is_stylegan:
self.scaler.scale(gen_acml_loss).backward()
else:
gen_acml_loss.backward()
# update the generator using the pre-defined optimizer
if self.RUN.mixed_precision and not self.is_stylegan:
self.scaler.step(self.OPTIMIZATION.g_optimizer)
self.scaler.update()
else:
self.OPTIMIZATION.g_optimizer.step()
# apply path length regularization
if self.STYLEGAN.apply_pl_reg and (self.OPTIMIZATION.g_updates_per_step*current_step + step_index) % self.STYLEGAN.g_reg_interval == 0:
self.OPTIMIZATION.g_optimizer.zero_grad()
for acml_index in range(self.OPTIMIZATION.acml_steps):
fake_images, fake_labels, fake_images_eps, trsp_cost, ws, _, _ = sample.generate_images(
z_prior=self.MODEL.z_prior,
truncation_factor=-1.0,
batch_size=self.OPTIMIZATION.batch_size // 2,
z_dim=self.MODEL.z_dim,
num_classes=self.DATA.num_classes,
y_sampler="totally_random",
radius=self.LOSS.radius,
generator=self.Gen,
discriminator=self.Dis,
is_train=True,
LOSS=self.LOSS,
RUN=self.RUN,
MODEL=self.MODEL,
device=self.local_rank,
generator_mapping=self.Gen_mapping,
generator_synthesis=self.Gen_synthesis,
is_stylegan=self.is_stylegan,
style_mixing_p=self.cfgs.STYLEGAN.style_mixing_p,
stylegan_update_emas=False,
cal_trsp_cost=True if self.LOSS.apply_lo else False)
# blur images for stylegan3-r
if self.MODEL.backbone == "stylegan3" and self.STYLEGAN.stylegan3_cfg == "stylegan3-r" and self.blur_init_sigma != "N/A":
blur_sigma = max(1 - (self.effective_batch_size * current_step) / (self.blur_fade_kimg * 1e3), 0) * self.blur_init_sigma
blur_size = np.floor(blur_sigma * 3)
if blur_size > 0:
f = torch.arange(-blur_size, blur_size + 1, device=fake_images.device).div(blur_sigma).square().neg().exp2()
fake_images = upfirdn2d.filter2d(fake_images, f / f.sum())
self.pl_reg_loss = self.pl_reg.cal_pl_reg(fake_images=fake_images, ws=ws) + fake_images[:, 0, 0, 0].mean()*0  # zero-valued term keeps fake_images in the autograd graph (needed for DDP)
self.pl_reg_loss *= self.STYLEGAN.g_reg_interval/self.OPTIMIZATION.acml_steps
self.pl_reg_loss.backward()
self.OPTIMIZATION.g_optimizer.step()
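# Path length regularization (StyleGAN2, lazy form): penalizes deviation of
# the image-space change per unit step in w-space from its running average,
# encouraging a smoother latent-to-image mapping; it is computed on a half
# batch only every g_reg_interval steps, hence the rescaling above.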
# if ema is True: update parameters of the Gen_ema in adaptive way
if self.MODEL.apply_g_ema:
self.ema.update(current_step)
if self.RUN.empty_cache:
torch.cuda.empty_cache()
return gen_acml_loss
# -----------------------------------------------------------------------------
# log training statistics
# -----------------------------------------------------------------------------
def log_train_statistics(self, current_step, real_cond_loss, gen_acml_loss, dis_acml_loss):
self.wandb_step = current_step + 1
if self.MODEL.d_cond_mtd in self.MISC.classifier_based_GAN:
cls_loss = real_cond_loss.item()
else:
cls_loss = "N/A"
log_message = LOG_FORMAT.format(
step=current_step + 1,
progress=(current_step + 1) / self.OPTIMIZATION.total_steps,
elapsed=misc.elapsed_time(self.start_time),
gen_loss=gen_acml_loss.item(),
dis_loss=dis_acml_loss.item(),
cls_loss=cls_loss,
topk=int(self.topk) if self.LOSS.apply_topk else "N/A",
aa_p=self.aa_p if self.AUG.apply_ada or self.AUG.apply_apa else "N/A",
)
self.logger.info(log_message)
# save loss values in wandb event file and .npz format
loss_dict = {
"gen_loss": gen_acml_loss.item(),
"dis_loss": dis_acml_loss.item(),
"cls_loss": 0.0 if cls_loss == "N/A" else cls_loss,
}
wandb.log(loss_dict, step=self.wandb_step)
save_dict = misc.accm_values_convert_dict(list_dict=self.loss_list_dict,
value_dict=loss_dict,
step=current_step + 1,
interval=self.RUN.print_every)
misc.save_dict_npy(directory=join(self.RUN.save_dir, "statistics", self.run_name),
name="losses",
dictionary=save_dict)
if self.AUG.apply_ada or self.AUG.apply_apa:
dis_output_dict = {
"dis_sign_real": (self.dis_sign_real_log[0]/self.dis_sign_real_log[1]).item(),
"dis_sign_fake": (self.dis_sign_fake_log[0]/self.dis_sign_fake_log[1]).item(),
"dis_logit_real": (self.dis_logit_real_log[0]/self.dis_logit_real_log[1]).item(),
"dis_logit_fake": (self.dis_logit_fake_log[0]/self.dis_logit_fake_log[1]).item(),
}
wandb.log(dis_output_dict, step=self.wandb_step)
wandb.log({"aa_p": self.aa_p.item()}, step=self.wandb_step)
infoGAN_dict = {}
if self.MODEL.info_type in ["discrete", "both"]:
infoGAN_dict["info_discrete_loss"] = self.info_discrete_loss.item()
if self.MODEL.info_type in ["continuous", "both"]:
infoGAN_dict["info_conti_loss"] = self.info_conti_loss.item()
wandb.log(infoGAN_dict, step=self.wandb_step)
if self.LOSS.apply_r1_reg:
wandb.log({"r1_reg_loss": self.r1_penalty.item()}, step=self.wandb_step)
if self.STYLEGAN.apply_pl_reg:
wandb.log({"pl_reg_loss": self.pl_reg_loss.item()}, step=self.wandb_step)
# calculate the spectral norms of all weights in the generator for monitoring purposes
if self.MODEL.apply_g_sn:
gen_sigmas = misc.calculate_all_sn(self.Gen, prefix="Gen")
wandb.log(gen_sigmas, step=self.wandb_step)
# calculate the spectral norms of all weights in the discriminator for monitoring purposes
if self.MODEL.apply_d_sn:
dis_sigmas = misc.calculate_all_sn(self.Dis, prefix="Dis")
wandb.log(dis_sigmas, step=self.wandb_step)
# -----------------------------------------------------------------------------
# visualize fake images for monitoring purposes.
# -----------------------------------------------------------------------------
def visualize_fake_images(self, num_cols, current_step):
if self.global_rank == 0:
self.logger.info("Visualize (num_rows x 8) fake image canvans.")
if self.gen_ctlr.standing_statistics:
self.gen_ctlr.std_stat_counter += 1
requires_grad = self.LOSS.apply_lo or self.RUN.langevin_sampling
with torch.no_grad() if not requires_grad else misc.dummy_context_mgr() as ctx:
misc.make_GAN_untrainable(self.Gen, self.Gen_ema, self.Dis)
generator, generator_mapping, generator_synthesis = self.gen_ctlr.prepare_generator()
fake_images, fake_labels, _, _, _, _, _ = sample.generate_images(z_prior=self.MODEL.z_prior,
truncation_factor=self.RUN.truncation_factor,
batch_size=self.OPTIMIZATION.batch_size,
z_dim=self.MODEL.z_dim,
num_classes=self.DATA.num_classes,
y_sampler="totally_random",
radius="N/A",
generator=generator,
discriminator=self.Dis,
is_train=False,
LOSS=self.LOSS,
RUN=self.RUN,
MODEL=self.MODEL,
device=self.local_rank,
is_stylegan=self.is_stylegan,
generator_mapping=generator_mapping,
generator_synthesis=generator_synthesis,
style_mixing_p=0.0,
| |
<reponame>mbattistello/lambda_converters<filename>pythonocc/lib/OCC/BRepMAT2d.py
# This file was automatically generated by SWIG (http://www.swig.org).
# Version 2.0.10
#
# Do not make changes to this file unless you know what you are doing--modify
# the SWIG interface file instead.
from sys import version_info
if version_info >= (3,0,0):
new_instancemethod = lambda func, inst, cls: _BRepMAT2d.SWIG_PyInstanceMethod_New(func)
else:
from new import instancemethod as new_instancemethod
if version_info >= (2,6,0):
def swig_import_helper():
from os.path import dirname
import imp
fp = None
try:
fp, pathname, description = imp.find_module('_BRepMAT2d', [dirname(__file__)])
except ImportError:
import _BRepMAT2d
return _BRepMAT2d
if fp is not None:
try:
_mod = imp.load_module('_BRepMAT2d', fp, pathname, description)
finally:
fp.close()
return _mod
_BRepMAT2d = swig_import_helper()
del swig_import_helper
else:
import _BRepMAT2d
del version_info
try:
_swig_property = property
except NameError:
pass # Python < 2.2 doesn't have 'property'.
def _swig_setattr_nondynamic(self,class_type,name,value,static=1):
if (name == "thisown"): return self.this.own(value)
if (name == "this"):
if type(value).__name__ == 'SwigPyObject':
self.__dict__[name] = value
return
method = class_type.__swig_setmethods__.get(name,None)
if method: return method(self,value)
if (not static):
self.__dict__[name] = value
else:
raise AttributeError("You cannot add attributes to %s" % self)
def _swig_setattr(self,class_type,name,value):
return _swig_setattr_nondynamic(self,class_type,name,value,0)
def _swig_getattr(self,class_type,name):
if (name == "thisown"): return self.this.own()
method = class_type.__swig_getmethods__.get(name,None)
if method: return method(self)
raise AttributeError(name)
def _swig_repr(self):
try: strthis = "proxy of " + self.this.__repr__()
except: strthis = ""
return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)
try:
_object = object
_newclass = 1
except AttributeError:
class _object : pass
_newclass = 0
def _swig_setattr_nondynamic_method(set):
def set_attr(self,name,value):
if (name == "thisown"): return self.this.own(value)
if hasattr(self,name) or (name == "this"):
set(self,name,value)
else:
raise AttributeError("You cannot add attributes to %s" % self)
return set_attr
class SwigPyIterator(object):
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
def __init__(self, *args, **kwargs): raise AttributeError("No constructor defined - class is abstract")
__repr__ = _swig_repr
__swig_destroy__ = _BRepMAT2d.delete_SwigPyIterator
def __iter__(self): return self
SwigPyIterator.value = new_instancemethod(_BRepMAT2d.SwigPyIterator_value,None,SwigPyIterator)
SwigPyIterator.incr = new_instancemethod(_BRepMAT2d.SwigPyIterator_incr,None,SwigPyIterator)
SwigPyIterator.decr = new_instancemethod(_BRepMAT2d.SwigPyIterator_decr,None,SwigPyIterator)
SwigPyIterator.distance = new_instancemethod(_BRepMAT2d.SwigPyIterator_distance,None,SwigPyIterator)
SwigPyIterator.equal = new_instancemethod(_BRepMAT2d.SwigPyIterator_equal,None,SwigPyIterator)
SwigPyIterator.copy = new_instancemethod(_BRepMAT2d.SwigPyIterator_copy,None,SwigPyIterator)
SwigPyIterator.next = new_instancemethod(_BRepMAT2d.SwigPyIterator_next,None,SwigPyIterator)
SwigPyIterator.__next__ = new_instancemethod(_BRepMAT2d.SwigPyIterator___next__,None,SwigPyIterator)
SwigPyIterator.previous = new_instancemethod(_BRepMAT2d.SwigPyIterator_previous,None,SwigPyIterator)
SwigPyIterator.advance = new_instancemethod(_BRepMAT2d.SwigPyIterator_advance,None,SwigPyIterator)
SwigPyIterator.__eq__ = new_instancemethod(_BRepMAT2d.SwigPyIterator___eq__,None,SwigPyIterator)
SwigPyIterator.__ne__ = new_instancemethod(_BRepMAT2d.SwigPyIterator___ne__,None,SwigPyIterator)
SwigPyIterator.__iadd__ = new_instancemethod(_BRepMAT2d.SwigPyIterator___iadd__,None,SwigPyIterator)
SwigPyIterator.__isub__ = new_instancemethod(_BRepMAT2d.SwigPyIterator___isub__,None,SwigPyIterator)
SwigPyIterator.__add__ = new_instancemethod(_BRepMAT2d.SwigPyIterator___add__,None,SwigPyIterator)
SwigPyIterator.__sub__ = new_instancemethod(_BRepMAT2d.SwigPyIterator___sub__,None,SwigPyIterator)
SwigPyIterator_swigregister = _BRepMAT2d.SwigPyIterator_swigregister
SwigPyIterator_swigregister(SwigPyIterator)
import OCC.Standard
import OCC.MAT
import OCC.MMgt
import OCC.TCollection
import OCC.Geom2d
import OCC.gp
import OCC.GeomAbs
import OCC.TColgp
import OCC.TColStd
import OCC.Bisector
import OCC.math
import OCC.IntRes2d
import OCC.TopoDS
import OCC.TopLoc
import OCC.TopAbs
import OCC.TColGeom2d
def register_handle(handle, base_object):
"""
Inserts the handle into the base object to
prevent memory corruption in certain cases
"""
try:
if base_object.IsKind("Standard_Transient"):
base_object.thisHandle = handle
base_object.thisown = False
except:
pass
class BRepMAT2d_BisectingLocus(object):
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
def __init__(self, *args):
"""
:rtype: None
"""
_BRepMAT2d.BRepMAT2d_BisectingLocus_swiginit(self,_BRepMAT2d.new_BRepMAT2d_BisectingLocus(*args))
def Compute(self, *args):
"""
:param anExplo:
:type anExplo: BRepMAT2d_Explorer &
:param LineIndex: default value is 1
:type LineIndex: int
:param aSide: default value is MAT_Left
:type aSide: MAT_Side
:rtype: None
"""
return _BRepMAT2d.BRepMAT2d_BisectingLocus_Compute(self, *args)
def IsDone(self, *args):
"""
:rtype: bool
"""
return _BRepMAT2d.BRepMAT2d_BisectingLocus_IsDone(self, *args)
def Graph(self, *args):
"""
:rtype: Handle_MAT_Graph
"""
return _BRepMAT2d.BRepMAT2d_BisectingLocus_Graph(self, *args)
def NumberOfContours(self, *args):
"""
:rtype: int
"""
return _BRepMAT2d.BRepMAT2d_BisectingLocus_NumberOfContours(self, *args)
def NumberOfElts(self, *args):
"""
:param IndLine:
:type IndLine: int
:rtype: int
"""
return _BRepMAT2d.BRepMAT2d_BisectingLocus_NumberOfElts(self, *args)
def NumberOfSections(self, *args):
"""
:param IndLine:
:type IndLine: int
:param Index:
:type Index: int
:rtype: int
"""
return _BRepMAT2d.BRepMAT2d_BisectingLocus_NumberOfSections(self, *args)
def BasicElt(self, *args):
"""
:param IndLine:
:type IndLine: int
:param Index:
:type Index: int
:rtype: Handle_MAT_BasicElt
"""
return _BRepMAT2d.BRepMAT2d_BisectingLocus_BasicElt(self, *args)
def GeomElt(self, *args):
"""
:param aBasicElt:
:type aBasicElt: Handle_MAT_BasicElt &
:rtype: Handle_Geom2d_Geometry
:param aNode:
:type aNode: Handle_MAT_Node &
:rtype: gp_Pnt2d
"""
return _BRepMAT2d.BRepMAT2d_BisectingLocus_GeomElt(self, *args)
def GeomBis(self, *args):
"""
:param anArc:
:type anArc: Handle_MAT_Arc &
:param Reverse:
:type Reverse: bool
:rtype: Bisector_Bisec
"""
return _BRepMAT2d.BRepMAT2d_BisectingLocus_GeomBis(self, *args)
__swig_destroy__ = _BRepMAT2d.delete_BRepMAT2d_BisectingLocus
BRepMAT2d_BisectingLocus.Compute = new_instancemethod(_BRepMAT2d.BRepMAT2d_BisectingLocus_Compute,None,BRepMAT2d_BisectingLocus)
BRepMAT2d_BisectingLocus.IsDone = new_instancemethod(_BRepMAT2d.BRepMAT2d_BisectingLocus_IsDone,None,BRepMAT2d_BisectingLocus)
BRepMAT2d_BisectingLocus.Graph = new_instancemethod(_BRepMAT2d.BRepMAT2d_BisectingLocus_Graph,None,BRepMAT2d_BisectingLocus)
BRepMAT2d_BisectingLocus.NumberOfContours = new_instancemethod(_BRepMAT2d.BRepMAT2d_BisectingLocus_NumberOfContours,None,BRepMAT2d_BisectingLocus)
BRepMAT2d_BisectingLocus.NumberOfElts = new_instancemethod(_BRepMAT2d.BRepMAT2d_BisectingLocus_NumberOfElts,None,BRepMAT2d_BisectingLocus)
BRepMAT2d_BisectingLocus.NumberOfSections = new_instancemethod(_BRepMAT2d.BRepMAT2d_BisectingLocus_NumberOfSections,None,BRepMAT2d_BisectingLocus)
BRepMAT2d_BisectingLocus.BasicElt = new_instancemethod(_BRepMAT2d.BRepMAT2d_BisectingLocus_BasicElt,None,BRepMAT2d_BisectingLocus)
BRepMAT2d_BisectingLocus.GeomElt = new_instancemethod(_BRepMAT2d.BRepMAT2d_BisectingLocus_GeomElt,None,BRepMAT2d_BisectingLocus)
BRepMAT2d_BisectingLocus.GeomBis = new_instancemethod(_BRepMAT2d.BRepMAT2d_BisectingLocus_GeomBis,None,BRepMAT2d_BisectingLocus)
BRepMAT2d_BisectingLocus_swigregister = _BRepMAT2d.BRepMAT2d_BisectingLocus_swigregister
BRepMAT2d_BisectingLocus_swigregister(BRepMAT2d_BisectingLocus)
class BRepMAT2d_DataMapIteratorOfDataMapOfBasicEltShape(OCC.TCollection.TCollection_BasicMapIterator):
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
def __init__(self, *args):
"""
:rtype: None
:param aMap:
:type aMap: BRepMAT2d_DataMapOfBasicEltShape &
:rtype: None
"""
_BRepMAT2d.BRepMAT2d_DataMapIteratorOfDataMapOfBasicEltShape_swiginit(self,_BRepMAT2d.new_BRepMAT2d_DataMapIteratorOfDataMapOfBasicEltShape(*args))
def Initialize(self, *args):
"""
:param aMap:
:type aMap: BRepMAT2d_DataMapOfBasicEltShape &
:rtype: None
"""
return _BRepMAT2d.BRepMAT2d_DataMapIteratorOfDataMapOfBasicEltShape_Initialize(self, *args)
def Key(self, *args):
"""
:rtype: Handle_MAT_BasicElt
"""
return _BRepMAT2d.BRepMAT2d_DataMapIteratorOfDataMapOfBasicEltShape_Key(self, *args)
def Value(self, *args):
"""
:rtype: TopoDS_Shape
"""
return _BRepMAT2d.BRepMAT2d_DataMapIteratorOfDataMapOfBasicEltShape_Value(self, *args)
__swig_destroy__ = _BRepMAT2d.delete_BRepMAT2d_DataMapIteratorOfDataMapOfBasicEltShape
BRepMAT2d_DataMapIteratorOfDataMapOfBasicEltShape.Initialize = new_instancemethod(_BRepMAT2d.BRepMAT2d_DataMapIteratorOfDataMapOfBasicEltShape_Initialize,None,BRepMAT2d_DataMapIteratorOfDataMapOfBasicEltShape)
BRepMAT2d_DataMapIteratorOfDataMapOfBasicEltShape.Key = new_instancemethod(_BRepMAT2d.BRepMAT2d_DataMapIteratorOfDataMapOfBasicEltShape_Key,None,BRepMAT2d_DataMapIteratorOfDataMapOfBasicEltShape)
BRepMAT2d_DataMapIteratorOfDataMapOfBasicEltShape.Value = new_instancemethod(_BRepMAT2d.BRepMAT2d_DataMapIteratorOfDataMapOfBasicEltShape_Value,None,BRepMAT2d_DataMapIteratorOfDataMapOfBasicEltShape)
BRepMAT2d_DataMapIteratorOfDataMapOfBasicEltShape_swigregister = _BRepMAT2d.BRepMAT2d_DataMapIteratorOfDataMapOfBasicEltShape_swigregister
BRepMAT2d_DataMapIteratorOfDataMapOfBasicEltShape_swigregister(BRepMAT2d_DataMapIteratorOfDataMapOfBasicEltShape)
class BRepMAT2d_DataMapIteratorOfDataMapOfShapeSequenceOfBasicElt(OCC.TCollection.TCollection_BasicMapIterator):
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
def __init__(self, *args):
"""
:rtype: None
:param aMap:
:type aMap: BRepMAT2d_DataMapOfShapeSequenceOfBasicElt &
:rtype: None
"""
_BRepMAT2d.BRepMAT2d_DataMapIteratorOfDataMapOfShapeSequenceOfBasicElt_swiginit(self,_BRepMAT2d.new_BRepMAT2d_DataMapIteratorOfDataMapOfShapeSequenceOfBasicElt(*args))
def Initialize(self, *args):
"""
:param aMap:
:type aMap: BRepMAT2d_DataMapOfShapeSequenceOfBasicElt &
:rtype: None
"""
return _BRepMAT2d.BRepMAT2d_DataMapIteratorOfDataMapOfShapeSequenceOfBasicElt_Initialize(self, *args)
def Key(self, *args):
"""
:rtype: TopoDS_Shape
"""
return _BRepMAT2d.BRepMAT2d_DataMapIteratorOfDataMapOfShapeSequenceOfBasicElt_Key(self, *args)
def Value(self, *args):
"""
:rtype: BRepMAT2d_SequenceOfBasicElt
"""
return _BRepMAT2d.BRepMAT2d_DataMapIteratorOfDataMapOfShapeSequenceOfBasicElt_Value(self, *args)
__swig_destroy__ = _BRepMAT2d.delete_BRepMAT2d_DataMapIteratorOfDataMapOfShapeSequenceOfBasicElt
BRepMAT2d_DataMapIteratorOfDataMapOfShapeSequenceOfBasicElt.Initialize = new_instancemethod(_BRepMAT2d.BRepMAT2d_DataMapIteratorOfDataMapOfShapeSequenceOfBasicElt_Initialize,None,BRepMAT2d_DataMapIteratorOfDataMapOfShapeSequenceOfBasicElt)
BRepMAT2d_DataMapIteratorOfDataMapOfShapeSequenceOfBasicElt.Key = new_instancemethod(_BRepMAT2d.BRepMAT2d_DataMapIteratorOfDataMapOfShapeSequenceOfBasicElt_Key,None,BRepMAT2d_DataMapIteratorOfDataMapOfShapeSequenceOfBasicElt)
BRepMAT2d_DataMapIteratorOfDataMapOfShapeSequenceOfBasicElt.Value = new_instancemethod(_BRepMAT2d.BRepMAT2d_DataMapIteratorOfDataMapOfShapeSequenceOfBasicElt_Value,None,BRepMAT2d_DataMapIteratorOfDataMapOfShapeSequenceOfBasicElt)
BRepMAT2d_DataMapIteratorOfDataMapOfShapeSequenceOfBasicElt_swigregister = _BRepMAT2d.BRepMAT2d_DataMapIteratorOfDataMapOfShapeSequenceOfBasicElt_swigregister
BRepMAT2d_DataMapIteratorOfDataMapOfShapeSequenceOfBasicElt_swigregister(BRepMAT2d_DataMapIteratorOfDataMapOfShapeSequenceOfBasicElt)
class BRepMAT2d_DataMapNodeOfDataMapOfBasicEltShape(OCC.TCollection.TCollection_MapNode):
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
def __init__(self, *args):
"""
:param K:
:type K: Handle_MAT_BasicElt &
:param I:
:type I: TopoDS_Shape &
:param n:
:type n: TCollection_MapNodePtr &
:rtype: None
"""
_BRepMAT2d.BRepMAT2d_DataMapNodeOfDataMapOfBasicEltShape_swiginit(self,_BRepMAT2d.new_BRepMAT2d_DataMapNodeOfDataMapOfBasicEltShape(*args))
def Key(self, *args):
"""
:rtype: Handle_MAT_BasicElt
"""
return _BRepMAT2d.BRepMAT2d_DataMapNodeOfDataMapOfBasicEltShape_Key(self, *args)
def Value(self, *args):
"""
:rtype: TopoDS_Shape
"""
return _BRepMAT2d.BRepMAT2d_DataMapNodeOfDataMapOfBasicEltShape_Value(self, *args)
def GetHandle(self):
try:
return self.thisHandle
except:
self.thisHandle = Handle_BRepMAT2d_DataMapNodeOfDataMapOfBasicEltShape(self)
self.thisown = False
return self.thisHandle
__swig_destroy__ = _BRepMAT2d.delete_BRepMAT2d_DataMapNodeOfDataMapOfBasicEltShape
BRepMAT2d_DataMapNodeOfDataMapOfBasicEltShape.Key = new_instancemethod(_BRepMAT2d.BRepMAT2d_DataMapNodeOfDataMapOfBasicEltShape_Key,None,BRepMAT2d_DataMapNodeOfDataMapOfBasicEltShape)
BRepMAT2d_DataMapNodeOfDataMapOfBasicEltShape.Value = new_instancemethod(_BRepMAT2d.BRepMAT2d_DataMapNodeOfDataMapOfBasicEltShape_Value,None,BRepMAT2d_DataMapNodeOfDataMapOfBasicEltShape)
BRepMAT2d_DataMapNodeOfDataMapOfBasicEltShape_swigregister = _BRepMAT2d.BRepMAT2d_DataMapNodeOfDataMapOfBasicEltShape_swigregister
BRepMAT2d_DataMapNodeOfDataMapOfBasicEltShape_swigregister(BRepMAT2d_DataMapNodeOfDataMapOfBasicEltShape)
class Handle_BRepMAT2d_DataMapNodeOfDataMapOfBasicEltShape(OCC.TCollection.Handle_TCollection_MapNode):
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
def __init__(self, *args):
_BRepMAT2d.Handle_BRepMAT2d_DataMapNodeOfDataMapOfBasicEltShape_swiginit(self,_BRepMAT2d.new_Handle_BRepMAT2d_DataMapNodeOfDataMapOfBasicEltShape(*args))
# register the handle in the base object
if len(args) > 0:
register_handle(self, args[0])
DownCast = staticmethod(_BRepMAT2d.Handle_BRepMAT2d_DataMapNodeOfDataMapOfBasicEltShape_DownCast)
__swig_destroy__ = _BRepMAT2d.delete_Handle_BRepMAT2d_DataMapNodeOfDataMapOfBasicEltShape
Handle_BRepMAT2d_DataMapNodeOfDataMapOfBasicEltShape.Nullify = new_instancemethod(_BRepMAT2d.Handle_BRepMAT2d_DataMapNodeOfDataMapOfBasicEltShape_Nullify,None,Handle_BRepMAT2d_DataMapNodeOfDataMapOfBasicEltShape)
Handle_BRepMAT2d_DataMapNodeOfDataMapOfBasicEltShape.IsNull = new_instancemethod(_BRepMAT2d.Handle_BRepMAT2d_DataMapNodeOfDataMapOfBasicEltShape_IsNull,None,Handle_BRepMAT2d_DataMapNodeOfDataMapOfBasicEltShape)
Handle_BRepMAT2d_DataMapNodeOfDataMapOfBasicEltShape.GetObject = new_instancemethod(_BRepMAT2d.Handle_BRepMAT2d_DataMapNodeOfDataMapOfBasicEltShape_GetObject,None,Handle_BRepMAT2d_DataMapNodeOfDataMapOfBasicEltShape)
Handle_BRepMAT2d_DataMapNodeOfDataMapOfBasicEltShape_swigregister = _BRepMAT2d.Handle_BRepMAT2d_DataMapNodeOfDataMapOfBasicEltShape_swigregister
Handle_BRepMAT2d_DataMapNodeOfDataMapOfBasicEltShape_swigregister(Handle_BRepMAT2d_DataMapNodeOfDataMapOfBasicEltShape)
def Handle_BRepMAT2d_DataMapNodeOfDataMapOfBasicEltShape_DownCast(*args):
return _BRepMAT2d.Handle_BRepMAT2d_DataMapNodeOfDataMapOfBasicEltShape_DownCast(*args)
Handle_BRepMAT2d_DataMapNodeOfDataMapOfBasicEltShape_DownCast = _BRepMAT2d.Handle_BRepMAT2d_DataMapNodeOfDataMapOfBasicEltShape_DownCast
class BRepMAT2d_DataMapNodeOfDataMapOfShapeSequenceOfBasicElt(OCC.TCollection.TCollection_MapNode):
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
def __init__(self, *args):
"""
:param K:
:type K: TopoDS_Shape &
:param I:
:type I: BRepMAT2d_SequenceOfBasicElt &
:param n:
:type n: TCollection_MapNodePtr &
:rtype: None
"""
_BRepMAT2d.BRepMAT2d_DataMapNodeOfDataMapOfShapeSequenceOfBasicElt_swiginit(self,_BRepMAT2d.new_BRepMAT2d_DataMapNodeOfDataMapOfShapeSequenceOfBasicElt(*args))
def Key(self, *args):
"""
:rtype: TopoDS_Shape
"""
return _BRepMAT2d.BRepMAT2d_DataMapNodeOfDataMapOfShapeSequenceOfBasicElt_Key(self, *args)
def Value(self, *args):
"""
:rtype: BRepMAT2d_SequenceOfBasicElt
"""
return _BRepMAT2d.BRepMAT2d_DataMapNodeOfDataMapOfShapeSequenceOfBasicElt_Value(self, *args)
def GetHandle(self):
try:
return self.thisHandle
except:
self.thisHandle = Handle_BRepMAT2d_DataMapNodeOfDataMapOfShapeSequenceOfBasicElt(self)
self.thisown = False
return self.thisHandle
__swig_destroy__ = _BRepMAT2d.delete_BRepMAT2d_DataMapNodeOfDataMapOfShapeSequenceOfBasicElt
BRepMAT2d_DataMapNodeOfDataMapOfShapeSequenceOfBasicElt.Key = new_instancemethod(_BRepMAT2d.BRepMAT2d_DataMapNodeOfDataMapOfShapeSequenceOfBasicElt_Key,None,BRepMAT2d_DataMapNodeOfDataMapOfShapeSequenceOfBasicElt)
BRepMAT2d_DataMapNodeOfDataMapOfShapeSequenceOfBasicElt.Value = new_instancemethod(_BRepMAT2d.BRepMAT2d_DataMapNodeOfDataMapOfShapeSequenceOfBasicElt_Value,None,BRepMAT2d_DataMapNodeOfDataMapOfShapeSequenceOfBasicElt)
BRepMAT2d_DataMapNodeOfDataMapOfShapeSequenceOfBasicElt_swigregister = _BRepMAT2d.BRepMAT2d_DataMapNodeOfDataMapOfShapeSequenceOfBasicElt_swigregister
BRepMAT2d_DataMapNodeOfDataMapOfShapeSequenceOfBasicElt_swigregister(BRepMAT2d_DataMapNodeOfDataMapOfShapeSequenceOfBasicElt)
class Handle_BRepMAT2d_DataMapNodeOfDataMapOfShapeSequenceOfBasicElt(OCC.TCollection.Handle_TCollection_MapNode):
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
def __init__(self, *args):
_BRepMAT2d.Handle_BRepMAT2d_DataMapNodeOfDataMapOfShapeSequenceOfBasicElt_swiginit(self,_BRepMAT2d.new_Handle_BRepMAT2d_DataMapNodeOfDataMapOfShapeSequenceOfBasicElt(*args))
# register the handle in the base object
if len(args) > 0:
register_handle(self, args[0])
DownCast = staticmethod(_BRepMAT2d.Handle_BRepMAT2d_DataMapNodeOfDataMapOfShapeSequenceOfBasicElt_DownCast)
__swig_destroy__ = _BRepMAT2d.delete_Handle_BRepMAT2d_DataMapNodeOfDataMapOfShapeSequenceOfBasicElt
Handle_BRepMAT2d_DataMapNodeOfDataMapOfShapeSequenceOfBasicElt.Nullify = new_instancemethod(_BRepMAT2d.Handle_BRepMAT2d_DataMapNodeOfDataMapOfShapeSequenceOfBasicElt_Nullify,None,Handle_BRepMAT2d_DataMapNodeOfDataMapOfShapeSequenceOfBasicElt)
Handle_BRepMAT2d_DataMapNodeOfDataMapOfShapeSequenceOfBasicElt.IsNull = new_instancemethod(_BRepMAT2d.Handle_BRepMAT2d_DataMapNodeOfDataMapOfShapeSequenceOfBasicElt_IsNull,None,Handle_BRepMAT2d_DataMapNodeOfDataMapOfShapeSequenceOfBasicElt)
Handle_BRepMAT2d_DataMapNodeOfDataMapOfShapeSequenceOfBasicElt.GetObject = new_instancemethod(_BRepMAT2d.Handle_BRepMAT2d_DataMapNodeOfDataMapOfShapeSequenceOfBasicElt_GetObject,None,Handle_BRepMAT2d_DataMapNodeOfDataMapOfShapeSequenceOfBasicElt)
Handle_BRepMAT2d_DataMapNodeOfDataMapOfShapeSequenceOfBasicElt_swigregister = _BRepMAT2d.Handle_BRepMAT2d_DataMapNodeOfDataMapOfShapeSequenceOfBasicElt_swigregister
Handle_BRepMAT2d_DataMapNodeOfDataMapOfShapeSequenceOfBasicElt_swigregister(Handle_BRepMAT2d_DataMapNodeOfDataMapOfShapeSequenceOfBasicElt)
def Handle_BRepMAT2d_DataMapNodeOfDataMapOfShapeSequenceOfBasicElt_DownCast(*args):
return _BRepMAT2d.Handle_BRepMAT2d_DataMapNodeOfDataMapOfShapeSequenceOfBasicElt_DownCast(*args)
Handle_BRepMAT2d_DataMapNodeOfDataMapOfShapeSequenceOfBasicElt_DownCast = _BRepMAT2d.Handle_BRepMAT2d_DataMapNodeOfDataMapOfShapeSequenceOfBasicElt_DownCast
class BRepMAT2d_DataMapOfBasicEltShape(OCC.TCollection.TCollection_BasicMap):
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
def __init__(self, *args):
"""
:param NbBuckets: default value is 1
:type NbBuckets: int
:rtype: None
"""
_BRepMAT2d.BRepMAT2d_DataMapOfBasicEltShape_swiginit(self,_BRepMAT2d.new_BRepMAT2d_DataMapOfBasicEltShape(*args))
def Assign(self, *args):
"""
:param Other:
:type Other: BRepMAT2d_DataMapOfBasicEltShape &
:rtype: BRepMAT2d_DataMapOfBasicEltShape
"""
return _BRepMAT2d.BRepMAT2d_DataMapOfBasicEltShape_Assign(self, *args)
def Set(self, *args):
"""
:param Other:
:type Other: BRepMAT2d_DataMapOfBasicEltShape &
:rtype: BRepMAT2d_DataMapOfBasicEltShape
"""
return _BRepMAT2d.BRepMAT2d_DataMapOfBasicEltShape_Set(self, *args)
def ReSize(self, *args):
"""
:param NbBuckets:
:type NbBuckets: int
:rtype: None
"""
return _BRepMAT2d.BRepMAT2d_DataMapOfBasicEltShape_ReSize(self, *args)
def Clear(self, *args):
"""
:rtype: None
"""
return _BRepMAT2d.BRepMAT2d_DataMapOfBasicEltShape_Clear(self, *args)
def Bind(self, *args):
"""
:param K:
:type K: Handle_MAT_BasicElt &
:param I:
:type I: TopoDS_Shape &
:rtype: bool
"""
return _BRepMAT2d.BRepMAT2d_DataMapOfBasicEltShape_Bind(self, *args)
def IsBound(self, *args):
"""
:param K:
:type K: Handle_MAT_BasicElt &
:rtype: bool
"""
return _BRepMAT2d.BRepMAT2d_DataMapOfBasicEltShape_IsBound(self, *args)
def UnBind(self, *args):
"""
:param K:
:type K: Handle_MAT_BasicElt &
:rtype: bool
"""
return _BRepMAT2d.BRepMAT2d_DataMapOfBasicEltShape_UnBind(self, *args)
def Find(self, *args):
"""
:param K:
:type K: Handle_MAT_BasicElt &
:rtype: TopoDS_Shape
"""
return _BRepMAT2d.BRepMAT2d_DataMapOfBasicEltShape_Find(self, *args)
def ChangeFind(self, *args):
"""
:param K:
:type K: Handle_MAT_BasicElt &
:rtype: TopoDS_Shape
"""
return _BRepMAT2d.BRepMAT2d_DataMapOfBasicEltShape_ChangeFind(self, *args)
def Find1(self, *args):
"""
:param K:
:type K: Handle_MAT_BasicElt &
:rtype: Standard_Address
"""
return _BRepMAT2d.BRepMAT2d_DataMapOfBasicEltShape_Find1(self, *args)
def ChangeFind1(self, *args):
"""
:param K:
:type K: Handle_MAT_BasicElt &
:rtype: Standard_Address
"""
return _BRepMAT2d.BRepMAT2d_DataMapOfBasicEltShape_ChangeFind1(self, *args)
__swig_destroy__ = _BRepMAT2d.delete_BRepMAT2d_DataMapOfBasicEltShape
BRepMAT2d_DataMapOfBasicEltShape.Assign = new_instancemethod(_BRepMAT2d.BRepMAT2d_DataMapOfBasicEltShape_Assign,None,BRepMAT2d_DataMapOfBasicEltShape)
BRepMAT2d_DataMapOfBasicEltShape.Set = new_instancemethod(_BRepMAT2d.BRepMAT2d_DataMapOfBasicEltShape_Set,None,BRepMAT2d_DataMapOfBasicEltShape)
BRepMAT2d_DataMapOfBasicEltShape.ReSize = new_instancemethod(_BRepMAT2d.BRepMAT2d_DataMapOfBasicEltShape_ReSize,None,BRepMAT2d_DataMapOfBasicEltShape)
BRepMAT2d_DataMapOfBasicEltShape.Clear = new_instancemethod(_BRepMAT2d.BRepMAT2d_DataMapOfBasicEltShape_Clear,None,BRepMAT2d_DataMapOfBasicEltShape)
BRepMAT2d_DataMapOfBasicEltShape.Bind = new_instancemethod(_BRepMAT2d.BRepMAT2d_DataMapOfBasicEltShape_Bind,None,BRepMAT2d_DataMapOfBasicEltShape)
BRepMAT2d_DataMapOfBasicEltShape.IsBound = new_instancemethod(_BRepMAT2d.BRepMAT2d_DataMapOfBasicEltShape_IsBound,None,BRepMAT2d_DataMapOfBasicEltShape)
BRepMAT2d_DataMapOfBasicEltShape.UnBind = new_instancemethod(_BRepMAT2d.BRepMAT2d_DataMapOfBasicEltShape_UnBind,None,BRepMAT2d_DataMapOfBasicEltShape)
BRepMAT2d_DataMapOfBasicEltShape.Find = new_instancemethod(_BRepMAT2d.BRepMAT2d_DataMapOfBasicEltShape_Find,None,BRepMAT2d_DataMapOfBasicEltShape)
BRepMAT2d_DataMapOfBasicEltShape.ChangeFind = new_instancemethod(_BRepMAT2d.BRepMAT2d_DataMapOfBasicEltShape_ChangeFind,None,BRepMAT2d_DataMapOfBasicEltShape)
BRepMAT2d_DataMapOfBasicEltShape.Find1 = new_instancemethod(_BRepMAT2d.BRepMAT2d_DataMapOfBasicEltShape_Find1,None,BRepMAT2d_DataMapOfBasicEltShape)
BRepMAT2d_DataMapOfBasicEltShape.ChangeFind1 = new_instancemethod(_BRepMAT2d.BRepMAT2d_DataMapOfBasicEltShape_ChangeFind1,None,BRepMAT2d_DataMapOfBasicEltShape)
BRepMAT2d_DataMapOfBasicEltShape_swigregister = _BRepMAT2d.BRepMAT2d_DataMapOfBasicEltShape_swigregister
BRepMAT2d_DataMapOfBasicEltShape_swigregister(BRepMAT2d_DataMapOfBasicEltShape)
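# A minimal usage sketch of the map class above (comments only; the import
# path and the way a Handle_MAT_BasicElt key and a TopoDS_Shape value are
# obtained are assumptions that vary across pythonOCC versions):
#
#     from OCC.BRepMAT2d import BRepMAT2d_DataMapOfBasicEltShape
#     amap = BRepMAT2d_DataMapOfBasicEltShape(10)  # 10 initial buckets
#     amap.Bind(basic_elt_handle, shape)           # key: Handle_MAT_BasicElt, value: TopoDS_Shape
#     if amap.IsBound(basic_elt_handle):
#         bound_shape = amap.Find(basic_elt_handle)
#     amap.UnBind(basic_elt_handle)
#     amap.Clear()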
class BRepMAT2d_DataMapOfShapeSequenceOfBasicElt(OCC.TCollection.TCollection_BasicMap):
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def __init__(self, *args):
        """
        :param NbBuckets: default value is 1
        :type NbBuckets: int
        :rtype: None
        """
        _BRepMAT2d.BRepMAT2d_DataMapOfShapeSequenceOfBasicElt_swiginit(self,_BRepMAT2d.new_BRepMAT2d_DataMapOfShapeSequenceOfBasicElt(*args))
    def Assign(self, *args):
        """
        :param Other:
        :type Other: BRepMAT2d_DataMapOfShapeSequenceOfBasicElt &
        :rtype: BRepMAT2d_DataMapOfShapeSequenceOfBasicElt
        """
        return _BRepMAT2d.BRepMAT2d_DataMapOfShapeSequenceOfBasicElt_Assign(self, *args)
<filename>kernelmethods/operations.py
# -*- coding: utf-8 -*-
"""
This module implements the common kernel operations such as
- normalization of a kernel matrix (KM),
- centering (one- and two-sample cases),
- evaluating similarity, computing alignment,
- frobenius norms,
- linear combinations and
- checking whether a KM is PSD.
API
----
"""
import traceback
from warnings import warn
import numpy as np
from kernelmethods.config import KMNormError, KernelMethodsException
from kernelmethods.utils import contains_nan_inf, ensure_ndarray_1D
from numpy import multiply as elem_wise_multiply
from scipy.linalg import LinAlgError, eigh
def is_positive_semidefinite(sym_matrix,
                             tolerance=1e-6,
                             verbose=False):
    """
    Tests whether a given matrix is positive-semidefinite (PSD).

    A symmetric matrix is PSD if ALL its eigenvalues are >= 0 (non-negative).
    If any of its eigenvalues is negative, it is not PSD. This function
    accounts for numerical instabilities with a tolerance parameter.

    This function can also be called via the shorthand ``is_PSD()``.

    Parameters
    ----------
    sym_matrix : ndarray
        Matrix to be evaluated for PSD-ness

    tolerance : float
        Tolerance parameter to account for numerical instabilities in the
        eigenvalue computations (which can result in eigenvalues very
        slightly below 0)

    verbose : bool
        Flag to indicate whether to print the traceback in case of errors
        during the computation of the eigenvalues

    Returns
    -------
    psd : bool
        Flag indicating whether the matrix is PSD.
    """
    if not isinstance(sym_matrix, np.ndarray):
        raise TypeError('Input matrix must be in numpy array format!')
    if sym_matrix.shape[0] != sym_matrix.shape[1]:
        warn('Input matrix is not square, and hence not PSD')
        return False
    if not np.isclose(sym_matrix, sym_matrix.T).all():
        warn('Input matrix is not symmetric, and hence not PSD')
        return False
    try:
        eig_values = eigh(sym_matrix, eigvals_only=True)
    except LinAlgError:
        if verbose:
            traceback.print_exc()
        # LinAlgError is not re-raised here: it is only used to classify the
        # matrix as not PSD, which also makes this branch hard to trigger
        # from test cases
        print('LinAlgError raised - eigenvalue computation failed --> not PSD')
        psd = False
    except:
        if verbose:
            traceback.print_exc()
        warn('Unknown exception during eigenvalue computation --> not PSD')
        psd = False
    else:
        if verbose:
            print('Smallest eigenvalues are:\n'
                  '{}'.format(eig_values[:min(10, len(eig_values))]))
        if any(eig_values < -tolerance):  # note the negative sign before tolerance
            psd = False
        else:
            psd = True
    return psd
# shorter alias
is_PSD = is_positive_semidefinite
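# Usage sketch (illustrative only): a Gram matrix X.dot(X.T) is PSD by
# construction, whereas -I has strictly negative eigenvalues:
#
#     X = np.random.rand(20, 5)
#     assert is_PSD(X.dot(X.T))
#     assert not is_PSD(-np.eye(3))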
def center_km(KM):
    """
    Centers a given kernel matrix.

    Implements the definition according to Lemma 1 in Section 2.2 in
    Cortes, Corinna, <NAME>, and <NAME>, 2012, "Algorithms for
    Learning Kernels Based on Centered Alignment", Journal of Machine Learning
    Research 13(Mar): 795–828.

    Parameters
    ----------
    KM : ndarray
        Symmetric matrix to be centered.

    Returns
    -------
    centered_km : ndarray
        Centered kernel matrix
    """
    if isinstance(KM, np.ndarray):
        if KM.shape[0] == KM.shape[1]:
            n_rows = KM.shape[0]
        else:
            raise ValueError('Input matrix is not square!')
    else:
        raise ValueError('Unknown format for input matrix - '
                         'must be a square numpy ndarray')
    # directly initializing one_oneT without going through unnecessary matrix
    # products:
    #   vec_1s = np.ones((n_rows, 1))    # column vector of 1s
    #   one_oneT = vec_1s.dot(vec_1s.T)  # 1 dot 1T
    one_oneT = np.ones((n_rows, n_rows))
    Ic = np.eye(n_rows) - (one_oneT / n_rows)
    return Ic.dot(KM).dot(Ic)
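# Usage sketch (illustrative only): after centering, every row and column of
# the kernel matrix sums to ~0, since Ic projects out the all-ones direction:
#
#     K = np.random.rand(6, 6); K = K + K.T  # make it symmetric
#     Kc = center_km(K)
#     assert np.allclose(Kc.sum(axis=0), 0.0)
#     assert np.allclose(Kc.sum(axis=1), 0.0)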
def normalize_km(KM, method='cosine'):
    """
    Normalize a kernel matrix to have unit diagonal.

    Cosine normalization normalizes the kernel matrix to have unit diagonal.
    Implements the definition according to Section 5.1 (page 113) of
    Shawe-Taylor and Cristianini, "Kernel Methods for Pattern Analysis", 2004.

    The matrix must be square, and must come from a single sample: K(X,X),
    not K(X,Y).

    Parameters
    ----------
    KM : ndarray
        Symmetric matrix to be normalized

    method : str
        Method of normalization. Options: ``cosine`` only.

    Returns
    -------
    normed_km : ndarray
        Normalized kernel matrix
    """
    if KM.shape[0] != KM.shape[1]:
        raise ValueError('Input kernel matrix must be square! '
                         'i.e. K(X,X) must be generated from '
                         'inner products on a single sample X, '
                         'not an inner-product on two separate samples X and Y')
    try:
        method = method.lower()
        if method == 'cosine':
            km_diag = KM.diagonal()
            if np.isclose(km_diag, 0.0).any():
                raise KMNormError('Some diagonal entries in KM are [close to] '
                                  'zero - this results in infinite or NaN '
                                  'values during cosine normalization of KM!')
            # D = diag(1./sqrt(diag(K)))
            # normed_K = D * K * D
            _1bySqrtDiag = np.diagflat(1 / np.sqrt(km_diag))
            # note: @ is the matrix multiplication operator
            normed_km = _1bySqrtDiag @ KM @ _1bySqrtDiag
            # in case of two samples K(X, Y), the left- and right-most factors
            # must come from K(X,X) & K(Y,Y) respectively: see normalize_km_2sample
        else:
            raise NotImplementedError('normalization method {} is not '
                                      'implemented yet!'.format(method))
    except (KMNormError, KernelMethodsException):
        raise
    except:
        warn('Unable to normalize kernel matrix using method {}'.format(method))
        raise
    else:
        if contains_nan_inf(normed_km):
            warn('normalization of kernel matrix resulted in Inf / NaN '
                 'values - check your parameters and data!')
        return normed_km
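# Usage sketch (illustrative only): cosine normalization leaves unit entries
# on the diagonal, as each K[i, j] becomes K[i, j] / sqrt(K[i, i] * K[j, j]):
#
#     X = np.random.rand(8, 3)
#     K = X.dot(X.T)
#     assert np.allclose(normalize_km(K).diagonal(), 1.0)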
def normalize_km_2sample(cross_K_XY, diag_K_XX, diag_K_YY, method='cosine'):
    """
    Normalize a cross-kernel matrix K(X,Y) using the diagonals of K(X,X) and
    K(Y,Y).

    Implements a definition _similar_ to Section 5.1 (page 113) of
    Shawe-Taylor and Cristianini, "Kernel Methods for Pattern Analysis", 2004.

    Parameters
    ----------
    cross_K_XY : ndarray, 2D
        Matrix of inner-products for samples from X onto Y i.e. K(X,Y)

    diag_K_XX : array
        Diagonal from the matrix of inner-products for samples from X onto
        itself i.e. K(X,X).
        K(X,X) must NOT be normalized (otherwise its diagonal would be all 1s)

    diag_K_YY : array
        Diagonal from the matrix of inner-products for samples from Y onto
        itself i.e. K(Y,Y)

    Returns
    -------
    normed_km : ndarray
        Normalized version of K(X,Y)
        NOTE: K_XY may NOT have unit diagonal, as k(x,y) != sqrt(k(x,x))*sqrt(k(y,y))
    """
    if diag_K_XX.size != cross_K_XY.shape[0] or \
            cross_K_XY.shape[1] != diag_K_YY.size:
        raise ValueError('Shape mismatch for multiplication across the 3 kernel '
                         'matrices! The length of diag_K_XX must match the '
                         'number of rows in K_XY, and the number of columns in '
                         'K_XY must match the length of diag_K_YY.')
    method = method.lower()
    if method == 'cosine':
        if np.isclose(diag_K_XX, 0.0).any() or \
                np.isclose(diag_K_YY, 0.0).any():
            raise KMNormError('Some diagonal entries in one of the KMs are '
                              '[close to] zero - this results in infinite or '
                              'NaN values during cosine normalization of KM!')
        # using diagflat to explicitly construct a matrix from diag values
        diag_factor_xx = np.diagflat(1 / np.sqrt(diag_K_XX))
        diag_factor_yy = np.diagflat(1 / np.sqrt(diag_K_YY))
        # note: @ is the matrix multiplication operator
        normed_km = diag_factor_xx @ cross_K_XY @ diag_factor_yy
    else:
        raise NotImplementedError('Two-sample normalization method {} is not '
                                  'implemented yet!'.format(method))
    return normed_km
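# Usage sketch (illustrative only): normalize K(X, Y) with the *unnormalized*
# diagonals of K(X, X) and K(Y, Y):
#
#     X, Y = np.random.rand(10, 4), np.random.rand(7, 4)
#     K_xy = X.dot(Y.T)  # shape (10, 7)
#     normed = normalize_km_2sample(K_xy,
#                                   diag_K_XX=(X * X).sum(axis=1),
#                                   diag_K_YY=(Y * Y).sum(axis=1))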
def frobenius_product(A, B):
    r"""
    Computes the Frobenius product between two matrices of equal dimensions.

    <A, B>_F is equal to the sum of element-wise products between A and B.

    .. math::

        \langle \mathbf{A}, \mathbf{B} \rangle_F = \sum_{i,j} \mathbf{A}_{ij} \mathbf{B}_{ij}

    Parameters
    ----------
    A, B : ndarray
        Two matrices of equal dimensions to compute the product.

    Returns
    -------
    product : float
        Frobenius product
    """
    if A.shape != B.shape:
        raise ValueError('Dimensions of the two matrices must be the same '
                         'to compute the Frobenius product! They differ: {}, {}'
                         ''.format(A.shape, B.shape))
    return np.sum(elem_wise_multiply(A, B), axis=None)
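# Usage sketch (illustrative only): the Frobenius product equals
# trace(A.T @ B), which offers a quick sanity check:
#
#     A, B = np.random.rand(4, 4), np.random.rand(4, 4)
#     assert np.isclose(frobenius_product(A, B), np.trace(A.T @ B))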
def frobenius_norm(A):
    """Computes the Frobenius norm of a matrix A, which is the square root of
    its Frobenius product with itself.

    Parameters
    ----------
    A : ndarray
        Matrix to compute the norm of

    Returns
    -------
    norm : float
        Frobenius norm
    """
    return np.sqrt(frobenius_product(A, A))
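# Usage sketch (illustrative only): this matches numpy's built-in Frobenius
# norm:
#
#     A = np.random.rand(5, 3)
#     assert np.isclose(frobenius_norm(A), np.linalg.norm(A, 'fro'))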
def alignment_centered(km_one, km_two,
                       value_if_zero_division='raise',
                       centered_already=False):
    """
    Computes the centered alignment between two kernel matrices.
    (Alignment is computed on the centered kernel matrices.)

    Implements Definition 4 (kernel matrix alignment) from Section 2.3 in
    Cortes, Corinna, <NAME>, and <NAME>, 2012, "Algorithms for
    Learning Kernels Based on Centered Alignment", Journal of Machine Learning
    Research 13(Mar): 795–828.

    Parameters
    ----------
    km_one, km_two : KernelMatrix

    value_if_zero_division : str or float
        Determines the value of the alignment in case the norm of one of the
        two kernel matrices is close to zero, making the alignment impossible
        to compute. Default is 'raise', requesting to raise an exception.
        One could also choose 0.0, which assigns the lowest alignment,
        effectively discarding the matrix for ranking purposes.

    centered_already : bool
        Flag to indicate whether the input kernel matrices are already
        centered. If False, the input KMs will be centered.

    Returns
    -------
    centered_alignment : float
        Value of the centered alignment between the two kernel matrices
    """
    if km_one.shape != km_two.shape:
        raise ValueError('Dimensions of the two matrices must be the same '
                         'to compute their alignment! They differ: {}, {}'
                         ''.format(km_one.shape, km_two.shape))