hexsha
string | size
int64 | ext
string | lang
string | max_stars_repo_path
string | max_stars_repo_name
string | max_stars_repo_head_hexsha
string | max_stars_repo_licenses
list | max_stars_count
int64 | max_stars_repo_stars_event_min_datetime
string | max_stars_repo_stars_event_max_datetime
string | max_issues_repo_path
string | max_issues_repo_name
string | max_issues_repo_head_hexsha
string | max_issues_repo_licenses
list | max_issues_count
int64 | max_issues_repo_issues_event_min_datetime
string | max_issues_repo_issues_event_max_datetime
string | max_forks_repo_path
string | max_forks_repo_name
string | max_forks_repo_head_hexsha
string | max_forks_repo_licenses
list | max_forks_count
int64 | max_forks_repo_forks_event_min_datetime
string | max_forks_repo_forks_event_max_datetime
string | content
string | avg_line_length
float64 | max_line_length
int64 | alphanum_fraction
float64 | qsc_code_num_words_quality_signal
int64 | qsc_code_num_chars_quality_signal
float64 | qsc_code_mean_word_length_quality_signal
float64 | qsc_code_frac_words_unique_quality_signal
float64 | qsc_code_frac_chars_top_2grams_quality_signal
float64 | qsc_code_frac_chars_top_3grams_quality_signal
float64 | qsc_code_frac_chars_top_4grams_quality_signal
float64 | qsc_code_frac_chars_dupe_5grams_quality_signal
float64 | qsc_code_frac_chars_dupe_6grams_quality_signal
float64 | qsc_code_frac_chars_dupe_7grams_quality_signal
float64 | qsc_code_frac_chars_dupe_8grams_quality_signal
float64 | qsc_code_frac_chars_dupe_9grams_quality_signal
float64 | qsc_code_frac_chars_dupe_10grams_quality_signal
float64 | qsc_code_frac_chars_replacement_symbols_quality_signal
float64 | qsc_code_frac_chars_digital_quality_signal
float64 | qsc_code_frac_chars_whitespace_quality_signal
float64 | qsc_code_size_file_byte_quality_signal
float64 | qsc_code_num_lines_quality_signal
float64 | qsc_code_num_chars_line_max_quality_signal
float64 | qsc_code_num_chars_line_mean_quality_signal
float64 | qsc_code_frac_chars_alphabet_quality_signal
float64 | qsc_code_frac_chars_comments_quality_signal
float64 | qsc_code_cate_xml_start_quality_signal
float64 | qsc_code_frac_lines_dupe_lines_quality_signal
float64 | qsc_code_cate_autogen_quality_signal
float64 | qsc_code_frac_lines_long_string_quality_signal
float64 | qsc_code_frac_chars_string_length_quality_signal
float64 | qsc_code_frac_chars_long_word_length_quality_signal
float64 | qsc_code_frac_lines_string_concat_quality_signal
float64 | qsc_code_cate_encoded_data_quality_signal
float64 | qsc_code_frac_chars_hex_words_quality_signal
float64 | qsc_code_frac_lines_prompt_comments_quality_signal
float64 | qsc_code_frac_lines_assert_quality_signal
float64 | qsc_codepython_cate_ast_quality_signal
float64 | qsc_codepython_frac_lines_func_ratio_quality_signal
float64 | qsc_codepython_cate_var_zero_quality_signal
bool | qsc_codepython_frac_lines_pass_quality_signal
float64 | qsc_codepython_frac_lines_import_quality_signal
float64 | qsc_codepython_frac_lines_simplefunc_quality_signal
float64 | qsc_codepython_score_lines_no_logic_quality_signal
float64 | qsc_codepython_frac_lines_print_quality_signal
float64 | qsc_code_num_words
int64 | qsc_code_num_chars
int64 | qsc_code_mean_word_length
int64 | qsc_code_frac_words_unique
null | qsc_code_frac_chars_top_2grams
int64 | qsc_code_frac_chars_top_3grams
int64 | qsc_code_frac_chars_top_4grams
int64 | qsc_code_frac_chars_dupe_5grams
int64 | qsc_code_frac_chars_dupe_6grams
int64 | qsc_code_frac_chars_dupe_7grams
int64 | qsc_code_frac_chars_dupe_8grams
int64 | qsc_code_frac_chars_dupe_9grams
int64 | qsc_code_frac_chars_dupe_10grams
int64 | qsc_code_frac_chars_replacement_symbols
int64 | qsc_code_frac_chars_digital
int64 | qsc_code_frac_chars_whitespace
int64 | qsc_code_size_file_byte
int64 | qsc_code_num_lines
int64 | qsc_code_num_chars_line_max
int64 | qsc_code_num_chars_line_mean
int64 | qsc_code_frac_chars_alphabet
int64 | qsc_code_frac_chars_comments
int64 | qsc_code_cate_xml_start
int64 | qsc_code_frac_lines_dupe_lines
int64 | qsc_code_cate_autogen
int64 | qsc_code_frac_lines_long_string
int64 | qsc_code_frac_chars_string_length
int64 | qsc_code_frac_chars_long_word_length
int64 | qsc_code_frac_lines_string_concat
null | qsc_code_cate_encoded_data
int64 | qsc_code_frac_chars_hex_words
int64 | qsc_code_frac_lines_prompt_comments
int64 | qsc_code_frac_lines_assert
int64 | qsc_codepython_cate_ast
int64 | qsc_codepython_frac_lines_func_ratio
int64 | qsc_codepython_cate_var_zero
int64 | qsc_codepython_frac_lines_pass
int64 | qsc_codepython_frac_lines_import
int64 | qsc_codepython_frac_lines_simplefunc
int64 | qsc_codepython_score_lines_no_logic
int64 | qsc_codepython_frac_lines_print
int64 | effective
string | hits
int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
2f9dca279923d69029cb436fe4a1ffad45fd7ce9
| 12,992
|
py
|
Python
|
utils/scripts/OOOlevelGen/src/levels/level_3_2.py
|
fullscreennl/bullettime
|
8967449cdf926aaed6bb7ec217d92e0689fb0c3c
|
[
"MIT"
] | null | null | null |
utils/scripts/OOOlevelGen/src/levels/level_3_2.py
|
fullscreennl/bullettime
|
8967449cdf926aaed6bb7ec217d92e0689fb0c3c
|
[
"MIT"
] | null | null | null |
utils/scripts/OOOlevelGen/src/levels/level_3_2.py
|
fullscreennl/bullettime
|
8967449cdf926aaed6bb7ec217d92e0689fb0c3c
|
[
"MIT"
] | null | null | null |
import LevelBuilder
from sprites import *
def render(name,bg):
    """Build level 3-2 and write it out as ``<name>.plist``.

    Args:
        name: output basename; ".plist" is appended by LevelBuilder.
        bg: unused in this level -- NOTE(review): sibling level scripts
            presumably use it for the background; confirm.
    """
    # Level container; background explicitly disabled ("NO").
    lb = LevelBuilder.LevelBuilder(name+".plist",background="NO")
    # Player sprite and the bullet spawned on the 'onShoot' event.
    lb.addObject(Hero.HeroSprite(x=49, y=58,width=42,height=74))
    lb.addObject(Bullet.BulletSprite(x=0, y=0,width=10,height=10,angle='0',restitution=0.5,static='false',friction=0.5,density=3,spawnEvent='onShoot'))
    #classname='Destructable',firstframe='brittle_brick.png'
    # Floor beams. All playfield x coordinates below are offset by 600.
    lb.addObject(Beam.BeamSprite(x=600+262, y=38,width=425,height=26,angle='0' ,restitution=0.2,static='false',friction=0.5,density=20, classname='Destructable'))
    lb.addObject(Beam.BeamSprite(x=600+240, y=10,width=500,height=30,angle='0',restitution=0.2,static='true',friction=0.5,density=20,classname='SimpleScrollStrategySprite').setName('Beam'))
    # Destructible tower at x offset 65, enemy perched on top (y=225).
    lb.addObject(Beam.BeamSprite(x=600+65, y=71,width=38,height=22,angle='90' ,restitution=0.2,static='false',friction=0.5,density=20, classname='Destructable' ))
    lb.addObject(Beam.BeamSprite(x=600+65, y=118,width=38,height=22,angle='90' ,restitution=0.2,static='false',friction=0.5,density=20, classname='Destructable' ))
    lb.addObject(Beam.BeamSprite(x=600+65, y=165,width=38,height=16,angle='90' ,restitution=0.2,static='false',friction=0.5,density=20, classname='Destructable' ))
    lb.addObject(Beam.BeamSprite(x=600+65, y=199,width=17,height=16,angle='90' ,restitution=0.2,static='false',friction=0.5,density=20, classname='Destructable' ))
    lb.addObject(Beam.BeamSprite(x=600+65, y=95,width=8,height=35,angle='90' ,restitution=0.2,static='false',friction=0.5,density=20, classname='Destructable' ))
    lb.addObject(Beam.BeamSprite(x=600+65, y=142,width=8,height=31,angle='90' ,restitution=0.2,static='false',friction=0.5,density=20, classname='Destructable' ))
    lb.addObject(Beam.BeamSprite(x=600+65, y=187,width=5,height=22,angle='90' ,restitution=0.2,static='false',friction=0.5,density=20, classname='Destructable' ))
    lb.addObject(Beam.BeamSprite(x=600+65, y=210,width=5,height=22,angle='90' ,restitution=0.2,static='false',friction=0.5,density=20, classname='Destructable' ))
    lb.addObject(Enemy.EnemySprite(x=600+65, y=225,width=22,height=22,angle='0',restitution=0.2,static='false',friction=0.5,density=20))
    # Mirror tower at x offset 455, enemy on top (y=225).
    lb.addObject(Beam.BeamSprite(x=600+455, y=71,width=38,height=22,angle='90' ,restitution=0.2,static='false',friction=0.5,density=20, classname='Destructable' ))
    lb.addObject(Beam.BeamSprite(x=600+455, y=118,width=38,height=22,angle='90' ,restitution=0.2,static='false',friction=0.5,density=20, classname='Destructable' ))
    lb.addObject(Beam.BeamSprite(x=600+455, y=165,width=38,height=16,angle='90' ,restitution=0.2,static='false',friction=0.5,density=20, classname='Destructable' ))
    lb.addObject(Beam.BeamSprite(x=600+455, y=199,width=17,height=16,angle='90' ,restitution=0.2,static='false',friction=0.5,density=20, classname='Destructable' ))
    lb.addObject(Beam.BeamSprite(x=600+455, y=95,width=8,height=35,angle='90' ,restitution=0.2,static='false',friction=0.5,density=20, classname='Destructable' ))
    lb.addObject(Beam.BeamSprite(x=600+455, y=142,width=8,height=31,angle='90' ,restitution=0.2,static='false',friction=0.5,density=20, classname='Destructable' ))
    lb.addObject(Beam.BeamSprite(x=600+455, y=187,width=5,height=22,angle='90' ,restitution=0.2,static='false',friction=0.5,density=20, classname='Destructable' ))
    lb.addObject(Beam.BeamSprite(x=600+455, y=210,width=5,height=22,angle='90' ,restitution=0.2,static='false',friction=0.5,density=20, classname='Destructable' ))
    lb.addObject(Enemy.EnemySprite(x=600+455, y=225,width=22,height=22,angle='0',restitution=0.2,static='false',friction=0.5,density=20))
    # Smaller tower at x offset 408, enemy on top (y=171).
    lb.addObject(Beam.BeamSprite(x=600+408, y=65,width=26,height=15,angle='90' ,restitution=0.2,static='false',friction=0.5,density=20, classname='Destructable' ))
    lb.addObject(Beam.BeamSprite(x=600+408, y=97,width=26,height=15,angle='90' ,restitution=0.2,static='false',friction=0.5,density=20, classname='Destructable' ))
    lb.addObject(Beam.BeamSprite(x=600+408, y=130,width=26,height=11,angle='90' ,restitution=0.2,static='false',friction=0.5,density=20, classname='Destructable' ))
    lb.addObject(Beam.BeamSprite(x=600+408, y=153,width=12,height=11,angle='90' ,restitution=0.2,static='false',friction=0.5,density=20, classname='Destructable' ))
    lb.addObject(Beam.BeamSprite(x=600+408, y=81,width=5,height=24,angle='90' ,restitution=0.2,static='false',friction=0.5,density=20, classname='Destructable' ))
    lb.addObject(Beam.BeamSprite(x=600+408, y=114,width=5,height=22,angle='90' ,restitution=0.2,static='false',friction=0.5,density=20, classname='Destructable' ))
    lb.addObject(Beam.BeamSprite(x=600+408, y=145,width=3,height=15,angle='90' ,restitution=0.2,static='false',friction=0.5,density=20, classname='Destructable' ))
    lb.addObject(Beam.BeamSprite(x=600+408, y=161,width=3,height=15,angle='90' ,restitution=0.2,static='false',friction=0.5,density=20, classname='Destructable' ))
    lb.addObject(Enemy.EnemySprite(x=600+408, y=171,width=22,height=22,angle='0',restitution=0.2,static='false',friction=0.5,density=20))
    # Smaller tower at x offset 114, enemy on top (y=176).
    lb.addObject(Beam.BeamSprite(x=600+114, y=66,width=27,height=15,angle='90' ,restitution=0.2,static='false',friction=0.5,density=20, classname='Destructable' ))
    lb.addObject(Beam.BeamSprite(x=600+114, y=99,width=27,height=15,angle='90' ,restitution=0.2,static='false',friction=0.5,density=20, classname='Destructable' ))
    lb.addObject(Beam.BeamSprite(x=600+114, y=133,width=27,height=11,angle='90' ,restitution=0.2,static='false',friction=0.5,density=20, classname='Destructable' ))
    lb.addObject(Beam.BeamSprite(x=600+114, y=157,width=12,height=11,angle='90' ,restitution=0.2,static='false',friction=0.5,density=20, classname='Destructable' ))
    lb.addObject(Beam.BeamSprite(x=600+114, y=83,width=6,height=25,angle='90' ,restitution=0.2,static='false',friction=0.5,density=20, classname='Destructable' ))
    lb.addObject(Beam.BeamSprite(x=600+114, y=116,width=6,height=23,angle='90' ,restitution=0.2,static='false',friction=0.5,density=20, classname='Destructable' ))
    lb.addObject(Beam.BeamSprite(x=600+114, y=149,width=4,height=16,angle='90' ,restitution=0.2,static='false',friction=0.5,density=20, classname='Destructable' ))
    lb.addObject(Beam.BeamSprite(x=600+114, y=165,width=4,height=16,angle='90' ,restitution=0.2,static='false',friction=0.5,density=20, classname='Destructable' ))
    lb.addObject(Enemy.EnemySprite(x=600+114, y=176,width=22,height=22,angle='0',restitution=0.2,static='false',friction=0.5,density=20))
    # Central structure: two uprights, a lintel, and stacked enemies.
    lb.addObject(Beam.BeamSprite(x=600+299, y=93,width=82,height=23,angle='90' ,restitution=0.2,static='false',friction=0.5,density=20, classname='Destructable' ))
    lb.addObject(Beam.BeamSprite(x=600+223, y=93,width=82,height=21,angle='90' ,restitution=0.2,static='false',friction=0.5,density=20, classname='Destructable' ))
    lb.addObject(Beam.BeamSprite(x=600+261, y=144,width=85,height=19,angle='-180' ,restitution=0.2,static='false',friction=0.5,density=20, classname='Destructable' ))
    lb.addObject(Enemy.EnemySprite(x=600+260, y=201,width=92,height=92,angle='0',restitution=0.2,static='false',friction=0.5,density=20))
    lb.addObject(Enemy.EnemySprite(x=600+260, y=257,width=20,height=20,angle='0',restitution=0.2,static='false',friction=0.5,density=20))
    lb.addObject(Enemy.EnemySprite(x=600+260, y=274,width=22,height=22,angle='0',restitution=0.2,static='false',friction=0.5,density=20))
    # Flanking uprights and lintels on both sides of the centre.
    lb.addObject(Beam.BeamSprite(x=600+180, y=93,width=82,height=14,angle='90' ,restitution=0.2,static='false',friction=0.5,density=20, classname='Destructable' ))
    lb.addObject(Beam.BeamSprite(x=600+157, y=93,width=82,height=14,angle='90' ,restitution=0.2,static='false',friction=0.5,density=20, classname='Destructable' ))
    lb.addObject(Beam.BeamSprite(x=600+340, y=93,width=82,height=14,angle='90' ,restitution=0.2,static='false',friction=0.5,density=20, classname='Destructable' ))
    lb.addObject(Beam.BeamSprite(x=600+366, y=93,width=82,height=14,angle='90' ,restitution=0.2,static='false',friction=0.5,density=20, classname='Destructable' ))
    lb.addObject(Beam.BeamSprite(x=600+184, y=139,width=68,height=9,angle='-180' ,restitution=0.2,static='false',friction=0.5,density=20, classname='Destructable' ))
    lb.addObject(Beam.BeamSprite(x=600+339, y=139,width=68,height=9,angle='-180' ,restitution=0.2,static='false',friction=0.5,density=20, classname='Destructable' ))
    # Left-side small slats with an enemy at (200, 175).
    lb.addObject(Beam.BeamSprite(x=600+155, y=154,width=20,height=5,angle='90' ,restitution=0.2,static='false',friction=0.5,density=20, classname='Destructable' ))
    lb.addObject(Beam.BeamSprite(x=600+164, y=151,width=13,height=3,angle='90' ,restitution=0.2,static='false',friction=0.5,density=20, classname='Destructable' ))
    lb.addObject(Beam.BeamSprite(x=600+185, y=150,width=12,height=3,angle='90' ,restitution=0.2,static='false',friction=0.5,density=20, classname='Destructable' ))
    lb.addObject(Beam.BeamSprite(x=600+194, y=150,width=12,height=3,angle='90' ,restitution=0.2,static='false',friction=0.5,density=20, classname='Destructable' ))
    lb.addObject(Beam.BeamSprite(x=600+205, y=150,width=12,height=3,angle='90' ,restitution=0.2,static='false',friction=0.5,density=20, classname='Destructable' ))
    lb.addObject(Beam.BeamSprite(x=600+214, y=150,width=12,height=3,angle='90' ,restitution=0.2,static='false',friction=0.5,density=20, classname='Destructable' ))
    lb.addObject(Beam.BeamSprite(x=600+199, y=159,width=5,height=38,angle='90' ,restitution=0.2,static='false',friction=0.5,density=20, classname='Destructable' ))
    lb.addObject(Enemy.EnemySprite(x=600+200, y=175,width=26,height=26,angle='0',restitution=0.2,static='false',friction=0.5,density=20))
    # Right-side small slats with an enemy at (325, 175).
    lb.addObject(Beam.BeamSprite(x=600+369, y=154,width=20,height=5,angle='90' ,restitution=0.2,static='false',friction=0.5,density=20, classname='Destructable' ))
    lb.addObject(Beam.BeamSprite(x=600+359, y=151,width=13,height=3,angle='90' ,restitution=0.2,static='false',friction=0.5,density=20, classname='Destructable' ))
    lb.addObject(Beam.BeamSprite(x=600+310, y=150,width=12,height=3,angle='90' ,restitution=0.2,static='false',friction=0.5,density=20, classname='Destructable' ))
    lb.addObject(Beam.BeamSprite(x=600+318, y=150,width=12,height=3,angle='90' ,restitution=0.2,static='false',friction=0.5,density=20, classname='Destructable' ))
    lb.addObject(Beam.BeamSprite(x=600+330, y=150,width=12,height=3,angle='90' ,restitution=0.2,static='false',friction=0.5,density=20, classname='Destructable' ))
    lb.addObject(Beam.BeamSprite(x=600+339, y=150,width=12,height=3,angle='90' ,restitution=0.2,static='false',friction=0.5,density=20, classname='Destructable' ))
    lb.addObject(Beam.BeamSprite(x=600+324, y=159,width=5,height=38,angle='90' ,restitution=0.2,static='false',friction=0.5,density=20, classname='Destructable' ))
    lb.addObject(Enemy.EnemySprite(x=600+325, y=175,width=26,height=26,angle='0',restitution=0.2,static='false',friction=0.5,density=20))
    # Crate row near the far right. NOTE(review): the six crates below are
    # added twice (plus two more repeats) at identical coordinates --
    # possibly an intentional overlap, possibly copy/paste; confirm.
    lb.addObject(Crate.CrateSprite(x=2798-50,y=223,width=32, height=32, static='false',angle=0))
    lb.addObject(Crate.CrateSprite(x=2759-50,y=223,width=32, height=32, static='false',angle=0))
    lb.addObject(Crate.CrateSprite(x=2717-50,y=223,width=32, height=32, static='false',angle=0))
    lb.addObject(Crate.CrateSprite(x=2674-50,y=223,width=32, height=32, static='false',angle=0))
    lb.addObject(Crate.CrateSprite(x=2638-50,y=223,width=32, height=32, static='false',angle=0))
    lb.addObject(Crate.CrateSprite(x=2599-50,y=223,width=32, height=32, static='false',angle=0))
    lb.addObject(Crate.CrateSprite(x=2798-50,y=223,width=32, height=32, static='false',angle=0))
    lb.addObject(Crate.CrateSprite(x=2759-50,y=223,width=32, height=32, static='false',angle=0))
    lb.addObject(Crate.CrateSprite(x=2717-50,y=223,width=32, height=32, static='false',angle=0))
    lb.addObject(Crate.CrateSprite(x=2674-50,y=223,width=32, height=32, static='false',angle=0))
    lb.addObject(Crate.CrateSprite(x=2638-50,y=223,width=32, height=32, static='false',angle=0))
    lb.addObject(Crate.CrateSprite(x=2599-50,y=223,width=32, height=32, static='false',angle=0))
    lb.addObject(Crate.CrateSprite(x=2638-50,y=223,width=32, height=32, static='false',angle=0))
    lb.addObject(Crate.CrateSprite(x=2599-50,y=223,width=32, height=32, static='false',angle=0))
    # Pickup row above the crates.
    lb.addObject(Pickup.PickupSprite(x=2570-50,y=260,width=32, height=32, static='false',angle=0))
    lb.addObject(Pickup.PickupSprite(x=2609-50,y=260,width=32, height=32, static='false',angle=0))
    lb.addObject(Pickup.PickupSprite(x=2647-50,y=260,width=32, height=32, static='false',angle=0))
    lb.addObject(Pickup.PickupSprite(x=2684-50,y=260,width=32, height=32, static='false',angle=0))
    lb.addObject(Pickup.PickupSprite(x=2720-50,y=260,width=32, height=32, static='false',angle=0))
    # Exit teleporter to level 3-3, then emit the plist.
    lb.addObject(Teleporter.TeleporterSprite(level_id='leveldata/level_3_3'))
    lb.render()
| 132.571429
| 189
| 0.735376
| 2,161
| 12,992
| 4.41925
| 0.078205
| 0.101361
| 0.070157
| 0.119267
| 0.930262
| 0.927539
| 0.916021
| 0.916021
| 0.911204
| 0.911204
| 0
| 0.131508
| 0.058267
| 12,992
| 98
| 190
| 132.571429
| 0.64904
| 0.004233
| 0
| 0.150538
| 0
| 0
| 0.099946
| 0.00201
| 0
| 0
| 0
| 0
| 0
| 1
| 0.010753
| false
| 0
| 0.021505
| 0
| 0.032258
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
c824e00f639ba0d802f6b5a65244e7c796b0991a
| 4,658
|
py
|
Python
|
apps/book/apiview.py
|
PyCN/BlogBackendProject
|
144ea98d54c624cf93a69816779e4f3483ab80a2
|
[
"Apache-2.0"
] | 335
|
2018-02-06T11:40:44.000Z
|
2022-02-01T07:55:24.000Z
|
apps/book/apiview.py
|
PyCN/BlogBackendProject
|
144ea98d54c624cf93a69816779e4f3483ab80a2
|
[
"Apache-2.0"
] | 18
|
2018-03-11T05:02:27.000Z
|
2022-03-11T23:18:34.000Z
|
apps/book/apiview.py
|
PyCN/BlogBackendProject
|
144ea98d54c624cf93a69816779e4f3483ab80a2
|
[
"Apache-2.0"
] | 83
|
2018-03-02T03:24:06.000Z
|
2021-12-15T05:04:09.000Z
|
# _*_ coding: utf-8 _*_
__author__ = 'LennonChin'
__date__ = '2017/12/2 12:52'
from django_filters.rest_framework import DjangoFilterBackend
from rest_framework import status, viewsets, filters, mixins
from rest_framework.response import Response
from .models import BookInfo, BookNoteInfo
from .serializers import BookBaseInfoSerializer, BookDetailInfoSerializer, BookNoteBaseInfoSerializer, BookNoteDetialInfoSerializer
from .filters import BookFilter, BookNoteFilter
from base.utils import CustomeLimitOffsetPagination
class BookBaseInfoListViewset(viewsets.ReadOnlyModelViewSet):
    """Read-only endpoints for basic book information.

    List:
        Book basic-info article list page.
    """

    queryset = BookInfo.objects.filter(is_active=True)
    serializer_class = BookBaseInfoSerializer

    # Filtering, full-text search, and ordering backends.
    filter_backends = (DjangoFilterBackend, filters.SearchFilter, filters.OrderingFilter)
    filter_class = BookFilter
    search_fields = ('title', 'subtitle', 'abstract', 'desc')
    ordering_fields = ('click_num', 'like_num', 'comment_num', 'index', 'add_time')
    ordering = ('-index', '-add_time')

    # Pagination.
    pagination_class = CustomeLimitOffsetPagination

    def retrieve(self, request, *args, **kwargs):
        """Return a single book and bump its click counter."""
        book = self.get_object()
        book.click_num += 1
        book.save()
        return Response(self.get_serializer(book).data)
class BookDetailInfoListViewset(mixins.RetrieveModelMixin, viewsets.GenericViewSet):
    """Detail endpoint for books; honours per-book browse passwords.

    Retrieve:
        Book detail page.
    """

    queryset = BookInfo.objects.filter(is_active=True)
    serializer_class = BookDetailInfoSerializer

    # Filtering, full-text search, and ordering backends.
    filter_backends = (DjangoFilterBackend, filters.SearchFilter, filters.OrderingFilter)
    filter_class = BookFilter
    search_fields = ('title', 'subtitle', 'abstract', 'desc')
    ordering_fields = ('click_num', 'like_num', 'comment_num')

    # Pagination.
    pagination_class = CustomeLimitOffsetPagination

    def retrieve(self, request, *args, **kwargs):
        """Return one book; reject with 401 when its browse password is wrong."""
        book = self.get_object()
        if book.browse_password_encrypt:
            # Password supplied by the client as a query parameter.
            supplied = request.query_params.get('browse_auth', "")
            if supplied != book.browse_password_encrypt:
                return Response({"error": "文章密码错误"},
                                status=status.HTTP_401_UNAUTHORIZED)
        book.click_num += 1
        book.save()
        return Response(self.get_serializer(book).data)
# Book notes
class BookNoteBaseInfoListViewset(viewsets.ReadOnlyModelViewSet):
    """Read-only endpoints for basic book-note information.

    List:
        Book-note info list page.
    """

    queryset = BookNoteInfo.objects.filter(is_active=True)
    serializer_class = BookNoteBaseInfoSerializer

    # Filtering, full-text search, and ordering backends.
    filter_backends = (DjangoFilterBackend, filters.SearchFilter, filters.OrderingFilter)
    filter_class = BookNoteFilter
    search_fields = ('title', 'subtitle', 'abstract', 'desc')
    ordering_fields = ('click_num', 'like_num', 'comment_num', 'index', 'add_time')
    ordering = ('-index', '-add_time')

    # Pagination.
    pagination_class = CustomeLimitOffsetPagination

    def retrieve(self, request, *args, **kwargs):
        """Return a single note and bump its click counter."""
        note = self.get_object()
        note.click_num += 1
        note.save()
        return Response(self.get_serializer(note).data)
class BookNoteDetailInfoListViewset(mixins.RetrieveModelMixin, viewsets.GenericViewSet):
    """Detail endpoint for book notes; honours per-note browse passwords.

    Retrieve:
        Book-note detail page.
    """

    queryset = BookNoteInfo.objects.filter(is_active=True)
    serializer_class = BookNoteDetialInfoSerializer

    # Filtering backends were disabled in the original source:
    # filter_backends = (DjangoFilterBackend, filters.SearchFilter, filters.OrderingFilter)
    # filter_class = BookFilter
    search_fields = ('title', 'subtitle', 'abstract', 'desc')
    ordering_fields = ('click_num', 'like_num', 'comment_num')

    # Pagination.
    pagination_class = CustomeLimitOffsetPagination

    def retrieve(self, request, *args, **kwargs):
        """Return one note; reject with 401 when its browse password is wrong."""
        note = self.get_object()
        if note.browse_password_encrypt:
            # Password supplied by the client as a query parameter.
            supplied = request.query_params.get('browse_auth', "")
            if supplied != note.browse_password_encrypt:
                return Response({"error": "文章密码错误"},
                                status=status.HTTP_401_UNAUTHORIZED)
        note.click_num += 1
        note.save()
        return Response(self.get_serializer(note).data)
| 34
| 131
| 0.681408
| 439
| 4,658
| 7.006834
| 0.234624
| 0.03251
| 0.019506
| 0.027308
| 0.758778
| 0.727568
| 0.727568
| 0.727568
| 0.727568
| 0.691157
| 0
| 0.006056
| 0.220052
| 4,658
| 137
| 132
| 34
| 0.840628
| 0.059468
| 0
| 0.759036
| 0
| 0
| 0.083605
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.048193
| false
| 0.048193
| 0.084337
| 0
| 0.590361
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 7
|
c82d829f0cb9d94f4748c0ac354c599232344c3a
| 1,341
|
py
|
Python
|
skqulacs/qsvm/qsvmbase.py
|
kenjikun/scikit-qulacs
|
afc502f63927ab61da964698da54ec4b410c30c4
|
[
"MIT"
] | null | null | null |
skqulacs/qsvm/qsvmbase.py
|
kenjikun/scikit-qulacs
|
afc502f63927ab61da964698da54ec4b410c30c4
|
[
"MIT"
] | null | null | null |
skqulacs/qsvm/qsvmbase.py
|
kenjikun/scikit-qulacs
|
afc502f63927ab61da964698da54ec4b410c30c4
|
[
"MIT"
] | null | null | null |
from qulacs import QuantumState
from qulacs.gate import H, RZ, CNOT
import numpy as np
def get_qvec(x, n_qubit, tlotstep):
    """Encode the data vector ``x`` into an ``n_qubit`` quantum state.

    Runs two identical passes of: a Hadamard on every qubit, followed by
    ``tlotstep`` rounds of per-qubit RZ rotations and CNOT-sandwiched RZ
    interactions between each qubit and its ring neighbour. (The original
    source spelled the two passes out as duplicated code.)

    Args:
        x: data vector; indices 0..n_qubit-1 are read.
        n_qubit: number of qubits.
        tlotstep: number of Trotter-style repetition steps.

    Returns:
        The resulting qulacs ``QuantumState``.
    """
    state = QuantumState(n_qubit)
    state.set_zero_state()
    # Two identical encoding passes.
    for _ in range(2):
        for q in range(n_qubit):
            H(q).update_quantum_state(state)
        for _ in range(tlotstep):
            for q in range(n_qubit):
                RZ(q, x[q] / tlotstep).update_quantum_state(state)
                # Interaction between qubit q and its neighbour (q+1) mod n.
                nxt = (q + 1) % n_qubit
                CNOT(q, nxt).update_quantum_state(state)
                RZ(nxt, (np.pi - x[q]) * (np.pi - x[nxt]) / tlotstep).update_quantum_state(state)
                CNOT(q, nxt).update_quantum_state(state)
    return state
| 36.243243
| 84
| 0.577927
| 186
| 1,341
| 3.924731
| 0.204301
| 0.160274
| 0.246575
| 0.30137
| 0.735616
| 0.735616
| 0.735616
| 0.735616
| 0.735616
| 0.735616
| 0
| 0.007634
| 0.316182
| 1,341
| 36
| 85
| 37.25
| 0.788441
| 0.055183
| 0
| 0.689655
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.034483
| false
| 0
| 0.103448
| 0
| 0.172414
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
c835a19b1d4422543a1945d892e1b721d91a1295
| 490
|
py
|
Python
|
eval_mosmed_timm-regnetx_002_Flip.py
|
BrunoKrinski/segtool
|
cb604b5f38104c43a76450136e37c3d1c4b6d275
|
[
"MIT"
] | null | null | null |
eval_mosmed_timm-regnetx_002_Flip.py
|
BrunoKrinski/segtool
|
cb604b5f38104c43a76450136e37c3d1c4b6d275
|
[
"MIT"
] | null | null | null |
eval_mosmed_timm-regnetx_002_Flip.py
|
BrunoKrinski/segtool
|
cb604b5f38104c43a76450136e37c3d1c4b6d275
|
[
"MIT"
] | null | null | null |
import os

# The five evaluation runs differ only in the fold index embedded in the
# config filename (0 through 4).
ls = [
    "python main.py --configs configs/"
    "eval_mosmed_unetplusplus_timm-regnetx_002_{}_Flip.yml".format(fold)
    for fold in range(5)
]

for command in ls:
    os.system(command)
| 44.545455
| 92
| 0.830612
| 80
| 490
| 4.7125
| 0.3
| 0.132626
| 0.159151
| 0.251989
| 0.896552
| 0.896552
| 0.896552
| 0.896552
| 0.896552
| 0.896552
| 0
| 0.043573
| 0.063265
| 490
| 11
| 93
| 44.545455
| 0.777778
| 0
| 0
| 0
| 0
| 0
| 0.86558
| 0.610998
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.111111
| 0
| 0.111111
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
c8687780824b68621b125ca25aa502e1c7c1c16a
| 161
|
py
|
Python
|
metrics/__init__.py
|
ryosukehata/severstal
|
cb54703b820cb27d7b93fb80a42b41f84ec8cf08
|
[
"Apache-2.0"
] | null | null | null |
metrics/__init__.py
|
ryosukehata/severstal
|
cb54703b820cb27d7b93fb80a42b41f84ec8cf08
|
[
"Apache-2.0"
] | null | null | null |
metrics/__init__.py
|
ryosukehata/severstal
|
cb54703b820cb27d7b93fb80a42b41f84ec8cf08
|
[
"Apache-2.0"
] | null | null | null |
# Re-export the dice metrics so callers can import them from the package root.
from .metric import dice_channel_torch, dice_channel_torch_with_each_channel
# Public API for ``from metrics import *``.
__all__ = [
    "dice_channel_torch",
    "dice_channel_torch_with_each_channel"
]
| 23
| 76
| 0.813665
| 22
| 161
| 5.136364
| 0.409091
| 0.389381
| 0.566372
| 0.353982
| 0.831858
| 0.831858
| 0.831858
| 0.831858
| 0.831858
| 0
| 0
| 0
| 0.124224
| 161
| 6
| 77
| 26.833333
| 0.801418
| 0
| 0
| 0
| 0
| 0
| 0.335404
| 0.223602
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.2
| 0
| 0.2
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
2391aaef0023f332758660c17e6870fec503cab1
| 17,935
|
py
|
Python
|
MagicTelecomAPILib/Controllers/DidsProductsController.py
|
MagicTelecom/mt_python_api
|
b014760809cb1d0cab48a3376cbaface0c4bef66
|
[
"MIT"
] | null | null | null |
MagicTelecomAPILib/Controllers/DidsProductsController.py
|
MagicTelecom/mt_python_api
|
b014760809cb1d0cab48a3376cbaface0c4bef66
|
[
"MIT"
] | null | null | null |
MagicTelecomAPILib/Controllers/DidsProductsController.py
|
MagicTelecom/mt_python_api
|
b014760809cb1d0cab48a3376cbaface0c4bef66
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
MagicTelecomAPILib.Controllers.DidsProductsController
This file was automatically generated by APIMATIC BETA v2.0 on 06/22/2016
"""
from MagicTelecomAPILib.APIHelper import APIHelper
from MagicTelecomAPILib.APIException import APIException
from MagicTelecomAPILib.Configuration import Configuration
from MagicTelecomAPILib.Http.HttpRequest import HttpRequest
from MagicTelecomAPILib.Http.HttpResponse import HttpResponse
from MagicTelecomAPILib.Http.RequestsClient import RequestsClient
from MagicTelecomAPILib.Controllers.BaseController import BaseController
class DidsProductsController(BaseController):
"""A Controller to access Endpoints in the MagicTelecomAPILib API."""
    def __init__(self, http_client = None):
        """Constructor which allows a different HTTP client for this controller.

        Args:
            http_client (optional): HTTP client to use instead of the default;
                handling of None is delegated to BaseController.
        """
        BaseController.__init__(self, http_client)
def get_dids(self,
page=None,
limit=None,
filter=None):
"""Does a GET request to /dids/products/dids.
Allow clients to get the list of available phone_numbers
Args:
page (int, optional): Zero based offset index for the results.
e.g. 0 would start at the first result and 10 would start at
the eleventh result
limit (int, optional): Maximum number of results to return in the
response
filter (string, optional): Allowed fields: country_iso2,
region_handle, location_handle, location_name, phone_number,
phone_number_type, zip_code
Returns:
mixed: Response from the API.
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
# The base uri for api requests
query_builder = Configuration.BASE_URI
# Prepare query string for API call
query_builder += "/dids/products/dids"
# Process optional query parameters
query_parameters = {
"page": page,
"limit": limit,
"filter": filter
}
# Validate and preprocess url
query_url = APIHelper.clean_url(query_builder)
# Prepare headers
headers = {
"user-agent": "APIMATIC 2.0",
"accept": "application/json",
"X-Auth-Token": Configuration.x_auth_token,
"X-Auth-Token": Configuration.x_auth_token
}
# Prepare the API call.
http_request = self.http_client.get(query_url, headers=headers, query_parameters=query_parameters)
# Invoke the API call to fetch the response.
response = self.http_client.execute_as_string(http_request);
# Endpoint error handling using HTTP status codes.
if response.status_code == 401:
raise APIException("You are not authenticated", 401, response.raw_body)
elif response.status_code == 403:
raise APIException("This action needs a valid WSSE header", 403, response.raw_body)
elif response.status_code == 404:
raise APIException("Resource not found", 404, response.raw_body)
# Global error handling using HTTP status codes.
self.validate_response(response)
return response.raw_body
def get_dids_by_phone_number(self,
                             phone_number):
    """Does a GET request to /dids/products/dids/{phone_number}.

    Allow clients to get a specific phone_number.

    Args:
        phone_number (string): Phone Number.

    Returns:
        mixed: Response from the API.

    Raises:
        APIException: When an error occurs while fetching the data from
            the remote API. This exception includes the HTTP Response
            code, an error message, and the HTTP body that was received in
            the request.
    """
    # Build the request URL from the configured base URI.
    query_builder = Configuration.BASE_URI
    query_builder += "/dids/products/dids/{phone_number}"

    # Substitute the template parameter into the path.
    query_builder = APIHelper.append_url_with_template_parameters(query_builder, {
        "phone_number": phone_number
    })

    # Validate and preprocess url
    query_url = APIHelper.clean_url(query_builder)

    # Prepare headers. Fix: the original dict literal declared
    # "X-Auth-Token" twice; the duplicate entry was dead code.
    headers = {
        "user-agent": "APIMATIC 2.0",
        "accept": "application/json",
        "X-Auth-Token": Configuration.x_auth_token
    }

    # Prepare and invoke the API call.
    http_request = self.http_client.get(query_url, headers=headers)
    response = self.http_client.execute_as_string(http_request)

    # Endpoint error handling using HTTP status codes.
    if response.status_code == 401:
        raise APIException("You are not authenticated", 401, response.raw_body)
    elif response.status_code == 403:
        raise APIException("This action needs a valid WSSE header", 403, response.raw_body)
    elif response.status_code == 404:
        raise APIException("Resource not found", 404, response.raw_body)

    # Global error handling using HTTP status codes.
    self.validate_response(response)

    return response.raw_body
def get_locations(self,
                  page=None,
                  limit=None,
                  filter=None):
    """Does a GET request to /dids/products/locations.

    Allow clients to get the list of available locations.

    Args:
        page (int, optional): Zero based offset index for the results.
            e.g. 0 would start at the first result and 10 would start at
            the eleventh result.
        limit (int, optional): Maximum number of results to return in the
            response.
        filter (string, optional): Allowed fields: country_iso2,
            region_handle, location_handle, location_name, prefix,
            phone_number_type, zip_code, npa, nxx, fax. (Name shadows the
            `filter` builtin but is kept for API compatibility.)

    Returns:
        mixed: Response from the API.

    Raises:
        APIException: When an error occurs while fetching the data from
            the remote API. This exception includes the HTTP Response
            code, an error message, and the HTTP body that was received in
            the request.
    """
    # Build the request URL from the configured base URI.
    query_builder = Configuration.BASE_URI
    query_builder += "/dids/products/locations"

    # Optional query parameters; None values are filtered downstream.
    query_parameters = {
        "page": page,
        "limit": limit,
        "filter": filter
    }

    # Validate and preprocess url
    query_url = APIHelper.clean_url(query_builder)

    # Prepare headers. Fix: the original dict literal declared
    # "X-Auth-Token" twice; the duplicate entry was dead code.
    headers = {
        "user-agent": "APIMATIC 2.0",
        "accept": "application/json",
        "X-Auth-Token": Configuration.x_auth_token
    }

    # Prepare and invoke the API call.
    http_request = self.http_client.get(query_url, headers=headers, query_parameters=query_parameters)
    response = self.http_client.execute_as_string(http_request)

    # Endpoint error handling using HTTP status codes.
    if response.status_code == 401:
        raise APIException("You are not authenticated", 401, response.raw_body)
    elif response.status_code == 403:
        raise APIException("This action needs a valid WSSE header", 403, response.raw_body)
    elif response.status_code == 404:
        raise APIException("Resource not found", 404, response.raw_body)

    # Global error handling using HTTP status codes.
    self.validate_response(response)

    return response.raw_body
def get_location_by_handle(self,
                           location_handle):
    """Does a GET request to /dids/products/locations/{location_handle}.

    Allow clients to get a specific location.

    Args:
        location_handle (string): Location Handle.

    Returns:
        mixed: Response from the API.

    Raises:
        APIException: When an error occurs while fetching the data from
            the remote API. This exception includes the HTTP Response
            code, an error message, and the HTTP body that was received in
            the request.
    """
    # Build the request URL from the configured base URI.
    query_builder = Configuration.BASE_URI
    query_builder += "/dids/products/locations/{location_handle}"

    # Substitute the template parameter into the path.
    query_builder = APIHelper.append_url_with_template_parameters(query_builder, {
        "location_handle": location_handle
    })

    # Validate and preprocess url
    query_url = APIHelper.clean_url(query_builder)

    # Prepare headers. Fix: the original dict literal declared
    # "X-Auth-Token" twice; the duplicate entry was dead code.
    headers = {
        "user-agent": "APIMATIC 2.0",
        "accept": "application/json",
        "X-Auth-Token": Configuration.x_auth_token
    }

    # Prepare and invoke the API call.
    http_request = self.http_client.get(query_url, headers=headers)
    response = self.http_client.execute_as_string(http_request)

    # Endpoint error handling using HTTP status codes.
    if response.status_code == 401:
        raise APIException("You are not authenticated", 401, response.raw_body)
    elif response.status_code == 403:
        raise APIException("This action needs a valid WSSE header", 403, response.raw_body)
    elif response.status_code == 404:
        raise APIException("Resource not found", 404, response.raw_body)

    # Global error handling using HTTP status codes.
    self.validate_response(response)

    return response.raw_body
def get_trunks(self,
               page=None,
               limit=None):
    """Does a GET request to /dids/products/trunks.

    Allow clients to get the list of available trunks.

    Args:
        page (int, optional): Zero based offset index for the results.
            e.g. 0 would start at the first result and 10 would start at
            the eleventh result.
        limit (int, optional): Maximum number of results to return in the
            response.

    Returns:
        mixed: Response from the API.

    Raises:
        APIException: When an error occurs while fetching the data from
            the remote API. This exception includes the HTTP Response
            code, an error message, and the HTTP body that was received in
            the request.
    """
    # Build the request URL from the configured base URI.
    query_builder = Configuration.BASE_URI
    query_builder += "/dids/products/trunks"

    # Optional query parameters; None values are filtered downstream.
    query_parameters = {
        "page": page,
        "limit": limit
    }

    # Validate and preprocess url
    query_url = APIHelper.clean_url(query_builder)

    # Prepare headers. Fix: the original dict literal declared
    # "X-Auth-Token" twice; the duplicate entry was dead code.
    headers = {
        "user-agent": "APIMATIC 2.0",
        "accept": "application/json",
        "X-Auth-Token": Configuration.x_auth_token
    }

    # Prepare and invoke the API call.
    http_request = self.http_client.get(query_url, headers=headers, query_parameters=query_parameters)
    response = self.http_client.execute_as_string(http_request)

    # Endpoint error handling using HTTP status codes.
    if response.status_code == 401:
        raise APIException("You are not authenticated", 401, response.raw_body)
    elif response.status_code == 403:
        raise APIException("This action needs a valid WSSE header", 403, response.raw_body)
    elif response.status_code == 404:
        raise APIException("Resource not found", 404, response.raw_body)

    # Global error handling using HTTP status codes.
    self.validate_response(response)

    return response.raw_body
def get_trunk_by_handle(self,
                        trunk_handle):
    """Does a GET request to /dids/products/trunks/{trunk_handle}.

    Allow clients to get a specific trunk.

    Args:
        trunk_handle (string): Trunk Handle.

    Returns:
        mixed: Response from the API.

    Raises:
        APIException: When an error occurs while fetching the data from
            the remote API. This exception includes the HTTP Response
            code, an error message, and the HTTP body that was received in
            the request.
    """
    # Build the request URL from the configured base URI.
    query_builder = Configuration.BASE_URI
    query_builder += "/dids/products/trunks/{trunk_handle}"

    # Substitute the template parameter into the path.
    query_builder = APIHelper.append_url_with_template_parameters(query_builder, {
        "trunk_handle": trunk_handle
    })

    # Validate and preprocess url
    query_url = APIHelper.clean_url(query_builder)

    # Prepare headers. Fix: the original dict literal declared
    # "X-Auth-Token" twice; the duplicate entry was dead code.
    headers = {
        "user-agent": "APIMATIC 2.0",
        "accept": "application/json",
        "X-Auth-Token": Configuration.x_auth_token
    }

    # Prepare and invoke the API call.
    http_request = self.http_client.get(query_url, headers=headers)
    response = self.http_client.execute_as_string(http_request)

    # Endpoint error handling using HTTP status codes.
    if response.status_code == 401:
        raise APIException("You are not authenticated", 401, response.raw_body)
    elif response.status_code == 403:
        raise APIException("This action needs a valid WSSE header", 403, response.raw_body)
    elif response.status_code == 404:
        raise APIException("Resource not found", 404, response.raw_body)

    # Global error handling using HTTP status codes.
    self.validate_response(response)

    return response.raw_body
def get_countries_by_trunk(self,
                           page,
                           limit):
    """Does a GET request to /dids/products/trunks/countries.

    Allow clients to get trunk zones.

    Args:
        page (int): Zero based offset index for the results. e.g. 0 would
            start at the first result and 10 would start at the eleventh
            result.
        limit (int): Maximum number of results to return in the response.

    Returns:
        mixed: Response from the API.

    Raises:
        APIException: When an error occurs while fetching the data from
            the remote API. This exception includes the HTTP Response
            code, an error message, and the HTTP body that was received in
            the request.
    """
    # Build the request URL from the configured base URI.
    query_builder = Configuration.BASE_URI
    query_builder += "/dids/products/trunks/countries"

    # Required query parameters (the original comment wrongly called
    # these optional; both are positional with no defaults).
    query_parameters = {
        "page": page,
        "limit": limit
    }

    # Validate and preprocess url
    query_url = APIHelper.clean_url(query_builder)

    # Prepare headers. Fix: the original dict literal declared
    # "X-Auth-Token" twice; the duplicate entry was dead code.
    headers = {
        "user-agent": "APIMATIC 2.0",
        "accept": "application/json",
        "X-Auth-Token": Configuration.x_auth_token
    }

    # Prepare and invoke the API call.
    http_request = self.http_client.get(query_url, headers=headers, query_parameters=query_parameters)
    response = self.http_client.execute_as_string(http_request)

    # Endpoint error handling using HTTP status codes.
    if response.status_code == 401:
        raise APIException("You are not authenticated", 401, response.raw_body)
    elif response.status_code == 403:
        raise APIException("This action needs a valid WSSE header", 403, response.raw_body)
    elif response.status_code == 404:
        raise APIException("Resource not found", 404, response.raw_body)

    # Global error handling using HTTP status codes.
    self.validate_response(response)

    return response.raw_body
| 36.453252
| 107
| 0.601617
| 1,997
| 17,935
| 5.269905
| 0.089634
| 0.013303
| 0.026606
| 0.030597
| 0.891676
| 0.881319
| 0.878848
| 0.872767
| 0.843311
| 0.843311
| 0
| 0.01379
| 0.332869
| 17,935
| 491
| 108
| 36.527495
| 0.865775
| 0.366769
| 0
| 0.768421
| 1
| 0
| 0.133749
| 0.018906
| 0
| 0
| 0
| 0
| 0
| 1
| 0.042105
| false
| 0
| 0.036842
| 0
| 0.121053
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
23b457644dac6dc44487a932c06e0e8fcfb2037e
| 29,771
|
py
|
Python
|
eZmaxApi/api/module_sspr_api.py
|
eZmaxinc/eZmax-SDK-python
|
5b4d54b69db68aab8ee814a1e26460a0af03784e
|
[
"MIT"
] | null | null | null |
eZmaxApi/api/module_sspr_api.py
|
eZmaxinc/eZmax-SDK-python
|
5b4d54b69db68aab8ee814a1e26460a0af03784e
|
[
"MIT"
] | null | null | null |
eZmaxApi/api/module_sspr_api.py
|
eZmaxinc/eZmax-SDK-python
|
5b4d54b69db68aab8ee814a1e26460a0af03784e
|
[
"MIT"
] | null | null | null |
"""
eZmax API Definition
    This API exposes all the functionalities for the eZmax and eZsign applications.  # noqa: E501
The version of the OpenAPI document: 1.1.3
Contact: support-api@ezmax.ca
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from eZmaxApi.api_client import ApiClient, Endpoint as _Endpoint
from eZmaxApi.model_utils import ( # noqa: F401
check_allowed_values,
check_validations,
date,
datetime,
file_type,
none_type,
validate_and_convert_types
)
from eZmaxApi.model.common_response_error import CommonResponseError
from eZmaxApi.model.sspr_reset_password_request_v1_request import SsprResetPasswordRequestV1Request
from eZmaxApi.model.sspr_reset_password_v1_request import SsprResetPasswordV1Request
from eZmaxApi.model.sspr_send_usernames_v1_request import SsprSendUsernamesV1Request
from eZmaxApi.model.sspr_unlock_account_request_v1_request import SsprUnlockAccountRequestV1Request
from eZmaxApi.model.sspr_unlock_account_v1_request import SsprUnlockAccountV1Request
from eZmaxApi.model.sspr_validate_token_v1_request import SsprValidateTokenV1Request
class ModuleSsprApi(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None):
    """Create the SSPR module API wrapper.

    Args:
        api_client (ApiClient, optional): Client used to perform the
            HTTP calls. A default ApiClient is created when omitted.
    """
    if api_client is None:
        api_client = ApiClient()
    self.api_client = api_client

    def _body_endpoint(path, operation_id, param_name, param_type):
        # Every SSPR operation has the same shape: an authenticated POST
        # with one required JSON body parameter and no response model.
        # The original generated code repeated this ~50-line _Endpoint
        # literal six times; build it from the four values that vary.
        return _Endpoint(
            settings={
                'response_type': None,
                'auth': [
                    'Authorization'
                ],
                'endpoint_path': path,
                'operation_id': operation_id,
                'http_method': 'POST',
                'servers': None,
            },
            params_map={
                'all': [
                    param_name,
                ],
                'required': [
                    param_name,
                ],
                'nullable': [
                ],
                'enum': [
                ],
                'validation': [
                ]
            },
            root_map={
                'validations': {
                },
                'allowed_values': {
                },
                'openapi_types': {
                    param_name:
                        (param_type,),
                },
                'attribute_map': {
                },
                'location_map': {
                    param_name: 'body',
                },
                'collection_format_map': {
                }
            },
            headers_map={
                'accept': [
                    'application/json'
                ],
                'content_type': [
                    'application/json'
                ]
            },
            api_client=api_client
        )

    self.sspr_reset_password_request_v1_endpoint = _body_endpoint(
        '/1/module/sspr/resetPasswordRequest',
        'sspr_reset_password_request_v1',
        'sspr_reset_password_request_v1_request',
        SsprResetPasswordRequestV1Request,
    )
    self.sspr_reset_password_v1_endpoint = _body_endpoint(
        '/1/module/sspr/resetPassword',
        'sspr_reset_password_v1',
        'sspr_reset_password_v1_request',
        SsprResetPasswordV1Request,
    )
    self.sspr_send_usernames_v1_endpoint = _body_endpoint(
        '/1/module/sspr/sendUsernames',
        'sspr_send_usernames_v1',
        'sspr_send_usernames_v1_request',
        SsprSendUsernamesV1Request,
    )
    self.sspr_unlock_account_request_v1_endpoint = _body_endpoint(
        '/1/module/sspr/unlockAccountRequest',
        'sspr_unlock_account_request_v1',
        'sspr_unlock_account_request_v1_request',
        SsprUnlockAccountRequestV1Request,
    )
    self.sspr_unlock_account_v1_endpoint = _body_endpoint(
        '/1/module/sspr/unlockAccount',
        'sspr_unlock_account_v1',
        'sspr_unlock_account_v1_request',
        SsprUnlockAccountV1Request,
    )
    self.sspr_validate_token_v1_endpoint = _body_endpoint(
        '/1/module/sspr/validateToken',
        'sspr_validate_token_v1',
        'sspr_validate_token_v1_request',
        SsprValidateTokenV1Request,
    )
def sspr_reset_password_request_v1(
self,
sspr_reset_password_request_v1_request,
**kwargs
):
"""Reset Password Request # noqa: E501
This endpoint sends an email with a link to reset the user's password. sEmailAddress must be set if eUserTypeSSPR = EzsignUser sUserLoginname must be set if eUserTypeSSPR = Native # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.sspr_reset_password_request_v1(sspr_reset_password_request_v1_request, async_req=True)
>>> result = thread.get()
Args:
sspr_reset_password_request_v1_request (SsprResetPasswordRequestV1Request):
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_content_type (str/None): force body content-type.
Default is None and content-type will be predicted by allowed
content-types and body.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
None
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_content_type'] = kwargs.get(
'_content_type')
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['sspr_reset_password_request_v1_request'] = \
sspr_reset_password_request_v1_request
return self.sspr_reset_password_request_v1_endpoint.call_with_http_info(**kwargs)
def sspr_reset_password_v1(
self,
sspr_reset_password_v1_request,
**kwargs
):
"""Reset Password # noqa: E501
This endpoint resets the user's password. sEmailAddress must be set if eUserTypeSSPR = EzsignUser sUserLoginname must be set if eUserTypeSSPR = Native # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.sspr_reset_password_v1(sspr_reset_password_v1_request, async_req=True)
>>> result = thread.get()
Args:
sspr_reset_password_v1_request (SsprResetPasswordV1Request):
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_content_type (str/None): force body content-type.
Default is None and content-type will be predicted by allowed
content-types and body.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
None
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_content_type'] = kwargs.get(
'_content_type')
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['sspr_reset_password_v1_request'] = \
sspr_reset_password_v1_request
return self.sspr_reset_password_v1_endpoint.call_with_http_info(**kwargs)
def sspr_send_usernames_v1(
self,
sspr_send_usernames_v1_request,
**kwargs
):
"""Send username(s) # noqa: E501
This endpoint returns an email with the username(s) matching the email address provided in case of forgotten username # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.sspr_send_usernames_v1(sspr_send_usernames_v1_request, async_req=True)
>>> result = thread.get()
Args:
sspr_send_usernames_v1_request (SsprSendUsernamesV1Request):
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_content_type (str/None): force body content-type.
Default is None and content-type will be predicted by allowed
content-types and body.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
None
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_content_type'] = kwargs.get(
'_content_type')
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['sspr_send_usernames_v1_request'] = \
sspr_send_usernames_v1_request
return self.sspr_send_usernames_v1_endpoint.call_with_http_info(**kwargs)
def sspr_unlock_account_request_v1(
self,
sspr_unlock_account_request_v1_request,
**kwargs
):
"""Unlock Account Request # noqa: E501
This endpoint sends an email with a link to unlock the user account. sEmailAddress must be set if eUserTypeSSPR = EzsignUser sUserLoginname must be set if eUserTypeSSPR = Native # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.sspr_unlock_account_request_v1(sspr_unlock_account_request_v1_request, async_req=True)
>>> result = thread.get()
Args:
sspr_unlock_account_request_v1_request (SsprUnlockAccountRequestV1Request):
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_content_type (str/None): force body content-type.
Default is None and content-type will be predicted by allowed
content-types and body.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
None
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_content_type'] = kwargs.get(
'_content_type')
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['sspr_unlock_account_request_v1_request'] = \
sspr_unlock_account_request_v1_request
return self.sspr_unlock_account_request_v1_endpoint.call_with_http_info(**kwargs)
def sspr_unlock_account_v1(
self,
sspr_unlock_account_v1_request,
**kwargs
):
"""Unlock Account # noqa: E501
This endpoint unlocks the user account. sEmailAddress must be set if eUserTypeSSPR = EzsignUser sUserLoginname must be set if eUserTypeSSPR = Native # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.sspr_unlock_account_v1(sspr_unlock_account_v1_request, async_req=True)
>>> result = thread.get()
Args:
sspr_unlock_account_v1_request (SsprUnlockAccountV1Request):
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_content_type (str/None): force body content-type.
Default is None and content-type will be predicted by allowed
content-types and body.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
None
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_content_type'] = kwargs.get(
'_content_type')
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['sspr_unlock_account_v1_request'] = \
sspr_unlock_account_v1_request
return self.sspr_unlock_account_v1_endpoint.call_with_http_info(**kwargs)
def sspr_validate_token_v1(
    self,
    sspr_validate_token_v1_request,
    **kwargs
):
    """Validate Token  # noqa: E501

    Checks that a token is valid and not expired. sEmailAddress must be set
    if eUserTypeSSPR = EzsignUser; sUserLoginname must be set if
    eUserTypeSSPR = Native.  # noqa: E501

    Synchronous by default; pass async_req=True for an asynchronous request.

    >>> thread = api.sspr_validate_token_v1(sspr_validate_token_v1_request, async_req=True)
    >>> result = thread.get()

    Args:
        sspr_validate_token_v1_request (SsprValidateTokenV1Request):

    Keyword Args:
        _return_http_data_only (bool): response data only, without status
            code and headers. Default is True.
        _preload_content (bool): if False, return the raw
            urllib3.HTTPResponse without reading/decoding. Default is True.
        _request_timeout (int/float/tuple): total timeout, or a
            (connection, read) pair. Default is None.
        _check_input_type (bool): type-check data sent to the server.
            Default is True.
        _check_return_type (bool): type-check data received from the server.
            Default is True.
        _content_type (str/None): force body content-type. Default is None
            (predicted from allowed content-types and body).
        _host_index (int/None): index of the server to use. Default is read
            from the configuration.
        async_req (bool): execute request asynchronously.

    Returns:
        None
        If called asynchronously, returns the request thread.
    """
    # Fill in every option the endpoint expects, keeping caller-supplied
    # values and falling back to the documented defaults.
    option_defaults = (
        ('async_req', False),
        ('_return_http_data_only', True),
        ('_preload_content', True),
        ('_request_timeout', None),
        ('_check_input_type', True),
        ('_check_return_type', True),
        ('_content_type', None),
        ('_host_index', None),
    )
    for option, default in option_defaults:
        kwargs.setdefault(option, default)
    kwargs['sspr_validate_token_v1_request'] = sspr_validate_token_v1_request
    return self.sspr_validate_token_v1_endpoint.call_with_http_info(**kwargs)
| 37.973214
| 203
| 0.53673
| 2,861
| 29,771
| 5.28172
| 0.077246
| 0.035736
| 0.03375
| 0.021441
| 0.908014
| 0.853683
| 0.793726
| 0.75852
| 0.755079
| 0.749917
| 0
| 0.009384
| 0.387894
| 29,771
| 783
| 204
| 38.021711
| 0.819843
| 0.372678
| 0
| 0.6
| 0
| 0
| 0.252025
| 0.100526
| 0
| 0
| 0
| 0
| 0
| 1
| 0.013725
| false
| 0.054902
| 0.021569
| 0
| 0.04902
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 7
|
23b466bc97112f027838014abcd299ff96591d9d
| 22,576
|
py
|
Python
|
tests/integration/test_smoke.py
|
shawn-rusaw/aws-lambda-fsm-workflows
|
9fcf5af14bf0f4500d4a7e7b3e0eda00423c1d42
|
[
"Apache-2.0"
] | 21
|
2017-01-26T21:23:57.000Z
|
2021-08-07T02:56:49.000Z
|
tests/integration/test_smoke.py
|
shawn-rusaw/aws-lambda-fsm-workflows
|
9fcf5af14bf0f4500d4a7e7b3e0eda00423c1d42
|
[
"Apache-2.0"
] | 151
|
2016-11-29T05:09:33.000Z
|
2021-05-19T22:47:58.000Z
|
tests/integration/test_smoke.py
|
shawn-rusaw/aws-lambda-fsm-workflows
|
9fcf5af14bf0f4500d4a7e7b3e0eda00423c1d42
|
[
"Apache-2.0"
] | 17
|
2016-11-29T05:07:58.000Z
|
2021-05-04T21:22:29.000Z
|
# Copyright 2016-2020 Workiva Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# system imports
import mock
import threading
import time
# library imports
# application imports
from aws_lambda_fsm.fsm import FSM
from aws_lambda_fsm.config import get_current_configuration
from aws_lambda_fsm.constants import AWS as AWS_CONSTANTS
from tests.integration.utils import AWSStub
from tests.integration.utils import BaseFunctionalTest
from .actions import get_counter
from .actions import set_counter
AWS = AWSStub()
@mock.patch("aws_lambda_fsm.client.send_next_event_for_dispatch", wraps=AWS.send_next_event_for_dispatch)
@mock.patch("aws_lambda_fsm.fsm.send_next_event_for_dispatch", wraps=AWS.send_next_event_for_dispatch)
@mock.patch("aws_lambda_fsm.fsm.set_message_dispatched", wraps=AWS.set_message_dispatched)
@mock.patch("aws_lambda_fsm.fsm.get_message_dispatched", wraps=AWS.get_message_dispatched)
@mock.patch("aws_lambda_fsm.fsm.acquire_lease", wraps=AWS.acquire_lease)
@mock.patch("aws_lambda_fsm.fsm.release_lease", wraps=AWS.release_lease)
@mock.patch("aws_lambda_fsm.fsm.start_retries", wraps=AWS.start_retries)
@mock.patch("aws_lambda_fsm.fsm.increment_error_counters", wraps=AWS.increment_error_counters)
@mock.patch("aws_lambda_fsm.fsm.store_checkpoint", wraps=AWS.store_checkpoint)
class Test(BaseFunctionalTest):
    """End-to-end smoke tests that run whole state machines against the
    in-memory AWS stub.

    Every AWS-facing call is patched (wrapping the module-level ``AWS``
    stub), so each test method receives the stacked patch mocks via
    ``*args``. Tests assert on the stub's recorded message traces, cache
    contents and error counters after a machine has run to completion.
    """

    def setUp(self):
        """Reset the shared stub and (re)load the FSM configuration."""
        AWS.reset()
        FSM(get_current_configuration('tests/integration/fsm.yaml'))

    ################################################################################
    # START: machine_name="simple"
    ################################################################################

    def test_simple(self, *args):
        """A two-state machine runs cleanly with no retries or errors."""
        self._execute(AWS, "simple", {})
        # check messages
        expected = [
            (0, ('pseudo_init', 'pseudo_init', 0, 0), ()),
            (1, ('start', 'ok', 1, 0), ())
        ]
        self.assertEqual(expected, AWS.all_sources.trace())
        # check cache
        expected = {
            'correlation_id-0': True,
            'correlation_id-1': True
        }
        self.assertEqual(expected, AWS.primary_cache)
        self.assertEqual(expected, AWS.secondary_cache)
        expected = {
            'correlation_id-0': True,
            'correlation_id-1': True,
            'lease-correlation_id-0': True,
            'lease-correlation_id-1': True,
        }
        self.assertEqual(expected, AWS.all_caches)
        # check errors
        expected = []
        self.assertEqual(expected, AWS.errors.trace(raw=True))

    def test_simple_with_primary_failure(self, *args):
        """When the primary stream fails, dispatch falls back to the
        secondary stream and an error counter is recorded."""
        self._execute(AWS, "simple", {}, primary_stream_chaos=1.0)
        # check messages
        expected = [
            (0, ('pseudo_init', 'pseudo_init', 0, 0), ()),
            (1, ('start', 'ok', 1, 0), ())
        ]
        self.assertEqual(expected, AWS.all_sources.trace())
        expected = [
            (0, ('pseudo_init', 'pseudo_init', 0, 0), ()),
        ]
        self.assertEqual(expected, AWS.primary_stream_source.trace())
        expected = [
            (0, ('start', 'ok', 1, 0), ())
        ]
        self.assertEqual(expected, AWS.secondary_stream_source.trace())
        # check cache
        expected = {
            'correlation_id-0': True,
            'correlation_id-1': True
        }
        self.assertEqual(expected, AWS.primary_cache)
        self.assertEqual(expected, AWS.secondary_cache)
        expected = {
            'correlation_id-0': True,
            'correlation_id-1': True,
            'lease-correlation_id-0': True,
            'lease-correlation_id-1': True,
        }
        self.assertEqual(expected, AWS.all_caches)
        # check errors
        expected = [
            [{'error': 1}, {'current_state': 'pseudo_init', 'current_event': 'pseudo_init', 'machine_name': 'simple'}]
        ]
        self.assertEqual(expected, AWS.errors.trace(raw=True))

    def test_simple_with_failure(self, *args):
        """A transient action failure triggers exactly one retry message."""
        self._execute(AWS, "simple", {'fail_at': [(0, 0)]})
        # check messages
        expected = [
            (0, ('pseudo_init', 'pseudo_init', 0, 0), ()),
            (1, ('pseudo_init', 'pseudo_init', 0, 1), ()),  # retry
            (2, ('start', 'ok', 1, 0), ())
        ]
        self.assertEqual(expected, AWS.all_sources.trace())
        # check cache
        # (a duplicated copy of this primary_cache assertion was removed)
        expected = {
            'correlation_id-0': True,
            'correlation_id-1': True
        }
        self.assertEqual(expected, AWS.primary_cache)
        self.assertEqual(expected, AWS.secondary_cache)
        expected = {
            'correlation_id-0': True,
            'correlation_id-1': True,
            'lease-correlation_id-0': True,
            'lease-correlation_id-1': True,
        }
        self.assertEqual(expected, AWS.all_caches)
        # check errors
        expected = [
            [
                {'retry': 1},
                {'current_state': 'pseudo_init', 'current_event': 'pseudo_init', 'machine_name': 'simple'}
            ]
        ]
        self.assertEqual(expected, AWS.errors.trace(raw=True))

    def test_simple_with_failure_with_primary_retry_failure(self, *args):
        """A retry that cannot reach the primary retry source is routed to
        the secondary one; both retry and error counters fire."""
        self._execute(AWS, "simple", {'fail_at': [(0, 0)]}, primary_retry_chaos=1.0)
        # check messages
        expected = [
            (0, ('pseudo_init', 'pseudo_init', 0, 0), ()),
            (1, ('pseudo_init', 'pseudo_init', 0, 1), ()),  # retry
            (2, ('start', 'ok', 1, 0), ())
        ]
        self.assertEqual(expected, AWS.all_sources.trace())
        expected = []
        self.assertEqual(expected, AWS.primary_retry_source.trace())
        expected = [
            (0, ('pseudo_init', 'pseudo_init', 0, 1), ())
        ]
        self.assertEqual(expected, AWS.secondary_retry_source.trace())
        # check cache
        expected = {
            'correlation_id-0': True,
            'correlation_id-1': True
        }
        self.assertEqual(expected, AWS.primary_cache)
        self.assertEqual(expected, AWS.secondary_cache)
        expected = {
            'correlation_id-0': True,
            'correlation_id-1': True,
            'lease-correlation_id-0': True,
            'lease-correlation_id-1': True,
        }
        self.assertEqual(expected, AWS.all_caches)
        # check errors
        expected = [
            [
                {'retry': 1, 'error': 1},
                {'current_state': 'pseudo_init', 'current_event': 'pseudo_init', 'machine_name': 'simple'}
            ]
        ]
        self.assertEqual(expected, AWS.errors.trace(raw=True))

    ################################################################################
    # START: machine_name="looper"
    ################################################################################

    def test_looper(self, *args):
        """A self-looping machine increments the shared counter once per loop."""
        set_counter(0)
        self.assertEqual(0, get_counter())
        self._execute(AWS, "looper", {"loops": 3})
        self.assertEqual(3, get_counter())
        # check messages
        expected = [
            (0, ('pseudo_init', 'pseudo_init', 0, 0), (None,)),
            (1, ('start', 'ok', 1, 0), (1,)),
            (2, ('start', 'ok', 2, 0), (2,)),
            (3, ('start', 'done', 3, 0), (3,))
        ]
        self.assertEqual(expected, AWS.all_sources.trace(('counter',)))
        # check cache
        expected = {
            'correlation_id-0': True,
            'correlation_id-1': True,
            'correlation_id-2': True,
            'correlation_id-3': True
        }
        self.assertEqual(expected, AWS.primary_cache)
        self.assertEqual(expected, AWS.secondary_cache)
        expected = {
            'correlation_id-0': True,
            'correlation_id-1': True,
            'correlation_id-2': True,
            'correlation_id-3': True,
            'lease-correlation_id-0': True,
            'lease-correlation_id-1': True,
            'lease-correlation_id-2': True,
            'lease-correlation_id-3': True
        }
        self.assertEqual(expected, AWS.all_caches)
        # check errors
        expected = []
        self.assertEqual(expected, AWS.errors.trace(raw=True))

    def test_looper_with_primary_failure(self, *args):
        """With the primary stream down, every loop dispatch falls back to
        the secondary stream and records an error."""
        self._execute(AWS, "looper", {"loops": 3}, primary_stream_chaos=1.0)
        # check messages
        expected = [
            (0, ('pseudo_init', 'pseudo_init', 0, 0), (None,)),
            (1, ('start', 'ok', 1, 0), (1,)),
            (2, ('start', 'ok', 2, 0), (2,)),
            (3, ('start', 'done', 3, 0), (3,))
        ]
        self.assertEqual(expected, AWS.all_sources.trace(('counter',)))
        expected = [
            (0, ('pseudo_init', 'pseudo_init', 0, 0), (None,)),
        ]
        self.assertEqual(expected, AWS.primary_stream_source.trace(('counter',)))
        expected = [
            (0, ('start', 'ok', 1, 0), (1,)),
            (1, ('start', 'ok', 2, 0), (2,)),
            (2, ('start', 'done', 3, 0), (3,))
        ]
        self.assertEqual(expected, AWS.secondary_stream_source.trace(('counter',)))
        # check cache
        expected = {
            'correlation_id-0': True,
            'correlation_id-1': True,
            'correlation_id-2': True,
            'correlation_id-3': True
        }
        self.assertEqual(expected, AWS.primary_cache)
        self.assertEqual(expected, AWS.secondary_cache)
        expected = {
            'correlation_id-0': True,
            'correlation_id-1': True,
            'correlation_id-2': True,
            'correlation_id-3': True,
            'lease-correlation_id-0': True,
            'lease-correlation_id-1': True,
            'lease-correlation_id-2': True,
            'lease-correlation_id-3': True
        }
        self.assertEqual(expected, AWS.all_caches)
        # check errors
        expected = [
            [
                {'error': 1}, {'current_state': 'pseudo_init', 'current_event': 'pseudo_init', 'machine_name': 'looper'}
            ],
            [
                {'error': 1}, {'current_state': 'start', 'current_event': 'ok', 'machine_name': 'looper'}
            ],
            [
                {'error': 1}, {'current_state': 'start', 'current_event': 'ok', 'machine_name': 'looper'}
            ]
        ]
        self.assertEqual(expected, AWS.errors.trace(raw=True))

    def test_looper_with_failure(self, *args):
        """A mid-loop failure retries that step and then continues looping."""
        self._execute(AWS, "looper", {"loops": 3, 'fail_at': [(1, 0)]})
        # check messages
        expected = [
            (0, ('pseudo_init', 'pseudo_init', 0, 0), (None,)),
            (1, ('start', 'ok', 1, 0), (1,)),
            (2, ('start', 'ok', 1, 1), (1,)),  # retry
            (3, ('start', 'ok', 2, 0), (2,)),
            (4, ('start', 'done', 3, 0), (3,))
        ]
        self.assertEqual(expected, AWS.all_sources.trace(('counter',)))
        # check cache
        expected = {
            'correlation_id-0': True,
            'correlation_id-1': True,
            'correlation_id-2': True,
            'correlation_id-3': True
        }
        self.assertEqual(expected, AWS.primary_cache)
        self.assertEqual(expected, AWS.secondary_cache)
        expected = {
            'correlation_id-0': True,
            'correlation_id-1': True,
            'correlation_id-2': True,
            'correlation_id-3': True,
            'lease-correlation_id-0': True,
            'lease-correlation_id-1': True,
            'lease-correlation_id-2': True,
            'lease-correlation_id-3': True
        }
        self.assertEqual(expected, AWS.all_caches)
        # check errors
        expected = [
            [
                {'retry': 1},
                {'current_state': 'start', 'current_event': 'ok', 'machine_name': 'looper'}
            ]
        ]
        self.assertEqual(expected, AWS.errors.trace(raw=True))

    def test_looper_with_failure_with_primary_retry_failure(self, *args):
        """A mid-loop retry falls back to the secondary retry source when
        the primary retry source is unavailable."""
        self._execute(AWS, "looper", {"loops": 3, 'fail_at': [(1, 0)]}, primary_retry_chaos=1.0)
        # check messages
        expected = [
            (0, ('pseudo_init', 'pseudo_init', 0, 0), (None,)),
            (1, ('start', 'ok', 1, 0), (1,)),
            (2, ('start', 'ok', 1, 1), (1,)),  # retry
            (3, ('start', 'ok', 2, 0), (2,)),
            (4, ('start', 'done', 3, 0), (3,))
        ]
        self.assertEqual(expected, AWS.all_sources.trace(('counter',)))
        expected = []
        self.assertEqual(expected, AWS.primary_retry_source.trace(('counter',)))
        expected = [
            (0, ('start', 'ok', 1, 1), (1,))
        ]
        self.assertEqual(expected, AWS.secondary_retry_source.trace(('counter',)))
        # check cache
        expected = {
            'correlation_id-0': True,
            'correlation_id-1': True,
            'correlation_id-2': True,
            'correlation_id-3': True
        }
        self.assertEqual(expected, AWS.primary_cache)
        self.assertEqual(expected, AWS.secondary_cache)
        expected = {
            'correlation_id-0': True,
            'correlation_id-1': True,
            'correlation_id-2': True,
            'correlation_id-3': True,
            'lease-correlation_id-0': True,
            'lease-correlation_id-1': True,
            'lease-correlation_id-2': True,
            'lease-correlation_id-3': True
        }
        self.assertEqual(expected, AWS.all_caches)
        # check errors
        expected = [
            [
                {'retry': 1, 'error': 1},
                {'current_state': 'start', 'current_event': 'ok', 'machine_name': 'looper'}
            ]
        ]
        self.assertEqual(expected, AWS.errors.trace(raw=True))

    ################################################################################
    # START: machine_name="looper-local"
    ################################################################################

    def test_looper_local(self, *args):
        """Local-dispatch looping: after the first message, everything runs
        in-process, so only one message ever reaches the sources."""
        set_counter(0)
        self.assertEqual(0, get_counter())
        # only the first dispatch succeeds; the machine must keep looping locally
        AWS.add_callback('send_next_event_for_dispatch', mock.Mock(side_effect=([None] + [Exception()] * 100)))
        self._execute(AWS, "looper-local", {"loops": 3})
        self.assertEqual(3, get_counter())
        # check messages
        expected = [
            (0, ('pseudo_init', 'pseudo_init', 0, 0), (None,)),
        ]
        self.assertEqual(expected, AWS.all_sources.trace(('counter',)))
        # check cache
        expected = {
            'correlation_id-0': True
        }
        self.assertEqual(expected, AWS.primary_cache)
        self.assertEqual(expected, AWS.secondary_cache)
        expected = {
            'correlation_id-0': True,
            'lease-correlation_id-0': True
        }
        self.assertEqual(expected, AWS.all_caches)
        # check errors
        expected = []
        self.assertEqual(expected, AWS.errors.trace(raw=True))

    def test_looper_local_with_failure(self, *args):
        """Local-dispatch looping still completes when every action attempt
        fails once; a single retry message is emitted."""
        set_counter(0)
        self.assertEqual(0, get_counter())
        AWS.add_callback('send_next_event_for_dispatch', mock.Mock(side_effect=([None] + [Exception()] * 100)))
        self._execute(AWS, "looper-local", {"loops": 3, 'fail_at': [(i, 0) for i in range(100)]})
        self.assertEqual(3, get_counter())
        # check messages
        expected = [
            (0, ('pseudo_init', 'pseudo_init', 0, 0), (None,)),
            (1, ('pseudo_init', 'pseudo_init', 0, 1), (None,))  # retry
        ]
        self.assertEqual(expected, AWS.all_sources.trace(('counter',)))
        # check cache
        expected = {
            'correlation_id-0': True
        }
        self.assertEqual(expected, AWS.primary_cache)
        self.assertEqual(expected, AWS.secondary_cache)
        expected = {
            'correlation_id-0': True,
            'lease-correlation_id-0': True
        }
        self.assertEqual(expected, AWS.all_caches)
        # check errors
        expected = [
            [
                {'retry': 1},
                {'current_state': 'pseudo_init', 'current_event': 'pseudo_init', 'machine_name': 'looper-local'}
            ]
        ]
        self.assertEqual(expected, AWS.errors.trace(raw=True))

    ################################################################################
    # START: machine_name="looper-mixed"
    ################################################################################

    def test_looper_mixed(self, *args):
        """Mixed local/remote looping: two loop phases of 3 increments each."""
        set_counter(0)
        self.assertEqual(0, get_counter())
        self._execute(AWS, "looper-mixed", {"loops": 3})
        self.assertEqual(6, get_counter())
        # check messages
        expected = [
            (0, ('pseudo_init', 'pseudo_init', 0, 0), (None,)),
            (1, ('start', 'done', 1, 0), (3,)),
            (2, ('reset', 'done', 2, 0), (None,)),
            (3, ('loop', 'done', 3, 0), (3,))
        ]
        self.assertEqual(expected, AWS.all_sources.trace(('counter',)))
        # check cache
        expected = {
            'correlation_id-0': True,
            'correlation_id-1': True,
            'correlation_id-2': True,
            'correlation_id-3': True
        }
        self.assertEqual(expected, AWS.primary_cache)
        self.assertEqual(expected, AWS.secondary_cache)
        expected = {
            'correlation_id-0': True,
            'correlation_id-1': True,
            'correlation_id-2': True,
            'correlation_id-3': True,
            'lease-correlation_id-0': True,
            'lease-correlation_id-1': True,
            'lease-correlation_id-2': True,
            'lease-correlation_id-3': True
        }
        self.assertEqual(expected, AWS.all_caches)
        # check errors
        expected = []
        self.assertEqual(expected, AWS.errors.trace(raw=True))

    def test_looper_mixed_with_failure(self, *args):
        """Mixed looping with failures retries the two remote steps and
        still reaches the final counter value."""
        set_counter(0)
        self.assertEqual(0, get_counter())
        self._execute(AWS, "looper-mixed", {"loops": 3, 'fail_at': [(i, 0) for i in range(100)]})
        self.assertEqual(6, get_counter())
        # check messages
        expected = [
            (0, ('pseudo_init', 'pseudo_init', 0, 0), (None,)),
            (1, ('pseudo_init', 'pseudo_init', 0, 1), (None,)),  # retry
            (2, ('start', 'done', 1, 0), (3,)),
            (3, ('reset', 'done', 2, 0), (None,)),
            (4, ('reset', 'done', 2, 1), (None,)),  # retry
            (5, ('loop', 'done', 3, 0), (3,))
        ]
        self.assertEqual(expected, AWS.all_sources.trace(('counter',)))
        # check cache
        expected = {
            'correlation_id-0': True,
            'correlation_id-1': True,
            'correlation_id-2': True,
            'correlation_id-3': True
        }
        self.assertEqual(expected, AWS.primary_cache)
        self.assertEqual(expected, AWS.secondary_cache)
        expected = {
            'correlation_id-0': True,
            'correlation_id-1': True,
            'correlation_id-2': True,
            'correlation_id-3': True,
            'lease-correlation_id-0': True,
            'lease-correlation_id-1': True,
            'lease-correlation_id-2': True,
            'lease-correlation_id-3': True
        }
        self.assertEqual(expected, AWS.all_caches)
        # check errors
        expected = [
            [
                {'retry': 1},
                {'current_event': 'pseudo_init', 'current_state': 'pseudo_init', 'machine_name': 'looper-mixed'}
            ],
            [
                {'retry': 1},
                {'current_event': 'done', 'current_state': 'reset', 'machine_name': 'looper-mixed'}
            ]
        ]
        self.assertEqual(expected, AWS.errors.trace(raw=True))

    def test_looper_mixed_uses_queue(self, *args):
        """The mixed machine requires a working dispatch queue up front."""
        AWS.add_callback('send_next_event_for_dispatch', mock.Mock(side_effect=Exception()))
        self.assertRaises(Exception, self._execute, AWS, "looper-mixed", {"loops": 3})

    ################################################################################
    # START: machine_name="serialization"
    ################################################################################

    def test_serialization(self, *args):
        """Non-serializable context values are replaced by a placeholder
        marker rather than breaking dispatch."""
        self._execute(AWS, "serialization", {})
        # check messages
        expected = [
            (0, ('pseudo_init', 'pseudo_init', 0, 0), (None,)),
            (1, ('start', 'ok', 1, 0), ('<not_serializable>',)),
            (2, ('middle', 'ok', 2, 0), ('<not_serializable>',))
        ]
        self.assertEqual(expected, AWS.all_sources.trace(uvars={"error"}))

    ################################################################################
    # START: machine_name="longpause"
    ################################################################################

    def test_two_at_same_time(self, *args):
        """Two concurrent executions of the same machine: the second cannot
        acquire the lease until the first finishes, and its re-run of an
        already-completed step is suppressed."""
        thread1 = TestThread(self, "longpause", {'key': 'val1'})
        thread2 = TestThread(self, "longpause", {'key': 'val2'})
        thread1.start()
        time.sleep(2)
        thread2.start()
        thread1.join()
        thread2.join()
        expected = [
            (0, ('pseudo_init', 'pseudo_init', 0, 0), ('val1',)),
            (1, ('pseudo_init', 'pseudo_init', 0, 0), ('val2',)),  # both start
            (2, ('pseudo_init', 'pseudo_init', 0, 1), ('val2',)),  # second unable to acquire lease
            (3, ('start', 'ok', 1, 0), ('val1',)),
            (4, ('middle', 'ok', 2, 0), ('val1',)),  # first finished
            (5, ('start', 'ok', 1, 0), ('val2',))  # second gets lease, but that has already run
        ]
        self.assertEqual(expected, AWS.all_sources.trace(uvars={"key"}))
class TestThread(threading.Thread):
    """Background thread that drives one state-machine execution, so tests
    can run two machines concurrently."""

    def __init__(self, test, name, context):
        super().__init__()
        # Keep a handle to the test case (for its _execute helper), the
        # machine name, and the initial machine context.
        self.test = test
        self.name = name
        self.context = context

    def run(self):
        """Execute the named machine against the shared AWS stub."""
        self.test._execute(AWS, self.name, self.context)
class TestSqs(Test):
    """Re-runs the entire Test suite with SQS as the message transport."""
    MESSAGE_TYPE = AWS_CONSTANTS.SQS
class TestSns(Test):
    """Re-runs the entire Test suite with SNS as the message transport."""
    MESSAGE_TYPE = AWS_CONSTANTS.SNS
| 36.29582
| 120
| 0.526267
| 2,375
| 22,576
| 4.808
| 0.085053
| 0.118399
| 0.143007
| 0.16166
| 0.826079
| 0.804449
| 0.792977
| 0.784657
| 0.741921
| 0.71924
| 0
| 0.024413
| 0.285126
| 22,576
| 621
| 121
| 36.354267
| 0.683128
| 0.06374
| 0
| 0.617336
| 0
| 0
| 0.210584
| 0.060231
| 0
| 0
| 0
| 0
| 0.173362
| 1
| 0.038055
| false
| 0
| 0.021142
| 0
| 0.071882
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
23b5ade293d46ff8fc5136c015ccbac45f12a15f
| 3,614
|
py
|
Python
|
assignments/assignment5/train.py
|
nikitakogut/dlcourse_ai
|
bd5826c04331864e0c37c84cf33674438d3f9b01
|
[
"MIT"
] | 1
|
2019-03-27T09:18:47.000Z
|
2019-03-27T09:18:47.000Z
|
assignments/assignment5/train.py
|
nikitakogut/dlcourse_ai
|
bd5826c04331864e0c37c84cf33674438d3f9b01
|
[
"MIT"
] | null | null | null |
assignments/assignment5/train.py
|
nikitakogut/dlcourse_ai
|
bd5826c04331864e0c37c84cf33674438d3f9b01
|
[
"MIT"
] | null | null | null |
import numpy as np
import torch
from tqdm.auto import tqdm
def tqdm_enumerate(iter):
    """Yield ``(index, item)`` pairs from *iter* while showing a tqdm bar.

    Replaces the hand-rolled counter with the builtin ``enumerate`` —
    identical semantics (indices start at 0, items in order).

    NOTE: the parameter name shadows the builtin ``iter``; kept unchanged
    for backward compatibility with existing callers.
    """
    yield from enumerate(tqdm(iter))
def train_model(model, device, dataset, loss, optimizer, scheduler, num_epochs):
    '''
    Trains plain word2vec using cross-entropy loss, regenerating the dataset
    every epoch.

    Args:
        model: torch.nn.Module mapping a batch to per-class scores
        device: torch device (or device string) batches are moved to
        dataset: object exposing `samples`, `generate_dataset()` and the
            torch Dataset protocol; regenerated at the start of each epoch
        loss: criterion mapping (prediction, target) to a scalar loss
        optimizer: optimizer over model.parameters()
        scheduler: optional LR scheduler; a ReduceLROnPlateau instance is
            stepped with the epoch's average loss, any other scheduler is
            stepped without arguments, None disables scheduling
        num_epochs: number of epochs to train

    Returns:
        loss_history, train_history: per-epoch average loss and train accuracy
    '''
    loss_history = []
    train_history = []
    for epoch in range(num_epochs):
        model.train()
        loss_accum = 0
        correct_samples = 0
        total_samples = 0
        # Rebuild the sampled dataset so each epoch sees fresh pairs.
        del dataset.samples
        dataset.samples = []
        dataset.generate_dataset()
        train_loader = torch.utils.data.DataLoader(dataset, batch_size=20)
        for i_step, (x, y) in tqdm_enumerate(train_loader):
            x_gpu = x.to(device)
            y_gpu = y.to(device)
            prediction = model(x_gpu)
            loss_value = loss(prediction, y_gpu)
            optimizer.zero_grad()
            loss_value.backward()
            optimizer.step()
            _, indices = torch.max(prediction, 1)
            correct_samples += torch.sum(indices == y_gpu)
            total_samples += y.shape[0]
            loss_accum += loss_value.detach()
        # BUG FIX: average over the *number* of batches (i_step + 1), not the
        # last batch index -- the old `/ i_step` skewed the mean and produced
        # a division by zero (inf) for a single-batch epoch.
        ave_loss = loss_accum / (i_step + 1)
        train_accuracy = float(correct_samples) / total_samples
        loss_history.append(float(ave_loss))
        train_history.append(train_accuracy)
        print("Epoch %i, Average loss: %f, Train accuracy: %f" % (epoch+1, ave_loss, train_accuracy))
        if scheduler is not None:
            # ReduceLROnPlateau needs the monitored metric; others do not.
            if isinstance(scheduler, torch.optim.lr_scheduler.ReduceLROnPlateau):
                scheduler.step(ave_loss)
            else:
                scheduler.step()
    return loss_history, train_history
def train_neg_sample(model, device, dataset, loss, optimizer, scheduler, num_epochs):
    '''
    Trains word2vec with negative sampling, regenerating the dataset
    every epoch.

    Args:
        model: torch.nn.Module taking (input, output) index batches and
            returning per-pair scores; column 0 is scored for accuracy
        device: torch device (or device string) batches are moved to
        dataset: object exposing `samples`, `generate_dataset()` and the
            torch Dataset protocol, yielding (x, y, z) triples
        loss: criterion mapping (prediction, target) to a scalar loss
        optimizer: optimizer over model.parameters()
        scheduler: optional LR scheduler; a ReduceLROnPlateau instance is
            stepped with the epoch's average loss, any other scheduler is
            stepped without arguments, None disables scheduling
        num_epochs: number of epochs to train

    Returns:
        loss_history, train_history: per-epoch average loss and train accuracy
    '''
    loss_history = []
    train_history = []
    for epoch in range(num_epochs):
        model.train()
        loss_accum = 0
        correct_samples = 0
        total_samples = 0
        # Rebuild the sampled dataset so each epoch sees fresh pairs.
        del dataset.samples
        dataset.samples = []
        dataset.generate_dataset()
        train_loader = torch.utils.data.DataLoader(dataset, batch_size=20)
        for i_step, (x, y, z) in tqdm_enumerate(train_loader):
            x_gpu = x.to(device)
            y_gpu = y.to(device)
            z_gpu = z.to(device)
            prediction = model(x_gpu, y_gpu)
            loss_value = loss(prediction, z_gpu)
            optimizer.zero_grad()
            loss_value.backward()
            optimizer.step()
            loss_accum += loss_value.detach()
            # sigmoid(score) > .5 on the positive column counts as correct
            correct_samples += sum(1 / (1+np.exp(-prediction[:, 0].detach().cpu().numpy())) > .5)
            total_samples += y.shape[0]
        # BUG FIX: average over the *number* of batches (i_step + 1), not the
        # last batch index -- the old `/ i_step` skewed the mean and produced
        # a division by zero (inf) for a single-batch epoch.
        ave_loss = loss_accum / (i_step + 1)
        train_accuracy = float(correct_samples) / total_samples
        loss_history.append(float(ave_loss))
        train_history.append(train_accuracy)
        print("Epoch %i, Average loss: %f, Train accuracy: %f" % (epoch+1, ave_loss, train_accuracy))
        if scheduler is not None:
            # ReduceLROnPlateau needs the monitored metric; others do not.
            if isinstance(scheduler, torch.optim.lr_scheduler.ReduceLROnPlateau):
                scheduler.step(ave_loss)
            else:
                scheduler.step()
    return loss_history, train_history
| 32.558559
| 101
| 0.578583
| 418
| 3,614
| 4.782297
| 0.222488
| 0.044022
| 0.048024
| 0.069035
| 0.847924
| 0.78039
| 0.757379
| 0.757379
| 0.757379
| 0.652326
| 0
| 0.009492
| 0.329552
| 3,614
| 110
| 102
| 32.854545
| 0.815518
| 0.065578
| 0
| 0.74359
| 0
| 0
| 0.027694
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.038462
| false
| 0
| 0.038462
| 0
| 0.102564
| 0.025641
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
f1b82185af4b277720b92d0f1200c1bacbf8b5ea
| 15,772
|
py
|
Python
|
dxm/lib/masking_api/api/application_api.py
|
experiortec/dxm-toolkit
|
b2ab6189e163c62fa8d7251cd533d2a36430d44a
|
[
"Apache-2.0"
] | 5
|
2018-08-23T15:47:05.000Z
|
2022-01-19T23:38:18.000Z
|
dxm/lib/masking_api/api/application_api.py
|
experiortec/dxm-toolkit
|
b2ab6189e163c62fa8d7251cd533d2a36430d44a
|
[
"Apache-2.0"
] | 59
|
2018-10-15T10:37:00.000Z
|
2022-03-22T20:49:25.000Z
|
dxm/lib/masking_api/api/application_api.py
|
experiortec/dxm-toolkit
|
b2ab6189e163c62fa8d7251cd533d2a36430d44a
|
[
"Apache-2.0"
] | 12
|
2019-03-08T19:59:13.000Z
|
2021-12-16T03:28:04.000Z
|
# coding: utf-8
"""
Masking API
Schema for the Masking Engine API # noqa: E501
OpenAPI spec version: 5.1.8
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from dxm.lib.masking_api.api_client import ApiClient
class ApplicationApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
    """Store the client used for all HTTP calls, creating a default
    ApiClient when the caller does not supply one."""
    self.api_client = api_client if api_client is not None else ApiClient()
def create_application(self, body, **kwargs):  # noqa: E501
    """Create application  # noqa: E501

    Synchronous by default; pass ``async_req=True`` for an asynchronous
    request, in which case the request thread is returned.

    >>> thread = api.create_application(body, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param Application body: The application to create (required)
    :return: Application
    """
    # Callers of this convenience wrapper always get the data only.
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async_req'):
        # Asynchronous: hand back the request thread.
        return self.create_application_with_http_info(body, **kwargs)  # noqa: E501
    # Synchronous: unwrap and return the response data.
    return self.create_application_with_http_info(body, **kwargs)  # noqa: E501
def create_application_with_http_info(self, body, **kwargs):  # noqa: E501
    """Create application  # noqa: E501

    Low-level variant that builds and issues the POST /applications call.
    Synchronous by default; pass ``async_req=True`` for an asynchronous
    request, in which case the request thread is returned.

    >>> thread = api.create_application_with_http_info(body, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param Application body: The application to create (required)
    :return: Application
    """
    # Reject any keyword argument this endpoint does not understand.
    accepted = ('body', 'async_req', '_return_http_data_only',
                '_preload_content', '_request_timeout')  # noqa: E501
    for key in kwargs:
        if key not in accepted:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method create_application" % key
            )
    # verify the required parameter 'body' is set
    if self.api_client.client_side_validation and body is None:  # noqa: E501
        raise ValueError("Missing the required parameter `body` when calling `create_application`")  # noqa: E501

    collection_formats = {}
    path_params = {}
    query_params = []
    # HTTP header `Accept`
    header_params = {
        'Accept': self.api_client.select_header_accept(['application/json']),  # noqa: E501
    }
    form_params = []
    local_var_files = {}
    # Authentication setting
    auth_settings = ['api_key']  # noqa: E501

    return self.api_client.call_api(
        '/applications', 'POST',
        path_params,
        query_params,
        header_params,
        body=body,
        post_params=form_params,
        files=local_var_files,
        response_type='Application',  # noqa: E501
        auth_settings=auth_settings,
        async_req=kwargs.get('async_req'),
        _return_http_data_only=kwargs.get('_return_http_data_only'),
        _preload_content=kwargs.get('_preload_content', True),
        _request_timeout=kwargs.get('_request_timeout'),
        collection_formats=collection_formats)
def delete_application(self, application_id, **kwargs):  # noqa: E501
    """Delete application by ID  # noqa: E501

    Synchronous by default; pass ``async_req=True`` for an asynchronous
    request, in which case the request thread is returned.

    >>> thread = api.delete_application(application_id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param int application_id: The ID of the application to delete (required)
    :return: None
    """
    # Callers of this convenience wrapper always get the data only.
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async_req'):
        # Asynchronous: hand back the request thread.
        return self.delete_application_with_http_info(application_id, **kwargs)  # noqa: E501
    # Synchronous: unwrap and return the response data.
    return self.delete_application_with_http_info(application_id, **kwargs)  # noqa: E501
def delete_application_with_http_info(self, application_id, **kwargs):  # noqa: E501
    """Delete application by ID  # noqa: E501

    Low-level variant that builds and issues the
    DELETE /applications/{applicationId} call. Synchronous by default;
    pass ``async_req=True`` for an asynchronous request, in which case the
    request thread is returned.

    >>> thread = api.delete_application_with_http_info(application_id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param int application_id: The ID of the application to delete (required)
    :return: None
    """
    # Reject any keyword argument this endpoint does not understand.
    accepted = ('application_id', 'async_req', '_return_http_data_only',
                '_preload_content', '_request_timeout')  # noqa: E501
    for key in kwargs:
        if key not in accepted:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method delete_application" % key
            )
    # verify the required parameter 'application_id' is set
    if self.api_client.client_side_validation and application_id is None:  # noqa: E501
        raise ValueError("Missing the required parameter `application_id` when calling `delete_application`")  # noqa: E501

    collection_formats = {}
    path_params = {'applicationId': application_id}  # noqa: E501
    query_params = []
    # HTTP header `Accept`
    header_params = {
        'Accept': self.api_client.select_header_accept(['application/json']),  # noqa: E501
    }
    form_params = []
    local_var_files = {}
    # Authentication setting
    auth_settings = ['api_key']  # noqa: E501

    return self.api_client.call_api(
        '/applications/{applicationId}', 'DELETE',
        path_params,
        query_params,
        header_params,
        body=None,
        post_params=form_params,
        files=local_var_files,
        response_type=None,  # noqa: E501
        auth_settings=auth_settings,
        async_req=kwargs.get('async_req'),
        _return_http_data_only=kwargs.get('_return_http_data_only'),
        _preload_content=kwargs.get('_preload_content', True),
        _request_timeout=kwargs.get('_request_timeout'),
        collection_formats=collection_formats)
def get_all_applications(self, **kwargs): # noqa: E501
"""Get all applications # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_all_applications(async_req=True)
>>> result = thread.get()
:param async_req bool
:param int page_number: The page number for which to get applications. This will default to the first page if excluded
:param int page_size: The maximum number of objects to return. This will default to the DEFAULT_API_PAGE_SIZE property if not provided
:return: ApplicationList
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_all_applications_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.get_all_applications_with_http_info(**kwargs) # noqa: E501
return data
def get_all_applications_with_http_info(self, **kwargs): # noqa: E501
"""Get all applications # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_all_applications_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param int page_number: The page number for which to get applications. This will default to the first page if excluded
:param int page_size: The maximum number of objects to return. This will default to the DEFAULT_API_PAGE_SIZE property if not provided
:return: ApplicationList
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['page_number', 'page_size'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_all_applications" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'page_number' in params:
query_params.append(('page_number', params['page_number'])) # noqa: E501
if 'page_size' in params:
query_params.append(('page_size', params['page_size'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['api_key'] # noqa: E501
return self.api_client.call_api(
'/applications', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ApplicationList', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_application_by_id(self, application_id, **kwargs): # noqa: E501
"""Get application by ID # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_application_by_id(application_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int application_id: The ID of the application to get (required)
:return: Application
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_application_by_id_with_http_info(application_id, **kwargs) # noqa: E501
else:
(data) = self.get_application_by_id_with_http_info(application_id, **kwargs) # noqa: E501
return data
def get_application_by_id_with_http_info(self, application_id, **kwargs): # noqa: E501
"""Get application by ID # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_application_by_id_with_http_info(application_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int application_id: The ID of the application to get (required)
:return: Application
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['application_id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_application_by_id" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'application_id' is set
if self.api_client.client_side_validation and ('application_id' not in params or
params['application_id'] is None): # noqa: E501
raise ValueError("Missing the required parameter `application_id` when calling `get_application_by_id`") # noqa: E501
collection_formats = {}
path_params = {}
if 'application_id' in params:
path_params['applicationId'] = params['application_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['api_key'] # noqa: E501
return self.api_client.call_api(
'/applications/{applicationId}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Application', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
| 38.751843
| 142
| 0.614063
| 1,809
| 15,772
| 5.092869
| 0.090105
| 0.044285
| 0.024313
| 0.03126
| 0.922175
| 0.913492
| 0.893303
| 0.871052
| 0.871052
| 0.868447
| 0
| 0.01469
| 0.300786
| 15,772
| 406
| 143
| 38.847291
| 0.820729
| 0.327796
| 0
| 0.759259
| 1
| 0
| 0.174339
| 0.037358
| 0
| 0
| 0
| 0
| 0
| 1
| 0.041667
| false
| 0
| 0.018519
| 0
| 0.12037
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
7b2fd2aded75d23271f53e444445b04deea428a0
| 10,204
|
py
|
Python
|
test/feature/extract/test_nmf.py
|
ohtaman/pynm
|
b003962201e4270d0dab681ede37f2d8edd560f2
|
[
"MIT"
] | 1
|
2018-08-16T20:48:52.000Z
|
2018-08-16T20:48:52.000Z
|
test/feature/extract/test_nmf.py
|
ohtaman/pynm
|
b003962201e4270d0dab681ede37f2d8edd560f2
|
[
"MIT"
] | 5
|
2015-01-12T20:40:46.000Z
|
2017-11-17T01:27:41.000Z
|
test/feature/extract/test_nmf.py
|
ohtaman/pynm
|
b003962201e4270d0dab681ede37f2d8edd560f2
|
[
"MIT"
] | null | null | null |
# -*- coding:utf-8 -*-
import logging
import math
from nose.tools import *
import numpy
import numpy.linalg
from pynm.feature.extract import nmf
logger = logging.getLogger(__name__)
@istest
def can_treat_matrix_without_errors():
    # Smoke test: factorising a generic non-negative matrix must not raise.
    source = numpy.array([[1, 2, 3],
                          [0, 1, 7],
                          [7, 8, 1],
                          [9, 0, 1]])
    w, h = nmf.nmf(source)
    ok_(True, 'Failed to decomposit a matrix')
@istest
def result_is_positive_matrix():
    # Both factors returned by nmf.nmf must be element-wise non-negative,
    # for a generic matrix and for the all-zero matrix.
    matrix = numpy.array([[1, 2, 3],
                          [0, 1, 7],
                          [7, 8, 1],
                          [9, 0, 1]])
    w, h = nmf.nmf(matrix)
    ok_(numpy.amin(w) >= 0, 'W of a matrix is not positive')
    ok_(numpy.amin(h) >= 0, 'H of a matrix is not positive')
    matrix = numpy.zeros((4, 3))
    w, h = nmf.nmf(matrix)
    ok_(numpy.amin(w) >= 0, 'W of zero matrix is not positive')
    # Fixed copy-paste: this assertion checks H, so the message names H.
    ok_(numpy.amin(h) >= 0, 'H of zero matrix is not positive')
@istest
def can_reduce_dimension():
    # Requesting 2 components must yield W with shape 4x2 and H with 2x3.
    matrix = numpy.array([[1, 2, 3],
                          [0, 1, 7],
                          [7, 8, 1],
                          [9, 0, 1]])
    w, h = nmf.nmf(matrix, 2)
    eq_(w.shape, (4, 2), 'dim(W) is not correct')
    # Fixed copy-paste: the second check is about H, not W.
    eq_(h.shape, (2, 3), 'dim(H) is not correct')
@istest
def can_approx_original_matrix():
    # Mean (over 100 runs) of the max reconstruction error must stay small.
    target = numpy.array([[1, 2, 3],
                          [0, 1, 7],
                          [7, 8, 1],
                          [9, 0, 1]])
    diff = sum(numpy.amax(abs(target - w.dot(h)))
               for w, h in (nmf.nmf(target) for _ in range(100)))
    ok_(diff/100.0 < 0.12, 'NMF cannot apporximate a matrix (%s > 0.12)' % diff)
@istest
def can_approx_zero_matrix():
    # Factorising the zero matrix should reconstruct (near) zero.
    zeros = numpy.zeros((4, 3))
    diff = sum(numpy.amax(w.dot(h))
               for w, h in (nmf.nmf(zeros, max_iter=1000) for _ in range(100)))
    ok_(diff/100.0 < 0.12, 'NMF cannot apporximate zero matrix (%s > 0.12)' % diff)
@istest
def can_treat_matrix_without_errors_with_kl_divergent():
    # Smoke test: KL-divergence mode must not raise on a generic matrix.
    source = numpy.array([[1, 2, 3],
                          [0, 1, 7],
                          [7, 8, 1],
                          [9, 0, 1]])
    w, h = nmf.nmf(source, distance="kl")
    ok_(True, 'Failed to decomposit a matrix')
@istest
def result_is_positive_matrix_with_kl_divergent():
    # Factors must be non-negative in KL-divergence mode, for a generic
    # matrix and for the all-zero matrix.
    matrix = numpy.array([[1, 2, 3],
                          [0, 1, 7],
                          [7, 8, 1],
                          [9, 0, 1]])
    w, h = nmf.nmf(matrix, distance="kl")
    ok_(numpy.amin(w) >= 0, 'W of a matrix is not positive')
    ok_(numpy.amin(h) >= 0, 'H of a matrix is not positive')
    matrix = numpy.zeros((4, 3))
    w, h = nmf.nmf(matrix, distance="kl")
    ok_(numpy.amin(w) >= 0, 'W of zero matrix is not positive')
    # Fixed copy-paste: this assertion checks H, so the message names H.
    ok_(numpy.amin(h) >= 0, 'H of zero matrix is not positive')
@istest
def can_reduce_dimension_with_kl_divergent():
    # Requesting 2 components in KL mode must yield W: 4x2 and H: 2x3.
    matrix = numpy.array([[1, 2, 3],
                          [0, 1, 7],
                          [7, 8, 1],
                          [9, 0, 1]])
    w, h = nmf.nmf(matrix, 2, distance="kl")
    eq_(w.shape, (4, 2), 'dim(W) is not correct')
    # Fixed copy-paste: the second check is about H, not W.
    eq_(h.shape, (2, 3), 'dim(H) is not correct')
@istest
def can_approx_original_matrix_with_kl_divergent():
    # Mean (over 100 runs) of the max reconstruction error in KL mode.
    target = numpy.array([[1, 2, 3],
                          [0, 1, 7],
                          [7, 8, 1],
                          [9, 0, 1]])
    diff = sum(numpy.amax(abs(target - w.dot(h)))
               for w, h in (nmf.nmf(target, distance="kl") for _ in range(100)))
    ok_(diff/100.0 < 1, 'NMF cannot apporximate a matrix (%s > 1.)' % diff)
@istest
def can_approx_zero_matrix_with_kl_divergent():
    # In KL mode the zero matrix should still reconstruct to (near) zero.
    zeros = numpy.zeros((4, 3))
    diff = sum(numpy.amax(w.dot(h))
               for w, h in (nmf.nmf(zeros, max_iter=1000, distance="kl")
                            for _ in range(100)))
    ok_(diff/100.0 < 0.2, 'NMF cannot apporoximate zero matrix (%s > 0.2)' % diff)
@istest
def result_is_positive_matrix_with_random_init():
    # Factors must be non-negative when starting from a random init,
    # for a generic matrix and for the all-zero matrix.
    matrix = numpy.array([[1, 2, 3],
                          [0, 1, 7],
                          [7, 8, 1],
                          [9, 0, 1]])
    w, h = nmf.nmf(matrix, init=nmf.random_init)
    ok_(numpy.amin(w) >= 0, 'W of a matrix is not positive')
    ok_(numpy.amin(h) >= 0, 'H of a matrix is not positive')
    matrix = numpy.zeros((4, 3))
    w, h = nmf.nmf(matrix, init=nmf.random_init)
    ok_(numpy.amin(w) >= 0, 'W of zero matrix is not positive')
    # Fixed copy-paste: this assertion checks H, so the message names H.
    ok_(numpy.amin(h) >= 0, 'H of zero matrix is not positive')
@istest
def can_reduce_dimension_with_random_init():
    # Requesting 2 components with random init must yield W: 4x2 and H: 2x3.
    matrix = numpy.array([[1, 2, 3],
                          [0, 1, 7],
                          [7, 8, 1],
                          [9, 0, 1]])
    w, h = nmf.nmf(matrix, 2, init=nmf.random_init)
    eq_(w.shape, (4, 2), 'dim(W) is not correct')
    # Fixed copy-paste: the second check is about H, not W.
    eq_(h.shape, (2, 3), 'dim(H) is not correct')
@istest
def can_approx_original_matrix_with_random_init():
    # Mean (over 100 runs) of the max reconstruction error with random init.
    target = numpy.array([[1, 2, 3],
                          [0, 1, 7],
                          [7, 8, 1],
                          [9, 0, 1]])
    diff = sum(numpy.amax(abs(target - w.dot(h)))
               for w, h in (nmf.nmf(target, init=nmf.random_init)
                            for _ in range(100)))
    ok_(diff/100.0 < 0.12, 'NMF cannot apporximate a matrix (%s > 0.12)' % diff)
@istest
def can_approx_zero_matrix_with_random_init():
    # Random init on the zero matrix should reconstruct to (near) zero.
    zeros = numpy.zeros((4, 3))
    diff = sum(numpy.amax(w.dot(h))
               for w, h in (nmf.nmf(zeros, init=nmf.random_init, max_iter=1000)
                            for _ in range(100)))
    ok_(diff/100.0 < 0.2, 'NMF cannot apporoximate zero matrix (%s > 0.2)' % diff)
@istest
def can_treat_matrix_without_errors_with_kl_divergent_with_random_init():
    # Smoke test: KL mode with random init must not raise.
    source = numpy.array([[1, 2, 3],
                          [0, 1, 7],
                          [7, 8, 1],
                          [9, 0, 1]])
    w, h = nmf.nmf(source, init=nmf.random_init, distance="kl")
    ok_(True, 'Failed to decomposit a matrix')
@istest
def result_is_positive_matrix_with_kl_divergent_with_random_init():
    # Factors must be non-negative with KL distance and random init,
    # for a generic matrix and for the all-zero matrix.
    matrix = numpy.array([[1, 2, 3],
                          [0, 1, 7],
                          [7, 8, 1],
                          [9, 0, 1]])
    w, h = nmf.nmf(matrix, init=nmf.random_init, distance="kl")
    ok_(numpy.amin(w) >= 0, 'W of a matrix is not positive')
    ok_(numpy.amin(h) >= 0, 'H of a matrix is not positive')
    matrix = numpy.zeros((4, 3))
    w, h = nmf.nmf(matrix, init=nmf.random_init, distance="kl")
    ok_(numpy.amin(w) >= 0, 'W of zero matrix is not positive')
    # Fixed copy-paste: this assertion checks H, so the message names H.
    ok_(numpy.amin(h) >= 0, 'H of zero matrix is not positive')
@istest
def can_reduce_dimension_with_kl_divergent_with_random_init():
    # Requesting 2 components (KL, random init) must yield W: 4x2 and H: 2x3.
    matrix = numpy.array([[1, 2, 3],
                          [0, 1, 7],
                          [7, 8, 1],
                          [9, 0, 1]])
    w, h = nmf.nmf(matrix, dim=2, init=nmf.random_init, distance="kl")
    eq_(w.shape, (4, 2), 'dim(W) is not correct')
    # Fixed copy-paste: the second check is about H, not W.
    eq_(h.shape, (2, 3), 'dim(H) is not correct')
@istest
def can_approx_original_matrix_with_kl_divergent_with_random_init():
    # Mean (over 100 runs) of the max reconstruction error (KL, random init).
    target = numpy.array([[1, 2, 3],
                          [0, 1, 7],
                          [7, 8, 1],
                          [9, 0, 1]])
    diff = sum(numpy.amax(abs(target - w.dot(h)))
               for w, h in (nmf.nmf(target, init=nmf.random_init, distance="kl")
                            for _ in range(100)))
    ok_(diff/100.0 < 1, 'NMF cannot apporximate a matrix (%s > 1.0)' % diff)
@istest
def can_approx_zero_matrix_with_kl_divergent_with_random_init():
    # Zero matrix (KL, random init) should reconstruct to (near) zero.
    zeros = numpy.zeros((4, 3))
    diff = sum(numpy.amax(w.dot(h))
               for w, h in (nmf.nmf(zeros, init=nmf.random_init,
                                    max_iter=1000, distance="kl")
                            for _ in range(100)))
    ok_(diff/100.0 < 0.2, 'NMF cannot apporoximate zero matrix (%s > 0.2)' % diff)
@istest
def can_treat_matrix_without_errors_with_beta_divergence_with_random_init():
    # Smoke test: beta-divergence mode with random init must not raise.
    source = numpy.array([[1, 2, 3],
                          [0, 1, 7],
                          [7, 8, 1],
                          [9, 0, 1]])
    w, h = nmf.nmf(source, init=nmf.random_init, distance="beta")
    ok_(True, 'Failed to decomposit a matrix')
@istest
def result_is_positive_matrix_with_beta_divergence_with_random_init():
    # Factors must be non-negative with beta divergence and random init,
    # for a generic matrix and for the all-zero matrix.
    matrix = numpy.array([[1, 2, 3],
                          [0, 1, 7],
                          [7, 8, 1],
                          [9, 0, 1]])
    w, h = nmf.nmf(matrix, init=nmf.random_init, distance="beta")
    w_min = numpy.amin(w)
    h_min = numpy.amin(h)
    # Fixed copy-paste: this first pair checks the generic matrix, not the
    # zero matrix, so the messages say 'a matrix'.
    ok_(w_min >= 0, 'W of a matrix is not positive (%s < 0)' % w_min)
    ok_(h_min >= 0, 'H of a matrix is not positive (%s < 0)' % h_min)
    matrix = numpy.zeros((4, 3))
    w, h = nmf.nmf(matrix, init=nmf.random_init, distance="beta")
    w_min = numpy.amin(w)
    h_min = numpy.amin(h)
    ok_(w_min >= 0, 'W of zero matrix is not positive (%s < 0)' % w_min)
    ok_(h_min >= 0, 'H of zero matrix is not positive (%s < 0)' % h_min)
@istest
def can_reduce_dimension_with_beta_divergence_with_random_init():
    # Requesting 2 components (beta, random init) must yield W: 4x2, H: 2x3.
    matrix = numpy.array([[1, 2, 3],
                          [0, 1, 7],
                          [7, 8, 1],
                          [9, 0, 1]])
    w, h = nmf.nmf(matrix, dim=2, init=nmf.random_init, distance="beta")
    eq_(w.shape, (4, 2), 'dim(W) is not correct')
    # Fixed copy-paste: the second check is about H, not W.
    eq_(h.shape, (2, 3), 'dim(H) is not correct')
@istest
def can_approx_original_matrix_with_beta_divergence_with_random_init():
    # Mean (over 100 runs) of the max reconstruction error (beta, random init).
    target = numpy.array([[1, 2, 3],
                          [0, 1, 7],
                          [7, 8, 1],
                          [9, 0, 1]])
    diff = sum(numpy.amax(abs(target - w.dot(h)))
               for w, h in (nmf.nmf(target, init=nmf.random_init,
                                    distance="beta")
                            for _ in range(100)))
    ok_(diff/100.0 < 1, 'NMF cannot apporximate a matrix (%s > 1.0)' % diff)
@istest
def can_approx_zero_matrix_with_beta_divergence_with_random_init():
    # Zero matrix (beta, random init) should reconstruct to (near) zero.
    zeros = numpy.zeros((4, 3))
    diff = sum(numpy.amax(w.dot(h))
               for w, h in (nmf.nmf(zeros, init=nmf.random_init,
                                    max_iter=1000, distance="beta")
                            for _ in range(100)))
    ok_(diff/100.0 < 0.2, 'NMF cannot apporoximate zero matrix (%s > 0.2)' % diff)
| 33.346405
| 84
| 0.520776
| 1,548
| 10,204
| 3.271318
| 0.052972
| 0.016193
| 0.028633
| 0.045814
| 0.972156
| 0.968009
| 0.961888
| 0.951619
| 0.949645
| 0.94688
| 0
| 0.065085
| 0.322423
| 10,204
| 305
| 85
| 33.455738
| 0.667342
| 0.00196
| 0
| 0.826613
| 0
| 0
| 0.144106
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.096774
| false
| 0
| 0.024194
| 0
| 0.120968
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
9e6ece50ee01b0c747bf6669f828b9ca56a6989b
| 53,036
|
py
|
Python
|
src/dynamixel_sdk/dynamixel_item.py
|
lapo5/Dynamixel-Python
|
81e52621358078e240692da2919362ec71685aa8
|
[
"Apache-2.0"
] | 1
|
2018-09-16T06:16:01.000Z
|
2018-09-16T06:16:01.000Z
|
src/dynamixel_sdk/dynamixel_item.py
|
lapo5/Dynamixel-Python
|
81e52621358078e240692da2919362ec71685aa8
|
[
"Apache-2.0"
] | null | null | null |
src/dynamixel_sdk/dynamixel_item.py
|
lapo5/Dynamixel-Python
|
81e52621358078e240692da2919362ec71685aa8
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# coding: UTF-8
# This Python porting of the original Dynamixel Workbench Toolbox from ROBOTIS
# was written by Patrick Roncagliolo and Marco Lapolla as part of a project
# developed at the DIBRIS BIOLab of the University of Genoa, Italy.
import control_table_item as ct_item
# Type of Servo-Motors
# Model numbers as reported by each servo's Model_Number register.
# AX series (protocol 1.0)
AX_12A = 12
AX_12W = 300
AX_18A = 18
# RX series (protocol 1.0)
RX_10 = 10
RX_24F = 24
RX_28 = 28
RX_64 = 64
# EX series
# NOTE(review): value 107 (not 106) appears intentional — it matches the
# original ROBOTIS toolbox; confirm against the e-Manual before changing.
EX_106 = 107
# MX series; the *_2 names are the MX (2.0) firmware variants
MX_12W = 360
MX_28 = 29
MX_28_2 = 30
MX_64 = 310
MX_64_2 = 311
MX_106 = 320
MX_106_2 = 321
# X series (XL / XM / XH)
XL_320 = 350
XL430_W250 = 1060
XM430_W210 = 1030
XM430_W350 = 1020
XM540_W150 = 1130
XM540_W270 = 1120
XH430_V210 = 1050
XH430_V350 = 1040
XH430_W210 = 1010
XH430_W350 = 1000
# PRO series
PRO_L42_10_S300_R = 35072
PRO_L54_30_S400_R = 37928
PRO_L54_30_S500_R = 37896
PRO_L54_50_S290_R = 38176
PRO_L54_50_S500_R = 38152
PRO_M42_10_S260_R = 43288
PRO_M54_40_S250_R = 46096
PRO_M54_60_S250_R = 46352
PRO_H42_20_S300_R = 51200
PRO_H54_100_S500_R = 53768
PRO_H54_200_S500_R = 54024
# Control-table field widths, in bytes
BYTE = 1
WORD = 2
DWORD = 4
class ModelInfo:
    """Conversion constants for one Dynamixel servo family.

    Holds the ratios used to translate between raw register values and
    physical units (velocity, torque, radians).  All fields start as
    ``None``; call one of the ``set*Info`` methods, or
    :meth:`getModelInfo`, to load the constants for a family.
    """

    def __init__(self):
        self.velocity_to_value_ratio = None
        self.torque_to_current_value_ratio = None
        self.value_of_min_radian_position = None
        self.value_of_0_radian_position = None
        self.value_of_max_radian_position = None
        self.min_radian = None
        self.max_radian = None

    def _fill(self, velocity_ratio, torque_ratio,
              min_position, zero_position, max_position,
              min_radian, max_radian):
        # Shared setter: the servo families differ only in these seven
        # numbers, so every set*Info method delegates here (DRY).
        self.velocity_to_value_ratio = velocity_ratio
        self.torque_to_current_value_ratio = torque_ratio
        self.value_of_min_radian_position = min_position
        self.value_of_0_radian_position = zero_position
        self.value_of_max_radian_position = max_position
        self.min_radian = min_radian
        self.max_radian = max_radian

    def setAXInfo(self):
        self._fill(86.03, None, 0, 512, 1023, -2.61799, 2.61799)

    def setRXInfo(self):
        self._fill(86.03, None, 0, 512, 1023, -2.61799, 2.61799)

    def setEXInfo(self):
        self._fill(86.03, None, 0, 2048, 4095, -2.18969008, 2.18969008)

    def setMXInfo(self):
        self._fill(86.81, None, 0, 2048, 4095, -3.14159265, 3.14159265)

    def setMX2Info(self):
        self._fill(41.70, None, 0, 2048, 4095, -3.14159265, 3.14159265)

    def setExtMXInfo(self):
        self._fill(86.81, None, 0, 2048, 4095, -3.14159265, 3.14159265)

    def setExtMX2Info(self):
        self._fill(41.70, None, 0, 2048, 4095, -3.14159265, 3.14159265)

    def setXL320Info(self):
        self._fill(86.03, None, 0, 512, 1023, -2.61799, 2.61799)

    def setXLInfo(self):
        self._fill(41.70, None, 0, 2048, 4095, -3.14159265, 3.14159265)

    def setXMInfo(self):
        # XM is the only family with a torque/current conversion ratio set.
        self._fill(41.70, 149.795386991, 0, 2048, 4095,
                   -3.14159265, 3.14159265)

    def setExtXMInfo(self):
        self._fill(41.70, None, 0, 2048, 4095, -3.14159265, 3.14159265)

    def setXHInfo(self):
        self._fill(41.71, None, 0, 2048, 4095, -3.14159265, 3.14159265)

    def setPROInfo(self):
        # PRO servos report signed positions centred on zero.
        self._fill(4792.8, None, -250961, 0, 250961,
                   -3.14159265, 3.14159265)

    def getModelInfo(self, num):
        """Load the constants for model number ``num`` and return ``self``.

        Unknown model numbers fall back to the XM family (original
        behaviour preserved).
        """
        if num in [AX_12A, AX_12W, AX_18A]:
            self.setAXInfo()
        elif num in [RX_10, RX_24F, RX_28, RX_64]:
            self.setRXInfo()
        elif num == EX_106:
            self.setEXInfo()
        elif num in [MX_12W, MX_28]:
            self.setMXInfo()
        elif num in [MX_64, MX_106]:
            self.setExtMXInfo()
        elif num == MX_28_2:
            self.setMX2Info()
        elif num in [MX_64_2, MX_106_2]:
            self.setExtMX2Info()
        elif num == XL_320:
            self.setXL320Info()
        elif num == XL430_W250:
            self.setXLInfo()
        elif num in [XM430_W210, XM430_W350]:
            self.setXMInfo()
        elif num in [XM540_W150, XM540_W270]:
            self.setExtXMInfo()
        elif num in [XH430_V210, XH430_V350, XH430_W210, XH430_W350]:
            self.setXHInfo()
        elif num in [PRO_L42_10_S300_R, PRO_L54_30_S400_R, PRO_L54_30_S500_R,
                     PRO_L54_50_S290_R, PRO_L54_50_S500_R, PRO_M42_10_S260_R,
                     PRO_M54_40_S250_R, PRO_M54_60_S250_R, PRO_H42_20_S300_R,
                     PRO_H54_100_S500_R, PRO_H54_200_S500_R]:
            self.setPROInfo()
        else:
            self.setXMInfo()
        return self
class ControlTable:
    def __init__(self):
        # Ordered list of ControlTableItem entries; populated by one of
        # the set*Item methods for the selected servo family.
        self._item = []
def setAXItem(self):
self._item = []
self._item.append(ct_item.ControlTableItem(0, "Model_Number", WORD))
self._item.append(ct_item.ControlTableItem(2, "Firmware_Version", BYTE))
self._item.append(ct_item.ControlTableItem(3, "ID", BYTE))
self._item.append(ct_item.ControlTableItem(4, "Baud_Rate", BYTE))
self._item.append(ct_item.ControlTableItem(5, "Return_Delay_Time", BYTE))
self._item.append(ct_item.ControlTableItem(6, "CW_Angle_Limit", WORD))
self._item.append(ct_item.ControlTableItem(8, "CCW_Angle_Limit", WORD))
self._item.append(ct_item.ControlTableItem(11, "Temperature_Limit", BYTE))
self._item.append(ct_item.ControlTableItem(12, "Min_Voltage_Limit", BYTE))
self._item.append(ct_item.ControlTableItem(13, "Max_Voltage_Limit", BYTE))
self._item.append(ct_item.ControlTableItem(14, "Max_Torque", WORD))
self._item.append(ct_item.ControlTableItem(16, "Status_Return_Level", BYTE))
self._item.append(ct_item.ControlTableItem(17, "Alarm_LED", BYTE))
self._item.append(ct_item.ControlTableItem(18, "Shutdown", BYTE))
self._item.append(ct_item.ControlTableItem(24, "Torque_Enable", BYTE))
self._item.append(ct_item.ControlTableItem(25, "LED", BYTE))
self._item.append(ct_item.ControlTableItem(26, "CW_Compliance_Margin", BYTE))
self._item.append(ct_item.ControlTableItem(27, "CCW_Compliance_Margin", BYTE))
self._item.append(ct_item.ControlTableItem(28, "CW_Compliance_Slope", BYTE))
self._item.append(ct_item.ControlTableItem(29, "CCW_Compliance_Slope", BYTE))
self._item.append(ct_item.ControlTableItem(30, "Goal_Position", WORD))
self._item.append(ct_item.ControlTableItem(32, "Moving_Speed", WORD))
self._item.append(ct_item.ControlTableItem(34, "Torque_Limit", WORD))
self._item.append(ct_item.ControlTableItem(36, "Present_Position", WORD))
self._item.append(ct_item.ControlTableItem(38, "Present_Speed", WORD))
self._item.append(ct_item.ControlTableItem(40, "Present_Load", WORD))
self._item.append(ct_item.ControlTableItem(42, "Present_Voltage", BYTE))
self._item.append(ct_item.ControlTableItem(43, "Present_Temperature", BYTE))
self._item.append(ct_item.ControlTableItem(44, "Registered", BYTE))
self._item.append(ct_item.ControlTableItem(46, "Moving", BYTE))
self._item.append(ct_item.ControlTableItem(47, "Lock", BYTE))
self._item.append(ct_item.ControlTableItem(48, "Punch", WORD))
def setRXItem(self):
self._item = []
self._item.append(ct_item.ControlTableItem(0, "Model_Number", WORD))
self._item.append(ct_item.ControlTableItem(2, "Firmware_Version", BYTE))
self._item.append(ct_item.ControlTableItem(3, "ID", BYTE))
self._item.append(ct_item.ControlTableItem(4, "Baud_Rate", BYTE))
self._item.append(ct_item.ControlTableItem(5, "Return_Delay_Time", BYTE))
self._item.append(ct_item.ControlTableItem(6, "CW_Angle_Limit", WORD))
self._item.append(ct_item.ControlTableItem(8, "CCW_Angle_Limit", WORD))
self._item.append(ct_item.ControlTableItem(11, "Temperature_Limit", BYTE))
self._item.append(ct_item.ControlTableItem(12, "Min_Voltage_Limit", BYTE))
self._item.append(ct_item.ControlTableItem(13, "Max_Voltage_Limit", BYTE))
self._item.append(ct_item.ControlTableItem(14, "Max_Torque", WORD))
self._item.append(ct_item.ControlTableItem(16, "Status_Return_Level", BYTE))
self._item.append(ct_item.ControlTableItem(17, "Alarm_LED", BYTE))
self._item.append(ct_item.ControlTableItem(18, "Shutdown", BYTE))
self._item.append(ct_item.ControlTableItem(24, "Torque_Enable", BYTE))
self._item.append(ct_item.ControlTableItem(25, "LED", BYTE))
self._item.append(ct_item.ControlTableItem(26, "CW_Compliance_Margin", BYTE))
self._item.append(ct_item.ControlTableItem(27, "CCW_Compliance_Margin", BYTE))
self._item.append(ct_item.ControlTableItem(28, "CW_Compliance_Slope", BYTE))
self._item.append(ct_item.ControlTableItem(29, "CCW_Compliance_Slope", BYTE))
self._item.append(ct_item.ControlTableItem(30, "Goal_Position", WORD))
self._item.append(ct_item.ControlTableItem(32, "Moving_Speed", WORD))
self._item.append(ct_item.ControlTableItem(34, "Torque_Limit", WORD))
self._item.append(ct_item.ControlTableItem(36, "Present_Position", WORD))
self._item.append(ct_item.ControlTableItem(38, "Present_Speed", WORD))
self._item.append(ct_item.ControlTableItem(40, "Present_Load", WORD))
self._item.append(ct_item.ControlTableItem(42, "Present_Voltage", BYTE))
self._item.append(ct_item.ControlTableItem(43, "Present_Temperature", BYTE))
self._item.append(ct_item.ControlTableItem(44, "Registered", BYTE))
self._item.append(ct_item.ControlTableItem(46, "Moving", BYTE))
self._item.append(ct_item.ControlTableItem(47, "Lock", BYTE))
self._item.append(ct_item.ControlTableItem(48, "Punch", WORD))
def setEXItem(self):
self._item = []
self._item.append(ct_item.ControlTableItem(0, "Model_Number", WORD))
self._item.append(ct_item.ControlTableItem(2, "Firmware_Version", BYTE))
self._item.append(ct_item.ControlTableItem(3, "ID", BYTE))
self._item.append(ct_item.ControlTableItem(4, "Baud_Rate", BYTE))
self._item.append(ct_item.ControlTableItem(5, "Return_Delay_Time", BYTE))
self._item.append(ct_item.ControlTableItem(6, "CW_Angle_Limit", WORD))
self._item.append(ct_item.ControlTableItem(8, "CCW_Angle_Limit", WORD))
self._item.append(ct_item.ControlTableItem(10, "Drive_Mode", BYTE))
self._item.append(ct_item.ControlTableItem(11, "Temperature_Limit", BYTE))
self._item.append(ct_item.ControlTableItem(12, "Min_Voltage_Limit", BYTE))
self._item.append(ct_item.ControlTableItem(13, "Max_Voltage_Limit", BYTE))
self._item.append(ct_item.ControlTableItem(14, "Max_Torque", WORD))
self._item.append(ct_item.ControlTableItem(16, "Status_Return_Level", BYTE))
self._item.append(ct_item.ControlTableItem(17, "Alarm_LED", BYTE))
self._item.append(ct_item.ControlTableItem(18, "Shutdown", BYTE))
self._item.append(ct_item.ControlTableItem(24, "Torque_Enable", BYTE))
self._item.append(ct_item.ControlTableItem(25, "LED", BYTE))
self._item.append(ct_item.ControlTableItem(26, "CW_Compliance_Margin", BYTE))
self._item.append(ct_item.ControlTableItem(27, "CCW_Compliance_Margin", BYTE))
self._item.append(ct_item.ControlTableItem(28, "CW_Compliance_Slope", BYTE))
self._item.append(ct_item.ControlTableItem(29, "CCW_Compliance_Slope", BYTE))
self._item.append(ct_item.ControlTableItem(30, "Goal_Position", WORD))
self._item.append(ct_item.ControlTableItem(32, "Moving_Speed", WORD))
self._item.append(ct_item.ControlTableItem(34, "Torque_Limit", WORD))
self._item.append(ct_item.ControlTableItem(36, "Present_Position", WORD))
self._item.append(ct_item.ControlTableItem(38, "Present_Speed", WORD))
self._item.append(ct_item.ControlTableItem(40, "Present_Load", WORD))
self._item.append(ct_item.ControlTableItem(42, "Present_Voltage", BYTE))
self._item.append(ct_item.ControlTableItem(43, "Present_Temperature", BYTE))
self._item.append(ct_item.ControlTableItem(44, "Registered", BYTE))
self._item.append(ct_item.ControlTableItem(46, "Moving", BYTE))
self._item.append(ct_item.ControlTableItem(47, "Lock", BYTE))
self._item.append(ct_item.ControlTableItem(48, "Punch", WORD))
self._item.append(ct_item.ControlTableItem(56, "Sensored_Current", WORD))
def setMXItem(self):
self._item = []
self._item.append(ct_item.ControlTableItem(0, "Model_Number", WORD))
self._item.append(ct_item.ControlTableItem(2, "Firmware_Version", BYTE))
self._item.append(ct_item.ControlTableItem(3, "ID", BYTE))
self._item.append(ct_item.ControlTableItem(4, "Baud_Rate", BYTE))
self._item.append(ct_item.ControlTableItem(5, "Return_Delay_Time", BYTE))
self._item.append(ct_item.ControlTableItem(6, "CW_Angle_Limit", WORD))
self._item.append(ct_item.ControlTableItem(8, "CCW_Angle_Limit", WORD))
self._item.append(ct_item.ControlTableItem(11, "Temperature_Limit", BYTE))
self._item.append(ct_item.ControlTableItem(12, "Min_Voltage_Limit", BYTE))
self._item.append(ct_item.ControlTableItem(13, "Max_Voltage_Limit", BYTE))
self._item.append(ct_item.ControlTableItem(14, "Max_Torque", WORD))
self._item.append(ct_item.ControlTableItem(16, "Status_Return_Level", BYTE))
self._item.append(ct_item.ControlTableItem(17, "Alarm_LED", BYTE))
self._item.append(ct_item.ControlTableItem(18, "Shutdown", BYTE))
self._item.append(ct_item.ControlTableItem(20, "Multi_Turn_Offset", WORD))
self._item.append(ct_item.ControlTableItem(22, "Resolution_Divider", BYTE))
self._item.append(ct_item.ControlTableItem(24, "Torque_Enable", BYTE))
self._item.append(ct_item.ControlTableItem(25, "LED", BYTE))
self._item.append(ct_item.ControlTableItem(26, "D_gain", BYTE))
self._item.append(ct_item.ControlTableItem(27, "I_gain", BYTE))
self._item.append(ct_item.ControlTableItem(28, "P_gain", BYTE))
self._item.append(ct_item.ControlTableItem(30, "Goal_Position", WORD))
self._item.append(ct_item.ControlTableItem(32, "Moving_Speed", WORD))
self._item.append(ct_item.ControlTableItem(34, "Torque_Limit", WORD))
self._item.append(ct_item.ControlTableItem(36, "Present_Position", WORD))
self._item.append(ct_item.ControlTableItem(38, "Present_Speed", WORD))
self._item.append(ct_item.ControlTableItem(40, "Present_Load", WORD))
self._item.append(ct_item.ControlTableItem(42, "Present_Voltage", BYTE))
self._item.append(ct_item.ControlTableItem(43, "Present_Temperature", BYTE))
self._item.append(ct_item.ControlTableItem(44, "Registered", BYTE))
self._item.append(ct_item.ControlTableItem(46, "Moving", BYTE))
self._item.append(ct_item.ControlTableItem(47, "Lock", BYTE))
self._item.append(ct_item.ControlTableItem(48, "Punch", WORD))
self._item.append(ct_item.ControlTableItem(73, "Goal_Acceleration", BYTE))
def setMX2Item(self):
    """Load the control table for MX-series protocol-2.0 servos without
    current control (MX-28 2.0).

    Each entry is (address, item name, data size) straight from the
    ROBOTIS control table; self._item is rebuilt from scratch.
    """
    layout = (
        (0, "Model_Number", WORD),
        (6, "Firmware_Version", BYTE),
        (7, "ID", BYTE),
        (8, "Baud_Rate", BYTE),
        (9, "Return_Delay_Time", BYTE),
        (10, "Drive_Mode", BYTE),
        (11, "Operating_Mode", BYTE),
        (12, "Secondary_ID", BYTE),
        (13, "Protocol_Version", BYTE),
        (20, "Homing_Offset", DWORD),
        (24, "Moving_Threshold", DWORD),
        (31, "Temperature_Limit", BYTE),
        (32, "Max_Voltage_Limit", WORD),
        (34, "Min_Voltage_Limit", WORD),
        (36, "PWM_Limit", WORD),
        (40, "Acceleration_Limit", DWORD),
        (44, "Velocity_Limit", DWORD),
        (48, "Max_Position_Limit", DWORD),
        (52, "Min_Position_Limit", DWORD),
        (63, "Shutdown", BYTE),
        (64, "Torque_Enable", BYTE),
        (65, "LED", BYTE),
        (68, "Status_Return_Level", BYTE),
        (69, "Registered_Instruction", BYTE),
        (70, "Hardware_Error_Status", BYTE),
        (76, "Velocity_I_Gain", WORD),
        (78, "Velocity_P_Gain", WORD),
        (80, "Position_D_Gain", WORD),
        (82, "Position_I_Gain", WORD),
        (84, "Position_P_Gain", WORD),
        (88, "Feedforward_2nd_Gain", WORD),
        (90, "Feedforward_1st_Gain", WORD),
        (98, "Bus_Watchdog", BYTE),
        (100, "Goal_PWM", WORD),
        (104, "Goal_Velocity", DWORD),
        (108, "Profile_Acceleration", DWORD),
        (112, "Profile_Velocity", DWORD),
        (116, "Goal_Position", DWORD),
        (120, "Realtime_Tick", WORD),
        (122, "Moving", BYTE),
        (123, "Moving_Status", BYTE),
        (124, "Present_PWM", WORD),
        (126, "Present_Load", WORD),
        (128, "Present_Velocity", DWORD),
        (132, "Present_Position", DWORD),
        (136, "Velocity_Trajectory", DWORD),
        (140, "Position_Trajectory", DWORD),
        (144, "Present_Input_Voltage", WORD),
        (146, "Present_Temperature", BYTE),
    )
    self._item = [ct_item.ControlTableItem(addr, name, size)
                  for addr, name, size in layout]
def setExtMXItem(self):
    """Load the control table for extended MX-series protocol-1.0 servos
    with torque control (MX-64 / MX-106).

    Each entry is (address, item name, data size); self._item is rebuilt
    from scratch.
    """
    layout = (
        (0, "Model_Number", WORD),
        (2, "Firmware_Version", BYTE),
        (3, "ID", BYTE),
        (4, "Baud_Rate", BYTE),
        (5, "Return_Delay_Time", BYTE),
        (6, "CW_Angle_Limit", WORD),
        (8, "CCW_Angle_Limit", WORD),
        (11, "Temperature_Limit", BYTE),
        (12, "Min_Voltage_Limit", BYTE),
        (13, "Max_Voltage_Limit", BYTE),
        (14, "Max_Torque", WORD),
        (16, "Status_Return_Level", BYTE),
        (17, "Alarm_LED", BYTE),
        (18, "Shutdown", BYTE),
        (20, "Multi_Turn_Offset", WORD),
        (22, "Resolution_Divider", BYTE),
        (24, "Torque_Enable", BYTE),
        (25, "LED", BYTE),
        (26, "D_gain", BYTE),
        (27, "I_gain", BYTE),
        (28, "P_gain", BYTE),
        (30, "Goal_Position", WORD),
        (32, "Moving_Speed", WORD),
        (34, "Torque_Limit", WORD),
        (36, "Present_Position", WORD),
        (38, "Present_Speed", WORD),
        (40, "Present_Load", WORD),
        (42, "Present_Voltage", BYTE),
        (43, "Present_Temperature", BYTE),
        (44, "Registered", BYTE),
        (46, "Moving", BYTE),
        (47, "Lock", BYTE),
        (48, "Punch", WORD),
        (68, "Current", WORD),
        (70, "Torque_Control_Mode_Enable", BYTE),
        (71, "Goal_Torque", WORD),
        (73, "Goal_Acceleration", BYTE),
    )
    self._item = [ct_item.ControlTableItem(addr, name, size)
                  for addr, name, size in layout]
def setExtMX2Item(self):
    """Load the control table for extended MX-series protocol-2.0 servos
    with current control (MX-64 2.0 / MX-106 2.0).

    Fixes over the previous version:
    - self._item is now reset here (it was missing, so items from a
      previously-selected model accumulated in the list).
    - Current_Limit was registered at address 40, colliding with
      Acceleration_Limit; the ROBOTIS control table puts it at 38.
    - "Present_Input Voltage" typo corrected to "Present_Input_Voltage",
      matching every sibling table.
    """
    layout = (
        (0, "Model_Number", WORD),
        (6, "Firmware_Version", BYTE),
        (7, "ID", BYTE),
        (8, "Baud_Rate", BYTE),
        (9, "Return_Delay_Time", BYTE),
        (10, "Drive_Mode", BYTE),
        (11, "Operating_Mode", BYTE),
        (12, "Secondary_ID", BYTE),
        (13, "Protocol_Version", BYTE),
        (20, "Homing_Offset", DWORD),
        (24, "Moving_Threshold", DWORD),
        (31, "Temperature_Limit", BYTE),
        (32, "Max_Voltage_Limit", WORD),
        (34, "Min_Voltage_Limit", WORD),
        (36, "PWM_Limit", WORD),
        (38, "Current_Limit", WORD),   # was 40, which collided with Acceleration_Limit
        (40, "Acceleration_Limit", DWORD),
        (44, "Velocity_Limit", DWORD),
        (48, "Max_Position_Limit", DWORD),
        (52, "Min_Position_Limit", DWORD),
        (63, "Shutdown", BYTE),
        (64, "Torque_Enable", BYTE),
        (65, "LED", BYTE),
        (68, "Status_Return_Level", BYTE),
        (69, "Registered_Instruction", BYTE),
        (70, "Hardware_Error_Status", BYTE),
        (76, "Velocity_I_Gain", WORD),
        (78, "Velocity_P_Gain", WORD),
        (80, "Position_D_Gain", WORD),
        (82, "Position_I_Gain", WORD),
        (84, "Position_P_Gain", WORD),
        (88, "Feedforward_2nd_Gain", WORD),
        (90, "Feedforward_1st_Gain", WORD),
        (98, "Bus_Watchdog", BYTE),
        (100, "Goal_PWM", WORD),
        (102, "Goal_Current", WORD),
        (104, "Goal_Velocity", DWORD),
        (108, "Profile_Acceleration", DWORD),
        (112, "Profile_Velocity", DWORD),
        (116, "Goal_Position", DWORD),
        (120, "Realtime_Tick", WORD),
        (122, "Moving", BYTE),
        (123, "Moving_Status", BYTE),
        (124, "Present_PWM", WORD),
        (126, "Present_Current", WORD),
        (128, "Present_Velocity", DWORD),
        (132, "Present_Position", DWORD),
        (136, "Velocity_Trajectory", DWORD),
        (140, "Position_Trajectory", DWORD),
        (144, "Present_Input_Voltage", WORD),
        (146, "Present_Temperature", BYTE),
    )
    self._item = [ct_item.ControlTableItem(addr, name, size)
                  for addr, name, size in layout]
def setXL320Item(self):
    """Load the control table for the XL-320 servo.

    Fix: self._item is now reset here. The previous version only
    appended, so entries from a previously-selected model stayed in the
    list (or an AttributeError was raised if no setter had run yet) —
    every other set*Item method starts with a fresh list.
    """
    layout = (
        (0, "Model_Number", WORD),
        (2, "Firmware_Version", BYTE),
        (3, "ID", BYTE),
        (4, "Baud_Rate", BYTE),
        (5, "Return_Delay_Time", BYTE),
        (6, "CW_Angle_Limit", WORD),
        (8, "CCW_Angle_Limit", WORD),
        (11, "Control_Mode", BYTE),
        (12, "Temperature_Limit", BYTE),
        (13, "Min_Voltage_Limit", BYTE),
        (14, "Max_Voltage_Limit", BYTE),
        (15, "Max_Torque", WORD),
        (17, "Status_Return_Level", BYTE),
        (18, "Shutdown", BYTE),
        (24, "Torque_Enable", BYTE),
        (25, "LED", BYTE),
        (27, "D_gain", BYTE),
        (28, "I_gain", BYTE),
        (29, "P_gain", BYTE),
        (30, "Goal_Position", WORD),
        (32, "Moving_Speed", WORD),
        (34, "Torque_Limit", WORD),
        (37, "Present_Position", WORD),
        (39, "Present_Speed", WORD),
        (41, "Present_Load", WORD),
        (45, "Present_Voltage", BYTE),
        (46, "Present_Temperature", BYTE),
        (47, "Registered", BYTE),
        (49, "Moving", BYTE),
        (50, "Hardware_Error_Status", BYTE),
        (51, "Punch", WORD),
    )
    self._item = [ct_item.ControlTableItem(addr, name, size)
                  for addr, name, size in layout]
def setXLItem(self):
    """Load the control table for the XL430-W250 servo.

    Fix: self._item is now reset here. The previous version only
    appended, so entries from a previously-selected model stayed in the
    list — every other correctly-written set*Item method starts with a
    fresh list.
    """
    layout = (
        (0, "Model_Number", WORD),
        (6, "Firmware_Version", BYTE),
        (7, "ID", BYTE),
        (8, "Baud_Rate", BYTE),
        (9, "Return_Delay_Time", BYTE),
        (10, "Drive_Mode", BYTE),
        (11, "Operating_Mode", BYTE),
        (12, "Secondary_ID", BYTE),
        (13, "Protocol_Version", BYTE),
        (20, "Homing_Offset", DWORD),
        (24, "Moving_Threshold", DWORD),
        (31, "Temperature_Limit", BYTE),
        (32, "Max_Voltage_Limit", WORD),
        (34, "Min_Voltage_Limit", WORD),
        (36, "PWM_Limit", WORD),
        (40, "Acceleration_Limit", DWORD),
        (44, "Velocity_Limit", DWORD),
        (48, "Max_Position_Limit", DWORD),
        (52, "Min_Position_Limit", DWORD),
        (63, "Shutdown", BYTE),
        (64, "Torque_Enable", BYTE),
        (65, "LED", BYTE),
        (68, "Status_Return_Level", BYTE),
        (69, "Registered_Instruction", BYTE),
        (70, "Hardware_Error_Status", BYTE),
        (76, "Velocity_I_Gain", WORD),
        (78, "Velocity_P_Gain", WORD),
        (80, "Position_D_Gain", WORD),
        (82, "Position_I_Gain", WORD),
        (84, "Position_P_Gain", WORD),
        (88, "Feedforward_2nd_Gain", WORD),
        (90, "Feedforward_1st_Gain", WORD),
        (98, "Bus_Watchdog", BYTE),
        (100, "Goal_PWM", WORD),
        (104, "Goal_Velocity", DWORD),
        (108, "Profile_Acceleration", DWORD),
        (112, "Profile_Velocity", DWORD),
        (116, "Goal_Position", DWORD),
        (120, "Realtime_Tick", WORD),
        (122, "Moving", BYTE),
        (123, "Moving_Status", BYTE),
        (124, "Present_PWM", WORD),
        (126, "Present_Load", WORD),
        (128, "Present_Velocity", DWORD),
        (132, "Present_Position", DWORD),
        (136, "Velocity_Trajectory", DWORD),
        (140, "Position_Trajectory", DWORD),
        (144, "Present_Input_Voltage", WORD),
        (146, "Present_Temperature", BYTE),
    )
    self._item = [ct_item.ControlTableItem(addr, name, size)
                  for addr, name, size in layout]
def setXMItem(self):
    """Load the control table for XM430-series servos.

    Fixes over the previous version:
    - self._item is now reset here (it was missing, so items from a
      previously-selected model accumulated in the list).
    - Current_Limit was registered at address 40, colliding with
      Acceleration_Limit; the ROBOTIS control table puts it at 38.
    """
    layout = (
        (0, "Model_Number", WORD),
        (6, "Firmware_Version", BYTE),
        (7, "ID", BYTE),
        (8, "Baud_Rate", BYTE),
        (9, "Return_Delay_Time", BYTE),
        (10, "Drive_Mode", BYTE),
        (11, "Operating_Mode", BYTE),
        (12, "Secondary_ID", BYTE),
        (13, "Protocol_Version", BYTE),
        (20, "Homing_Offset", DWORD),
        (24, "Moving_Threshold", DWORD),
        (31, "Temperature_Limit", BYTE),
        (32, "Max_Voltage_Limit", WORD),
        (34, "Min_Voltage_Limit", WORD),
        (36, "PWM_Limit", WORD),
        (38, "Current_Limit", WORD),   # was 40, which collided with Acceleration_Limit
        (40, "Acceleration_Limit", DWORD),
        (44, "Velocity_Limit", DWORD),
        (48, "Max_Position_Limit", DWORD),
        (52, "Min_Position_Limit", DWORD),
        (63, "Shutdown", BYTE),
        (64, "Torque_Enable", BYTE),
        (65, "LED", BYTE),
        (68, "Status_Return_Level", BYTE),
        (69, "Registered_Instruction", BYTE),
        (70, "Hardware_Error_Status", BYTE),
        (76, "Velocity_I_Gain", WORD),
        (78, "Velocity_P_Gain", WORD),
        (80, "Position_D_Gain", WORD),
        (82, "Position_I_Gain", WORD),
        (84, "Position_P_Gain", WORD),
        (88, "Feedforward_2nd_Gain", WORD),
        (90, "Feedforward_1st_Gain", WORD),
        (98, "Bus_Watchdog", BYTE),
        (100, "Goal_PWM", WORD),
        (102, "Goal_Current", WORD),
        (104, "Goal_Velocity", DWORD),
        (108, "Profile_Acceleration", DWORD),
        (112, "Profile_Velocity", DWORD),
        (116, "Goal_Position", DWORD),
        (120, "Realtime_Tick", WORD),
        (122, "Moving", BYTE),
        (123, "Moving_Status", BYTE),
        (124, "Present_PWM", WORD),
        (126, "Present_Current", WORD),
        (128, "Present_Velocity", DWORD),
        (132, "Present_Position", DWORD),
        (136, "Velocity_Trajectory", DWORD),
        (140, "Position_Trajectory", DWORD),
        (144, "Present_Input_Voltage", WORD),
        (146, "Present_Temperature", BYTE),
    )
    self._item = [ct_item.ControlTableItem(addr, name, size)
                  for addr, name, size in layout]
def setExtXMItem(self):
    """Load the control table for XM540-series servos (XM430 table plus
    external ports).

    Fixes over the previous version:
    - self._item is now reset here (it was missing, so items from a
      previously-selected model accumulated in the list).
    - Current_Limit was registered at address 40, colliding with
      Acceleration_Limit; the ROBOTIS control table puts it at 38.
    - "Max Voltage_Limit" / "Min Voltage_Limit" typos corrected to use
      underscores, matching every sibling table.
    """
    layout = (
        (0, "Model_Number", WORD),
        (6, "Firmware_Version", BYTE),
        (7, "ID", BYTE),
        (8, "Baud_Rate", BYTE),
        (9, "Return_Delay_Time", BYTE),
        (10, "Drive_Mode", BYTE),
        (11, "Operating_Mode", BYTE),
        (12, "Secondary_ID", BYTE),
        (13, "Protocol_Version", BYTE),
        (20, "Homing_Offset", DWORD),
        (24, "Moving_Threshold", DWORD),
        (31, "Temperature_Limit", BYTE),
        (32, "Max_Voltage_Limit", WORD),
        (34, "Min_Voltage_Limit", WORD),
        (36, "PWM_Limit", WORD),
        (38, "Current_Limit", WORD),   # was 40, which collided with Acceleration_Limit
        (40, "Acceleration_Limit", DWORD),
        (44, "Velocity_Limit", DWORD),
        (48, "Max_Position_Limit", DWORD),
        (52, "Min_Position_Limit", DWORD),
        (56, "External_Port_Mode_1", BYTE),
        (57, "External_Port_Mode_2", BYTE),
        (58, "External_Port_Mode_3", BYTE),
        (63, "Shutdown", BYTE),
        (64, "Torque_Enable", BYTE),
        (65, "LED", BYTE),
        (68, "Status_Return_Level", BYTE),
        (69, "Registered_Instruction", BYTE),
        (70, "Hardware_Error_Status", BYTE),
        (76, "Velocity_I_Gain", WORD),
        (78, "Velocity_P_Gain", WORD),
        (80, "Position_D_Gain", WORD),
        (82, "Position_I_Gain", WORD),
        (84, "Position_P_Gain", WORD),
        (88, "Feedforward_2nd_Gain", WORD),
        (90, "Feedforward_1st_Gain", WORD),
        (98, "Bus_Watchdog", BYTE),
        (100, "Goal_PWM", WORD),
        (102, "Goal_Current", WORD),
        (104, "Goal_Velocity", DWORD),
        (108, "Profile_Acceleration", DWORD),
        (112, "Profile_Velocity", DWORD),
        (116, "Goal_Position", DWORD),
        (120, "Realtime_Tick", WORD),
        (122, "Moving", BYTE),
        (123, "Moving_Status", BYTE),
        (124, "Present_PWM", WORD),
        (126, "Present_Current", WORD),
        (128, "Present_Velocity", DWORD),
        (132, "Present_Position", DWORD),
        (136, "Velocity_Trajectory", DWORD),
        (140, "Position_Trajectory", DWORD),
        (144, "Present_Input_Voltage", WORD),
        (146, "Present_Temperature", BYTE),
    )
    self._item = [ct_item.ControlTableItem(addr, name, size)
                  for addr, name, size in layout]
def setXHItem(self):
    """Load the control table for XH430-series servos.

    Fixes over the previous version:
    - self._item is now reset here (it was missing, so items from a
      previously-selected model accumulated in the list).
    - Current_Limit was registered at address 40, colliding with
      Acceleration_Limit; the ROBOTIS control table puts it at 38.
    """
    layout = (
        (0, "Model_Number", WORD),
        (6, "Firmware_Version", BYTE),
        (7, "ID", BYTE),
        (8, "Baud_Rate", BYTE),
        (9, "Return_Delay_Time", BYTE),
        (10, "Drive_Mode", BYTE),
        (11, "Operating_Mode", BYTE),
        (12, "Secondary_ID", BYTE),
        (13, "Protocol_Version", BYTE),
        (20, "Homing_Offset", DWORD),
        (24, "Moving_Threshold", DWORD),
        (31, "Temperature_Limit", BYTE),
        (32, "Max_Voltage_Limit", WORD),
        (34, "Min_Voltage_Limit", WORD),
        (36, "PWM_Limit", WORD),
        (38, "Current_Limit", WORD),   # was 40, which collided with Acceleration_Limit
        (40, "Acceleration_Limit", DWORD),
        (44, "Velocity_Limit", DWORD),
        (48, "Max_Position_Limit", DWORD),
        (52, "Min_Position_Limit", DWORD),
        (63, "Shutdown", BYTE),
        (64, "Torque_Enable", BYTE),
        (65, "LED", BYTE),
        (68, "Status_Return_Level", BYTE),
        (69, "Registered_Instruction", BYTE),
        (70, "Hardware_Error_Status", BYTE),
        (76, "Velocity_I_Gain", WORD),
        (78, "Velocity_P_Gain", WORD),
        (80, "Position_D_Gain", WORD),
        (82, "Position_I_Gain", WORD),
        (84, "Position_P_Gain", WORD),
        (88, "Feedforward_2nd_Gain", WORD),
        (90, "Feedforward_1st_Gain", WORD),
        (98, "Bus_Watchdog", BYTE),
        (100, "Goal_PWM", WORD),
        (102, "Goal_Current", WORD),
        (104, "Goal_Velocity", DWORD),
        (108, "Profile_Acceleration", DWORD),
        (112, "Profile_Velocity", DWORD),
        (116, "Goal_Position", DWORD),
        (120, "Realtime_Tick", WORD),
        (122, "Moving", BYTE),
        (123, "Moving_Status", BYTE),
        (124, "Present_PWM", WORD),
        (126, "Present_Current", WORD),
        (128, "Present_Velocity", DWORD),
        (132, "Present_Position", DWORD),
        (136, "Velocity_Trajectory", DWORD),
        (140, "Position_Trajectory", DWORD),
        (144, "Present_Input_Voltage", WORD),
        (146, "Present_Temperature", BYTE),
    )
    self._item = [ct_item.ControlTableItem(addr, name, size)
                  for addr, name, size in layout]
def setPROItem(self):
    """Load the control table for DYNAMIXEL PRO (protocol 2.0) servos.

    Fixes over the previous version:
    - self._item is now reset here (it was missing, so items from a
      previously-selected model accumulated in the list).
    - The items at addresses 626/628/630/632 reused the names
      External_Port_Mode_1..4 already taken by addresses 44-47, making
      name-based lookup ambiguous; the PRO control table names the RAM
      entries External_Port_Data_1..4.
    """
    layout = (
        (0, "Model_Number", WORD),
        (6, "Firmware_Version", BYTE),
        (7, "ID", BYTE),
        (8, "Baud_Rate", BYTE),
        (9, "Return_Delay_Time", BYTE),
        (11, "Operating_Mode", BYTE),
        (13, "Homing_Offset", DWORD),
        (17, "Moving_Threshold", DWORD),
        (21, "Temperature_Limit", BYTE),
        (22, "Max_Voltage_Limit", WORD),
        (24, "Min_Voltage_Limit", WORD),
        (26, "Acceleration_Limit", DWORD),
        (30, "Torque_Limit", WORD),
        (32, "Velocity_Limit", DWORD),
        (36, "Max_Position_Limit", DWORD),
        (40, "Min_Position_Limit", DWORD),
        (44, "External_Port_Mode_1", BYTE),
        (45, "External_Port_Mode_2", BYTE),
        (46, "External_Port_Mode_3", BYTE),
        (47, "External_Port_Mode_4", BYTE),
        (48, "Shutdown", BYTE),
        (562, "Torque_Enable", BYTE),
        (563, "LED_RED", BYTE),
        (564, "LED_GREEN", BYTE),
        (565, "LED_BLUE", BYTE),
        (586, "Velocity_I_Gain", WORD),
        (588, "Velocity_P_Gain", WORD),
        (594, "Position_P_Gain", WORD),
        (596, "Goal_Position", DWORD),
        (600, "Goal_Velocity", DWORD),
        (604, "Goal_Torque", WORD),
        (606, "Goal_Acceleration", DWORD),
        (610, "Moving", BYTE),
        (611, "Present_Position", DWORD),
        (615, "Present_Velocity", DWORD),
        (621, "Present_Current", WORD),
        (623, "Present_Input_Voltage", WORD),
        (625, "Present_Temperature", BYTE),
        (626, "External_Port_Data_1", WORD),   # were duplicated "..._Mode_*" names
        (628, "External_Port_Data_2", WORD),
        (630, "External_Port_Data_3", WORD),
        (632, "External_Port_Data_4", WORD),
        (890, "Registered_Instruction", BYTE),
        (891, "Status_Return_Level", BYTE),
        (892, "Hardware_Error_Status", BYTE),
    )
    self._item = [ct_item.ControlTableItem(addr, name, size)
                  for addr, name, size in layout]
def getControlTableItem(self, num):
    """Populate self._item for model number *num* and return the list.

    Unknown model numbers fall back to the XM-series table, preserving
    the original behaviour.
    """
    # (model numbers, loader) pairs; first match wins.
    dispatch = (
        ((AX_12A, AX_12W, AX_18A), self.setAXItem),
        ((RX_10, RX_24F, RX_28, RX_64), self.setRXItem),
        ((EX_106,), self.setEXItem),
        ((MX_12W, MX_28), self.setMXItem),
        ((MX_64, MX_106), self.setExtMXItem),
        ((MX_28_2,), self.setMX2Item),
        ((MX_64_2, MX_106_2), self.setExtMX2Item),
        ((XL_320,), self.setXL320Item),
        ((XL430_W250,), self.setXLItem),
        ((XM430_W210, XM430_W350), self.setXMItem),
        ((XM540_W150, XM540_W270), self.setExtXMItem),
        ((XH430_V210, XH430_V350, XH430_W210, XH430_W350), self.setXHItem),
        ((PRO_L42_10_S300_R, PRO_L54_30_S400_R, PRO_L54_30_S500_R,
          PRO_L54_50_S290_R, PRO_L54_50_S500_R, PRO_M42_10_S260_R,
          PRO_M54_40_S250_R, PRO_M54_60_S250_R, PRO_H42_20_S300_R,
          PRO_H54_100_S500_R, PRO_H54_200_S500_R), self.setPROItem),
    )
    for models, loader in dispatch:
        if num in models:
            loader()
            break
    else:
        # Default for unrecognised models, as in the original if/elif chain.
        self.setXMItem()
    return self._item
def getTheNumberOfControlItem(self):
    """Return how many control table entries are currently loaded."""
    item_count = len(self._item)
    return item_count
| 62.690307
| 91
| 0.713383
| 6,904
| 53,036
| 5.149334
| 0.051854
| 0.125791
| 0.21659
| 0.247532
| 0.952744
| 0.94574
| 0.939974
| 0.930466
| 0.91342
| 0.875503
| 0
| 0.05062
| 0.160419
| 53,036
| 845
| 92
| 62.764497
| 0.747777
| 0.005129
| 0
| 0.774275
| 0
| 0
| 0.144755
| 0.010179
| 0
| 0
| 0
| 0
| 0
| 1
| 0.039092
| false
| 0
| 0.001261
| 0.001261
| 0.046658
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
7b41895e0a78d41b2d25a506d935af11849ed42c
| 4,355
|
py
|
Python
|
app/modules/fhir.py
|
NTUT-108-SE/CMS-Backend
|
1daa37960ba61f935da5174516f0a2e411b68706
|
[
"MIT"
] | null | null | null |
app/modules/fhir.py
|
NTUT-108-SE/CMS-Backend
|
1daa37960ba61f935da5174516f0a2e411b68706
|
[
"MIT"
] | null | null | null |
app/modules/fhir.py
|
NTUT-108-SE/CMS-Backend
|
1daa37960ba61f935da5174516f0a2e411b68706
|
[
"MIT"
] | 1
|
2019-10-12T02:48:16.000Z
|
2019-10-12T02:48:16.000Z
|
import requests
class FHIR:
    """Abstract interface for a FHIR REST resource client.

    Every method is a no-op stub; concrete subclasses (one per FHIR
    resource type) are expected to override each method with a real
    HTTP call against the server.
    """

    def get_all(self):
        """Fetch all instances of the resource. Stub — returns None."""
        # Todo GET
        # Get all data from FHIR
        pass

    def get_id(self, id):
        """Fetch a single resource instance by its id. Stub — returns None."""
        # Todo GET
        # Get this id data from FHIR
        pass

    def query_patient(self, patient_id):
        """Query resources belonging to a patient. Stub — returns None."""
        # Todo GET
        # Query data by patient_id
        pass

    def create(self, data):
        """Create a new resource instance. Stub — returns None."""
        # Todo POST
        pass

    def delete(self, id):
        """Delete a resource instance by id. Stub — returns None."""
        # Todo DELETE
        # Delete this id resource from FHIR
        pass

    def update(self, id, data):
        """Replace the resource instance with ``data``. Stub — returns None."""
        # Todo PUT
        # Update the data of this ID resource
        pass
class ConditionFHIR(FHIR):
    """REST client for the FHIR ``Condition`` resource.

    Every method returns a ``(parsed_json, http_status_code)`` pair.
    """

    def __init__(self, uri="http://localhost:8080/hapi-fhir-jpaserver/fhir/"):
        # Endpoint is the server base URL plus the resource name.
        self.uri = uri + "Condition"

    def get_all(self, offset=0, count=20):
        """Fetch one page of Condition resources."""
        response = requests.get(f"{self.uri}?_getpagesoffset={offset}&_count={count}")
        return response.json(), response.status_code

    def get_id(self, id):
        """Fetch the Condition with the given id."""
        response = requests.get(f"{self.uri}/{id}")
        return response.json(), response.status_code

    def query_patient(self, patient_id, offset=0, count=20):
        """Fetch one page of Conditions belonging to ``patient_id``."""
        response = requests.get(
            f"{self.uri}?patient={patient_id}&_getpagesoffset={offset}&_count={count}"
        )
        return response.json(), response.status_code

    def create(self, data):
        """Create a new Condition from the ``data`` payload."""
        response = requests.post(self.uri, json=data)
        return response.json(), response.status_code

    def delete(self, id):
        """Delete the Condition with the given id."""
        response = requests.delete(f"{self.uri}/{id}")
        return response.json(), response.status_code

    def update(self, id, data):
        """Replace the Condition with the given id with ``data``."""
        response = requests.put(f"{self.uri}/{id}", json=data)
        return response.json(), response.status_code
class PatientFHIR(FHIR):
    """REST client for the FHIR ``Patient`` resource.

    Every method returns a ``(parsed_json, http_status_code)`` pair.
    """

    def __init__(self, uri="http://localhost:8080/hapi-fhir-jpaserver/fhir/"):
        # Endpoint is the server base URL plus the resource name.
        self.uri = uri + "Patient"

    def get_all(self, offset=0, count=20):
        """Fetch one page of Patient resources."""
        response = requests.get(f"{self.uri}?_getpagesoffset={offset}&_count={count}")
        return response.json(), response.status_code

    def get_id(self, id):
        """Fetch the Patient with the given id."""
        response = requests.get(f"{self.uri}/{id}")
        return response.json(), response.status_code

    def query_patient(self, identifier):
        """Look up Patients by business identifier (not resource id)."""
        response = requests.get(f"{self.uri}?identifier={identifier}")
        return response.json(), response.status_code

    def create(self, data):
        """Create a new Patient from the ``data`` payload."""
        response = requests.post(self.uri, json=data)
        return response.json(), response.status_code

    def delete(self, id):
        """Delete the Patient with the given id."""
        response = requests.delete(f"{self.uri}/{id}")
        return response.json(), response.status_code

    def update(self, id, data):
        """Replace the Patient with the given id with ``data``."""
        response = requests.put(f"{self.uri}/{id}", json=data)
        return response.json(), response.status_code
class InvoiceFHIR(FHIR):
    """REST client for the FHIR ``Invoice`` resource.

    Only read and create operations are implemented; ``delete`` and
    ``update`` fall through to the base-class stubs.  Every implemented
    method returns a ``(parsed_json, http_status_code)`` pair.
    """

    def __init__(self, uri="http://localhost:8080/hapi-fhir-jpaserver/fhir/"):
        # Endpoint is the server base URL plus the resource name.
        self.uri = uri + "Invoice"

    def get_all(self, offset=0, count=20):
        """Fetch one page of Invoice resources."""
        response = requests.get(f"{self.uri}?_getpagesoffset={offset}&_count={count}")
        return response.json(), response.status_code

    def get_id(self, id):
        """Fetch the Invoice with the given id."""
        response = requests.get(f"{self.uri}/{id}")
        return response.json(), response.status_code

    def query_patient(self, patient_id, offset=0, count=20):
        """Fetch one page of Invoices belonging to ``patient_id``."""
        response = requests.get(
            f"{self.uri}?patient={patient_id}&_getpagesoffset={offset}&_count={count}"
        )
        return response.json(), response.status_code

    def create(self, data):
        """Create a new Invoice from the ``data`` payload."""
        response = requests.post(self.uri, json=data)
        return response.json(), response.status_code
class MedicationFHIR(FHIR):
    """REST client for the FHIR ``MedicationKnowledge`` resource.

    ``query_patient`` is inherited as a stub.  Every implemented method
    returns a ``(parsed_json, http_status_code)`` pair.
    """

    def __init__(self, uri="http://localhost:8080/hapi-fhir-jpaserver/fhir/"):
        # Endpoint is the server base URL plus the resource name.
        self.uri = uri + "MedicationKnowledge"

    def get_all(self, offset=0, count=20):
        """Fetch one page of MedicationKnowledge resources."""
        response = requests.get(f"{self.uri}?_getpagesoffset={offset}&_count={count}")
        return response.json(), response.status_code

    def get_id(self, id):
        """Fetch the MedicationKnowledge with the given id."""
        response = requests.get(f"{self.uri}/{id}")
        return response.json(), response.status_code

    def create(self, data):
        """Create a new MedicationKnowledge from the ``data`` payload."""
        response = requests.post(self.uri, json=data)
        return response.json(), response.status_code

    def delete(self, id):
        """Delete the MedicationKnowledge with the given id."""
        response = requests.delete(f"{self.uri}/{id}")
        return response.json(), response.status_code

    def update(self, id, data):
        """Replace the MedicationKnowledge with the given id with ``data``."""
        response = requests.put(f"{self.uri}/{id}", json=data)
        return response.json(), response.status_code
| 30.886525
| 99
| 0.600689
| 575
| 4,355
| 4.429565
| 0.088696
| 0.079702
| 0.107185
| 0.13192
| 0.861013
| 0.827248
| 0.816254
| 0.816254
| 0.816254
| 0.814291
| 0
| 0.010369
| 0.247072
| 4,355
| 140
| 100
| 31.107143
| 0.766392
| 0.046383
| 0
| 0.827957
| 0
| 0
| 0.113499
| 0.047332
| 0
| 0
| 0
| 0.007143
| 0
| 1
| 0.333333
| false
| 0.064516
| 0.010753
| 0
| 0.623656
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
| 0
|
0
| 10
|
7b4be9a3fa1bda8766f1371a20a6a6d001f5968d
| 13,953
|
py
|
Python
|
src/networks/wav2vec2_components/multi_branch_encoder.py
|
nikvaessen/w2v2-speaker-few-samples
|
98139c2234a63532ebc8dffeacb284f3d7403ca2
|
[
"MIT"
] | null | null | null |
src/networks/wav2vec2_components/multi_branch_encoder.py
|
nikvaessen/w2v2-speaker-few-samples
|
98139c2234a63532ebc8dffeacb284f3d7403ca2
|
[
"MIT"
] | null | null | null |
src/networks/wav2vec2_components/multi_branch_encoder.py
|
nikvaessen/w2v2-speaker-few-samples
|
98139c2234a63532ebc8dffeacb284f3d7403ca2
|
[
"MIT"
] | null | null | null |
########################################################################################
#
# Implement a multi-branch encoder for multi-task learning.
#
# Author(s): Nik Vaessen
########################################################################################
import copy
from typing import Optional
import torch as t
import torch.nn as nn
import numpy as np
from transformers.deepspeed import is_deepspeed_zero3_enabled
from transformers.modeling_outputs import BaseModelOutput
from transformers.models.wav2vec2 import configuration_wav2vec2
from src.networks.wav2vec2_components.base_components import (
Wav2vec2Encoder,
Wav2Vec2EncoderStableLayerNorm,
)
########################################################################################
# implementation of multi-branch encoder
class Wav2vec2MultiBranchEncoder(nn.Module):
    """Wav2vec2 transformer encoder with a shared trunk and two branches.

    The first ``branch_idx`` transformer layers are shared; the remaining
    layers are deep-copied into two independent branches so that two task
    heads can be trained on top of a single feature extractor.
    """

    def __init__(
        self,
        cfg: configuration_wav2vec2.Wav2Vec2Config,
        branch_idx: int,
        enable_gradient_checkpointing: bool,
        pretrained_weights: Optional[str] = None,
    ):
        super().__init__()

        # Build a plain encoder first (optionally loading pretrained
        # weights), then take ownership of its submodules.
        base_encoder = Wav2vec2Encoder(
            cfg,
            enable_gradient_checkpointing=enable_gradient_checkpointing,
            pretrained_weights=pretrained_weights,
        )

        self.config = base_encoder.encoder.config
        self.pos_conv_embed = base_encoder.encoder.pos_conv_embed
        self.layer_norm = base_encoder.encoder.layer_norm
        self.dropout = base_encoder.encoder.dropout
        self.gradient_checkpointing = base_encoder.encoder.gradient_checkpointing

        # FIX: the original check used ``==`` and therefore silently
        # accepted branch_idx > len(layers) (which would have produced two
        # empty branches), even though its own message says "should be <".
        if branch_idx >= len(base_encoder.encoder.layers):
            raise ValueError(
                f"{branch_idx=} should be < {len(base_encoder.encoder.layers)}"
            )

        # Layers [0, branch_idx) are shared; layers [branch_idx, end) are
        # duplicated so the two branches have independent parameters.
        self.shared_layers = base_encoder.encoder.layers[0:branch_idx]
        self.branch1 = copy.deepcopy(base_encoder.encoder.layers)[branch_idx:]
        self.branch2 = copy.deepcopy(self.branch1)

        # The temporary encoder shell is no longer needed.
        del base_encoder

    def forward(
        self,
        hidden_states,
        attention_mask=None,
        output_attentions=False,
        output_hidden_states=False,
        return_dict=True,
    ):
        """Run the shared trunk once, then each branch on its own copy.

        Returns a ``(branch1_output, branch2_output)`` tuple of
        :class:`BaseModelOutput`.  Raises ``ValueError`` when
        ``return_dict`` is False, which this module does not support.
        """
        if not return_dict:
            raise ValueError("only return_dict=True is supported")

        # Shared trunk: positional embedding preamble + shared layers.
        shared_output = self._inner_forward(
            hidden_states,
            self.shared_layers,
            attention_mask=attention_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            add_pos_embedding=True,
        )

        branch1_input = shared_output.last_hidden_state
        # Clone so in-place ops inside one branch cannot leak into the other.
        branch2_input = t.clone(branch1_input)

        branch1_output = self._inner_forward(
            branch1_input,
            self.branch1,
            attention_mask=attention_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            add_pos_embedding=False,
        )
        branch2_output = self._inner_forward(
            branch2_input,
            self.branch2,
            attention_mask=attention_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            add_pos_embedding=False,
        )

        # Prepend the trunk's attentions / hidden states so each branch
        # output describes the full path from the encoder input.
        if output_attentions:
            branch1_output.attentions = (
                shared_output.attentions + branch1_output.attentions
            )
            branch2_output.attentions = (
                shared_output.attentions + branch2_output.attentions
            )

        if output_hidden_states:
            branch1_output.hidden_states = (
                shared_output.hidden_states + branch1_output.hidden_states
            )
            branch2_output.hidden_states = (
                shared_output.hidden_states + branch2_output.hidden_states
            )

        return branch1_output, branch2_output

    def _inner_forward(
        self,
        hidden_states,
        layers: nn.ModuleList,
        attention_mask=None,
        output_attentions=False,
        output_hidden_states=False,
        return_dict=True,
        add_pos_embedding: bool = True,
    ):
        """Run ``hidden_states`` through ``layers``.

        Adapted from HF's ``Wav2Vec2Encoder.forward``; ``add_pos_embedding``
        selects whether the positional-embedding preamble is applied (only
        the shared trunk does this — branches continue mid-stack).
        """
        all_hidden_states = () if output_hidden_states else None
        all_self_attentions = () if output_attentions else None

        if attention_mask is not None:
            # make sure padded tokens output 0
            hidden_states[~attention_mask] = 0.0

            # extend attention_mask to an additive (batch, 1, seq, seq) mask
            attention_mask = (
                1.0 - attention_mask[:, None, None, :].to(dtype=hidden_states.dtype)
            ) * -10000.0
            attention_mask = attention_mask.expand(
                attention_mask.shape[0],
                1,
                attention_mask.shape[-1],
                attention_mask.shape[-1],
            )

        if add_pos_embedding:
            position_embeddings = self.pos_conv_embed(hidden_states)
            hidden_states = hidden_states + position_embeddings
            hidden_states = self.layer_norm(hidden_states)
            hidden_states = self.dropout(hidden_states)

        deepspeed_zero3_is_enabled = is_deepspeed_zero3_enabled()

        for layer in layers:
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

            # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
            dropout_probability = np.random.uniform(0, 1)

            skip_the_layer = (
                True
                if self.training and (dropout_probability < self.config.layerdrop)
                else False
            )
            if not skip_the_layer or deepspeed_zero3_is_enabled:
                # under deepspeed zero3 all gpus must run in sync
                if self.gradient_checkpointing and self.training:
                    # create gradient checkpointing function
                    def create_custom_forward(module):
                        def custom_forward(*inputs):
                            return module(*inputs, output_attentions)

                        return custom_forward

                    layer_outputs = t.utils.checkpoint.checkpoint(
                        create_custom_forward(layer),
                        hidden_states,
                        attention_mask,
                    )
                else:
                    layer_outputs = layer(
                        hidden_states,
                        attention_mask=attention_mask,
                        output_attentions=output_attentions,
                    )
                hidden_states = layer_outputs[0]

            if skip_the_layer:
                layer_outputs = (None, None)

            if output_attentions:
                all_self_attentions = all_self_attentions + (layer_outputs[1],)

        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        if not return_dict:
            return tuple(
                v
                for v in [hidden_states, all_hidden_states, all_self_attentions]
                if v is not None
            )
        return BaseModelOutput(
            last_hidden_state=hidden_states,
            hidden_states=all_hidden_states,
            attentions=all_self_attentions,
        )
class Wav2vec2MultiBranchEncoderStableLayerNorm(nn.Module):
    """Stable-layer-norm variant of the multi-branch wav2vec2 encoder.

    Same shared-trunk / two-branch structure as
    ``Wav2vec2MultiBranchEncoder``, but mirrors HF's
    ``Wav2Vec2EncoderStableLayerNorm``: layer norm is applied after the
    transformer layers instead of before them.
    """

    def __init__(
        self,
        cfg: configuration_wav2vec2.Wav2Vec2Config,
        branch_idx: int,
        enable_gradient_checkpointing: bool,
        pretrained_weights: Optional[str] = None,
    ):
        super().__init__()

        # Build a plain stable-layer-norm encoder first, then take
        # ownership of its submodules.
        base_encoder = Wav2Vec2EncoderStableLayerNorm(
            cfg,
            enable_gradient_checkpointing=enable_gradient_checkpointing,
            pretrained_weights=pretrained_weights,
        )

        self.config = base_encoder.encoder.config
        self.pos_conv_embed = base_encoder.encoder.pos_conv_embed
        self.layer_norm = base_encoder.encoder.layer_norm
        self.dropout = base_encoder.encoder.dropout
        self.gradient_checkpointing = base_encoder.encoder.gradient_checkpointing

        # FIX: the original check used ``==`` and therefore silently
        # accepted branch_idx > len(layers), even though its own message
        # says "should be <".
        if branch_idx >= len(base_encoder.encoder.layers):
            raise ValueError(
                f"{branch_idx=} should be < {len(base_encoder.encoder.layers)}"
            )

        # Layers [0, branch_idx) are shared; layers [branch_idx, end) are
        # duplicated so the two branches have independent parameters.
        self.shared_layers = base_encoder.encoder.layers[0:branch_idx]
        self.branch1 = copy.deepcopy(base_encoder.encoder.layers)[branch_idx:]
        self.branch2 = copy.deepcopy(self.branch1)

        del base_encoder

    def forward(
        self,
        hidden_states,
        attention_mask=None,
        output_attentions=False,
        output_hidden_states=False,
        return_dict=True,
    ):
        """Run the shared trunk once, then each branch on its own copy.

        Returns a ``(branch1_output, branch2_output)`` tuple of
        :class:`BaseModelOutput`.  Raises ``ValueError`` when
        ``return_dict`` is False, which this module does not support.
        """
        if not return_dict:
            raise ValueError("only return_dict=True is supported")

        # Shared trunk: positional embedding preamble + shared layers.
        shared_output = self._inner_forward(
            hidden_states,
            self.shared_layers,
            attention_mask=attention_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            add_pos_embedding=True,
        )

        # FIX: the original passed ``shared_output.hidden_states`` — the
        # tuple of intermediate states (or None when
        # output_hidden_states=False) — into both branches, and both
        # branches shared one tensor.  The sibling class
        # ``Wav2vec2MultiBranchEncoder`` does this correctly: use the
        # trunk's ``last_hidden_state`` and give branch 2 its own clone so
        # in-place ops cannot leak between branches.
        branch1_input = shared_output.last_hidden_state
        branch2_input = t.clone(branch1_input)

        branch1_output = self._inner_forward(
            branch1_input,
            self.branch1,
            attention_mask=attention_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            add_pos_embedding=False,
        )
        branch2_output = self._inner_forward(
            branch2_input,
            self.branch2,
            attention_mask=attention_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            add_pos_embedding=False,
        )

        # Prepend the trunk's attentions / hidden states so each branch
        # output describes the full path from the encoder input.
        if output_attentions:
            branch1_output.attentions = (
                shared_output.attentions + branch1_output.attentions
            )
            branch2_output.attentions = (
                shared_output.attentions + branch2_output.attentions
            )

        if output_hidden_states:
            branch1_output.hidden_states = (
                shared_output.hidden_states + branch1_output.hidden_states
            )
            branch2_output.hidden_states = (
                shared_output.hidden_states + branch2_output.hidden_states
            )

        return branch1_output, branch2_output

    def _inner_forward(
        self,
        hidden_states,
        layers: nn.ModuleList,
        attention_mask=None,
        output_attentions=False,
        output_hidden_states=False,
        return_dict=True,
        add_pos_embedding: bool = True,
    ):
        """Run ``hidden_states`` through ``layers``.

        Adapted from HF's ``Wav2Vec2EncoderStableLayerNorm.forward``; note
        the final ``self.layer_norm`` after the layer loop, which is what
        distinguishes this variant.  ``add_pos_embedding`` selects whether
        the positional-embedding preamble is applied (only the shared
        trunk does this — branches continue mid-stack).
        """
        all_hidden_states = () if output_hidden_states else None
        all_self_attentions = () if output_attentions else None

        if attention_mask is not None:
            # make sure padded tokens are not attended to
            hidden_states[~attention_mask] = 0

            # extend attention_mask to an additive (batch, 1, seq, seq) mask
            attention_mask = (
                1.0 - attention_mask[:, None, None, :].to(dtype=hidden_states.dtype)
            ) * -10000.0
            attention_mask = attention_mask.expand(
                attention_mask.shape[0],
                1,
                attention_mask.shape[-1],
                attention_mask.shape[-1],
            )

        if add_pos_embedding:
            position_embeddings = self.pos_conv_embed(hidden_states)
            hidden_states = hidden_states + position_embeddings
            hidden_states = self.dropout(hidden_states)

        deepspeed_zero3_is_enabled = is_deepspeed_zero3_enabled()

        for layer in layers:
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

            # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
            dropout_probability = np.random.uniform(0, 1)

            skip_the_layer = (
                True
                if self.training and (dropout_probability < self.config.layerdrop)
                else False
            )
            if not skip_the_layer or deepspeed_zero3_is_enabled:
                # under deepspeed zero3 all gpus must run in sync
                # XXX: could optimize this like synced_gpus in generate_utils but not sure if it's worth the code complication
                if self.gradient_checkpointing and self.training:
                    # create gradient checkpointing function
                    def create_custom_forward(module):
                        def custom_forward(*inputs):
                            return module(*inputs, output_attentions)

                        return custom_forward

                    layer_outputs = t.utils.checkpoint.checkpoint(
                        create_custom_forward(layer),
                        hidden_states,
                        attention_mask,
                    )
                else:
                    layer_outputs = layer(
                        hidden_states,
                        attention_mask=attention_mask,
                        output_attentions=output_attentions,
                    )
                hidden_states = layer_outputs[0]

            if skip_the_layer:
                layer_outputs = (None, None)

            if output_attentions:
                all_self_attentions = all_self_attentions + (layer_outputs[1],)

        # Stable-layer-norm variant: normalize after the layer stack.
        hidden_states = self.layer_norm(hidden_states)

        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        if not return_dict:
            return tuple(
                v
                for v in [hidden_states, all_hidden_states, all_self_attentions]
                if v is not None
            )
        return BaseModelOutput(
            last_hidden_state=hidden_states,
            hidden_states=all_hidden_states,
            attentions=all_self_attentions,
        )
| 34.708955
| 126
| 0.593134
| 1,372
| 13,953
| 5.680029
| 0.120262
| 0.141666
| 0.087771
| 0.040036
| 0.902605
| 0.892339
| 0.892339
| 0.884897
| 0.884897
| 0.875914
| 0
| 0.013898
| 0.329607
| 13,953
| 401
| 127
| 34.795511
| 0.819222
| 0.047373
| 0
| 0.828025
| 0
| 0
| 0.014449
| 0.005226
| 0
| 0
| 0
| 0
| 0
| 1
| 0.031847
| false
| 0
| 0.028662
| 0.006369
| 0.098726
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
7b573bfac3397fa32ef6ebc7740446e7ee8b70d6
| 256,879
|
py
|
Python
|
tests/test_example.py
|
cariad/ansiscape
|
07118b68729fcdc2198fc69c35bb1e9ef1bb5b80
|
[
"MIT"
] | null | null | null |
tests/test_example.py
|
cariad/ansiscape
|
07118b68729fcdc2198fc69c35bb1e9ef1bb5b80
|
[
"MIT"
] | 30
|
2021-09-05T13:56:21.000Z
|
2021-10-05T11:14:11.000Z
|
tests/test_example.py
|
cariad/ansiscape
|
07118b68729fcdc2198fc69c35bb1e9ef1bb5b80
|
[
"MIT"
] | null | null | null |
from ansiscape.enums import (
Blink,
Calligraphy,
Font,
Frame,
Ideogram,
MetaInterpretation,
NamedColor,
Underline,
Weight,
)
from ansiscape.example import make_example
def test_example() -> None:
example = make_example()
assert list(example.resolved) == [
{
"underline": Underline.DOUBLE,
"weight": Weight.HEAVY,
},
"ansiscape",
{
"underline": MetaInterpretation.REVERT,
"weight": MetaInterpretation.REVERT,
},
"\n\nWelcome to the ",
{"weight": Weight.HEAVY},
"ansiscape",
{"weight": MetaInterpretation.REVERT},
" example!\n\nThese are ",
{"weight": Weight.HEAVY},
"heavy",
{"weight": MetaInterpretation.REVERT},
" and ",
{"weight": Weight.LIGHT},
"light",
{"weight": MetaInterpretation.REVERT},
".\n" "\n" "These are ",
{"calligraphy": Calligraphy.ITALIC},
"italic",
{"calligraphy": MetaInterpretation.REVERT},
" and ",
{"calligraphy": Calligraphy.BLACKLETTER},
"blackletter",
{"calligraphy": MetaInterpretation.REVERT},
".\n" "\n" "These are ",
{"underline": Underline.SINGLE},
"single underlined",
{"underline": MetaInterpretation.REVERT},
", ",
{"underline": Underline.DOUBLE},
"double underlined",
{"underline": MetaInterpretation.REVERT},
" and ",
{"overline": True},
"overlined",
{"overline": MetaInterpretation.REVERT},
".\n" "\n" "These are ",
{"blink": Blink.SLOW},
"blinking slowly",
{"blink": MetaInterpretation.REVERT},
" and ",
{"blink": Blink.FAST},
"blinking fast",
{"blink": MetaInterpretation.REVERT},
".\n" "\n" "These are ",
{"invert": True},
"inverted",
{"invert": MetaInterpretation.REVERT},
", ",
{"conceal": True},
"concealed",
{"conceal": MetaInterpretation.REVERT},
" (that's ",
{"calligraphy": Calligraphy.ITALIC},
"concealed",
{"calligraphy": MetaInterpretation.REVERT},
") and ",
{"strike": True},
"struck",
{"strike": MetaInterpretation.REVERT},
".\n" "\n" "These are the ",
{"font": Font.ALT_0},
"first alternative font",
{"font": MetaInterpretation.REVERT},
", the ",
{"font": Font.ALT_1},
"second alternative font",
{"font": MetaInterpretation.REVERT},
", the ",
{"font": Font.ALT_2},
"third alternative font",
{"font": MetaInterpretation.REVERT},
", the ",
{"font": Font.ALT_3},
"fourth alternative font",
{"font": MetaInterpretation.REVERT},
", the ",
{"font": Font.ALT_4},
"fifth alternative font",
{"font": MetaInterpretation.REVERT},
", the ",
{"font": Font.ALT_5},
"sixth alternative font",
{"font": MetaInterpretation.REVERT},
", the ",
{"font": Font.ALT_6},
"seventh alternative font",
{"font": MetaInterpretation.REVERT},
", the ",
{"font": Font.ALT_7},
"eighth alternative font",
{"font": MetaInterpretation.REVERT},
" and the ",
{"font": Font.ALT_8},
"ninth alternative font",
{"font": MetaInterpretation.REVERT},
".\n" "\n",
{"proportional_spacing": True},
"This entire line uses proportional spacing.",
{"proportional_spacing": MetaInterpretation.REVERT},
"\n" "\n" "These are ",
{"foreground": NamedColor.BLACK},
"black",
{"foreground": MetaInterpretation.REVERT},
", ",
{"foreground": NamedColor.RED},
"red",
{"foreground": MetaInterpretation.REVERT},
", ",
{"foreground": NamedColor.GREEN},
"green",
{"foreground": MetaInterpretation.REVERT},
", ",
{"foreground": NamedColor.YELLOW},
"yellow",
{"foreground": MetaInterpretation.REVERT},
", ",
{"foreground": NamedColor.BLUE},
"blue",
{"foreground": MetaInterpretation.REVERT},
", ",
{"foreground": NamedColor.MAGENTA},
"magenta",
{"foreground": MetaInterpretation.REVERT},
", ",
{"foreground": NamedColor.CYAN},
"cyan",
{"foreground": MetaInterpretation.REVERT},
", ",
{"foreground": NamedColor.WHITE},
"white",
{"foreground": MetaInterpretation.REVERT},
", ",
{"foreground": NamedColor.BRIGHT_BLACK},
"bright black",
{"foreground": MetaInterpretation.REVERT},
", ",
{"foreground": NamedColor.BRIGHT_RED},
"bright red",
{"foreground": MetaInterpretation.REVERT},
", ",
{"foreground": NamedColor.BRIGHT_GREEN},
"bright green",
{"foreground": MetaInterpretation.REVERT},
", ",
{"foreground": NamedColor.BRIGHT_YELLOW},
"bright yellow",
{"foreground": MetaInterpretation.REVERT},
", ",
{"foreground": NamedColor.BRIGHT_BLUE},
"bright blue",
{"foreground": MetaInterpretation.REVERT},
", ",
{"foreground": NamedColor.BRIGHT_MAGENTA},
"bright magenta",
{"foreground": MetaInterpretation.REVERT},
", ",
{"foreground": NamedColor.BRIGHT_CYAN},
"bright cyan",
{"foreground": MetaInterpretation.REVERT},
" and ",
{"foreground": NamedColor.BRIGHT_WHITE},
"bright white",
{"foreground": MetaInterpretation.REVERT},
" foreground.\n" "\n" "These are ",
{"background": NamedColor.BLACK},
"black",
{"background": MetaInterpretation.REVERT},
", ",
{"background": NamedColor.RED},
"red",
{"background": MetaInterpretation.REVERT},
", ",
{"background": NamedColor.GREEN},
"green",
{"background": MetaInterpretation.REVERT},
", ",
{"background": NamedColor.YELLOW},
"yellow",
{"background": MetaInterpretation.REVERT},
", ",
{"background": NamedColor.BLUE},
"blue",
{"background": MetaInterpretation.REVERT},
", ",
{"background": NamedColor.MAGENTA},
"magenta",
{"background": MetaInterpretation.REVERT},
", ",
{"background": NamedColor.CYAN},
"cyan",
{"background": MetaInterpretation.REVERT},
", ",
{"background": NamedColor.WHITE},
"white",
{"background": MetaInterpretation.REVERT},
", ",
{"background": NamedColor.BRIGHT_BLACK},
"bright black",
{"background": MetaInterpretation.REVERT},
", ",
{"background": NamedColor.BRIGHT_RED},
"bright red",
{"background": MetaInterpretation.REVERT},
", ",
{"background": NamedColor.BRIGHT_GREEN},
"bright green",
{"background": MetaInterpretation.REVERT},
", ",
{"background": NamedColor.BRIGHT_YELLOW},
"bright yellow",
{"background": MetaInterpretation.REVERT},
", ",
{"background": NamedColor.BRIGHT_BLUE},
"bright blue",
{"background": MetaInterpretation.REVERT},
", ",
{"background": NamedColor.BRIGHT_MAGENTA},
"bright magenta",
{"background": MetaInterpretation.REVERT},
", ",
{"background": NamedColor.BRIGHT_CYAN},
"bright cyan",
{"background": MetaInterpretation.REVERT},
" and ",
{"background": NamedColor.BRIGHT_WHITE},
"bright white",
{"background": MetaInterpretation.REVERT},
" background.\n\nHere's some foreground RGB:\n\n",
{"foreground": (0.0, 0.0, 0.0, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.0, 0.0, 0.1, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.0, 0.0, 0.2, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.0, 0.0, 0.30000000000000004, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.0, 0.0, 0.4, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.0, 0.0, 0.5, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.0, 0.0, 0.6000000000000001, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.0, 0.0, 0.7000000000000001, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.0, 0.0, 0.8, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.0, 0.0, 0.9, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.0, 0.1, 0.0, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.0, 0.1, 0.1, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.0, 0.1, 0.2, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.0, 0.1, 0.30000000000000004, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.0, 0.1, 0.4, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.0, 0.1, 0.5, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.0, 0.1, 0.6000000000000001, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.0, 0.1, 0.7000000000000001, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.0, 0.1, 0.8, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.0, 0.1, 0.9, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.0, 0.2, 0.0, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.0, 0.2, 0.1, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.0, 0.2, 0.2, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.0, 0.2, 0.30000000000000004, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.0, 0.2, 0.4, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.0, 0.2, 0.5, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.0, 0.2, 0.6000000000000001, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.0, 0.2, 0.7000000000000001, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.0, 0.2, 0.8, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.0, 0.2, 0.9, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.0, 0.30000000000000004, 0.0, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.0, 0.30000000000000004, 0.1, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.0, 0.30000000000000004, 0.2, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.0, 0.30000000000000004, 0.30000000000000004, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.0, 0.30000000000000004, 0.4, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.0, 0.30000000000000004, 0.5, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.0, 0.30000000000000004, 0.6000000000000001, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.0, 0.30000000000000004, 0.7000000000000001, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.0, 0.30000000000000004, 0.8, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.0, 0.30000000000000004, 0.9, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.0, 0.4, 0.0, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.0, 0.4, 0.1, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.0, 0.4, 0.2, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.0, 0.4, 0.30000000000000004, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.0, 0.4, 0.4, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.0, 0.4, 0.5, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.0, 0.4, 0.6000000000000001, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.0, 0.4, 0.7000000000000001, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.0, 0.4, 0.8, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.0, 0.4, 0.9, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.0, 0.5, 0.0, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.0, 0.5, 0.1, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.0, 0.5, 0.2, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.0, 0.5, 0.30000000000000004, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.0, 0.5, 0.4, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.0, 0.5, 0.5, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.0, 0.5, 0.6000000000000001, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.0, 0.5, 0.7000000000000001, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.0, 0.5, 0.8, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.0, 0.5, 0.9, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.0, 0.6000000000000001, 0.0, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.0, 0.6000000000000001, 0.1, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.0, 0.6000000000000001, 0.2, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.0, 0.6000000000000001, 0.30000000000000004, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.0, 0.6000000000000001, 0.4, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.0, 0.6000000000000001, 0.5, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.0, 0.6000000000000001, 0.6000000000000001, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.0, 0.6000000000000001, 0.7000000000000001, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.0, 0.6000000000000001, 0.8, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.0, 0.6000000000000001, 0.9, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.0, 0.7000000000000001, 0.0, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.0, 0.7000000000000001, 0.1, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.0, 0.7000000000000001, 0.2, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.0, 0.7000000000000001, 0.30000000000000004, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.0, 0.7000000000000001, 0.4, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.0, 0.7000000000000001, 0.5, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.0, 0.7000000000000001, 0.6000000000000001, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.0, 0.7000000000000001, 0.7000000000000001, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.0, 0.7000000000000001, 0.8, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.0, 0.7000000000000001, 0.9, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.0, 0.8, 0.0, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.0, 0.8, 0.1, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.0, 0.8, 0.2, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.0, 0.8, 0.30000000000000004, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.0, 0.8, 0.4, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.0, 0.8, 0.5, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.0, 0.8, 0.6000000000000001, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.0, 0.8, 0.7000000000000001, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.0, 0.8, 0.8, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.0, 0.8, 0.9, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.0, 0.9, 0.0, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.0, 0.9, 0.1, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.0, 0.9, 0.2, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.0, 0.9, 0.30000000000000004, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.0, 0.9, 0.4, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.0, 0.9, 0.5, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.0, 0.9, 0.6000000000000001, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.0, 0.9, 0.7000000000000001, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.0, 0.9, 0.8, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.0, 0.9, 0.9, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
"\n",
{"foreground": (0.1, 0.0, 0.0, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.1, 0.0, 0.1, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.1, 0.0, 0.2, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.1, 0.0, 0.30000000000000004, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.1, 0.0, 0.4, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.1, 0.0, 0.5, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.1, 0.0, 0.6000000000000001, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.1, 0.0, 0.7000000000000001, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.1, 0.0, 0.8, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.1, 0.0, 0.9, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.1, 0.1, 0.0, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.1, 0.1, 0.1, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.1, 0.1, 0.2, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.1, 0.1, 0.30000000000000004, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.1, 0.1, 0.4, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.1, 0.1, 0.5, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.1, 0.1, 0.6000000000000001, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.1, 0.1, 0.7000000000000001, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.1, 0.1, 0.8, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.1, 0.1, 0.9, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.1, 0.2, 0.0, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.1, 0.2, 0.1, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.1, 0.2, 0.2, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.1, 0.2, 0.30000000000000004, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.1, 0.2, 0.4, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.1, 0.2, 0.5, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.1, 0.2, 0.6000000000000001, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.1, 0.2, 0.7000000000000001, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.1, 0.2, 0.8, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.1, 0.2, 0.9, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.1, 0.30000000000000004, 0.0, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.1, 0.30000000000000004, 0.1, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.1, 0.30000000000000004, 0.2, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.1, 0.30000000000000004, 0.30000000000000004, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.1, 0.30000000000000004, 0.4, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.1, 0.30000000000000004, 0.5, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.1, 0.30000000000000004, 0.6000000000000001, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.1, 0.30000000000000004, 0.7000000000000001, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.1, 0.30000000000000004, 0.8, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.1, 0.30000000000000004, 0.9, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.1, 0.4, 0.0, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.1, 0.4, 0.1, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.1, 0.4, 0.2, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.1, 0.4, 0.30000000000000004, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.1, 0.4, 0.4, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.1, 0.4, 0.5, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.1, 0.4, 0.6000000000000001, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.1, 0.4, 0.7000000000000001, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.1, 0.4, 0.8, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.1, 0.4, 0.9, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.1, 0.5, 0.0, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.1, 0.5, 0.1, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.1, 0.5, 0.2, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.1, 0.5, 0.30000000000000004, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.1, 0.5, 0.4, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.1, 0.5, 0.5, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.1, 0.5, 0.6000000000000001, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.1, 0.5, 0.7000000000000001, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.1, 0.5, 0.8, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.1, 0.5, 0.9, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.1, 0.6000000000000001, 0.0, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.1, 0.6000000000000001, 0.1, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.1, 0.6000000000000001, 0.2, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.1, 0.6000000000000001, 0.30000000000000004, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.1, 0.6000000000000001, 0.4, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.1, 0.6000000000000001, 0.5, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.1, 0.6000000000000001, 0.6000000000000001, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.1, 0.6000000000000001, 0.7000000000000001, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.1, 0.6000000000000001, 0.8, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.1, 0.6000000000000001, 0.9, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.1, 0.7000000000000001, 0.0, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.1, 0.7000000000000001, 0.1, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.1, 0.7000000000000001, 0.2, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.1, 0.7000000000000001, 0.30000000000000004, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.1, 0.7000000000000001, 0.4, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.1, 0.7000000000000001, 0.5, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.1, 0.7000000000000001, 0.6000000000000001, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.1, 0.7000000000000001, 0.7000000000000001, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.1, 0.7000000000000001, 0.8, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.1, 0.7000000000000001, 0.9, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.1, 0.8, 0.0, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.1, 0.8, 0.1, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.1, 0.8, 0.2, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.1, 0.8, 0.30000000000000004, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.1, 0.8, 0.4, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.1, 0.8, 0.5, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.1, 0.8, 0.6000000000000001, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.1, 0.8, 0.7000000000000001, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.1, 0.8, 0.8, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.1, 0.8, 0.9, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.1, 0.9, 0.0, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.1, 0.9, 0.1, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.1, 0.9, 0.2, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.1, 0.9, 0.30000000000000004, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.1, 0.9, 0.4, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.1, 0.9, 0.5, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.1, 0.9, 0.6000000000000001, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.1, 0.9, 0.7000000000000001, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.1, 0.9, 0.8, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.1, 0.9, 0.9, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
"\n",
{"foreground": (0.2, 0.0, 0.0, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.2, 0.0, 0.1, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.2, 0.0, 0.2, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.2, 0.0, 0.30000000000000004, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.2, 0.0, 0.4, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.2, 0.0, 0.5, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.2, 0.0, 0.6000000000000001, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.2, 0.0, 0.7000000000000001, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.2, 0.0, 0.8, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.2, 0.0, 0.9, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.2, 0.1, 0.0, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.2, 0.1, 0.1, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.2, 0.1, 0.2, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.2, 0.1, 0.30000000000000004, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.2, 0.1, 0.4, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.2, 0.1, 0.5, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.2, 0.1, 0.6000000000000001, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.2, 0.1, 0.7000000000000001, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.2, 0.1, 0.8, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.2, 0.1, 0.9, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.2, 0.2, 0.0, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.2, 0.2, 0.1, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.2, 0.2, 0.2, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.2, 0.2, 0.30000000000000004, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.2, 0.2, 0.4, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.2, 0.2, 0.5, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.2, 0.2, 0.6000000000000001, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.2, 0.2, 0.7000000000000001, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.2, 0.2, 0.8, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.2, 0.2, 0.9, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.2, 0.30000000000000004, 0.0, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.2, 0.30000000000000004, 0.1, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.2, 0.30000000000000004, 0.2, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.2, 0.30000000000000004, 0.30000000000000004, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.2, 0.30000000000000004, 0.4, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.2, 0.30000000000000004, 0.5, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.2, 0.30000000000000004, 0.6000000000000001, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.2, 0.30000000000000004, 0.7000000000000001, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.2, 0.30000000000000004, 0.8, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.2, 0.30000000000000004, 0.9, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.2, 0.4, 0.0, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.2, 0.4, 0.1, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.2, 0.4, 0.2, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.2, 0.4, 0.30000000000000004, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.2, 0.4, 0.4, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.2, 0.4, 0.5, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.2, 0.4, 0.6000000000000001, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.2, 0.4, 0.7000000000000001, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.2, 0.4, 0.8, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.2, 0.4, 0.9, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.2, 0.5, 0.0, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.2, 0.5, 0.1, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.2, 0.5, 0.2, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.2, 0.5, 0.30000000000000004, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.2, 0.5, 0.4, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.2, 0.5, 0.5, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.2, 0.5, 0.6000000000000001, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.2, 0.5, 0.7000000000000001, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.2, 0.5, 0.8, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.2, 0.5, 0.9, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.2, 0.6000000000000001, 0.0, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.2, 0.6000000000000001, 0.1, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.2, 0.6000000000000001, 0.2, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.2, 0.6000000000000001, 0.30000000000000004, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.2, 0.6000000000000001, 0.4, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.2, 0.6000000000000001, 0.5, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.2, 0.6000000000000001, 0.6000000000000001, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.2, 0.6000000000000001, 0.7000000000000001, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.2, 0.6000000000000001, 0.8, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.2, 0.6000000000000001, 0.9, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.2, 0.7000000000000001, 0.0, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.2, 0.7000000000000001, 0.1, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.2, 0.7000000000000001, 0.2, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.2, 0.7000000000000001, 0.30000000000000004, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.2, 0.7000000000000001, 0.4, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.2, 0.7000000000000001, 0.5, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.2, 0.7000000000000001, 0.6000000000000001, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.2, 0.7000000000000001, 0.7000000000000001, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.2, 0.7000000000000001, 0.8, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.2, 0.7000000000000001, 0.9, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.2, 0.8, 0.0, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.2, 0.8, 0.1, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.2, 0.8, 0.2, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.2, 0.8, 0.30000000000000004, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.2, 0.8, 0.4, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.2, 0.8, 0.5, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.2, 0.8, 0.6000000000000001, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.2, 0.8, 0.7000000000000001, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.2, 0.8, 0.8, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.2, 0.8, 0.9, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.2, 0.9, 0.0, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.2, 0.9, 0.1, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.2, 0.9, 0.2, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.2, 0.9, 0.30000000000000004, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.2, 0.9, 0.4, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.2, 0.9, 0.5, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.2, 0.9, 0.6000000000000001, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.2, 0.9, 0.7000000000000001, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.2, 0.9, 0.8, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.2, 0.9, 0.9, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
"\n",
{"foreground": (0.30000000000000004, 0.0, 0.0, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.30000000000000004, 0.0, 0.1, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.30000000000000004, 0.0, 0.2, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.30000000000000004, 0.0, 0.30000000000000004, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.30000000000000004, 0.0, 0.4, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.30000000000000004, 0.0, 0.5, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.30000000000000004, 0.0, 0.6000000000000001, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.30000000000000004, 0.0, 0.7000000000000001, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.30000000000000004, 0.0, 0.8, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.30000000000000004, 0.0, 0.9, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.30000000000000004, 0.1, 0.0, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.30000000000000004, 0.1, 0.1, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.30000000000000004, 0.1, 0.2, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.30000000000000004, 0.1, 0.30000000000000004, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.30000000000000004, 0.1, 0.4, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.30000000000000004, 0.1, 0.5, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.30000000000000004, 0.1, 0.6000000000000001, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.30000000000000004, 0.1, 0.7000000000000001, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.30000000000000004, 0.1, 0.8, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.30000000000000004, 0.1, 0.9, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.30000000000000004, 0.2, 0.0, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.30000000000000004, 0.2, 0.1, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.30000000000000004, 0.2, 0.2, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.30000000000000004, 0.2, 0.30000000000000004, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.30000000000000004, 0.2, 0.4, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.30000000000000004, 0.2, 0.5, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.30000000000000004, 0.2, 0.6000000000000001, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.30000000000000004, 0.2, 0.7000000000000001, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.30000000000000004, 0.2, 0.8, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.30000000000000004, 0.2, 0.9, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.30000000000000004, 0.30000000000000004, 0.0, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.30000000000000004, 0.30000000000000004, 0.1, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.30000000000000004, 0.30000000000000004, 0.2, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{
"foreground": (
0.30000000000000004,
0.30000000000000004,
0.30000000000000004,
1,
)
},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.30000000000000004, 0.30000000000000004, 0.4, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.30000000000000004, 0.30000000000000004, 0.5, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{
"foreground": (
0.30000000000000004,
0.30000000000000004,
0.6000000000000001,
1,
)
},
"X",
{"foreground": MetaInterpretation.REVERT},
{
"foreground": (
0.30000000000000004,
0.30000000000000004,
0.7000000000000001,
1,
)
},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.30000000000000004, 0.30000000000000004, 0.8, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.30000000000000004, 0.30000000000000004, 0.9, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.30000000000000004, 0.4, 0.0, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.30000000000000004, 0.4, 0.1, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.30000000000000004, 0.4, 0.2, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.30000000000000004, 0.4, 0.30000000000000004, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.30000000000000004, 0.4, 0.4, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.30000000000000004, 0.4, 0.5, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.30000000000000004, 0.4, 0.6000000000000001, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.30000000000000004, 0.4, 0.7000000000000001, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.30000000000000004, 0.4, 0.8, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.30000000000000004, 0.4, 0.9, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.30000000000000004, 0.5, 0.0, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.30000000000000004, 0.5, 0.1, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.30000000000000004, 0.5, 0.2, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.30000000000000004, 0.5, 0.30000000000000004, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.30000000000000004, 0.5, 0.4, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.30000000000000004, 0.5, 0.5, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.30000000000000004, 0.5, 0.6000000000000001, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.30000000000000004, 0.5, 0.7000000000000001, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.30000000000000004, 0.5, 0.8, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.30000000000000004, 0.5, 0.9, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.30000000000000004, 0.6000000000000001, 0.0, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.30000000000000004, 0.6000000000000001, 0.1, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.30000000000000004, 0.6000000000000001, 0.2, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{
"foreground": (
0.30000000000000004,
0.6000000000000001,
0.30000000000000004,
1,
)
},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.30000000000000004, 0.6000000000000001, 0.4, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.30000000000000004, 0.6000000000000001, 0.5, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{
"foreground": (
0.30000000000000004,
0.6000000000000001,
0.6000000000000001,
1,
)
},
"X",
{"foreground": MetaInterpretation.REVERT},
{
"foreground": (
0.30000000000000004,
0.6000000000000001,
0.7000000000000001,
1,
)
},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.30000000000000004, 0.6000000000000001, 0.8, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.30000000000000004, 0.6000000000000001, 0.9, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.30000000000000004, 0.7000000000000001, 0.0, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.30000000000000004, 0.7000000000000001, 0.1, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.30000000000000004, 0.7000000000000001, 0.2, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{
"foreground": (
0.30000000000000004,
0.7000000000000001,
0.30000000000000004,
1,
)
},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.30000000000000004, 0.7000000000000001, 0.4, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.30000000000000004, 0.7000000000000001, 0.5, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{
"foreground": (
0.30000000000000004,
0.7000000000000001,
0.6000000000000001,
1,
)
},
"X",
{"foreground": MetaInterpretation.REVERT},
{
"foreground": (
0.30000000000000004,
0.7000000000000001,
0.7000000000000001,
1,
)
},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.30000000000000004, 0.7000000000000001, 0.8, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.30000000000000004, 0.7000000000000001, 0.9, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.30000000000000004, 0.8, 0.0, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.30000000000000004, 0.8, 0.1, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.30000000000000004, 0.8, 0.2, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.30000000000000004, 0.8, 0.30000000000000004, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.30000000000000004, 0.8, 0.4, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.30000000000000004, 0.8, 0.5, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.30000000000000004, 0.8, 0.6000000000000001, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.30000000000000004, 0.8, 0.7000000000000001, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.30000000000000004, 0.8, 0.8, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.30000000000000004, 0.8, 0.9, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.30000000000000004, 0.9, 0.0, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.30000000000000004, 0.9, 0.1, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.30000000000000004, 0.9, 0.2, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.30000000000000004, 0.9, 0.30000000000000004, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.30000000000000004, 0.9, 0.4, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.30000000000000004, 0.9, 0.5, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.30000000000000004, 0.9, 0.6000000000000001, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.30000000000000004, 0.9, 0.7000000000000001, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.30000000000000004, 0.9, 0.8, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.30000000000000004, 0.9, 0.9, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
"\n",
{"foreground": (0.4, 0.0, 0.0, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.4, 0.0, 0.1, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.4, 0.0, 0.2, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.4, 0.0, 0.30000000000000004, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.4, 0.0, 0.4, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.4, 0.0, 0.5, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.4, 0.0, 0.6000000000000001, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.4, 0.0, 0.7000000000000001, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.4, 0.0, 0.8, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.4, 0.0, 0.9, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.4, 0.1, 0.0, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.4, 0.1, 0.1, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.4, 0.1, 0.2, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.4, 0.1, 0.30000000000000004, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.4, 0.1, 0.4, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.4, 0.1, 0.5, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.4, 0.1, 0.6000000000000001, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.4, 0.1, 0.7000000000000001, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.4, 0.1, 0.8, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.4, 0.1, 0.9, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.4, 0.2, 0.0, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.4, 0.2, 0.1, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.4, 0.2, 0.2, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.4, 0.2, 0.30000000000000004, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.4, 0.2, 0.4, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.4, 0.2, 0.5, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.4, 0.2, 0.6000000000000001, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.4, 0.2, 0.7000000000000001, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.4, 0.2, 0.8, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.4, 0.2, 0.9, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.4, 0.30000000000000004, 0.0, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.4, 0.30000000000000004, 0.1, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.4, 0.30000000000000004, 0.2, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.4, 0.30000000000000004, 0.30000000000000004, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.4, 0.30000000000000004, 0.4, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.4, 0.30000000000000004, 0.5, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.4, 0.30000000000000004, 0.6000000000000001, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.4, 0.30000000000000004, 0.7000000000000001, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.4, 0.30000000000000004, 0.8, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.4, 0.30000000000000004, 0.9, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.4, 0.4, 0.0, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.4, 0.4, 0.1, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.4, 0.4, 0.2, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.4, 0.4, 0.30000000000000004, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.4, 0.4, 0.4, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.4, 0.4, 0.5, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.4, 0.4, 0.6000000000000001, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.4, 0.4, 0.7000000000000001, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.4, 0.4, 0.8, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.4, 0.4, 0.9, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.4, 0.5, 0.0, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.4, 0.5, 0.1, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.4, 0.5, 0.2, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.4, 0.5, 0.30000000000000004, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.4, 0.5, 0.4, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.4, 0.5, 0.5, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.4, 0.5, 0.6000000000000001, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.4, 0.5, 0.7000000000000001, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.4, 0.5, 0.8, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.4, 0.5, 0.9, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.4, 0.6000000000000001, 0.0, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.4, 0.6000000000000001, 0.1, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.4, 0.6000000000000001, 0.2, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.4, 0.6000000000000001, 0.30000000000000004, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.4, 0.6000000000000001, 0.4, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.4, 0.6000000000000001, 0.5, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.4, 0.6000000000000001, 0.6000000000000001, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.4, 0.6000000000000001, 0.7000000000000001, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.4, 0.6000000000000001, 0.8, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.4, 0.6000000000000001, 0.9, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.4, 0.7000000000000001, 0.0, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.4, 0.7000000000000001, 0.1, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.4, 0.7000000000000001, 0.2, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.4, 0.7000000000000001, 0.30000000000000004, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.4, 0.7000000000000001, 0.4, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.4, 0.7000000000000001, 0.5, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.4, 0.7000000000000001, 0.6000000000000001, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.4, 0.7000000000000001, 0.7000000000000001, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.4, 0.7000000000000001, 0.8, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.4, 0.7000000000000001, 0.9, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.4, 0.8, 0.0, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.4, 0.8, 0.1, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.4, 0.8, 0.2, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.4, 0.8, 0.30000000000000004, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.4, 0.8, 0.4, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.4, 0.8, 0.5, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.4, 0.8, 0.6000000000000001, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.4, 0.8, 0.7000000000000001, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.4, 0.8, 0.8, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.4, 0.8, 0.9, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.4, 0.9, 0.0, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.4, 0.9, 0.1, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.4, 0.9, 0.2, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.4, 0.9, 0.30000000000000004, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.4, 0.9, 0.4, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.4, 0.9, 0.5, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.4, 0.9, 0.6000000000000001, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.4, 0.9, 0.7000000000000001, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.4, 0.9, 0.8, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.4, 0.9, 0.9, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
"\n",
{"foreground": (0.5, 0.0, 0.0, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.5, 0.0, 0.1, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.5, 0.0, 0.2, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.5, 0.0, 0.30000000000000004, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.5, 0.0, 0.4, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.5, 0.0, 0.5, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.5, 0.0, 0.6000000000000001, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.5, 0.0, 0.7000000000000001, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.5, 0.0, 0.8, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.5, 0.0, 0.9, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.5, 0.1, 0.0, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.5, 0.1, 0.1, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.5, 0.1, 0.2, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.5, 0.1, 0.30000000000000004, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.5, 0.1, 0.4, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.5, 0.1, 0.5, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.5, 0.1, 0.6000000000000001, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.5, 0.1, 0.7000000000000001, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.5, 0.1, 0.8, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.5, 0.1, 0.9, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.5, 0.2, 0.0, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.5, 0.2, 0.1, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.5, 0.2, 0.2, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.5, 0.2, 0.30000000000000004, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.5, 0.2, 0.4, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.5, 0.2, 0.5, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.5, 0.2, 0.6000000000000001, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.5, 0.2, 0.7000000000000001, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.5, 0.2, 0.8, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.5, 0.2, 0.9, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.5, 0.30000000000000004, 0.0, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.5, 0.30000000000000004, 0.1, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.5, 0.30000000000000004, 0.2, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.5, 0.30000000000000004, 0.30000000000000004, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.5, 0.30000000000000004, 0.4, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.5, 0.30000000000000004, 0.5, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.5, 0.30000000000000004, 0.6000000000000001, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.5, 0.30000000000000004, 0.7000000000000001, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.5, 0.30000000000000004, 0.8, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.5, 0.30000000000000004, 0.9, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.5, 0.4, 0.0, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.5, 0.4, 0.1, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.5, 0.4, 0.2, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.5, 0.4, 0.30000000000000004, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.5, 0.4, 0.4, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.5, 0.4, 0.5, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.5, 0.4, 0.6000000000000001, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.5, 0.4, 0.7000000000000001, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.5, 0.4, 0.8, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.5, 0.4, 0.9, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.5, 0.5, 0.0, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.5, 0.5, 0.1, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.5, 0.5, 0.2, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.5, 0.5, 0.30000000000000004, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.5, 0.5, 0.4, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.5, 0.5, 0.5, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.5, 0.5, 0.6000000000000001, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.5, 0.5, 0.7000000000000001, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.5, 0.5, 0.8, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.5, 0.5, 0.9, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.5, 0.6000000000000001, 0.0, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.5, 0.6000000000000001, 0.1, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.5, 0.6000000000000001, 0.2, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.5, 0.6000000000000001, 0.30000000000000004, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.5, 0.6000000000000001, 0.4, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.5, 0.6000000000000001, 0.5, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.5, 0.6000000000000001, 0.6000000000000001, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.5, 0.6000000000000001, 0.7000000000000001, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.5, 0.6000000000000001, 0.8, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.5, 0.6000000000000001, 0.9, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.5, 0.7000000000000001, 0.0, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.5, 0.7000000000000001, 0.1, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.5, 0.7000000000000001, 0.2, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.5, 0.7000000000000001, 0.30000000000000004, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.5, 0.7000000000000001, 0.4, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.5, 0.7000000000000001, 0.5, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.5, 0.7000000000000001, 0.6000000000000001, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.5, 0.7000000000000001, 0.7000000000000001, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.5, 0.7000000000000001, 0.8, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.5, 0.7000000000000001, 0.9, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.5, 0.8, 0.0, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.5, 0.8, 0.1, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.5, 0.8, 0.2, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.5, 0.8, 0.30000000000000004, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.5, 0.8, 0.4, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.5, 0.8, 0.5, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.5, 0.8, 0.6000000000000001, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.5, 0.8, 0.7000000000000001, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.5, 0.8, 0.8, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.5, 0.8, 0.9, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.5, 0.9, 0.0, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.5, 0.9, 0.1, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.5, 0.9, 0.2, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.5, 0.9, 0.30000000000000004, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.5, 0.9, 0.4, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.5, 0.9, 0.5, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.5, 0.9, 0.6000000000000001, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.5, 0.9, 0.7000000000000001, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.5, 0.9, 0.8, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.5, 0.9, 0.9, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
"\n",
{"foreground": (0.6000000000000001, 0.0, 0.0, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.6000000000000001, 0.0, 0.1, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.6000000000000001, 0.0, 0.2, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.6000000000000001, 0.0, 0.30000000000000004, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.6000000000000001, 0.0, 0.4, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.6000000000000001, 0.0, 0.5, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.6000000000000001, 0.0, 0.6000000000000001, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.6000000000000001, 0.0, 0.7000000000000001, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.6000000000000001, 0.0, 0.8, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.6000000000000001, 0.0, 0.9, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.6000000000000001, 0.1, 0.0, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.6000000000000001, 0.1, 0.1, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.6000000000000001, 0.1, 0.2, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.6000000000000001, 0.1, 0.30000000000000004, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.6000000000000001, 0.1, 0.4, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.6000000000000001, 0.1, 0.5, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.6000000000000001, 0.1, 0.6000000000000001, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.6000000000000001, 0.1, 0.7000000000000001, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.6000000000000001, 0.1, 0.8, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.6000000000000001, 0.1, 0.9, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.6000000000000001, 0.2, 0.0, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.6000000000000001, 0.2, 0.1, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.6000000000000001, 0.2, 0.2, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.6000000000000001, 0.2, 0.30000000000000004, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.6000000000000001, 0.2, 0.4, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.6000000000000001, 0.2, 0.5, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.6000000000000001, 0.2, 0.6000000000000001, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.6000000000000001, 0.2, 0.7000000000000001, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.6000000000000001, 0.2, 0.8, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.6000000000000001, 0.2, 0.9, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.6000000000000001, 0.30000000000000004, 0.0, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.6000000000000001, 0.30000000000000004, 0.1, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.6000000000000001, 0.30000000000000004, 0.2, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{
"foreground": (
0.6000000000000001,
0.30000000000000004,
0.30000000000000004,
1,
)
},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.6000000000000001, 0.30000000000000004, 0.4, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.6000000000000001, 0.30000000000000004, 0.5, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{
"foreground": (
0.6000000000000001,
0.30000000000000004,
0.6000000000000001,
1,
)
},
"X",
{"foreground": MetaInterpretation.REVERT},
{
"foreground": (
0.6000000000000001,
0.30000000000000004,
0.7000000000000001,
1,
)
},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.6000000000000001, 0.30000000000000004, 0.8, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.6000000000000001, 0.30000000000000004, 0.9, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.6000000000000001, 0.4, 0.0, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.6000000000000001, 0.4, 0.1, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.6000000000000001, 0.4, 0.2, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.6000000000000001, 0.4, 0.30000000000000004, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.6000000000000001, 0.4, 0.4, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.6000000000000001, 0.4, 0.5, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.6000000000000001, 0.4, 0.6000000000000001, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.6000000000000001, 0.4, 0.7000000000000001, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.6000000000000001, 0.4, 0.8, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.6000000000000001, 0.4, 0.9, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.6000000000000001, 0.5, 0.0, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.6000000000000001, 0.5, 0.1, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.6000000000000001, 0.5, 0.2, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.6000000000000001, 0.5, 0.30000000000000004, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.6000000000000001, 0.5, 0.4, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.6000000000000001, 0.5, 0.5, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.6000000000000001, 0.5, 0.6000000000000001, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.6000000000000001, 0.5, 0.7000000000000001, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.6000000000000001, 0.5, 0.8, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.6000000000000001, 0.5, 0.9, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.6000000000000001, 0.6000000000000001, 0.0, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.6000000000000001, 0.6000000000000001, 0.1, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.6000000000000001, 0.6000000000000001, 0.2, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{
"foreground": (
0.6000000000000001,
0.6000000000000001,
0.30000000000000004,
1,
)
},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.6000000000000001, 0.6000000000000001, 0.4, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.6000000000000001, 0.6000000000000001, 0.5, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.6000000000000001, 0.6000000000000001, 0.6000000000000001, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.6000000000000001, 0.6000000000000001, 0.7000000000000001, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.6000000000000001, 0.6000000000000001, 0.8, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.6000000000000001, 0.6000000000000001, 0.9, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.6000000000000001, 0.7000000000000001, 0.0, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.6000000000000001, 0.7000000000000001, 0.1, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.6000000000000001, 0.7000000000000001, 0.2, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{
"foreground": (
0.6000000000000001,
0.7000000000000001,
0.30000000000000004,
1,
)
},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.6000000000000001, 0.7000000000000001, 0.4, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.6000000000000001, 0.7000000000000001, 0.5, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.6000000000000001, 0.7000000000000001, 0.6000000000000001, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.6000000000000001, 0.7000000000000001, 0.7000000000000001, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.6000000000000001, 0.7000000000000001, 0.8, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.6000000000000001, 0.7000000000000001, 0.9, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.6000000000000001, 0.8, 0.0, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.6000000000000001, 0.8, 0.1, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.6000000000000001, 0.8, 0.2, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.6000000000000001, 0.8, 0.30000000000000004, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.6000000000000001, 0.8, 0.4, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.6000000000000001, 0.8, 0.5, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.6000000000000001, 0.8, 0.6000000000000001, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.6000000000000001, 0.8, 0.7000000000000001, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.6000000000000001, 0.8, 0.8, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.6000000000000001, 0.8, 0.9, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.6000000000000001, 0.9, 0.0, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.6000000000000001, 0.9, 0.1, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.6000000000000001, 0.9, 0.2, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.6000000000000001, 0.9, 0.30000000000000004, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.6000000000000001, 0.9, 0.4, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.6000000000000001, 0.9, 0.5, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.6000000000000001, 0.9, 0.6000000000000001, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.6000000000000001, 0.9, 0.7000000000000001, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.6000000000000001, 0.9, 0.8, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.6000000000000001, 0.9, 0.9, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
"\n",
{"foreground": (0.7000000000000001, 0.0, 0.0, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.7000000000000001, 0.0, 0.1, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.7000000000000001, 0.0, 0.2, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.7000000000000001, 0.0, 0.30000000000000004, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.7000000000000001, 0.0, 0.4, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.7000000000000001, 0.0, 0.5, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.7000000000000001, 0.0, 0.6000000000000001, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.7000000000000001, 0.0, 0.7000000000000001, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.7000000000000001, 0.0, 0.8, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.7000000000000001, 0.0, 0.9, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.7000000000000001, 0.1, 0.0, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.7000000000000001, 0.1, 0.1, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.7000000000000001, 0.1, 0.2, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.7000000000000001, 0.1, 0.30000000000000004, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.7000000000000001, 0.1, 0.4, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.7000000000000001, 0.1, 0.5, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.7000000000000001, 0.1, 0.6000000000000001, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.7000000000000001, 0.1, 0.7000000000000001, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.7000000000000001, 0.1, 0.8, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.7000000000000001, 0.1, 0.9, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.7000000000000001, 0.2, 0.0, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.7000000000000001, 0.2, 0.1, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.7000000000000001, 0.2, 0.2, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.7000000000000001, 0.2, 0.30000000000000004, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.7000000000000001, 0.2, 0.4, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.7000000000000001, 0.2, 0.5, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.7000000000000001, 0.2, 0.6000000000000001, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.7000000000000001, 0.2, 0.7000000000000001, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.7000000000000001, 0.2, 0.8, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.7000000000000001, 0.2, 0.9, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.7000000000000001, 0.30000000000000004, 0.0, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.7000000000000001, 0.30000000000000004, 0.1, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.7000000000000001, 0.30000000000000004, 0.2, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{
"foreground": (
0.7000000000000001,
0.30000000000000004,
0.30000000000000004,
1,
)
},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.7000000000000001, 0.30000000000000004, 0.4, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.7000000000000001, 0.30000000000000004, 0.5, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{
"foreground": (
0.7000000000000001,
0.30000000000000004,
0.6000000000000001,
1,
)
},
"X",
{"foreground": MetaInterpretation.REVERT},
{
"foreground": (
0.7000000000000001,
0.30000000000000004,
0.7000000000000001,
1,
)
},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.7000000000000001, 0.30000000000000004, 0.8, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.7000000000000001, 0.30000000000000004, 0.9, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.7000000000000001, 0.4, 0.0, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.7000000000000001, 0.4, 0.1, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.7000000000000001, 0.4, 0.2, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.7000000000000001, 0.4, 0.30000000000000004, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.7000000000000001, 0.4, 0.4, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.7000000000000001, 0.4, 0.5, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.7000000000000001, 0.4, 0.6000000000000001, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.7000000000000001, 0.4, 0.7000000000000001, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.7000000000000001, 0.4, 0.8, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.7000000000000001, 0.4, 0.9, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.7000000000000001, 0.5, 0.0, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.7000000000000001, 0.5, 0.1, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.7000000000000001, 0.5, 0.2, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.7000000000000001, 0.5, 0.30000000000000004, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.7000000000000001, 0.5, 0.4, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.7000000000000001, 0.5, 0.5, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.7000000000000001, 0.5, 0.6000000000000001, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.7000000000000001, 0.5, 0.7000000000000001, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.7000000000000001, 0.5, 0.8, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.7000000000000001, 0.5, 0.9, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.7000000000000001, 0.6000000000000001, 0.0, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.7000000000000001, 0.6000000000000001, 0.1, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.7000000000000001, 0.6000000000000001, 0.2, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{
"foreground": (
0.7000000000000001,
0.6000000000000001,
0.30000000000000004,
1,
)
},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.7000000000000001, 0.6000000000000001, 0.4, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.7000000000000001, 0.6000000000000001, 0.5, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.7000000000000001, 0.6000000000000001, 0.6000000000000001, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.7000000000000001, 0.6000000000000001, 0.7000000000000001, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.7000000000000001, 0.6000000000000001, 0.8, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.7000000000000001, 0.6000000000000001, 0.9, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.7000000000000001, 0.7000000000000001, 0.0, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.7000000000000001, 0.7000000000000001, 0.1, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.7000000000000001, 0.7000000000000001, 0.2, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{
"foreground": (
0.7000000000000001,
0.7000000000000001,
0.30000000000000004,
1,
)
},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.7000000000000001, 0.7000000000000001, 0.4, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.7000000000000001, 0.7000000000000001, 0.5, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.7000000000000001, 0.7000000000000001, 0.6000000000000001, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.7000000000000001, 0.7000000000000001, 0.7000000000000001, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.7000000000000001, 0.7000000000000001, 0.8, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.7000000000000001, 0.7000000000000001, 0.9, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.7000000000000001, 0.8, 0.0, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.7000000000000001, 0.8, 0.1, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.7000000000000001, 0.8, 0.2, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.7000000000000001, 0.8, 0.30000000000000004, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.7000000000000001, 0.8, 0.4, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.7000000000000001, 0.8, 0.5, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.7000000000000001, 0.8, 0.6000000000000001, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.7000000000000001, 0.8, 0.7000000000000001, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.7000000000000001, 0.8, 0.8, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.7000000000000001, 0.8, 0.9, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.7000000000000001, 0.9, 0.0, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.7000000000000001, 0.9, 0.1, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.7000000000000001, 0.9, 0.2, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.7000000000000001, 0.9, 0.30000000000000004, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.7000000000000001, 0.9, 0.4, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.7000000000000001, 0.9, 0.5, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.7000000000000001, 0.9, 0.6000000000000001, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.7000000000000001, 0.9, 0.7000000000000001, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.7000000000000001, 0.9, 0.8, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.7000000000000001, 0.9, 0.9, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
"\n",
{"foreground": (0.8, 0.0, 0.0, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.8, 0.0, 0.1, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.8, 0.0, 0.2, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.8, 0.0, 0.30000000000000004, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.8, 0.0, 0.4, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.8, 0.0, 0.5, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.8, 0.0, 0.6000000000000001, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.8, 0.0, 0.7000000000000001, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.8, 0.0, 0.8, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.8, 0.0, 0.9, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.8, 0.1, 0.0, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.8, 0.1, 0.1, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.8, 0.1, 0.2, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.8, 0.1, 0.30000000000000004, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.8, 0.1, 0.4, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.8, 0.1, 0.5, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.8, 0.1, 0.6000000000000001, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.8, 0.1, 0.7000000000000001, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.8, 0.1, 0.8, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.8, 0.1, 0.9, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.8, 0.2, 0.0, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.8, 0.2, 0.1, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.8, 0.2, 0.2, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.8, 0.2, 0.30000000000000004, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.8, 0.2, 0.4, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.8, 0.2, 0.5, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.8, 0.2, 0.6000000000000001, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.8, 0.2, 0.7000000000000001, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.8, 0.2, 0.8, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.8, 0.2, 0.9, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.8, 0.30000000000000004, 0.0, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.8, 0.30000000000000004, 0.1, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.8, 0.30000000000000004, 0.2, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.8, 0.30000000000000004, 0.30000000000000004, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.8, 0.30000000000000004, 0.4, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.8, 0.30000000000000004, 0.5, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.8, 0.30000000000000004, 0.6000000000000001, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.8, 0.30000000000000004, 0.7000000000000001, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.8, 0.30000000000000004, 0.8, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.8, 0.30000000000000004, 0.9, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.8, 0.4, 0.0, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.8, 0.4, 0.1, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.8, 0.4, 0.2, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.8, 0.4, 0.30000000000000004, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.8, 0.4, 0.4, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.8, 0.4, 0.5, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.8, 0.4, 0.6000000000000001, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.8, 0.4, 0.7000000000000001, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.8, 0.4, 0.8, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.8, 0.4, 0.9, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.8, 0.5, 0.0, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.8, 0.5, 0.1, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.8, 0.5, 0.2, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.8, 0.5, 0.30000000000000004, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.8, 0.5, 0.4, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.8, 0.5, 0.5, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.8, 0.5, 0.6000000000000001, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.8, 0.5, 0.7000000000000001, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.8, 0.5, 0.8, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.8, 0.5, 0.9, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.8, 0.6000000000000001, 0.0, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.8, 0.6000000000000001, 0.1, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.8, 0.6000000000000001, 0.2, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.8, 0.6000000000000001, 0.30000000000000004, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.8, 0.6000000000000001, 0.4, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.8, 0.6000000000000001, 0.5, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.8, 0.6000000000000001, 0.6000000000000001, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.8, 0.6000000000000001, 0.7000000000000001, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.8, 0.6000000000000001, 0.8, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.8, 0.6000000000000001, 0.9, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.8, 0.7000000000000001, 0.0, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.8, 0.7000000000000001, 0.1, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.8, 0.7000000000000001, 0.2, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.8, 0.7000000000000001, 0.30000000000000004, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.8, 0.7000000000000001, 0.4, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.8, 0.7000000000000001, 0.5, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.8, 0.7000000000000001, 0.6000000000000001, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.8, 0.7000000000000001, 0.7000000000000001, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.8, 0.7000000000000001, 0.8, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.8, 0.7000000000000001, 0.9, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.8, 0.8, 0.0, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.8, 0.8, 0.1, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.8, 0.8, 0.2, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.8, 0.8, 0.30000000000000004, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.8, 0.8, 0.4, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.8, 0.8, 0.5, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.8, 0.8, 0.6000000000000001, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.8, 0.8, 0.7000000000000001, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.8, 0.8, 0.8, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.8, 0.8, 0.9, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.8, 0.9, 0.0, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.8, 0.9, 0.1, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.8, 0.9, 0.2, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.8, 0.9, 0.30000000000000004, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.8, 0.9, 0.4, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.8, 0.9, 0.5, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.8, 0.9, 0.6000000000000001, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.8, 0.9, 0.7000000000000001, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.8, 0.9, 0.8, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.8, 0.9, 0.9, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
"\n",
{"foreground": (0.9, 0.0, 0.0, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.9, 0.0, 0.1, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.9, 0.0, 0.2, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.9, 0.0, 0.30000000000000004, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.9, 0.0, 0.4, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.9, 0.0, 0.5, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.9, 0.0, 0.6000000000000001, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.9, 0.0, 0.7000000000000001, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.9, 0.0, 0.8, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.9, 0.0, 0.9, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.9, 0.1, 0.0, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.9, 0.1, 0.1, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.9, 0.1, 0.2, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.9, 0.1, 0.30000000000000004, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.9, 0.1, 0.4, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.9, 0.1, 0.5, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.9, 0.1, 0.6000000000000001, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.9, 0.1, 0.7000000000000001, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.9, 0.1, 0.8, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.9, 0.1, 0.9, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.9, 0.2, 0.0, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.9, 0.2, 0.1, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.9, 0.2, 0.2, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.9, 0.2, 0.30000000000000004, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.9, 0.2, 0.4, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.9, 0.2, 0.5, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.9, 0.2, 0.6000000000000001, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.9, 0.2, 0.7000000000000001, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.9, 0.2, 0.8, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.9, 0.2, 0.9, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.9, 0.30000000000000004, 0.0, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.9, 0.30000000000000004, 0.1, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.9, 0.30000000000000004, 0.2, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.9, 0.30000000000000004, 0.30000000000000004, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.9, 0.30000000000000004, 0.4, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.9, 0.30000000000000004, 0.5, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.9, 0.30000000000000004, 0.6000000000000001, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.9, 0.30000000000000004, 0.7000000000000001, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.9, 0.30000000000000004, 0.8, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.9, 0.30000000000000004, 0.9, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.9, 0.4, 0.0, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.9, 0.4, 0.1, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.9, 0.4, 0.2, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.9, 0.4, 0.30000000000000004, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.9, 0.4, 0.4, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.9, 0.4, 0.5, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.9, 0.4, 0.6000000000000001, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.9, 0.4, 0.7000000000000001, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.9, 0.4, 0.8, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.9, 0.4, 0.9, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.9, 0.5, 0.0, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.9, 0.5, 0.1, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.9, 0.5, 0.2, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.9, 0.5, 0.30000000000000004, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.9, 0.5, 0.4, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.9, 0.5, 0.5, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.9, 0.5, 0.6000000000000001, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.9, 0.5, 0.7000000000000001, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.9, 0.5, 0.8, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.9, 0.5, 0.9, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.9, 0.6000000000000001, 0.0, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.9, 0.6000000000000001, 0.1, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.9, 0.6000000000000001, 0.2, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.9, 0.6000000000000001, 0.30000000000000004, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.9, 0.6000000000000001, 0.4, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.9, 0.6000000000000001, 0.5, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.9, 0.6000000000000001, 0.6000000000000001, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.9, 0.6000000000000001, 0.7000000000000001, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.9, 0.6000000000000001, 0.8, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.9, 0.6000000000000001, 0.9, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.9, 0.7000000000000001, 0.0, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.9, 0.7000000000000001, 0.1, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.9, 0.7000000000000001, 0.2, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.9, 0.7000000000000001, 0.30000000000000004, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.9, 0.7000000000000001, 0.4, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.9, 0.7000000000000001, 0.5, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.9, 0.7000000000000001, 0.6000000000000001, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.9, 0.7000000000000001, 0.7000000000000001, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.9, 0.7000000000000001, 0.8, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.9, 0.7000000000000001, 0.9, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.9, 0.8, 0.0, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.9, 0.8, 0.1, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.9, 0.8, 0.2, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.9, 0.8, 0.30000000000000004, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.9, 0.8, 0.4, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.9, 0.8, 0.5, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.9, 0.8, 0.6000000000000001, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.9, 0.8, 0.7000000000000001, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.9, 0.8, 0.8, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.9, 0.8, 0.9, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.9, 0.9, 0.0, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.9, 0.9, 0.1, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.9, 0.9, 0.2, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.9, 0.9, 0.30000000000000004, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.9, 0.9, 0.4, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.9, 0.9, 0.5, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.9, 0.9, 0.6000000000000001, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.9, 0.9, 0.7000000000000001, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.9, 0.9, 0.8, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
{"foreground": (0.9, 0.9, 0.9, 1)},
"X",
{"foreground": MetaInterpretation.REVERT},
"\n\nAnd some background RGB:\n\n",
{"background": (0.0, 0.0, 0.0, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.0, 0.0, 0.1, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.0, 0.0, 0.2, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.0, 0.0, 0.30000000000000004, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.0, 0.0, 0.4, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.0, 0.0, 0.5, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.0, 0.0, 0.6000000000000001, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.0, 0.0, 0.7000000000000001, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.0, 0.0, 0.8, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.0, 0.0, 0.9, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.0, 0.1, 0.0, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.0, 0.1, 0.1, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.0, 0.1, 0.2, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.0, 0.1, 0.30000000000000004, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.0, 0.1, 0.4, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.0, 0.1, 0.5, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.0, 0.1, 0.6000000000000001, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.0, 0.1, 0.7000000000000001, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.0, 0.1, 0.8, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.0, 0.1, 0.9, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.0, 0.2, 0.0, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.0, 0.2, 0.1, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.0, 0.2, 0.2, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.0, 0.2, 0.30000000000000004, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.0, 0.2, 0.4, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.0, 0.2, 0.5, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.0, 0.2, 0.6000000000000001, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.0, 0.2, 0.7000000000000001, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.0, 0.2, 0.8, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.0, 0.2, 0.9, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.0, 0.30000000000000004, 0.0, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.0, 0.30000000000000004, 0.1, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.0, 0.30000000000000004, 0.2, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.0, 0.30000000000000004, 0.30000000000000004, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.0, 0.30000000000000004, 0.4, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.0, 0.30000000000000004, 0.5, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.0, 0.30000000000000004, 0.6000000000000001, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.0, 0.30000000000000004, 0.7000000000000001, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.0, 0.30000000000000004, 0.8, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.0, 0.30000000000000004, 0.9, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.0, 0.4, 0.0, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.0, 0.4, 0.1, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.0, 0.4, 0.2, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.0, 0.4, 0.30000000000000004, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.0, 0.4, 0.4, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.0, 0.4, 0.5, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.0, 0.4, 0.6000000000000001, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.0, 0.4, 0.7000000000000001, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.0, 0.4, 0.8, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.0, 0.4, 0.9, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.0, 0.5, 0.0, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.0, 0.5, 0.1, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.0, 0.5, 0.2, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.0, 0.5, 0.30000000000000004, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.0, 0.5, 0.4, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.0, 0.5, 0.5, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.0, 0.5, 0.6000000000000001, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.0, 0.5, 0.7000000000000001, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.0, 0.5, 0.8, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.0, 0.5, 0.9, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.0, 0.6000000000000001, 0.0, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.0, 0.6000000000000001, 0.1, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.0, 0.6000000000000001, 0.2, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.0, 0.6000000000000001, 0.30000000000000004, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.0, 0.6000000000000001, 0.4, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.0, 0.6000000000000001, 0.5, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.0, 0.6000000000000001, 0.6000000000000001, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.0, 0.6000000000000001, 0.7000000000000001, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.0, 0.6000000000000001, 0.8, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.0, 0.6000000000000001, 0.9, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.0, 0.7000000000000001, 0.0, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.0, 0.7000000000000001, 0.1, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.0, 0.7000000000000001, 0.2, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.0, 0.7000000000000001, 0.30000000000000004, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.0, 0.7000000000000001, 0.4, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.0, 0.7000000000000001, 0.5, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.0, 0.7000000000000001, 0.6000000000000001, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.0, 0.7000000000000001, 0.7000000000000001, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.0, 0.7000000000000001, 0.8, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.0, 0.7000000000000001, 0.9, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.0, 0.8, 0.0, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.0, 0.8, 0.1, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.0, 0.8, 0.2, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.0, 0.8, 0.30000000000000004, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.0, 0.8, 0.4, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.0, 0.8, 0.5, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.0, 0.8, 0.6000000000000001, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.0, 0.8, 0.7000000000000001, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.0, 0.8, 0.8, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.0, 0.8, 0.9, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.0, 0.9, 0.0, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.0, 0.9, 0.1, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.0, 0.9, 0.2, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.0, 0.9, 0.30000000000000004, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.0, 0.9, 0.4, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.0, 0.9, 0.5, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.0, 0.9, 0.6000000000000001, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.0, 0.9, 0.7000000000000001, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.0, 0.9, 0.8, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.0, 0.9, 0.9, 1)},
" ",
{"background": MetaInterpretation.REVERT},
"\n",
{"background": (0.1, 0.0, 0.0, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.1, 0.0, 0.1, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.1, 0.0, 0.2, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.1, 0.0, 0.30000000000000004, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.1, 0.0, 0.4, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.1, 0.0, 0.5, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.1, 0.0, 0.6000000000000001, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.1, 0.0, 0.7000000000000001, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.1, 0.0, 0.8, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.1, 0.0, 0.9, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.1, 0.1, 0.0, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.1, 0.1, 0.1, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.1, 0.1, 0.2, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.1, 0.1, 0.30000000000000004, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.1, 0.1, 0.4, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.1, 0.1, 0.5, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.1, 0.1, 0.6000000000000001, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.1, 0.1, 0.7000000000000001, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.1, 0.1, 0.8, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.1, 0.1, 0.9, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.1, 0.2, 0.0, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.1, 0.2, 0.1, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.1, 0.2, 0.2, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.1, 0.2, 0.30000000000000004, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.1, 0.2, 0.4, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.1, 0.2, 0.5, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.1, 0.2, 0.6000000000000001, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.1, 0.2, 0.7000000000000001, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.1, 0.2, 0.8, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.1, 0.2, 0.9, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.1, 0.30000000000000004, 0.0, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.1, 0.30000000000000004, 0.1, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.1, 0.30000000000000004, 0.2, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.1, 0.30000000000000004, 0.30000000000000004, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.1, 0.30000000000000004, 0.4, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.1, 0.30000000000000004, 0.5, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.1, 0.30000000000000004, 0.6000000000000001, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.1, 0.30000000000000004, 0.7000000000000001, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.1, 0.30000000000000004, 0.8, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.1, 0.30000000000000004, 0.9, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.1, 0.4, 0.0, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.1, 0.4, 0.1, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.1, 0.4, 0.2, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.1, 0.4, 0.30000000000000004, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.1, 0.4, 0.4, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.1, 0.4, 0.5, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.1, 0.4, 0.6000000000000001, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.1, 0.4, 0.7000000000000001, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.1, 0.4, 0.8, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.1, 0.4, 0.9, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.1, 0.5, 0.0, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.1, 0.5, 0.1, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.1, 0.5, 0.2, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.1, 0.5, 0.30000000000000004, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.1, 0.5, 0.4, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.1, 0.5, 0.5, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.1, 0.5, 0.6000000000000001, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.1, 0.5, 0.7000000000000001, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.1, 0.5, 0.8, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.1, 0.5, 0.9, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.1, 0.6000000000000001, 0.0, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.1, 0.6000000000000001, 0.1, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.1, 0.6000000000000001, 0.2, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.1, 0.6000000000000001, 0.30000000000000004, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.1, 0.6000000000000001, 0.4, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.1, 0.6000000000000001, 0.5, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.1, 0.6000000000000001, 0.6000000000000001, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.1, 0.6000000000000001, 0.7000000000000001, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.1, 0.6000000000000001, 0.8, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.1, 0.6000000000000001, 0.9, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.1, 0.7000000000000001, 0.0, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.1, 0.7000000000000001, 0.1, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.1, 0.7000000000000001, 0.2, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.1, 0.7000000000000001, 0.30000000000000004, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.1, 0.7000000000000001, 0.4, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.1, 0.7000000000000001, 0.5, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.1, 0.7000000000000001, 0.6000000000000001, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.1, 0.7000000000000001, 0.7000000000000001, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.1, 0.7000000000000001, 0.8, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.1, 0.7000000000000001, 0.9, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.1, 0.8, 0.0, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.1, 0.8, 0.1, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.1, 0.8, 0.2, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.1, 0.8, 0.30000000000000004, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.1, 0.8, 0.4, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.1, 0.8, 0.5, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.1, 0.8, 0.6000000000000001, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.1, 0.8, 0.7000000000000001, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.1, 0.8, 0.8, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.1, 0.8, 0.9, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.1, 0.9, 0.0, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.1, 0.9, 0.1, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.1, 0.9, 0.2, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.1, 0.9, 0.30000000000000004, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.1, 0.9, 0.4, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.1, 0.9, 0.5, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.1, 0.9, 0.6000000000000001, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.1, 0.9, 0.7000000000000001, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.1, 0.9, 0.8, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.1, 0.9, 0.9, 1)},
" ",
{"background": MetaInterpretation.REVERT},
"\n",
{"background": (0.2, 0.0, 0.0, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.2, 0.0, 0.1, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.2, 0.0, 0.2, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.2, 0.0, 0.30000000000000004, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.2, 0.0, 0.4, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.2, 0.0, 0.5, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.2, 0.0, 0.6000000000000001, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.2, 0.0, 0.7000000000000001, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.2, 0.0, 0.8, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.2, 0.0, 0.9, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.2, 0.1, 0.0, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.2, 0.1, 0.1, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.2, 0.1, 0.2, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.2, 0.1, 0.30000000000000004, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.2, 0.1, 0.4, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.2, 0.1, 0.5, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.2, 0.1, 0.6000000000000001, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.2, 0.1, 0.7000000000000001, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.2, 0.1, 0.8, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.2, 0.1, 0.9, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.2, 0.2, 0.0, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.2, 0.2, 0.1, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.2, 0.2, 0.2, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.2, 0.2, 0.30000000000000004, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.2, 0.2, 0.4, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.2, 0.2, 0.5, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.2, 0.2, 0.6000000000000001, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.2, 0.2, 0.7000000000000001, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.2, 0.2, 0.8, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.2, 0.2, 0.9, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.2, 0.30000000000000004, 0.0, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.2, 0.30000000000000004, 0.1, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.2, 0.30000000000000004, 0.2, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.2, 0.30000000000000004, 0.30000000000000004, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.2, 0.30000000000000004, 0.4, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.2, 0.30000000000000004, 0.5, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.2, 0.30000000000000004, 0.6000000000000001, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.2, 0.30000000000000004, 0.7000000000000001, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.2, 0.30000000000000004, 0.8, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.2, 0.30000000000000004, 0.9, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.2, 0.4, 0.0, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.2, 0.4, 0.1, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.2, 0.4, 0.2, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.2, 0.4, 0.30000000000000004, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.2, 0.4, 0.4, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.2, 0.4, 0.5, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.2, 0.4, 0.6000000000000001, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.2, 0.4, 0.7000000000000001, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.2, 0.4, 0.8, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.2, 0.4, 0.9, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.2, 0.5, 0.0, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.2, 0.5, 0.1, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.2, 0.5, 0.2, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.2, 0.5, 0.30000000000000004, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.2, 0.5, 0.4, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.2, 0.5, 0.5, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.2, 0.5, 0.6000000000000001, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.2, 0.5, 0.7000000000000001, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.2, 0.5, 0.8, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.2, 0.5, 0.9, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.2, 0.6000000000000001, 0.0, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.2, 0.6000000000000001, 0.1, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.2, 0.6000000000000001, 0.2, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.2, 0.6000000000000001, 0.30000000000000004, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.2, 0.6000000000000001, 0.4, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.2, 0.6000000000000001, 0.5, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.2, 0.6000000000000001, 0.6000000000000001, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.2, 0.6000000000000001, 0.7000000000000001, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.2, 0.6000000000000001, 0.8, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.2, 0.6000000000000001, 0.9, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.2, 0.7000000000000001, 0.0, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.2, 0.7000000000000001, 0.1, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.2, 0.7000000000000001, 0.2, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.2, 0.7000000000000001, 0.30000000000000004, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.2, 0.7000000000000001, 0.4, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.2, 0.7000000000000001, 0.5, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.2, 0.7000000000000001, 0.6000000000000001, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.2, 0.7000000000000001, 0.7000000000000001, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.2, 0.7000000000000001, 0.8, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.2, 0.7000000000000001, 0.9, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.2, 0.8, 0.0, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.2, 0.8, 0.1, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.2, 0.8, 0.2, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.2, 0.8, 0.30000000000000004, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.2, 0.8, 0.4, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.2, 0.8, 0.5, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.2, 0.8, 0.6000000000000001, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.2, 0.8, 0.7000000000000001, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.2, 0.8, 0.8, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.2, 0.8, 0.9, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.2, 0.9, 0.0, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.2, 0.9, 0.1, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.2, 0.9, 0.2, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.2, 0.9, 0.30000000000000004, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.2, 0.9, 0.4, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.2, 0.9, 0.5, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.2, 0.9, 0.6000000000000001, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.2, 0.9, 0.7000000000000001, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.2, 0.9, 0.8, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.2, 0.9, 0.9, 1)},
" ",
{"background": MetaInterpretation.REVERT},
"\n",
{"background": (0.30000000000000004, 0.0, 0.0, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.30000000000000004, 0.0, 0.1, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.30000000000000004, 0.0, 0.2, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.30000000000000004, 0.0, 0.30000000000000004, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.30000000000000004, 0.0, 0.4, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.30000000000000004, 0.0, 0.5, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.30000000000000004, 0.0, 0.6000000000000001, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.30000000000000004, 0.0, 0.7000000000000001, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.30000000000000004, 0.0, 0.8, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.30000000000000004, 0.0, 0.9, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.30000000000000004, 0.1, 0.0, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.30000000000000004, 0.1, 0.1, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.30000000000000004, 0.1, 0.2, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.30000000000000004, 0.1, 0.30000000000000004, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.30000000000000004, 0.1, 0.4, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.30000000000000004, 0.1, 0.5, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.30000000000000004, 0.1, 0.6000000000000001, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.30000000000000004, 0.1, 0.7000000000000001, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.30000000000000004, 0.1, 0.8, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.30000000000000004, 0.1, 0.9, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.30000000000000004, 0.2, 0.0, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.30000000000000004, 0.2, 0.1, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.30000000000000004, 0.2, 0.2, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.30000000000000004, 0.2, 0.30000000000000004, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.30000000000000004, 0.2, 0.4, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.30000000000000004, 0.2, 0.5, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.30000000000000004, 0.2, 0.6000000000000001, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.30000000000000004, 0.2, 0.7000000000000001, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.30000000000000004, 0.2, 0.8, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.30000000000000004, 0.2, 0.9, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.30000000000000004, 0.30000000000000004, 0.0, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.30000000000000004, 0.30000000000000004, 0.1, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.30000000000000004, 0.30000000000000004, 0.2, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{
"background": (
0.30000000000000004,
0.30000000000000004,
0.30000000000000004,
1,
)
},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.30000000000000004, 0.30000000000000004, 0.4, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.30000000000000004, 0.30000000000000004, 0.5, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{
"background": (
0.30000000000000004,
0.30000000000000004,
0.6000000000000001,
1,
)
},
" ",
{"background": MetaInterpretation.REVERT},
{
"background": (
0.30000000000000004,
0.30000000000000004,
0.7000000000000001,
1,
)
},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.30000000000000004, 0.30000000000000004, 0.8, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.30000000000000004, 0.30000000000000004, 0.9, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.30000000000000004, 0.4, 0.0, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.30000000000000004, 0.4, 0.1, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.30000000000000004, 0.4, 0.2, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.30000000000000004, 0.4, 0.30000000000000004, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.30000000000000004, 0.4, 0.4, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.30000000000000004, 0.4, 0.5, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.30000000000000004, 0.4, 0.6000000000000001, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.30000000000000004, 0.4, 0.7000000000000001, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.30000000000000004, 0.4, 0.8, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.30000000000000004, 0.4, 0.9, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.30000000000000004, 0.5, 0.0, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.30000000000000004, 0.5, 0.1, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.30000000000000004, 0.5, 0.2, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.30000000000000004, 0.5, 0.30000000000000004, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.30000000000000004, 0.5, 0.4, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.30000000000000004, 0.5, 0.5, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.30000000000000004, 0.5, 0.6000000000000001, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.30000000000000004, 0.5, 0.7000000000000001, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.30000000000000004, 0.5, 0.8, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.30000000000000004, 0.5, 0.9, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.30000000000000004, 0.6000000000000001, 0.0, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.30000000000000004, 0.6000000000000001, 0.1, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.30000000000000004, 0.6000000000000001, 0.2, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{
"background": (
0.30000000000000004,
0.6000000000000001,
0.30000000000000004,
1,
)
},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.30000000000000004, 0.6000000000000001, 0.4, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.30000000000000004, 0.6000000000000001, 0.5, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{
"background": (
0.30000000000000004,
0.6000000000000001,
0.6000000000000001,
1,
)
},
" ",
{"background": MetaInterpretation.REVERT},
{
"background": (
0.30000000000000004,
0.6000000000000001,
0.7000000000000001,
1,
)
},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.30000000000000004, 0.6000000000000001, 0.8, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.30000000000000004, 0.6000000000000001, 0.9, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.30000000000000004, 0.7000000000000001, 0.0, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.30000000000000004, 0.7000000000000001, 0.1, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.30000000000000004, 0.7000000000000001, 0.2, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{
"background": (
0.30000000000000004,
0.7000000000000001,
0.30000000000000004,
1,
)
},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.30000000000000004, 0.7000000000000001, 0.4, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.30000000000000004, 0.7000000000000001, 0.5, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{
"background": (
0.30000000000000004,
0.7000000000000001,
0.6000000000000001,
1,
)
},
" ",
{"background": MetaInterpretation.REVERT},
{
"background": (
0.30000000000000004,
0.7000000000000001,
0.7000000000000001,
1,
)
},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.30000000000000004, 0.7000000000000001, 0.8, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.30000000000000004, 0.7000000000000001, 0.9, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.30000000000000004, 0.8, 0.0, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.30000000000000004, 0.8, 0.1, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.30000000000000004, 0.8, 0.2, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.30000000000000004, 0.8, 0.30000000000000004, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.30000000000000004, 0.8, 0.4, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.30000000000000004, 0.8, 0.5, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.30000000000000004, 0.8, 0.6000000000000001, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.30000000000000004, 0.8, 0.7000000000000001, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.30000000000000004, 0.8, 0.8, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.30000000000000004, 0.8, 0.9, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.30000000000000004, 0.9, 0.0, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.30000000000000004, 0.9, 0.1, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.30000000000000004, 0.9, 0.2, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.30000000000000004, 0.9, 0.30000000000000004, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.30000000000000004, 0.9, 0.4, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.30000000000000004, 0.9, 0.5, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.30000000000000004, 0.9, 0.6000000000000001, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.30000000000000004, 0.9, 0.7000000000000001, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.30000000000000004, 0.9, 0.8, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.30000000000000004, 0.9, 0.9, 1)},
" ",
{"background": MetaInterpretation.REVERT},
"\n",
{"background": (0.4, 0.0, 0.0, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.4, 0.0, 0.1, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.4, 0.0, 0.2, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.4, 0.0, 0.30000000000000004, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.4, 0.0, 0.4, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.4, 0.0, 0.5, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.4, 0.0, 0.6000000000000001, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.4, 0.0, 0.7000000000000001, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.4, 0.0, 0.8, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.4, 0.0, 0.9, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.4, 0.1, 0.0, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.4, 0.1, 0.1, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.4, 0.1, 0.2, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.4, 0.1, 0.30000000000000004, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.4, 0.1, 0.4, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.4, 0.1, 0.5, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.4, 0.1, 0.6000000000000001, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.4, 0.1, 0.7000000000000001, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.4, 0.1, 0.8, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.4, 0.1, 0.9, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.4, 0.2, 0.0, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.4, 0.2, 0.1, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.4, 0.2, 0.2, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.4, 0.2, 0.30000000000000004, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.4, 0.2, 0.4, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.4, 0.2, 0.5, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.4, 0.2, 0.6000000000000001, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.4, 0.2, 0.7000000000000001, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.4, 0.2, 0.8, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.4, 0.2, 0.9, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.4, 0.30000000000000004, 0.0, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.4, 0.30000000000000004, 0.1, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.4, 0.30000000000000004, 0.2, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.4, 0.30000000000000004, 0.30000000000000004, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.4, 0.30000000000000004, 0.4, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.4, 0.30000000000000004, 0.5, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.4, 0.30000000000000004, 0.6000000000000001, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.4, 0.30000000000000004, 0.7000000000000001, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.4, 0.30000000000000004, 0.8, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.4, 0.30000000000000004, 0.9, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.4, 0.4, 0.0, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.4, 0.4, 0.1, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.4, 0.4, 0.2, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.4, 0.4, 0.30000000000000004, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.4, 0.4, 0.4, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.4, 0.4, 0.5, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.4, 0.4, 0.6000000000000001, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.4, 0.4, 0.7000000000000001, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.4, 0.4, 0.8, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.4, 0.4, 0.9, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.4, 0.5, 0.0, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.4, 0.5, 0.1, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.4, 0.5, 0.2, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.4, 0.5, 0.30000000000000004, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.4, 0.5, 0.4, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.4, 0.5, 0.5, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.4, 0.5, 0.6000000000000001, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.4, 0.5, 0.7000000000000001, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.4, 0.5, 0.8, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.4, 0.5, 0.9, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.4, 0.6000000000000001, 0.0, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.4, 0.6000000000000001, 0.1, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.4, 0.6000000000000001, 0.2, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.4, 0.6000000000000001, 0.30000000000000004, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.4, 0.6000000000000001, 0.4, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.4, 0.6000000000000001, 0.5, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.4, 0.6000000000000001, 0.6000000000000001, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.4, 0.6000000000000001, 0.7000000000000001, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.4, 0.6000000000000001, 0.8, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.4, 0.6000000000000001, 0.9, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.4, 0.7000000000000001, 0.0, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.4, 0.7000000000000001, 0.1, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.4, 0.7000000000000001, 0.2, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.4, 0.7000000000000001, 0.30000000000000004, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.4, 0.7000000000000001, 0.4, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.4, 0.7000000000000001, 0.5, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.4, 0.7000000000000001, 0.6000000000000001, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.4, 0.7000000000000001, 0.7000000000000001, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.4, 0.7000000000000001, 0.8, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.4, 0.7000000000000001, 0.9, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.4, 0.8, 0.0, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.4, 0.8, 0.1, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.4, 0.8, 0.2, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.4, 0.8, 0.30000000000000004, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.4, 0.8, 0.4, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.4, 0.8, 0.5, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.4, 0.8, 0.6000000000000001, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.4, 0.8, 0.7000000000000001, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.4, 0.8, 0.8, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.4, 0.8, 0.9, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.4, 0.9, 0.0, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.4, 0.9, 0.1, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.4, 0.9, 0.2, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.4, 0.9, 0.30000000000000004, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.4, 0.9, 0.4, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.4, 0.9, 0.5, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.4, 0.9, 0.6000000000000001, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.4, 0.9, 0.7000000000000001, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.4, 0.9, 0.8, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.4, 0.9, 0.9, 1)},
" ",
{"background": MetaInterpretation.REVERT},
"\n",
{"background": (0.5, 0.0, 0.0, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.5, 0.0, 0.1, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.5, 0.0, 0.2, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.5, 0.0, 0.30000000000000004, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.5, 0.0, 0.4, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.5, 0.0, 0.5, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.5, 0.0, 0.6000000000000001, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.5, 0.0, 0.7000000000000001, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.5, 0.0, 0.8, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.5, 0.0, 0.9, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.5, 0.1, 0.0, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.5, 0.1, 0.1, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.5, 0.1, 0.2, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.5, 0.1, 0.30000000000000004, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.5, 0.1, 0.4, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.5, 0.1, 0.5, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.5, 0.1, 0.6000000000000001, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.5, 0.1, 0.7000000000000001, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.5, 0.1, 0.8, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.5, 0.1, 0.9, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.5, 0.2, 0.0, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.5, 0.2, 0.1, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.5, 0.2, 0.2, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.5, 0.2, 0.30000000000000004, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.5, 0.2, 0.4, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.5, 0.2, 0.5, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.5, 0.2, 0.6000000000000001, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.5, 0.2, 0.7000000000000001, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.5, 0.2, 0.8, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.5, 0.2, 0.9, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.5, 0.30000000000000004, 0.0, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.5, 0.30000000000000004, 0.1, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.5, 0.30000000000000004, 0.2, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.5, 0.30000000000000004, 0.30000000000000004, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.5, 0.30000000000000004, 0.4, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.5, 0.30000000000000004, 0.5, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.5, 0.30000000000000004, 0.6000000000000001, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.5, 0.30000000000000004, 0.7000000000000001, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.5, 0.30000000000000004, 0.8, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.5, 0.30000000000000004, 0.9, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.5, 0.4, 0.0, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.5, 0.4, 0.1, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.5, 0.4, 0.2, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.5, 0.4, 0.30000000000000004, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.5, 0.4, 0.4, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.5, 0.4, 0.5, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.5, 0.4, 0.6000000000000001, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.5, 0.4, 0.7000000000000001, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.5, 0.4, 0.8, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.5, 0.4, 0.9, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.5, 0.5, 0.0, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.5, 0.5, 0.1, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.5, 0.5, 0.2, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.5, 0.5, 0.30000000000000004, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.5, 0.5, 0.4, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.5, 0.5, 0.5, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.5, 0.5, 0.6000000000000001, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.5, 0.5, 0.7000000000000001, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.5, 0.5, 0.8, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.5, 0.5, 0.9, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.5, 0.6000000000000001, 0.0, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.5, 0.6000000000000001, 0.1, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.5, 0.6000000000000001, 0.2, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.5, 0.6000000000000001, 0.30000000000000004, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.5, 0.6000000000000001, 0.4, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.5, 0.6000000000000001, 0.5, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.5, 0.6000000000000001, 0.6000000000000001, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.5, 0.6000000000000001, 0.7000000000000001, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.5, 0.6000000000000001, 0.8, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.5, 0.6000000000000001, 0.9, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.5, 0.7000000000000001, 0.0, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.5, 0.7000000000000001, 0.1, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.5, 0.7000000000000001, 0.2, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.5, 0.7000000000000001, 0.30000000000000004, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.5, 0.7000000000000001, 0.4, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.5, 0.7000000000000001, 0.5, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.5, 0.7000000000000001, 0.6000000000000001, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.5, 0.7000000000000001, 0.7000000000000001, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.5, 0.7000000000000001, 0.8, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.5, 0.7000000000000001, 0.9, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.5, 0.8, 0.0, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.5, 0.8, 0.1, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.5, 0.8, 0.2, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.5, 0.8, 0.30000000000000004, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.5, 0.8, 0.4, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.5, 0.8, 0.5, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.5, 0.8, 0.6000000000000001, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.5, 0.8, 0.7000000000000001, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.5, 0.8, 0.8, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.5, 0.8, 0.9, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.5, 0.9, 0.0, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.5, 0.9, 0.1, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.5, 0.9, 0.2, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.5, 0.9, 0.30000000000000004, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.5, 0.9, 0.4, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.5, 0.9, 0.5, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.5, 0.9, 0.6000000000000001, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.5, 0.9, 0.7000000000000001, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.5, 0.9, 0.8, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.5, 0.9, 0.9, 1)},
" ",
{"background": MetaInterpretation.REVERT},
"\n",
{"background": (0.6000000000000001, 0.0, 0.0, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.6000000000000001, 0.0, 0.1, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.6000000000000001, 0.0, 0.2, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.6000000000000001, 0.0, 0.30000000000000004, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.6000000000000001, 0.0, 0.4, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.6000000000000001, 0.0, 0.5, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.6000000000000001, 0.0, 0.6000000000000001, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.6000000000000001, 0.0, 0.7000000000000001, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.6000000000000001, 0.0, 0.8, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.6000000000000001, 0.0, 0.9, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.6000000000000001, 0.1, 0.0, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.6000000000000001, 0.1, 0.1, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.6000000000000001, 0.1, 0.2, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.6000000000000001, 0.1, 0.30000000000000004, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.6000000000000001, 0.1, 0.4, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.6000000000000001, 0.1, 0.5, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.6000000000000001, 0.1, 0.6000000000000001, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.6000000000000001, 0.1, 0.7000000000000001, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.6000000000000001, 0.1, 0.8, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.6000000000000001, 0.1, 0.9, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.6000000000000001, 0.2, 0.0, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.6000000000000001, 0.2, 0.1, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.6000000000000001, 0.2, 0.2, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.6000000000000001, 0.2, 0.30000000000000004, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.6000000000000001, 0.2, 0.4, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.6000000000000001, 0.2, 0.5, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.6000000000000001, 0.2, 0.6000000000000001, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.6000000000000001, 0.2, 0.7000000000000001, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.6000000000000001, 0.2, 0.8, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.6000000000000001, 0.2, 0.9, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.6000000000000001, 0.30000000000000004, 0.0, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.6000000000000001, 0.30000000000000004, 0.1, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.6000000000000001, 0.30000000000000004, 0.2, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{
"background": (
0.6000000000000001,
0.30000000000000004,
0.30000000000000004,
1,
)
},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.6000000000000001, 0.30000000000000004, 0.4, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.6000000000000001, 0.30000000000000004, 0.5, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{
"background": (
0.6000000000000001,
0.30000000000000004,
0.6000000000000001,
1,
)
},
" ",
{"background": MetaInterpretation.REVERT},
{
"background": (
0.6000000000000001,
0.30000000000000004,
0.7000000000000001,
1,
)
},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.6000000000000001, 0.30000000000000004, 0.8, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.6000000000000001, 0.30000000000000004, 0.9, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.6000000000000001, 0.4, 0.0, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.6000000000000001, 0.4, 0.1, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.6000000000000001, 0.4, 0.2, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.6000000000000001, 0.4, 0.30000000000000004, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.6000000000000001, 0.4, 0.4, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.6000000000000001, 0.4, 0.5, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.6000000000000001, 0.4, 0.6000000000000001, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.6000000000000001, 0.4, 0.7000000000000001, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.6000000000000001, 0.4, 0.8, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.6000000000000001, 0.4, 0.9, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.6000000000000001, 0.5, 0.0, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.6000000000000001, 0.5, 0.1, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.6000000000000001, 0.5, 0.2, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.6000000000000001, 0.5, 0.30000000000000004, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.6000000000000001, 0.5, 0.4, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.6000000000000001, 0.5, 0.5, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.6000000000000001, 0.5, 0.6000000000000001, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.6000000000000001, 0.5, 0.7000000000000001, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.6000000000000001, 0.5, 0.8, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.6000000000000001, 0.5, 0.9, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.6000000000000001, 0.6000000000000001, 0.0, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.6000000000000001, 0.6000000000000001, 0.1, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.6000000000000001, 0.6000000000000001, 0.2, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{
"background": (
0.6000000000000001,
0.6000000000000001,
0.30000000000000004,
1,
)
},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.6000000000000001, 0.6000000000000001, 0.4, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.6000000000000001, 0.6000000000000001, 0.5, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.6000000000000001, 0.6000000000000001, 0.6000000000000001, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.6000000000000001, 0.6000000000000001, 0.7000000000000001, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.6000000000000001, 0.6000000000000001, 0.8, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.6000000000000001, 0.6000000000000001, 0.9, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.6000000000000001, 0.7000000000000001, 0.0, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.6000000000000001, 0.7000000000000001, 0.1, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.6000000000000001, 0.7000000000000001, 0.2, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{
"background": (
0.6000000000000001,
0.7000000000000001,
0.30000000000000004,
1,
)
},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.6000000000000001, 0.7000000000000001, 0.4, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.6000000000000001, 0.7000000000000001, 0.5, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.6000000000000001, 0.7000000000000001, 0.6000000000000001, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.6000000000000001, 0.7000000000000001, 0.7000000000000001, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.6000000000000001, 0.7000000000000001, 0.8, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.6000000000000001, 0.7000000000000001, 0.9, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.6000000000000001, 0.8, 0.0, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.6000000000000001, 0.8, 0.1, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.6000000000000001, 0.8, 0.2, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.6000000000000001, 0.8, 0.30000000000000004, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.6000000000000001, 0.8, 0.4, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.6000000000000001, 0.8, 0.5, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.6000000000000001, 0.8, 0.6000000000000001, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.6000000000000001, 0.8, 0.7000000000000001, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.6000000000000001, 0.8, 0.8, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.6000000000000001, 0.8, 0.9, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.6000000000000001, 0.9, 0.0, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.6000000000000001, 0.9, 0.1, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.6000000000000001, 0.9, 0.2, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.6000000000000001, 0.9, 0.30000000000000004, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.6000000000000001, 0.9, 0.4, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.6000000000000001, 0.9, 0.5, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.6000000000000001, 0.9, 0.6000000000000001, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.6000000000000001, 0.9, 0.7000000000000001, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.6000000000000001, 0.9, 0.8, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.6000000000000001, 0.9, 0.9, 1)},
" ",
{"background": MetaInterpretation.REVERT},
"\n",
{"background": (0.7000000000000001, 0.0, 0.0, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.7000000000000001, 0.0, 0.1, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.7000000000000001, 0.0, 0.2, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.7000000000000001, 0.0, 0.30000000000000004, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.7000000000000001, 0.0, 0.4, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.7000000000000001, 0.0, 0.5, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.7000000000000001, 0.0, 0.6000000000000001, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.7000000000000001, 0.0, 0.7000000000000001, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.7000000000000001, 0.0, 0.8, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.7000000000000001, 0.0, 0.9, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.7000000000000001, 0.1, 0.0, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.7000000000000001, 0.1, 0.1, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.7000000000000001, 0.1, 0.2, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.7000000000000001, 0.1, 0.30000000000000004, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.7000000000000001, 0.1, 0.4, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.7000000000000001, 0.1, 0.5, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.7000000000000001, 0.1, 0.6000000000000001, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.7000000000000001, 0.1, 0.7000000000000001, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.7000000000000001, 0.1, 0.8, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.7000000000000001, 0.1, 0.9, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.7000000000000001, 0.2, 0.0, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.7000000000000001, 0.2, 0.1, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.7000000000000001, 0.2, 0.2, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.7000000000000001, 0.2, 0.30000000000000004, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.7000000000000001, 0.2, 0.4, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.7000000000000001, 0.2, 0.5, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.7000000000000001, 0.2, 0.6000000000000001, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.7000000000000001, 0.2, 0.7000000000000001, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.7000000000000001, 0.2, 0.8, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.7000000000000001, 0.2, 0.9, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.7000000000000001, 0.30000000000000004, 0.0, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.7000000000000001, 0.30000000000000004, 0.1, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.7000000000000001, 0.30000000000000004, 0.2, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{
"background": (
0.7000000000000001,
0.30000000000000004,
0.30000000000000004,
1,
)
},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.7000000000000001, 0.30000000000000004, 0.4, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.7000000000000001, 0.30000000000000004, 0.5, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{
"background": (
0.7000000000000001,
0.30000000000000004,
0.6000000000000001,
1,
)
},
" ",
{"background": MetaInterpretation.REVERT},
{
"background": (
0.7000000000000001,
0.30000000000000004,
0.7000000000000001,
1,
)
},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.7000000000000001, 0.30000000000000004, 0.8, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.7000000000000001, 0.30000000000000004, 0.9, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.7000000000000001, 0.4, 0.0, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.7000000000000001, 0.4, 0.1, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.7000000000000001, 0.4, 0.2, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.7000000000000001, 0.4, 0.30000000000000004, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.7000000000000001, 0.4, 0.4, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.7000000000000001, 0.4, 0.5, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.7000000000000001, 0.4, 0.6000000000000001, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.7000000000000001, 0.4, 0.7000000000000001, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.7000000000000001, 0.4, 0.8, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.7000000000000001, 0.4, 0.9, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.7000000000000001, 0.5, 0.0, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.7000000000000001, 0.5, 0.1, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.7000000000000001, 0.5, 0.2, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.7000000000000001, 0.5, 0.30000000000000004, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.7000000000000001, 0.5, 0.4, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.7000000000000001, 0.5, 0.5, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.7000000000000001, 0.5, 0.6000000000000001, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.7000000000000001, 0.5, 0.7000000000000001, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.7000000000000001, 0.5, 0.8, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.7000000000000001, 0.5, 0.9, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.7000000000000001, 0.6000000000000001, 0.0, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.7000000000000001, 0.6000000000000001, 0.1, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.7000000000000001, 0.6000000000000001, 0.2, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{
"background": (
0.7000000000000001,
0.6000000000000001,
0.30000000000000004,
1,
)
},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.7000000000000001, 0.6000000000000001, 0.4, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.7000000000000001, 0.6000000000000001, 0.5, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.7000000000000001, 0.6000000000000001, 0.6000000000000001, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.7000000000000001, 0.6000000000000001, 0.7000000000000001, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.7000000000000001, 0.6000000000000001, 0.8, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.7000000000000001, 0.6000000000000001, 0.9, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.7000000000000001, 0.7000000000000001, 0.0, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.7000000000000001, 0.7000000000000001, 0.1, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.7000000000000001, 0.7000000000000001, 0.2, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{
"background": (
0.7000000000000001,
0.7000000000000001,
0.30000000000000004,
1,
)
},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.7000000000000001, 0.7000000000000001, 0.4, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.7000000000000001, 0.7000000000000001, 0.5, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.7000000000000001, 0.7000000000000001, 0.6000000000000001, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.7000000000000001, 0.7000000000000001, 0.7000000000000001, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.7000000000000001, 0.7000000000000001, 0.8, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.7000000000000001, 0.7000000000000001, 0.9, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.7000000000000001, 0.8, 0.0, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.7000000000000001, 0.8, 0.1, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.7000000000000001, 0.8, 0.2, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.7000000000000001, 0.8, 0.30000000000000004, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.7000000000000001, 0.8, 0.4, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.7000000000000001, 0.8, 0.5, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.7000000000000001, 0.8, 0.6000000000000001, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.7000000000000001, 0.8, 0.7000000000000001, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.7000000000000001, 0.8, 0.8, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.7000000000000001, 0.8, 0.9, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.7000000000000001, 0.9, 0.0, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.7000000000000001, 0.9, 0.1, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.7000000000000001, 0.9, 0.2, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.7000000000000001, 0.9, 0.30000000000000004, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.7000000000000001, 0.9, 0.4, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.7000000000000001, 0.9, 0.5, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.7000000000000001, 0.9, 0.6000000000000001, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.7000000000000001, 0.9, 0.7000000000000001, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.7000000000000001, 0.9, 0.8, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.7000000000000001, 0.9, 0.9, 1)},
" ",
{"background": MetaInterpretation.REVERT},
"\n",
{"background": (0.8, 0.0, 0.0, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.8, 0.0, 0.1, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.8, 0.0, 0.2, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.8, 0.0, 0.30000000000000004, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.8, 0.0, 0.4, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.8, 0.0, 0.5, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.8, 0.0, 0.6000000000000001, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.8, 0.0, 0.7000000000000001, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.8, 0.0, 0.8, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.8, 0.0, 0.9, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.8, 0.1, 0.0, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.8, 0.1, 0.1, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.8, 0.1, 0.2, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.8, 0.1, 0.30000000000000004, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.8, 0.1, 0.4, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.8, 0.1, 0.5, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.8, 0.1, 0.6000000000000001, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.8, 0.1, 0.7000000000000001, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.8, 0.1, 0.8, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.8, 0.1, 0.9, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.8, 0.2, 0.0, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.8, 0.2, 0.1, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.8, 0.2, 0.2, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.8, 0.2, 0.30000000000000004, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.8, 0.2, 0.4, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.8, 0.2, 0.5, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.8, 0.2, 0.6000000000000001, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.8, 0.2, 0.7000000000000001, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.8, 0.2, 0.8, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.8, 0.2, 0.9, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.8, 0.30000000000000004, 0.0, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.8, 0.30000000000000004, 0.1, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.8, 0.30000000000000004, 0.2, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.8, 0.30000000000000004, 0.30000000000000004, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.8, 0.30000000000000004, 0.4, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.8, 0.30000000000000004, 0.5, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.8, 0.30000000000000004, 0.6000000000000001, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.8, 0.30000000000000004, 0.7000000000000001, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.8, 0.30000000000000004, 0.8, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.8, 0.30000000000000004, 0.9, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.8, 0.4, 0.0, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.8, 0.4, 0.1, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.8, 0.4, 0.2, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.8, 0.4, 0.30000000000000004, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.8, 0.4, 0.4, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.8, 0.4, 0.5, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.8, 0.4, 0.6000000000000001, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.8, 0.4, 0.7000000000000001, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.8, 0.4, 0.8, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.8, 0.4, 0.9, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.8, 0.5, 0.0, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.8, 0.5, 0.1, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.8, 0.5, 0.2, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.8, 0.5, 0.30000000000000004, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.8, 0.5, 0.4, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.8, 0.5, 0.5, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.8, 0.5, 0.6000000000000001, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.8, 0.5, 0.7000000000000001, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.8, 0.5, 0.8, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.8, 0.5, 0.9, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.8, 0.6000000000000001, 0.0, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.8, 0.6000000000000001, 0.1, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.8, 0.6000000000000001, 0.2, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.8, 0.6000000000000001, 0.30000000000000004, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.8, 0.6000000000000001, 0.4, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.8, 0.6000000000000001, 0.5, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.8, 0.6000000000000001, 0.6000000000000001, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.8, 0.6000000000000001, 0.7000000000000001, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.8, 0.6000000000000001, 0.8, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.8, 0.6000000000000001, 0.9, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.8, 0.7000000000000001, 0.0, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.8, 0.7000000000000001, 0.1, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.8, 0.7000000000000001, 0.2, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.8, 0.7000000000000001, 0.30000000000000004, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.8, 0.7000000000000001, 0.4, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.8, 0.7000000000000001, 0.5, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.8, 0.7000000000000001, 0.6000000000000001, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.8, 0.7000000000000001, 0.7000000000000001, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.8, 0.7000000000000001, 0.8, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.8, 0.7000000000000001, 0.9, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.8, 0.8, 0.0, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.8, 0.8, 0.1, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.8, 0.8, 0.2, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.8, 0.8, 0.30000000000000004, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.8, 0.8, 0.4, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.8, 0.8, 0.5, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.8, 0.8, 0.6000000000000001, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.8, 0.8, 0.7000000000000001, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.8, 0.8, 0.8, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.8, 0.8, 0.9, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.8, 0.9, 0.0, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.8, 0.9, 0.1, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.8, 0.9, 0.2, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.8, 0.9, 0.30000000000000004, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.8, 0.9, 0.4, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.8, 0.9, 0.5, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.8, 0.9, 0.6000000000000001, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.8, 0.9, 0.7000000000000001, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.8, 0.9, 0.8, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.8, 0.9, 0.9, 1)},
" ",
{"background": MetaInterpretation.REVERT},
"\n",
{"background": (0.9, 0.0, 0.0, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.9, 0.0, 0.1, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.9, 0.0, 0.2, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.9, 0.0, 0.30000000000000004, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.9, 0.0, 0.4, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.9, 0.0, 0.5, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.9, 0.0, 0.6000000000000001, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.9, 0.0, 0.7000000000000001, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.9, 0.0, 0.8, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.9, 0.0, 0.9, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.9, 0.1, 0.0, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.9, 0.1, 0.1, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.9, 0.1, 0.2, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.9, 0.1, 0.30000000000000004, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.9, 0.1, 0.4, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.9, 0.1, 0.5, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.9, 0.1, 0.6000000000000001, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.9, 0.1, 0.7000000000000001, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.9, 0.1, 0.8, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.9, 0.1, 0.9, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.9, 0.2, 0.0, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.9, 0.2, 0.1, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.9, 0.2, 0.2, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.9, 0.2, 0.30000000000000004, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.9, 0.2, 0.4, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.9, 0.2, 0.5, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.9, 0.2, 0.6000000000000001, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.9, 0.2, 0.7000000000000001, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.9, 0.2, 0.8, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.9, 0.2, 0.9, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.9, 0.30000000000000004, 0.0, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.9, 0.30000000000000004, 0.1, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.9, 0.30000000000000004, 0.2, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.9, 0.30000000000000004, 0.30000000000000004, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.9, 0.30000000000000004, 0.4, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.9, 0.30000000000000004, 0.5, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.9, 0.30000000000000004, 0.6000000000000001, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.9, 0.30000000000000004, 0.7000000000000001, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.9, 0.30000000000000004, 0.8, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.9, 0.30000000000000004, 0.9, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.9, 0.4, 0.0, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.9, 0.4, 0.1, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.9, 0.4, 0.2, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.9, 0.4, 0.30000000000000004, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.9, 0.4, 0.4, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.9, 0.4, 0.5, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.9, 0.4, 0.6000000000000001, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.9, 0.4, 0.7000000000000001, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.9, 0.4, 0.8, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.9, 0.4, 0.9, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.9, 0.5, 0.0, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.9, 0.5, 0.1, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.9, 0.5, 0.2, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.9, 0.5, 0.30000000000000004, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.9, 0.5, 0.4, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.9, 0.5, 0.5, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.9, 0.5, 0.6000000000000001, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.9, 0.5, 0.7000000000000001, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.9, 0.5, 0.8, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.9, 0.5, 0.9, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.9, 0.6000000000000001, 0.0, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.9, 0.6000000000000001, 0.1, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.9, 0.6000000000000001, 0.2, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.9, 0.6000000000000001, 0.30000000000000004, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.9, 0.6000000000000001, 0.4, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.9, 0.6000000000000001, 0.5, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.9, 0.6000000000000001, 0.6000000000000001, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.9, 0.6000000000000001, 0.7000000000000001, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.9, 0.6000000000000001, 0.8, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.9, 0.6000000000000001, 0.9, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.9, 0.7000000000000001, 0.0, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.9, 0.7000000000000001, 0.1, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.9, 0.7000000000000001, 0.2, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.9, 0.7000000000000001, 0.30000000000000004, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.9, 0.7000000000000001, 0.4, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.9, 0.7000000000000001, 0.5, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.9, 0.7000000000000001, 0.6000000000000001, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.9, 0.7000000000000001, 0.7000000000000001, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.9, 0.7000000000000001, 0.8, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.9, 0.7000000000000001, 0.9, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.9, 0.8, 0.0, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.9, 0.8, 0.1, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.9, 0.8, 0.2, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.9, 0.8, 0.30000000000000004, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.9, 0.8, 0.4, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.9, 0.8, 0.5, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.9, 0.8, 0.6000000000000001, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.9, 0.8, 0.7000000000000001, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.9, 0.8, 0.8, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.9, 0.8, 0.9, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.9, 0.9, 0.0, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.9, 0.9, 0.1, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.9, 0.9, 0.2, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.9, 0.9, 0.30000000000000004, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.9, 0.9, 0.4, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.9, 0.9, 0.5, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.9, 0.9, 0.6000000000000001, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.9, 0.9, 0.7000000000000001, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.9, 0.9, 0.8, 1)},
" ",
{"background": MetaInterpretation.REVERT},
{"background": (0.9, 0.9, 0.9, 1)},
" ",
{"background": MetaInterpretation.REVERT},
"\n\nThese are ",
{"frame": Frame.BOX},
"framed",
{"frame": MetaInterpretation.REVERT},
" and ",
{"frame": Frame.CIRCLE},
"encircled",
{"frame": MetaInterpretation.REVERT},
".\n\nThese are the ",
{"ideogram": Ideogram.SINGLE_LINE_UNDER_OR_RIGHT},
"single line under/right",
{"ideogram": MetaInterpretation.REVERT},
", ",
{"ideogram": Ideogram.DOUBLE_LINE_UNDER_OR_RIGHT},
"double line under/right",
{"ideogram": MetaInterpretation.REVERT},
", ",
{"ideogram": Ideogram.SINGLE_LINE_OVER_OR_LEFT},
"single line over/left",
{"ideogram": MetaInterpretation.REVERT},
", ",
{"ideogram": Ideogram.DOUBLE_LINE_OVER_OR_LEFT},
"double line over/left",
{"ideogram": MetaInterpretation.REVERT},
" and ",
{"ideogram": Ideogram.STRESS},
"stress",
{"ideogram": MetaInterpretation.REVERT},
" ideograms.\n\nNot all terminals support all codes, so please don't be too sad if some of the examples didn't work for you.\n\n",
]
| 39.110688
| 138
| 0.527719
| 23,705
| 256,879
| 5.716684
| 0.0054
| 0.365719
| 0.254911
| 0.326313
| 0.986156
| 0.982031
| 0.968099
| 0.966756
| 0.965841
| 0.962624
| 0
| 0.224764
| 0.279338
| 256,879
| 6,567
| 139
| 39.116644
| 0.507257
| 0
| 0
| 0.663924
| 0
| 0.000152
| 0.173148
| 0.000082
| 0
| 0
| 0
| 0
| 0.000152
| 1
| 0.000152
| false
| 0
| 0.000305
| 0
| 0.000457
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
c8847354b6aac2703cb31ae548e2b02013841f1a
| 2,731
|
py
|
Python
|
easyrequest_hay_app/migrations/0003_auto_20180118_1216.py
|
birkin/easyrequest_hay_project
|
0718b3e22485354b45eb27615aba05c56b2b833b
|
[
"MIT"
] | null | null | null |
easyrequest_hay_app/migrations/0003_auto_20180118_1216.py
|
birkin/easyrequest_hay_project
|
0718b3e22485354b45eb27615aba05c56b2b833b
|
[
"MIT"
] | 4
|
2018-07-06T17:32:23.000Z
|
2021-02-05T16:06:03.000Z
|
easyrequest_hay_app/migrations/0003_auto_20180118_1216.py
|
Brown-University-Library/easyrequest_hay_project
|
b993496ec29352409e581f84606684ebcfabe1c9
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.9 on 2018-01-18 17:16
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('easyrequest_hay_app', '0002_itemrequest'),
]
operations = [
migrations.AlterField(
model_name='itemrequest',
name='item_author',
field=models.CharField(blank=True, help_text='used by Aeon', max_length=200, null=True),
),
migrations.AlterField(
model_name='itemrequest',
name='item_barcode',
field=models.CharField(blank=True, help_text='used by Millennium', max_length=50, null=True),
),
migrations.AlterField(
model_name='itemrequest',
name='item_bib',
field=models.CharField(blank=True, help_text='used by Millennium & Aeon', max_length=50, null=True),
),
migrations.AlterField(
model_name='itemrequest',
name='item_callnumber',
field=models.CharField(blank=True, help_text='used by Millennium & Aeon', max_length=200, null=True),
),
migrations.AlterField(
model_name='itemrequest',
name='item_digital_version_url',
field=models.CharField(blank=True, help_text='used by Aeon', max_length=200, null=True),
),
migrations.AlterField(
model_name='itemrequest',
name='item_id',
field=models.CharField(blank=True, help_text='used by Millennium', max_length=50, null=True),
),
migrations.AlterField(
model_name='itemrequest',
name='item_publish_info',
field=models.CharField(blank=True, help_text='used by Aeon', max_length=200, null=True),
),
migrations.AlterField(
model_name='itemrequest',
name='item_title',
field=models.CharField(blank=True, help_text='used by Millennium & Aeon', max_length=200, null=True),
),
migrations.AlterField(
model_name='itemrequest',
name='patron_barcode',
field=models.CharField(blank=True, help_text='used by Millennium', max_length=50, null=True),
),
migrations.AlterField(
model_name='itemrequest',
name='patron_email',
field=models.CharField(blank=True, help_text='used by Millennium', max_length=100, null=True),
),
migrations.AlterField(
model_name='itemrequest',
name='patron_name',
field=models.CharField(blank=True, help_text='used by Millennium', max_length=100, null=True),
),
]
| 38.464789
| 113
| 0.606371
| 296
| 2,731
| 5.408784
| 0.216216
| 0.137414
| 0.171768
| 0.19925
| 0.837602
| 0.837602
| 0.837602
| 0.80762
| 0.80762
| 0.768894
| 0
| 0.025253
| 0.274991
| 2,731
| 70
| 114
| 39.014286
| 0.783333
| 0.024899
| 0
| 0.68254
| 1
| 0
| 0.187218
| 0.009023
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.031746
| 0
| 0.079365
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
c8c322fa4d8b44b73e49e25687b94b69a31f9440
| 7,204
|
py
|
Python
|
src/hypoxia/option.py
|
JoshKarpel/hypoxia
|
31ad7b46b09ca1970e14f9c81275651917a81072
|
[
"WTFPL"
] | 1
|
2018-03-07T19:44:21.000Z
|
2018-03-07T19:44:21.000Z
|
src/hypoxia/option.py
|
JoshKarpel/hypoxia
|
31ad7b46b09ca1970e14f9c81275651917a81072
|
[
"WTFPL"
] | null | null | null |
src/hypoxia/option.py
|
JoshKarpel/hypoxia
|
31ad7b46b09ca1970e14f9c81275651917a81072
|
[
"WTFPL"
] | null | null | null |
import abc
from typing import Callable, TypeVar, Generic
from .exceptions import Panic
from . import result
T = TypeVar('T')  # the type of the wrapped value
U = TypeVar('U')  # the type produced by mapping functions


class Option(abc.ABC, Generic[T]):
    """An optional value: either ``Some(value)`` or the empty ``Nun``.

    Modeled on Rust's ``Option`` type. The concrete subclasses ``Some`` and
    ``Nun`` (defined in this module) implement the abstract methods; this base
    supplies hashing, equality, and repr in terms of the wrapped value.
    """

    def __init__(self, value: T):
        # The wrapped value; ``Nun`` stores ``None`` here.
        self._val = value

    def __hash__(self):
        # Delegate to the wrapped value, so Some(x) hashes like x.
        return hash(self._val)

    def __repr__(self):
        return f'{self.__class__.__name__}({repr(self._val)})'

    def __eq__(self, other):
        # Equal only when the concrete classes match and the values compare
        # equal; the class check short-circuits before other._val is touched.
        return self.__class__ == other.__class__ and self._val == other._val

    @abc.abstractmethod
    def is_some(self) -> bool:
        """Returns ``True`` if the ``Option`` is a ``Some``, and ``False`` if it is a ``Nun``."""
        raise NotImplementedError

    @abc.abstractmethod
    def is_nun(self) -> bool:
        """Returns ``True`` if the ``Option`` is a ``Nun``, and ``False`` if it is a ``Some``."""
        raise NotImplementedError

    @abc.abstractmethod
    def unwrap(self) -> T:
        """If the ``Option`` is a ``Some``, return its value. If it is a ``Nun``, this raises a :class:`Panic`."""
        raise NotImplementedError

    @abc.abstractmethod
    def unwrap_or(self, default: T) -> T:
        """If the ``Option`` is a ``Some``, return its value. If it is a ``Nun``, return ``default`` instead."""
        raise NotImplementedError

    @abc.abstractmethod
    def unwrap_or_else(self, func: Callable[[], T]) -> T:
        """If the ``Option`` is a ``Some``, return its value. If it is a ``Nun``, return ``func()``."""
        raise NotImplementedError

    @abc.abstractmethod
    def map(self, func: Callable[[T], U]) -> 'Option[U]':
        """If the ``Option`` is a ``Some``, return ``Some(func(value))``. If it is a ``Nun``, return ``Nun()``."""
        raise NotImplementedError

    @abc.abstractmethod
    def map_or(self, func: Callable[[T], U], default: U) -> U:
        """If the ``Option`` is a ``Some``, return ``func(value)``. If it is a ``Nun``, return ``default``."""
        raise NotImplementedError

    @abc.abstractmethod
    def map_or_else(self, func: Callable[[T], U], default_func: Callable[[], U]) -> U:
        """If the ``Option`` is a ``Some``, return ``func(value)``. If it is a ``Nun``, return ``default_func()``."""
        raise NotImplementedError

    @abc.abstractmethod
    def ok_or(self, err: Exception) -> 'result.Result[T]':
        """If the ``Option`` is a ``Some``, return ``Ok(value)``. If it is a ``Nun``, return ``Err(err)``."""
        raise NotImplementedError

    @abc.abstractmethod
    def ok_or_else(self, err_func: Callable[[], Exception]) -> 'result.Result[T]':
        """If the ``Option`` is a ``Some``, return ``Ok(value)``. If it is a ``Nun``, return ``Err(err_func())``."""
        raise NotImplementedError

    @abc.abstractmethod
    def and_(self, other: 'Option[U]') -> 'Option[U]':
        """If either ``Option`` is ``Nun``, return ``Nun``. If both are ``Some``, return ``other``."""
        raise NotImplementedError

    @abc.abstractmethod
    def and_then(self, func: Callable[[T], U]) -> 'Option[U]':
        """If the ``Option`` is a ``Some``, return ``Some(func(value))``. If it is a ``Nun``, return ``Nun()``.

        BUG FIX (review): the annotation was ``Callable[[], U]`` and the
        docstring said "return ``func()``", contradicting ``Some.and_then``,
        which calls ``func`` with the wrapped value.
        """
        raise NotImplementedError

    @abc.abstractmethod
    def or_(self, other: 'Option[T]') -> 'Option[T]':
        """If the ``Option`` is a ``Some``, return it. If it is a ``Nun``, return ``other``."""
        raise NotImplementedError

    @abc.abstractmethod
    def or_else(self, func: Callable[[], T]) -> 'Option[T]':
        """If the ``Option`` is a ``Some``, return it. If it is a ``Nun``, return ``Some(func())``."""
        raise NotImplementedError

    @abc.abstractmethod
    def get_or_insert(self, value: T) -> T:
        """If the ``Option`` is a ``Some``, return its value. If it is a ``Nun``, convert this ``Option`` into ``Some(value)`` and return the value."""
        raise NotImplementedError

    @abc.abstractmethod
    def get_or_insert_with(self, func: Callable[[], T]) -> T:
        """If the ``Option`` is a ``Some``, return its value. If it is a ``Nun``, convert this ``Option`` into ``Some(func())`` and return the value."""
        raise NotImplementedError
class Some(Option):
    """The populated variant of :class:`Option`: wraps exactly one value."""

    def __iter__(self):
        # Iterating a ``Some`` yields its single wrapped value.
        yield self._val

    def is_some(self) -> bool:
        return True

    def is_nun(self) -> bool:
        return False

    def unwrap(self) -> T:
        return self._val

    def unwrap_or(self, default: T) -> T:
        # ``default`` is ignored: a Some always holds a value.
        return self._val

    def unwrap_or_else(self, func: Callable[[], T]) -> T:
        # ``func`` is never invoked on the Some side.
        return self._val

    def map(self, func: Callable[[T], U]) -> Option[U]:
        mapped = func(self._val)
        return Some(mapped)

    def map_or(self, func: Callable[[T], U], default: U) -> U:
        return func(self._val)

    def map_or_else(self, func: Callable[[T], U], default_func: Callable[[], U]) -> U:
        # ``default_func`` is never invoked on the Some side.
        return func(self._val)

    def ok_or(self, err: Exception) -> 'result.Result[T]':
        return result.Ok(self._val)

    def ok_or_else(self, err_func: Callable[[], Exception]) -> 'result.Result[T]':
        return result.Ok(self._val)

    def and_(self, other: 'Option[U]') -> 'Option[U]':
        # Note: a Nun argument yields a *fresh* Nun() rather than ``other``
        # itself; Nun instances compare equal, so callers see no difference.
        return other if other.is_some() else Nun()

    def and_then(self, func: Callable[[T], U]) -> 'Option[U]':
        return Some(func(self._val))

    def or_(self, other: 'Option[T]') -> 'Option[T]':
        return self

    def or_else(self, func: Callable[[], T]) -> 'Option[T]':
        return self

    def get_or_insert(self, value: T) -> T:
        # Already a Some: the existing value wins.
        return self._val

    def get_or_insert_with(self, func: Callable[[], T]) -> T:
        # Already a Some: ``func`` is never invoked.
        return self._val
class Nun(Option):
    """The empty variant of :class:`Option`: carries no value.

    ``get_or_insert``/``get_or_insert_with`` convert the instance *in place*
    by rebinding ``__class__`` to :class:`Some`, so every existing reference
    to this object observes the conversion.
    """

    def __init__(self):
        # No payload; store None so the inherited __hash__/__eq__ still work.
        super().__init__(None)

    def __repr__(self):
        return 'Nun'

    def __iter__(self):
        """This produces an empty iterator."""
        # Idiomatic empty generator (replaces the old ``for _ in []: yield``).
        yield from ()

    def is_some(self) -> bool:
        return False

    def is_nun(self) -> bool:
        return True

    def unwrap(self) -> T:
        raise Panic('unwrapped Nun')

    def unwrap_or(self, default: T) -> T:
        return default

    def unwrap_or_else(self, func: Callable[[], T]) -> T:
        return func()

    def map(self, func: Callable[[T], U]) -> Option[U]:
        return self

    def map_or(self, func: Callable[[T], U], default: U) -> U:
        return default

    def map_or_else(self, func: Callable[[T], U], default_func: Callable[[], U]) -> U:
        return default_func()

    def ok_or(self, err: Exception) -> 'result.Result[T]':
        return result.Err(err)

    def ok_or_else(self, err_func: Callable[[], Exception]) -> 'result.Result[T]':
        return result.Err(err_func())

    def and_(self, other: 'Option[U]') -> 'Option[U]':
        return Nun()

    def and_then(self, func: Callable[[T], U]) -> 'Option[U]':
        # Annotation fixed from ``Callable[[], U]``: ``Some.and_then`` calls
        # ``func`` with the wrapped value, so the shared signature takes a T.
        return Nun()

    def or_(self, other: 'Option[T]') -> 'Option[T]':
        return other

    def or_else(self, func: Callable[[], T]) -> 'Option[T]':
        # ``func`` supplies a bare T, which is wrapped in a Some.
        return Some(func())

    def get_or_insert(self, value: T) -> T:
        # In-place conversion: this Nun becomes Some(value).
        self.__class__ = Some
        self._val = value
        return self._val

    def get_or_insert_with(self, func: Callable[[], T]) -> T:
        # In-place conversion: this Nun becomes Some(func()).
        self.__class__ = Some
        self._val = func()
        return self._val
| 32.017778
| 152
| 0.577596
| 963
| 7,204
| 4.163032
| 0.076843
| 0.022449
| 0.083811
| 0.080569
| 0.831379
| 0.821901
| 0.747069
| 0.655525
| 0.58244
| 0.501372
| 0
| 0
| 0.246391
| 7,204
| 224
| 153
| 32.160714
| 0.738442
| 0.223071
| 0
| 0.784722
| 0
| 0
| 0.059699
| 0.007984
| 0
| 0
| 0
| 0
| 0
| 1
| 0.388889
| false
| 0
| 0.027778
| 0.222222
| 0.6875
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 7
|
cdd24b7b2686df80cda3bb1e206dc848008fe024
| 62,680
|
py
|
Python
|
tests/test_arguments.py
|
synesissoftware/CLASP.Python
|
601f15baa8f79fc95f3e92175dfbe63fe85dff18
|
[
"BSD-3-Clause"
] | 1
|
2019-08-21T23:30:41.000Z
|
2019-08-21T23:30:41.000Z
|
tests/test_arguments.py
|
synesissoftware/CLASP.Python
|
601f15baa8f79fc95f3e92175dfbe63fe85dff18
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_arguments.py
|
synesissoftware/CLASP.Python
|
601f15baa8f79fc95f3e92175dfbe63fe85dff18
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
from pyclasp import Arguments
from pyclasp import specification, option
from pyclasp import Flag
from pyclasp import Option
import pyclasp as clasp
import unittest
class Arguments_tester_1(unittest.TestCase):
def test_empty_args_via_clasp_parse(self):
argv = ()
with self.assertRaises(IndexError):
clasp.parse(argv)
def test_no_args_via_clasp_parse(self):
argv = ( 'myprog', )
args = clasp.parse(argv)
self.assertEqual('myprog', args.program_name)
self.assertIsInstance(args.flags, ( tuple ))
self.assertFalse(args.flags)
self.assertIsInstance(args.options, ( tuple ))
self.assertFalse(args.options)
self.assertIsInstance(args.values, ( tuple ))
self.assertFalse(args.values)
def test_no_args_via_Arguments_constructor(self):
argv = ( 'myprog', )
args = Arguments(argv)
self.assertEqual('myprog', args.program_name)
self.assertIsInstance(args.flags, ( tuple ))
self.assertFalse(args.flags)
self.assertIsInstance(args.options, ( tuple ))
self.assertFalse(args.options)
self.assertIsInstance(args.values, ( tuple ))
self.assertFalse(args.values)
def test_one_value(self):
argv = ( 'myprog', 'value1', )
args = clasp.parse(argv)
self.assertEqual('myprog', args.program_name)
self.assertIsInstance(args.flags, ( tuple ))
self.assertFalse(args.flags)
self.assertIsInstance(args.options, ( tuple ))
self.assertFalse(args.options)
self.assertIsInstance(args.values, ( tuple ))
self.assertTrue(args.values)
self.assertEqual(1, len(args.values))
def test_two_values(self):
argv = ( 'myprog', 'value1', 'value2' )
args = clasp.parse(argv)
self.assertEqual('myprog', args.program_name)
self.assertIsInstance(args.flags, ( tuple ))
self.assertFalse(args.flags)
self.assertIsInstance(args.options, ( tuple ))
self.assertFalse(args.options)
self.assertIsInstance(args.values, ( tuple ))
self.assertTrue(args.values)
def test_ten_values(self):
argv = [ 'myprog', ] + [ "value%d" % i for i in range(0, 10) ]
args = clasp.parse(argv)
self.assertEqual('myprog', args.program_name)
self.assertIsInstance(args.flags, ( tuple ))
self.assertFalse(args.flags)
self.assertIsInstance(args.options, ( tuple ))
self.assertFalse(args.options)
self.assertIsInstance(args.values, ( tuple ))
self.assertTrue(args.values)
self.assertEqual(10, len(args.values))
def test_one_flag(self):
argv = ( 'myprog', '-f1', )
args = clasp.parse(argv)
self.assertEqual('myprog', args.program_name)
self.assertIsInstance(args.flags, ( tuple ))
self.assertTrue(args.flags)
self.assertEqual(1, len(args.flags))
flag = args.flags[0]
self.assertIsInstance(flag, ( Flag, ))
self.assertEqual(flag.given_index , 1)
self.assertEqual(flag.given_name , '-f1')
self.assertIsNone(flag.argument_specification)
self.assertEqual(flag.given_hyphens , 1)
self.assertEqual(flag.given_label , 'f1')
self.assertEqual(flag.name , '-f1')
self.assertEqual(flag.extras , {})
self.assertEqual(str(flag) , '-f1')
self.assertEqual(flag , '-f1')
self.assertIsInstance(args.options, ( tuple ))
self.assertFalse(args.options)
self.assertIsInstance(args.values, ( tuple ))
def test_two_flags(self):
argv = ( 'myprog', '-f1', '--flag2' )
args = clasp.parse(argv)
self.assertEqual('myprog', args.program_name)
self.assertIsInstance(args.flags, ( tuple ))
self.assertTrue(args.flags)
self.assertEqual(2, len(args.flags))
flag = args.flags[0]
self.assertIsInstance(flag, ( Flag, ))
self.assertEqual(flag.given_index , 1)
self.assertEqual(flag.given_name , '-f1')
self.assertIsNone(flag.argument_specification)
self.assertEqual(flag.given_hyphens , 1)
self.assertEqual(flag.given_label , 'f1')
self.assertEqual(flag.name , '-f1')
self.assertEqual(flag.extras , {})
self.assertEqual(str(flag) , '-f1')
self.assertEqual(flag , '-f1')
flag = args.flags[1]
self.assertIsInstance(flag, ( Flag, ))
self.assertEqual(flag.given_index , 2)
self.assertEqual(flag.given_name , '--flag2')
self.assertIsNone(flag.argument_specification)
self.assertEqual(flag.given_hyphens , 2)
self.assertEqual(flag.given_label , 'flag2')
self.assertEqual(flag.name , '--flag2')
self.assertEqual(flag.extras , {})
self.assertEqual(str(flag) , '--flag2')
self.assertEqual(flag , '--flag2')
self.assertIsInstance(args.options, ( tuple ))
self.assertFalse(args.options)
self.assertIsInstance(args.values, ( tuple ))
self.assertFalse(args.values)
def test_three_flags(self):
argv = ( 'myprog', '-f1', '--flag2', '---x' )
args = clasp.parse(argv)
self.assertEqual('myprog', args.program_name)
self.assertIsInstance(args.flags, ( tuple ))
self.assertTrue(args.flags)
self.assertEqual(3, len(args.flags))
flag = args.flags[0]
self.assertIsInstance(flag, ( Flag, ))
self.assertEqual(flag.given_index , 1)
self.assertEqual(flag.given_name , '-f1')
self.assertIsNone(flag.argument_specification)
self.assertEqual(flag.given_hyphens , 1)
self.assertEqual(flag.given_label , 'f1')
self.assertEqual(flag.name , '-f1')
self.assertEqual(flag.extras , {})
self.assertEqual(str(flag) , '-f1')
self.assertEqual(flag , '-f1')
flag = args.flags[1]
self.assertIsInstance(flag, ( Flag, ))
self.assertEqual(flag.given_index , 2)
self.assertEqual(flag.given_name , '--flag2')
self.assertIsNone(flag.argument_specification)
self.assertEqual(flag.given_hyphens , 2)
self.assertEqual(flag.given_label , 'flag2')
self.assertEqual(flag.name , '--flag2')
self.assertEqual(flag.extras , {})
self.assertEqual(str(flag) , '--flag2')
self.assertEqual(flag , '--flag2')
flag = args.flags[2]
self.assertIsInstance(flag, ( Flag, ))
self.assertEqual(flag.given_index , 3)
self.assertEqual(flag.given_name , '---x')
self.assertIsNone(flag.argument_specification)
self.assertEqual(flag.given_hyphens , 3)
self.assertEqual(flag.given_label , 'x')
self.assertEqual(flag.name , '---x')
self.assertEqual(flag.extras , {})
self.assertEqual(str(flag) , '---x')
self.assertEqual(flag , '---x')
self.assertIsInstance(args.options, ( tuple ))
self.assertFalse(args.options)
self.assertIsInstance(args.values, ( tuple ))
self.assertFalse(args.values)
def test_one_option(self):
argv = ( 'myprog', '-o1=v1', )
args = clasp.parse(argv)
self.assertEqual('myprog', args.program_name)
self.assertIsInstance(args.flags, ( tuple ))
self.assertFalse(args.flags)
self.assertIsInstance(args.options, ( tuple ))
self.assertTrue(args.options)
self.assertEqual(1, len(args.options))
option = args.options[0]
self.assertIsInstance(option, ( Option, ))
self.assertEqual(option.given_index , 1)
self.assertEqual(option.given_name , '-o1')
self.assertIsNone(option.argument_specification)
self.assertEqual(option.given_hyphens , 1)
self.assertEqual(option.given_label , 'o1')
self.assertEqual(option.name , '-o1')
self.assertEqual(option.value , 'v1')
self.assertEqual(option.extras , {})
self.assertEqual(str(option) , '-o1=v1')
self.assertEqual(option , '-o1=v1')
self.assertIsInstance(args.values, ( tuple ))
self.assertFalse(args.values)
def test_two_options(self):
argv = ( 'myprog', '-o1=v1', '--option2=value2' )
args = clasp.parse(argv)
self.assertEqual('myprog', args.program_name)
self.assertIsInstance(args.flags, ( tuple ))
self.assertFalse(args.flags)
self.assertIsInstance(args.options, ( tuple ))
self.assertTrue(args.options)
self.assertEqual(2, len(args.options))
option = args.options[0]
self.assertIsInstance(option, ( Option, ))
self.assertEqual(option.given_index , 1)
self.assertEqual(option.given_name , '-o1')
self.assertIsNone(option.argument_specification)
self.assertEqual(option.given_hyphens , 1)
self.assertEqual(option.given_label , 'o1')
self.assertEqual(option.name , '-o1')
self.assertEqual(option.value , 'v1')
self.assertEqual(option.extras , {})
self.assertEqual(str(option) , '-o1=v1')
self.assertEqual(option , '-o1=v1')
option = args.options[1]
self.assertIsInstance(option, ( Option, ))
self.assertEqual(option.given_index , 2)
self.assertEqual(option.given_name , '--option2')
self.assertIsNone(option.argument_specification)
self.assertEqual(option.given_hyphens , 2)
self.assertEqual(option.given_label , 'option2')
self.assertEqual(option.name , '--option2')
self.assertEqual(option.value , 'value2')
self.assertEqual(option.extras , {})
self.assertEqual(str(option) , '--option2=value2')
self.assertEqual(option , '--option2=value2')
self.assertIsInstance(args.values, ( tuple ))
self.assertFalse(args.values)
def test_three_options(self):
argv = ( 'myprog', '-o1=v1', '--option2=value2', '---the-third-option=the third value' )
args = clasp.parse(argv)
self.assertEqual('myprog', args.program_name)
self.assertIsInstance(args.flags, ( tuple ))
self.assertFalse(args.flags)
self.assertIsInstance(args.options, ( tuple ))
self.assertTrue(args.options)
self.assertEqual(3, len(args.options))
option = args.options[0]
self.assertIsInstance(option, ( Option, ))
self.assertEqual(option.given_index , 1)
self.assertEqual(option.given_name , '-o1')
self.assertIsNone(option.argument_specification)
self.assertEqual(option.given_hyphens , 1)
self.assertEqual(option.given_label , 'o1')
self.assertEqual(option.name , '-o1')
self.assertEqual(option.value , 'v1')
self.assertEqual(option.extras , {})
self.assertEqual(str(option) , '-o1=v1')
self.assertEqual(option , '-o1=v1')
option = args.options[1]
self.assertIsInstance(option, ( Option, ))
self.assertEqual(option.given_index , 2)
self.assertEqual(option.given_name , '--option2')
self.assertIsNone(option.argument_specification)
self.assertEqual(option.given_hyphens , 2)
self.assertEqual(option.given_label , 'option2')
self.assertEqual(option.name , '--option2')
self.assertEqual(option.value , 'value2')
self.assertEqual(option.extras , {})
self.assertEqual(str(option) , '--option2=value2')
self.assertEqual(option , '--option2=value2')
option = args.options[2]
self.assertIsInstance(option, ( Option, ))
self.assertEqual(option.given_index , 3)
self.assertEqual(option.given_name , '---the-third-option')
self.assertIsNone(option.argument_specification)
self.assertEqual(option.given_hyphens , 3)
self.assertEqual(option.given_label , 'the-third-option')
self.assertEqual(option.name , '---the-third-option')
self.assertEqual(option.value , 'the third value')
self.assertEqual(option.extras , {})
self.assertEqual(str(option) , '---the-third-option=the third value')
self.assertEqual(option , '---the-third-option=the third value')
self.assertIsInstance(args.values, ( tuple ))
self.assertFalse(args.values)
def test_one_flag_and_one_option_and_one_value(self):
argv = ( 'myprog', '-f1', 'value1', '--first-option=val1' )
args = clasp.parse(argv)
self.assertEqual('myprog', args.program_name)
self.assertIsInstance(args.flags, ( tuple, ))
self.assertTrue(args.flags)
self.assertEqual(1, len(args.flags))
flag = args.flags[0]
self.assertIsInstance(flag, ( Flag, ))
self.assertEqual(flag.given_index , 1)
self.assertEqual(flag.given_name , '-f1')
self.assertIsNone(flag.argument_specification)
self.assertEqual(flag.given_hyphens , 1)
self.assertEqual(flag.given_label , 'f1')
self.assertEqual(flag.name , '-f1')
self.assertEqual(flag.extras , {})
self.assertEqual(str(flag) , '-f1')
self.assertEqual(flag , '-f1')
self.assertIsInstance(args.options, ( tuple, ))
self.assertTrue(args.options)
self.assertEqual(1, len(args.options))
option = args.options[0]
self.assertIsInstance(option, ( Option, ))
self.assertEqual(option.given_index , 3)
self.assertEqual(option.given_name , '--first-option')
self.assertIsNone(option.argument_specification)
self.assertEqual(option.given_hyphens , 2)
self.assertEqual(option.given_label , 'first-option')
self.assertEqual(option.name , '--first-option')
self.assertEqual(option.value , 'val1')
self.assertEqual(option.extras , {})
self.assertEqual(str(option) , '--first-option=val1')
self.assertEqual(option , '--first-option=val1')
self.assertIsInstance(args.values, ( tuple, ))
self.assertTrue(args.values)
self.assertEqual(1, len(args.values))
self.assertEqual('value1', args.values[0])
def test_double_hyphen_1(self):
argv = ( 'myprog', '-f1', 'value1', '--', '-f2' )
args = clasp.parse(argv)
self.assertEqual('myprog', args.program_name)
self.assertIsInstance(args.flags, ( tuple, ))
self.assertTrue(args.flags)
self.assertEqual(1, len(args.flags))
flag = args.flags[0]
self.assertIsInstance(flag, ( Flag, ))
self.assertEqual(flag.given_index , 1)
self.assertEqual(flag.given_name , '-f1')
self.assertIsNone(flag.argument_specification)
self.assertEqual(flag.given_hyphens , 1)
self.assertEqual(flag.given_label , 'f1')
self.assertEqual(flag.name , '-f1')
self.assertEqual(flag.extras , {})
self.assertEqual(str(flag) , '-f1')
self.assertEqual(flag , '-f1')
self.assertIsInstance(args.options, ( tuple, ))
self.assertFalse(args.options)
self.assertIsInstance(args.values, ( tuple, ))
self.assertTrue(args.values)
self.assertEqual(2, len(args.values))
self.assertEqual('value1', args.values[0])
self.assertEqual('-f2', args.values[1])
def test_double_hyphen_2(self):
argv = ( 'myprog', '-f1', 'value1', '--', '-f2', '--', '--option1=v1' )
args = clasp.parse(argv)
self.assertEqual('myprog', args.program_name)
self.assertIsInstance(args.flags, ( tuple, ))
self.assertTrue(args.flags)
self.assertEqual(1, len(args.flags))
flag = args.flags[0]
self.assertIsInstance(flag, ( Flag, ))
self.assertEqual(flag.given_index , 1)
self.assertEqual(flag.given_name , '-f1')
self.assertIsNone(flag.argument_specification)
self.assertEqual(flag.given_hyphens , 1)
self.assertEqual(flag.given_label , 'f1')
self.assertEqual(flag.name , '-f1')
self.assertEqual(flag.extras , {})
self.assertEqual(str(flag) , '-f1')
self.assertEqual(flag , '-f1')
self.assertIsInstance(args.options, ( tuple, ))
self.assertFalse(args.options)
self.assertIsInstance(args.values, ( tuple, ))
self.assertTrue(args.values)
self.assertEqual(4, len(args.values))
self.assertEqual('value1', args.values[0])
self.assertEqual('-f2', args.values[1])
self.assertEqual('--', args.values[2])
self.assertEqual('--option1=v1', args.values[3])
def test_one_flag_and_one_option_and_one_value_with_empty_specifications(self):
specifications_list = ( tuple(), list(), None )
for specifications in specifications_list:
argv = ( 'myprog', '-f1', 'value1', '--first-option=val1' )
args = clasp.parse(argv, specifications)
self.assertEqual('myprog', args.program_name)
self.assertIsInstance(args.flags, ( tuple, ))
self.assertTrue(args.flags)
self.assertEqual(1, len(args.flags))
flag = args.flags[0]
self.assertIsInstance(flag, ( Flag, ))
self.assertEqual(flag.given_index , 1)
self.assertEqual(flag.given_name , '-f1')
self.assertIsNone(flag.argument_specification)
self.assertEqual(flag.given_hyphens , 1)
self.assertEqual(flag.given_label , 'f1')
self.assertEqual(flag.name , '-f1')
self.assertEqual(flag.extras , {})
self.assertEqual(str(flag) , '-f1')
self.assertEqual(flag , '-f1')
self.assertIsInstance(args.options, ( tuple, ))
self.assertTrue(args.options)
self.assertEqual(1, len(args.options))
option = args.options[0]
self.assertIsInstance(option, ( Option, ))
self.assertEqual(option.given_index , 3)
self.assertEqual(option.given_name , '--first-option')
self.assertIsNone(option.argument_specification)
self.assertEqual(option.given_hyphens , 2)
self.assertEqual(option.given_label , 'first-option')
self.assertEqual(option.name , '--first-option')
self.assertEqual(option.value , 'val1')
self.assertEqual(option.extras , {})
self.assertEqual(str(option) , '--first-option=val1')
self.assertEqual(option , '--first-option=val1')
self.assertIsInstance(args.values, ( tuple, ))
self.assertTrue(args.values)
self.assertEqual(1, len(args.values))
self.assertEqual('value1', args.values[0])
def test_alias_of_flag_with_one_specification(self):
flag_verbose = clasp.flag('--verbose', alias = '-v', extras = { 'x-name': 'v-val' })
specifications = (
flag_verbose,
)
argv = ( 'myprog', '--verbose', '--succinct', 'value', '-v' )
args = clasp.parse(argv, specifications)
self.assertEqual('myprog', args.program_name)
self.assertIsInstance(args.flags, ( tuple, ))
self.assertTrue(args.flags)
self.assertEqual(2, len(args.flags))
flag = args.flags[0]
self.assertIsInstance(flag, ( Flag, ))
self.assertEqual(flag.given_index , 4)
self.assertEqual(flag.given_name , '-v')
self.assertEqual(flag.argument_specification, flag_verbose)
self.assertEqual(flag.given_hyphens , 1)
self.assertEqual(flag.given_label , 'v')
self.assertEqual(flag.name , '--verbose')
self.assertEqual(flag.extras , { 'x-name': 'v-val' })
self.assertEqual(str(flag) , '--verbose')
self.assertEqual(flag , '--verbose')
flag = args.flags[1]
self.assertIsInstance(flag, ( Flag, ))
self.assertEqual(flag.given_index , 2)
self.assertEqual(flag.given_name , '--succinct')
self.assertIsNone(flag.argument_specification)
self.assertEqual(flag.given_hyphens , 2)
self.assertEqual(flag.given_label , 'succinct')
self.assertEqual(flag.name , '--succinct')
self.assertEqual(flag.extras , {})
self.assertEqual(str(flag) , '--succinct')
self.assertEqual(flag , '--succinct')
self.assertIsInstance(args.options, ( tuple, ))
self.assertFalse(args.options)
self.assertIsInstance(args.values, ( tuple, ))
self.assertTrue(args.values)
self.assertEqual(1, len(args.values))
self.assertEqual('value', args.values[0])
def alias_of_flag_with_two_specifications(self):
flag_expand = clasp.flag('--expand', aliases = ( '-x', '--x', ), extras = { 'some-value': ( 'e', 'x', 't', 'r', 'a', 's', ) })
specifications = (
flag_expand,
)
argv = ( 'myprog', '-f1', 'value1', '-x', '--delete', '--x', )
args = clasp.parse(argv, specifications)
self.assertEqual('myprog', args.program_name)
self.assertIsInstance(args.flags, ( tuple, ))
self.assertTrue(args.flags)
self.assertEqual(4, len(args.flags))
flag = args.flags[0]
self.assertIsInstance(flag, ( Flag, ))
self.assertEqual(flag.given_index , 1)
self.assertEqual(flag.given_name , '-f1')
self.assertIsNone(flag.argument_specification)
self.assertEqual(flag.given_hyphens , 1)
self.assertEqual(flag.given_label , 'f1')
self.assertEqual(flag.name , '-f1')
self.assertEqual(flag.extras , {})
self.assertEqual(str(flag) , '-f1')
self.assertEqual(flag , '-f1')
flag = args.flags[1]
self.assertIsInstance(flag, ( Flag, ))
self.assertEqual(flag.given_index , 3)
self.assertEqual(flag.given_name , '-x')
self.assertEqual(flag.argument_specification, flag_expand)
self.assertEqual(flag.given_hyphens , 1)
self.assertEqual(flag.given_label , 'x')
self.assertEqual(flag.name , '--expand')
self.assertTrue(flag.extras)
self.assertEqual(str(flag) , '--expand')
self.assertEqual(flag , '--expand')
flag = args.flags[2]
self.assertIsInstance(flag, ( Flag, ))
self.assertEqual(flag.given_index , 4)
self.assertEqual(flag.given_name , '--delete')
self.assertIsNone(flag.argument_specification)
self.assertEqual(flag.given_hyphens , 2)
self.assertEqual(flag.given_label , 'delete')
self.assertEqual(flag.name , '--delete')
self.assertEqual(flag.extras , {})
self.assertEqual(str(flag) , '--delete')
self.assertEqual(flag , '--delete')
flag = args.flags[3]
self.assertIsInstance(flag, ( Flag, ))
self.assertEqual(flag.given_index , 5)
self.assertEqual(flag.given_name , '--x')
self.assertEqual(flag.argument_specification, specifications[x])
self.assertEqual(flag.given_hyphens , 2)
self.assertEqual(flag.given_label , 'x')
self.assertEqual(flag.name , '--expand')
self.assertTrue(flag.extras)
self.assertIsInstance(flag.extras, dict)
self.assertEqual(1, len(flag.extras))
self.assertEqual(( 'e', 'x', 't', 'r', 'a', 's', ), flag.extras['some-value'])
self.assertEqual(str(flag) , '--expand')
self.assertEqual(flag , '--expand')
self.assertIsInstance(args.options, ( tuple, ))
self.assertFalse(args.options)
self.assertIsInstance(args.values, ( tuple, ))
self.assertTrue(args.values)
self.assertEqual(1, len(args.values))
self.assertEqual('value1', args.values[0])
def test_alias_of_option_with_one_specification(self):
option_option = clasp.option('--option', alias = '-o')
specifications = (
option_option,
)
argv = ( 'myprog', '-f1', 'value1', '-o=value2', )
args = clasp.parse(argv, specifications)
self.assertEqual('myprog', args.program_name)
self.assertIsInstance(args.flags, ( tuple, ))
self.assertTrue(args.flags)
self.assertEqual(1, len(args.flags))
flag = args.flags[0]
self.assertIsInstance(flag, ( Flag, ))
self.assertEqual(flag.given_index , 1)
self.assertEqual(flag.given_name , '-f1')
self.assertIsNone(flag.argument_specification)
self.assertEqual(flag.given_hyphens , 1)
self.assertEqual(flag.given_label , 'f1')
self.assertEqual(flag.name , '-f1')
self.assertEqual(flag.extras , {})
self.assertEqual(str(flag) , '-f1')
self.assertEqual(flag , '-f1')
self.assertIsInstance(args.options, ( tuple, ))
self.assertTrue(args.options)
self.assertEqual(1, len(args.options))
option = args.options[0]
self.assertIsInstance(option, ( Option, ))
self.assertEqual(option.given_index , 3)
self.assertEqual(option.given_name , '-o')
self.assertEqual(option.argument_specification, option_option)
self.assertEqual(option.given_hyphens , 1)
self.assertEqual(option.given_label , 'o')
self.assertEqual(option.name , '--option')
self.assertEqual(option.value , 'value2')
self.assertEqual(option.extras , {})
self.assertEqual(str(option) , '--option=value2')
self.assertEqual(option , '--option=value2')
self.assertIsInstance(args.values, ( tuple, ))
self.assertTrue(args.values)
self.assertEqual(1, len(args.values))
self.assertEqual('value1', args.values[0])
def test_alias_of_option_with_separate_value(self):
option_option = clasp.option('--option', alias = '-o')
specifications = (
option_option,
)
argv = ( 'myprog', '-o', 'value-1', )
args = clasp.parse(argv, specifications)
self.assertEqual('myprog', args.program_name)
self.assertIsInstance(args.flags, ( tuple, ))
self.assertFalse(args.flags)
self.assertEqual(0, len(args.flags))
self.assertIsInstance(args.options, ( tuple, ))
self.assertTrue(args.options)
self.assertEqual(1, len(args.options))
option = args.options[0]
self.assertIsInstance(option, ( Option, ))
self.assertEqual(option.given_index , 1)
self.assertEqual(option.given_name , '-o')
self.assertEqual(option.argument_specification, option_option)
self.assertEqual(option.given_hyphens , 1)
self.assertEqual(option.given_label , 'o')
self.assertEqual(option.name , '--option')
self.assertEqual(option.value , 'value-1')
self.assertEqual(option.extras , {})
self.assertEqual(str(option) , '--option=value-1')
self.assertEqual(option , '--option=value-1')
self.assertIsInstance(args.values, ( tuple, ))
self.assertFalse(args.values)
self.assertEqual(0, len(args.values))
def test_alias_of_option_that_has_default_with_separate_value(self):
option_option = clasp.option('--option', alias = '-o', default_value = 'def-val-1')
specifications = (
option_option,
)
argv = ( 'myprog', '-o', 'value-1', )
args = clasp.parse(argv, specifications)
self.assertEqual('myprog', args.program_name)
self.assertIsInstance(args.flags, ( tuple, ))
self.assertFalse(args.flags)
self.assertEqual(0, len(args.flags))
self.assertIsInstance(args.options, ( tuple, ))
self.assertTrue(args.options)
self.assertEqual(1, len(args.options))
option = args.options[0]
self.assertIsInstance(option, ( Option, ))
self.assertEqual(option.given_index , 1)
self.assertEqual(option.given_name , '-o')
self.assertEqual(option.argument_specification, option_option)
self.assertEqual(option.given_hyphens , 1)
self.assertEqual(option.given_label , 'o')
self.assertEqual(option.name , '--option')
self.assertEqual(option.value , 'value-1')
self.assertEqual(option.extras , {})
self.assertEqual(str(option) , '--option=value-1')
self.assertEqual(option , '--option=value-1')
self.assertIsInstance(args.values, ( tuple, ))
self.assertFalse(args.values)
self.assertEqual(0, len(args.values))
def test_alias_of_option_that_has_default_with_separate_value_that_resembles_flag(self):
option_option = clasp.option('--option', alias = '-o', default_value = 'def-val-1')
specifications = (
option_option,
)
argv = ( 'myprog', '-o', '-o', )
args = clasp.parse(argv, specifications)
self.assertEqual('myprog', args.program_name)
self.assertIsInstance(args.flags, ( tuple, ))
self.assertFalse(args.flags)
self.assertEqual(0, len(args.flags))
self.assertIsInstance(args.options, ( tuple, ))
self.assertTrue(args.options)
self.assertEqual(1, len(args.options))
option = args.options[0]
self.assertIsInstance(option, ( Option, ))
self.assertEqual(option.given_index , 1)
self.assertEqual(option.given_name , '-o')
self.assertEqual(option.argument_specification, option_option)
self.assertEqual(option.given_hyphens , 1)
self.assertEqual(option.given_label , 'o')
self.assertEqual(option.name , '--option')
self.assertEqual(option.value , '-o')
self.assertEqual(option.extras , {})
self.assertEqual(str(option) , '--option=-o')
self.assertEqual(option , '--option=-o')
self.assertIsInstance(args.values, ( tuple, ))
self.assertFalse(args.values)
self.assertEqual(0, len(args.values))
def test_alias_of_option_that_has_default_with_missing_separate_value(self):
    """A trailing '-o' with no following token falls back to the declared default value."""

    option_option = clasp.option('--option', alias='-o', default_value='def-val-1')
    specifications = (option_option,)

    args = clasp.parse(('myprog', '-o'), specifications)

    self.assertEqual('myprog', args.program_name)

    # no flags are recognised
    self.assertIsInstance(args.flags, tuple)
    self.assertFalse(args.flags)
    self.assertEqual(0, len(args.flags))

    # exactly one option is recognised
    self.assertIsInstance(args.options, tuple)
    self.assertTrue(args.options)
    self.assertEqual(1, len(args.options))

    opt = args.options[0]
    self.assertIsInstance(opt, Option)
    self.assertEqual(opt.given_index, 1)
    self.assertEqual(opt.given_name, '-o')
    self.assertEqual(opt.argument_specification, option_option)
    self.assertEqual(opt.given_hyphens, 1)
    self.assertEqual(opt.given_label, 'o')
    self.assertEqual(opt.name, '--option')
    # missing value => default value from the specification
    self.assertEqual(opt.value, 'def-val-1')
    self.assertEqual(opt.extras, {})
    self.assertEqual(str(opt), '--option=def-val-1')
    self.assertEqual(opt, '--option=def-val-1')

    # no free values remain
    self.assertIsInstance(args.values, tuple)
    self.assertFalse(args.values)
    self.assertEqual(0, len(args.values))
def test_alias_of_option_with_attached_empty(self):
    """'-o=' (empty attached value) uses the default; the next token stays a free value."""

    specifications = (
        clasp.option('--option', alias='-o', default_value='def-val-1'),
    )

    args = clasp.parse(('myprog', '-o=', 'value-2'), specifications)

    self.assertEqual('myprog', args.program_name)

    # no flags are recognised
    self.assertIsInstance(args.flags, tuple)
    self.assertFalse(args.flags)
    self.assertEqual(0, len(args.flags))

    # exactly one option is recognised
    self.assertIsInstance(args.options, tuple)
    self.assertTrue(args.options)
    self.assertEqual(1, len(args.options))

    opt = args.options[0]
    self.assertIsInstance(opt, Option)
    self.assertEqual(opt.given_index, 1)
    self.assertEqual(opt.given_name, '-o')
    self.assertEqual(opt.argument_specification, specifications[0])
    self.assertEqual(opt.given_hyphens, 1)
    self.assertEqual(opt.given_label, 'o')
    self.assertEqual(opt.name, '--option')
    # empty attached value => default value; 'value-2' is NOT consumed
    self.assertEqual(opt.value, 'def-val-1')
    self.assertEqual(opt.extras, {})
    self.assertEqual(str(opt), '--option=def-val-1')
    self.assertEqual(opt, '--option=def-val-1')

    # 'value-2' is left over as the sole free value
    self.assertIsInstance(args.values, tuple)
    self.assertTrue(args.values)
    self.assertEqual(1, len(args.values))
    self.assertEqual('value-2', args.values[0])
def test_flag_alias_of_option_with_value(self):
    """A flag alias spelled '--verbosity=high' expands '-v' into that option+value."""

    option_verbosity = clasp.option('--verbosity')
    specifications = (
        option_verbosity,
        clasp.flag('--verbosity=high', alias='-v'),
    )

    args = clasp.parse(('myprog', '-v'), specifications)

    self.assertEqual('myprog', args.program_name)

    # '-v' does not surface as a flag ...
    self.assertIsInstance(args.flags, tuple)
    self.assertFalse(args.flags)
    self.assertEqual(0, len(args.flags))

    # ... it surfaces as the --verbosity option instead
    self.assertIsInstance(args.options, tuple)
    self.assertTrue(args.options)
    self.assertEqual(1, len(args.options))

    opt = args.options[0]
    self.assertIsInstance(opt, Option)
    self.assertEqual(opt.given_index, 1)
    self.assertEqual(opt.given_name, '-v')
    self.assertEqual(opt.argument_specification, specifications[0])
    self.assertEqual(opt.given_hyphens, 1)
    self.assertEqual(opt.given_label, 'v')
    self.assertEqual(opt.name, '--verbosity')
    self.assertEqual(opt.value, 'high')
    self.assertEqual(opt.extras, {})
    self.assertEqual(str(opt), '--verbosity=high')
    self.assertEqual(opt, '--verbosity=high')

    # no free values remain
    self.assertIsInstance(args.values, tuple)
    self.assertFalse(args.values)
    self.assertEqual(0, len(args.values))
def test_alias_of_option_with_value_allowing_multiple(self):
    """With on_multiple='allow', every mention of '-o'/'--option' is retained in order."""

    option_option = clasp.option('--option', alias='-o', default_value='default-value', on_multiple='allow')
    specifications = (option_option,)

    argv = ('myprog', '-f1', 'value-1', '-o=', '-o=given-value-1', '--option=given-value-2')
    args = clasp.parse(argv, specifications)

    self.assertEqual('myprog', args.program_name)

    # the unspecified '-f1' comes through as a flag without a specification
    self.assertIsInstance(args.flags, tuple)
    self.assertTrue(args.flags)
    self.assertEqual(1, len(args.flags))

    flg = args.flags[0]
    self.assertIsInstance(flg, Flag)
    self.assertEqual(flg.given_index, 1)
    self.assertEqual(flg.given_name, '-f1')
    self.assertIsNone(flg.argument_specification)
    self.assertEqual(flg.given_hyphens, 1)
    self.assertEqual(flg.given_label, 'f1')
    self.assertEqual(flg.name, '-f1')
    self.assertEqual(flg.extras, {})
    self.assertEqual(str(flg), '-f1')
    self.assertEqual(flg, '-f1')

    # all three mentions of the option are kept
    self.assertIsInstance(args.options, tuple)
    self.assertTrue(args.options)
    self.assertEqual(3, len(args.options))

    # '-o=' (empty attached value) -> default value
    opt = args.options[0]
    self.assertIsInstance(opt, Option)
    self.assertEqual(opt.given_index, 3)
    self.assertEqual(opt.given_name, '-o')
    self.assertEqual(opt.argument_specification, specifications[0])
    self.assertEqual(opt.given_hyphens, 1)
    self.assertEqual(opt.given_label, 'o')
    self.assertEqual(opt.name, '--option')
    self.assertEqual(opt.value, 'default-value')
    self.assertEqual(opt.extras, {})
    self.assertEqual(str(opt), '--option=default-value')
    self.assertEqual(opt, '--option=default-value')

    # '-o=given-value-1'
    opt = args.options[1]
    self.assertIsInstance(opt, Option)
    self.assertEqual(opt.given_index, 4)
    self.assertEqual(opt.given_name, '-o')
    self.assertEqual(opt.argument_specification, specifications[0])
    self.assertEqual(opt.given_hyphens, 1)
    self.assertEqual(opt.given_label, 'o')
    self.assertEqual(opt.name, '--option')
    self.assertEqual(opt.value, 'given-value-1')
    self.assertEqual(opt.extras, {})
    self.assertEqual(str(opt), '--option=given-value-1')
    self.assertEqual(opt, '--option=given-value-1')

    # '--option=given-value-2' given by full name
    opt = args.options[2]
    self.assertIsInstance(opt, Option)
    self.assertEqual(opt.given_index, 5)
    self.assertEqual(opt.given_name, '--option')
    self.assertEqual(opt.argument_specification, specifications[0])
    self.assertEqual(opt.given_hyphens, 2)
    self.assertEqual(opt.given_label, 'option')
    self.assertEqual(opt.name, '--option')
    self.assertEqual(opt.value, 'given-value-2')
    self.assertEqual(opt.extras, {})
    self.assertEqual(str(opt), '--option=given-value-2')
    self.assertEqual(opt, '--option=given-value-2')

    # 'value-1' is the only free value
    self.assertIsInstance(args.values, tuple)
    self.assertTrue(args.values)
    self.assertEqual(1, len(args.values))
    self.assertEqual('value-1', args.values[0])
def test_alias_of_option_with_value_ignoring_multiple(self):
    """Parse results for on_multiple='ignore'.

    NOTE(review): the expectations here are identical to the 'allow' case
    (all three mentions retained) -- confirm that 'ignore' is intended to
    leave the parsed tuple unchanged.
    """

    option_option = clasp.option('--option', alias='-o', default_value='default-value', on_multiple='ignore')
    specifications = (option_option,)

    argv = ('myprog', '-f1', 'value-1', '-o=', '-o=given-value-1', '--option=given-value-2')
    args = clasp.parse(argv, specifications)

    self.assertEqual('myprog', args.program_name)

    # the unspecified '-f1' comes through as a flag without a specification
    self.assertIsInstance(args.flags, tuple)
    self.assertTrue(args.flags)
    self.assertEqual(1, len(args.flags))

    flg = args.flags[0]
    self.assertIsInstance(flg, Flag)
    self.assertEqual(flg.given_index, 1)
    self.assertEqual(flg.given_name, '-f1')
    self.assertIsNone(flg.argument_specification)
    self.assertEqual(flg.given_hyphens, 1)
    self.assertEqual(flg.given_label, 'f1')
    self.assertEqual(flg.name, '-f1')
    self.assertEqual(flg.extras, {})
    self.assertEqual(str(flg), '-f1')
    self.assertEqual(flg, '-f1')

    # all three mentions of the option are kept
    self.assertIsInstance(args.options, tuple)
    self.assertTrue(args.options)
    self.assertEqual(3, len(args.options))

    # '-o=' (empty attached value) -> default value
    opt = args.options[0]
    self.assertIsInstance(opt, Option)
    self.assertEqual(opt.given_index, 3)
    self.assertEqual(opt.given_name, '-o')
    self.assertEqual(opt.argument_specification, specifications[0])
    self.assertEqual(opt.given_hyphens, 1)
    self.assertEqual(opt.given_label, 'o')
    self.assertEqual(opt.name, '--option')
    self.assertEqual(opt.value, 'default-value')
    self.assertEqual(opt.extras, {})
    self.assertEqual(str(opt), '--option=default-value')
    self.assertEqual(opt, '--option=default-value')

    # '-o=given-value-1'
    opt = args.options[1]
    self.assertIsInstance(opt, Option)
    self.assertEqual(opt.given_index, 4)
    self.assertEqual(opt.given_name, '-o')
    self.assertEqual(opt.argument_specification, specifications[0])
    self.assertEqual(opt.given_hyphens, 1)
    self.assertEqual(opt.given_label, 'o')
    self.assertEqual(opt.name, '--option')
    self.assertEqual(opt.value, 'given-value-1')
    self.assertEqual(opt.extras, {})
    self.assertEqual(str(opt), '--option=given-value-1')
    self.assertEqual(opt, '--option=given-value-1')

    # '--option=given-value-2' given by full name
    opt = args.options[2]
    self.assertIsInstance(opt, Option)
    self.assertEqual(opt.given_index, 5)
    self.assertEqual(opt.given_name, '--option')
    self.assertEqual(opt.argument_specification, specifications[0])
    self.assertEqual(opt.given_hyphens, 2)
    self.assertEqual(opt.given_label, 'option')
    self.assertEqual(opt.name, '--option')
    self.assertEqual(opt.value, 'given-value-2')
    self.assertEqual(opt.extras, {})
    self.assertEqual(str(opt), '--option=given-value-2')
    self.assertEqual(opt, '--option=given-value-2')

    # 'value-1' is the only free value
    self.assertIsInstance(args.values, tuple)
    self.assertTrue(args.values)
    self.assertEqual(1, len(args.values))
    self.assertEqual('value-1', args.values[0])
def test_alias_of_option_with_value_replacing_multiple(self):
    """Parse results for on_multiple='replace'.

    NOTE(review): the expectations here are identical to the 'allow' case
    (all three mentions retained) -- confirm that 'replace' is intended to
    leave the parsed tuple unchanged.
    """

    option_option = clasp.option('--option', alias='-o', default_value='default-value', on_multiple='replace')
    specifications = (option_option,)

    argv = ('myprog', '-f1', 'value-1', '-o=', '-o=given-value-1', '--option=given-value-2')
    args = clasp.parse(argv, specifications)

    self.assertEqual('myprog', args.program_name)

    # the unspecified '-f1' comes through as a flag without a specification
    self.assertIsInstance(args.flags, tuple)
    self.assertTrue(args.flags)
    self.assertEqual(1, len(args.flags))

    flg = args.flags[0]
    self.assertIsInstance(flg, Flag)
    self.assertEqual(flg.given_index, 1)
    self.assertEqual(flg.given_name, '-f1')
    self.assertIsNone(flg.argument_specification)
    self.assertEqual(flg.given_hyphens, 1)
    self.assertEqual(flg.given_label, 'f1')
    self.assertEqual(flg.name, '-f1')
    self.assertEqual(flg.extras, {})
    self.assertEqual(str(flg), '-f1')
    self.assertEqual(flg, '-f1')

    # all three mentions of the option are kept
    self.assertIsInstance(args.options, tuple)
    self.assertTrue(args.options)
    self.assertEqual(3, len(args.options))

    # '-o=' (empty attached value) -> default value
    opt = args.options[0]
    self.assertIsInstance(opt, Option)
    self.assertEqual(opt.given_index, 3)
    self.assertEqual(opt.given_name, '-o')
    self.assertEqual(opt.argument_specification, specifications[0])
    self.assertEqual(opt.given_hyphens, 1)
    self.assertEqual(opt.given_label, 'o')
    self.assertEqual(opt.name, '--option')
    self.assertEqual(opt.value, 'default-value')
    self.assertEqual(opt.extras, {})
    self.assertEqual(str(opt), '--option=default-value')
    self.assertEqual(opt, '--option=default-value')

    # '-o=given-value-1'
    opt = args.options[1]
    self.assertIsInstance(opt, Option)
    self.assertEqual(opt.given_index, 4)
    self.assertEqual(opt.given_name, '-o')
    self.assertEqual(opt.argument_specification, specifications[0])
    self.assertEqual(opt.given_hyphens, 1)
    self.assertEqual(opt.given_label, 'o')
    self.assertEqual(opt.name, '--option')
    self.assertEqual(opt.value, 'given-value-1')
    self.assertEqual(opt.extras, {})
    self.assertEqual(str(opt), '--option=given-value-1')
    self.assertEqual(opt, '--option=given-value-1')

    # '--option=given-value-2' given by full name
    opt = args.options[2]
    self.assertIsInstance(opt, Option)
    self.assertEqual(opt.given_index, 5)
    self.assertEqual(opt.given_name, '--option')
    self.assertEqual(opt.argument_specification, specifications[0])
    self.assertEqual(opt.given_hyphens, 2)
    self.assertEqual(opt.given_label, 'option')
    self.assertEqual(opt.name, '--option')
    self.assertEqual(opt.value, 'given-value-2')
    self.assertEqual(opt.extras, {})
    self.assertEqual(str(opt), '--option=given-value-2')
    self.assertEqual(opt, '--option=given-value-2')

    # 'value-1' is the only free value
    self.assertIsInstance(args.values, tuple)
    self.assertTrue(args.values)
    self.assertEqual(1, len(args.values))
    self.assertEqual('value-1', args.values[0])
def test_flags_combined(self):
    """A grouped short form '-ced' expands into the three aliased long flags, in order."""

    flag_compile = clasp.flag('--compile', alias='-c')
    flag_debug = clasp.flag('--debug', alias='-d')
    flag_execute = clasp.flag('--execute', alias='-e')
    specifications = (flag_compile, flag_debug, flag_execute)

    # sanity: the tuple holds the specifications in declaration order
    self.assertEqual(flag_compile, specifications[0])
    self.assertEqual(flag_debug, specifications[1])
    self.assertEqual(flag_execute, specifications[2])

    args = clasp.parse(('myprog', '-ced'), specifications)

    self.assertEqual('myprog', args.program_name)

    self.assertIsInstance(args.flags, tuple)
    self.assertTrue(args.flags)
    self.assertEqual(3, len(args.flags))

    # first letter of '-ced' -> --compile
    flg = args.flags[0]
    self.assertIsInstance(flg, Flag)
    self.assertEqual(flg.given_index, 1)
    self.assertEqual(flg.given_name, '-ced')
    self.assertEqual(flg.argument_specification, flag_compile)
    self.assertEqual(flg.given_hyphens, 1)
    self.assertEqual(flg.given_label, 'ced')
    self.assertEqual(flg.name, '--compile')
    self.assertEqual(flg.extras, {})
    self.assertEqual(str(flg), '--compile')
    self.assertEqual(flg, '--compile')
    self.assertEqual(flg, flag_compile)
    self.assertEqual(flag_compile, flg)

    # second letter -> --execute
    flg = args.flags[1]
    self.assertIsInstance(flg, Flag)
    self.assertEqual(flg.given_index, 1)
    self.assertEqual(flg.given_name, '-ced')
    self.assertEqual(flg.argument_specification, flag_execute)
    self.assertEqual(flg.given_hyphens, 1)
    self.assertEqual(flg.given_label, 'ced')
    self.assertEqual(flg.name, '--execute')
    self.assertEqual(flg.extras, {})
    self.assertEqual(str(flg), '--execute')
    self.assertEqual(flg, '--execute')
    self.assertEqual(flg, flag_execute)
    self.assertEqual(flag_execute, flg)

    # third letter -> --debug
    flg = args.flags[2]
    self.assertIsInstance(flg, Flag)
    self.assertEqual(flg.given_index, 1)
    self.assertEqual(flg.given_name, '-ced')
    self.assertEqual(flg.argument_specification, flag_debug)
    self.assertEqual(flg.given_hyphens, 1)
    self.assertEqual(flg.given_label, 'ced')
    self.assertEqual(flg.name, '--debug')
    self.assertEqual(flg.extras, {})
    self.assertEqual(str(flg), '--debug')
    self.assertEqual(flg, '--debug')
    self.assertEqual(flag_debug, flg)
    self.assertEqual(flg, flag_debug)

    # no options and no free values
    self.assertIsInstance(args.options, tuple)
    self.assertFalse(args.options)
    self.assertEqual(0, len(args.options))
    self.assertIsInstance(args.values, tuple)
    self.assertFalse(args.values)
    self.assertEqual(0, len(args.values))
def test_flags_of_flags_and_options_combined(self):
    """'-ced' expands into two flags plus an option+value via the '--mode=debug' flag alias."""

    flag_compile = clasp.flag('--compile', alias='-c')
    flag_execute = clasp.flag('--execute', alias='-e')
    option_mode = clasp.option('--mode', alias='-m')
    specifications = (
        flag_compile,
        clasp.flag('--mode=debug', alias='-d'),
        flag_execute,
        option_mode,
    )

    args = clasp.parse(('myprog', '-ced'), specifications)

    self.assertEqual('myprog', args.program_name)

    # 'c' and 'e' expand to flags
    self.assertIsInstance(args.flags, tuple)
    self.assertTrue(args.flags)
    self.assertEqual(2, len(args.flags))

    flg = args.flags[0]
    self.assertIsInstance(flg, Flag)
    self.assertEqual(flg.given_index, 1)
    self.assertEqual(flg.given_name, '-ced')
    self.assertEqual(flg.argument_specification, flag_compile)
    self.assertEqual(flg.given_hyphens, 1)
    self.assertEqual(flg.given_label, 'ced')
    self.assertEqual(flg.name, '--compile')
    self.assertEqual(flg.extras, {})
    self.assertEqual(str(flg), '--compile')
    self.assertEqual(flg, '--compile')
    self.assertEqual(flg, flag_compile)
    self.assertEqual(flag_compile, flg)

    flg = args.flags[1]
    self.assertIsInstance(flg, Flag)
    self.assertEqual(flg.given_index, 1)
    self.assertEqual(flg.given_name, '-ced')
    self.assertEqual(flg.argument_specification, flag_execute)
    self.assertEqual(flg.given_hyphens, 1)
    self.assertEqual(flg.given_label, 'ced')
    self.assertEqual(flg.name, '--execute')
    self.assertEqual(flg.extras, {})
    self.assertEqual(str(flg), '--execute')
    self.assertEqual(flg, '--execute')
    self.assertEqual(flg, flag_execute)
    self.assertEqual(flag_execute, flg)

    # 'd' expands (via its flag alias) to the --mode option with value 'debug'
    self.assertIsInstance(args.options, tuple)
    self.assertTrue(args.options)
    self.assertEqual(1, len(args.options))

    opt = args.options[0]
    self.assertIsInstance(opt, Option)
    self.assertEqual(opt.given_index, 1)
    self.assertEqual(opt.given_name, '-ced')
    self.assertEqual(opt.argument_specification, option_mode)
    self.assertEqual(opt.given_hyphens, 1)
    self.assertEqual(opt.given_label, 'ced')
    self.assertEqual(opt.name, '--mode')
    self.assertEqual(opt.extras, {})
    self.assertEqual(str(opt), '--mode=debug')
    self.assertEqual(opt, '--mode=debug')
    self.assertEqual(option_mode, opt)
    self.assertEqual(opt, option_mode)

    # no free values remain
    self.assertIsInstance(args.values, tuple)
    self.assertFalse(args.values)
    self.assertEqual(0, len(args.values))
def test_first_unused_Flag_via_get_first_unused_flag(self):
    """get_first_unused_flag() yields the flags in order as each is use()d."""

    flag_compile = clasp.flag('--compile', alias='-c')
    flag_debug = clasp.flag('--debug', alias='-d')
    specifications = (flag_compile, flag_debug)

    args = clasp.parse(('dir1/myprog', '-cd'), specifications)

    # program name is the basename of argv[0]
    self.assertEqual('myprog', args.program_name)
    self.assertIsInstance(args.flags, tuple)
    self.assertTrue(args.flags)
    self.assertEqual(2, len(args.flags))

    # no options were given at all
    self.assertIsNone(args.get_first_unused_option())

    # before any use(): --compile is first
    unused = args.get_first_unused_flag()
    self.assertIsNotNone(unused)
    self.assertEqual(flag_compile, unused)

    # after the first use(): --debug is next
    unused.use()
    unused = args.get_first_unused_flag()
    self.assertIsNotNone(unused)
    self.assertEqual(flag_debug, unused)

    # after the second use(): nothing left
    unused.use()
    unused = args.get_first_unused_flag()
    self.assertIsNone(unused)
def test_first_unused_Flag_via_get_first_unused_flag_or_option(self):
    """get_first_unused_flag_or_option() yields flags in order as each is use()d."""

    flag_compile = clasp.flag('--compile', alias='-c')
    flag_debug = clasp.flag('--debug', alias='-d')
    specifications = (flag_compile, flag_debug)

    args = clasp.parse(('dir1/myprog', '-cd'), specifications)

    # program name is the basename of argv[0]
    self.assertEqual('myprog', args.program_name)
    self.assertIsInstance(args.flags, tuple)
    self.assertTrue(args.flags)
    self.assertEqual(2, len(args.flags))

    # no options were given at all
    self.assertIsNone(args.get_first_unused_option())

    # before any use(): --compile is first
    unused = args.get_first_unused_flag_or_option()
    self.assertIsNotNone(unused)
    self.assertEqual(flag_compile, unused)

    # after the first use(): --debug is next
    unused.use()
    unused = args.get_first_unused_flag_or_option()
    self.assertIsNotNone(unused)
    self.assertEqual(flag_debug, unused)

    # after the second use(): nothing left
    unused.use()
    unused = args.get_first_unused_flag_or_option()
    self.assertIsNone(unused)
def test_first_unused_Flag_via_get_first_unused(self):
    """get_first_unused() yields flags in order as each is use()d."""

    flag_compile = clasp.flag('--compile', alias='-c')
    flag_debug = clasp.flag('--debug', alias='-d')
    specifications = (flag_compile, flag_debug)

    args = clasp.parse(('dir1/myprog', '-cd'), specifications)

    # program name is the basename of argv[0]
    self.assertEqual('myprog', args.program_name)
    self.assertIsInstance(args.flags, tuple)
    self.assertTrue(args.flags)
    self.assertEqual(2, len(args.flags))

    # no options were given at all
    self.assertIsNone(args.get_first_unused_option())

    # before any use(): --compile is first
    unused = args.get_first_unused()
    self.assertIsNotNone(unused)
    self.assertEqual(flag_compile, unused)

    # after the first use(): --debug is next
    unused.use()
    unused = args.get_first_unused()
    self.assertIsNotNone(unused)
    self.assertEqual(flag_debug, unused)

    # after the second use(): nothing left
    unused.use()
    unused = args.get_first_unused()
    self.assertIsNone(unused)
def test_first_unused_Option_via_get_first_unused_option(self):
    """get_first_unused_option() yields the options in order as each is use()d."""

    option_mode = clasp.option('--mode', alias='-m')
    option_option = clasp.option('--option', alias='-o', default_value='default-value', on_multiple='replace')
    specifications = (option_mode, option_option)

    args = clasp.parse(('dir1/myprog', '--mode=verbose', '--option=ignore'), specifications)

    # program name is the basename of argv[0]
    self.assertEqual('myprog', args.program_name)
    self.assertIsInstance(args.options, tuple)
    self.assertTrue(args.options)
    self.assertEqual(2, len(args.options))

    # no flags were given at all
    self.assertIsNone(args.get_first_unused_flag())

    # before any use(): --mode is first
    unused = args.get_first_unused_option()
    self.assertIsNotNone(unused)
    self.assertEqual(option_mode, unused)

    # after the first use(): --option is next
    unused.use()
    unused = args.get_first_unused_option()
    self.assertIsNotNone(unused)
    self.assertEqual(option_option, unused)

    # after the second use(): nothing left
    unused.use()
    unused = args.get_first_unused_option()
    self.assertIsNone(unused)
def test_first_unused_Option_via_get_first_unused_flag_or_option(self):
    """get_first_unused_flag_or_option() yields options in order as each is use()d."""

    option_mode = clasp.option('--mode', alias='-m')
    option_option = clasp.option('--option', alias='-o', default_value='default-value', on_multiple='replace')
    specifications = (option_mode, option_option)

    args = clasp.parse(('dir1/myprog', '--mode=verbose', '--option=ignore'), specifications)

    # program name is the basename of argv[0]
    self.assertEqual('myprog', args.program_name)
    self.assertIsInstance(args.options, tuple)
    self.assertTrue(args.options)
    self.assertEqual(2, len(args.options))

    # no flags were given at all
    self.assertIsNone(args.get_first_unused_flag())

    # before any use(): --mode is first
    unused = args.get_first_unused_flag_or_option()
    self.assertIsNotNone(unused)
    self.assertEqual(option_mode, unused)

    # after the first use(): --option is next
    unused.use()
    unused = args.get_first_unused_flag_or_option()
    self.assertIsNotNone(unused)
    self.assertEqual(option_option, unused)

    # after the second use(): nothing left
    unused.use()
    unused = args.get_first_unused_flag_or_option()
    self.assertIsNone(unused)
def test_first_unused_Option_via_get_first_unused(self):
    """get_first_unused() yields options in order as each is use()d."""

    option_mode = clasp.option('--mode', alias='-m')
    option_option = clasp.option('--option', alias='-o', default_value='default-value', on_multiple='replace')
    specifications = (option_mode, option_option)

    args = clasp.parse(('dir1/myprog', '--mode=verbose', '--option=ignore'), specifications)

    # program name is the basename of argv[0]
    self.assertEqual('myprog', args.program_name)
    self.assertIsInstance(args.options, tuple)
    self.assertTrue(args.options)
    self.assertEqual(2, len(args.options))

    # no flags were given at all
    self.assertIsNone(args.get_first_unused_flag())

    # before any use(): --mode is first
    unused = args.get_first_unused()
    self.assertIsNotNone(unused)
    self.assertEqual(option_mode, unused)

    # after the first use(): --option is next
    unused.use()
    unused = args.get_first_unused()
    self.assertIsNotNone(unused)
    self.assertEqual(option_option, unused)

    # after the second use(): nothing left
    unused.use()
    unused = args.get_first_unused()
    self.assertIsNone(unused)
# Run the test suite when invoked directly.
if __name__ == '__main__':
    unittest.main()
| 37.066824
| 136
| 0.56723
| 6,206
| 62,680
| 5.617467
| 0.02417
| 0.253858
| 0.134932
| 0.068843
| 0.944926
| 0.933165
| 0.920171
| 0.915352
| 0.902788
| 0.894814
| 0
| 0.011224
| 0.306366
| 62,680
| 1,690
| 137
| 37.088757
| 0.790625
| 0.011615
| 0
| 0.851443
| 0
| 0
| 0.061155
| 0.008575
| 0
| 0
| 0
| 0
| 0.745331
| 1
| 0.03056
| false
| 0
| 0.005093
| 0
| 0.036503
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
a814113243dd7e14350d08a8430cd81ce14afbc9
| 213
|
py
|
Python
|
src/apps/episodes/admin.py
|
ckcollab/foolsnetwork
|
8043e5486d7bb95eea18d673e04787a43ac728ea
|
[
"MIT"
] | null | null | null |
src/apps/episodes/admin.py
|
ckcollab/foolsnetwork
|
8043e5486d7bb95eea18d673e04787a43ac728ea
|
[
"MIT"
] | null | null | null |
src/apps/episodes/admin.py
|
ckcollab/foolsnetwork
|
8043e5486d7bb95eea18d673e04787a43ac728ea
|
[
"MIT"
] | null | null | null |
from django.contrib import admin

from . import models

# Register every episode-domain model with the default admin site,
# in the same order as before.
for _model in (
    models.Character,
    models.CharacterAppearance,
    models.Episode,
    models.Notes,
):
    admin.site.register(_model)
| 21.3
| 47
| 0.826291
| 28
| 213
| 6.285714
| 0.428571
| 0.204545
| 0.386364
| 0.522727
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.070423
| 213
| 9
| 48
| 23.666667
| 0.888889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.333333
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 7
|
a82b93b7d0002f45668d38793a153bab0d51f352
| 11,444
|
py
|
Python
|
leetcode/1488. Avoid Flood in The City/main.py
|
huangshiyu13/AlgProblems
|
70e687e448ab3761d1f8d338ba10bed81b03d6e3
|
[
"MIT"
] | null | null | null |
leetcode/1488. Avoid Flood in The City/main.py
|
huangshiyu13/AlgProblems
|
70e687e448ab3761d1f8d338ba10bed81b03d6e3
|
[
"MIT"
] | null | null | null |
leetcode/1488. Avoid Flood in The City/main.py
|
huangshiyu13/AlgProblems
|
70e687e448ab3761d1f8d338ba10bed81b03d6e3
|
[
"MIT"
] | 1
|
2021-05-28T03:13:01.000Z
|
2021-05-28T03:13:01.000Z
|
# from collections import defaultdict,OrderedDict,Callable
class DrySuite():
    """Node describing a run of dry days and the lakes linked to it.

    NOTE(review): the attribute is stored as 'next_suit' while the
    parameter is named 'next_suite'; kept as-is to preserve the public
    attribute name.
    """

    def __init__(self, linked_lakes=None, dry_days=None, next_suite=None):
        self.linked_lakes = linked_lakes    # lakes this dry run could drain
        self.dry_days = dry_days            # the dry-day indices in this run
        self.next_suit = next_suite         # link to the following suite
class RainDay():
    """Doubly-linked node for a day on which a given lake receives rain."""

    def __init__(self, rain_lake=None, pre_day=None, next_day=None, drySuit=None):
        self.rain_lake = rain_lake    # lake id rained on this day
        self.pre_day = pre_day        # previous RainDay in the chain
        self.next_day = next_day      # next RainDay in the chain
        self.drySuit = drySuit        # associated DrySuite, if any
class ChainManager():
    """Placeholder coordinator for dry-day / rain-day chains (not yet implemented)."""

    def __init__(self):
        """No state is kept yet."""

    def new_dry_day(self, dry_day):
        """Record a dry day by index (currently a no-op)."""

    def new_rain_day(self, rain_lake):
        """Record rain on the given lake (currently a no-op)."""
class Solution:
    def avoidFlood(self, rains):
        """Return a tentative plan: -1 for every rain day, 1 for every dry day.

        Each day is fed into a ChainManager, but since the manager's
        methods are no-ops the returned list is just the default fill
        pattern -- the flood-avoidance logic is not implemented yet.
        """
        manager = ChainManager()
        plan = [-1 if lake > 0 else 1 for lake in rains]
        for day, lake in enumerate(rains):
            if lake == 0:
                manager.new_dry_day(day)
            else:
                manager.new_rain_day(lake)
        return plan
def check_ans(rains, ans):
    """Sanity-check a flood-avoidance answer against its rain schedule.

    Tracks the currently-full lakes; asserts every rain day is answered
    with -1, reports (via print) any lake rained on while already full,
    and drains a lake whenever a dry day's answer names it.
    """
    full = []
    for day in range(len(ans)):
        lake = rains[day]
        if lake > 0:
            assert ans[day] == -1, 'i={},rain={},ans={}'.format(day, lake, ans[day])
            if lake in full:
                # lake filled twice without an intervening drain: dump context
                print('wrong for i={},rain={}'.format(day, lake))
                for j, r in enumerate(rains):
                    if r == lake:
                        print(j, r)
                for j, l in enumerate(ans):
                    if l == lake:
                        print(j)
            full.append(lake)
        if lake == 0:
            if ans[day] in full:
                full.remove(ans[day])
if __name__ == '__main__':
    import time

    # Regression dataset actually used by the driver. The original script
    # contained six successive `rains = ...` reassignments (including two
    # very large literals) that were all dead stores, immediately overwritten
    # by this final value; the dead assignments have been removed.
    rains = [0, 11475, 23148, 0, 91836, 0, 0, 0, 0, 18987, 0, 3057, 0, 0, 0, 69217, 0, 0, 65289, 0, 0, 0, 35467, 33617,
             0, 0, 0, 0, 55602, 67935, 0, 0, 2530, 84750, 0, 0, 4411, 0, 0, 81775, 0, 46174, 33617, 0, 60322, 60801,
             56836, 72787, 4022, 91465, 21256, 0, 0, 0, 0, 0, 2530, 0, 14817, 57045, 0, 0, 0, 2583, 62414, 4452, 28481,
             54082, 36928, 25662, 14817, 95392, 22974, 1040, 0, 93616, 0, 0, 59731, 0, 61094, 0, 65368, 82028, 22053,
             54082, 0, 0, 4452, 81775, 98696, 0, 0, 5717, 91465, 0, 0, 20971, 0, 0, 0, 0, 0, 0, 0, 8644, 82028, 55602,
             0, 77965, 0, 59578, 0, 0, 0, 42529, 0, 0, 0, 0, 0, 36928, 0, 20971, 25671, 0, 0, 0, 59289, 0, 0, 0, 0, 0,
             0, 59289, 72266, 0, 0, 0, 92138, 77364, 59578, 46174, 0, 2583, 60322, 0, 0, 0, 0, 0, 0, 72787, 4022, 0,
             95082, 0, 0, 0, 0, 22974, 22053, 60801, 0, 67634, 27785, 0, 91836, 95392, 0, 77364, 28481, 4411, 0, 91988,
             0, 0, 0, 27785, 69763, 0, 77965, 7509, 67935, 0, 62414, 18987, 84750, 0, 0, 9118, 0, 9118, 64611, 0, 0,
             59731, 0, 0, 69217, 0, 65368, 0, 0, 90771, 0, 0, 56836, 8644, 0, 25662, 1040, 7509, 90771, 0, 0, 5717, 0,
             0, 0, 93616, 0, 0, 92138, 91988, 0, 0, 61094, 57045, 0, 0, 0, 95082, 0, 23148, 0, 98696, 25671, 11475, 0,
             35467, 21256, 65289, 68210, 69763, 0, 0, 72266, 3057, 67634, 64611, 42529, 68210]

    solution = Solution()
    t = time.time()
    ans = solution.avoidFlood(rains)
    print(time.time() - t)  # elapsed seconds for the solve
    print(ans)
    check_ans(rains, ans)   # verify the schedule floods no lake
| 66.923977
| 120
| 0.521409
| 1,965
| 11,444
| 3.011196
| 0.141985
| 0.141288
| 0.092276
| 0.054081
| 0.83015
| 0.825418
| 0.825418
| 0.825418
| 0.825418
| 0.825418
| 0
| 0.61682
| 0.306973
| 11,444
| 170
| 121
| 67.317647
| 0.12924
| 0.01468
| 0
| 0.42069
| 0
| 0
| 0.004349
| 0
| 0
| 0
| 0
| 0
| 0.006897
| 1
| 0.048276
| false
| 0.02069
| 0.006897
| 0
| 0.089655
| 0.034483
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
b5219bd1895f4dd93fd8f88b4fcbf7fa4741e60c
| 174
|
py
|
Python
|
readbin/apps/accounts/admin/__init__.py
|
asnelzin/readbin
|
1b546f71955cf5753d63aaf7d7fda0d466fc1332
|
[
"MIT"
] | null | null | null |
readbin/apps/accounts/admin/__init__.py
|
asnelzin/readbin
|
1b546f71955cf5753d63aaf7d7fda0d466fc1332
|
[
"MIT"
] | null | null | null |
readbin/apps/accounts/admin/__init__.py
|
asnelzin/readbin
|
1b546f71955cf5753d63aaf7d7fda0d466fc1332
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from readbin.apps.accounts.admin.models import UserAdmin
from readbin.apps.accounts.models import User
# Register the project's custom User model with its admin configuration so it
# appears in the Django admin site.
admin.site.register(User, UserAdmin)
| 29
| 56
| 0.83908
| 25
| 174
| 5.84
| 0.52
| 0.150685
| 0.205479
| 0.315068
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.086207
| 174
| 5
| 57
| 34.8
| 0.918239
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.75
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
b52a1e100ba3b366d1701bc96940f5b957ec3083
| 124
|
py
|
Python
|
bin/amp4e_events_input_lib/__init__.py
|
kbalante/amp4e_splunk_events_input
|
ff5d15504aff79f0c6b8f886edc946ac4ac28d8f
|
[
"BSD-2-Clause"
] | 9
|
2017-07-31T16:13:51.000Z
|
2021-01-06T15:02:36.000Z
|
bin/amp4e_events_input_lib/__init__.py
|
kbalante/amp4e_splunk_events_input
|
ff5d15504aff79f0c6b8f886edc946ac4ac28d8f
|
[
"BSD-2-Clause"
] | 51
|
2017-10-24T17:25:44.000Z
|
2022-03-31T16:47:58.000Z
|
bin/amp4e_events_input_lib/__init__.py
|
kbalante/amp4e_splunk_events_input
|
ff5d15504aff79f0c6b8f886edc946ac4ac28d8f
|
[
"BSD-2-Clause"
] | 12
|
2017-08-01T08:59:39.000Z
|
2021-02-24T21:10:46.000Z
|
import sys
import os
# Prepend the parent directory of this file's directory to the import path so
# sibling modules resolve regardless of the current working directory.
# NOTE(review): inserting at position 0 lets those modules shadow same-named
# installed packages -- presumably intentional for bundled dependencies.
sys.path.insert(0, os.path.realpath(os.path.join(os.path.dirname(os.path.realpath(__file__)), '..')))
| 24.8
| 101
| 0.725806
| 21
| 124
| 4.095238
| 0.47619
| 0.27907
| 0.325581
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.008621
| 0.064516
| 124
| 4
| 102
| 31
| 0.732759
| 0
| 0
| 0
| 0
| 0
| 0.016129
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
b56069ba379ea3ae385e5a2d93953b8a0a347447
| 198
|
py
|
Python
|
halotools/empirical_models/phase_space_models/analytic_models/satellites/nfw/__init__.py
|
pllim/halotools
|
6499cff09e7e0f169e4f425ee265403f6be816e8
|
[
"BSD-3-Clause"
] | 83
|
2015-01-15T14:54:16.000Z
|
2021-12-09T11:28:02.000Z
|
halotools/empirical_models/phase_space_models/analytic_models/satellites/nfw/__init__.py
|
pllim/halotools
|
6499cff09e7e0f169e4f425ee265403f6be816e8
|
[
"BSD-3-Clause"
] | 579
|
2015-01-14T15:57:37.000Z
|
2022-01-13T18:58:44.000Z
|
halotools/empirical_models/phase_space_models/analytic_models/satellites/nfw/__init__.py
|
pllim/halotools
|
6499cff09e7e0f169e4f425ee265403f6be816e8
|
[
"BSD-3-Clause"
] | 70
|
2015-01-14T15:15:58.000Z
|
2021-12-22T18:18:31.000Z
|
from .nfw_profile import NFWProfile
from .nfw_phase_space import NFWPhaseSpace
from .biased_nfw_phase_space import BiasedNFWPhaseSpace
from .sfr_biased_nfw_phase_space import SFRBiasedNFWPhaseSpace
| 39.6
| 62
| 0.89899
| 26
| 198
| 6.461538
| 0.461538
| 0.142857
| 0.232143
| 0.339286
| 0.297619
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.080808
| 198
| 4
| 63
| 49.5
| 0.923077
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
a916e22e3f9256f2465eb008228ffa6d819f256e
| 34,677
|
py
|
Python
|
post_optimization_studies/mad_analyses/vbf_eff_flow_chart/Output/Histos/MadAnalysis5job_0/selection_2.py
|
sheride/axion_pheno
|
7d3fc08f5ae5b17a3500eba19a2e43f87f076ce5
|
[
"MIT"
] | null | null | null |
post_optimization_studies/mad_analyses/vbf_eff_flow_chart/Output/Histos/MadAnalysis5job_0/selection_2.py
|
sheride/axion_pheno
|
7d3fc08f5ae5b17a3500eba19a2e43f87f076ce5
|
[
"MIT"
] | null | null | null |
post_optimization_studies/mad_analyses/vbf_eff_flow_chart/Output/Histos/MadAnalysis5job_0/selection_2.py
|
sheride/axion_pheno
|
7d3fc08f5ae5b17a3500eba19a2e43f87f076ce5
|
[
"MIT"
] | null | null | null |
def selection_2():
# Library import
import numpy
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
# Library version
matplotlib_version = matplotlib.__version__
numpy_version = numpy.__version__
# Histo binning
xBinning = numpy.linspace(-8.0,8.0,161,endpoint=True)
# Creating data sequence: middle of each bin
xData = numpy.array([-7.95,-7.85,-7.75,-7.65,-7.55,-7.45,-7.35,-7.25,-7.15,-7.05,-6.95,-6.85,-6.75,-6.65,-6.55,-6.45,-6.35,-6.25,-6.15,-6.05,-5.95,-5.85,-5.75,-5.65,-5.55,-5.45,-5.35,-5.25,-5.15,-5.05,-4.95,-4.85,-4.75,-4.65,-4.55,-4.45,-4.35,-4.25,-4.15,-4.05,-3.95,-3.85,-3.75,-3.65,-3.55,-3.45,-3.35,-3.25,-3.15,-3.05,-2.95,-2.85,-2.75,-2.65,-2.55,-2.45,-2.35,-2.25,-2.15,-2.05,-1.95,-1.85,-1.75,-1.65,-1.55,-1.45,-1.35,-1.25,-1.15,-1.05,-0.95,-0.85,-0.75,-0.65,-0.55,-0.45,-0.35,-0.25,-0.15,-0.05,0.05,0.15,0.25,0.35,0.45,0.55,0.65,0.75,0.85,0.95,1.05,1.15,1.25,1.35,1.45,1.55,1.65,1.75,1.85,1.95,2.05,2.15,2.25,2.35,2.45,2.55,2.65,2.75,2.85,2.95,3.05,3.15,3.25,3.35,3.45,3.55,3.65,3.75,3.85,3.95,4.05,4.15,4.25,4.35,4.45,4.55,4.65,4.75,4.85,4.95,5.05,5.15,5.25,5.35,5.45,5.55,5.65,5.75,5.85,5.95,6.05,6.15,6.25,6.35,6.45,6.55,6.65,6.75,6.85,6.95,7.05,7.15,7.25,7.35,7.45,7.55,7.65,7.75,7.85,7.95])
# Creating weights for histo: y3_PHI_0
y3_PHI_0_weights = numpy.array([0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,6.83712214407,16.9904494479,17.0764253742,17.539056978,17.4121410867,17.0682373812,17.2360932375,16.9863534514,17.1255533321,17.1419293181,16.4459339142,17.2197172515,16.7202376793,16.892189532,17.1501173111,16.8880975355,17.1951532725,17.0477653988,16.9945414443,16.8103096021,16.7243336758,17.4080450902,17.1705892936,17.4817370271,16.892189532,17.7028208377,17.4612690446,16.7857456232,16.5400978336,16.3845259668,16.9740734619,17.3015971814,17.1050813497,17.0191054233,17.4162330832,17.1255533321,16.9740734619,17.4612690446,17.1705892936,17.1787772866,17.1009893532,17.1173653392,17.2770332024,16.9495094829,17.3097851743,16.908565518,16.6915817038,16.7611816442,17.0395774058,16.7202376793,17.3015971814,17.2770332024,17.1091773462,17.1419293181,17.3752931182,16.7611816442,16.7939336162,17.2197172515,16.7775576302,16.8389655776,17.1746812901,16.6260737599,17.2279052445,7.31203373732,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0])
# Creating weights for histo: y3_PHI_1
y3_PHI_1_weights = numpy.array([0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,2.21129243499,5.92969547579,5.27387254205,5.56527641345,5.97794902812,6.24453518471,5.92925487724,5.12795831796,5.7591477872,5.65006760206,5.70990489086,6.00256246541,5.2249100266,5.83066894851,5.91766713532,5.69816894761,5.68570000859,6.35377959296,5.84436755804,6.16014854569,5.69836921968,5.97753246221,5.699406629,5.77149255754,5.96493134363,6.0626601079,5.78437806247,5.62484934312,5.80754954086,5.8693454905,5.674124283,5.64956692189,5.8446439335,5.63693776521,6.10001084878,5.90517416365,5.8315100912,5.85631178424,5.89364249792,6.36841547577,6.087525888,5.17589143498,6.07518111766,5.74757206161,5.79623016352,5.57748499878,5.86903707151,5.52827414597,5.45573960799,6.28124906042,5.68692967909,5.71011717925,6.12276576127,6.06234367803,5.73429802887,5.54084322102,6.12309020202,5.27303139936,5.7233151086,5.73508309538,6.31891623117,5.96569237749,5.49287405504,1.96824505576,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0])
# Creating weights for histo: y3_PHI_2
y3_PHI_2_weights = numpy.array([0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,14.47788304,35.5219335644,36.3544210331,37.740860239,35.7933373135,36.7975377963,35.5304539282,36.5980695527,35.8544095618,36.6263660856,37.3275275601,34.789153359,37.3676542585,35.8145679773,35.5216691109,34.959391218,35.4731129546,34.6774051937,35.7949901484,35.1195137277,36.4563720197,35.1514299689,35.743537399,35.6331073696,35.9951443185,36.384188589,35.1204847682,36.835540602,36.4649337043,36.1331932185,35.8620869797,35.5925591982,36.2248139872,36.0751126013,35.4723939714,36.0541215985,35.9240641551,36.3345250334,36.3748748646,35.3728726521,34.869650549,36.5868592002,36.1323296123,34.9893695104,35.9034780968,36.0032969264,36.7159290749,35.3711206471,35.5914972518,35.2423193582,34.858700518,35.9848926102,35.3999915402,36.244668666,36.9781141375,34.6576868738,35.5218633189,35.8521534422,35.3203455601,35.6007531271,36.033378521,35.9913345341,34.9809731093,15.561589012,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0])
# Creating weights for histo: y3_PHI_3
y3_PHI_3_weights = numpy.array([0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,13.8005830699,33.0183245071,32.0848778871,32.7266907399,32.3864242612,31.9233348966,32.0943761145,32.8475961051,31.9517970783,31.9725566769,32.2595428941,32.0234603504,32.0446058907,33.0945500159,32.4417723575,32.502418011,32.5292673627,32.1665715799,33.151405316,32.1771138809,32.2000916285,32.6056269355,32.1167851059,31.9070562837,32.4421461115,32.9414652415,32.2929816919,31.5884269131,32.3923515125,32.7103633765,33.1111780076,32.0511831491,32.8925278392,31.7957344593,33.4798091669,32.5187819373,32.7771028456,32.8697979068,32.3026180457,32.2936723243,31.9228798917,31.6229504101,32.7058945783,33.2926843382,32.8094285063,32.6280074891,32.7882667158,32.6274021701,32.0345023444,32.5726715804,33.2609558713,31.7827668195,32.2709830173,31.9837083596,32.7378464851,32.1657712587,32.8214942613,32.6933535057,32.1281033529,32.4242993568,32.4466555351,32.3586161491,32.5452290972,13.8178366933,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0])
# Creating weights for histo: y3_PHI_4
y3_PHI_4_weights = numpy.array([0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,1.80595139246,4.3845655694,4.59567483129,4.40025005603,4.50294275918,4.50700717765,4.56536805812,4.46258717232,4.55530721953,4.55230099285,4.46355317316,4.47830773367,4.39837016229,4.49100202684,4.45760485265,4.47236342147,4.38064945812,4.43992423151,4.38165955029,4.35099603822,4.27309468437,4.42611963863,4.52661579216,4.38064945812,4.41723323258,4.38560371968,4.48816414886,4.38361961008,4.50298685051,4.47724152528,4.49212835977,4.50399694267,4.42401127166,4.52266360616,4.47028712091,4.45369675798,4.36985510022,4.50484269444,4.43783189775,4.48736649672,4.49817287953,4.4546427173,4.56616571027,4.4823080193,4.35200613038,4.34694364467,4.40721648531,4.38072962417,4.46839520226,4.36289267924,4.4357716304,4.39447810082,4.33727962799,4.49119843365,4.51679545169,4.42878515961,4.45170062347,4.53658844811,4.52578206529,4.40316409175,4.43187155233,4.34319187378,4.42126558463,1.78709192963,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0])
# Creating weights for histo: y3_PHI_5
y3_PHI_5_weights = numpy.array([0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.309295417963,0.746123601694,0.736291192735,0.761778637208,0.76256002865,0.753448196234,0.760770390185,0.758503434777,0.749152343742,0.738087632867,0.77618816757,0.746652131185,0.773859196968,0.767297989459,0.767317994361,0.770591196301,0.769835011034,0.777929394174,0.788726839568,0.7660540847,0.751207647295,0.739360344683,0.761026853019,0.789998751189,0.749679672938,0.792282510714,0.783959671603,0.748163301425,0.755459489004,0.762053904649,0.759014760052,0.751448506306,0.75623207829,0.780920126909,0.767578058077,0.759008758582,0.752718017339,0.794066147709,0.753948718863,0.76182264799,0.772590886229,0.743145672096,0.77842031445,0.760274668732,0.756470136615,0.752688009987,0.766797866928,0.735083696897,0.768297434325,0.779412957649,0.776137355121,0.769590951239,0.75645893387,0.734571571425,0.772125572226,0.759774146103,0.78267815763,0.764544514853,0.75875389614,0.744612831553,0.787734196369,0.752961677036,0.750717127117,0.335506799801,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0])
# Creating weights for histo: y3_PHI_6
y3_PHI_6_weights = numpy.array([0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0741764853965,0.197273668932,0.196089158804,0.20959051494,0.190692934929,0.177253458426,0.194665427414,0.188387293812,0.195844539319,0.199267412707,0.180112217488,0.185508041495,0.190959947044,0.197020852145,0.19465902952,0.190372740318,0.199254416984,0.184980615093,0.183840790258,0.194399614904,0.19065224832,0.190393433507,0.199555317945,0.191560649327,0.186323073234,0.196976866622,0.191562148834,0.194698416556,0.199307799414,0.193827003375,0.188668201354,0.194998917648,0.205520854436,0.183539689363,0.184656321832,0.199823529666,0.201019536009,0.187800287021,0.174619925233,0.186959463771,0.200433228988,0.188930315081,0.19008653452,0.202400581449,0.196407354073,0.199812133417,0.193805410482,0.18691507838,0.183276276063,0.198091699684,0.196431446143,0.194732905204,0.192985980191,0.198415992945,0.1866871534,0.188391592397,0.180359736019,0.183834292397,0.192993377756,0.197775103889,0.19696557034,0.17925170072,0.185256924149,0.0818637252023,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0])
# Creating weights for histo: y3_PHI_7
y3_PHI_7_weights = numpy.array([0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.00425328456669,0.0106696735777,0.0111867527415,0.0103038911133,0.0104093151833,0.010194146316,0.010666543041,0.0101881199282,0.0112068644489,0.0102580268653,0.0114864335262,0.00999756789145,0.00982175075015,0.0109945159983,0.00978161534238,0.0105829614839,0.0106876312076,0.00987114366192,0.0108348963459,0.0111003130505,0.0106893913487,0.010535265851,0.0110144600732,0.0108368031654,0.0107973885773,0.0105792190886,0.0106663502636,0.0104933158216,0.0112952235319,0.011229972587,0.0106479861249,0.0109865576461,0.0110091545051,0.0105328267984,0.011293593306,0.0105612656495,0.0103673693446,0.010819813613,0.011400777517,0.0109655281508,0.0112494137645,0.0113174264546,0.0109243324676,0.0111240162839,0.0106429571503,0.0103644022497,0.0111835677243,0.00997326537191,0.0107111165188,0.0111810029473,0.0108192059453,0.0109929025356,0.0111669385818,0.0112523682871,0.0107338642471,0.0105368290239,0.011056313714,0.0113148365327,0.0112719184256,0.0113780381704,0.0115964171997,0.0103245182906,0.0104705387577,0.00425447056652,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0])
# Creating weights for histo: y3_PHI_8
y3_PHI_8_weights = numpy.array([0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.000454216896718,0.000710402331465,0.000793041304089,0.000510495246345,0.000823126733144,0.000707465682248,0.00107861358108,0.00071022824179,0.000794126690828,0.000766573286181,0.00101987985403,0.000622470111758,0.000847971795597,0.000595482201475,0.000823305130499,0.000791025607079,0.000792149911476,0.000767674418232,0.000766641763434,0.000736250043499,0.000850930577375,0.00113302418021,0.000851791073526,0.000791833222749,0.000762406571604,0.00110646391854,0.000738337337116,0.000710370692301,0.00079417971985,0.000738151067105,0.000934747329039,0.000650998983042,0.000594642352478,0.000652568760934,0.000681543105713,0.000681314055986,0.00107653549698,0.0010486058388,0.000937558609923,0.000907795217059,0.000596488267492,0.000650318221112,0.00093479129708,0.000739507837663,0.000764305812721,0.000651886810678,0.000737939693719,0.000737470899337,0.000852118754263,0.000652811476403,0.000880068613981,0.000565727126889,0.00079433078572,0.000679157988035,0.00076235101739,0.000566737649126,0.000965099685694,0.000650389223556,0.000708373414336,0.000652243011226,0.000653148515203,0.000878072524342,0.000766320469946,0.00027945032146,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0])
# Creating weights for histo: y3_PHI_9
y3_PHI_9_weights = numpy.array([0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,13.0375632468,20.8492540491,26.0397256058,31.2795107607,28.6898585245,20.8462200169,39.1146355202,23.4744725259,28.6622522923,20.8521111922,26.0622712722,31.2722890719,26.0628865386,28.6973109382,18.2535337485,36.4782191958,31.2908508887,23.4562683327,33.8790534036,15.6400476319,33.891612528,26.066858852,28.6700700204,2.61222059059,33.8848945885,44.2876796575,31.2954923043,20.8576139806,28.6690317585,36.4897054495,18.234483564,20.8459508378,39.0762582814,31.2699818231,23.4645205926,36.4978846466,33.8871095474,33.8750041819,39.1031761844,20.8466814666,46.903715284,39.1056372498,33.8992725941,31.2703394467,23.4437168991,31.2905855551,33.9025142787,26.0528461608,26.0668896153,26.0773914429,36.4982230431,28.6876589473,28.6677435445,18.275421849,26.0754610447,26.0716694658,26.0522655032,23.4467086317,31.2816142026,26.0467780964,23.4670893297,23.4439553148,10.4344289866,13.0373555944,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0])
# Creating weights for histo: y3_PHI_10
y3_PHI_10_weights = numpy.array([0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,55.824957344,129.544118577,131.663109299,124.267438071,107.435172771,111.624905265,112.716124117,111.632485044,117.936975895,131.63155895,125.360773105,113.753553471,129.546927328,115.843494786,148.526501713,121.130063633,132.684609574,124.27909631,125.329261232,139.031193209,136.924053107,119.035081958,141.137871599,127.446328152,155.847067718,129.539462977,132.684801955,109.553855684,113.753438043,122.171802303,139.024767711,131.662878443,126.374039509,133.766132466,133.768594932,110.581704505,113.74774359,120.083977171,137.98576084,116.90431757,125.32102736,124.299450133,107.41993626,141.136909698,141.133446855,124.27890393,120.106177844,131.629558196,135.870040581,109.526960935,124.293640252,124.256703257,143.250821583,134.802869251,147.434320959,144.283864669,140.086860204,122.193041075,131.651104776,134.834535029,148.523500582,125.336225395,145.352459612,49.4994201007,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0])
# Creating weights for histo: y3_PHI_11
y3_PHI_11_weights = numpy.array([0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,45.6054480031,114.928738313,116.795016364,116.774728644,102.270700037,115.408112527,116.55940217,105.726374879,111.25312615,108.954312389,116.998393064,112.172797665,103.192830669,104.80520484,116.09001812,112.868420523,117.241423034,112.395386221,105.25326373,115.631584828,113.786516666,112.156621283,110.787315506,104.111618439,121.155108508,116.326631331,111.480440823,106.418078519,109.872792769,107.337634762,106.641819786,109.413245189,101.80704112,113.32835234,109.405137786,102.489484645,105.728565031,108.719466669,105.951076739,110.330227044,108.951738,110.795691875,105.503325239,113.566041414,116.995127049,107.795261156,107.326568734,108.720004601,116.089095951,108.489423913,114.946528491,113.777026009,110.793962808,108.025957115,110.103796117,109.171636896,109.868681431,123.001713619,101.806080528,110.790427827,110.791695809,105.265905131,115.859053195,49.2922414652,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0])
# Creating weights for histo: y3_PHI_12
y3_PHI_12_weights = numpy.array([0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,4.37396255512,10.772356349,10.3562864185,10.5219911253,10.8840893031,10.3567403829,9.96874315036,9.69200182462,9.96852386247,10.5502561811,9.63670665023,9.94164763006,10.2455768078,9.47122123128,10.2455845022,9.60809535043,9.88530218206,9.47073648961,9.38721857818,9.80251138313,10.7154107446,10.4124202729,10.0238690499,10.1061520242,10.1909279557,10.1899430837,10.6606580192,10.2452959655,10.108464165,10.6048473259,9.63635655902,10.9108962867,10.8543969524,10.0782716841,10.3858210357,10.9087880452,10.3276328,10.7708674995,9.22106760123,9.96940486121,10.2168308576,9.96932022377,9.77473106905,10.5788367036,10.1353250088,10.5781749928,10.7726371913,10.4398620372,10.2460807853,9.69275971437,9.83078028604,9.49720492347,10.7999673882,10.024365333,10.3557708996,9.35977296668,10.9938409719,9.66390604374,10.5503023469,9.27525864183,9.4427830535,10.1076370265,10.1363752824,4.31948682498,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0])
# Creating weights for histo: y3_PHI_13
y3_PHI_13_weights = numpy.array([0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.544093096752,1.28067808623,1.36069528394,1.46230950955,1.4115315237,1.35107064683,1.47193293304,1.64296335956,1.51203589108,1.33077522959,1.65374275894,1.30052992427,1.39142603845,1.58202977653,1.50228928486,1.3812394939,1.47223815922,1.61319986257,1.43163882938,1.31083176301,1.37085330836,1.48218015875,1.6334048649,1.66348026299,1.36127418211,1.41151453298,1.47196448724,1.4519172559,1.47189045624,1.50230081428,1.36075960596,1.42178724476,1.44168034599,1.40172423632,1.41137617996,1.44193095913,1.45200281632,1.32082291427,1.34077184214,1.52241722213,1.62310788065,1.37087333314,1.36120803965,1.4823622022,1.50208782345,1.37080961793,1.30077325568,1.34107100021,1.13912656211,1.52217692478,1.26029528813,1.69404839203,1.36100597143,1.42183457606,1.20990626845,1.21976877566,1.61311248171,1.36112733372,1.4114999695,1.40121815554,1.45197186893,1.46186714397,1.56233631629,0.534265117107,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0])
# Creating weights for histo: y3_PHI_14
y3_PHI_14_weights = numpy.array([0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.147146226343,0.319770130385,0.345159511045,0.339473797329,0.342287337262,0.339515733613,0.407409461985,0.347945773156,0.356413593829,0.305532069376,0.319627200756,0.36780879798,0.353638719919,0.345173092245,0.314065641525,0.367860468099,0.345219337579,0.396047806861,0.35649508103,0.311253525118,0.362163520075,0.365019958134,0.319660518941,0.356491695349,0.356480807304,0.31688256714,0.387650162136,0.413006994083,0.299898949146,0.331025482799,0.313984731428,0.370607371661,0.367848425845,0.333869917077,0.373433146216,0.367751587654,0.319732118414,0.325397325672,0.322507415533,0.359306158649,0.356473574257,0.316859252105,0.350801403268,0.331018172805,0.319768629912,0.33945875413,0.3734049835,0.424399812868,0.288521404403,0.291459060348,0.356497466397,0.356459415952,0.333771963151,0.370597907142,0.350831374245,0.319673292195,0.325341346503,0.350848456548,0.364952436869,0.384782720612,0.34798170755,0.345079024158,0.393243077395,0.172559768459,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0])
# Creating weights for histo: y3_PHI_15
y3_PHI_15_weights = numpy.array([0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0075932686663,0.0258565337607,0.0244568608516,0.0244085379947,0.0182325128962,0.0198093223428,0.0304366873514,0.0259473060433,0.0137504586048,0.0320268154235,0.0121477682444,0.0228123828606,0.0152172918943,0.0259103636979,0.0197830987137,0.0198048670439,0.0152639484458,0.0243029225944,0.0182854210474,0.0182573302108,0.0228626556493,0.0258739413342,0.0197956491842,0.0152406792589,0.0228914200987,0.0243383877188,0.0289325571834,0.0228466780258,0.0137383335739,0.0212813790861,0.0197575132441,0.0319239653811,0.0182884936673,0.0213438832657,0.0121743345888,0.0273807068647,0.019804453422,0.0243749637134,0.0182361645868,0.0304870665001,0.0198245790824,0.00915037907394,0.0273849258082,0.0228432981439,0.0213510448336,0.0198245909002,0.0213585727524,0.033496461342,0.0243301743694,0.0213476176806,0.0213440841677,0.0213439069012,0.0167507419469,0.0197864431423,0.02592078697,0.0227973152052,0.0137294702473,0.0152949819068,0.0167867979597,0.00761402539554,0.0197572768887,0.016818942291,0.0121713565111,0.0061254899553,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0])
# Creating weights for histo: y3_PHI_16
y3_PHI_16_weights = numpy.array([0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.000180406121736,0.00198429969742,0.00144412419203,0.00216653000546,0.000722458574535,0.00126368968713,0.000902756362704,0.00108315312608,0.00144423202495,0.00144469339577,0.000903043275279,0.0018064693574,0.00126314127974,0.00180620170071,0.00108213410504,0.00144476464252,0.0016251899046,0.00126432821203,0.00108383016274,0.00198679641451,0.00108400154005,0.00126441601883,0.00126358262445,0.000722492079762,0.00144416848055,0.000541889663611,0.00198506877717,0.000903866656604,0.000902182537552,0.000902539156405,0.00198695161688,0.00162621624287,0.000541534585228,0.00234719635113,0.0019866338949,0.00144415615679,0.00270892032189,0.00198758513525,0.00126335155392,0.00198571423419,0.000722191302955,0.00108328599164,0.00180613430514,0.000722770904869,0.00144381879381,0.000901796264648,0.00108311345898,0.00144377643088,0.00162588042037,0.00162413776345,0.00306913424673,0.00126448033347,0.00126420998094,0.000541503005589,0.00180468587802,0.00144459788662,0.00126438906061,0.0012652555751,0.00144427862417,0.00144338322586,0.000902232602834,0.00216762720537,0.00126452616245,0.000360782781979,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0])
# Creating a new Canvas
fig = plt.figure(figsize=(12,6),dpi=80)
frame = gridspec.GridSpec(1,1,right=0.7)
pad = fig.add_subplot(frame[0])
# Creating a new Stack
pad.hist(x=xData, bins=xBinning, weights=y3_PHI_0_weights+y3_PHI_1_weights+y3_PHI_2_weights+y3_PHI_3_weights+y3_PHI_4_weights+y3_PHI_5_weights+y3_PHI_6_weights+y3_PHI_7_weights+y3_PHI_8_weights+y3_PHI_9_weights+y3_PHI_10_weights+y3_PHI_11_weights+y3_PHI_12_weights+y3_PHI_13_weights+y3_PHI_14_weights+y3_PHI_15_weights+y3_PHI_16_weights,\
label="$bg\_dip\_1600\_inf$", histtype="step", rwidth=1.0,\
color=None, edgecolor="#e5e5e5", linewidth=1, linestyle="solid",\
bottom=None, cumulative=False, normed=False, align="mid", orientation="vertical")
pad.hist(x=xData, bins=xBinning, weights=y3_PHI_0_weights+y3_PHI_1_weights+y3_PHI_2_weights+y3_PHI_3_weights+y3_PHI_4_weights+y3_PHI_5_weights+y3_PHI_6_weights+y3_PHI_7_weights+y3_PHI_8_weights+y3_PHI_9_weights+y3_PHI_10_weights+y3_PHI_11_weights+y3_PHI_12_weights+y3_PHI_13_weights+y3_PHI_14_weights+y3_PHI_15_weights,\
label="$bg\_dip\_1200\_1600$", histtype="step", rwidth=1.0,\
color=None, edgecolor="#f2f2f2", linewidth=1, linestyle="solid",\
bottom=None, cumulative=False, normed=False, align="mid", orientation="vertical")
pad.hist(x=xData, bins=xBinning, weights=y3_PHI_0_weights+y3_PHI_1_weights+y3_PHI_2_weights+y3_PHI_3_weights+y3_PHI_4_weights+y3_PHI_5_weights+y3_PHI_6_weights+y3_PHI_7_weights+y3_PHI_8_weights+y3_PHI_9_weights+y3_PHI_10_weights+y3_PHI_11_weights+y3_PHI_12_weights+y3_PHI_13_weights+y3_PHI_14_weights,\
label="$bg\_dip\_800\_1200$", histtype="step", rwidth=1.0,\
color=None, edgecolor="#ccc6aa", linewidth=1, linestyle="solid",\
bottom=None, cumulative=False, normed=False, align="mid", orientation="vertical")
pad.hist(x=xData, bins=xBinning, weights=y3_PHI_0_weights+y3_PHI_1_weights+y3_PHI_2_weights+y3_PHI_3_weights+y3_PHI_4_weights+y3_PHI_5_weights+y3_PHI_6_weights+y3_PHI_7_weights+y3_PHI_8_weights+y3_PHI_9_weights+y3_PHI_10_weights+y3_PHI_11_weights+y3_PHI_12_weights+y3_PHI_13_weights,\
label="$bg\_dip\_600\_800$", histtype="step", rwidth=1.0,\
color=None, edgecolor="#ccc6aa", linewidth=1, linestyle="solid",\
bottom=None, cumulative=False, normed=False, align="mid", orientation="vertical")
pad.hist(x=xData, bins=xBinning, weights=y3_PHI_0_weights+y3_PHI_1_weights+y3_PHI_2_weights+y3_PHI_3_weights+y3_PHI_4_weights+y3_PHI_5_weights+y3_PHI_6_weights+y3_PHI_7_weights+y3_PHI_8_weights+y3_PHI_9_weights+y3_PHI_10_weights+y3_PHI_11_weights+y3_PHI_12_weights,\
label="$bg\_dip\_400\_600$", histtype="step", rwidth=1.0,\
color=None, edgecolor="#c1bfa8", linewidth=1, linestyle="solid",\
bottom=None, cumulative=False, normed=False, align="mid", orientation="vertical")
pad.hist(x=xData, bins=xBinning, weights=y3_PHI_0_weights+y3_PHI_1_weights+y3_PHI_2_weights+y3_PHI_3_weights+y3_PHI_4_weights+y3_PHI_5_weights+y3_PHI_6_weights+y3_PHI_7_weights+y3_PHI_8_weights+y3_PHI_9_weights+y3_PHI_10_weights+y3_PHI_11_weights,\
label="$bg\_dip\_200\_400$", histtype="step", rwidth=1.0,\
color=None, edgecolor="#bab5a3", linewidth=1, linestyle="solid",\
bottom=None, cumulative=False, normed=False, align="mid", orientation="vertical")
pad.hist(x=xData, bins=xBinning, weights=y3_PHI_0_weights+y3_PHI_1_weights+y3_PHI_2_weights+y3_PHI_3_weights+y3_PHI_4_weights+y3_PHI_5_weights+y3_PHI_6_weights+y3_PHI_7_weights+y3_PHI_8_weights+y3_PHI_9_weights+y3_PHI_10_weights,\
label="$bg\_dip\_100\_200$", histtype="step", rwidth=1.0,\
color=None, edgecolor="#b2a596", linewidth=1, linestyle="solid",\
bottom=None, cumulative=False, normed=False, align="mid", orientation="vertical")
pad.hist(x=xData, bins=xBinning, weights=y3_PHI_0_weights+y3_PHI_1_weights+y3_PHI_2_weights+y3_PHI_3_weights+y3_PHI_4_weights+y3_PHI_5_weights+y3_PHI_6_weights+y3_PHI_7_weights+y3_PHI_8_weights+y3_PHI_9_weights,\
label="$bg\_dip\_0\_100$", histtype="step", rwidth=1.0,\
color=None, edgecolor="#b7a39b", linewidth=1, linestyle="solid",\
bottom=None, cumulative=False, normed=False, align="mid", orientation="vertical")
pad.hist(x=xData, bins=xBinning, weights=y3_PHI_0_weights+y3_PHI_1_weights+y3_PHI_2_weights+y3_PHI_3_weights+y3_PHI_4_weights+y3_PHI_5_weights+y3_PHI_6_weights+y3_PHI_7_weights+y3_PHI_8_weights,\
label="$bg\_vbf\_1600\_inf$", histtype="step", rwidth=1.0,\
color=None, edgecolor="#ad998c", linewidth=1, linestyle="solid",\
bottom=None, cumulative=False, normed=False, align="mid", orientation="vertical")
pad.hist(x=xData, bins=xBinning, weights=y3_PHI_0_weights+y3_PHI_1_weights+y3_PHI_2_weights+y3_PHI_3_weights+y3_PHI_4_weights+y3_PHI_5_weights+y3_PHI_6_weights+y3_PHI_7_weights,\
label="$bg\_vbf\_1200\_1600$", histtype="step", rwidth=1.0,\
color=None, edgecolor="#9b8e82", linewidth=1, linestyle="solid",\
bottom=None, cumulative=False, normed=False, align="mid", orientation="vertical")
pad.hist(x=xData, bins=xBinning, weights=y3_PHI_0_weights+y3_PHI_1_weights+y3_PHI_2_weights+y3_PHI_3_weights+y3_PHI_4_weights+y3_PHI_5_weights+y3_PHI_6_weights,\
label="$bg\_vbf\_800\_1200$", histtype="step", rwidth=1.0,\
color=None, edgecolor="#876656", linewidth=1, linestyle="solid",\
bottom=None, cumulative=False, normed=False, align="mid", orientation="vertical")
pad.hist(x=xData, bins=xBinning, weights=y3_PHI_0_weights+y3_PHI_1_weights+y3_PHI_2_weights+y3_PHI_3_weights+y3_PHI_4_weights+y3_PHI_5_weights,\
label="$bg\_vbf\_600\_800$", histtype="step", rwidth=1.0,\
color=None, edgecolor="#afcec6", linewidth=1, linestyle="solid",\
bottom=None, cumulative=False, normed=False, align="mid", orientation="vertical")
pad.hist(x=xData, bins=xBinning, weights=y3_PHI_0_weights+y3_PHI_1_weights+y3_PHI_2_weights+y3_PHI_3_weights+y3_PHI_4_weights,\
label="$bg\_vbf\_400\_600$", histtype="step", rwidth=1.0,\
color=None, edgecolor="#84c1a3", linewidth=1, linestyle="solid",\
bottom=None, cumulative=False, normed=False, align="mid", orientation="vertical")
pad.hist(x=xData, bins=xBinning, weights=y3_PHI_0_weights+y3_PHI_1_weights+y3_PHI_2_weights+y3_PHI_3_weights,\
label="$bg\_vbf\_200\_400$", histtype="step", rwidth=1.0,\
color=None, edgecolor="#89a8a0", linewidth=1, linestyle="solid",\
bottom=None, cumulative=False, normed=False, align="mid", orientation="vertical")
pad.hist(x=xData, bins=xBinning, weights=y3_PHI_0_weights+y3_PHI_1_weights+y3_PHI_2_weights,\
label="$bg\_vbf\_100\_200$", histtype="step", rwidth=1.0,\
color=None, edgecolor="#829e8c", linewidth=1, linestyle="solid",\
bottom=None, cumulative=False, normed=False, align="mid", orientation="vertical")
pad.hist(x=xData, bins=xBinning, weights=y3_PHI_0_weights+y3_PHI_1_weights,\
label="$bg\_vbf\_0\_100$", histtype="step", rwidth=1.0,\
color=None, edgecolor="#adbcc6", linewidth=1, linestyle="solid",\
bottom=None, cumulative=False, normed=False, align="mid", orientation="vertical")
pad.hist(x=xData, bins=xBinning, weights=y3_PHI_0_weights,\
label="$signal$", histtype="step", rwidth=1.0,\
color=None, edgecolor="#7a8e99", linewidth=1, linestyle="solid",\
bottom=None, cumulative=False, normed=False, align="mid", orientation="vertical")
# Axis
plt.rc('text',usetex=False)
plt.xlabel(r"\eta [ j_{1} ] ",\
fontsize=16,color="black")
plt.ylabel(r"$\mathrm{Events}$ $(\mathcal{L}_{\mathrm{int}} = 40.0\ \mathrm{fb}^{-1})$ ",\
fontsize=16,color="black")
# Boundary of y-axis
ymax=(y3_PHI_0_weights+y3_PHI_1_weights+y3_PHI_2_weights+y3_PHI_3_weights+y3_PHI_4_weights+y3_PHI_5_weights+y3_PHI_6_weights+y3_PHI_7_weights+y3_PHI_8_weights+y3_PHI_9_weights+y3_PHI_10_weights+y3_PHI_11_weights+y3_PHI_12_weights+y3_PHI_13_weights+y3_PHI_14_weights+y3_PHI_15_weights+y3_PHI_16_weights).max()*1.1
ymin=0 # linear scale
#ymin=min([x for x in (y3_PHI_0_weights+y3_PHI_1_weights+y3_PHI_2_weights+y3_PHI_3_weights+y3_PHI_4_weights+y3_PHI_5_weights+y3_PHI_6_weights+y3_PHI_7_weights+y3_PHI_8_weights+y3_PHI_9_weights+y3_PHI_10_weights+y3_PHI_11_weights+y3_PHI_12_weights+y3_PHI_13_weights+y3_PHI_14_weights+y3_PHI_15_weights+y3_PHI_16_weights) if x])/100. # log scale
plt.gca().set_ylim(ymin,ymax)
# Log/Linear scale for X-axis
plt.gca().set_xscale("linear")
#plt.gca().set_xscale("log",nonposx="clip")
# Log/Linear scale for Y-axis
plt.gca().set_yscale("linear")
#plt.gca().set_yscale("log",nonposy="clip")
# Legend
plt.legend(bbox_to_anchor=(1.05,1), loc=2, borderaxespad=0.)
# Saving the image
plt.savefig('../../HTML/MadAnalysis5job_0/selection_2.png')
plt.savefig('../../PDF/MadAnalysis5job_0/selection_2.png')
plt.savefig('../../DVI/MadAnalysis5job_0/selection_2.eps')
# Running!
if __name__ == '__main__':
selection_2()
| 178.747423
| 1,560
| 0.741731
| 7,547
| 34,677
| 3.311382
| 0.173711
| 0.259133
| 0.384618
| 0.507383
| 0.377056
| 0.375375
| 0.374255
| 0.371134
| 0.369613
| 0.369613
| 0
| 0.555243
| 0.047582
| 34,677
| 193
| 1,561
| 179.673575
| 0.201441
| 0.037979
| 0
| 0.185841
| 0
| 0.00885
| 0.030849
| 0.006002
| 0
| 0
| 0
| 0
| 0
| 1
| 0.00885
| false
| 0
| 0.035398
| 0
| 0.044248
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
a93d11dae5f6b12ab8926a0f93c21a6c8b84c04b
| 81
|
py
|
Python
|
spid_cie_oidc/authority/utils.py
|
peppelinux/spid-cie-oidc-authority
|
816636fece10f410f5d6fce85fd79bb409d0c8b8
|
[
"Apache-2.0"
] | 4
|
2022-03-08T09:05:13.000Z
|
2022-03-16T17:59:43.000Z
|
spid_cie_oidc/authority/utils.py
|
peppelinux/spid-cie-oidc-authority
|
816636fece10f410f5d6fce85fd79bb409d0c8b8
|
[
"Apache-2.0"
] | 64
|
2022-03-08T01:11:40.000Z
|
2022-03-31T17:23:49.000Z
|
spid_cie_oidc/authority/utils.py
|
peppelinux/spid-cie-oidc-authority
|
816636fece10f410f5d6fce85fd79bb409d0c8b8
|
[
"Apache-2.0"
] | 8
|
2022-03-09T12:00:08.000Z
|
2022-03-31T13:52:14.000Z
|
from secrets import token_hex
def random_token(n=254):
return token_hex(n)
| 13.5
| 29
| 0.753086
| 14
| 81
| 4.142857
| 0.714286
| 0.275862
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.044776
| 0.17284
| 81
| 5
| 30
| 16.2
| 0.820896
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 7
|
a947c523c534bc99056abcf27d1848e30d4ca2e3
| 419
|
py
|
Python
|
lib/__init__.py
|
tijko/PyChat
|
5c80612d9ee0eea6fdec740a2eba200683ca4a4e
|
[
"MIT"
] | null | null | null |
lib/__init__.py
|
tijko/PyChat
|
5c80612d9ee0eea6fdec740a2eba200683ca4a4e
|
[
"MIT"
] | null | null | null |
lib/__init__.py
|
tijko/PyChat
|
5c80612d9ee0eea6fdec740a2eba200683ca4a4e
|
[
"MIT"
] | null | null | null |
try:
from tkinter import Tk, Entry, Label, Button, Scrollbar,\
Checkbutton, TclError, IntVar, Text,\
NORMAL, DISABLED, WORD, CURRENT, END, N, S, E, W
except ImportError:
from Tkinter import Tk, Entry, Label, Button, Scrollbar,\
Checkbutton, TclError, IntVar, Text,\
NORMAL, DISABLED, WORD, CURRENT, END, N, S, E, W
| 46.555556
| 72
| 0.548926
| 45
| 419
| 5.111111
| 0.533333
| 0.095652
| 0.147826
| 0.165217
| 0.913043
| 0.913043
| 0.913043
| 0.913043
| 0.913043
| 0.913043
| 0
| 0
| 0.355609
| 419
| 8
| 73
| 52.375
| 0.851852
| 0
| 0
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.375
| 0
| 0.375
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 10
|
8d13a4d5a6aae3b0f90bcf618a2397c0d1555931
| 6,284
|
py
|
Python
|
tests/components/tasmota/test_init.py
|
pcaston/core
|
e74d946cef7a9d4e232ae9e0ba150d18018cfe33
|
[
"Apache-2.0"
] | 1
|
2021-07-08T20:09:55.000Z
|
2021-07-08T20:09:55.000Z
|
tests/components/tasmota/test_init.py
|
pcaston/core
|
e74d946cef7a9d4e232ae9e0ba150d18018cfe33
|
[
"Apache-2.0"
] | 47
|
2021-02-21T23:43:07.000Z
|
2022-03-31T06:07:10.000Z
|
tests/components/tasmota/test_init.py
|
OpenPeerPower/core
|
f673dfac9f2d0c48fa30af37b0a99df9dd6640ee
|
[
"Apache-2.0"
] | null | null | null |
"""The tests for the Tasmota binary sensor platform."""
import copy
import json
from unittest.mock import call
from openpeerpower.components import websocket_api
from openpeerpower.components.tasmota.const import DEFAULT_PREFIX
from openpeerpower.helpers import device_registry as dr
from .test_common import DEFAULT_CONFIG
from tests.common import MockConfigEntry, async_fire_mqtt_message
async def test_device_remove(
opp, mqtt_mock, caplog, device_reg, entity_reg, setup_tasmota
):
"""Test removing a discovered device through device registry."""
config = copy.deepcopy(DEFAULT_CONFIG)
mac = config["mac"]
async_fire_mqtt_message(opp, f"{DEFAULT_PREFIX}/{mac}/config", json.dumps(config))
await opp.async_block_till_done()
# Verify device entry is created
device_entry = device_reg.async_get_device(
set(), {(dr.CONNECTION_NETWORK_MAC, mac)}
)
assert device_entry is not None
device_reg.async_remove_device(device_entry.id)
await opp.async_block_till_done()
# Verify device entry is removed
device_entry = device_reg.async_get_device(
set(), {(dr.CONNECTION_NETWORK_MAC, mac)}
)
assert device_entry is None
# Verify retained discovery topic has been cleared
mqtt_mock.async_publish.assert_has_calls(
[
call(f"tasmota/discovery/{mac}/config", "", 0, True),
call(f"tasmota/discovery/{mac}/sensors", "", 0, True),
],
any_order=True,
)
async def test_device_remove_non_tasmota_device(
opp, device_reg, opp_ws_client, mqtt_mock, setup_tasmota
):
"""Test removing a non Tasmota device through device registry."""
config_entry = MockConfigEntry(domain="test")
config_entry.add_to_opp(opp)
mac = "12:34:56:AB:CD:EF"
device_entry = device_reg.async_get_or_create(
config_entry_id=config_entry.entry_id,
connections={(dr.CONNECTION_NETWORK_MAC, mac)},
)
assert device_entry is not None
device_reg.async_remove_device(device_entry.id)
await opp.async_block_till_done()
# Verify device entry is removed
device_entry = device_reg.async_get_device(
set(), {(dr.CONNECTION_NETWORK_MAC, mac)}
)
assert device_entry is None
# Verify no Tasmota discovery message was sent
mqtt_mock.async_publish.assert_not_called()
async def test_device_remove_stale_tasmota_device(
opp, device_reg, opp_ws_client, mqtt_mock, setup_tasmota
):
"""Test removing a stale (undiscovered) Tasmota device through device registry."""
config_entry = opp.config_entries.async_entries("tasmota")[0]
mac = "12:34:56:AB:CD:EF"
device_entry = device_reg.async_get_or_create(
config_entry_id=config_entry.entry_id,
connections={(dr.CONNECTION_NETWORK_MAC, mac)},
)
assert device_entry is not None
device_reg.async_remove_device(device_entry.id)
await opp.async_block_till_done()
# Verify device entry is removed
device_entry = device_reg.async_get_device(
set(), {(dr.CONNECTION_NETWORK_MAC, mac)}
)
assert device_entry is None
# Verify retained discovery topic has been cleared
mac = mac.replace(":", "")
mqtt_mock.async_publish.assert_has_calls(
[
call(f"tasmota/discovery/{mac}/config", "", 0, True),
call(f"tasmota/discovery/{mac}/sensors", "", 0, True),
],
any_order=True,
)
async def test_tasmota_ws_remove_discovered_device(
opp, device_reg, entity_reg, opp_ws_client, mqtt_mock, setup_tasmota
):
"""Test Tasmota websocket device removal."""
config = copy.deepcopy(DEFAULT_CONFIG)
mac = config["mac"]
async_fire_mqtt_message(opp, f"{DEFAULT_PREFIX}/{mac}/config", json.dumps(config))
await opp.async_block_till_done()
# Verify device entry is created
device_entry = device_reg.async_get_device(
set(), {(dr.CONNECTION_NETWORK_MAC, mac)}
)
assert device_entry is not None
client = await opp_ws_client(opp)
await client.send_json(
{"id": 5, "type": "tasmota/device/remove", "device_id": device_entry.id}
)
response = await client.receive_json()
assert response["success"]
# Verify device entry is cleared
device_entry = device_reg.async_get_device(
set(), {(dr.CONNECTION_NETWORK_MAC, mac)}
)
assert device_entry is None
async def test_tasmota_ws_remove_discovered_device_twice(
opp, device_reg, opp_ws_client, mqtt_mock, setup_tasmota
):
"""Test Tasmota websocket device removal."""
config = copy.deepcopy(DEFAULT_CONFIG)
mac = config["mac"]
async_fire_mqtt_message(opp, f"{DEFAULT_PREFIX}/{mac}/config", json.dumps(config))
await opp.async_block_till_done()
# Verify device entry is created
device_entry = device_reg.async_get_device(
set(), {(dr.CONNECTION_NETWORK_MAC, mac)}
)
assert device_entry is not None
client = await opp_ws_client(opp)
await client.send_json(
{"id": 5, "type": "tasmota/device/remove", "device_id": device_entry.id}
)
response = await client.receive_json()
assert response["success"]
await client.send_json(
{"id": 6, "type": "tasmota/device/remove", "device_id": device_entry.id}
)
response = await client.receive_json()
assert not response["success"]
assert response["error"]["code"] == websocket_api.const.ERR_NOT_FOUND
assert response["error"]["message"] == "Device not found"
async def test_tasmota_ws_remove_non_tasmota_device(
opp, device_reg, opp_ws_client, mqtt_mock, setup_tasmota
):
"""Test Tasmota websocket device removal of device belonging to other domain."""
config_entry = MockConfigEntry(domain="test")
config_entry.add_to_opp(opp)
device_entry = device_reg.async_get_or_create(
config_entry_id=config_entry.entry_id,
connections={(dr.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
)
assert device_entry is not None
client = await opp_ws_client(opp)
await client.send_json(
{"id": 5, "type": "tasmota/device/remove", "device_id": device_entry.id}
)
response = await client.receive_json()
assert not response["success"]
assert response["error"]["code"] == websocket_api.const.ERR_NOT_FOUND
| 32.729167
| 86
| 0.70592
| 851
| 6,284
| 4.918919
| 0.13866
| 0.089345
| 0.052795
| 0.047778
| 0.854993
| 0.818442
| 0.813426
| 0.791687
| 0.774009
| 0.774009
| 0
| 0.005303
| 0.189847
| 6,284
| 191
| 87
| 32.900524
| 0.816932
| 0.065245
| 0
| 0.705882
| 0
| 0
| 0.091788
| 0.053467
| 0
| 0
| 0
| 0
| 0.147059
| 1
| 0
| false
| 0
| 0.058824
| 0
| 0.058824
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
a5edd9f5b3d0d282c5be46e78b39f7ea53358fae
| 19,924
|
py
|
Python
|
tests/test_record.py
|
Semtexcz/invenio-records-draft
|
8df87f08bae350b8b50f0bee4edf12c3fbaf3944
|
[
"MIT"
] | null | null | null |
tests/test_record.py
|
Semtexcz/invenio-records-draft
|
8df87f08bae350b8b50f0bee4edf12c3fbaf3944
|
[
"MIT"
] | null | null | null |
tests/test_record.py
|
Semtexcz/invenio-records-draft
|
8df87f08bae350b8b50f0bee4edf12c3fbaf3944
|
[
"MIT"
] | null | null | null |
import uuid
import pytest
from invenio_pidstore.errors import PIDDoesNotExistError
from invenio_pidstore.models import PersistentIdentifier, PIDStatus
from invenio_records import Record
from invenio_records_draft.api import RecordContext
from invenio_records_draft.proxies import current_drafts
from invenio_records_draft.record import (
DraftEnabledRecordMixin,
InvalidRecordException,
MarshmallowValidator,
)
from tests.helpers import disable_test_authenticated
class TestDraftRecord(DraftEnabledRecordMixin, Record):
schema = None
def validate(self, **kwargs):
self['$schema'] = self.schema
return super().validate(**kwargs)
draft_validator = MarshmallowValidator(
'sample.records.marshmallow:MetadataSchemaV1',
'records/record-v1.0.0.json'
)
class TestPublishedRecord(DraftEnabledRecordMixin, Record):
schema = None
def validate(self, **kwargs):
self['$schema'] = self.schema
return super().validate(**kwargs)
def test_publish_record(app, db, schemas):
TestDraftRecord.schema = schemas['draft']
TestPublishedRecord.schema = schemas['published']
with db.session.begin_nested():
draft_uuid = uuid.uuid4()
rec = TestDraftRecord.create({
'id': '1'
}, id_=draft_uuid)
draft_pid = PersistentIdentifier.create(
pid_type='drecid', pid_value='1', status=PIDStatus.REGISTERED,
object_type='rec', object_uuid=draft_uuid
)
with pytest.raises(InvalidRecordException):
# title is required but not in rec, so should fail
with disable_test_authenticated():
current_drafts.publish(RecordContext(record=rec, record_pid=draft_pid))
with pytest.raises(PIDDoesNotExistError):
# no record should be created
PersistentIdentifier.get(pid_type='recid', pid_value='1')
# make the record valid
rec['title'] = 'blah'
rec.commit()
# and publish it again
with disable_test_authenticated():
current_drafts.publish(RecordContext(record=rec, record_pid=draft_pid))
# draft should be gone
draft_pid = PersistentIdentifier.get(pid_type='drecid', pid_value='1')
assert draft_pid.status == PIDStatus.DELETED
rec = TestDraftRecord.get_record(draft_uuid, with_deleted=True)
assert rec.model.json is None
published_pid = PersistentIdentifier.get(pid_type='recid', pid_value='1')
assert published_pid.status == PIDStatus.REGISTERED
rec = TestPublishedRecord.get_record(published_pid.object_uuid)
assert rec.model.json is not None
def test_publish_record_marshmallow(app, db, schemas):
TestDraftRecord.schema = schemas['draft']
TestPublishedRecord.schema = schemas['published']
with db.session.begin_nested():
draft_uuid = uuid.uuid4()
rec = TestDraftRecord.create({
'id': '1'
}, id_=draft_uuid)
draft_pid = PersistentIdentifier.create(
pid_type='drecid', pid_value='1', status=PIDStatus.REGISTERED,
object_type='rec', object_uuid=draft_uuid
)
with pytest.raises(InvalidRecordException):
# title is required but not in rec, so should fail
with disable_test_authenticated():
current_drafts.publish(RecordContext(record=rec, record_pid=draft_pid))
with pytest.raises(PIDDoesNotExistError):
# no record should be created
PersistentIdentifier.get(pid_type='recid', pid_value='1')
# make the record valid
rec['title'] = 'blah'
rec.commit()
assert rec['invenio_draft_validation']['valid']
# and publish it again
with disable_test_authenticated():
current_drafts.publish(RecordContext(record=rec, record_pid=draft_pid))
# draft should be gone
draft_pid = PersistentIdentifier.get(pid_type='drecid', pid_value='1')
assert draft_pid.status == PIDStatus.DELETED
rec = TestDraftRecord.get_record(draft_uuid, with_deleted=True)
assert rec.model.json is None
published_pid = PersistentIdentifier.get(pid_type='recid', pid_value='1')
assert published_pid.status == PIDStatus.REGISTERED
rec = TestPublishedRecord.get_record(published_pid.object_uuid)
assert rec.model.json is not None
def test_publish_record_with_previous_version(app, db, schemas):
TestDraftRecord.schema = schemas['draft']
TestPublishedRecord.schema = schemas['published']
with db.session.begin_nested():
published_uuid = uuid.uuid4()
PersistentIdentifier.create(
pid_type='recid', pid_value='1', status=PIDStatus.REGISTERED,
object_type='rec', object_uuid=published_uuid
)
published_record = TestPublishedRecord.create({
'id': '1',
'title': '11'
}, id_=published_uuid)
assert published_record.revision_id == 0
draft_uuid = uuid.uuid4()
draft_pid = PersistentIdentifier.create(
pid_type='drecid', pid_value='1', status=PIDStatus.REGISTERED,
object_type='rec', object_uuid=draft_uuid
)
draft_record = TestDraftRecord.create({
'id': '1',
'title': '22'
}, id_=draft_uuid)
assert draft_record.revision_id == 0
print(draft_record['invenio_draft_validation'])
# and publish it again
with disable_test_authenticated():
current_drafts.publish(RecordContext(record=draft_record, record_pid=draft_pid))
# draft should be gone
draft_pid = PersistentIdentifier.get(pid_type='drecid', pid_value='1')
assert draft_pid.status == PIDStatus.DELETED
rec = TestDraftRecord.get_record(draft_uuid, with_deleted=True)
assert rec.model.json is None
published_pid = PersistentIdentifier.get(pid_type='recid', pid_value='1')
assert published_pid.status == PIDStatus.REGISTERED
rec = TestPublishedRecord.get_record(published_pid.object_uuid)
assert rec.model.json is not None
assert rec['title'] == '22'
assert rec.revision_id == 1
def test_publish_deleted_published(app, db, schemas):
TestDraftRecord.schema = schemas['draft']
TestPublishedRecord.schema = schemas['published']
with db.session.begin_nested():
published_uuid = uuid.uuid4()
published_record = TestPublishedRecord.create({
'id': '1',
'title': '11',
'$schema': 'records/record-v1.0.0.json'
}, id_=published_uuid)
published_pid = PersistentIdentifier.create(
pid_type='recid', pid_value='1', status=PIDStatus.REGISTERED,
object_type='rec', object_uuid=published_uuid
)
assert published_record.revision_id == 0
draft_uuid = uuid.uuid4()
rec = TestDraftRecord.create({
'id': '1',
'title': '22'
}, id_=draft_uuid)
draft_pid = PersistentIdentifier.create(
pid_type='drecid', pid_value='1', status=PIDStatus.REGISTERED,
object_type='rec', object_uuid=draft_uuid
)
with db.session.begin_nested():
published_record.delete()
published_pid.status = PIDStatus.DELETED
db.session.add(published_pid)
with db.session.begin_nested():
rec = TestDraftRecord.get_record(draft_uuid)
draft_pid = PersistentIdentifier.get(pid_type='drecid', pid_value='1')
with disable_test_authenticated():
current_drafts.publish(RecordContext(record=rec, record_pid=draft_pid))
with db.session.begin_nested():
# draft should be gone
draft_pid = PersistentIdentifier.get(pid_type='drecid', pid_value='1')
assert draft_pid.status == PIDStatus.DELETED
rec = TestDraftRecord.get_record(draft_uuid, with_deleted=True)
assert rec.model.json is None
published_pid = PersistentIdentifier.get(pid_type='recid', pid_value='1')
assert published_pid.status == PIDStatus.REGISTERED
rec = TestPublishedRecord.get_record(published_pid.object_uuid)
assert rec['title'] == '22'
# revision 0 original, 1 deleted, 2 temporarily reverted to orig, 3 published
assert rec.revision_id == 3
def test_publish_redirected_published(app, db, schemas):
TestDraftRecord.schema = schemas['draft']
TestPublishedRecord.schema = schemas['published']
with db.session.begin_nested():
published_uuid = uuid.uuid4()
published_record = TestPublishedRecord.create({
'id': '1',
'title': '11'
}, id_=published_uuid)
published_pid = PersistentIdentifier.create(
pid_type='recid', pid_value='1', status=PIDStatus.REGISTERED,
object_type='rec', object_uuid=published_uuid
)
assert published_record.revision_id == 0
draft_uuid = uuid.uuid4()
rec = TestDraftRecord.create({
'id': '1',
'title': '22'
}, id_=draft_uuid)
draft_pid = PersistentIdentifier.create(
pid_type='drecid', pid_value='1', status=PIDStatus.REGISTERED,
object_type='rec', object_uuid=draft_uuid
)
with db.session.begin_nested():
published_record.delete()
published_pid.status = PIDStatus.REDIRECTED
db.session.add(published_pid)
with db.session.begin_nested():
rec = TestDraftRecord.get_record(draft_uuid)
draft_pid = PersistentIdentifier.get(pid_type='drecid', pid_value='1')
with pytest.raises(NotImplementedError):
with disable_test_authenticated():
current_drafts.publish(RecordContext(record=rec, record_pid=draft_pid))
def test_unpublish_record(app, db, schemas):
TestDraftRecord.schema = schemas['draft']
TestPublishedRecord.schema = schemas['published']
with db.session.begin_nested():
published_uuid = uuid.uuid4()
published_record = TestPublishedRecord.create({
'id': '1',
'title': '11'
}, id_=published_uuid)
published_pid = PersistentIdentifier.create(
pid_type='recid', pid_value='1', status=PIDStatus.REGISTERED,
object_type='rec', object_uuid=published_uuid
)
assert published_record.revision_id == 0
with disable_test_authenticated():
current_drafts.unpublish(RecordContext(record=published_record,
record_pid=published_pid))
# published version should be gone
published_pid = PersistentIdentifier.get(pid_type='recid', pid_value='1')
assert published_pid.status == PIDStatus.DELETED
rec = TestDraftRecord.get_record(published_uuid, with_deleted=True)
assert rec.model.json is None
draft_pid = PersistentIdentifier.get(pid_type='drecid', pid_value='1')
assert draft_pid.status == PIDStatus.REGISTERED
rec = TestDraftRecord.get_record(draft_pid.object_uuid)
assert rec.model.json is not None
assert rec['title'] == '11'
assert rec.revision_id == 1
def test_unpublish_record_existing_draft(app, db, schemas):
TestDraftRecord.schema = schemas['draft']
TestPublishedRecord.schema = schemas['published']
with db.session.begin_nested():
published_uuid = uuid.uuid4()
published_record = TestPublishedRecord.create({
'id': '1',
'title': '11'
}, id_=published_uuid)
published_pid = PersistentIdentifier.create(
pid_type='recid', pid_value='1', status=PIDStatus.REGISTERED,
object_type='rec', object_uuid=published_uuid
)
assert published_record.revision_id == 0
draft_uuid = uuid.uuid4()
draft_record = TestDraftRecord.create({
'id': '1',
'title': '22'
}, id_=draft_uuid)
draft_pid = PersistentIdentifier.create(
pid_type='drecid', pid_value='1', status=PIDStatus.REGISTERED,
object_type='rec', object_uuid=draft_uuid
)
assert draft_record.revision_id == 0
with disable_test_authenticated():
current_drafts.unpublish(RecordContext(record=published_record,
record_pid=published_pid))
# published version should be gone
published_pid = PersistentIdentifier.get(pid_type='recid', pid_value='1')
assert published_pid.status == PIDStatus.DELETED
rec = TestDraftRecord.get_record(published_uuid, with_deleted=True)
assert rec.model.json is None
draft_pid = PersistentIdentifier.get(pid_type='drecid', pid_value='1')
assert draft_pid.status == PIDStatus.REGISTERED
rec = TestDraftRecord.get_record(draft_pid.object_uuid)
assert rec.model.json is not None
assert rec['title'] == '22' # should not be changed on a newer record
assert rec.revision_id == 1
def test_unpublish_record_redirected_draft(app, db, schemas):
TestDraftRecord.schema = schemas['draft']
TestPublishedRecord.schema = schemas['published']
with db.session.begin_nested():
published_uuid = uuid.uuid4()
published_record = TestPublishedRecord.create({
'id': '1',
'title': '11'
}, id_=published_uuid)
published_pid = PersistentIdentifier.create(
pid_type='recid', pid_value='1', status=PIDStatus.REGISTERED,
object_type='rec', object_uuid=published_uuid
)
assert published_record.revision_id == 0
draft_uuid = uuid.uuid4()
draft_record = TestDraftRecord.create({
'id': '1',
'title': '22'
}, id_=draft_uuid)
draft_pid = PersistentIdentifier.create(
pid_type='drecid', pid_value='1', status=PIDStatus.REGISTERED,
object_type='rec', object_uuid=draft_uuid
)
assert draft_record.revision_id == 0
with db.session.begin_nested():
draft_record.delete()
draft_pid.status = PIDStatus.REDIRECTED
db.session.add(draft_pid)
with db.session.begin_nested():
with pytest.raises(NotImplementedError):
with disable_test_authenticated():
current_drafts.unpublish(
RecordContext(record=published_record, record_pid=published_pid))
def test_draft_record(app, db, schemas):
TestDraftRecord.schema = schemas['draft']
TestPublishedRecord.schema = schemas['published']
with db.session.begin_nested():
published_uuid = uuid.uuid4()
published_record = TestPublishedRecord.create({
'id': '1',
'title': '11'
}, id_=published_uuid)
published_pid = PersistentIdentifier.create(
pid_type='recid', pid_value='1', status=PIDStatus.REGISTERED,
object_type='rec', object_uuid=published_uuid
)
assert published_record.revision_id == 0
with disable_test_authenticated():
current_drafts.edit(RecordContext(record=published_record, record_pid=published_pid))
# published version should be there unchanged
published_pid = PersistentIdentifier.get(pid_type='recid', pid_value='1')
assert published_pid.status == PIDStatus.REGISTERED
rec = TestDraftRecord.get_record(published_uuid, with_deleted=True)
assert rec['title'] == '11'
assert rec.revision_id == 0
# draft version should appear
draft_pid = PersistentIdentifier.get(pid_type='drecid', pid_value='1')
assert draft_pid.status == PIDStatus.REGISTERED
rec = TestDraftRecord.get_record(draft_pid.object_uuid)
assert rec.model.json is not None
assert rec['title'] == '11'
assert rec.revision_id == 1
def test_draft_record_existing_draft(app, db, schemas):
TestDraftRecord.schema = schemas['draft']
TestPublishedRecord.schema = schemas['published']
with db.session.begin_nested():
published_uuid = uuid.uuid4()
published_record = TestPublishedRecord.create({
'id': '1',
'title': '11'
}, id_=published_uuid)
published_pid = PersistentIdentifier.create(
pid_type='recid', pid_value='1', status=PIDStatus.REGISTERED,
object_type='rec', object_uuid=published_uuid
)
assert published_record.revision_id == 0
draft_uuid = uuid.uuid4()
draft_record = TestDraftRecord.create({
'id': '1',
'title': '22'
}, id_=draft_uuid)
draft_pid = PersistentIdentifier.create(
pid_type='drecid', pid_value='1', status=PIDStatus.REGISTERED,
object_type='rec', object_uuid=draft_uuid
)
assert draft_record.revision_id == 0
with disable_test_authenticated():
current_drafts.edit(RecordContext(record=published_record, record_pid=published_pid))
# published version should be there unchanged
published_pid = PersistentIdentifier.get(pid_type='recid', pid_value='1')
assert published_pid.status == PIDStatus.REGISTERED
rec = TestDraftRecord.get_record(published_uuid, with_deleted=True)
assert rec['title'] == '11'
assert rec.revision_id == 0
# draft version should be there unchanged
draft_pid = PersistentIdentifier.get(pid_type='drecid', pid_value='1')
assert draft_pid.status == PIDStatus.REGISTERED
rec = TestDraftRecord.get_record(draft_pid.object_uuid)
assert rec.model.json is not None
assert rec['title'] == '22' # should not be changed on a newer record
assert rec.revision_id == 1
def test_draft_record_deleted_draft(app, db, schemas):
TestDraftRecord.schema = schemas['draft']
TestPublishedRecord.schema = schemas['published']
with db.session.begin_nested():
published_uuid = uuid.uuid4()
published_record = TestPublishedRecord.create({
'id': '1',
'title': '11'
}, id_=published_uuid)
published_pid = PersistentIdentifier.create(
pid_type='recid', pid_value='1', status=PIDStatus.REGISTERED,
object_type='rec', object_uuid=published_uuid
)
assert published_record.revision_id == 0
draft_uuid = uuid.uuid4()
draft_record = TestDraftRecord.create({
'id': '1',
'title': '22'
}, id_=draft_uuid)
draft_pid = PersistentIdentifier.create(
pid_type='drecid', pid_value='1', status=PIDStatus.REGISTERED,
object_type='rec', object_uuid=draft_uuid
)
assert draft_record.revision_id == 0
with db.session.begin_nested():
draft_record.delete()
draft_pid.status = PIDStatus.DELETED
db.session.add(draft_pid)
with db.session.begin_nested():
with disable_test_authenticated():
current_drafts.edit(RecordContext(record=published_record, record_pid=published_pid))
# published version should be there unchanged
published_pid = PersistentIdentifier.get(pid_type='recid', pid_value='1')
assert published_pid.status == PIDStatus.REGISTERED
rec = TestDraftRecord.get_record(published_uuid, with_deleted=True)
assert rec['title'] == '11'
assert rec.revision_id == 0
# draft version should be there unchanged
draft_pid = PersistentIdentifier.get(pid_type='drecid', pid_value='1')
assert draft_pid.status == PIDStatus.REGISTERED
rec = TestDraftRecord.get_record(draft_pid.object_uuid)
assert rec.model.json is not None
assert rec['title'] == '11'
assert rec.revision_id == 4
| 39.375494
| 97
| 0.655591
| 2,188
| 19,924
| 5.736289
| 0.056216
| 0.028683
| 0.028683
| 0.052585
| 0.933551
| 0.933551
| 0.930205
| 0.923751
| 0.92152
| 0.918333
| 0
| 0.010817
| 0.243676
| 19,924
| 505
| 98
| 39.453465
| 0.822085
| 0.040454
| 0
| 0.845771
| 0
| 0
| 0.046666
| 0.00749
| 0
| 0
| 0
| 0
| 0.166667
| 1
| 0.032338
| false
| 0
| 0.022388
| 0
| 0.072139
| 0.002488
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
570343d8371a37859e0ddcf3d197742446157ad5
| 9,095
|
py
|
Python
|
visualise_ba.py
|
ttk21/lab_05
|
916bf12185af9668e7c4d71fa282e1bf95a685cc
|
[
"BSD-3-Clause"
] | null | null | null |
visualise_ba.py
|
ttk21/lab_05
|
916bf12185af9668e7c4d71fa282e1bf95a685cc
|
[
"BSD-3-Clause"
] | null | null | null |
visualise_ba.py
|
ttk21/lab_05
|
916bf12185af9668e7c4d71fa282e1bf95a685cc
|
[
"BSD-3-Clause"
] | 1
|
2020-12-19T20:13:20.000Z
|
2020-12-19T20:13:20.000Z
|
import matplotlib
import numpy as np
import visgeom as vg
from matplotlib import pyplot as plt
def visualise_moba(true_pose_w_c, true_box_w, measurement, x, cost):
# Visualize (press a key to jump to the next iteration).
# Use Qt 5 backend in visualisation.
matplotlib.use('qt5agg')
# Create figure and axis.
fig = plt.figure()
ax = plt.axes(projection='3d')
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_zlabel('z')
# Plot box and true state
vg.plot_pose(ax, true_pose_w_c.to_tuple(), scale=1, alpha=0.4)
vg.utils.plot_as_box(ax, true_box_w)
# Normalised in 3d.
xn_3d = np.vstack((measurement.xn, np.ones((1, measurement.num))))
# Plot initial state (to run axis equal first time).
ax.set_title('Cost: ' + str(cost[0]))
artists = vg.plot_pose(ax, x[0].to_tuple(), scale=1)
artists.extend(vg.plot_camera_image_plane(ax, measurement.camera.K(), x[0].to_tuple()))
artists.extend(vg.utils.plot_as_box(ax, x[0] * xn_3d, alpha=0.4))
artists.extend(
vg.utils.plot_as_box(ax, x[0] * measurement.camera.project_to_normalised_3d(x[0].inverse() * measurement.x_w)))
vg.plot.axis_equal(ax)
plt.draw()
while True:
if plt.waitforbuttonpress():
break
# Plot iterations
for i in range(1, len(x)):
for artist in artists:
artist.remove()
ax.set_title('Cost: ' + str(cost[i]))
artists = vg.plot_pose(ax, x[i].to_tuple(), scale=1)
artists.extend(vg.plot_camera_image_plane(ax, measurement.camera.K(), x[i].to_tuple()))
artists.extend(vg.utils.plot_as_box(ax, x[i] * xn_3d, alpha=0.4))
artists.extend(vg.utils.plot_as_box(ax, x[i] * measurement.camera.project_to_normalised_3d(
x[i].inverse() * measurement.x_w)))
plt.draw()
while True:
if plt.waitforbuttonpress():
break
plt.close()
def visualise_multicam_moba(true_pose_w_c, true_box_w, measurement, x, cost):
# Visualize (press a key to jump to the next iteration).
# Use Qt 5 backend in visualisation.
matplotlib.use('qt5agg')
# Create figure and axis.
fig = plt.figure()
ax = plt.axes(projection='3d')
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_zlabel('z')
# Plot box and true state
for true_pose in true_pose_w_c:
vg.plot_pose(ax, true_pose.to_tuple(), scale=1, alpha=0.4)
vg.utils.plot_as_box(ax, true_box_w)
# Plot initial state (to run axis equal first time).
ax.set_title('Cost: ' + str(cost[0]))
artists = []
for pose, meas in zip(x[0], measurement):
# Normalised in 3d.
xn_3d = np.vstack((meas.xn, np.ones((1, meas.num))))
artists.extend(vg.plot_pose(ax, pose.to_tuple(), scale=1))
artists.extend(vg.plot_camera_image_plane(ax, meas.camera.K(), pose.to_tuple()))
artists.extend(vg.utils.plot_as_box(ax, pose * xn_3d, alpha=0.4))
artists.extend(
vg.utils.plot_as_box(ax, pose * meas.camera.project_to_normalised_3d(pose.inverse() * meas.x_w)))
vg.plot.axis_equal(ax)
plt.draw()
while True:
if plt.waitforbuttonpress():
break
# Plot iterations
for i in range(1, len(x)):
for artist in artists:
artist.remove()
ax.set_title('Cost: ' + str(cost[i]))
artists = []
for pose, meas in zip(x[i], measurement):
# Normalised in 3d.
xn_3d = np.vstack((meas.xn, np.ones((1, meas.num))))
artists.extend(vg.plot_pose(ax, pose.to_tuple(), scale=1))
artists.extend(vg.plot_camera_image_plane(ax, meas.camera.K(), pose.to_tuple()))
artists.extend(vg.utils.plot_as_box(ax, pose * xn_3d, alpha=0.4))
artists.extend(vg.utils.plot_as_box(ax, pose * meas.camera.project_to_normalised_3d(
pose.inverse() * meas.x_w)))
plt.draw()
while True:
if plt.waitforbuttonpress():
break
plt.close()
def visualise_soba(true_pose_w_c, true_box_w, measurement, x, cost):
    """Visualise structure-only bundle adjustment iterations.

    Camera poses are fixed (taken from the measurements); each iterate in
    ``x`` is a collection of estimated world points. Press a key to advance
    to the next iteration.

    :param true_pose_w_c: iterable of ground-truth camera poses (world-from-camera).
    :param true_box_w: ground-truth box points in the world frame.
    :param measurement: per-camera measurements, each carrying its own fixed pose.
    :param x: list of iterates; each iterate is a sequence of 3x1 world points.
    :param cost: cost value for each iterate (same length as ``x``).
    """
    # Visualize (press a key to jump to the next iteration).
    # Use Qt 5 backend in visualisation.
    matplotlib.use('qt5agg')
    # Create figure and axis.
    fig = plt.figure()
    ax = plt.axes(projection='3d')
    ax.set_xlabel('x')
    ax.set_ylabel('y')
    ax.set_zlabel('z')
    # Plot true box
    vg.utils.plot_as_box(ax, true_box_w, alpha=0.4)
    # Plot camera poses (fixed in structure-only BA).
    for true_pose in true_pose_w_c:
        vg.plot_pose(ax, true_pose.to_tuple(), scale=1)
    # Plot initial state (to run axis equal first time).
    ax.set_title('Cost: ' + str(cost[0]))
    artists = []
    # Extract points as matrix (one 3x1 state per column).
    x_w = np.zeros((3, len(x[0])))
    for j, state in enumerate(x[0]):
        x_w[:, [j]] = state
    artists.extend(vg.utils.plot_as_box(ax, x_w))
    for meas in measurement:
        # Normalised image points lifted to homogeneous 3D (append a row of ones).
        xn_3d = np.vstack((meas.xn, np.ones((1, meas.num))))
        artists.extend(vg.plot_camera_image_plane(ax, meas.camera.K(), meas.pose_w_c.to_tuple()))
        # Observed points on the image plane, in the world frame.
        artists.extend(vg.utils.plot_as_box(ax, meas.pose_w_c * xn_3d, alpha=0.4))
        # Reprojection of the current point estimates through the fixed camera.
        artists.extend(
            vg.utils.plot_as_box(ax, meas.pose_w_c * meas.camera.project_to_normalised_3d(meas.pose_c_w * x_w)))
    vg.plot.axis_equal(ax)
    plt.draw()
    # waitforbuttonpress() returns True on a key press — wait for a key.
    while True:
        if plt.waitforbuttonpress():
            break
    # Plot iterations
    for i in range(1, len(x)):
        # Clear the previous iterate's artists before drawing the next one.
        for artist in artists:
            artist.remove()
        ax.set_title('Cost: ' + str(cost[i]))
        artists = []
        # Extract points as matrix.
        x_w = np.zeros((3, len(x[i])))
        for j, state in enumerate(x[i]):
            x_w[:, [j]] = state
        artists.extend(vg.utils.plot_as_box(ax, x_w))
        for meas in measurement:
            # Normalised image points lifted to homogeneous 3D.
            xn_3d = np.vstack((meas.xn, np.ones((1, meas.num))))
            artists.extend(vg.plot_camera_image_plane(ax, meas.camera.K(), meas.pose_w_c.to_tuple()))
            artists.extend(vg.utils.plot_as_box(ax, meas.pose_w_c * xn_3d, alpha=0.4))
            artists.extend(
                vg.utils.plot_as_box(ax, meas.pose_w_c * meas.camera.project_to_normalised_3d(meas.pose_c_w * x_w)))
        plt.draw()
        while True:
            if plt.waitforbuttonpress():
                break
    plt.close()
def visualise_full(true_pose_w_c, true_box_w, measurement, x, cost):
    """Visualise full bundle adjustment iterations (poses AND points).

    Each iterate in ``x`` exposes ``num_poses``/``num_points`` with
    ``get_pose(i)``/``get_point(j)`` accessors. Press a key to advance to
    the next iteration.

    :param true_pose_w_c: iterable of ground-truth camera poses (world-from-camera).
    :param true_box_w: ground-truth box points in the world frame.
    :param measurement: per-camera measurements, indexed like the poses in each iterate.
    :param x: list of iterates (full state: all poses plus all points).
    :param cost: cost value for each iterate (same length as ``x``).
    """
    # Visualize (press a key to jump to the next iteration).
    # Use Qt 5 backend in visualisation.
    matplotlib.use('qt5agg')
    # Create figure and axis.
    fig = plt.figure()
    ax = plt.axes(projection='3d')
    ax.set_xlabel('x')
    ax.set_ylabel('y')
    ax.set_zlabel('z')
    # Plot true state (semi-transparent so estimates stand out).
    for true_pose in true_pose_w_c:
        vg.plot_pose(ax, true_pose.to_tuple(), scale=1, alpha=0.4)
    vg.utils.plot_as_box(ax, true_box_w, alpha=0.4)
    # State dimensions are constant across iterates; read them once.
    num_cameras = x[0].num_poses
    num_points = x[0].num_points
    # Plot initial state (to run axis equal first time).
    ax.set_title('Cost: ' + str(cost[0]))
    artists = []
    # Extract points as matrix (one 3x1 point per column).
    x_w = np.zeros((3, num_points))
    for j in range(num_points):
        x_w[:, [j]] = x[0].get_point(j)
    artists.extend(vg.utils.plot_as_box(ax, x_w))
    for i in range(num_cameras):
        pose = x[0].get_pose(i)
        # Normalised image points lifted to homogeneous 3D (append a row of ones).
        xn_3d = np.vstack((measurement[i].xn, np.ones((1, measurement[i].num))))
        artists.extend(vg.plot_pose(ax, pose.to_tuple(), scale=1))
        artists.extend(vg.plot_camera_image_plane(ax, measurement[i].camera.K(), pose.to_tuple()))
        # Observed points on the image plane, in the world frame.
        artists.extend(vg.utils.plot_as_box(ax, pose * xn_3d, alpha=0.4))
        # Reprojection of the current point estimates under the current pose estimate.
        artists.extend(
            vg.utils.plot_as_box(ax, pose * measurement[i].camera.project_to_normalised_3d(pose.inverse() * x_w)))
    vg.plot.axis_equal(ax)
    plt.draw()
    # waitforbuttonpress() returns True on a key press — wait for a key.
    while True:
        if plt.waitforbuttonpress():
            break
    # Plot iterations
    for it in range(1, len(x)):
        # Clear the previous iterate's artists before drawing the next one.
        for artist in artists:
            artist.remove()
        ax.set_title('Cost: ' + str(cost[it]))
        artists = []
        # Extract points as matrix.
        x_w = np.zeros((3, num_points))
        for j in range(num_points):
            x_w[:, [j]] = x[it].get_point(j)
        artists.extend(vg.utils.plot_as_box(ax, x_w))
        for i in range(num_cameras):
            pose = x[it].get_pose(i)
            # Normalised image points lifted to homogeneous 3D.
            xn_3d = np.vstack((measurement[i].xn, np.ones((1, measurement[i].num))))
            artists.extend(vg.plot_pose(ax, pose.to_tuple(), scale=1))
            artists.extend(vg.plot_camera_image_plane(ax, measurement[i].camera.K(), pose.to_tuple()))
            artists.extend(vg.utils.plot_as_box(ax, pose * xn_3d, alpha=0.4))
            artists.extend(
                vg.utils.plot_as_box(ax, pose * measurement[i].camera.project_to_normalised_3d(pose.inverse() * x_w)))
        plt.draw()
        while True:
            if plt.waitforbuttonpress():
                break
    plt.close()
| 32.715827
| 119
| 0.610445
| 1,405
| 9,095
| 3.763701
| 0.079004
| 0.078669
| 0.090772
| 0.059002
| 0.957262
| 0.953101
| 0.93646
| 0.91528
| 0.908283
| 0.908283
| 0
| 0.015964
| 0.249258
| 9,095
| 277
| 120
| 32.833935
| 0.758494
| 0.115338
| 0
| 0.794444
| 0
| 0
| 0.011487
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.022222
| false
| 0
| 0.022222
| 0
| 0.044444
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
57079c9d0f7b8a1104c06e1bea819ee6a36ea3cf
| 2,912
|
py
|
Python
|
email_campaigns/migrations/0003_auto_20200814_1417.py
|
diffractive/newstream
|
cf1a1f230e18d01c63b50ab9d360aa44ac5a486f
|
[
"MIT"
] | 1
|
2020-05-03T12:33:42.000Z
|
2020-05-03T12:33:42.000Z
|
email_campaigns/migrations/0003_auto_20200814_1417.py
|
diffractive/newstream
|
cf1a1f230e18d01c63b50ab9d360aa44ac5a486f
|
[
"MIT"
] | 14
|
2020-07-06T20:05:57.000Z
|
2022-03-12T00:39:11.000Z
|
email_campaigns/migrations/0003_auto_20200814_1417.py
|
diffractive/newstream
|
cf1a1f230e18d01c63b50ab9d360aa44ac5a486f
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.0.8 on 2020-08-14 14:17
from django.db import migrations, models
import wagtail.core.fields
class Migration(migrations.Migration):
    """Add per-language variants of EmailTemplate's html_body, plain_text
    and subject fields, one set per supported translation language."""

    dependencies = [
        ('email_campaigns', '0002_auto_20200814_1417'),
    ]

    # Languages for which translated columns are added. Order matters:
    # it fixes the order of the generated AddField operations.
    _languages = ('en', 'id_id', 'ms', 'tl', 'zh_hant')

    operations = (
        # Rich-text HTML body per language.
        [
            migrations.AddField(
                model_name='emailtemplate',
                name='html_body_%s' % lang,
                field=wagtail.core.fields.RichTextField(blank=True, null=True),
            )
            for lang in _languages
        ]
        # Plain-text body per language.
        + [
            migrations.AddField(
                model_name='emailtemplate',
                name='plain_text_%s' % lang,
                field=models.TextField(null=True),
            )
            for lang in _languages
        ]
        # Subject line per language.
        + [
            migrations.AddField(
                model_name='emailtemplate',
                name='subject_%s' % lang,
                field=models.CharField(max_length=255, null=True),
            )
            for lang in _languages
        ]
    )
| 32.355556
| 75
| 0.571085
| 280
| 2,912
| 5.742857
| 0.192857
| 0.16791
| 0.214552
| 0.251866
| 0.858209
| 0.858209
| 0.858209
| 0.858209
| 0.800995
| 0.796642
| 0
| 0.023104
| 0.316277
| 2,912
| 89
| 76
| 32.719101
| 0.78453
| 0.015453
| 0
| 0.722892
| 1
| 0
| 0.150785
| 0.008028
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.024096
| 0
| 0.060241
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
570b6359116ba2cabc7c9c5c9556313b0daa91f4
| 25,092
|
py
|
Python
|
scripts/multi_agent_sarsa.py
|
fb1n15/maddpg
|
ce0fd8028c09dc4f13c5c4ab015c9ad980469443
|
[
"MIT"
] | null | null | null |
scripts/multi_agent_sarsa.py
|
fb1n15/maddpg
|
ce0fd8028c09dc4f13c5c4ab015c9ad980469443
|
[
"MIT"
] | null | null | null |
scripts/multi_agent_sarsa.py
|
fb1n15/maddpg
|
ce0fd8028c09dc4f13c5c4ab015c9ad980469443
|
[
"MIT"
] | null | null | null |
import json
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
# from tqdm import tqdm
from tqdm import tqdm_notebook as tqdm # for use in Jupyter Lab
from classes_in_reverse_auction_v1 import FogNodeAgent
from classes_in_reverse_auction_v1 import ReverseAuctionMDP
from classes_in_reverse_auction_v1 import pd
from generate_simulation_data import generate_synthetic_data_edge_cloud
# (dataframe) set the maximum number of rows and columns to display to unlimited
pd.set_option("display.max_rows", None, "display.max_columns", None)
def train_multi_agent_sarsa(avg_resource_capacity, avg_unit_cost, seed=0,
                            total_number_of_steps=500, num_fog_nodes=6,
                            resource_coefficient_original=3, alpha=0.001,
                            beta=0.1,
                            epsilon_tuple=(0.2, 0.1, 0.05),
                            epsilon_steps_tuple=(500, 500, 100),
                            plot_bool=False, num_actions=4, time_length=100,
                            high_value_proportion=0.2, high_value_slackness=0,
                            low_value_slackness=6,
                            valuation_coefficient_ratio=10,
                            resource_ratio=1.2, trained_agents=None,
                            verbose=False, auction_type="second-price"):
    """Run multi-agent differential SARSA in a reverse auction (one trial).

    Args:
        :param avg_resource_capacity: average resource capacity of the fog
            nodes (forwarded to the synthetic-data generator)
        :param avg_unit_cost: average unit cost of the fog nodes (forwarded
            to the synthetic-data generator)
        :param seed: random seed for task generation
        :param total_number_of_steps: steps of RL (allocate how many tasks)
        :param num_fog_nodes: number of fog nodes
        :param resource_coefficient_original: a coefficient for computing the resource coefficient
        :param alpha: step size of weights
        :param beta: step size of estimated average rewards
        :param epsilon_tuple: probability of exploration for each phase
        :param epsilon_steps_tuple: number of steps to run for each epsilon
            (must have 3 entries; epsilon is 0 after all three phases)
        :param plot_bool: whether plot the results
        :param num_actions: number of actions
        :param time_length: tasks arrive within this time length
        :param high_value_proportion: the proportion of high-value tasks
        :param high_value_slackness: deadline slackness of high-value tasks
        :param low_value_slackness: deadline slackness of low-value tasks
        :param valuation_coefficient_ratio: valuation coefficient ratio between high-value
            and low-value tasks
        :param resource_ratio: resource demand ratio between high-value and low-value tasks
        :param trained_agents: optional list of pre-trained agents used to
            warm-start each fog node agent
        :param verbose: whether print the details of the execution
        :param auction_type: the type of the reverse auction

    Returns:
        Tuple of (sw_list, total_value, df_tasks, df_nodes, agents_list,
        allocation_scheme).
    """
    # record the allocation scheme
    allocation_scheme = pd.DataFrame(
        columns=['node_id', 'start_time', 'end_time'])
    result_sarsa_list = []  # a list of the lists of average rewards for each step of sarsa
    social_welfare_list = []  # a list of total social welfare of each trial
    if verbose:
        print(f"seed={seed}")
    sw_list = []  # a list of social welfare after a new task arrives
    np.random.seed(seed)
    # generate the two types of tasks
    number_of_tasks = total_number_of_steps
    # compute the resource coefficient
    resource_coefficient = (
            resource_coefficient_original * number_of_tasks / time_length)
    # generate the synthetic data for simulations
    df_tasks, df_nodes, n_time, n_tasks, num_fog_nodes = \
        generate_synthetic_data_edge_cloud(avg_resource_capacity, avg_unit_cost,
                                           n_tasks=total_number_of_steps,
                                           n_time=time_length,
                                           seed=seed, n_nodes=num_fog_nodes,
                                           p_high_value_tasks=high_value_proportion,
                                           high_value_slackness_lower_limit=high_value_slackness,
                                           high_value_slackness_upper_limit=high_value_slackness + 2,
                                           low_value_slackness_lower_limit=low_value_slackness,
                                           low_value_slackness_upper_limit=low_value_slackness + 2,
                                           resource_demand_high=resource_ratio,
                                           vc_ratio=valuation_coefficient_ratio,
                                           k_resource=resource_coefficient)
    if verbose:
        print("resource coefficient: ", resource_coefficient)
        print(f"low value slackness = {low_value_slackness}")
        print(f"high value slackness = {high_value_slackness}")
        print('df_tasks:')
        print(df_tasks.head(10))
        print('df_nodes:')
        print(df_nodes.head())
    agents_list = []
    mdp = ReverseAuctionMDP(df_tasks, df_nodes,
                            num_nodes=num_fog_nodes, num_actions=num_actions)  # several fog nodes
    # generate several agents representing several fog nodes; warm-start
    # from trained_agents when provided (the two former construction
    # branches differed only in the trained_agent kwarg).
    for i in range(mdp.num_fog_nodes):
        extra_kwargs = ({} if trained_agents is None
                        else {'trained_agent': trained_agents[i]})
        agent = FogNodeAgent(n_steps=total_number_of_steps, alpha=alpha,
                             beta=beta, fog_index=i, df_tasks=df_tasks,
                             df_nodes=df_nodes, num_actions=num_actions,
                             epsilon=epsilon_tuple[0], mdp=mdp, **extra_kwargs)
        agents_list.append(agent)
    # actions taken by each node
    actions = {i: [] for i in range(mdp.num_fog_nodes)}
    # the reverse auction
    for k in tqdm(range(total_number_of_steps)):
        # fog nodes decide their bidding price, and allocation scheme for the current task
        if verbose:
            print()
            print(f"step: {k}")
        # epsilon decreases as the number of steps increases
        if k < epsilon_steps_tuple[0]:
            epsilon = epsilon_tuple[0]
        elif k < epsilon_steps_tuple[0] + epsilon_steps_tuple[1]:
            epsilon = epsilon_tuple[1]
        elif k < (epsilon_steps_tuple[0] + epsilon_steps_tuple[1] +
                  epsilon_steps_tuple[2]):
            epsilon = epsilon_tuple[2]
        else:
            epsilon = 0
        if verbose:
            print(f'epsilon = {epsilon}')
        # change the epsilon of all agents
        for i in range(mdp.num_fog_nodes):
            agents_list[i].epsilon = epsilon
        bids_list = []  # bidding price for one time step
        max_usage_time_list = []  # maximum usage time a fog node can offer
        start_time_list = []  # start time according to the planned allocation
        relative_start_time_list = []  # relative start time according to the current task
        for i in range(mdp.num_fog_nodes):
            (bidding_price, max_usage_time, relative_start_time, action) = \
                agents_list[i].differential_sarsa_decide_action(verbose=verbose)
            # transfer relative start_time to absolute start_time
            start_time = int(
                df_tasks.loc[k, 'arrive_time'] + relative_start_time + 1)
            bids_list.append(bidding_price)
            max_usage_time_list.append(max_usage_time)
            start_time_list.append(start_time)
            relative_start_time_list.append(relative_start_time)
            actions[i].append(action)
        # find the winner
        (winner_index, winner_num_time, winner_utility, max_utility) = \
            mdp.step(bids_list, max_usage_time_list, start_time_list,
                     verbose=verbose,
                     auction_type=auction_type)
        sw_list.append(
            mdp.social_welfare)  # a list of social welfare after a new task arrives
        # modify the allocation scheme
        if winner_num_time is not None and winner_num_time > 0:
            allocation_scheme.loc[k] = [winner_index,
                                        start_time_list[winner_index],
                                        start_time_list[winner_index] + max_usage_time_list[
                                            winner_index] - 1]
        else:  # the task is rejected
            allocation_scheme.loc[k] = [None, None, None]
        if verbose:
            print()
            print(f"nodes' bids = {bids_list}")
            print(f"nodes' usage times = {max_usage_time_list}")
            print(f"nodes' start times = {start_time_list}")
            print(f"winner's index = {winner_index}")
            print(f"number of usage time = {winner_num_time}")
            print(f"winner's utility = {winner_utility}")
            print(f"user's utility = {max_utility}")
        if k < total_number_of_steps - 1:
            # update sarsa weights (reward flag 1 for the winner, 0 otherwise)
            for i in range(mdp.num_fog_nodes):
                if verbose:
                    print(f"updating weights of node{i}:")
                if i == winner_index:  # if fog node i wins this task
                    agents_list[i].differential_sarsa_update_weights(1,
                        max_usage_time_list[i], relative_start_time_list[i],
                        winner_revenue=winner_utility,
                        verbose=verbose)
                else:  # if fog node i loses the reverse auction
                    agents_list[i].differential_sarsa_update_weights(0,
                        max_usage_time_list[i],
                        relative_start_time_list[i],
                        winner_revenue=winner_utility,
                        verbose=verbose)
        else:
            if verbose:
                print("This is the last task.")  # no need to update weights
    if verbose:
        print(f"social welfare = {sw_list[-1]}")
    social_welfare_list.append(sw_list[-1])
    # generate a list of average rewards over the most recent 100 tasks
    average_reward_sarsa_list = []
    for i in range(total_number_of_steps):
        if i < 100:
            average_reward = sw_list[i] / (i + 1)
        else:
            average_reward = (sw_list[i] - sw_list[i - 100]) / 100
        average_reward_sarsa_list.append(average_reward)
    result_sarsa_list.append(average_reward_sarsa_list.copy())
    # total value of all tasks (an upper bound on achievable social welfare)
    total_value = 0
    for i in range(total_number_of_steps):
        total_value += (df_tasks.loc[i, "valuation_coefficient"] *
                        df_tasks.loc[i, "usage_time"])
    if verbose:
        print(f"total value of tasks = {total_value}")
        print("df_tasks:")
        print(df_tasks.head())
        print("df_nodes:")
        print(df_nodes.head())
    if plot_bool:  # plot the result
        fig, axes = plt.subplots(1 + 2 * num_fog_nodes, 1, figsize=(12, 30))
        fig.suptitle('Figures')
        # plot the social welfare (pd.concat silently drops the initial None)
        result_df = None
        for item in result_sarsa_list:
            result_sarsa_y = item.copy()
            x_list = range(len(result_sarsa_y))
            auction_df = pd.DataFrame({
                'algorithm': 'reverse reverse_auction',
                'steps': x_list,
                'average_social_welfare (recent 100 tasks)': result_sarsa_y
            })
            result_df = pd.concat([result_df, auction_df])
        sns.lineplot(ax=axes[0], data=result_df, x="steps",
                     y="average_social_welfare (recent 100 tasks)")
        # plot the learned average rewards of each node
        # (label now names the actual node instead of always "node 1")
        for i in range(num_fog_nodes):
            avg_reward = agents_list[i].list_avg_reward
            x_list = range(len(avg_reward))
            reward_col = f'average rewards of node {i}'
            avg_reward_df = pd.DataFrame({
                'steps': x_list,
                reward_col: avg_reward
            })
            sns.lineplot(ax=axes[i + 1], data=avg_reward_df, x="steps",
                         y=reward_col)
        # plot the actions taken by each node
        for i in range(num_fog_nodes):
            actions_of_i = actions[i]
            x_list = range(len(actions_of_i))
            actions_of_i_df = pd.DataFrame({
                'steps': x_list,
                'action options': actions_of_i
            })
            sns.lineplot(ax=axes[i + num_fog_nodes + 1], data=actions_of_i_df,
                         x='steps', y='action options')
        plt.show()
    return sw_list, total_value, df_tasks, df_nodes, agents_list, allocation_scheme
# execute sarsa (do not update weights)
def execute_multi_agent_sarsa(avg_resource_capacity, avg_unit_cost,
                              number_of_runs=50, total_number_of_steps=500,
                              num_fog_nodes=6,
                              resource_coefficient_original=3,
                              plot_bool=False, num_actions=4, time_length=100,
                              high_value_proportion=0.2, high_value_slackness=0,
                              low_value_slackness=6,
                              valuation_coefficient_ratio=10,
                              resource_ratio=1.2, agents_list=None,
                              bool_decay=True, training_seed=0, verbose=False,
                              auction_type="second-price"):
    """Execute trained multi-agent SARSA policies with frozen weights.

    Runs ``number_of_runs`` trials (trial j uses random seed j); each trial
    regenerates tasks, resets the agents' states, and runs the reverse
    auction without updating any weights.

    Args:
        :param avg_resource_capacity: average resource capacity of the fog
            nodes (forwarded to the synthetic-data generator)
        :param avg_unit_cost: average unit cost of the fog nodes (forwarded
            to the synthetic-data generator)
        :param number_of_runs: number of trials
        :param total_number_of_steps: steps of RL (allocate how many tasks)
        :param num_fog_nodes: number of fog nodes
        :param resource_coefficient_original: a coefficient for computing the resource coefficient
        :param plot_bool: whether plot the results; when False, cumulative
            social welfare results are written under ../simulation_results/
        :param num_actions: number of actions
        :param time_length: tasks arrive within this time length
        :param high_value_proportion: the proportion of high-value tasks
        :param high_value_slackness: deadline slackness of high-value tasks
        :param low_value_slackness: deadline slackness of low-value tasks
        :param valuation_coefficient_ratio: valuation coefficient ratio between high-value
            and low-value tasks
        :param resource_ratio: resource demand ratio between high-value and low-value tasks
        :param agents_list: a list of trained agents (required; weights are
            read but never updated)
        :param bool_decay: selects the "_decay" suffix of the result file name
        :param training_seed: seed used during training (only names the result file)
        :param verbose: whether print the details of the execution
        :param auction_type: the type of the reverse auction

    Returns:
        Tuple of (sw_list, total_value, df_tasks, df_nodes, agents_list,
        allocation_scheme) from the LAST trial.
    """
    # run the trials
    result_sarsa_list = []  # a list of the lists of average rewards for each step of sarsa
    social_welfare_list = []  # a list of total social welfare of each trial
    # record the allocation scheme
    allocation_scheme = pd.DataFrame(
        columns=['node_id', 'start_time', 'end_time'])
    for j in tqdm(range(number_of_runs)):  # run all the trials
        if verbose:
            print(f"run ID = {j}")
        sw_list = []  # a list of social welfare after a new task arrives
        np.random.seed(j)
        # generate the two types of tasks
        number_of_tasks = total_number_of_steps
        # compute the resource coefficient
        resource_coefficient = (
                resource_coefficient_original * number_of_tasks / time_length)
        # generate the synthetic data for simulations
        df_tasks, df_nodes, n_time, n_tasks, num_fog_nodes = \
            generate_synthetic_data_edge_cloud(avg_resource_capacity,
                                               avg_unit_cost, n_tasks=total_number_of_steps,
                                               n_time=time_length,
                                               seed=j, n_nodes=num_fog_nodes,
                                               p_high_value_tasks=high_value_proportion,
                                               high_value_slackness_lower_limit=high_value_slackness,
                                               high_value_slackness_upper_limit=high_value_slackness + 2,
                                               low_value_slackness_lower_limit=low_value_slackness,
                                               low_value_slackness_upper_limit=low_value_slackness + 2,
                                               resource_demand_high=resource_ratio,
                                               vc_ratio=valuation_coefficient_ratio,
                                               k_resource=resource_coefficient)
        if verbose:
            print("resource coefficient: ", resource_coefficient)
            print(f"low value slackness = {low_value_slackness}")
            print(f"high value slackness = {high_value_slackness}")
            print("df_tasks:")
            print(df_tasks.head())
            print("df_nodes:")
            print(df_nodes.head())
        # total value of all tasks (an upper bound on achievable social welfare)
        total_value = 0
        for i in range(total_number_of_steps):
            total_value += (df_tasks.loc[i, "valuation_coefficient"] *
                            df_tasks.loc[i, "usage_time"])
        if verbose:
            print(f"total_number_of_steps={total_number_of_steps}")
            print(f"total value of tasks = {total_value}")
        mdp = ReverseAuctionMDP(df_tasks, df_nodes,
                                num_nodes=num_fog_nodes,
                                num_actions=num_actions)  # several fog nodes
        # reset the states of the fog node agents
        for i in range(mdp.num_fog_nodes):
            agents_list[i].reset_state(df_tasks)
        # actions taken by each node
        actions = {i: [] for i in range(mdp.num_fog_nodes)}
        # the reverse auction
        for k in tqdm(range(total_number_of_steps)):
            # fog nodes decide their bidding price, and allocation scheme for the current task
            if verbose:
                print()
                print(f"step: {k}")
            bids_list = []  # bidding price for one time step
            max_usage_time_list = []  # maximum usage time a fog node can offer
            start_time_list = []  # start time according to the planned allocation
            relative_start_time_list = []  # relative start time according to the current task
            for i in range(mdp.num_fog_nodes):
                (bidding_price, max_usage_time, relative_start_time, action) = \
                    agents_list[i].differential_sarsa_decide_action(
                        verbose=verbose)
                # transfer relative start_time to absolute start_time
                start_time = int(
                    df_tasks.loc[k, 'arrive_time'] + relative_start_time + 1)
                bids_list.append(bidding_price)
                max_usage_time_list.append(max_usage_time)
                start_time_list.append(start_time)
                relative_start_time_list.append(relative_start_time)
                actions[i].append(action)
            # find the winner
            (winner_index, winner_num_time, winner_utility, max_utility) = \
                mdp.step(bids_list, max_usage_time_list, start_time_list,
                         verbose=verbose,
                         auction_type=auction_type)
            if verbose:
                print()
                print(f"nodes' bids = {bids_list}")
                print(f"nodes' usage times = {max_usage_time_list}")
                print(f"nodes' start times = {start_time_list}")
                print(f"winner's index = {winner_index}")
                print(f"number of usage time = {winner_num_time}")
                print(f"winner's utility = {winner_utility}")
                print(f"user's utility = {max_utility}")
            # a list of social welfare after a new task arrives
            sw_list.append(mdp.social_welfare)
            # modify the overall allocation scheme
            if winner_num_time is not None and winner_num_time > 0:
                allocation_scheme.loc[k] = [winner_index,
                                            start_time_list[winner_index],
                                            start_time_list[winner_index] + winner_num_time - 1]
            else:  # the task is rejected
                allocation_scheme.loc[k] = [None, None, None]
            # Walk the agents through the state transition WITHOUT updating
            # weights (bool_update_weights=False keeps this pure evaluation).
            if k < total_number_of_steps - 1:
                for i in range(mdp.num_fog_nodes):
                    if i == winner_index:  # if fog node i wins this task
                        agents_list[i].differential_sarsa_update_weights(1,
                            max_usage_time_list[i],
                            relative_start_time_list[i],
                            winner_revenue=winner_utility,
                            bool_update_weights=False, verbose=verbose)
                    else:  # if fog node i loses the reverse auction
                        agents_list[i].differential_sarsa_update_weights(0,
                            max_usage_time_list[i],
                            relative_start_time_list[i],
                            winner_revenue=winner_utility,
                            bool_update_weights=False, verbose=verbose)
            else:
                if verbose:
                    print("This is the last task.")  # no need to update weights
        if verbose:
            print(f"social welfare = {sw_list[-1]}")
        social_welfare_list.append(sw_list[-1])
        # generate a list of average rewards
        average_reward_sarsa_list = []
        for i in range(total_number_of_steps):
            average_reward = sw_list[i] / (i + 1)
            average_reward_sarsa_list.append(average_reward)
        result_sarsa_list.append(average_reward_sarsa_list.copy())
        if plot_bool:  # plot the result
            fig, axes = plt.subplots(1 + num_fog_nodes, 1, figsize=(12, 30))
            fig.suptitle('Figures')
            # plot the social welfare (pd.concat silently drops the initial None)
            result_df = None
            for item in result_sarsa_list:
                result_sarsa_y = item.copy()
                x_list = range(len(result_sarsa_y))
                auction_df = pd.DataFrame({
                    'algorithm': 'reverse reverse_auction',
                    'steps': x_list,
                    'average_social_welfare': result_sarsa_y
                })
                result_df = pd.concat([result_df, auction_df])
            sns.lineplot(ax=axes[0], data=result_df, x="steps",
                         y="average_social_welfare")
            # plot the actions taken by each node
            for i in range(num_fog_nodes):
                actions_of_i = actions[i]
                x_list = range(len(actions_of_i))
                actions_of_i_df = pd.DataFrame({
                    'steps': x_list,
                    'action options': actions_of_i
                })
                sns.lineplot(ax=axes[i + 1], data=actions_of_i_df,
                             x='steps', y='action options')
            plt.show()
        else:  # save the result for jupyter notebook
            # The two former branches differed only in the "_decay" suffix;
            # build the file name once instead.
            suffix = '_decay' if bool_decay else ''
            out_path = (f'../simulation_results/auction_v1_{j + 1}trials'
                        f'_rc={resource_coefficient_original}_seed={training_seed}{suffix}.txt')
            with open(out_path, 'w') as f:
                f.write(json.dumps(social_welfare_list))
    return sw_list, total_value, df_tasks, df_nodes, agents_list, allocation_scheme
if __name__ == "__main__":
    # code for running simulations: run one training pass and plot the
    # average rewards.
    number_of_steps = 10000
    # Tasks arrive within a quarter of the step count. Use integer division:
    # the previous "/" produced a float time length.
    time_length = number_of_steps // 4
    num_actions = 4
    epsilons_tuple = (0.2, 0.1, 0.05)
    # Three phases to match epsilon_tuple: train_multi_agent_sarsa indexes
    # epsilon_steps_tuple[2], so the former 2-element tuple (3000, 1000)
    # raised IndexError once the step count reached 4000.
    epsilon_steps_tuple = (3000, 1000, 1000)
    resource_coefficient_original = 3
    # NOTE: the former call also passed number_of_runs=1, which
    # train_multi_agent_sarsa does not accept (it raised TypeError).
    train_multi_agent_sarsa(alpha=0.02, beta=0.01, epsilon_tuple=epsilons_tuple,
                            time_length=time_length,
                            epsilon_steps_tuple=epsilon_steps_tuple,
                            num_actions=num_actions,
                            total_number_of_steps=number_of_steps,
                            num_fog_nodes=6,
                            resource_coefficient_original=resource_coefficient_original,
                            plot_bool=True)
| 44.568384
| 99
| 0.620118
| 3,215
| 25,092
| 4.546501
| 0.089269
| 0.027365
| 0.025792
| 0.027092
| 0.866457
| 0.844291
| 0.825409
| 0.812752
| 0.805295
| 0.797086
| 0
| 0.012586
| 0.303364
| 25,092
| 562
| 100
| 44.647687
| 0.823627
| 0.256177
| 0
| 0.73028
| 0
| 0
| 0.100435
| 0.027149
| 0
| 0
| 0
| 0.001779
| 0
| 1
| 0.005089
| false
| 0.002545
| 0.022901
| 0
| 0.033079
| 0.124682
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
571296272203dc0c0b7c954a4d9cab1957485d71
| 35,044
|
py
|
Python
|
convcap.py
|
Zjut-MultimediaPlus/BCIC
|
22b93b37d4b272db3c09dbd767bb1b89e394569f
|
[
"Apache-2.0"
] | null | null | null |
convcap.py
|
Zjut-MultimediaPlus/BCIC
|
22b93b37d4b272db3c09dbd767bb1b89e394569f
|
[
"Apache-2.0"
] | null | null | null |
convcap.py
|
Zjut-MultimediaPlus/BCIC
|
22b93b37d4b272db3c09dbd767bb1b89e394569f
|
[
"Apache-2.0"
] | null | null | null |
import sys
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import torch.optim as optim
from torch.autograd import Variable
#import misc.utils as utils
import os
from torch.nn.utils.rnn import PackedSequence, pack_padded_sequence, pad_packed_sequence
def Conv1d(in_channels, out_channels, kernel_size, padding, dropout=0):
    """Build a weight-normalised 1-D convolution.

    Weights are drawn from N(0, std) with std = sqrt(4*(1-dropout) /
    (kernel_size*in_channels)); biases start at zero.
    """
    conv = nn.Conv1d(in_channels, out_channels, kernel_size, padding=padding)
    # Variance-scaled init, compensating for dropout.
    variance = (4 * (1.0 - dropout)) / (kernel_size * in_channels)
    conv.weight.data.normal_(mean=0, std=math.sqrt(variance))
    conv.bias.data.zero_()
    return nn.utils.weight_norm(conv)
def Embedding(num_embeddings, embedding_dim, padding_idx):
    """Embedding table with weights initialised from N(0, 0.1)."""
    table = nn.Embedding(num_embeddings, embedding_dim, padding_idx=padding_idx)
    table.weight.data.normal_(0, 0.1)
    return table
def Linear(in_features, out_features, dropout=0.):
    """Weight-normalised linear layer with variance-scaled init.

    Weights are drawn from N(0, sqrt((1-dropout)/in_features)); biases start
    at zero.
    """
    layer = nn.Linear(in_features, out_features)
    std = math.sqrt((1 - dropout) / in_features)
    layer.weight.data.normal_(mean=0, std=std)
    layer.bias.data.zero_()
    return nn.utils.weight_norm(layer)
class AttentionLayer(nn.Module):
    """Dot-product attention of decoder conv states over image features,
    with variance-scaled (sqrt(0.5)) residual combinations."""

    def __init__(self, conv_channels, embed_dim):
        super(AttentionLayer, self).__init__()
        # Project conv states into the embedding space and back again.
        self.in_projection = Linear(conv_channels, embed_dim)
        self.out_projection = Linear(embed_dim, conv_channels)
        self.bmm = torch.bmm

    def forward(self, x, wordemb, imgsfeats):
        """Attend over the image feature map.

        :param x: decoder conv states; assumed (batch, steps, conv_channels)
            from the bmm shapes below — TODO confirm against the caller.
        :param wordemb: word embeddings added to the projected states.
        :param imgsfeats: image feature map of shape (batch, channels, H, W).
        :return: tuple (attended states, attention scores).
        """
        residual = x
        # Mix conv state and word embedding; sqrt(0.5) keeps the variance
        # of the sum comparable to its inputs.
        x = (self.in_projection(x) + wordemb) * math.sqrt(0.5)

        b, c, f_h, f_w = imgsfeats.size()
        # Flatten the spatial grid into a sequence of H*W feature columns.
        y = imgsfeats.view(b, c, f_h*f_w)

        # Attention logits: (b, steps, c) x (b, c, H*W) -> (b, steps, H*W).
        x = self.bmm(x, y)

        sz = x.size()
        # Softmax over the spatial positions (last dimension).
        x = F.softmax(x.view(sz[0] * sz[1], sz[2]), dim=1)
        x = x.view(sz)
        attn_scores = x

        y = y.permute(0, 2, 1)
        # Weighted sum of image features: (b, steps, H*W) x (b, H*W, c).
        x = self.bmm(x, y)

        # Rescale by s * sqrt(1/s) = sqrt(s) to compensate for the
        # averaging effect of attention over s positions.
        s = y.size(1)
        x = x * (s * math.sqrt(1.0 / s))

        # Residual connection, again variance-scaled by sqrt(0.5).
        x = (self.out_projection(x) + residual) * math.sqrt(0.5)

        return x, attn_scores
def sort_pack_padded_sequence(input, lengths):
    """Sort a padded batch by decreasing length and pack it.

    Returns the packed sequence together with the inverse permutation that
    restores the original batch order (see pad_unsort_packed_sequence).
    """
    sorted_lengths, order = torch.sort(lengths, descending=True)
    packed = pack_padded_sequence(input[order], sorted_lengths, batch_first=True)
    # Invert the sorting permutation: inverse[order[i]] = i.
    inverse = order.clone()
    inverse[order] = torch.arange(0, len(order)).type_as(inverse)
    return packed, inverse
def pad_unsort_packed_sequence(input, inv_ix):
    """Unpack a packed sequence and restore the original batch order."""
    padded, _ = pad_packed_sequence(input, batch_first=True)
    # Undo the length-sorting permutation applied before packing.
    return padded[inv_ix]
def pack_wrapper(module, att_feats, att_masks):
    """Apply ``module`` to attention features, packing by mask lengths.

    When ``att_masks`` is None the module is applied directly; otherwise the
    features are length-sorted and packed first, and the result is unpacked
    back into the original batch order.
    """
    if att_masks is None:
        return module(att_feats)
    lengths = att_masks.data.long().sum(1)
    packed, inv_ix = sort_pack_padded_sequence(att_feats, lengths)
    mapped = PackedSequence(module(packed[0]), packed[1])
    return pad_unsort_packed_sequence(mapped, inv_ix)
class convcap_G(nn.Module):
    """Generator: convolutional captioning network (convcap-style) that maps
    image features plus a partial caption to per-position word logits.

    num_wordclass: vocabulary size (input word classes and output logits).
    num_layers:    number of causal conv (+ optional attention) blocks.
    is_attention:  if True, each block attends over the spatial image grid.
    nfeats:        internal channel width.
    dropout:       dropout probability used throughout.
    """
    #def __init__(self, num_wordclass, num_layers=1, is_attention=True, nfeats=512, dropout=0.1):
    def __init__(self, num_wordclass, num_layers=1, is_attention=True, nfeats=512, dropout=0.1):
        super(convcap_G, self).__init__()
        self.nimgfeats = 2048  # width of the global image feature (imgsfc7)
        self.is_attention = is_attention
        self.nfeats = nfeats
        self.dropout = dropout
        # Word embedding followed by a weight-normalised projection.
        self.emb_0 = Embedding(num_wordclass, nfeats, padding_idx=0) # Linear(9221, 512)
        self.emb_1 = Linear(nfeats, nfeats, dropout=dropout)
        # Project the global image feature down to the decoder width.
        self.imgproj = Linear(self.nimgfeats, self.nfeats, dropout=dropout)
        # Residual projection for layer 0, whose input is
        # [word emb ; image emb] and hence twice as wide.
        self.resproj = Linear(nfeats*2, self.nfeats, dropout=dropout)
        n_in = 2 * self.nfeats
        n_out = self.nfeats
        self.n_layers = num_layers
        self.convs = nn.ModuleList()
        self.attention = nn.ModuleList()
        self.kernel_size = 5
        # Left pad of (kernel_size - 1) keeps the conv causal; forward()
        # trims the trailing pad after each conv.
        self.pad = self.kernel_size - 1
        for i in range(self.n_layers):
            # 2*n_out channels feed the GLU gate in forward().
            self.convs.append(Conv1d(n_in, 2*n_out, self.kernel_size, self.pad, dropout))
            if(self.is_attention):
                self.attention.append(AttentionLayer(n_out, nfeats))
            n_in = n_out
        self.classifier_0 = Linear(self.nfeats, (nfeats // 2))
        self.classifier_1 = Linear((nfeats // 2), num_wordclass, dropout=dropout)
        # NOTE(review): the block below is dead code (a bare string literal
        # used as a comment).  The scene-graph helpers further down
        # (prepare_rela_feats, rela_graph_gfc, merge_rela_att, clip_att,
        # _prepare_feature) reference attributes defined only here
        # (rnn_size, seq_per_img, index_eval, embed, ...) and will raise
        # AttributeError unless this block is re-enabled — confirm before
        # calling those methods.
        '''
        self.input_encoding_size = 512
        self.rnn_size = 512
        self.drop_prob_lm = 0.5
        self.fc_feat_size = 2048
        self.att_feat_size = 2048
        self.att_hid_size = 512
        self.seq_per_img = 5
        self.index_eval = 0
        self.use_rela = False
        self.vocab_size = 14964
        self.use_bn = False
        self.fc_embed = nn.Sequential(nn.Linear(self.fc_feat_size, self.rnn_size),
                                      nn.ReLU(inplace=True),
                                      nn.Dropout(self.drop_prob_lm))
        self.att_embed = nn.Sequential(*(
            ((nn.BatchNorm1d(self.att_feat_size),) if self.use_bn else ())+
            (nn.Linear(self.att_feat_size, self.rnn_size),
             nn.ReLU(inplace=True),
             nn.Dropout(self.drop_prob_lm))+
            ((nn.BatchNorm1d(self.rnn_size),) if self.use_bn==2 else ())))
        self.embed = nn.Sequential(nn.Embedding(self.vocab_size + 1, self.input_encoding_size),
                                   nn.ReLU(inplace=True),
                                   nn.Dropout(self.drop_prob_lm))
        self.embed2vis = nn.Sequential(nn.Linear(self.input_encoding_size, self.rnn_size),
                                       nn.ReLU(inplace=True),
                                       nn.Dropout(self.drop_prob_lm))
        self.rela_sbj_rela_fc = nn.Sequential(nn.Linear(self.rnn_size*3, self.rnn_size),
                                              nn.ReLU(inplace=True),
                                              nn.Dropout(self.drop_prob_lm))
        self.rela_obj_rela_fc = nn.Sequential(nn.Linear(self.rnn_size*3, self.rnn_size),
                                              nn.ReLU(inplace=True),
                                              nn.Dropout(self.drop_prob_lm))
        self.rela_rela_fc = nn.Sequential(nn.Linear(self.rnn_size*3, self.rnn_size),
                                          nn.ReLU(inplace=True),
                                          nn.Dropout(self.drop_prob_lm))
        self.rela_attr_fc = nn.Sequential(nn.Linear(self.rnn_size*2, self.rnn_size),
                                          nn.ReLU(inplace=True),
                                          nn.Dropout(self.drop_prob_lm))
        self.rela_ctx2att = nn.Linear(self.rnn_size, self.att_hid_size) ## nn.Linear(512, 512)
        '''

    def clip_att(self, att_feats, att_masks):
        """Trim att_feats / att_masks to the longest valid row in the batch."""
        # Clip the length of att_masks and att_feats to the maximum length
        if att_masks is not None:
            max_len = att_masks.data.long().sum(1).max()
            att_feats = att_feats[:, :max_len].contiguous()
            att_masks = att_masks[:, :max_len].contiguous()
        return att_feats, att_masks

    def _prepare_feature(self, fc_feats, att_feats, att_masks):
        """Embed the global (fc) and per-region (att) features.

        NOTE(review): relies on self.fc_embed / self.att_embed from the
        disabled block in __init__ — see note there.
        """
        # embed fc and att feats
        fc_feats = self.fc_embed(fc_feats)
        att_feats = pack_wrapper(self.att_embed, att_feats, att_masks)
        return fc_feats, att_feats

    def prepare_rela_feats(self, rela_data):
        """
        Change relationship index (one-hot) to relationship features, or change relationship
        probability to relationship features.
        :param rela_matrix:
        :param rela_masks:
        :return: rela_features, [N_img*5, N_rela_max, rnn_size]
        """
        rela_matrix = rela_data['rela_matrix']
        rela_masks = rela_data['rela_masks']
        rela_feats_size = rela_matrix.size()
        N_att = rela_feats_size[0]
        # In eval mode each image contributes one row, not seq_per_img rows.
        if self.index_eval == 1:
            seq_per_img = 1
        else:
            seq_per_img = self.seq_per_img
        N_img = N_att/seq_per_img
        rela_feats = torch.zeros([rela_feats_size[0], rela_feats_size[1], self.rnn_size]).cuda()
        for img_id in range(int(N_img)):
            # Number of valid relations for this image (mask row sums to it).
            N_rela = torch.sum(rela_masks[img_id * seq_per_img, :])
            N_rela = int(N_rela)
            if N_rela>0:
                # Column 2 holds the relationship's word index.
                rela_index = rela_matrix[img_id*seq_per_img,:N_rela,2].cuda().long()
                rela_feats_temp = self.embed(rela_index)
                rela_feats_temp = self.embed2vis(rela_feats_temp)
                # Broadcast the same relation features to all captions of the image.
                rela_feats[img_id*seq_per_img:(img_id+1)*seq_per_img,:N_rela,:] = rela_feats_temp
        rela_data['rela_feats'] = rela_feats
        return rela_data

    def rela_graph_gfc(self, rela_data):
        """
        :param att_feats: roi features of each bounding box, [N_img*5, N_att_max, rnn_size]
        :param rela_feats: the embeddings of relationship, [N_img*5, N_rela_max, rnn_size]
        :param rela_matrix: relationship matrix, [N_img*5, N_rela_max, 3], N_img
               is the batch size, N_rela_max is the maximum number
               of relationship in rela_matrix.
        :param rela_masks: relationship masks, [N_img*5, N_rela_max].
               For each row, the sum of that row is the total number
               of realtionship.
        :param att_masks: attention masks, [N_img*5, N_att_max].
               For each row, the sum of that row is the total number
               of roi poolings.
        :param attr_matrix: attribute matrix,[N_img*5, N_attr_max, N_attr_each_max]
               N_img is the batch size, N_attr_max is the maximum number
               of attributes of one mini-batch, N_attr_each_max is the
               maximum number of attributes of each objects in that mini-batch
        :param attr_masks: attribute masks, [N_img*5, N_attr_max, N_attr_each_max]
               the sum of attr_masks[img_id*5,:,0] is the number of objects
               which own attributes, the sum of attr_masks[img_id*5, obj_id, :]
               is the number of attribute that object has
        :return: att_feats_new: new roi features
                 rela_feats_new: new relationship embeddings
                 attr_feats_new: new attribute features
        """
        att_feats = rela_data['att_feats']
        att_masks = rela_data['att_masks']
        rela_matrix = rela_data['rela_matrix']
        rela_feats = rela_data['rela_feats']
        rela_masks = rela_data['rela_masks']
        attr_matrix = rela_data['attr_matrix']
        attr_masks = rela_data['attr_masks']
        att_feats_size = att_feats.size()
        attr_masks_size = attr_masks.size()
        N_att = att_feats_size[0]
        if self.index_eval == 1:
            seq_per_img = 1
        else:
            seq_per_img = self.seq_per_img
        N_img = N_att / seq_per_img
        att_feats_new = att_feats.clone()
        rela_feats_new = rela_feats.clone()
        attr_feats_new = torch.zeros([attr_masks_size[0], attr_masks_size[1], self.rnn_size]).cuda()
        for img_id in range(int(N_img)):
            N_rela = torch.sum(rela_masks[img_id * seq_per_img, :])
            #N_box = torch.sum(att_masks[img_id * seq_per_img, :])
            N_rela = int(N_rela)
            #N_box = int(N_box)
            #box_num = np.ones([N_box,])
            # rela_num counts contributions per relation for later averaging.
            rela_num = np.ones([N_rela,])
            for i in range(N_rela):
                # rela_matrix columns: [subject box id, object box id, relation word].
                sub_id = rela_matrix[img_id * seq_per_img, i, 0]
                sub_id = int(sub_id)
                #box_num[sub_id] += 1.0
                obj_id = rela_matrix[img_id * seq_per_img, i, 1]
                obj_id = int(obj_id)
                #box_num[obj_id] += 1.0
                rela_id = i
                rela_num[rela_id] += 1.0
                sub_feat_use = att_feats[img_id * seq_per_img, sub_id, :]
                obj_feat_use = att_feats[img_id * seq_per_img, obj_id, :]
                rela_feat_use = rela_feats[img_id * seq_per_img, rela_id, :]
                # Graph message passing: each (subject, object, relation)
                # triple updates the features of all three participants.
                att_feats_new[img_id * seq_per_img: (img_id + 1) * seq_per_img, sub_id, :] += \
                    self.rela_sbj_rela_fc(torch.cat((sub_feat_use, obj_feat_use, rela_feat_use)))
                att_feats_new[img_id * seq_per_img: (img_id + 1) * seq_per_img, obj_id, :] += \
                    self.rela_obj_rela_fc(torch.cat((sub_feat_use, obj_feat_use, rela_feat_use)))
                rela_feats_new[img_id * seq_per_img: (img_id + 1) * seq_per_img, rela_id, :] += \
                    self.rela_rela_fc(torch.cat((sub_feat_use, obj_feat_use, rela_feat_use)))
            N_obj_attr = torch.sum(attr_masks[img_id * seq_per_img, :, 0])
            N_obj_attr = int(N_obj_attr)
            for i in range(N_obj_attr):
                # attr_matrix row layout: [object box id, attr word 1, attr word 2, ...].
                attr_obj_id = int(attr_matrix[img_id * seq_per_img, i, 0])
                obj_feat_use = att_feats[img_id * seq_per_img, int(attr_obj_id), :]
                N_attr_each = torch.sum(attr_masks[img_id * seq_per_img, i, :])
                for j in range(N_attr_each-1):
                    attr_index = attr_matrix[img_id * seq_per_img, i, j+1].cuda().long()
                    attr_feat_use = self.embed(attr_index)
                    attr_feat_use = self.embed2vis(attr_feat_use)
                    attr_feats_new[img_id * seq_per_img:(img_id+1) * seq_per_img, i, :] += \
                        self.rela_attr_fc( torch.cat((attr_feat_use, obj_feat_use)) )
                # Average over the object's attributes (first mask column is
                # the object itself, hence the -1).
                attr_feats_new[img_id * seq_per_img:(img_id+1) * seq_per_img, i, :] = \
                    attr_feats_new[img_id * seq_per_img:(img_id+1) * seq_per_img, i, :]/(float(N_attr_each)-1)
            # for i in range(N_box):
            #     att_feats_new[img_id * seq_per_img: (img_id + 1) * seq_per_img, i] = \
            #         att_feats_new[img_id * seq_per_img: (img_id + 1) * seq_per_img, i]/box_num[i]
            # Average accumulated relation updates (rela_num includes the
            # original feature, so it is 1 + number of updates).
            for i in range(N_rela):
                rela_feats_new[img_id * seq_per_img: (img_id + 1) * seq_per_img, i, :] = \
                    rela_feats_new[img_id * seq_per_img: (img_id + 1) * seq_per_img, i, :]/rela_num[i]
        rela_data['att_feats'] = att_feats_new
        rela_data['rela_feats'] = rela_feats_new
        rela_data['attr_feats'] = attr_feats_new
        return rela_data

    def merge_rela_att(self, rela_data):
        """
        merge attention features (roi features) and relationship features together
        :param att_feats: [N_att, N_att_max, rnn_size]
        :param att_masks: [N_att, N_att_max]
        :param rela_feats: [N_att, N_rela_max, rnn_size]
        :param rela_masks: [N_att, N_rela_max]
        :return: att_feats_new: [N_att, N_att_new_max, rnn_size]
                 att_masks_new: [N_att, N_att_new_max]
        """
        att_feats = rela_data['att_feats']
        att_masks = rela_data['att_masks']
        rela_feats = rela_data['rela_feats']
        rela_masks = rela_data['rela_masks']
        attr_feats = rela_data['attr_feats']
        attr_masks = rela_data['attr_masks']
        att_feats_size = att_feats.size()
        N_att = att_feats_size[0]
        if self.index_eval == 1:
            seq_per_img = 1
        else:
            seq_per_img = self.seq_per_img
        N_img = N_att/seq_per_img
        # Widest merged row in the batch: boxes + relations + attributed objects.
        N_att_new_max = -1
        for img_id in range(int(N_img)):
            N_att_new_max = \
                max(N_att_new_max,torch.sum(rela_masks[img_id * seq_per_img, :]) +
                    torch.sum(att_masks[img_id * seq_per_img, :]) + torch.sum(attr_masks[img_id * seq_per_img,:,0]))
        att_masks_new = torch.zeros([N_att, int(N_att_new_max)]).cuda()
        att_feats_new = torch.zeros([N_att, int(N_att_new_max), self.rnn_size]).cuda()
        for img_id in range(int(N_img)):
            N_rela = int(torch.sum(rela_masks[img_id * seq_per_img, :]))
            N_box = int(torch.sum(att_masks[img_id * seq_per_img, :]))
            N_attr = int(torch.sum(attr_masks[img_id * seq_per_img,:,0]))
            # Row layout: [box feats | relation feats | attribute feats].
            att_feats_new[img_id * seq_per_img:(img_id + 1) * seq_per_img, 0:N_box, :] = \
                att_feats[img_id * seq_per_img:(img_id + 1) * seq_per_img, 0:N_box, :]
            if N_rela > 0:
                att_feats_new[img_id * seq_per_img:(img_id + 1) * seq_per_img, N_box:N_box + N_rela, :] = \
                    rela_feats[img_id * seq_per_img:(img_id + 1) * seq_per_img, 0:N_rela, :]
            if N_attr > 0:
                att_feats_new[img_id * seq_per_img:(img_id + 1) * seq_per_img, N_box + N_rela: N_box + N_rela + N_attr, :] = \
                    attr_feats[img_id * seq_per_img:(img_id + 1) * seq_per_img, 0:N_attr, :]
            att_masks_new[img_id * seq_per_img:(img_id + 1) * seq_per_img, 0:N_box] = 1
            if N_rela > 0:
                att_masks_new[img_id * seq_per_img:(img_id + 1) * seq_per_img, N_box:N_box + N_rela] = 1
            if N_attr > 0:
                att_masks_new[img_id * seq_per_img:(img_id + 1) * seq_per_img, N_box + N_rela:N_box + N_rela + N_attr] = 1
        rela_data['att_feats_new'] = att_feats_new
        rela_data['att_masks_new'] = att_masks_new
        return rela_data

    #def forward(self, fc_feats, att_feats, att_masks, rela_data, use_rela, imgsfeats, imgsfc7, wordclass):
    def forward(self, imgsfeats, imgsfc7, wordclass, rela_data):
        """Predict word logits for every caption position.

        imgsfeats: spatial conv feature map for attention, (B, C, H, W)
                   per AttentionLayer's flattening — TODO confirm at caller.
        imgsfc7:   global image feature, (B, nimgfeats).
        wordclass: caption word indices, (B, maxtokens).
        rela_data: unused in the current forward path.
        Returns (logits of shape (B, num_wordclass, maxtokens), last attention map).
        """
        # caption word -> (100, 512, 15)
        attn_buffer = None
        wordemb = self.emb_0(wordclass) ## Embedding(9221, 512) -> (100, 15, 512)
        wordemb = self.emb_1(wordemb) ## Linear(512, 512) -> (100, 15, 512)
        x = wordemb.transpose(2, 1) ## (100, 15, 512) -> (100, 512, 15)
        batchsize, wordembdim, maxtokens = x.size()
        # Broadcast the projected global image feature across all timesteps
        # and stack it under the word embedding channels.
        y = F.relu(self.imgproj(imgsfc7))
        y = y.unsqueeze(2).expand(batchsize, self.nfeats, maxtokens)
        x = torch.cat([x, y], 1)
        for i, conv in enumerate(self.convs):
            if(i == 0):
                # First layer's input is 2*nfeats wide; project the residual
                # down to nfeats (transposes because Linear acts on dim -1).
                x = x.transpose(2, 1)
                residual = self.resproj(x)
                residual = residual.transpose(2, 1)
                x = x.transpose(2, 1)
            else:
                residual = x
            x = F.dropout(x, p=self.dropout, training=self.training)
            x = conv(x)
            # Drop the trailing pad so the conv stays causal.
            x = x[:,:,:-self.pad]
            x = F.glu(x, dim=1)
            if (self.is_attention):
                attn = self.attention[i]
                x = x.transpose(2, 1)
                x, attn_buffer = attn(x, wordemb, imgsfeats)
                x = x.transpose(2, 1)
            x = (x+residual)*math.sqrt(.5)
        # Classifier head: nfeats -> nfeats//2 -> vocabulary.
        x = x.transpose(2, 1)
        x = self.classifier_0(x)
        x = F.dropout(x, p=self.dropout, training=self.training)
        x = self.classifier_1(x)
        x = x.transpose(2, 1)
        return x, attn_buffer
class convcap_D(convcap_G):
    """Discriminator-side convolutional captioning network.

    The original definition duplicated convcap_G line-for-line: identical
    __init__, clip_att, _prepare_feature, prepare_rela_feats,
    rela_graph_gfc, merge_rela_att and forward (the only textual difference
    was an extra commented-out block inside forward, which was dead code).
    Inheriting from convcap_G removes ~300 duplicated lines while keeping
    the public interface and runtime behavior identical:
    convcap_D(num_wordclass, num_layers=1, is_attention=True, nfeats=512,
    dropout=0.1) builds the same modules, and
    forward(imgsfeats, imgsfc7, wordclass, rela_data) returns the same
    (logits, attention) pair.
    """
    pass
| 46.293263
| 126
| 0.583438
| 5,129
| 35,044
| 3.637551
| 0.044648
| 0.046953
| 0.070429
| 0.047167
| 0.919012
| 0.90722
| 0.901967
| 0.893123
| 0.888407
| 0.888407
| 0
| 0.020948
| 0.307984
| 35,044
| 756
| 127
| 46.354497
| 0.748382
| 0.161369
| 0
| 0.85618
| 0
| 0
| 0.018463
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.049438
| false
| 0
| 0.022472
| 0
| 0.123596
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
5724a3c1a663a7ac9cce0159f09922e0cfe0d20a
| 52
|
py
|
Python
|
instance/config.py
|
Tellvinch/updater
|
3f72ac671c1d69ee5b88cad0d0c5ba6d99cb4e84
|
[
"MIT"
] | null | null | null |
instance/config.py
|
Tellvinch/updater
|
3f72ac671c1d69ee5b88cad0d0c5ba6d99cb4e84
|
[
"MIT"
] | null | null | null |
instance/config.py
|
Tellvinch/updater
|
3f72ac671c1d69ee5b88cad0d0c5ba6d99cb4e84
|
[
"MIT"
] | null | null | null |
# NOTE(review): hard-coded API credential committed to source control —
# load it from an environment variable (e.g. os.environ['NEWS_API_KEY'])
# and rotate this key, since it must be considered leaked.
NEWS_API_KEY = '<0ef96b496eb7419e8c763bdf082bea06>'
| 26
| 51
| 0.846154
| 4
| 52
| 10.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.387755
| 0.057692
| 52
| 1
| 52
| 52
| 0.469388
| 0
| 0
| 0
| 0
| 0
| 0.653846
| 0.653846
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
57278b454b8921bcfb07703a4fe14422b3396c25
| 10,609
|
py
|
Python
|
tests/app/main/views/test_service.py
|
alphagov/notify-api
|
16dbafbad69e5bb179ba4b2202a7afa299c88d61
|
[
"MIT"
] | 12
|
2015-10-06T08:58:28.000Z
|
2016-08-08T17:51:29.000Z
|
tests/app/main/views/test_service.py
|
gds-attic/notify-api
|
16dbafbad69e5bb179ba4b2202a7afa299c88d61
|
[
"MIT"
] | 1
|
2015-10-27T12:01:26.000Z
|
2015-10-27T12:01:26.000Z
|
tests/app/main/views/test_service.py
|
gds-attic/notify-api
|
16dbafbad69e5bb179ba4b2202a7afa299c88d61
|
[
"MIT"
] | 3
|
2016-05-31T17:40:15.000Z
|
2021-04-10T20:03:33.000Z
|
from flask import json
from . import uuid_regex
from app.models import User, Token
from datetime import datetime
from app import db
def test_should_be_able_to_deactivate_service(notify_api, notify_db, notify_db_session):
    """Deactivating an active service flips its 'active' flag off."""
    client = notify_api.test_client()
    auth_header = {'Authorization': 'Bearer 1234'}
    before = json.loads(client.get('/user/1234/service/1234', headers=auth_header).get_data())
    assert before['service']['active']
    after = json.loads(client.post('/service/1234/deactivate', headers=auth_header).get_data())
    assert not after['service']['active']
def test_should_be_able_to_activate_service(notify_api, notify_db, notify_db_session):
    """A deactivated service can be reactivated."""
    client = notify_api.test_client()
    auth_header = {'Authorization': 'Bearer 1234'}
    deactivated = json.loads(client.post('/service/1234/deactivate', headers=auth_header).get_data())
    assert not deactivated['service']['active']
    activated = json.loads(client.post('/service/1234/activate', headers=auth_header).get_data())
    assert activated['service']['active']
def test_should_be_able_to_restrict_service(notify_api, notify_db, notify_db_session):
    """Restricting an unrestricted service sets its 'restricted' flag."""
    client = notify_api.test_client()
    auth_header = {'Authorization': 'Bearer 1234'}
    before = json.loads(client.get('/user/1234/service/1234', headers=auth_header).get_data())
    assert not before['service']['restricted']
    after = json.loads(client.post('/service/1234/restrict', headers=auth_header).get_data())
    assert after['service']['restricted']
def test_should_be_able_to_unrestrict_service(notify_api, notify_db, notify_db_session):
    """A restricted service can be unrestricted again."""
    client = notify_api.test_client()
    auth_header = {'Authorization': 'Bearer 1234'}
    restricted = json.loads(client.post('/service/1234/restrict', headers=auth_header).get_data())
    assert restricted['service']['restricted']
    unrestricted = json.loads(client.post('/service/1234/unrestrict', headers=auth_header).get_data())
    assert not unrestricted['service']['restricted']
def test_should_be_able_to_get_service_by_id_and_user_id(notify_api, notify_db, notify_db_session):
    """Fetching a service by owner and service id returns its details and token."""
    response = notify_api.test_client().get(
        '/user/1234/service/1234',
        headers={'Authorization': 'Bearer 1234'})
    assert response.status_code == 200
    service = json.loads(response.get_data())['service']
    assert service['id'] == 1234
    assert service['name'] == 'service test'
    assert service['token']['token'] == '1234'
def test_should_be_able_to_get_service_as_platform_admin(notify_api, notify_db, notify_db_session):
    """A platform-admin user may read a service belonging to another user."""
    # Setup a dummy user for tests
    admin = User(
        id=9999,
        email_address="test-user@example-2.org",
        mobile_number="+449999123123",
        password='password',
        active=True,
        created_at=datetime.utcnow(),
        updated_at=datetime.utcnow(),
        password_changed_at=datetime.utcnow(),
        role='platform-admin'
    )
    db.session.add(admin)
    db.session.commit()
    response = notify_api.test_client().get(
        '/user/9999/service/1234',
        headers={'Authorization': 'Bearer 1234'})
    assert response.status_code == 200
    service = json.loads(response.get_data())['service']
    assert service['id'] == 1234
    assert service['name'] == 'service test'
    assert service['token']['token'] == '1234'
def test_should_be_able_to_get_all_services_for_a_user(notify_api, notify_db, notify_db_session):
    """Listing a user's services returns the single seeded service."""
    response = notify_api.test_client().get(
        '/user/1234/services',
        headers={'Authorization': 'Bearer 1234'})
    assert response.status_code == 200
    services = json.loads(response.get_data())['services']
    assert len(services) == 1
    assert services[0]['id'] == 1234
    assert services[0]['name'] == 'service test'
def test_should_be_able_to_get_all_services_as_platform_admin(notify_api, notify_db, notify_db_session):
    """A platform-admin user sees all services when listing."""
    # Setup a dummy user for tests
    admin = User(
        id=9999,
        email_address="test-user@example-2.org",
        mobile_number="+449999123123",
        password='password',
        active=True,
        created_at=datetime.utcnow(),
        updated_at=datetime.utcnow(),
        password_changed_at=datetime.utcnow(),
        role='platform-admin'
    )
    db.session.add(admin)
    db.session.commit()
    response = notify_api.test_client().get(
        '/user/9999/services',
        headers={'Authorization': 'Bearer 1234'})
    assert response.status_code == 200
    services = json.loads(response.get_data())['services']
    assert len(services) == 1
    assert services[0]['id'] == 1234
    assert services[0]['name'] == 'service test'
def test_should_return_empty_list_if_no_services_for_user(notify_api, notify_db, notify_db_session):
    """A user with no services gets 200 and an empty list."""
    response = notify_api.test_client().get(
        '/user/12345/services',
        headers={'Authorization': 'Bearer 1234'})
    assert response.status_code == 200
    assert len(json.loads(response.get_data())['services']) == 0
def test_should_be_a_404_of_non_int_org_id(notify_api, notify_db, notify_db_session):
    """A non-integer user id in the URL does not match the route -> 404."""
    response = notify_api.test_client().get(
        '/user/not-valid/services',
        headers={'Authorization': 'Bearer 1234'})
    assert response.status_code == 404
def test_should_be_able_to_create_a_service(notify_api, notify_db, notify_db_session):
    """POST /service creates an active, restricted service with a UUID token."""
    payload = {
        'service': {
            'userId': 1234,
            'name': 'my service'
        }
    }
    response = notify_api.test_client().post(
        '/service',
        data=json.dumps(payload),
        headers={'Authorization': 'Bearer 1234'},
        content_type='application/json')
    assert response.status_code == 201
    body = json.loads(response.get_data())
    assert 'service' in body
    assert body['service']['name'] == 'my service'
    assert body['service']['active']
    assert body['service']['restricted']
    assert body['service']['limit'] == 100
    assert uuid_regex.match(body['service']['token']['token'])
def test_should_not_be_able_to_create_service_on_client_token(notify_api, notify_db, notify_db_session):
    """Client-scoped tokens are not allowed to create services (403)."""
    db.session.add(Token(token='client', type='client'))
    db.session.commit()
    body = {'service': {'userId': 1234, 'name': 'my service'}}
    resp = notify_api.test_client().post(
        '/service',
        data=json.dumps(body),
        headers={'Authorization': 'Bearer client'},
        content_type='application/json')
    assert resp.status_code == 403
def test_should_reject_a_service_with_invalid_user(notify_api, notify_db, notify_db_session):
    """Creating a service for a non-existent user id is a 400 with an explicit error."""
    body = {'service': {'userId': 9999, 'name': 'this is ok'}}
    resp = notify_api.test_client().post(
        '/service',
        data=json.dumps(body),
        headers={'Authorization': 'Bearer 1234'},
        content_type='application/json')
    payload = json.loads(resp.get_data())
    assert resp.status_code == 400
    assert payload['error'] == 'failed to create service - invalid user'
def test_should_reject_an_invalid_service(notify_api, notify_db, notify_db_session):
    """Schema violations (wrong type, too-short name) are reported per field."""
    body = {'service': {'name': '1', 'userId': 'not-valid'}}
    resp = notify_api.test_client().post(
        '/service',
        data=json.dumps(body),
        headers={'Authorization': 'Bearer 1234'},
        content_type='application/json')
    payload = json.loads(resp.get_data())
    assert resp.status_code == 400
    assert payload['error'] == 'Invalid JSON'
    assert len(payload['error_details']) == 2
    assert {'key': 'userId', 'message': "'not-valid' is not of type 'integer'"} in payload['error_details']
    assert {'key': 'name', 'message': "'1' is too short"} in payload['error_details']
def test_should_reject_if_no_job_root_element(notify_api, notify_db, notify_db_session):
    """A JSON body without a 'service' root element is a 400."""
    resp = notify_api.test_client().post(
        '/service',
        data=json.dumps({}),
        content_type='application/json',
        headers={'Authorization': 'Bearer 1234'},
    )
    payload = json.loads(resp.get_data())
    assert payload['error'] == "Invalid JSON; must have service as root element"
    assert resp.status_code == 400
def test_should_be_able_to_get_multiple_services_by_user_id(notify_api, notify_db, notify_db_session):
    """After creating a second service, listing the user's services returns both."""
    create_resp = notify_api.test_client().post(
        '/service',
        data=json.dumps({'service': {'userId': 1234, 'name': 'my service'}}),
        headers={'Authorization': 'Bearer 1234'},
        content_type='application/json')
    assert create_resp.status_code == 201
    list_resp = notify_api.test_client().get(
        '/user/1234/services',
        headers={'Authorization': 'Bearer 1234'},
    )
    payload = json.loads(list_resp.get_data())
    assert list_resp.status_code == 200
    assert len(payload['services']) == 2
    assert payload['services'][0]['name'] == 'my service'
    assert payload['services'][1]['name'] == 'service test'
def test_should_be_a_404_if_service_does_not_exist(notify_api, notify_db, notify_db_session):
    """Fetching a non-existent service id for a user yields 404."""
    resp = notify_api.test_client().get(
        '/user/1234/service/12345',
        headers={'Authorization': 'Bearer 1234'},
    )
    assert resp.status_code == 404
def test_should_be_a_404_if_service_id_is_not_an_int(notify_api, notify_db, notify_db_session):
    """A non-integer service id does not match the route -> 404."""
    resp = notify_api.test_client().get(
        '/user/1234/service/invalid-id',
        headers={'Authorization': 'Bearer 1234'},
    )
    assert resp.status_code == 404
| 32.148485
| 104
| 0.609011
| 1,225
| 10,609
| 4.998367
| 0.10449
| 0.060265
| 0.048832
| 0.07137
| 0.850727
| 0.828352
| 0.820839
| 0.809244
| 0.785236
| 0.751592
| 0
| 0.042532
| 0.259779
| 10,609
| 329
| 105
| 32.246201
| 0.737171
| 0.005373
| 0
| 0.70979
| 0
| 0
| 0.193194
| 0.033463
| 0
| 0
| 0
| 0
| 0.178322
| 1
| 0.062937
| false
| 0.013986
| 0.017483
| 0
| 0.08042
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
9396896eea1767e47c0c5c8844e6960a55c9c26f
| 21,745
|
py
|
Python
|
pusion/evaluation/evaluation_metrics.py
|
IPVS-AS/pusion
|
58ef24b602f611192430f6005ecf5305f878f412
|
[
"MIT"
] | 5
|
2021-07-24T16:05:12.000Z
|
2022-01-21T15:06:03.000Z
|
pusion/evaluation/evaluation_metrics.py
|
IPVS-AS/pusion
|
58ef24b602f611192430f6005ecf5305f878f412
|
[
"MIT"
] | null | null | null |
pusion/evaluation/evaluation_metrics.py
|
IPVS-AS/pusion
|
58ef24b602f611192430f6005ecf5305f878f412
|
[
"MIT"
] | 2
|
2021-07-24T16:05:14.000Z
|
2022-03-25T21:24:40.000Z
|
import numpy as np
from pusion.util.constants import Problem
from sklearn.metrics import *
from pusion.auto.detector import determine_problem
from pusion.util.transformer import multiclass_assignments_to_labels, multilabel_to_multiclass_assignments
def micro_precision(y_true, y_pred):
    """Micro-averaged precision, i.e. TP / (TP + FP) pooled over all classes.

    :param y_true: `numpy.array` of shape `(n_samples,)` or `(n_samples, n_classes)`. True labels or class assignments.
    :param y_pred: `numpy.array` of the same shape. Predicted labels or class assignments.
    :return: The micro precision.
    """
    score = precision_score(y_true, y_pred, average='micro')
    return score
def micro_recall(y_true, y_pred):
    """Micro-averaged recall, i.e. TP / (TP + FN) pooled over all classes.

    :param y_true: `numpy.array` of shape `(n_samples,)` or `(n_samples, n_classes)`. True labels or class assignments.
    :param y_pred: `numpy.array` of the same shape. Predicted labels or class assignments.
    :return: The micro recall.
    """
    score = recall_score(y_true, y_pred, average='micro')
    return score
def micro_f1(y_true, y_pred):
    """Micro-averaged F1-score, i.e. 2 * (Precision * Recall) / (Precision + Recall).

    :param y_true: `numpy.array` of shape `(n_samples,)` or `(n_samples, n_classes)`. True labels or class assignments.
    :param y_pred: `numpy.array` of the same shape. Predicted labels or class assignments.
    :return: The micro F1-score.
    """
    score = f1_score(y_true, y_pred, average='micro')
    return score
def micro_f2(y_true, y_pred):
    """Micro-averaged F-beta score with beta fixed to 2 (recall-weighted).

    :param y_true: `numpy.array` of shape `(n_samples,)` or `(n_samples, n_classes)`. True labels or class assignments.
    :param y_pred: `numpy.array` of the same shape. Predicted labels or class assignments.
    :return: The micro F2-score.
    """
    score = fbeta_score(y_true, y_pred, average='micro', beta=2)
    return score
def micro_jaccard(y_true, y_pred):
    """Micro-averaged Jaccard score, i.e. TP / (TP + FP + FN) pooled over all classes.

    :param y_true: `numpy.array` of shape `(n_samples,)` or `(n_samples, n_classes)`. True labels or class assignments.
    :param y_pred: `numpy.array` of the same shape. Predicted labels or class assignments.
    :return: The micro Jaccard-score.
    """
    score = jaccard_score(y_true, y_pred, average='micro')
    return score
def macro_precision(y_true, y_pred):
    """Macro-averaged precision: per-class TP / (TP + FP), averaged unweighted over classes.

    :param y_true: `numpy.array` of shape `(n_samples,)` or `(n_samples, n_classes)`. True labels or class assignments.
    :param y_pred: `numpy.array` of the same shape. Predicted labels or class assignments.
    :return: The macro precision.
    """
    score = precision_score(y_true, y_pred, average='macro')
    return score
def macro_recall(y_true, y_pred):
    """Macro-averaged recall: per-class TP / (TP + FN), averaged unweighted over classes.

    :param y_true: `numpy.array` of shape `(n_samples,)` or `(n_samples, n_classes)`. True labels or class assignments.
    :param y_pred: `numpy.array` of the same shape. Predicted labels or class assignments.
    :return: The macro recall.
    """
    score = recall_score(y_true, y_pred, average='macro')
    return score
def macro_f1(y_true, y_pred):
    """Macro-averaged F1-score: per-class F1, averaged unweighted over classes.

    :param y_true: `numpy.array` of shape `(n_samples,)` or `(n_samples, n_classes)`. True labels or class assignments.
    :param y_pred: `numpy.array` of the same shape. Predicted labels or class assignments.
    :return: The macro F1-score.
    """
    score = f1_score(y_true, y_pred, average='macro')
    return score
def macro_f2(y_true, y_pred):
    """Macro-averaged F-beta score with beta fixed to 2 (recall-weighted).

    :param y_true: `numpy.array` of shape `(n_samples,)` or `(n_samples, n_classes)`. True labels or class assignments.
    :param y_pred: `numpy.array` of the same shape. Predicted labels or class assignments.
    :return: The macro F2-score.
    """
    score = fbeta_score(y_true, y_pred, average='macro', beta=2)
    return score
def macro_jaccard(y_true, y_pred):
    """Macro-averaged Jaccard score: per-class TP / (TP + FP + FN), averaged over classes.

    :param y_true: `numpy.array` of shape `(n_samples,)` or `(n_samples, n_classes)`. True labels or class assignments.
    :param y_pred: `numpy.array` of the same shape. Predicted labels or class assignments.
    :return: The macro Jaccard-score.
    """
    score = jaccard_score(y_true, y_pred, average='macro')
    return score
def accuracy(y_true, y_pred):
    """Plain accuracy, i.e. (TP + TN) / (TP + FP + FN + TN).

    :param y_true: `numpy.array` of shape `(n_samples,)` or `(n_samples, n_classes)`. True labels or class assignments.
    :param y_pred: `numpy.array` of the same shape. Predicted labels or class assignments.
    :return: Accuracy.
    """
    score = accuracy_score(y_true, y_pred)
    return score
def balanced_multiclass_accuracy(y_true, y_pred):
    """Balanced accuracy (mean of per-class recalls) for multiclass outputs.

    Assignment matrices of shape `(n_samples, n_classes)` are first flattened to
    label vectors, since `balanced_accuracy_score` expects 1-D label inputs.

    :param y_true: `numpy.array` of shape `(n_samples,)` or `(n_samples, n_classes)`. True labels or class assignments.
    :param y_pred: `numpy.array` of the same shape. Predicted labels or class assignments.
    :return: Accuracy.
    """
    needs_flattening = y_true.ndim > 1 or y_pred.ndim > 1
    if needs_flattening:
        y_true = multiclass_assignments_to_labels(y_true)
        y_pred = multiclass_assignments_to_labels(y_pred)
    return balanced_accuracy_score(y_true, y_pred)
def mean_multilabel_confusion_matrix(y_true, y_pred):
    """Normalized mean confusion matrix across all classes.

    Per-class confusion matrices are summed and then scaled by the number of
    samples times the largest cell, so entries end up in a comparable range.

    :param y_true: `numpy.array` of shape `(n_samples,)` or `(n_samples, n_classes)`. True labels or class assignments.
    :param y_pred: `numpy.array` of the same shape. Predicted labels or class assignments.
    :return: `numpy.array` of shape `(n_classes, n_classes)`. Normalized mean confusion matrix.
    """
    summed = np.sum(multilabel_confusion_matrix(y_true, y_pred), axis=0)
    normalizer = len(y_pred) * np.max(summed)
    return summed / normalizer
def mean_confidence(y_true, y_pred):
    """Mean confidence for continuous multiclass/multilabel outputs.

    Confidence is one minus the mean absolute deviation between true and
    predicted assignment matrices.

    :param y_true: `numpy.array` of shape `(n_samples, n_classes)`. True class assignments.
    :param y_pred: `numpy.array` of shape `(n_samples, n_classes)`. Predicted class assignments.
    :return: Mean confidence.
    """
    return 1 - np.mean(np.abs(y_true - y_pred))
def hamming(y_true, y_pred):
    """Average Hamming loss (fraction of wrongly predicted labels).

    :param y_true: `numpy.array` of shape `(n_samples,)` or `(n_samples, n_classes)`. True labels or class assignments.
    :param y_pred: `numpy.array` of the same shape. Predicted labels or class assignments.
    :return: Average Hamming Loss.
    """
    loss = hamming_loss(y_true, y_pred)
    return loss
def log(y_true, y_pred):
    """Logistic (cross-entropy) loss between true and predicted assignments.

    :param y_true: `numpy.array` of shape `(n_samples,)` or `(n_samples, n_classes)`. True labels or class assignments.
    :param y_pred: `numpy.array` of the same shape. Predicted labels or class assignments.
    :return: Logistic Loss.
    """
    loss = log_loss(y_true, y_pred)
    return loss
def cohens_kappa(y1, y2, labels):
    """
    Calculate the Cohen's Kappa annotator agreement score according to :footcite:`cohen1960coefficient`.
    .. footbibliography::
    :param y1: `numpy.array` of shape `(n_samples,)` or `(n_samples, n_classes)`. Labels or class assignments.
    :param y2: `numpy.array` of shape `(n_samples,)` or `(n_samples, n_classes)`. Labels or class assignments.
    :param labels: `list` of all possible labels.
    :return: Cohen's Kappa score.
    """
    # Observed agreement `a`: fraction of samples on the confusion matrix diagonal.
    cm = confusion_matrix(y1, y2, labels=labels)
    a = np.sum(np.diagonal(cm)) / np.sum(cm)
    # Chance agreement `e`: sum over classes of (row marginal * column marginal),
    # normalized by the squared total count. Accumulated per class in a loop;
    # the summation order is load-bearing for exact float reproducibility.
    e = 0
    for i in range(len(cm)):
        e += np.sum(cm[i, :]) * np.sum(cm[:, i]) / np.sum(cm) ** 2
    if e == 1:
        return 1.0 # case when y1 and y2 are equivalent in their annotation
    # Kappa = (observed - chance) / (1 - chance).
    return (a - e) / (1 - e)
def pairwise_cohens_kappa(decision_tensor):
    """Average of pairwise Cohen's Kappa scores over all multiclass decision outputs.

    For 3 classifiers `(0,1,2)` the agreement is computed for the pairs `(0,1)`,
    `(0,2)` and `(1,2)` and then averaged. Multilabel outputs are first converted
    to an equivalent multiclass representation.

    :param decision_tensor: `numpy.array` of shape `(n_classifiers, n_samples, n_classes)`.
            Tensor of crisp multiclass decision outputs by different classifiers per sample.
    :return: Pairwise (averages) Cohen's Kappa score.
    """
    tensor = np.array(decision_tensor)
    if determine_problem(tensor) == Problem.MULTI_LABEL:
        tensor = multilabel_to_multiclass_assignments(tensor)
    label_range = np.arange(tensor.shape[2])
    rows, cols = np.triu_indices(tensor.shape[0], k=1)
    total = 0.0
    for i, j in zip(rows, cols):
        labels = multiclass_assignments_to_labels([tensor[i], tensor[j]])
        total += cohens_kappa(labels[0], labels[1], labels=label_range)
    return total / len(rows)
def __relations(y1, y2, y_true):
    """Helper: correctness relations between two classifiers' outputs.

    Returns the fractions (a, b, c, d) of samples where: both classifiers are
    correct (a), only the first is correct (b), only the second is correct (c),
    and both are wrong (d).
    """
    n_samples = len(y_true)
    counts = [0, 0, 0, 0]  # a, b, c, d
    for k in range(n_samples):
        first_correct = np.all(y1[k] == y_true[k])
        second_correct = np.all(y_true[k] == y2[k])
        if first_correct and second_correct:
            counts[0] += 1
        elif first_correct:
            counts[1] += 1
        elif second_correct:
            counts[2] += 1
        else:
            counts[3] += 1
    return tuple(c / n_samples for c in counts)
def __pairwise_avg_score(decision_tensor, true_assignments, score_func):
    """Helper: average `score_func` over every unordered pair of classifiers."""
    tensor = np.array(decision_tensor)
    n_classifiers = tensor.shape[0]
    pair_scores = [
        score_func(tensor[i], tensor[j], true_assignments)
        for i in range(n_classifiers)
        for j in range(i + 1, n_classifiers)
    ]
    return np.mean(pair_scores)
def correlation(y1, y2, y_true):
    """Correlation score between two classifiers' decisions according to Kuncheva
    :footcite:`kuncheva2014combining`.

    .. footbibliography::

    :param y1: `numpy.array` of shape `(n_samples, n_classes)`.
            Crisp multiclass decision outputs by the first classifier.
    :param y2: `numpy.array` of shape `(n_samples, n_classes)`.
            Crisp multiclass decision outputs by the second classifier.
    :param y_true: `numpy.array` of shape `(n_samples, n_classes)`.
            Matrix of crisp class assignments which are considered as true.
    :return: Correlation score.
    """
    a, b, c, d = __relations(y1, y2, y_true)
    numerator = a * d - b * c
    denominator = np.sqrt((a + b) * (c + d) * (a + c) * (b + d))
    return numerator / denominator
def q_statistic(y1, y2, y_true):
    """Q statistic score between two classifiers' decisions according to Yule
    :footcite:`udny1900association`.

    .. footbibliography::

    :param y1: `numpy.array` of shape `(n_samples, n_classes)`.
            Crisp multiclass decision outputs by the first classifier.
    :param y2: `numpy.array` of shape `(n_samples, n_classes)`.
            Crisp multiclass decision outputs by the second classifier.
    :param y_true: `numpy.array` of shape `(n_samples, n_classes)`.
            Matrix of crisp class assignments which are considered as true.
    :return: Q statistic score.
    """
    a, b, c, d = __relations(y1, y2, y_true)
    numerator = a * d - b * c
    denominator = a * d + b * c
    return numerator / denominator
def kappa_statistic(y1, y2, y_true):
    """Kappa score between two classifiers' decisions according to Kuncheva
    :footcite:`kuncheva2014combining`.

    .. footbibliography::

    :param y1: `numpy.array` of shape `(n_samples, n_classes)`.
            Crisp multiclass decision outputs by the first classifier.
    :param y2: `numpy.array` of shape `(n_samples, n_classes)`.
            Crisp multiclass decision outputs by the second classifier.
    :param y_true: `numpy.array` of shape `(n_samples, n_classes)`.
            Matrix of crisp class assignments which are considered as true.
    :return: Kappa score.
    """
    a, b, c, d = __relations(y1, y2, y_true)
    numerator = 2 * (a * d - b * c)
    denominator = (a + b) * (b + d) + (a + c) * (c + d)
    return numerator / denominator
def disagreement(y1, y2, y_true):
    """
    Calculate the disagreement for decision outputs of two classifiers, i.e. the percentage of samples which are
    correctly classified by exactly one of the classifiers.

    :param y1: `numpy.array` of shape `(n_samples, n_classes)`.
            Crisp multiclass decision outputs by the first classifier.
    :param y2: `numpy.array` of shape `(n_samples, n_classes)`.
            Crisp multiclass decision outputs by the second classifier.
    :param y_true: `numpy.array` of shape `(n_samples, n_classes)`.
            Matrix of crisp class assignments which are considered as true.
    :return: Disagreement score.
    """
    # Only the "exactly one classifier correct" fractions (b and c) are used;
    # a and d were previously bound to unused locals.
    _, b, c, _ = __relations(y1, y2, y_true)
    return b + c
def double_fault(y1, y2, y_true):
    """
    Calculate the double fault for decision outputs of two classifiers, i.e. the percentage of samples which are
    misclassified by both classifiers.

    :param y1: `numpy.array` of shape `(n_samples, n_classes)`.
            Crisp multiclass decision outputs by the first classifier.
    :param y2: `numpy.array` of shape `(n_samples, n_classes)`.
            Crisp multiclass decision outputs by the second classifier.
    :param y_true: `numpy.array` of shape `(n_samples, n_classes)`.
            Matrix of crisp class assignments which are considered as true.
    :return: Double fault score.
    """
    # Only the "both classifiers wrong" fraction (d) is used; the previous
    # version bound a, b and c to unused locals.
    *_, d = __relations(y1, y2, y_true)
    return d
def abs_correlation(y1, y2, y_true):
    """Absolute value of the correlation score between two classifiers' decisions.

    :param y1: `numpy.array` of shape `(n_samples, n_classes)`.
            Crisp multiclass decision outputs by the first classifier.
    :param y2: `numpy.array` of shape `(n_samples, n_classes)`.
            Crisp multiclass decision outputs by the second classifier.
    :param y_true: `numpy.array` of shape `(n_samples, n_classes)`.
            Matrix of crisp class assignments which are considered as true.
    :return: Correlation score.
    """
    a, b, c, d = __relations(y1, y2, y_true)
    score = (a * d - b * c) / np.sqrt((a + b) * (c + d) * (a + c) * (b + d))
    return np.abs(score)
def abs_q_statistic(y1, y2, y_true):
    """Absolute value of the Q statistic score between two classifiers' decisions.

    :param y1: `numpy.array` of shape `(n_samples, n_classes)`.
            Crisp multiclass decision outputs by the first classifier.
    :param y2: `numpy.array` of shape `(n_samples, n_classes)`.
            Crisp multiclass decision outputs by the second classifier.
    :param y_true: `numpy.array` of shape `(n_samples, n_classes)`.
            Matrix of crisp class assignments which are considered as true.
    :return: Q statistic score.
    """
    a, b, c, d = __relations(y1, y2, y_true)
    score = (a * d - b * c) / (a * d + b * c)
    return np.abs(score)
def pairwise_correlation(decision_tensor, true_assignments):
    """Average of the pairwise absolute correlation scores over all decision outputs.

    Multilabel inputs are first converted to an equivalent multiclass form.

    :param decision_tensor: `numpy.array` of shape `(n_classifiers, n_samples, n_classes)`.
            Tensor of crisp multiclass decision outputs by different classifiers per sample.
    :param true_assignments: `numpy.array` of shape `(n_samples, n_classes)`.
            Matrix of crisp class assignments which are considered as true.
    :return: Pairwise correlation score.
    """
    is_multilabel = determine_problem(decision_tensor) == Problem.MULTI_LABEL
    if is_multilabel:
        decision_tensor = multilabel_to_multiclass_assignments(decision_tensor)
        true_assignments = multilabel_to_multiclass_assignments(true_assignments)
    return __pairwise_avg_score(decision_tensor, true_assignments, abs_correlation)
def pairwise_q_statistic(decision_tensor, true_assignments):
    """Average of the pairwise absolute Q-statistic scores over all decision outputs.

    Multilabel inputs are first converted to an equivalent multiclass form.

    :param decision_tensor: `numpy.array` of shape `(n_classifiers, n_samples, n_classes)`.
            Tensor of crisp multiclass decision outputs by different classifiers per sample.
    :param true_assignments: `numpy.array` of shape `(n_samples, n_classes)`.
            Matrix of crisp class assignments which are considered as true.
    :return: Pairwise Q-statistic score.
    """
    is_multilabel = determine_problem(decision_tensor) == Problem.MULTI_LABEL
    if is_multilabel:
        decision_tensor = multilabel_to_multiclass_assignments(decision_tensor)
        true_assignments = multilabel_to_multiclass_assignments(true_assignments)
    return __pairwise_avg_score(decision_tensor, true_assignments, abs_q_statistic)
def pairwise_kappa_statistic(decision_tensor, true_assignments):
    """Average of pairwise Kappa scores over all decision outputs.

    Multilabel class assignments are transformed to equivalent multiclass ones.

    :param decision_tensor: `numpy.array` of shape `(n_classifiers, n_samples, n_classes)`.
            Tensor of crisp multiclass decision outputs by different classifiers per sample.
    :param true_assignments: `numpy.array` of shape `(n_samples, n_classes)`.
            Matrix of crisp class assignments which are considered as true.
    :return: Pairwise kappa score.
    """
    is_multilabel = determine_problem(decision_tensor) == Problem.MULTI_LABEL
    if is_multilabel:
        decision_tensor = multilabel_to_multiclass_assignments(decision_tensor)
        true_assignments = multilabel_to_multiclass_assignments(true_assignments)
    return __pairwise_avg_score(decision_tensor, true_assignments, kappa_statistic)
def pairwise_disagreement(decision_tensor, true_assignments):
    """Average of pairwise disagreement scores over all decision outputs.

    Multilabel class assignments are transformed to equivalent multiclass ones.

    :param decision_tensor: `numpy.array` of shape `(n_classifiers, n_samples, n_classes)`.
            Tensor of crisp multiclass decision outputs by different classifiers per sample.
    :param true_assignments: `numpy.array` of shape `(n_samples, n_classes)`.
            Matrix of crisp class assignments which are considered as true.
    :return: Pairwise disagreement score.
    """
    is_multilabel = determine_problem(decision_tensor) == Problem.MULTI_LABEL
    if is_multilabel:
        decision_tensor = multilabel_to_multiclass_assignments(decision_tensor)
        true_assignments = multilabel_to_multiclass_assignments(true_assignments)
    return __pairwise_avg_score(decision_tensor, true_assignments, disagreement)
def pairwise_double_fault(decision_tensor, true_assignments):
    """Average of pairwise double fault scores over all decision outputs.

    Multilabel class assignments are transformed to equivalent multiclass ones.

    :param decision_tensor: `numpy.array` of shape `(n_classifiers, n_samples, n_classes)`.
            Tensor of crisp multiclass decision outputs by different classifiers per sample.
    :param true_assignments: `numpy.array` of shape `(n_samples, n_classes)`.
            Matrix of crisp class assignments which are considered as true.
    :return: Pairwise double fault score.
    """
    is_multilabel = determine_problem(decision_tensor) == Problem.MULTI_LABEL
    if is_multilabel:
        decision_tensor = multilabel_to_multiclass_assignments(decision_tensor)
        true_assignments = multilabel_to_multiclass_assignments(true_assignments)
    return __pairwise_avg_score(decision_tensor, true_assignments, double_fault)
def pairwise_euclidean_distance(decision_tensor):
    """Average pairwise euclidean distance between classifiers' decision matrices.

    For each unordered classifier pair, the per-sample euclidean distance between
    their decision matrices is averaged; the result is the mean over all pairs.

    :param decision_tensor: `numpy.array` of shape `(n_classifiers, n_samples, n_classes)`.
            Tensor of crisp multiclass decision outputs by different classifiers per sample.
    :return: Pairwise euclidean distance.
    """
    tensor = np.array(decision_tensor)
    n_classifiers = tensor.shape[0]
    pair_distances = [
        np.mean(np.linalg.norm(tensor[i] - tensor[j], axis=1))
        for i in range(n_classifiers)
        for j in range(i + 1, n_classifiers)
    ]
    return np.mean(pair_distances)
| 43.230616
| 119
| 0.688802
| 3,043
| 21,745
| 4.732172
| 0.063096
| 0.058333
| 0.056667
| 0.080278
| 0.834931
| 0.816389
| 0.791181
| 0.737153
| 0.729514
| 0.688889
| 0
| 0.008886
| 0.203035
| 21,745
| 502
| 120
| 43.316733
| 0.822043
| 0.615314
| 0
| 0.266667
| 0
| 0
| 0.006932
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.244444
| false
| 0
| 0.037037
| 0
| 0.533333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 8
|
9e07f5626df5a117ee98bd8f1e78b098812a60a3
| 99
|
py
|
Python
|
bitcoin-ctf/mapModel/__init__.py
|
garcilazor/Software_Supply_Chain_CTF
|
a0cd91d5e72ded132178c3f5868bf78b677316d5
|
[
"MIT"
] | null | null | null |
bitcoin-ctf/mapModel/__init__.py
|
garcilazor/Software_Supply_Chain_CTF
|
a0cd91d5e72ded132178c3f5868bf78b677316d5
|
[
"MIT"
] | 21
|
2021-08-06T01:42:28.000Z
|
2021-08-08T18:57:40.000Z
|
bitcoin-ctf/mapModel/__init__.py
|
garcilazor/Software_Supply_Chain_CTF
|
a0cd91d5e72ded132178c3f5868bf78b677316d5
|
[
"MIT"
] | 1
|
2021-09-03T22:24:37.000Z
|
2021-09-03T22:24:37.000Z
|
from .Model import model
# instantiate client container class
def get_client():
    """Return a fresh ``model`` instance acting as the client container."""
    client = model()
    return client
| 16.5
| 35
| 0.747475
| 13
| 99
| 5.615385
| 0.769231
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.181818
| 99
| 5
| 36
| 19.8
| 0.901235
| 0.333333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| true
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 1
| 1
| 0
|
0
| 7
|
9e090251b559ea7aaf15f8a3dbd526452924e71c
| 5,454
|
py
|
Python
|
plotter.py
|
ai-se/TimeLIME
|
eaf8cd44715bb1f6dcac29f4c0bfb2c93809ac98
|
[
"MIT"
] | null | null | null |
plotter.py
|
ai-se/TimeLIME
|
eaf8cd44715bb1f6dcac29f4c0bfb2c93809ac98
|
[
"MIT"
] | null | null | null |
plotter.py
|
ai-se/TimeLIME
|
eaf8cd44715bb1f6dcac29f4c0bfb2c93809ac98
|
[
"MIT"
] | 1
|
2021-04-28T17:21:30.000Z
|
2021-04-28T17:21:30.000Z
|
import matplotlib.pyplot as plt
import numpy as np
def plot_rq2(scores, bcs, fnames, planner):
    """Plot, per dataset, the ratio of plans falling into each overlap quartile.

    Fixes/cleanup vs. the previous version: the r*/a* bug-change accumulators were
    computed but never used here (they belong to RQ3) and are removed; the eight
    copy-pasted `ax.bar` calls (bound to unused names p0..p7) are replaced by a
    loop, which also generalizes the plot to any number of datasets instead of a
    hard-coded eight.

    :param scores: per-dataset sequences of overlap scores in [0, 1].
    :param bcs: per-dataset bug-change sequences (unused here, kept for a
        signature consistent with `plot_rq3`).
    :param fnames: per-dataset filename lists; fnames[m][0] provides the legend label.
    :param planner: planner name, used as plot title and in the output filename.
    :return: list of per-dataset quartile ratios (raw counts if a dataset is empty).
    """
    N = 4
    plt.rcParams.update({'font.size': 15})
    fig, ax = plt.subplots(figsize=(8, 8))
    ind = np.arange(N)  # the x locations for the groups
    width = 0.08  # the width of the bars
    result = []
    for score in scores:
        buckets = [0, 0, 0, 0]  # plan counts per overlap quartile
        for value in score:
            if 0 <= value < 0.25:
                buckets[0] += 1
            elif 0.25 <= value < 0.5:
                buckets[1] += 1
            elif 0.5 <= value < 0.75:
                buckets[2] += 1
            elif 0.75 <= value <= 1:
                buckets[3] += 1
        total = sum(buckets)
        # Normalize to ratios; fall back to raw counts for an empty dataset.
        result.append([b / total for b in buckets] if total != 0 else buckets)
    ax.set_ylabel("Ratio of plans over all plans")
    ax.set_xlabel("Overlap percentage")
    # One grouped bar series per dataset, centered around each quartile tick
    # (offsets -4..3 for the original eight datasets).
    half = len(result) // 2
    for m, row in enumerate(result):
        ax.bar(ind + width * (m - half), row, width, bottom=0,
               label=fnames[m][0].split('-')[0])
    ax.set_title(planner)
    ax.set_xticks(ind)
    ax.set_xticklabels(('0-25', '25-50', '50-75', '75-100'))
    ax.autoscale_view()
    plt.grid(axis='y')
    plt.savefig("rq2" + planner, dpi=100, bbox_inches='tight')
    return result
def plot_rq3(scores, bcs, fnames, planner):
    """Plot, per dataset, the net number of bugs reduced in each overlap quartile.

    Fixes/cleanup vs. the previous version: the dead `rate = [a25, ...]` assignment
    (immediately overwritten on the next line), the unused plan counts `p*`/`s`,
    and the commented-out alternatives are removed; the eight copy-pasted
    `ax.bar` calls (bound to unused names p0..p7) are replaced by a loop, which
    also generalizes the plot to any number of datasets.

    :param scores: per-dataset sequences of overlap scores in [0, 1].
    :param bcs: per-dataset bug-change sequences aligned with `scores` (negative
        values mean bugs removed, positive values mean bugs introduced).
    :param fnames: per-dataset filename lists; fnames[m][0] provides the legend label.
    :param planner: planner name, used as plot title and in the output filename.
    :return: list of per-dataset [reduced - added] values per quartile.
    """
    N = 4
    plt.rcParams.update({'font.size': 15})
    fig, ax = plt.subplots(figsize=(8, 8))
    ind = np.arange(N)  # the x locations for the groups
    width = 0.08  # the width of the bars
    result = []
    for score, bugchange in zip(scores, bcs):
        reduced = [0, 0, 0, 0]  # bugs removed per quartile
        added = [0, 0, 0, 0]    # bugs introduced per quartile
        for i in range(len(score)):
            if 0 <= score[i] < 0.25:
                k = 0
            elif 0.25 <= score[i] < 0.5:
                k = 1
            elif 0.5 <= score[i] < 0.75:
                k = 2
            elif 0.75 <= score[i] <= 1:
                k = 3
            else:
                continue  # out-of-range score: ignored, as before
            if bugchange[i] < 0:
                reduced[k] -= bugchange[i]
            if bugchange[i] > 0:
                added[k] += bugchange[i]
        result.append([r - a for r, a in zip(reduced, added)])
    ax.set_ylabel("Total amount of bugs reduced")
    ax.set_xlabel("Overlap percentage")
    # One grouped bar series per dataset, centered around each quartile tick.
    half = len(result) // 2
    for m, row in enumerate(result):
        ax.bar(ind + width * (m - half), row, width, bottom=0,
               label=fnames[m][0].split('-')[0])
    ax.set_title(planner)
    ax.set_xticks(ind)
    ax.set_xticklabels(('0-25', '25-50', '50-75', '75-100'))
    ax.autoscale_view()
    plt.grid(axis='y')
    plt.savefig("rq3" + planner, dpi=100, bbox_inches='tight')
    return result
| 40.701493
| 102
| 0.520719
| 851
| 5,454
| 3.314924
| 0.13631
| 0.113435
| 0.068061
| 0.073733
| 0.943283
| 0.943283
| 0.930876
| 0.930876
| 0.902517
| 0.902517
| 0
| 0.123607
| 0.292446
| 5,454
| 134
| 103
| 40.701493
| 0.607411
| 0.082325
| 0
| 0.898305
| 0
| 0
| 0.037037
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.016949
| false
| 0
| 0.016949
| 0
| 0.050847
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
9e12d3af2038cc29768637941c01302825190558
| 35,764
|
py
|
Python
|
obp/ope/estimators_tuning.py
|
han20192019/newRL
|
53598edab284b4364d127ec5662137de3f9c1206
|
[
"Apache-2.0"
] | 387
|
2020-07-19T14:56:36.000Z
|
2022-03-29T15:25:21.000Z
|
obp/ope/estimators_tuning.py
|
han20192019/newRL
|
53598edab284b4364d127ec5662137de3f9c1206
|
[
"Apache-2.0"
] | 89
|
2020-10-04T17:04:42.000Z
|
2022-03-27T10:43:15.000Z
|
obp/ope/estimators_tuning.py
|
han20192019/newRL
|
53598edab284b4364d127ec5662137de3f9c1206
|
[
"Apache-2.0"
] | 53
|
2020-08-18T09:52:22.000Z
|
2022-03-30T23:16:13.000Z
|
# Copyright (c) Yuta Saito, Yusuke Narita, and ZOZO Technologies, Inc. All rights reserved.
# Licensed under the Apache 2.0 License.
"""Off-Policy Estimators with built-in hyperparameter tuning."""
from dataclasses import dataclass
from dataclasses import field
from typing import Dict
from typing import List
from typing import Optional
import numpy as np
from sklearn.utils import check_scalar
from ..utils import check_array
from ..utils import check_ope_inputs
from .estimators import BaseOffPolicyEstimator
from .estimators import DoublyRobust
from .estimators import DoublyRobustWithShrinkage
from .estimators import InverseProbabilityWeighting
from .estimators import SwitchDoublyRobust
@dataclass
class BaseOffPolicyEstimatorTuning:
    """Base Class for Off-Policy Estimator with built-in hyperparameter tuning.

    Parameters
    ----------
    base_ope_estimator: BaseOffPolicyEstimator
        An OPE estimator with a hyperparameter
        (such as IPW/DR with clipping, Switch-DR, and DR with Shrinkage).

    lambdas: List[float]
        A list of candidate hyperparameter values.

    use_bias_upper_bound: bool, default=True
        Whether to use bias upper bound in hyperparameter tuning.
        If False, direct bias estimator is used to estimate the MSE.

    delta: float, default=0.05
        A confidence delta to construct a high probability upper bound based on the Bernstein’s inequality.

    References
    ----------
    Miroslav Dudík, Dumitru Erhan, John Langford, and Lihong Li.
    "Doubly Robust Policy Evaluation and Optimization.", 2014.

    Yi Su, Maria Dimakopoulou, Akshay Krishnamurthy, and Miroslav Dudik.
    "Doubly Robust Off-Policy Evaluation with Shrinkage.", 2020.

    """

    base_ope_estimator: BaseOffPolicyEstimator = field(init=False)
    lambdas: List[float] = None
    use_bias_upper_bound: bool = True
    delta: float = 0.05

    def __new__(cls, *args, **kwargs):
        # Re-apply the dataclass machinery to the concrete subclass so that
        # subclasses defined without the @dataclass decorator still obtain a
        # generated __init__ covering the fields declared above.
        dataclass(cls)
        return super().__new__(cls)

    def _check_lambdas(self) -> None:
        """Check type and value of `lambdas`.

        Raises
        ------
        TypeError
            If `lambdas` is not a list or contains a non-numeric element.
        ValueError
            If `lambdas` is empty or contains a negative or NaN element.
        """
        if isinstance(self.lambdas, list):
            if len(self.lambdas) == 0:
                raise ValueError("lambdas must not be empty")
            for hyperparam_ in self.lambdas:
                check_scalar(
                    hyperparam_,
                    name="an element of lambdas",
                    target_type=(int, float),
                    min_val=0.0,
                )
                # NaN is the only value that does not compare equal to itself.
                if hyperparam_ != hyperparam_:
                    raise ValueError("an element of lambdas must not be nan")
        else:
            raise TypeError("lambdas must be a list")

    def _check_init_inputs(self) -> None:
        """Check validity of `use_bias_upper_bound` and `delta`."""
        if not isinstance(self.use_bias_upper_bound, bool):
            # f-string prefix was missing here before, so the {type(...)}
            # placeholder was emitted literally instead of being interpolated.
            raise TypeError(
                "`use_bias_upper_bound` must be a bool"
                f", but {type(self.use_bias_upper_bound)} is given"
            )
        check_scalar(self.delta, "delta", float, min_val=0.0, max_val=1.0)

    def _tune_hyperparam(
        self,
        reward: np.ndarray,
        action: np.ndarray,
        pscore: np.ndarray,
        action_dist: np.ndarray,
        estimated_rewards_by_reg_model: Optional[np.ndarray] = None,
        position: Optional[np.ndarray] = None,
    ) -> None:
        """Find the best hyperparameter value from the given candidate set.

        The candidate minimizing the estimated MSE score is stored in
        `self.best_hyperparam`; all candidate scores are kept in
        `self.estimated_mse_score_dict` for inspection.
        """
        self.estimated_mse_score_dict = dict()
        for hyperparam_ in self.lambdas:
            estimated_mse_score = self.base_ope_estimator(
                hyperparam_
            )._estimate_mse_score(
                reward=reward,
                action=action,
                pscore=pscore,
                action_dist=action_dist,
                estimated_rewards_by_reg_model=estimated_rewards_by_reg_model,
                position=position,
                use_bias_upper_bound=self.use_bias_upper_bound,
                delta=self.delta,
            )
            self.estimated_mse_score_dict[hyperparam_] = estimated_mse_score
        self.best_hyperparam = min(
            self.estimated_mse_score_dict.items(), key=lambda x: x[1]
        )[0]

    def estimate_policy_value_with_tuning(
        self,
        reward: np.ndarray,
        action: np.ndarray,
        pscore: np.ndarray,
        action_dist: np.ndarray,
        estimated_rewards_by_reg_model: Optional[np.ndarray] = None,
        position: Optional[np.ndarray] = None,
    ) -> float:
        """Estimate the policy value of evaluation policy with a tuned hyperparameter.

        Parameters
        ----------
        reward: array-like, shape (n_rounds,)
            Reward observed in each round of the logged bandit feedback, i.e., :math:`r_t`.

        action: array-like, shape (n_rounds,)
            Action sampled by behavior policy in each round of the logged bandit feedback, i.e., :math:`a_t`.

        pscore: array-like, shape (n_rounds,)
            Action choice probabilities of behavior policy (propensity scores), i.e., :math:`\\pi_b(a_t|x_t)`.

        action_dist: array-like, shape (n_rounds, n_actions, len_list)
            Action choice probabilities of evaluation policy (can be deterministic), i.e., :math:`\\pi_e(a_t|x_t)`.

        estimated_rewards_by_reg_model: array-like, shape (n_rounds, n_actions, len_list), default=None
            Expected rewards given context, action, and position estimated by regression model, i.e., :math:`\\hat{q}(x_t,a_t)`.

        position: array-like, shape (n_rounds,), default=None
            Position of recommendation interface where action was presented in each round of the given logged bandit data.

        Returns
        ----------
        V_hat: float
            Policy value estimated by the DR estimator.

        """
        # tune hyperparameter if necessary; the tuned value is cached so
        # repeated calls with the same instance reuse it
        if not hasattr(self, "best_hyperparam"):
            self._tune_hyperparam(
                reward=reward,
                action=action,
                pscore=pscore,
                action_dist=action_dist,
                estimated_rewards_by_reg_model=estimated_rewards_by_reg_model,
                position=position,
            )
        return self.base_ope_estimator(self.best_hyperparam).estimate_policy_value(
            reward=reward,
            action=action,
            position=position,
            pscore=pscore,
            action_dist=action_dist,
            estimated_rewards_by_reg_model=estimated_rewards_by_reg_model,
        )

    def estimate_interval_with_tuning(
        self,
        reward: np.ndarray,
        action: np.ndarray,
        pscore: np.ndarray,
        action_dist: np.ndarray,
        estimated_rewards_by_reg_model: Optional[np.ndarray] = None,
        position: Optional[np.ndarray] = None,
        alpha: float = 0.05,
        n_bootstrap_samples: int = 10000,
        random_state: Optional[int] = None,
        **kwargs,
    ) -> Dict[str, float]:
        """Estimate confidence interval of policy value by nonparametric bootstrap procedure.

        Parameters
        ----------
        reward: array-like, shape (n_rounds,)
            Reward observed in each round of the logged bandit feedback, i.e., :math:`r_t`.

        action: array-like, shape (n_rounds,)
            Action sampled by behavior policy in each round of the logged bandit feedback, i.e., :math:`a_t`.

        pscore: array-like, shape (n_rounds,)
            Action choice probabilities of behavior policy (propensity scores), i.e., :math:`\\pi_b(a_t|x_t)`.

        action_dist: array-like, shape (n_rounds, n_actions, len_list)
            Action choice probabilities of evaluation policy (can be deterministic), i.e., :math:`\\pi_e(a_t|x_t)`.

        estimated_rewards_by_reg_model: array-like, shape (n_rounds, n_actions, len_list), default=None
            Expected rewards given context, action, and position estimated by regression model, i.e., :math:`\\hat{q}(x_t,a_t)`.

        position: array-like, shape (n_rounds,), default=None
            Position of recommendation interface where action was presented in each round of the given logged bandit data.

        alpha: float, default=0.05
            Significance level.

        n_bootstrap_samples: int, default=10000
            Number of resampling performed in the bootstrap procedure.

        random_state: int, default=None
            Controls the random seed in bootstrap sampling.

        Returns
        ----------
        estimated_confidence_interval: Dict[str, float]
            Dictionary storing the estimated mean and upper-lower confidence bounds.

        """
        # tune hyperparameter if necessary; the tuned value is cached so
        # repeated calls with the same instance reuse it
        if not hasattr(self, "best_hyperparam"):
            self._tune_hyperparam(
                reward=reward,
                action=action,
                pscore=pscore,
                action_dist=action_dist,
                estimated_rewards_by_reg_model=estimated_rewards_by_reg_model,
                position=position,
            )
        return self.base_ope_estimator(self.best_hyperparam).estimate_interval(
            reward=reward,
            action=action,
            position=position,
            pscore=pscore,
            action_dist=action_dist,
            estimated_rewards_by_reg_model=estimated_rewards_by_reg_model,
            alpha=alpha,
            n_bootstrap_samples=n_bootstrap_samples,
            random_state=random_state,
        )
class InverseProbabilityWeightingTuning(BaseOffPolicyEstimatorTuning):
    """Inverse Probability Weighting (IPW) with built-in hyperparameter tuning.

    Parameters
    ----------
    lambdas: List[float]
        A list of candidate clipping hyperparameters.
        The automatic hyperparameter tuning proposed by Su et al.(2020)
        will choose the best hyperparameter value from the data.

    use_bias_upper_bound: bool, default=True
        Whether to use bias upper bound in hyperparameter tuning.
        If False, direct bias estimator is used to estimate the MSE.

    delta: float, default=0.05
        A confidence delta to construct a high probability upper bound based on the Bernstein’s inequality.

    estimator_name: str, default='ipw'.
        Name of the estimator.

    References
    ----------
    Miroslav Dudík, Dumitru Erhan, John Langford, and Lihong Li.
    "Doubly Robust Policy Evaluation and Optimization.", 2014.

    Yi Su, Maria Dimakopoulou, Akshay Krishnamurthy, and Miroslav Dudik.
    "Doubly Robust Off-Policy Evaluation with Shrinkage.", 2020.

    """

    estimator_name: str = "ipw"

    def __post_init__(self) -> None:
        """Initialize Class."""
        self.base_ope_estimator = InverseProbabilityWeighting
        super()._check_lambdas()
        super()._check_init_inputs()

    def estimate_policy_value(
        self,
        reward: np.ndarray,
        action: np.ndarray,
        pscore: np.ndarray,
        action_dist: np.ndarray,
        position: Optional[np.ndarray] = None,
        **kwargs,
    ) -> float:
        # NOTE: the return annotation was previously `np.ndarray`, which
        # contradicted the docstring and every sibling estimator; a scalar
        # policy value is returned.
        """Estimate the policy value of evaluation policy.

        Parameters
        ----------
        reward: array-like, shape (n_rounds,)
            Reward observed in each round of the logged bandit feedback, i.e., :math:`r_t`.

        action: array-like, shape (n_rounds,)
            Action sampled by behavior policy in each round of the logged bandit feedback, i.e., :math:`a_t`.

        pscore: array-like, shape (n_rounds,)
            Action choice probabilities of behavior policy (propensity scores), i.e., :math:`\\pi_b(a_t|x_t)`.

        action_dist: array-like, shape (n_rounds, n_actions, len_list)
            Action choice probabilities of evaluation policy (can be deterministic), i.e., :math:`\\pi_e(a_t|x_t)`.

        position: array-like, shape (n_rounds,), default=None
            Position of recommendation interface where action was presented in each round of the given logged bandit data.

        Returns
        ----------
        V_hat: float
            Estimated policy value (performance) of a given evaluation policy.

        """
        check_array(array=reward, name="reward", expected_dim=1)
        check_array(array=action, name="action", expected_dim=1)
        check_array(array=pscore, name="pscore", expected_dim=1)
        check_ope_inputs(
            action_dist=action_dist,
            position=position,
            action=action,
            reward=reward,
            pscore=pscore,
        )
        if position is None:
            # single-slot interface: treat every round as position 0
            position = np.zeros(action_dist.shape[0], dtype=int)
        return super().estimate_policy_value_with_tuning(
            reward=reward,
            action=action,
            position=position,
            pscore=pscore,
            action_dist=action_dist,
        )

    def estimate_interval(
        self,
        reward: np.ndarray,
        action: np.ndarray,
        pscore: np.ndarray,
        action_dist: np.ndarray,
        position: Optional[np.ndarray] = None,
        alpha: float = 0.05,
        n_bootstrap_samples: int = 10000,
        random_state: Optional[int] = None,
        **kwargs,
    ) -> Dict[str, float]:
        """Estimate confidence interval of policy value by nonparametric bootstrap procedure.

        Parameters
        ----------
        reward: array-like, shape (n_rounds,)
            Reward observed in each round of the logged bandit feedback, i.e., :math:`r_t`.

        action: array-like, shape (n_rounds,)
            Action sampled by behavior policy in each round of the logged bandit feedback, i.e., :math:`a_t`.

        pscore: array-like, shape (n_rounds,)
            Action choice probabilities of behavior policy (propensity scores), i.e., :math:`\\pi_b(a_t|x_t)`.

        action_dist: array-like, shape (n_rounds, n_actions, len_list)
            Action choice probabilities
            by the evaluation policy (can be deterministic), i.e., :math:`\\pi_e(a_t|x_t)`.

        position: array-like, shape (n_rounds,), default=None
            Position of recommendation interface where action was presented in each round of the given logged bandit data.

        alpha: float, default=0.05
            Significance level.

        n_bootstrap_samples: int, default=10000
            Number of resampling performed in the bootstrap procedure.

        random_state: int, default=None
            Controls the random seed in bootstrap sampling.

        Returns
        ----------
        estimated_confidence_interval: Dict[str, float]
            Dictionary storing the estimated mean and upper-lower confidence bounds.

        """
        check_array(array=reward, name="reward", expected_dim=1)
        check_array(array=action, name="action", expected_dim=1)
        check_array(array=pscore, name="pscore", expected_dim=1)
        check_ope_inputs(
            action_dist=action_dist,
            position=position,
            action=action,
            reward=reward,
            pscore=pscore,
        )
        if position is None:
            # single-slot interface: treat every round as position 0
            position = np.zeros(action_dist.shape[0], dtype=int)
        return super().estimate_interval_with_tuning(
            reward=reward,
            action=action,
            position=position,
            pscore=pscore,
            action_dist=action_dist,
            alpha=alpha,
            n_bootstrap_samples=n_bootstrap_samples,
            random_state=random_state,
        )
@dataclass
class DoublyRobustTuning(BaseOffPolicyEstimatorTuning):
    """Doubly Robust (DR) with built-in hyperparameter tuning.

    Parameters
    ----------
    lambdas: List[float]
        A list of candidate clipping hyperparameters.
        The automatic hyperparameter tuning proposed by Su et al.(2020)
        will choose the best hyperparameter value from the data.

    estimator_name: str, default='dr'.
        Name of the estimator.

    References
    ----------
    Miroslav Dudík, Dumitru Erhan, John Langford, and Lihong Li.
    "Doubly Robust Policy Evaluation and Optimization.", 2014.

    Yi Su, Maria Dimakopoulou, Akshay Krishnamurthy, and Miroslav Dudik.
    "Doubly Robust Off-Policy Evaluation with Shrinkage.", 2020.

    """

    # NOTE: `lambdas` used to be redeclared here with the same default as the
    # base class; the redundant redeclaration was removed for consistency with
    # the sibling tuning classes (the generated __init__ is unchanged).
    estimator_name: str = "dr"

    def __post_init__(self) -> None:
        """Initialize Class."""
        self.base_ope_estimator = DoublyRobust
        super()._check_lambdas()
        super()._check_init_inputs()

    def estimate_policy_value(
        self,
        reward: np.ndarray,
        action: np.ndarray,
        pscore: np.ndarray,
        action_dist: np.ndarray,
        estimated_rewards_by_reg_model: np.ndarray,
        position: Optional[np.ndarray] = None,
    ) -> float:
        """Estimate the policy value of evaluation policy with a tuned hyperparameter.

        Parameters
        ----------
        reward: array-like, shape (n_rounds,)
            Reward observed in each round of the logged bandit feedback, i.e., :math:`r_t`.

        action: array-like, shape (n_rounds,)
            Action sampled by behavior policy in each round of the logged bandit feedback, i.e., :math:`a_t`.

        pscore: array-like, shape (n_rounds,)
            Action choice probabilities of behavior policy (propensity scores), i.e., :math:`\\pi_b(a_t|x_t)`.

        action_dist: array-like, shape (n_rounds, n_actions, len_list)
            Action choice probabilities of evaluation policy (can be deterministic), i.e., :math:`\\pi_e(a_t|x_t)`.

        estimated_rewards_by_reg_model: array-like, shape (n_rounds, n_actions, len_list)
            Expected rewards given context, action, and position estimated by regression model, i.e., :math:`\\hat{q}(x_t,a_t)`.

        position: array-like, shape (n_rounds,), default=None
            Position of recommendation interface where action was presented in each round of the given logged bandit data.

        Returns
        ----------
        V_hat: float
            Policy value estimated by the DR estimator.

        """
        check_array(
            array=estimated_rewards_by_reg_model,
            name="estimated_rewards_by_reg_model",
            expected_dim=3,
        )
        check_array(array=reward, name="reward", expected_dim=1)
        check_array(array=action, name="action", expected_dim=1)
        check_array(array=pscore, name="pscore", expected_dim=1)
        check_ope_inputs(
            action_dist=action_dist,
            position=position,
            action=action,
            reward=reward,
            pscore=pscore,
            estimated_rewards_by_reg_model=estimated_rewards_by_reg_model,
        )
        if position is None:
            # single-slot interface: treat every round as position 0
            position = np.zeros(action_dist.shape[0], dtype=int)
        return super().estimate_policy_value_with_tuning(
            reward=reward,
            action=action,
            position=position,
            pscore=pscore,
            action_dist=action_dist,
            estimated_rewards_by_reg_model=estimated_rewards_by_reg_model,
        )

    def estimate_interval(
        self,
        reward: np.ndarray,
        action: np.ndarray,
        pscore: np.ndarray,
        action_dist: np.ndarray,
        estimated_rewards_by_reg_model: np.ndarray,
        position: Optional[np.ndarray] = None,
        alpha: float = 0.05,
        n_bootstrap_samples: int = 10000,
        random_state: Optional[int] = None,
        **kwargs,
    ) -> Dict[str, float]:
        """Estimate confidence interval of policy value by nonparametric bootstrap procedure.

        Parameters
        ----------
        reward: array-like, shape (n_rounds,)
            Reward observed in each round of the logged bandit feedback, i.e., :math:`r_t`.

        action: array-like, shape (n_rounds,)
            Action sampled by behavior policy in each round of the logged bandit feedback, i.e., :math:`a_t`.

        pscore: array-like, shape (n_rounds,)
            Action choice probabilities of behavior policy (propensity scores), i.e., :math:`\\pi_b(a_t|x_t)`.

        action_dist: array-like, shape (n_rounds, n_actions, len_list)
            Action choice probabilities of evaluation policy (can be deterministic), i.e., :math:`\\pi_e(a_t|x_t)`.

        estimated_rewards_by_reg_model: array-like, shape (n_rounds, n_actions, len_list)
            Expected rewards given context, action, and position estimated by regression model, i.e., :math:`\\hat{q}(x_t,a_t)`.

        position: array-like, shape (n_rounds,), default=None
            Position of recommendation interface where action was presented in each round of the given logged bandit data.

        alpha: float, default=0.05
            Significance level.

        n_bootstrap_samples: int, default=10000
            Number of resampling performed in the bootstrap procedure.

        random_state: int, default=None
            Controls the random seed in bootstrap sampling.

        Returns
        ----------
        estimated_confidence_interval: Dict[str, float]
            Dictionary storing the estimated mean and upper-lower confidence bounds.

        """
        check_array(
            array=estimated_rewards_by_reg_model,
            name="estimated_rewards_by_reg_model",
            expected_dim=3,
        )
        check_array(array=reward, name="reward", expected_dim=1)
        check_array(array=action, name="action", expected_dim=1)
        check_array(array=pscore, name="pscore", expected_dim=1)
        check_ope_inputs(
            action_dist=action_dist,
            position=position,
            action=action,
            reward=reward,
            pscore=pscore,
            estimated_rewards_by_reg_model=estimated_rewards_by_reg_model,
        )
        if position is None:
            # single-slot interface: treat every round as position 0
            position = np.zeros(action_dist.shape[0], dtype=int)
        return super().estimate_interval_with_tuning(
            reward=reward,
            action=action,
            position=position,
            pscore=pscore,
            action_dist=action_dist,
            estimated_rewards_by_reg_model=estimated_rewards_by_reg_model,
            alpha=alpha,
            n_bootstrap_samples=n_bootstrap_samples,
            random_state=random_state,
        )
@dataclass
class SwitchDoublyRobustTuning(BaseOffPolicyEstimatorTuning):
    """Switch Doubly Robust (Switch-DR) with built-in hyperparameter tuning.

    Parameters
    ----------
    lambdas: List[float]
        A list of candidate switching hyperparameters.
        The automatic hyperparameter tuning proposed by Su et al.(2020)
        will choose the best hyperparameter value from the data.

    estimator_name: str, default='switch-dr'.
        Name of the estimator.

    References
    ----------
    Miroslav Dudík, Dumitru Erhan, John Langford, and Lihong Li.
    "Doubly Robust Policy Evaluation and Optimization.", 2014.

    Yu-Xiang Wang, Alekh Agarwal, and Miroslav Dudík.
    "Optimal and Adaptive Off-policy Evaluation in Contextual Bandits", 2016.

    """

    estimator_name: str = "switch-dr"

    def __post_init__(self) -> None:
        """Initialize Class."""
        self.base_ope_estimator = SwitchDoublyRobust
        super()._check_lambdas()
        super()._check_init_inputs()

    def estimate_policy_value(
        self,
        reward: np.ndarray,
        action: np.ndarray,
        pscore: np.ndarray,
        action_dist: np.ndarray,
        estimated_rewards_by_reg_model: np.ndarray,
        position: Optional[np.ndarray] = None,
    ) -> float:
        """Estimate the policy value of evaluation policy with a tuned hyperparameter.

        Parameters
        ----------
        reward: array-like, shape (n_rounds,)
            Reward observed in each round of the logged bandit feedback, i.e., :math:`r_t`.

        action: array-like, shape (n_rounds,)
            Action sampled by behavior policy in each round of the logged bandit feedback, i.e., :math:`a_t`.

        pscore: array-like, shape (n_rounds,)
            Action choice probabilities of behavior policy (propensity scores), i.e., :math:`\\pi_b(a_t|x_t)`.

        action_dist: array-like, shape (n_rounds, n_actions, len_list)
            Action choice probabilities of evaluation policy (can be deterministic), i.e., :math:`\\pi_e(a_t|x_t)`.

        estimated_rewards_by_reg_model: array-like, shape (n_rounds, n_actions, len_list)
            Expected rewards given context, action, and position estimated by regression model, i.e., :math:`\\hat{q}(x_t,a_t)`.

        position: array-like, shape (n_rounds,), default=None
            Position of recommendation interface where action was presented in each round of the given logged bandit data.

        Returns
        ----------
        V_hat: float
            Policy value estimated by the DR estimator.

        """
        check_array(
            array=estimated_rewards_by_reg_model,
            name="estimated_rewards_by_reg_model",
            expected_dim=3,
        )
        check_array(array=reward, name="reward", expected_dim=1)
        check_array(array=action, name="action", expected_dim=1)
        check_array(array=pscore, name="pscore", expected_dim=1)
        check_ope_inputs(
            action_dist=action_dist,
            position=position,
            action=action,
            reward=reward,
            pscore=pscore,
            estimated_rewards_by_reg_model=estimated_rewards_by_reg_model,
        )
        if position is None:
            # single-slot interface: treat every round as position 0
            position = np.zeros(action_dist.shape[0], dtype=int)
        return super().estimate_policy_value_with_tuning(
            reward=reward,
            action=action,
            position=position,
            pscore=pscore,
            action_dist=action_dist,
            estimated_rewards_by_reg_model=estimated_rewards_by_reg_model,
        )

    def estimate_interval(
        self,
        reward: np.ndarray,
        action: np.ndarray,
        pscore: np.ndarray,
        action_dist: np.ndarray,
        estimated_rewards_by_reg_model: np.ndarray,
        position: Optional[np.ndarray] = None,
        alpha: float = 0.05,
        n_bootstrap_samples: int = 10000,
        random_state: Optional[int] = None,
        **kwargs,
    ) -> Dict[str, float]:
        """Estimate confidence interval of policy value by nonparametric bootstrap procedure.

        Parameters
        ----------
        reward: array-like, shape (n_rounds,)
            Reward observed in each round of the logged bandit feedback, i.e., :math:`r_t`.

        action: array-like, shape (n_rounds,)
            Action sampled by behavior policy in each round of the logged bandit feedback, i.e., :math:`a_t`.

        pscore: array-like, shape (n_rounds,)
            Action choice probabilities of behavior policy (propensity scores), i.e., :math:`\\pi_b(a_t|x_t)`.

        action_dist: array-like, shape (n_rounds, n_actions, len_list)
            Action choice probabilities of evaluation policy (can be deterministic), i.e., :math:`\\pi_e(a_t|x_t)`.

        estimated_rewards_by_reg_model: array-like, shape (n_rounds, n_actions, len_list)
            Expected rewards given context, action, and position estimated by regression model, i.e., :math:`\\hat{q}(x_t,a_t)`.

        position: array-like, shape (n_rounds,), default=None
            Position of recommendation interface where action was presented in each round of the given logged bandit data.

        alpha: float, default=0.05
            Significance level.

        n_bootstrap_samples: int, default=10000
            Number of resampling performed in the bootstrap procedure.

        random_state: int, default=None
            Controls the random seed in bootstrap sampling.

        Returns
        ----------
        estimated_confidence_interval: Dict[str, float]
            Dictionary storing the estimated mean and upper-lower confidence bounds.

        """
        check_array(
            array=estimated_rewards_by_reg_model,
            name="estimated_rewards_by_reg_model",
            expected_dim=3,
        )
        check_array(array=reward, name="reward", expected_dim=1)
        check_array(array=action, name="action", expected_dim=1)
        check_array(array=pscore, name="pscore", expected_dim=1)
        check_ope_inputs(
            action_dist=action_dist,
            position=position,
            action=action,
            reward=reward,
            pscore=pscore,
            estimated_rewards_by_reg_model=estimated_rewards_by_reg_model,
        )
        if position is None:
            # single-slot interface: treat every round as position 0
            position = np.zeros(action_dist.shape[0], dtype=int)
        return super().estimate_interval_with_tuning(
            reward=reward,
            action=action,
            position=position,
            pscore=pscore,
            action_dist=action_dist,
            estimated_rewards_by_reg_model=estimated_rewards_by_reg_model,
            alpha=alpha,
            n_bootstrap_samples=n_bootstrap_samples,
            random_state=random_state,
        )
@dataclass
class DoublyRobustWithShrinkageTuning(BaseOffPolicyEstimatorTuning):
    """Doubly Robust with optimistic shrinkage (DRos) with built-in hyperparameter tuning.

    Parameters
    ----------
    lambdas: List[float]
        A list of candidate shrinkage hyperparameters.
        The automatic hyperparameter tuning proposed by Su et al.(2020)
        will choose the best hyperparameter value from the data.

    estimator_name: str, default='dr-os'.
        Name of the estimator.

    References
    ----------
    Miroslav Dudík, Dumitru Erhan, John Langford, and Lihong Li.
    "Doubly Robust Policy Evaluation and Optimization.", 2014.

    Yi Su, Maria Dimakopoulou, Akshay Krishnamurthy, and Miroslav Dudik.
    "Doubly Robust Off-Policy Evaluation with Shrinkage.", 2020.

    """

    estimator_name: str = "dr-os"

    def __post_init__(self) -> None:
        """Set the base estimator and validate the tuning hyperparameters."""
        self.base_ope_estimator = DoublyRobustWithShrinkage
        super()._check_lambdas()
        super()._check_init_inputs()

    def estimate_policy_value(
        self,
        reward: np.ndarray,
        action: np.ndarray,
        pscore: np.ndarray,
        action_dist: np.ndarray,
        estimated_rewards_by_reg_model: np.ndarray,
        position: Optional[np.ndarray] = None,
    ) -> float:
        """Estimate the policy value of evaluation policy with a tuned hyperparameter.

        Parameters
        ----------
        reward: array-like, shape (n_rounds,)
            Reward observed in each round of the logged bandit feedback, i.e., :math:`r_t`.

        action: array-like, shape (n_rounds,)
            Action sampled by behavior policy in each round of the logged bandit feedback, i.e., :math:`a_t`.

        pscore: array-like, shape (n_rounds,)
            Action choice probabilities of behavior policy (propensity scores), i.e., :math:`\\pi_b(a_t|x_t)`.

        action_dist: array-like, shape (n_rounds, n_actions, len_list)
            Action choice probabilities of evaluation policy (can be deterministic), i.e., :math:`\\pi_e(a_t|x_t)`.

        estimated_rewards_by_reg_model: array-like, shape (n_rounds, n_actions, len_list)
            Expected rewards given context, action, and position estimated by regression model, i.e., :math:`\\hat{q}(x_t,a_t)`.

        position: array-like, shape (n_rounds,), default=None
            Position of recommendation interface where action was presented in each round of the given logged bandit data.

        Returns
        ----------
        V_hat: float
            Policy value estimated by the DR estimator.

        """
        check_array(
            array=estimated_rewards_by_reg_model,
            name="estimated_rewards_by_reg_model",
            expected_dim=3,
        )
        # validate every one-dimensional input in one sweep
        for arr_name, arr in (("reward", reward), ("action", action), ("pscore", pscore)):
            check_array(array=arr, name=arr_name, expected_dim=1)
        check_ope_inputs(
            action_dist=action_dist,
            position=position,
            action=action,
            reward=reward,
            pscore=pscore,
            estimated_rewards_by_reg_model=estimated_rewards_by_reg_model,
        )
        # default to a single-slot interface (all rounds at position 0)
        position = (
            np.zeros(action_dist.shape[0], dtype=int) if position is None else position
        )
        return super().estimate_policy_value_with_tuning(
            reward=reward,
            action=action,
            position=position,
            pscore=pscore,
            action_dist=action_dist,
            estimated_rewards_by_reg_model=estimated_rewards_by_reg_model,
        )

    def estimate_interval(
        self,
        reward: np.ndarray,
        action: np.ndarray,
        pscore: np.ndarray,
        action_dist: np.ndarray,
        estimated_rewards_by_reg_model: np.ndarray,
        position: Optional[np.ndarray] = None,
        alpha: float = 0.05,
        n_bootstrap_samples: int = 10000,
        random_state: Optional[int] = None,
        **kwargs,
    ) -> Dict[str, float]:
        """Estimate confidence interval of policy value by nonparametric bootstrap procedure.

        Parameters
        ----------
        reward: array-like, shape (n_rounds,)
            Reward observed in each round of the logged bandit feedback, i.e., :math:`r_t`.

        action: array-like, shape (n_rounds,)
            Action sampled by behavior policy in each round of the logged bandit feedback, i.e., :math:`a_t`.

        pscore: array-like, shape (n_rounds,)
            Action choice probabilities of behavior policy (propensity scores), i.e., :math:`\\pi_b(a_t|x_t)`.

        action_dist: array-like, shape (n_rounds, n_actions, len_list)
            Action choice probabilities of evaluation policy (can be deterministic), i.e., :math:`\\pi_e(a_t|x_t)`.

        estimated_rewards_by_reg_model: array-like, shape (n_rounds, n_actions, len_list)
            Expected rewards given context, action, and position estimated by regression model, i.e., :math:`\\hat{q}(x_t,a_t)`.

        position: array-like, shape (n_rounds,), default=None
            Position of recommendation interface where action was presented in each round of the given logged bandit data.

        alpha: float, default=0.05
            Significance level.

        n_bootstrap_samples: int, default=10000
            Number of resampling performed in the bootstrap procedure.

        random_state: int, default=None
            Controls the random seed in bootstrap sampling.

        Returns
        ----------
        estimated_confidence_interval: Dict[str, float]
            Dictionary storing the estimated mean and upper-lower confidence bounds.

        """
        check_array(
            array=estimated_rewards_by_reg_model,
            name="estimated_rewards_by_reg_model",
            expected_dim=3,
        )
        # validate every one-dimensional input in one sweep
        for arr_name, arr in (("reward", reward), ("action", action), ("pscore", pscore)):
            check_array(array=arr, name=arr_name, expected_dim=1)
        check_ope_inputs(
            action_dist=action_dist,
            position=position,
            action=action,
            reward=reward,
            pscore=pscore,
            estimated_rewards_by_reg_model=estimated_rewards_by_reg_model,
        )
        # default to a single-slot interface (all rounds at position 0)
        position = (
            np.zeros(action_dist.shape[0], dtype=int) if position is None else position
        )
        return super().estimate_interval_with_tuning(
            reward=reward,
            action=action,
            position=position,
            pscore=pscore,
            action_dist=action_dist,
            estimated_rewards_by_reg_model=estimated_rewards_by_reg_model,
            alpha=alpha,
            n_bootstrap_samples=n_bootstrap_samples,
            random_state=random_state,
        )
| 37.765576
| 128
| 0.633822
| 4,287
| 35,764
| 5.086774
| 0.063914
| 0.032558
| 0.052002
| 0.060669
| 0.895905
| 0.881139
| 0.879626
| 0.876324
| 0.876324
| 0.876324
| 0
| 0.007498
| 0.276507
| 35,764
| 946
| 129
| 37.805497
| 0.835285
| 0.464461
| 0
| 0.783186
| 0
| 0
| 0.033586
| 0.013895
| 0
| 0
| 0
| 0
| 0
| 1
| 0.039823
| false
| 0
| 0.030973
| 0
| 0.126106
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
f547668045352af962baf40f72a717cbaff007d5
| 4,271
|
py
|
Python
|
RFEM/BasicObjects/memberSet.py
|
Dlubal-Software/RFEM_Python_Client
|
9e29c598dadf380d49677c463931f0be659ccc40
|
[
"MIT"
] | 16
|
2021-10-13T21:00:11.000Z
|
2022-03-21T11:12:09.000Z
|
RFEM/BasicObjects/memberSet.py
|
Dlubal-Software/RFEM_Python_Client
|
9e29c598dadf380d49677c463931f0be659ccc40
|
[
"MIT"
] | 49
|
2021-10-19T13:18:51.000Z
|
2022-03-30T08:20:17.000Z
|
RFEM/BasicObjects/memberSet.py
|
Dlubal-Software/RFEM_Python_Client
|
9e29c598dadf380d49677c463931f0be659ccc40
|
[
"MIT"
] | 7
|
2021-10-13T06:06:24.000Z
|
2022-03-29T17:48:39.000Z
|
from RFEM.initModel import Model, clearAtributes, ConvertToDlString
from RFEM.enums import SetType
class MemberSet():
    """Create member sets in the connected RFEM model via the web-service client."""

    @staticmethod
    def _submit(model, no, members_no, set_type_name, comment, params):
        """Build an 'ns0:member_set' client object and push it to the model.

        Shared by the constructor and the named factory methods; `set_type_name`
        is the string name of the SetType enumeration member to apply.
        """
        # Client model | Member Set
        client_object = model.clientModel.factory.create('ns0:member_set')

        # Clears object atributes | Sets all atributes to None
        clearAtributes(client_object)

        client_object.no = no
        client_object.members = ConvertToDlString(members_no)
        client_object.set_type = set_type_name
        client_object.comment = comment

        # Forward any extra WS parameters supplied as a dictionary
        if params:
            for key, value in params.items():
                client_object[key] = value

        # Add Member Set to client model
        model.clientModel.service.set_member_set(client_object)

    def __init__(self,
                 no: int = 1,
                 members_no: str = '1 4 5 8 9 12 13 16 17 20 21 24',
                 member_set_type = SetType.SET_TYPE_GROUP,
                 comment: str = '',
                 params: dict = None,
                 model = Model):
        '''
        Args:
            no (int): Member Set Tag
            members_no (str): Tags of Members Contained Within Member Set
            member_set_type (enum): Member Set Type Enumeration
            comment (str, optional): Comments
            params (dict, optional): Any WS Parameter relevant to the object and its value in form of a dictionary
        '''
        MemberSet._submit(model, no, members_no, member_set_type.name, comment, params)

    @staticmethod
    def ContinuousMembers(
                 no: int = 1,
                 members_no: str = '1 4 5 8 9 12 13 16 17 20 21 24',
                 comment: str = '',
                 params: dict = None,
                 model = Model):
        '''
        Args:
            no (int): Member Set Tag
            members_no (str): Tags of Members Contained Within Continuous Member Set
            comment (str, optional): Comments
            params (dict, optional): Any WS Parameter relevant to the object and its value in form of a dictionary
        '''
        MemberSet._submit(
            model, no, members_no, SetType.SET_TYPE_CONTINUOUS.name, comment, params
        )

    @staticmethod
    def GroupOfmembers(
                 no: int = 1,
                 members_no: str = '1 4 5 8 9 12 13 16 17 20 21 24',
                 comment: str = '',
                 params: dict = None,
                 model = Model):
        '''
        Args:
            no (int): Member Set Tag
            members_no (str): Tags of Members Contained Within Group of Members Member Set
            comment (str, optional): Comments
            params (dict, optional): Any WS Parameter relevant to the object and its value in form of a dictionary
        '''
        MemberSet._submit(
            model, no, members_no, SetType.SET_TYPE_GROUP.name, comment, params
        )
| 32.603053
| 114
| 0.575509
| 464
| 4,271
| 5.217672
| 0.185345
| 0.104089
| 0.037588
| 0.016109
| 0.899215
| 0.899215
| 0.886411
| 0.886411
| 0.886411
| 0.886411
| 0
| 0.023043
| 0.359869
| 4,271
| 130
| 115
| 32.853846
| 0.862473
| 0.337158
| 0
| 0.814815
| 0
| 0
| 0.050114
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.055556
| false
| 0
| 0.037037
| 0
| 0.111111
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
f572f6faf65636cfedf0030aeff5669803ce0835
| 29,921
|
py
|
Python
|
sdk/python/pulumi_azuread/service_principal_password.py
|
ragnarstolsmark/pulumi-azuread
|
b9398511c142f0aad349e492ded419f870edc925
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azuread/service_principal_password.py
|
ragnarstolsmark/pulumi-azuread
|
b9398511c142f0aad349e492ded419f870edc925
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azuread/service_principal_password.py
|
ragnarstolsmark/pulumi-azuread
|
b9398511c142f0aad349e492ded419f870edc925
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
__all__ = ['ServicePrincipalPasswordArgs', 'ServicePrincipalPassword']
@pulumi.input_type
class ServicePrincipalPasswordArgs:
    # NOTE: this class was generated by the Pulumi Terraform Bridge (tfgen),
    # per the file header; changes should mirror the provider schema.
    def __init__(__self__, *,
                 service_principal_id: pulumi.Input[str],
                 description: Optional[pulumi.Input[str]] = None,
                 display_name: Optional[pulumi.Input[str]] = None,
                 end_date: Optional[pulumi.Input[str]] = None,
                 end_date_relative: Optional[pulumi.Input[str]] = None,
                 key_id: Optional[pulumi.Input[str]] = None,
                 start_date: Optional[pulumi.Input[str]] = None,
                 value: Optional[pulumi.Input[str]] = None):
        """
        The set of arguments for constructing a ServicePrincipalPassword resource.
        :param pulumi.Input[str] service_principal_id: The ID of the Service Principal for which this password should be created. Changing this field forces a new resource to be created.
        :param pulumi.Input[str] description: A description for the Password. Deprecated in favour of `display_name`.
        :param pulumi.Input[str] display_name: The display name for the password.
        :param pulumi.Input[str] end_date: The End Date which the Password is valid until, formatted as a RFC3339 date string (e.g. `2018-01-01T01:02:03Z`). Changing this field forces a new resource to be created.
        :param pulumi.Input[str] end_date_relative: A relative duration for which the Password is valid until, for example `240h` (10 days) or `2400h30m`. Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h". Changing this field forces a new resource to be created.
        :param pulumi.Input[str] key_id: A GUID used to uniquely identify this Key. If not specified a GUID will be created. Changing this field forces a new resource to be created.
        :param pulumi.Input[str] start_date: The Start Date which the Password is valid from, formatted as a RFC3339 date string (e.g. `2018-01-01T01:02:03Z`). If this isn't specified, the current date is used. Changing this field forces a new resource to be created.
        :param pulumi.Input[str] value: The Password for this Service Principal.
        """
        # The only required argument; everything else is set when provided.
        pulumi.set(__self__, "service_principal_id", service_principal_id)
        # `description` is deprecated: warn at construction time, then still
        # store the value so existing programs keep working.
        if description is not None:
            warnings.warn("""This property has been renamed to `display_name` and will be removed in version 2.0 of the AzureAD provider""", DeprecationWarning)
            pulumi.log.warn("""description is deprecated: This property has been renamed to `display_name` and will be removed in version 2.0 of the AzureAD provider""")
        if description is not None:
            pulumi.set(__self__, "description", description)
        if display_name is not None:
            pulumi.set(__self__, "display_name", display_name)
        if end_date is not None:
            pulumi.set(__self__, "end_date", end_date)
        if end_date_relative is not None:
            pulumi.set(__self__, "end_date_relative", end_date_relative)
        if key_id is not None:
            pulumi.set(__self__, "key_id", key_id)
        if start_date is not None:
            pulumi.set(__self__, "start_date", start_date)
        # `value` is deprecated too: it will become read-only in provider 2.0.
        if value is not None:
            warnings.warn("""In version 2.0 of the AzureAD provider, this attribute will become read-only as it will no longer be possible to specify a password value. It will be generated by Azure Active Directory and persisted to state for reuse in your Terraform configuration.""", DeprecationWarning)
            pulumi.log.warn("""value is deprecated: In version 2.0 of the AzureAD provider, this attribute will become read-only as it will no longer be possible to specify a password value. It will be generated by Azure Active Directory and persisted to state for reuse in your Terraform configuration.""")
        if value is not None:
            pulumi.set(__self__, "value", value)

    @property
    @pulumi.getter(name="servicePrincipalId")
    def service_principal_id(self) -> pulumi.Input[str]:
        """
        The ID of the Service Principal for which this password should be created. Changing this field forces a new resource to be created.
        """
        return pulumi.get(self, "service_principal_id")

    @service_principal_id.setter
    def service_principal_id(self, value: pulumi.Input[str]):
        pulumi.set(self, "service_principal_id", value)

    @property
    @pulumi.getter
    def description(self) -> Optional[pulumi.Input[str]]:
        """
        A description for the Password. Deprecated in favour of `display_name`.
        """
        return pulumi.get(self, "description")

    @description.setter
    def description(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "description", value)

    @property
    @pulumi.getter(name="displayName")
    def display_name(self) -> Optional[pulumi.Input[str]]:
        """
        The display name for the password.
        """
        return pulumi.get(self, "display_name")

    @display_name.setter
    def display_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "display_name", value)

    @property
    @pulumi.getter(name="endDate")
    def end_date(self) -> Optional[pulumi.Input[str]]:
        """
        The End Date which the Password is valid until, formatted as a RFC3339 date string (e.g. `2018-01-01T01:02:03Z`). Changing this field forces a new resource to be created.
        """
        return pulumi.get(self, "end_date")

    @end_date.setter
    def end_date(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "end_date", value)

    @property
    @pulumi.getter(name="endDateRelative")
    def end_date_relative(self) -> Optional[pulumi.Input[str]]:
        """
        A relative duration for which the Password is valid until, for example `240h` (10 days) or `2400h30m`. Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h". Changing this field forces a new resource to be created.
        """
        return pulumi.get(self, "end_date_relative")

    @end_date_relative.setter
    def end_date_relative(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "end_date_relative", value)

    @property
    @pulumi.getter(name="keyId")
    def key_id(self) -> Optional[pulumi.Input[str]]:
        """
        A GUID used to uniquely identify this Key. If not specified a GUID will be created. Changing this field forces a new resource to be created.
        """
        return pulumi.get(self, "key_id")

    @key_id.setter
    def key_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "key_id", value)

    @property
    @pulumi.getter(name="startDate")
    def start_date(self) -> Optional[pulumi.Input[str]]:
        """
        The Start Date which the Password is valid from, formatted as a RFC3339 date string (e.g. `2018-01-01T01:02:03Z`). If this isn't specified, the current date is used. Changing this field forces a new resource to be created.
        """
        return pulumi.get(self, "start_date")

    @start_date.setter
    def start_date(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "start_date", value)

    @property
    @pulumi.getter
    def value(self) -> Optional[pulumi.Input[str]]:
        """
        The Password for this Service Principal.
        """
        return pulumi.get(self, "value")

    @value.setter
    def value(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "value", value)
@pulumi.input_type
class _ServicePrincipalPasswordState:
    # NOTE: generated state class (tfgen). Unlike the Args class, every field
    # here is optional because state lookups may be partial.
    def __init__(__self__, *,
                 description: Optional[pulumi.Input[str]] = None,
                 display_name: Optional[pulumi.Input[str]] = None,
                 end_date: Optional[pulumi.Input[str]] = None,
                 end_date_relative: Optional[pulumi.Input[str]] = None,
                 key_id: Optional[pulumi.Input[str]] = None,
                 service_principal_id: Optional[pulumi.Input[str]] = None,
                 start_date: Optional[pulumi.Input[str]] = None,
                 value: Optional[pulumi.Input[str]] = None):
        """
        Input properties used for looking up and filtering ServicePrincipalPassword resources.
        :param pulumi.Input[str] description: A description for the Password. Deprecated in favour of `display_name`.
        :param pulumi.Input[str] display_name: The display name for the password.
        :param pulumi.Input[str] end_date: The End Date which the Password is valid until, formatted as a RFC3339 date string (e.g. `2018-01-01T01:02:03Z`). Changing this field forces a new resource to be created.
        :param pulumi.Input[str] end_date_relative: A relative duration for which the Password is valid until, for example `240h` (10 days) or `2400h30m`. Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h". Changing this field forces a new resource to be created.
        :param pulumi.Input[str] key_id: A GUID used to uniquely identify this Key. If not specified a GUID will be created. Changing this field forces a new resource to be created.
        :param pulumi.Input[str] service_principal_id: The ID of the Service Principal for which this password should be created. Changing this field forces a new resource to be created.
        :param pulumi.Input[str] start_date: The Start Date which the Password is valid from, formatted as a RFC3339 date string (e.g. `2018-01-01T01:02:03Z`). If this isn't specified, the current date is used. Changing this field forces a new resource to be created.
        :param pulumi.Input[str] value: The Password for this Service Principal.
        """
        # `description` is deprecated: warn, then still store the value.
        if description is not None:
            warnings.warn("""This property has been renamed to `display_name` and will be removed in version 2.0 of the AzureAD provider""", DeprecationWarning)
            pulumi.log.warn("""description is deprecated: This property has been renamed to `display_name` and will be removed in version 2.0 of the AzureAD provider""")
        if description is not None:
            pulumi.set(__self__, "description", description)
        if display_name is not None:
            pulumi.set(__self__, "display_name", display_name)
        if end_date is not None:
            pulumi.set(__self__, "end_date", end_date)
        if end_date_relative is not None:
            pulumi.set(__self__, "end_date_relative", end_date_relative)
        if key_id is not None:
            pulumi.set(__self__, "key_id", key_id)
        if service_principal_id is not None:
            pulumi.set(__self__, "service_principal_id", service_principal_id)
        if start_date is not None:
            pulumi.set(__self__, "start_date", start_date)
        # `value` is deprecated too: it will become read-only in provider 2.0.
        if value is not None:
            warnings.warn("""In version 2.0 of the AzureAD provider, this attribute will become read-only as it will no longer be possible to specify a password value. It will be generated by Azure Active Directory and persisted to state for reuse in your Terraform configuration.""", DeprecationWarning)
            pulumi.log.warn("""value is deprecated: In version 2.0 of the AzureAD provider, this attribute will become read-only as it will no longer be possible to specify a password value. It will be generated by Azure Active Directory and persisted to state for reuse in your Terraform configuration.""")
        if value is not None:
            pulumi.set(__self__, "value", value)

    @property
    @pulumi.getter
    def description(self) -> Optional[pulumi.Input[str]]:
        """
        A description for the Password. Deprecated in favour of `display_name`.
        """
        return pulumi.get(self, "description")

    @description.setter
    def description(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "description", value)

    @property
    @pulumi.getter(name="displayName")
    def display_name(self) -> Optional[pulumi.Input[str]]:
        """
        The display name for the password.
        """
        return pulumi.get(self, "display_name")

    @display_name.setter
    def display_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "display_name", value)

    @property
    @pulumi.getter(name="endDate")
    def end_date(self) -> Optional[pulumi.Input[str]]:
        """
        The End Date which the Password is valid until, formatted as a RFC3339 date string (e.g. `2018-01-01T01:02:03Z`). Changing this field forces a new resource to be created.
        """
        return pulumi.get(self, "end_date")

    @end_date.setter
    def end_date(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "end_date", value)

    @property
    @pulumi.getter(name="endDateRelative")
    def end_date_relative(self) -> Optional[pulumi.Input[str]]:
        """
        A relative duration for which the Password is valid until, for example `240h` (10 days) or `2400h30m`. Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h". Changing this field forces a new resource to be created.
        """
        return pulumi.get(self, "end_date_relative")

    @end_date_relative.setter
    def end_date_relative(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "end_date_relative", value)

    @property
    @pulumi.getter(name="keyId")
    def key_id(self) -> Optional[pulumi.Input[str]]:
        """
        A GUID used to uniquely identify this Key. If not specified a GUID will be created. Changing this field forces a new resource to be created.
        """
        return pulumi.get(self, "key_id")

    @key_id.setter
    def key_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "key_id", value)

    @property
    @pulumi.getter(name="servicePrincipalId")
    def service_principal_id(self) -> Optional[pulumi.Input[str]]:
        """
        The ID of the Service Principal for which this password should be created. Changing this field forces a new resource to be created.
        """
        return pulumi.get(self, "service_principal_id")

    @service_principal_id.setter
    def service_principal_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "service_principal_id", value)

    @property
    @pulumi.getter(name="startDate")
    def start_date(self) -> Optional[pulumi.Input[str]]:
        """
        The Start Date which the Password is valid from, formatted as a RFC3339 date string (e.g. `2018-01-01T01:02:03Z`). If this isn't specified, the current date is used. Changing this field forces a new resource to be created.
        """
        return pulumi.get(self, "start_date")

    @start_date.setter
    def start_date(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "start_date", value)

    @property
    @pulumi.getter
    def value(self) -> Optional[pulumi.Input[str]]:
        """
        The Password for this Service Principal.
        """
        return pulumi.get(self, "value")

    @value.setter
    def value(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "value", value)
class ServicePrincipalPassword(pulumi.CustomResource):
    # NOTE: generated resource class (tfgen). The two @overload __init__
    # signatures document the supported calling conventions; the real
    # dispatcher below routes to _internal_init.
    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 description: Optional[pulumi.Input[str]] = None,
                 display_name: Optional[pulumi.Input[str]] = None,
                 end_date: Optional[pulumi.Input[str]] = None,
                 end_date_relative: Optional[pulumi.Input[str]] = None,
                 key_id: Optional[pulumi.Input[str]] = None,
                 service_principal_id: Optional[pulumi.Input[str]] = None,
                 start_date: Optional[pulumi.Input[str]] = None,
                 value: Optional[pulumi.Input[str]] = None,
                 __props__=None):
        """
        Manages a password credential associated with a service principal within Azure Active Directory. See also the ApplicationPassword resource.
        > **NOTE:** If you're authenticating using a Service Principal then it must have permissions to both `Read and write all applications` and `Sign in and read user profile` within the `Windows Azure Active Directory` API.
        ## Example Usage
        ```python
        import pulumi
        import pulumi_azuread as azuread
        example_application = azuread.Application("exampleApplication")
        example_service_principal = azuread.ServicePrincipal("exampleServicePrincipal", application_id=example_application.application_id)
        example_service_principal_password = azuread.ServicePrincipalPassword("exampleServicePrincipalPassword", service_principal_id=example_service_principal.object_id)
        ```
        ## Import
        Passwords can be imported using the `object id` of a Service Principal and the `key id` of the password, e.g.
        ```sh
        $ pulumi import azuread:index/servicePrincipalPassword:ServicePrincipalPassword test 00000000-0000-0000-0000-000000000000/password/11111111-1111-1111-1111-111111111111
        ```
        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] description: A description for the Password. Deprecated in favour of `display_name`.
        :param pulumi.Input[str] display_name: The display name for the password.
        :param pulumi.Input[str] end_date: The End Date which the Password is valid until, formatted as a RFC3339 date string (e.g. `2018-01-01T01:02:03Z`). Changing this field forces a new resource to be created.
        :param pulumi.Input[str] end_date_relative: A relative duration for which the Password is valid until, for example `240h` (10 days) or `2400h30m`. Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h". Changing this field forces a new resource to be created.
        :param pulumi.Input[str] key_id: A GUID used to uniquely identify this Key. If not specified a GUID will be created. Changing this field forces a new resource to be created.
        :param pulumi.Input[str] service_principal_id: The ID of the Service Principal for which this password should be created. Changing this field forces a new resource to be created.
        :param pulumi.Input[str] start_date: The Start Date which the Password is valid from, formatted as a RFC3339 date string (e.g. `2018-01-01T01:02:03Z`). If this isn't specified, the current date is used. Changing this field forces a new resource to be created.
        :param pulumi.Input[str] value: The Password for this Service Principal.
        """
        ...

    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: ServicePrincipalPasswordArgs,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        Manages a password credential associated with a service principal within Azure Active Directory. See also the ApplicationPassword resource.
        > **NOTE:** If you're authenticating using a Service Principal then it must have permissions to both `Read and write all applications` and `Sign in and read user profile` within the `Windows Azure Active Directory` API.
        ## Example Usage
        ```python
        import pulumi
        import pulumi_azuread as azuread
        example_application = azuread.Application("exampleApplication")
        example_service_principal = azuread.ServicePrincipal("exampleServicePrincipal", application_id=example_application.application_id)
        example_service_principal_password = azuread.ServicePrincipalPassword("exampleServicePrincipalPassword", service_principal_id=example_service_principal.object_id)
        ```
        ## Import
        Passwords can be imported using the `object id` of a Service Principal and the `key id` of the password, e.g.
        ```sh
        $ pulumi import azuread:index/servicePrincipalPassword:ServicePrincipalPassword test 00000000-0000-0000-0000-000000000000/password/11111111-1111-1111-1111-111111111111
        ```
        :param str resource_name: The name of the resource.
        :param ServicePrincipalPasswordArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...

    def __init__(__self__, resource_name: str, *args, **kwargs):
        # Dispatcher: figures out which overload the caller used and forwards
        # a normalized argument list to _internal_init.
        resource_args, opts = _utilities.get_resource_args_opts(ServicePrincipalPasswordArgs, pulumi.ResourceOptions, *args, **kwargs)
        if resource_args is not None:
            __self__._internal_init(resource_name, opts, **resource_args.__dict__)
        else:
            __self__._internal_init(resource_name, *args, **kwargs)

    def _internal_init(__self__,
                       resource_name: str,
                       opts: Optional[pulumi.ResourceOptions] = None,
                       description: Optional[pulumi.Input[str]] = None,
                       display_name: Optional[pulumi.Input[str]] = None,
                       end_date: Optional[pulumi.Input[str]] = None,
                       end_date_relative: Optional[pulumi.Input[str]] = None,
                       key_id: Optional[pulumi.Input[str]] = None,
                       service_principal_id: Optional[pulumi.Input[str]] = None,
                       start_date: Optional[pulumi.Input[str]] = None,
                       value: Optional[pulumi.Input[str]] = None,
                       __props__=None):
        # Normalize resource options before building the property bag.
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        if opts.id is None:
            # Creating a new resource: __props__ must not be pre-supplied.
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = ServicePrincipalPasswordArgs.__new__(ServicePrincipalPasswordArgs)
            # Deprecation warnings are skipped when opts.urn is set (i.e. the
            # engine is rehydrating an existing resource, not the user's code).
            if description is not None and not opts.urn:
                warnings.warn("""This property has been renamed to `display_name` and will be removed in version 2.0 of the AzureAD provider""", DeprecationWarning)
                pulumi.log.warn("""description is deprecated: This property has been renamed to `display_name` and will be removed in version 2.0 of the AzureAD provider""")
            __props__.__dict__["description"] = description
            __props__.__dict__["display_name"] = display_name
            __props__.__dict__["end_date"] = end_date
            __props__.__dict__["end_date_relative"] = end_date_relative
            __props__.__dict__["key_id"] = key_id
            if service_principal_id is None and not opts.urn:
                raise TypeError("Missing required property 'service_principal_id'")
            __props__.__dict__["service_principal_id"] = service_principal_id
            __props__.__dict__["start_date"] = start_date
            if value is not None and not opts.urn:
                warnings.warn("""In version 2.0 of the AzureAD provider, this attribute will become read-only as it will no longer be possible to specify a password value. It will be generated by Azure Active Directory and persisted to state for reuse in your Terraform configuration.""", DeprecationWarning)
                pulumi.log.warn("""value is deprecated: In version 2.0 of the AzureAD provider, this attribute will become read-only as it will no longer be possible to specify a password value. It will be generated by Azure Active Directory and persisted to state for reuse in your Terraform configuration.""")
            __props__.__dict__["value"] = value
        super(ServicePrincipalPassword, __self__).__init__(
            'azuread:index/servicePrincipalPassword:ServicePrincipalPassword',
            resource_name,
            __props__,
            opts)

    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None,
            description: Optional[pulumi.Input[str]] = None,
            display_name: Optional[pulumi.Input[str]] = None,
            end_date: Optional[pulumi.Input[str]] = None,
            end_date_relative: Optional[pulumi.Input[str]] = None,
            key_id: Optional[pulumi.Input[str]] = None,
            service_principal_id: Optional[pulumi.Input[str]] = None,
            start_date: Optional[pulumi.Input[str]] = None,
            value: Optional[pulumi.Input[str]] = None) -> 'ServicePrincipalPassword':
        """
        Get an existing ServicePrincipalPassword resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.
        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] description: A description for the Password. Deprecated in favour of `display_name`.
        :param pulumi.Input[str] display_name: The display name for the password.
        :param pulumi.Input[str] end_date: The End Date which the Password is valid until, formatted as a RFC3339 date string (e.g. `2018-01-01T01:02:03Z`). Changing this field forces a new resource to be created.
        :param pulumi.Input[str] end_date_relative: A relative duration for which the Password is valid until, for example `240h` (10 days) or `2400h30m`. Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h". Changing this field forces a new resource to be created.
        :param pulumi.Input[str] key_id: A GUID used to uniquely identify this Key. If not specified a GUID will be created. Changing this field forces a new resource to be created.
        :param pulumi.Input[str] service_principal_id: The ID of the Service Principal for which this password should be created. Changing this field forces a new resource to be created.
        :param pulumi.Input[str] start_date: The Start Date which the Password is valid from, formatted as a RFC3339 date string (e.g. `2018-01-01T01:02:03Z`). If this isn't specified, the current date is used. Changing this field forces a new resource to be created.
        :param pulumi.Input[str] value: The Password for this Service Principal.
        """
        # Force the lookup id into the resource options, then build a state
        # bag from whatever filter properties the caller supplied.
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
        __props__ = _ServicePrincipalPasswordState.__new__(_ServicePrincipalPasswordState)
        __props__.__dict__["description"] = description
        __props__.__dict__["display_name"] = display_name
        __props__.__dict__["end_date"] = end_date
        __props__.__dict__["end_date_relative"] = end_date_relative
        __props__.__dict__["key_id"] = key_id
        __props__.__dict__["service_principal_id"] = service_principal_id
        __props__.__dict__["start_date"] = start_date
        __props__.__dict__["value"] = value
        return ServicePrincipalPassword(resource_name, opts=opts, __props__=__props__)

    @property
    @pulumi.getter
    def description(self) -> pulumi.Output[str]:
        """
        A description for the Password. Deprecated in favour of `display_name`.
        """
        return pulumi.get(self, "description")

    @property
    @pulumi.getter(name="displayName")
    def display_name(self) -> pulumi.Output[str]:
        """
        The display name for the password.
        """
        return pulumi.get(self, "display_name")

    @property
    @pulumi.getter(name="endDate")
    def end_date(self) -> pulumi.Output[str]:
        """
        The End Date which the Password is valid until, formatted as a RFC3339 date string (e.g. `2018-01-01T01:02:03Z`). Changing this field forces a new resource to be created.
        """
        return pulumi.get(self, "end_date")

    @property
    @pulumi.getter(name="endDateRelative")
    def end_date_relative(self) -> pulumi.Output[Optional[str]]:
        """
        A relative duration for which the Password is valid until, for example `240h` (10 days) or `2400h30m`. Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h". Changing this field forces a new resource to be created.
        """
        return pulumi.get(self, "end_date_relative")

    @property
    @pulumi.getter(name="keyId")
    def key_id(self) -> pulumi.Output[str]:
        """
        A GUID used to uniquely identify this Key. If not specified a GUID will be created. Changing this field forces a new resource to be created.
        """
        return pulumi.get(self, "key_id")

    @property
    @pulumi.getter(name="servicePrincipalId")
    def service_principal_id(self) -> pulumi.Output[str]:
        """
        The ID of the Service Principal for which this password should be created. Changing this field forces a new resource to be created.
        """
        return pulumi.get(self, "service_principal_id")

    @property
    @pulumi.getter(name="startDate")
    def start_date(self) -> pulumi.Output[str]:
        """
        The Start Date which the Password is valid from, formatted as a RFC3339 date string (e.g. `2018-01-01T01:02:03Z`). If this isn't specified, the current date is used. Changing this field forces a new resource to be created.
        """
        return pulumi.get(self, "start_date")

    @property
    @pulumi.getter
    def value(self) -> pulumi.Output[str]:
        """
        The Password for this Service Principal.
        """
        return pulumi.get(self, "value")
| 55.615242
| 311
| 0.675679
| 3,945
| 29,921
| 4.965019
| 0.062864
| 0.060652
| 0.075765
| 0.0775
| 0.900495
| 0.887885
| 0.882677
| 0.878287
| 0.875683
| 0.861337
| 0
| 0.020927
| 0.230206
| 29,921
| 537
| 312
| 55.718808
| 0.829462
| 0.391999
| 0
| 0.811688
| 1
| 0.038961
| 0.214037
| 0.009488
| 0
| 0
| 0
| 0
| 0
| 1
| 0.152597
| false
| 0.061688
| 0.016234
| 0
| 0.25974
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 9
|
f5743ebfb8c29d01fea27ea81f62611880cb4751
| 17,487
|
py
|
Python
|
XCS229ii-PS1-Sandbox/src/grader.py
|
bearbearyu1223/Stanford-XCS-229-II
|
7e5743fb326352a168400bb96694c54ed476773f
|
[
"MIT"
] | 2
|
2021-04-16T20:15:20.000Z
|
2021-04-23T08:37:27.000Z
|
XCS229ii-PS1-Sandbox/src/grader.py
|
bearbearyu1223/Stanford-XCS-229-II
|
7e5743fb326352a168400bb96694c54ed476773f
|
[
"MIT"
] | null | null | null |
XCS229ii-PS1-Sandbox/src/grader.py
|
bearbearyu1223/Stanford-XCS-229-II
|
7e5743fb326352a168400bb96694c54ed476773f
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import unittest, random, sys, copy, argparse, inspect
from graderUtil import graded, CourseTestRunner, GradedTestCase
# Import student submission
import submission
#############################################
# HELPER FUNCTIONS FOR CREATING TEST INPUTS #
#############################################
#########
# TESTS #
#########
class Test_1a(GradedTestCase):
    """Grades the multiple-choice answer for question 1(a)."""

    @graded()
    def test_0(self):
        """1a-0-helper: Sanity check."""
        # Normalize the student's answer to lowercase before validating.
        answers = {choice.lower() for choice in submission.multiple_choice_1a()}
        self.assertTrue(answers.issubset({'a', 'b', 'c'}), msg='Checks that the response contains only the options available.')
        self.assertGreaterEqual(len(answers), 1, msg='Checks that the response is within the cardinality of possible options.')

    @graded(is_hidden=True, after_published=False, hide_errors=True)
    def test_1(self):
        """1a-1-hidden: Multiple choice response."""
        # Compare the normalized answer set against the staff solution.
        self.compare_with_solution_or_wait(
            submission,
            'multiple_choice_1a',
            lambda f: {choice.lower() for choice in f()})
class Test_1b(GradedTestCase):
    """Grades the multiple-choice answer for question 1(b)."""

    @graded()
    def test_0(self):
        """1b-0-helper: Sanity check."""
        # Normalize the student's answer to lowercase before validating.
        answers = {choice.lower() for choice in submission.multiple_choice_1b()}
        self.assertTrue(answers.issubset({'a', 'b', 'c', 'd'}), msg='Checks that the response contains only the options available.')
        self.assertEqual(len(answers), 1, msg='Checks that the response is within the cardinality of possible options.')

    @graded(is_hidden=True, after_published=False, hide_errors=True)
    def test_1(self):
        """1b-1-hidden: Multiple choice response."""
        # Compare the normalized answer set against the staff solution.
        self.compare_with_solution_or_wait(
            submission,
            'multiple_choice_1b',
            lambda f: {choice.lower() for choice in f()})
class Test_1c(GradedTestCase):
    """Grades the multiple-choice answer for question 1(c)."""

    @graded()
    def test_0(self):
        """1c-0-helper: Sanity check."""
        # Normalize the student's answer to lowercase before validating.
        answers = {choice.lower() for choice in submission.multiple_choice_1c()}
        self.assertTrue(answers.issubset({'a', 'b', 'c', 'd'}), msg='Checks that the response contains only the options available.')
        self.assertEqual(len(answers), 1, msg='Checks that the response is within the cardinality of possible options.')

    @graded(is_hidden=True, after_published=False, hide_errors=True)
    def test_1(self):
        """1c-1-hidden: Multiple choice response."""
        # Solution comparison is intentionally disabled for this part; the
        # hidden test always passes.
        self.assertTrue(True)
        # self.compare_with_solution_or_wait(submission, 'multiple_choice_1c', lambda f: set([choice.lower() for choice in f()]))
class Test_1d(GradedTestCase):
    """Grades the multiple-choice answer for question 1(d)."""

    @graded()
    def test_0(self):
        """1d-0-helper: Sanity check."""
        # Normalize the student's answer to lowercase before validating.
        answers = {choice.lower() for choice in submission.multiple_choice_1d()}
        self.assertTrue(answers.issubset({'a', 'b', 'c', 'd'}), msg='Checks that the response contains only the options available.')
        self.assertGreaterEqual(len(answers), 1, msg='Checks that the response is within the cardinality of possible options.')

    @graded(is_hidden=True, after_published=False, hide_errors=True)
    def test_1(self):
        """1d-1-hidden: Multiple choice response."""
        # Compare the normalized answer set against the staff solution.
        self.compare_with_solution_or_wait(
            submission,
            'multiple_choice_1d',
            lambda f: {choice.lower() for choice in f()})
class Test_1e(GradedTestCase):
@graded()
def test_0(self):
"""1e-0-helper: Sanity check."""
response = set([choice.lower() for choice in submission.multiple_choice_1e()])
self.assertTrue(response.issubset(set(['a','b','c','d'])), msg='Checks that the response contains only the options available.')
self.assertEqual(len(response),1, msg='Checks that the response is within the cardinality of possible options.')
@graded(is_hidden=True, after_published=False, hide_errors=True)
def test_1(self):
"""1e-1-hidden: Multiple choice response."""
self.compare_with_solution_or_wait(submission, 'multiple_choice_1e', lambda f: set([choice.lower() for choice in f()]))
class Test_1f(GradedTestCase):
@graded()
def test_0(self):
"""1f-0-helper: Sanity check."""
response = set([choice.lower() for choice in submission.multiple_choice_1f()])
self.assertTrue(response.issubset(set(['a','b','c','d'])), msg='Checks that the response contains only the options available.')
self.assertEqual(len(response),1, msg='Checks that the response is within the cardinality of possible options.')
@graded(is_hidden=True, after_published=False, hide_errors=True)
def test_1(self):
"""1f-1-hidden: Multiple choice response."""
self.compare_with_solution_or_wait(submission, 'multiple_choice_1f', lambda f: set([choice.lower() for choice in f()]))
class Test_1g(GradedTestCase):
@graded()
def test_0(self):
"""1g-0-helper: Sanity check."""
response = set([choice.lower() for choice in submission.multiple_choice_1g()])
self.assertTrue(response.issubset(set(['a','b','c','d','e'])), msg='Checks that the response contains only the options available.')
self.assertEqual(len(response),2, msg='Checks that the response is within the cardinality of possible options.')
@graded(is_hidden=True, after_published=False, hide_errors=True)
def test_1(self):
"""1g-1-hidden: Multiple choice response."""
self.compare_with_solution_or_wait(submission, 'multiple_choice_1g', lambda f: set([choice.lower() for choice in f()]))
class Test_1h(GradedTestCase):
@graded()
def test_0(self):
"""1h-0-helper: Sanity check."""
response = set([choice.lower() for choice in submission.multiple_choice_1h()])
self.assertTrue(response.issubset(set(['a','b','c','d'])), msg='Checks that the response contains only the options available.')
self.assertEqual(len(response),2, msg='Checks that the response is within the cardinality of possible options.')
@graded(is_hidden=True, after_published=False, hide_errors=True)
def test_1(self):
"""1h-1-hidden: Multiple choice response."""
self.compare_with_solution_or_wait(submission, 'multiple_choice_1h', lambda f: set([choice.lower() for choice in f()]))
class Test_1i(GradedTestCase):
@graded()
def test_0(self):
"""1i-0-helper: Sanity check."""
response = set([choice.lower() for choice in submission.multiple_choice_1i()])
self.assertTrue(response.issubset(set(['a','b','c','d'])), msg='Checks that the response contains only the options available.')
self.assertGreaterEqual(len(response),1, msg='Checks that the response is within the cardinality of possible options.')
@graded(is_hidden=True, after_published=False, hide_errors=True)
def test_1(self):
"""1i-1-hidden: Multiple choice response."""
self.compare_with_solution_or_wait(submission, 'multiple_choice_1i', lambda f: set([choice.lower() for choice in f()]))
class Test_1j(GradedTestCase):
@graded()
def test_0(self):
"""1j-0-helper: Sanity check."""
response = set([choice.lower() for choice in submission.multiple_choice_1j()])
self.assertTrue(response.issubset(set(['a','b','c','d'])), msg='Checks that the response contains only the options available.')
self.assertGreaterEqual(len(response),1, msg='Checks that the response is within the cardinality of possible options.')
@graded(is_hidden=True, after_published=False, hide_errors=True)
def test_1(self):
"""1j-1-hidden: Multiple choice response."""
self.compare_with_solution_or_wait(submission, 'multiple_choice_1j', lambda f: set([choice.lower() for choice in f()]))
class Test_2a(GradedTestCase):
@graded()
def test_0(self):
"""2a-0-helper: Sanity check."""
response = set([choice.lower() for choice in submission.multiple_choice_2a()])
self.assertTrue(response.issubset(set(['a','b','c','d'])), msg='Checks that the response contains only the options available.')
self.assertEqual(len(response),1, msg='Checks that the response is within the cardinality of possible options.')
@graded(is_hidden=True, after_published=False, hide_errors=True)
def test_1(self):
"""2a-1-hidden: Multiple choice response."""
self.compare_with_solution_or_wait(submission, 'multiple_choice_2a', lambda f: set([choice.lower() for choice in f()]))
class Test_2b(GradedTestCase):
@graded()
def test_0(self):
"""2b-0-helper: Sanity check."""
response = set([choice.lower() for choice in submission.multiple_choice_2b()])
self.assertTrue(response.issubset(set(['a','b'])), msg='Checks that the response contains only the options available.')
self.assertEqual(len(response),1, msg='Checks that the response is within the cardinality of possible options.')
@graded(is_hidden=True, after_published=False, hide_errors=True)
def test_1(self):
"""2b-1-hidden: Multiple choice response."""
self.compare_with_solution_or_wait(submission, 'multiple_choice_2b', lambda f: set([choice.lower() for choice in f()]))
class Test_2c(GradedTestCase):
@graded()
def test_0(self):
"""2c-0-helper: Sanity check."""
response = set([choice.lower() for choice in submission.multiple_choice_2c()])
self.assertTrue(response.issubset(set(['a','b','c','d'])), msg='Checks that the response contains only the options available.')
self.assertGreaterEqual(len(response),1, msg='Checks that the response is within the cardinality of possible options.')
@graded(is_hidden=True, after_published=False, hide_errors=True)
def test_1(self):
"""2c-1-hidden: Multiple choice response."""
self.compare_with_solution_or_wait(submission, 'multiple_choice_2c', lambda f: set([choice.lower() for choice in f()]))
class Test_2d(GradedTestCase):
@graded()
def test_0(self):
"""2d-0-helper: Sanity check."""
response = set([choice.lower() for choice in submission.multiple_choice_2d()])
self.assertTrue(response.issubset(set(['a','b','c'])), msg='Checks that the response contains only the options available.')
self.assertEqual(len(response),1, msg='Checks that the response is within the cardinality of possible options.')
@graded(is_hidden=True, after_published=False, hide_errors=True)
def test_1(self):
"""2d-1-hidden: Multiple choice response."""
self.compare_with_solution_or_wait(submission, 'multiple_choice_2d', lambda f: set([choice.lower() for choice in f()]))
class Test_2e(GradedTestCase):
@graded()
def test_0(self):
"""2e-0-helper: Sanity check."""
response = set([choice.lower() for choice in submission.multiple_choice_2e_i()])
self.assertTrue(response.issubset(set(['a','b'])), msg='Checks that the response contains only the options available.')
self.assertEqual(len(response),1, msg='Checks that the response is within the cardinality of possible options.')
response = set([choice.lower() for choice in submission.multiple_choice_2e_ii()])
self.assertTrue(response.issubset(set(['a','b'])), msg='Checks that the response contains only the options available.')
self.assertEqual(len(response),1, msg='Checks that the response is within the cardinality of possible options.')
response = set([choice.lower() for choice in submission.multiple_choice_2e_iii()])
self.assertTrue(response.issubset(set(['a','b'])), msg='Checks that the response contains only the options available.')
self.assertEqual(len(response),1, msg='Checks that the response is within the cardinality of possible options.')
response = set([choice.lower() for choice in submission.multiple_choice_2e_iv()])
self.assertTrue(response.issubset(set(['a','b'])), msg='Checks that the response contains only the options available.')
self.assertEqual(len(response),1, msg='Checks that the response is within the cardinality of possible options.')
@graded(is_hidden=True, after_published=False, hide_errors=True)
def test_i(self):
"""2e-i-hidden: Multiple choice response."""
self.compare_with_solution_or_wait(submission, 'multiple_choice_2e_i', lambda f: set([choice.lower() for choice in f()]))
@graded(is_hidden=True, after_published=False, hide_errors=True)
def test_ii(self):
"""2e-ii-hidden: Multiple choice response."""
self.compare_with_solution_or_wait(submission, 'multiple_choice_2e_ii', lambda f: set([choice.lower() for choice in f()]))
@graded(is_hidden=True, after_published=False, hide_errors=True)
def test_iii(self):
"""2e-iii-hidden: Multiple choice response."""
self.compare_with_solution_or_wait(submission, 'multiple_choice_2e_iii', lambda f: set([choice.lower() for choice in f()]))
@graded(is_hidden=True, after_published=False, hide_errors=True)
def test_iv(self):
"""2e-iv-hidden: Multiple choice response."""
self.compare_with_solution_or_wait(submission, 'multiple_choice_2e_iv', lambda f: set([choice.lower() for choice in f()]))
class Test_3a(GradedTestCase):
@graded()
def test_0(self):
"""3a-0-helper: Sanity check."""
response = set([choice.lower() for choice in submission.multiple_choice_3a()])
self.assertTrue(response.issubset(set(['a','b','c'])), msg='Checks that the response contains only the options available.')
self.assertEqual(len(response),1, msg='Checks that the response is within the cardinality of possible options.')
@graded(is_hidden=True, after_published=False, hide_errors=True)
def test_1(self):
"""3a-1-hidden: Multiple choice response."""
self.compare_with_solution_or_wait(submission, 'multiple_choice_3a', lambda f: set([choice.lower() for choice in f()]))
class Test_3b(GradedTestCase):
@graded()
def test_0(self):
"""3b-0-helper: Sanity check."""
response = set([choice.lower() for choice in submission.multiple_choice_3b()])
self.assertTrue(response.issubset(set(['a','b','c','d'])), msg='Checks that the response contains only the options available.')
self.assertGreaterEqual(len(response),1, msg='Checks that the response is within the cardinality of possible options.')
@graded(is_hidden=True, after_published=False, hide_errors=True)
def test_1(self):
"""3b-1-hidden: Multiple choice response."""
self.compare_with_solution_or_wait(submission, 'multiple_choice_3b', lambda f: set([choice.lower() for choice in f()]))
class Test_3c(GradedTestCase):
@graded()
def test_0(self):
"""3c-0-helper: Sanity check."""
response = set([choice.lower() for choice in submission.multiple_choice_3c()])
self.assertTrue(response.issubset(set(['a','b','c','d'])), msg='Checks that the response contains only the options available.')
self.assertEqual(len(response),1, msg='Checks that the response is within the cardinality of possible options.')
@graded(is_hidden=True, after_published=False, hide_errors=True)
def test_1(self):
"""3c-1-hidden: Multiple choice response."""
self.compare_with_solution_or_wait(submission, 'multiple_choice_3c', lambda f: set([choice.lower() for choice in f()]))
class Test_4a(GradedTestCase):
@graded()
def test_0(self):
"""4a-0-helper: Sanity check."""
response = set([choice.lower() for choice in submission.multiple_choice_4a()])
self.assertTrue(response.issubset(set(['a','b','c','d'])), msg='Checks that the response contains only the options available.')
self.assertGreaterEqual(len(response),1, msg='Checks that the response is within the cardinality of possible options.')
@graded(is_hidden=True, after_published=False, hide_errors=True)
def test_1(self):
"""4a-1-hidden: Multiple choice response."""
self.compare_with_solution_or_wait(submission, 'multiple_choice_4a', lambda f: set([choice.lower() for choice in f()]))
class Test_4b(GradedTestCase):
@graded()
def test_0(self):
"""4b-0-helper: Sanity check."""
response = set([choice.lower() for choice in submission.multiple_choice_4b()])
self.assertTrue(response.issubset(set(['a','b','c','d'])), msg='Checks that the response contains only the options available.')
self.assertGreaterEqual(len(response),1, msg='Checks that the response is within the cardinality of possible options.')
@graded(is_hidden=True, after_published=False, hide_errors=True)
def test_1(self):
"""4b-1-hidden: Multiple choice response."""
self.compare_with_solution_or_wait(submission, 'multiple_choice_4b', lambda f: set([choice.lower() for choice in f()]))
class Test_4c(GradedTestCase):
@graded()
def test_0(self):
"""4c-0-helper: Sanity check."""
response = set([choice.lower() for choice in submission.multiple_choice_4c()])
self.assertTrue(response.issubset(set(['a','b','c','d'])), msg='Checks that the response contains only the options available.')
self.assertGreaterEqual(len(response),1, msg='Checks that the response is within the cardinality of possible options.')
@graded(is_hidden=True, after_published=False, hide_errors=True)
def test_1(self):
"""4c-1-hidden: Multiple choice response."""
self.compare_with_solution_or_wait(submission, 'multiple_choice_4c', lambda f: set([choice.lower() for choice in f()]))
def getTestCaseForTestID(test_id):
question, part, _ = test_id.split('-')
g = globals().copy()
for name, obj in g.items():
if inspect.isclass(obj) and name == ('Test_'+question):
return obj('test_'+part)
if __name__ == '__main__':
# Parse for a specific test_core
parser = argparse.ArgumentParser()
parser.add_argument('test_case', nargs='?', default='all')
test_id = parser.parse_args().test_case
assignment = unittest.TestSuite()
if test_id != 'all':
assignment.addTest(getTestCaseForTestID(test_id))
else:
assignment.addTests(unittest.defaultTestLoader.discover('.', pattern='grader.py'))
CourseTestRunner().run(assignment)
| 54.818182
| 135
| 0.71882
| 2,467
| 17,487
| 4.949737
| 0.056344
| 0.082549
| 0.055032
| 0.066825
| 0.925395
| 0.924085
| 0.86635
| 0.86635
| 0.86635
| 0.856277
| 0
| 0.014512
| 0.129124
| 17,487
| 318
| 136
| 54.990566
| 0.787314
| 0.102934
| 0
| 0.588496
| 0
| 0
| 0.242604
| 0.00417
| 0
| 0
| 0
| 0
| 0.216814
| 1
| 0.20354
| false
| 0
| 0.013274
| 0
| 0.314159
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
19136b81aa430fea83fa8c561abde96c4199dfbc
| 93
|
py
|
Python
|
agent/continuous/seperate/__init__.py
|
SunandBean/tensorflow_RL
|
a248cbfb99b2041f6f7cc008fcad53fb83ac486e
|
[
"MIT"
] | 60
|
2019-01-29T14:13:00.000Z
|
2020-11-24T09:08:05.000Z
|
agent/continuous/seperate/__init__.py
|
SunandBean/tensorflow_RL
|
a248cbfb99b2041f6f7cc008fcad53fb83ac486e
|
[
"MIT"
] | 2
|
2019-08-14T06:44:32.000Z
|
2020-11-12T12:57:55.000Z
|
agent/continuous/seperate/__init__.py
|
SunandBean/tensorflow_RL
|
a248cbfb99b2041f6f7cc008fcad53fb83ac486e
|
[
"MIT"
] | 37
|
2019-01-22T05:19:34.000Z
|
2021-04-12T02:27:50.000Z
|
from agent.continuous.seperate.ddpg import DDPG
from agent.continuous.seperate.td3 import TD3
| 46.5
| 47
| 0.860215
| 14
| 93
| 5.714286
| 0.5
| 0.225
| 0.475
| 0.675
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.023256
| 0.075269
| 93
| 2
| 48
| 46.5
| 0.906977
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 7
|
1927f7b0302955a77045e3d4e6299a01d8d06ff6
| 78
|
py
|
Python
|
python/testData/inspections/PyPep8NamingInspection/ignored/ignoreN806.py
|
jnthn/intellij-community
|
8fa7c8a3ace62400c838e0d5926a7be106aa8557
|
[
"Apache-2.0"
] | 2
|
2019-04-28T07:48:50.000Z
|
2020-12-11T14:18:08.000Z
|
python/testData/inspections/PyPep8NamingInspection/ignored/ignoreN806.py
|
Cyril-lamirand/intellij-community
|
60ab6c61b82fc761dd68363eca7d9d69663cfa39
|
[
"Apache-2.0"
] | 173
|
2018-07-05T13:59:39.000Z
|
2018-08-09T01:12:03.000Z
|
python/testData/inspections/PyPep8NamingInspection/ignored/ignoreN806.py
|
Cyril-lamirand/intellij-community
|
60ab6c61b82fc761dd68363eca7d9d69663cfa39
|
[
"Apache-2.0"
] | 2
|
2020-03-15T08:57:37.000Z
|
2020-04-07T04:48:14.000Z
|
def do_stuff(): return 1
def func():
Test = do_stuff()
return Test
| 11.142857
| 24
| 0.602564
| 12
| 78
| 3.75
| 0.583333
| 0.311111
| 0.577778
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.017857
| 0.282051
| 78
| 6
| 25
| 13
| 0.785714
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0
| 0
| 0.25
| 0.75
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 7
|
19334e8926876a858fc7ded1b9c7dfe427149d57
| 81
|
py
|
Python
|
TOPSIS_RATISH_101803004/__init__.py
|
zeearo/TOPSIS_THAPAR
|
c40c58fe0e706eca5487e53fd424d229f7bc5642
|
[
"MIT"
] | null | null | null |
TOPSIS_RATISH_101803004/__init__.py
|
zeearo/TOPSIS_THAPAR
|
c40c58fe0e706eca5487e53fd424d229f7bc5642
|
[
"MIT"
] | null | null | null |
TOPSIS_RATISH_101803004/__init__.py
|
zeearo/TOPSIS_THAPAR
|
c40c58fe0e706eca5487e53fd424d229f7bc5642
|
[
"MIT"
] | null | null | null |
name="TOPSIS_RATISH_101803004/TOPSIS_RATISH_101803004"
__version__ = "1.0.0"
| 20.25
| 55
| 0.790123
| 11
| 81
| 5.090909
| 0.636364
| 0.428571
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.287671
| 0.098765
| 81
| 3
| 56
| 27
| 0.479452
| 0
| 0
| 0
| 0
| 0
| 0.675325
| 0.61039
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
19930b62e22186d94f517c643d9ae714f32db65b
| 347
|
py
|
Python
|
bitmovin_api_sdk/analytics/outputs/s3_role_based/__init__.py
|
jaythecaesarean/bitmovin-api-sdk-python
|
48166511fcb9082041c552ace55a9b66cc59b794
|
[
"MIT"
] | 11
|
2019-07-03T10:41:16.000Z
|
2022-02-25T21:48:06.000Z
|
bitmovin_api_sdk/analytics/outputs/s3_role_based/__init__.py
|
jaythecaesarean/bitmovin-api-sdk-python
|
48166511fcb9082041c552ace55a9b66cc59b794
|
[
"MIT"
] | 8
|
2019-11-23T00:01:25.000Z
|
2021-04-29T12:30:31.000Z
|
bitmovin_api_sdk/analytics/outputs/s3_role_based/__init__.py
|
jaythecaesarean/bitmovin-api-sdk-python
|
48166511fcb9082041c552ace55a9b66cc59b794
|
[
"MIT"
] | 13
|
2020-01-02T14:58:18.000Z
|
2022-03-26T12:10:30.000Z
|
from bitmovin_api_sdk.analytics.outputs.s3_role_based.s3_role_based_api import S3RoleBasedApi
from bitmovin_api_sdk.analytics.outputs.s3_role_based.customdata.customdata_api import CustomdataApi
from bitmovin_api_sdk.analytics.outputs.s3_role_based.analytics_s3_role_based_output_list_query_params import AnalyticsS3RoleBasedOutputListQueryParams
| 86.75
| 151
| 0.927954
| 48
| 347
| 6.229167
| 0.375
| 0.100334
| 0.183946
| 0.180602
| 0.451505
| 0.451505
| 0.451505
| 0.451505
| 0.451505
| 0
| 0
| 0.020896
| 0.034582
| 347
| 3
| 152
| 115.666667
| 0.871642
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
199895a547c9ad690b831a03c54cce0845e370b1
| 5,539
|
py
|
Python
|
test/test_bin.py
|
kmanalo/qhost
|
8d681da52451d3687053532fa25c041cd31ad8bf
|
[
"Apache-2.0"
] | 4
|
2015-01-07T21:36:25.000Z
|
2017-09-11T02:25:57.000Z
|
test/test_bin.py
|
kmanalo/qhost
|
8d681da52451d3687053532fa25c041cd31ad8bf
|
[
"Apache-2.0"
] | 10
|
2015-01-08T20:40:33.000Z
|
2015-09-17T15:09:28.000Z
|
test/test_bin.py
|
kmanalo/qhost
|
8d681da52451d3687053532fa25c041cd31ad8bf
|
[
"Apache-2.0"
] | 1
|
2019-01-02T15:18:53.000Z
|
2019-01-02T15:18:53.000Z
|
import unittest
import sys
import os
class TestBin(unittest.TestCase):
def test_run(self):
top = os.path.join(os.path.dirname(os.path.realpath(__file__)), '../')
cmd = [
os.path.join(top, 'bin', 'qhost'),
'-X',
os.path.join(top, 'test', 'output', 'output_00.xml')
]
actual = os.popen(' '.join(cmd)).read()
expected = open(
os.path.join(top, 'test', 'output', 'output_00.txt')
).read()
self.assertEquals(actual, expected)
def test_longer_run(self):
top = os.path.join(os.path.dirname(os.path.realpath(__file__)), '../')
cmd = [
os.path.join(top, 'bin', 'qhost'),
'-X',
os.path.join(top, 'test', 'output', 'output_04.xml')
]
actual = os.popen(' '.join(cmd)).read()
expected = open(
os.path.join(top, 'test', 'output', 'output_04_4.txt')
).read()
self.assertEquals(actual, expected)
def test_filter_by_state(self):
top = os.path.join(os.path.dirname(os.path.realpath(__file__)), '../')
cmd = [
os.path.join(top, 'bin', 'qhost'),
'-X',
os.path.join(top, 'test', 'output', 'output_04.xml'),
'-s EO -x',
]
actual = os.popen(' '.join(cmd)).read()
expected = open(
os.path.join(top, 'test', 'output', 'output_04_1.txt')
).read()
self.assertEquals(actual, expected)
def test_filter_by_state_ODE(self):
'''
State filter on 'ODE'
'''
top = os.path.join(os.path.dirname(os.path.realpath(__file__)), '../')
cmd = [
os.path.join(top, 'bin', 'qhost'),
'-X',
os.path.join(top, 'test', 'output', 'output_05.xml'),
'-s DEO',
]
actual = os.popen(' '.join(cmd)).read()
expected = open(
os.path.join(top, 'test', 'output', 'output_05.txt')
).read()
self.assertEquals(actual, expected)
def test_filter_by_state_and_node(self):
top = os.path.join(os.path.dirname(os.path.realpath(__file__)), '../')
cmd = [
os.path.join(top, 'bin', 'qhost'),
'-X',
os.path.join(top, 'test', 'output', 'output_04.xml'),
'-s EO -x',
'n0[35]'
]
actual = os.popen(' '.join(cmd)).read()
expected = open(
os.path.join(top, 'test', 'output', 'output_04_2.txt')
).read()
self.assertEquals(actual, expected)
def test_filter_by_node(self):
top = os.path.join(os.path.dirname(os.path.realpath(__file__)), '../')
cmd = [
os.path.join(top, 'bin', 'qhost'),
'-X',
os.path.join(top, 'test', 'output', 'output_04.xml'),
'n0[15]'
]
actual = os.popen(' '.join(cmd)).read()
expected = open(
os.path.join(top, 'test', 'output', 'output_04_3.txt')
).read()
self.assertEquals(actual, expected)
def test_filter_by_jobid(self):
top = os.path.join(os.path.dirname(os.path.realpath(__file__)), '../')
cmd = [
os.path.join(top, 'bin', 'qhost'),
'-X',
os.path.join(top, 'test', 'output', 'output_04.xml'),
'-J 1158770'
]
actual = os.popen(' '.join(cmd)).read()
expected = open(
os.path.join(top, 'test', 'output', 'output_04_5.txt')
).read()
self.assertEquals(actual, expected)
def test_job_note_notification(self):
top = os.path.join(os.path.dirname(os.path.realpath(__file__)), '../')
cmd = [
os.path.join(top, 'bin', 'qhost'),
'-X',
os.path.join(top, 'test', 'output', 'output_06.xml')
]
actual = os.popen(' '.join(cmd)).read()
expected = open(
os.path.join(top, 'test', 'output', 'output_06_1.txt')
).read()
self.assertEquals(actual, expected)
def test_job_note_display(self):
top = os.path.join(os.path.dirname(os.path.realpath(__file__)), '../')
cmd = [
os.path.join(top, 'bin', 'qhost'),
'-X',
os.path.join(top, 'test', 'output', 'output_06.xml'),
'-N'
]
actual = os.popen(' '.join(cmd)).read()
expected = open(
os.path.join(top, 'test', 'output', 'output_06_2.txt')
).read()
self.assertEquals(actual, expected)
def test_any_state_display(self):
top = os.path.join(os.path.dirname(os.path.realpath(__file__)), '../')
cmd = [
os.path.join(top, 'bin', 'qhost'),
'-X',
os.path.join(top, 'test', 'output', 'output_07.xml'),
'-s E'
]
actual = os.popen(' '.join(cmd)).read()
expected = open(
os.path.join(top, 'test', 'output', 'output_07_1.txt')
).read()
self.assertEquals(actual, expected)
def test_exclusive_state_display(self):
top = os.path.join(os.path.dirname(os.path.realpath(__file__)), '../')
cmd = [
os.path.join(top, 'bin', 'qhost'),
'-X',
os.path.join(top, 'test', 'output', 'output_07.xml'),
'-s E -x'
]
actual = os.popen(' '.join(cmd)).read()
expected = open(
os.path.join(top, 'test', 'output', 'output_07_2.txt')
).read()
self.assertEquals(actual, expected)
| 34.61875
| 78
| 0.501715
| 659
| 5,539
| 4.053111
| 0.098634
| 0.148259
| 0.164732
| 0.160614
| 0.942344
| 0.942344
| 0.942344
| 0.928117
| 0.908648
| 0.87383
| 0
| 0.017259
| 0.309623
| 5,539
| 159
| 79
| 34.836478
| 0.681224
| 0.003791
| 0
| 0.662069
| 0
| 0
| 0.133782
| 0
| 0
| 0
| 0
| 0
| 0.075862
| 1
| 0.075862
| false
| 0
| 0.02069
| 0
| 0.103448
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
274c4bdd71fa36740b0323aed6b95f484a1965fe
| 113,261
|
py
|
Python
|
Montefx.py
|
fireballpoint1/fortranTOpy
|
55843a62c6f0a2f8e2a777ef70193940d3d2d141
|
[
"Apache-2.0"
] | 1
|
2018-08-26T05:10:56.000Z
|
2018-08-26T05:10:56.000Z
|
Montefx.py
|
fireballpoint1/fortranTOpy
|
55843a62c6f0a2f8e2a777ef70193940d3d2d141
|
[
"Apache-2.0"
] | null | null | null |
Montefx.py
|
fireballpoint1/fortranTOpy
|
55843a62c6f0a2f8e2a777ef70193940d3d2d141
|
[
"Apache-2.0"
] | 1
|
2018-06-26T18:06:44.000Z
|
2018-06-26T18:06:44.000Z
|
# global NGAS,NSTEP,NANISO,EFINAL,ESTEP,AKT,ARY,TEMPC,TORR,IPEN
# #COMMON/INPT1/
# global NDVEC
# #COMMON/CNSTS1/
# global CONST1,CONST2,CONST3,CONST4,CONST5
# #COMMON/SETP/
# global TMAX,SMALL,API,ESTART,THETA,PHI,TCFMAX#(10)
# global TCFMAX1,RSTART,EFIELD,ETHRM,ECUT,NDELTA,IMIP,IWRITE
# #COMMON/LARGE/
# global CF
# CF=[[0 for x in range(20000)] for y in range(512)]
# global EIN#(512)
# EIN=[0 for x in range(512)]
# global TCF#(20000)
# TCF=[0 for x in range(20000)]
# global IARRY#(512)
# IARRY=[0 for x in range(512)]
# global RGAS#(512)
# RGAS=[0 for x in range(512)]
# global IPN#(512)
# IPN=[0 for x in range(512)]
# global WPL#(512)
# WPL=[0 for x in range(512)]
# global IZBR#(512)
# IZBR=[0 for x in range(512)]
# global IPLAST
# IPLAST=1
# global PENFRA#(3,512)
# IARRY=[[0 for y in range(3)] for x in range(512)]
# #COMMON/LARGEN/
# global CFN#(20000,60)
# CFN=[[0 for x in range(20000)] for y in range(60)]
# global TCFN#(20000)
# TCFN=[0 for x in range(20000)]
# global SCLENUL#(60)
# SCLENUL=[0 for x in range(60)]
# global NPLAST
# #COMMON/OUTPT/
# global ICOLL#(30)
# ICOLL==[0 for x in range(30)]
# global NETOT,NPRIME,TMAX1,TIME#(300)
# TIME=[0 for x in range(300)]
# global NNULL,NITOT,ICOLN#(512)
# ICOLN=[0 for x in range(512)]
# global ICOLNN#(60)
# ICOLNN=[0 for x in range(60)]
# global NREAL,NEXCTOT
# #COMMON/RLTVY/
# global BET#(2000)
# BET=[0 for x in range(2000)]
# global GAM#(20000)
# GAM=[0 for x in range(20000)]
# global VC,EMS
# #COMMON/STTS/
# global XST#(150000)
# XST=[0 for x in range(150000)]
# global YST#(150000)
# YST=[0 for x in range(150000)]
# global ZST#(150000)
# ZST=[0 for x in range(150000)]
# global TST#(150000)
# TST=[0 for x in range(150000)]
# global TTIME#(150000)
# TTIME=[0 for x in range(150000)]
# global NFGF#(150000)
# NFGF=[0 for x in range(150000)]
# global NFGPP#(150000)
# NFGPP=[0 for x in range(150000)]
# global NFGBR#(150000)
# NFGBR=[0 for x in range(150000)]
# global NELEC,NEGION,EST1,EST2
# #COMMON/STEXC/
# global XSTEXC#(150000)
# XSTEXC=[0 for x in range(150000)]
# global YSTEXC#(150000)
# YSTEXC=[0 for x in range(150000)]
# global ZSTEXC#(150000)
# ZSTEXC=[0 for x in range(150000)]
# global TSTEXC#(150000)
# TSTEXC=[0 for x in range(150000)]
# global NSTEXC
# #COMMON/STEXCNUL/
# global XSTN#(150000)
# XSTN=[0 for x in range(150000)]
# global YSTN#(150000)
# YSTN=[0 for x in range(150000)]
# global ZSTN#(150000)
# ZSTN=[0 for x in range(150000)]
# global TSTN#(150000)
# TSTN=[0 for x in range(150000)]
# global IDNUL#(150000)
# IDNUL=[0 for x in range(150000)]
# global NEXCNUL
# #COMMON/IONC/
# global DOUBLE#(6,20000)
# DOUBLE=[[0 for x in range(6)] for y in range(20000)]
# global CMINIXSC#(6)
# CMINIXSC=[0 for x in range(6)]
# global CMINEXSC#(6)
# CMINEXSC=[0 for x in range(6)]
# global ECLOSS#(6)
# ECLOSS=[0 for x in range(6)]
# global WPLN#(6)
# WPLN=[0 for x in range(6)]
# global ICOUNT,AVPFRAC#(3,6)
# AVOFRAC=[[0 for x in range(3)] for y in range(6)]
# #COMMON/IONFL/
# global NC0#(512)
# NC0=[0 for x in range(512)]
# global EC0#(512)
# EC0=[0 for x in range(512)]
# global NG1#(512)
# NG1=[0 for x in range(512)]
# global EG1#(512)
# EG1=[0 for x in range(512)]
# global NG2#(512)
# NG2=[0 for x in range(512)]
# global EG2#(512)
# EG2=[0 for x in range(512)]
# global WKLM#(512)
# WKLM=[0 for x in range(512)]
# global DSTFL#(512)
# DSTFL=[0 for x in range(512)]
# #COMMON/IONMOD/
# global ESPLIT#(512,20)
# ESPLIT=[[0 for x in range(512)] for y in range(20)]
# global IONMODEL#(512)
# IONMODEL=[0 for x in range(512)]
# #COMMON/ANIS/
# global PSCT#(20000,512)
# PSCT=[[0 for y in range(20000)] for x in range(512)]
# global ANGCT#(20000,512)
# ANGCT=[[0 for y in range(20000)] for x in range(512)]
# global INDEX#(512)
# INDEX=[0 for x in range(512)]
# global NISO
# #COMMON/CASRS/
# global ECAS#(400)
# ECAS=[0 for x in range(400)]
# global XCAS#(400)
# XCAS=[0 for x in range(400)]
# global YCAS#(400)
# YCAS=[0 for x in range(400)]
# global ZCAS#(400)
# ZCAS=[0 for x in range(400)]
# global DRXS#(400)
# DRXS=[0 for x in range(400)]
# global DRYS#(400)
# DRYS=[0 for x in range(400)]
# global DRZS#(400)
# DRZS=[0 for x in range(400)]
# global TT1#(400)
# global NFLGF#(400)
# NFLGF=[0 for x in range(400)]
# global NFLGPP#(400)
# NFLGPP=[0 for x in range(400)]
# global IEVNTL
# #COMMON/COMP/
# global LCMP,LCFLG,LRAY,LRFLG,LPAP,LPFLG,LBRM,LBFLG,LPEFLG
# #COMMON/BREMG/
# global EBRGAM#(10)
# EBRGAM=[0 for x in range(10)]
# global BRDCOSX#(10)
# BRDCOSX=[0 for x in range(10)]
# global BRDCOSY#(10)
# BRDCOSY=[0 for x in range(10)]
# global BRDCOSZ#(10)
# BRDCOSZ=[0 for x in range(10)]
# global BRX#(10)
# BRX=[0 for x in range(10)]
# global BRY#(10)
# BRY==[0 for x in range(10)]
# global BRZ#(10)
# BRZ=[0 for x in range(10)]
# global BRT#(10)
# BRT=[0 for x in range(10)]
# global EBRTOT#(6)
# EBRTOT=[0 for x in range(6)]
# global NBREM#(6)
# NBREM=[0 for x in range(6)]
# #COMMON/CASRSB/
# global ECASB#(400)
# ECASB=[0 for x in range(400)]
# global XCASB#(400)
# XCASB=[0 for x in range(400)]
# global YCASB#(400)
# YCASB=[0 for x in range(400)]
# global ZCASB#(400)
# ZCASB=[0 for x in range(400)]
# global DRXB#(400)
# DRXB=[0 for x in range(400)]
# global DRYB#(400)
# DRYB=[0 for x in range(400)]
# global DRZB#(400)
# DRZB=[0 for x in range(400)]
# global TTB1#(400)
# TTB1=[0 for x in range(400)]
# global NFLGFB#(400)
# NFLGFB=[0 for x in range(400)]
# global NFLGPPB#(400)
# NFLGPPB=[0 for x in range(400)]
# global IEVNTLB
# #COMMON/CASRSE/
# global ECASE#(400)
# ECASE=[0 for x in range(400)]
# global XCASE#(400)
# XCASE=[0 for x in range(400)]
# global YCASE#(400)
# YCASE=[0 for x in range(400)]
# global ZCASE#(400)
# ZCASE=[0 for x in range(400)]
# global DRXCE#(400)
# DRXCE=[0 for x in range(400)]
# global DRYCE#(400)
# DRYCE=[0 for x in range(400)]
# global DRZCE#(400)
# DRZCE=[0 for x in range(400)]
# global TCASE#(400)
# TCASE=[0 for x in range(400)]
# global NFLGFE#(400)
# NFLGFE=[0 for x in range(400)]
# global NFLGPPE#(400)
# NFLGPPE=[0 for x in range(400)]
# global IEVENTE
# #COMMON/ECASC/
# global NEGAS#(512)
# NEGAS=[0 for x in range(512)]
# global LEGAS#(512)
# LEGAS=[0 for x in range(512)]
# global IESHELL#(512)
# IESHELL=[0 for x in range(512)]
# global IECASC
# #COMMON/IDEXC/
# global NGEXC1,NGEXC2,NGEXC3,NGEXC4,NGEXC5,NGEXC6,IDG1,IDG2,IDG3,IDG4,IDG5,IDG6
def MONTEFE():
# ----------------------------------------------------------------------
# NOTE(review): this routine is a machine translation from Fortran
# (Magboltz/Degrad-family Monte Carlo). The translation lost ALL
# indentation and left raw Fortran residue in place ("GO TO 190",
# "891 CONTINUE", "IFLTST=0:", Fortran intrinsics mangled into
# subscripts such as DMIN0[IE][J20000] for DMIN0(IE,J20000)), so this
# body is NOT valid Python as it stands. Code is left byte-identical;
# comments below flag the residue and summarise what the visible code
# does. Helpers DRAND48, CASRES, CASREM, BREMS, BREMSCASC, CASCADEE,
# IONSPLIT and STATS are defined elsewhere in the file — TODO confirm.
# ----------------------------------------------------------------------
# IMPLICIT #real*8 (A-H,O-Z)
# IMPLICIT #integer*8 (I-N)
# COMMON/INPT/NGAS,NSTEP,NANISO,EFINAL,ESTEP,AKT,ARY,TEMPC,TORR,IPEN
# COMMON/INPT1/NDVEC
# COMMON/CNSTS1/CONST1,CONST2,CONST3,CONST4,CONST5
# COMMON/SETP/TMAX,SMALL,API,ESTART,THETA,PHI,TCFMAX(10),TCFMAX1,RSTART,EFIELD,ETHRM,ECUT,NDELTA,IMIP,IWRITE
# COMMON/LARGE/CF(20000,512),EIN(512),TCF(20000),IARRY(512),RGAS(512),IPN(512),WPL(512),IZBR(512),IPLAST,PENFRA[3,512]
# COMMON/LARGEN/CFN(20000,60),TCFN(20000),SCLENUL(60),NPLAST
# COMMON/OUTPT/ICOLL(30),NETOT,NPRIME,TMAX1,TIME(300),NNULL,NITOT,ICOLN(512),ICOLNN(60),NREAL,NEXCTOT
# COMMON/RLTVY/BET[2000],GAM(20000),VC,EMS
# COMMON/STTS/XST(150000),YST(150000),ZST(150000),TST(150000),TTIME(150000),NFGF(150000),NFGPP(150000),NFGBR(150000),NELEC,NEGION,EST1,EST2
# COMMON/STEXC/XSTEXC(150000),YSTEXC(150000),ZSTEXC(150000),TSTEXC(150000),NSTEXC
# COMMON/STEXCNUL/XSTN(150000),YSTN(150000),ZSTN(150000),TSTN(150000),IDNUL(150000),NEXCNUL
# COMMON/IONC/DOUBLE(6,20000),CMINIXSC[6],CMINEXSC[6],ECLOSS[6],WPLN[6],ICOUNT,AVPFRAC(3,6)
# COMMON/IONFL/NC0(512),EC0(512),NG1(512),EG1(512),NG2(512),EG2(512),WKLM(512),DSTFL(512)
# COMMON/IONMOD/ESPLIT(512,20),IONMODEL(512)
# COMMON/ANIS/PSCT(20000,512),ANGCT(20000,512),INDEX(512),NISO
# COMMON/CASRS/ECAS(400),XCAS(400),YCAS(400),ZCAS(400),DRXS(400),DRYS(400),DRZS(400),TT1(400),NFLGF(400),NFLGPP(400),IEVNTL
# COMMON/COMP/LCMP,LCFLG,LRAY,LRFLG,LPAP,LPFLG,LBRM,LBFLG,LPEFLG
# NOTE(review): "BRnumpy.cosX" below is a blind text substitution of
# "DCOS" -> "numpy.cos" applied inside the Fortran name BRDCOSX.
# COMMON/BREMG/EBRGAM(10),BRnumpy.cosX(10),BRnumpy.cosY(10),BRnumpy.cosZ[10],BRX(10),BRY(10),BRZ[10],BRT(10),EBRTOT[6],NBREM[6]
# COMMON/CASRSB/ECASB[400],XCASB[400],YCASB[400],ZCASB[400],DRXB[400],DRYB[400],DRZB[400],TTB1(400),NFLGFB[400],NFLGPPB[400],IEVNTLB
# COMMON/CASRSE/ECASE(400),XCASE(400),YCASE(400),ZCASE(400),DRXCE(400),DRYCE(400),DRZCE(400),TCASE(400),NFLGFE(400),NFLGPPE(400),IEVENTE
# COMMON/ECASC/NEGAS(512),LEGAS(512),IESHELL(512),IECASC
# COMMON/IDEXC/NGEXC1,NGEXC2,NGEXC3,NGEXC4,NGEXC5,NGEXC6,IDG1,IDG2,IDG3,IDG4,IDG5,IDG6
# The global statements below replace the Fortran COMMON blocks above;
# the "#(n)" suffixes record the original array dimensions.
#COMMON/INPT/
global NGAS,NSTEP,NANISO,EFINAL,ESTEP,AKT,ARY,TEMPC,TORR,IPEN
#COMMON/INPT1/
global NDVEC
#COMMON/CNSTS1/
global CONST1,CONST2,CONST3,CONST4,CONST5
#COMMON/SETP/
global TMAX,SMALL,API,ESTART,THETA,PHI,TCFMAX#(10)
global TCFMAX1,RSTART,EFIELD,ETHRM,ECUT,NDELTA,IMIP,IWRITE
#COMMON/LARGE/
global CF#(20000,512)
global EIN#(512)
global TCF#(20000)
global IARRY#(512)
global RGAS#(512)
global IPN#(512)
global WPL#(512)
global IZBR#(512)
global IPLAST
global PENFRA#(3,512)
#COMMON/LARGEN/
global CFN#(20000,60)
global TCFN#(20000)
global SCLENUL#(60)
global NPLAST
#COMMON/OUTPT/
global ICOLL#(30)
global NETOT,NPRIME,TMAX1,TIME#(300)
global NNULL,NITOT,ICOLN#(512)
global ICOLNN#(60)
global NREAL,NEXCTOT
#COMMON/RLTVY/
global BET#(2000)
global GAM#(20000)
global VC,EMS
#COMMON/STTS/
global XST#(150000)
global YST#(150000)
global ZST#(150000)
global TST#(150000)
global TTIME#(150000)
global NFGF#(150000)
global NFGPP#(150000)
global NFGBR#(150000)
global NELEC,NEGION,EST1,EST2
#COMMON/STEXC/
global XSTEXC#(150000)
global YSTEXC#(150000)
global ZSTEXC#(150000)
global TSTEXC#(150000)
global NSTEXC
#COMMON/STEXCNUL/
global XSTN#(150000)
global YSTN#(150000)
global ZSTN#(150000)
global TSTN#(150000)
global IDNUL#(150000)
global NEXCNUL
#COMMON/IONC/
global DOUBLE#(6,20000)
global CMINIXSC#(6)
global CMINEXSC#(6)
global ECLOSS#(6)
global WPLN#(6)
global ICOUNT,AVPFRAC#(3,6)
#COMMON/IONFL/
global NC0#(512)
global EC0#(512)
global NG1#(512)
global EG1#(512)
global NG2#(512)
global EG2#(512)
global WKLM#(512)
global DSTFL#(512)
#COMMON/IONMOD/
global ESPLIT#(512,20)
global IONMODEL#(512)
#COMMON/ANIS/
global PSCT#(20000,512)
global ANGCT#(20000,512)
global INDEX#(512)
global NISO
#COMMON/CASRS/
global ECAS#(400)
global XCAS#(400)
global YCAS#(400)
global ZCAS#(400)
global DRXS#(400)
global DRYS#(400)
global DRZS#(400)
global TT1#(400)
global NFLGF#(400)
global NFLGPP#(400)
global IEVNTL
#COMMON/COMP/
global LCMP,LCFLG,LRAY,LRFLG,LPAP,LPFLG,LBRM,LBFLG,LPEFLG
#COMMON/BREMG/
global EBRGAM#(10)
global BRDCOSX#(10)
global BRDCOSY#(10)
global BRDCOSZ#(10)
global BRX#(10)
global BRY#(10)
global BRZ#(10)
global BRT#(10)
global EBRTOT#(6)
global NBREM#(6)
#COMMON/CASRSB/
global ECASB#(400)
global XCASB#(400)
global YCASB#(400)
global ZCASB#(400)
global DRXB#(400)
global DRYB#(400)
global DRZB#(400)
global TTB1#(400)
global NFLGFB#(400)
global NFLGPPB#(400)
global IEVNTLB
#COMMON/CASRSE/
global ECASE#(400)
global XCASE#(400)
global YCASE#(400)
global ZCASE#(400)
global DRXCE#(400)
global DRYCE#(400)
global DRZCE#(400)
global TCASE#(400)
global NFLGFE#(400)
global NFLGPPE#(400)
global IEVENTE
#COMMON/ECASC/
global NEGAS#(512)
global LEGAS#(512)
global IESHELL#(512)
global IECASC
#COMMON/IDEXC/
global NGEXC1,NGEXC2,NGEXC3,NGEXC4,NGEXC5,NGEXC6,IDG1,IDG2,IDG3,IDG4,IDG5,IDG6
# NOTE(review): the next two statements are stranded Fortran DIMENSION
# declarations — as Python they are bare call expressions on undefined
# names (XS, YS, ... are never initialised as Python lists). These local
# work arrays hold the cluster store of secondary electrons.
#DIMENSION
XS(150000),YS(150000),ZS(150000),TS(150000),ES(150000),DCX(150000),DCY(150000),DCZ(150000),NFLGFC(150000),NFLGPPC(150000),NFLGBRMC(150000)
#DIMENSION
TEMP(20000)
# DIMENSION ETEMP(1000)
# ----------------------------------------------------------------------
# RELATIVISTIC VERSION SEPTEMBER 2013
# ELECTRIC FIELD ALONG Z AXIS. NO MAGNETIC FIELD.
# TRACKS DELTA ELECTRONS AND UPDATES ARRAYS CONTAINING POSITION AND
# TIME OF THERMALISED ELECTRONS.
# CALCULATES NUMBER OF PRODUCED ELECTRONS PER PRIMARY AND OTHER
# HIGHER FANO FACTORS.
# RANGE IS ACCURATE ONLY FOR ANISOTROPIC X-SECTIONS
# ----------------------------------------------------------------------
# VARYING ENERGY STEPS
# Piecewise energy grid: fine 1 eV steps at low energy, coarser
# ESTEP1/ESTEP2 steps above 16 keV / 92 keV respectively.
if(EFINAL <= 140000.):
ESTEP1=(EFINAL-16000.0)/float(4000)
else:
ESTEP1=20.0
ESTEP2=(EFINAL-92000.0)/float(4000)
# endif
NPRINT=0
J20000=20000
J300=300
API=numpy.arccos(-1.00)
SMALL=1.0E-20
TMAX1=0.00
EMAX=0.00
RDUM=RSTART
CONST9=CONST3*0.010
# NOTE(review): these range(1,N) loops skip index 0 and stop at N-1 —
# a common off-by-one in Fortran DO-loop translations (Fortran ran 1..N).
for I in range(1,300):
TIME[I]=0.00
for I in range(1,30):
ICOLL[I]=0
for I in range(1,512):
ICOLN[I]=0
NREAL=0
NNULL=0
NETOT=0
NEXCTOT=0
NITOT=0
NMXADD=0
NTMPFLG=0
BP=EFIELD*EFIELD*CONST1
F1=EFIELD*CONST2
F2=EFIELD*CONST3
F4=2.00*API
THETA1=THETA
PHI1=PHI
# CALCULATE MAXIMUM COLLISION FREQUENCY
TLIM=0.0
for J in range(1,20000):
TEMP[J]=TCFN[J]+TCF[J]
if(TLIM < TEMP[J]):
TLIM=TEMP[J]
NEOVFL=0
J1=0
# START OF PRIMARY EVENT LOOP
for J11 in range(1,NDELTA):
J1=J1+1
NPRIME=J1
NGEXC1=0
NGEXC2=0
NGEXC3=0
NGEXC4=0
NGEXC5=0
NGEXC6=0
# INITIAL DIRECTION COSINES FOR ELECTRON BEAM
DCZ1=numpy.cos(THETA1)
DCX1=numpy.sin(THETA1)*numpy.cos(PHI1)
DCY1=numpy.sin(THETA1)*numpy.sin(PHI1)
NFLGFF=0
NFLGPPP=0
NFLGBRMM=0
NFLGHIGH=0
EST1=ESTART
E1=ESTART
X=0.00
Y=0.00
Z=0.00
K1=0
KEXC=0
NSTEXC=0
NEXCNUL=0
NCLUS=0
NELEC=0
NEGION=0
TLAST=0.00
ST=0.00
TDASH=0.00
if(IMIP == 2):
pass
else:
if(IMIP > 2):
# READIN FIRST ELECTRON FROM BETA DECAY OR XRAY UNTHERMALISED CLUSTERS
CASRES(J11,IBADTOT,IBAD1)
# SKIP IF BAD EVENT
if(IBAD1 == 1):
J1=J1-1
continue
# endif
elif(IMIP == 1) :
# READ IN FIRST ELECTRON FROM MIP INTERACTION
CASREM(J11)
EST1=ECAS[1]
EST2=EST1
# endif
X=XCAS[1]
Y=YCAS[1]
Z=ZCAS[1]
ST=TT1[1]
TS[1]=TT1[1]
E1=ECAS[1]
DCZ1=DRZS[1]
DCY1=DRYS[1]
DCX1=DRXS[1]
NFLGFF=NFLGF[1]
NFLGPPP=NFLGPP[1]
NFLGBRMM=0
NFLGHIGH=NFLGFF
# PUT REMAINDER OF ELECTRONS INTO CLUSTER STORE
ISDUM=0
for IST in range(2,IEVNTL):
ISDUM=ISDUM+1
XS[ISDUM]=XCAS[IST]
YS[ISDUM]=YCAS[IST]
ZS[ISDUM]=ZCAS[IST]
TS[ISDUM]=TT1[IST]
ES[ISDUM]=ECAS[IST]
DCX[ISDUM]=DRXS[IST]
DCY[ISDUM]=DRYS[IST]
DCZ[ISDUM]=DRZS[IST]
NFLGFC[ISDUM]=NFLGF[IST]
NFLGPPC[ISDUM]=NFLGPP[IST]
NFLGBRMC[ISDUM]=0
NCLUS=ISDUM
if(NFLGF[IST]> NFLGHIGH):
NFLGHIGH=NFLGF[IST]
# START OF LOOP FOR NEWLY CREATED ELECTRONS
flag190=0
# NOTE(review): GOTO1() re-implements the Fortran "GO TO 1" jump target
# as a nested function. As written this produces unbounded recursion
# rather than a loop (every "GO TO 1" becomes a new call frame) —
# a faithful Fortran port would need an explicit while-loop here.
def GOTO1():
# Draw free-flight time T from the max collision frequency TLIM
# (null-collision technique), then advance energy E relativistically.
R1=DRAND48(RDUM)
T=-math.log(R1)/TLIM+TDASH
TDASH=T
# AP=DCZ1*F2*math.sqrt(E1)
GAM1=(EMS+E1)/EMS
BET1=math.sqrt(1.00-1.00/(GAM1*GAM1))
AP=DCZ1*EFIELD*BET1*VC*1.0E-10
BP1=BP/GAM1
E=E1+(AP+BP1*T)*T
if(E < 0.00):
E=0.0010
# endif
# INSERT NEW ALGORITHM TO FIND IE FOR VARYING ENERGY STEP
if(IMIP == 1):
IE=int(E/ESTEP)+1
else:
if(EFINAL <= 20000.):
IE=int(E/ESTEP)+1
elif(EFINAL <= 140000.) :
if(E <= 16000.):
IE=int(E)+1
else:
IE=16000+int((E-16000.)/ESTEP1)
# endif
else:
if(E <= 12000.):
IE=int(E)+1
elif(E <= 92000.) :
IE=12000+int((E-12000.)/ESTEP1)
else:
IE=16000+int((E-92000.)/ESTEP2)
# endif
# endif
# endif
# NOTE(review): residue of Fortran DMIN0(IE,J20000) — clamp IE to 20000.
IE=DMIN0[IE][J20000]
#
# TEST FOR #real OR NULL COLLISION
#
R5=DRAND48(RDUM)
TEST1=TCF[IE]/TLIM
if(R5 <= TEST1):
pass
else:
NNULL=NNULL+1
TEST2=TEMP[IE]/TLIM
if(R5 < TEST2):
# TEST FOR NULL LEVELS
if(NPLAST == 0):
GOTO1()
R2=DRAND48(RDUM)
I=0
flag888=1
while(flag888):
flag888=0
I=I+1
if(CFN[IE][I]< R2):
flag888=1
# INCREMENT NULL LEVEL SUM
NEXCNUL=NEXCNUL+1
ICOLNN[I]=ICOLNN[I]+1
# STORE X Y Z T ID FOR MOLECULAR LIGHT EMISSION FROM NULL EXCITATION
# NOTE: SMALL APPROX USED POSITION OF PREVIOUS #real COLLISION
XSTN[NEXCNUL]=X
YSTN[NEXCNUL]=Y
ZSTN[NEXCNUL]=Z
TSTN[NEXCNUL]=ST
IDNUL[NEXCNUL]=I
GOTO1()
else:
# NULL
GOTO1()
# endif
#
# CALCULATE DIRECTION COSINES AND POSITIONS AT INSTANT BEFORE COLLISION
# 137
T2=T*T
if(E > EMAX):
EMAX=E
if(T > TMAX1):
TMAX1=T
TDASH=0.00
NREAL=NREAL+1
# CONST6=math.sqrt(E1/E)
GAM2=(EMS+E)/EMS
GAM12=(GAM1+GAM2)/2.00
BET2=math.sqrt(1.00-1.00/(GAM2*GAM2))
CONST6=BET1/BET2
DCX2=DCX1*CONST6
DCY2=DCY1*CONST6
# DCZ2=DCZ1*CONST6+EFIELD*T*CONST5/math.sqrt(E)
# NOTE(review): "2.0*10**(10*CONST1/...)" below differs from the MONTEF
# version "2.0D10*CONST1/(VC*BET2)" — the exponent grouping looks like a
# translation error of 2.0E10*CONST1/(VC*BET2). TODO confirm.
DCZ2=DCZ1*CONST6+EFIELD*T*2.0*10**(10*CONST1/(VC*BET2))
# CONST7=CONST9*math.sqrt(E1)
CONST7=VC*BET1*1.0E-12
A=T*CONST7
X=X+DCX1*A
Y=Y+DCY1*A
Z=Z+DCZ1*A+T2*F1/GAM12
# Z=Z+DCZ1*A+T2*F1
ST=ST+T
IT=int(T+1.00)
IT=DMIN0[IT][J300]
TIME[IT]=TIME[IT]+1.00
# ---------------------------------------------------------------------
# DETERMINATION OF #real COLLISION TYPE
# ---------------------------------------------------------------------
R2=DRAND48(RDUM)
I=0
flag140=1
while(flag140):
flag140=0
I=I+1
if(CF[IE][I]< R2):
flag140=1
#************************************************************
# CHECK IF BREMSSTRAHLUNG
if(IZBR[I]!= 0 and LBRM == 1):
NFLGBRMM=1
IPT=IARRY[I]
ICOLL[IPT]=ICOLL[IPT]+1
ICOLN[I]=ICOLN[I]+1
for KNGS in range(1,NGAS):
if(IPT == (KNGS*5)-1):
break
IATOMNO=IZBR[I]
BREMS(IATOMNO,E,DCX2,DCY2,DCZ2,EOUT,EDCX,EDCY,EDCZ,EGAMMA,GDCX,GDCY,GDCZ)
NBREM[KNGS]=NBREM[KNGS]+1
EBRTOT[KNGS]=EBRTOT[KNGS]+EGAMMA
# GET NEW DRCOS DRCOSY DRCOSX AND ENERGY OF ELECTRON
E1=EOUT
DCX1=EDCX
DCY1=EDCY
DCZ1=EDCZ
# RUN BREMSSTRAHLUNG GAMMA THROUGH CASCADE : STORE CONVERTED
# ELECTRONS IN COMMON/CASRSB/
#
BREMSCASC(J11,EGAMMA,X,Y,Z,ST,GDCX,GDCY,GDCZ,ILOW)
# BREMSSTRAHLUNG ENERGY TOO LOW TO IONISE
if(ILOW == 1):
# NOTE(review): untranslated Fortran statement — invalid Python.
GO TO 190
#
# STORE BREMSSTRAHLUNG DATA IN CLUSTER STORE
#
ETSUM=0.0
for KBR in range(1,IEVNTLB):
NCLUS=NCLUS+1
if(NCLUS > 150000):
print(' def STOPPED: . NCLUS=',NCLUS,' NREAL=',NREAL)
sys.exit()
# endif
ES[NCLUS]=ECASB[KBR]
ETSUM=ETSUM+ES[NCLUS]
XS[NCLUS]=XCASB[KBR]
YS[NCLUS]=YCASB[KBR]
ZS[NCLUS]=ZCASB[KBR]
TS[NCLUS]=TTB1[KBR]
DCX[NCLUS]=DRXB[KBR]
DCY[NCLUS]=DRYB[KBR]
DCZ[NCLUS]=DRZB[KBR]
NFLGFC[NCLUS]=NFLGFB[KBR]+NFLGHIGH
NFLGPPC[NCLUS]=NFLGPPB[KBR]
NFLGBRMC[NCLUS]=2
if(NFLGFC[NCLUS]> NFLGHIGH):
NFLGHIGH=NFLGFC[NCLUS]
# NOTE(review): untranslated Fortran statement — invalid Python.
GO TO 190
# endif
# NOTE(review): stranded Fortran label — invalid Python.
891 CONTINUE
#*****************************************************************
# S1=RGAS[I]
S1=1.00+GAM2*(RGAS[I]-1.00)
EI=EIN[I]
# WRITE(6,8890) EIN[I],I
#8890 print(' EIN=','%.4f' % ,' I=',I3)
if(E < EI):
EI=E-0.00010
# endif
if(IPN[I]== 0):
# NOTE(review): untranslated Fortran statement — invalid Python.
GO TO 666
# ATTACHMENT
flag335=0
if(IPN[I]== -1):
NETOT=NETOT+1
NITOT=NITOT+1
NELEC=NELEC+1
NEGION=NEGION+1
IPT=IARRY[I]
ICOLL[IPT]=ICOLL[IPT]+1
ICOLN[I]=ICOLN[I]+1
IT=int(T+1.00)
IT=DMIN0[IT][J300]
TIME[IT]=TIME[IT]+1.00
flag335=1
# endif
else:
EISTR=EI
if(IONMODEL[I]> 0):
# CALCULATE SECONDARY ENERGY,ESEC,IN IONISATION COLLISION USING
# FIVE DIFFERENT MODELS
IONSPLIT(I,E,EI,ESEC)
pass
# endif
else:
R9=DRAND48(RDUM)
# USE OPAL PETERSON AND BEATY SPLITTING FACTOR.
ESEC=WPL[I]*numpy.tan(R9*numpy.arctan((E-EI)/(2.00*WPL[I])))
ESEC=WPL[I]*(ESEC/WPL[I])**0.9524
# 544 CONTINUE
EI=ESEC+EI
# STORE POSITION ,ENERGY, DIRECTION COSINES AND TIME OF GENERATION
# OF SECONDARY IONISATION ELECTRONS
NCLUS=NCLUS+1
# NOTE(review): residue of Fortran MAX0(NCLUS,NMXADD).
NMXADD=MAX[NCLUS][NMXADD]
if(NCLUS > 150000):
#546
print(' ROUTINE STOPPED: . NCLUS=',NCLUS,' NREAL=',NREAL)
sys.exit()
# endif
XS[NCLUS]=X
YS[NCLUS]=Y
ZS[NCLUS]=Z
TS[NCLUS]=ST
ES[NCLUS]=ESEC
NFLGFC[NCLUS]=NFLGFF
NFLGPPC[NCLUS]=NFLGPPP
NFLGBRMC[NCLUS]=NFLGBRMM
NTMPFLG=1
NCLTMP=NCLUS
# ES[NCLUS]=ESEC
# RANDOMISE SECONDARY ELECTRON DIRECTION
# R3=drand48(RDUM)
# F3=1.0-2.00*R3
# THETA0=DACOS(F3)
# F6=DCOS(THETA0)
# F5=DSIN(THETA0)
# R4=drand48(RDUM)
# PHI0=F4*R4
# F8=DSIN(PHI0)
# F9=DCOS(PHI0)
# DCX[NCLUS]=F9*F5
# DCY[NCLUS]=F8*F5
# DCZ[NCLUS]=F6
#*********************************************************
flag666=1
if(IECASC == 0):
pass
elif(LEGAS[I]== 0): # changed if to elif cause same destination
pass
else:
# USE COMPLETE CASCADE FOR ELECTRON IONISATION
KG1=NEGAS[I]
LG1=LEGAS[I]
IGSHEL=IESHELL[I]
CASCADEE(J11,KG1,LG1,X,Y,Z,ST,ESEC,IGSHEL)
#
# STORE CASCADE IN CLUSTER STORE
#
ETSUM=0.0
for KBR in range(1,IEVENTE):
NCLUS=NCLUS+1
if(NCLUS > 150000):
print(' SUBROUTINE STOPPED: . NCLUS=',NCLUS,' NREAL=',NREAL)
sys.exit()
# endif
ES[NCLUS]=ECASE[KBR]
ETSUM=ETSUM+ES[NCLUS]
XS[NCLUS]=XCASE[KBR]
YS[NCLUS]=YCASE[KBR]
ZS[NCLUS]=ZCASE[KBR]
TS[NCLUS]=TCASE[KBR]
DCX[NCLUS]=DRXCE[KBR]
DCY[NCLUS]=DRYCE[KBR]
DCZ[NCLUS]=DRZCE[KBR]
NFLGFC[NCLUS]=NFLGFE[KBR]+NFLGHIGH
NFLGPPC[NCLUS]=NFLGPPE[KBR]
NFLGBRMC[NCLUS]=NFLGBRMM
if(NFLGFC[NCLUS]> NFLGHIGH):
NFLGHIGH=NFLGFC[NCLUS]
flag666=0
#*********************************************************
# STORE POSSIBLE SHELL EMISSIONS AUGER OR FLUORESCENCE
# 333
if(flag666):
if(EISTR > 30.0) :
# WRITE(6,8891) EISTR
#8891 print(' EISTR=','%.4f' % )
# TEST IF FLUORESCENCE EMISSION
# NOTE(review): trailing ":" is translation residue — invalid Python.
IFLTST=0:
if(WKLM[I]> 0.0):
R9=DRAND48(RDUM)
if(R9 < WKLM[I]):
IFLTST=1
# endif
if(IFLTST == 0):
# AUGER EMISSION WITHOUT FLUORESCENCE
NAUG=NC0[I]
EAVAUG=EC0[I]/float(NAUG)
for JFL in range(1,NC0[I]):
NCLUS=NCLUS+1
XS[NCLUS]=X
YS[NCLUS]=Y
ZS[NCLUS]=Z
TS[NCLUS]=ST
NFLGFC[NCLUS]=NFLGFF
NFLGPPC[NCLUS]=NFLGPPP
NFLGBRMC[NCLUS]=NFLGBRMM
ES[NCLUS]=EAVAUG
# Isotropic emission direction for the Auger electron.
R3=DRAND48(RDUM)
F3=1.0-2.00*R3
THETA0=numpy.arccos(F3)
F6=numpy.cos(THETA0)
F5=numpy.sin(THETA0)
R4=DRAND48(RDUM)
PHI0=F4*R4
F8=numpy.sin(PHI0)
F9=numpy.cos(PHI0)
DCX[NCLUS]=F9*F5
DCY[NCLUS]=F8*F5
DCZ[NCLUS]=F6
else:
# AUGER EMISSION AND FLUORESENCE
if(NG2[I]== 0):
pass
else:
NAUG=NG2[I]
EAVAUG=EG2[I]/float(NAUG)
for JFL in range(1,NG2[I]):
NCLUS=NCLUS+1
XS[NCLUS]=X
YS[NCLUS]=Y
ZS[NCLUS]=Z
NFLGFC[NCLUS]=NFLGFF
NFLGPPC[NCLUS]=NFLGPPP
NFLGBRMC[NCLUS]=NFLGBRMM
TS[NCLUS]=ST
ES[NCLUS]=EAVAUG
R3=DRAND48(RDUM)
F3=1.0-2.00*R3
THETA0=numpy.arccos(F3)
F6=numpy.cos(THETA0)
F5=numpy.sin(THETA0)
R4=DRAND48(RDUM)
PHI0=F4*R4
F8=numpy.sin(PHI0)
F9=numpy.cos(PHI0)
DCX[NCLUS]=F9*F5
DCY[NCLUS]=F8*F5
DCZ[NCLUS]=F6
if(NG1[I] == 0):
pass
else:
# Fluorescence photon absorbed at exponential distance DFL
# from the collision point; photoelectron emitted there.
NAUG=NG1[I]
EAVAUG=EG1[I]/float(NAUG)
R9=DRAND48(RDUM)
DFL=-math.log(R9)*DSTFL[I]
for JFL in range(1,NG1[I]):
NCLUS=NCLUS+1
R3=DRAND48(RDUM)
THEFL=numpy.arccos(1.0-2.00*R3)
R4=DRAND48(RDUM)
PHIFL=F4*R4
XS[NCLUS]=X+DFL*numpy.sin(THEFL)*numpy.cos(PHIFL)
YS[NCLUS]=Y+DFL*numpy.sin(THEFL)*numpy.sin(PHIFL)
ZS[NCLUS]=Z+DFL*numpy.cos(THEFL)
NFLGFC[NCLUS]=NFLGHIGH+1
NFLGPPC[NCLUS]=NFLGPPP
NFLGBRMC[NCLUS]=NFLGBRMM
TS[NCLUS]=ST
ES[NCLUS]=EAVAUG
R3=DRAND48(RDUM)
F3=1.0-2.00*R3
THETA0=numpy.arccos(F3)
F6=numpy.cos(THETA0)
F5=numpy.sin(THETA0)
R4=DRAND48(RDUM)
PHI0=F4*R4
F8=numpy.sin(PHI0)
F9=numpy.cos(PHI0)
DCX[NCLUS]=F9*F5
DCY[NCLUS]=F8*F5
DCZ[NCLUS]=F6
NFLGHIGH=NFLGFC[NCLUS]
# endif
# endif
#
# GENERATE SCATTERING ANGLES AND UPDATE LABORATORY COSINES AFTER
# COLLISION ALSO UPDATE ENERGY OF ELECTRON.
#
#666
IPT=IARRY[I]
ICOLL[IPT]=ICOLL[IPT]+1
ICOLN[I]=ICOLN[I]+1
# IF EXCITATION : ADD PROBABILITY ,PENFRA(1,I),OF TRANSFER TO GIVE
# IONISATION OF THE OTHER GASES IN MIXTURE
flag6=1
if(IPEN == 0 or NGAS == 1):
pass
else:
if(PENFRA[1][I] != 0.0):
RAN=DRAND48(RDUM)
if(RAN > PENFRA[1][I]):
pass
else:
NCLUS=NCLUS+1
# ENTER HERE POSSIBLE DELOCALISATION LENGTH FOR PENNING TRANSFER
if(PENFRA[2][I] == 0.0):
XS[NCLUS]=X
YS[NCLUS]=Y
ZS[NCLUS]=Z
NFLGFC[NCLUS]=NFLGFF
NFLGPPC[NCLUS]=NFLGPPP
NFLGBRMC[NCLUS]=NFLGBRMM
pass
# endif
else:
# Randomly-signed exponential displacement in each axis for
# the delocalised Penning electron.
ASIGN=1.0
RAN=DRAND48(RDUM)
RAN1=DRAND48(RDUM)
if(RAN1 < 0.5):
ASIGN=-ASIGN
XS[NCLUS]=X-math.log(RAN)*PENFRA[2][I]*ASIGN
RAN=DRAND48(RDUM)
RAN1=DRAND48(RDUM)
if(RAN1 < 0.5):
ASIGN=-ASIGN
YS[NCLUS]=Y-math.log(RAN)*PENFRA[2][I]*ASIGN
RAN=DRAND48(RDUM)
RAN1=DRAND48(RDUM)
if(RAN1 < 0.5):
ASIGN=-ASIGN
ZS[NCLUS]=Z-math.log(RAN)*PENFRA[2][I]*ASIGN
#667
RAN=DRAND48(RDUM)
TS[NCLUS]=ST-math.log(RAN)*PENFRA[3][I]
# ASSIGN EXCESS ENERGY OF 1EV TO PENNING CREATED ELECTRON
ES[NCLUS]=1.0
DCX[NCLUS]=DCX1
DCY[NCLUS]=DCY1
DCZ[NCLUS]=DCZ1
flag6=0
# endif
# GO TO 6
# CALCULATE SUM OF EXCITATION PER CLUSTER AND STORE EXCITATION X Y Z T
# 5
if(flag6):
if(IPN[I] == 0) :
if((RGAS[I]*EIN[I]) > 4.0):
KEXC=KEXC+1
if(KEXC > 150000):
# NOTE(review): "2X" is leftover Fortran FORMAT edit descriptor.
print(2X,' def STOPPED: . KEXC=',KEXC)
sys.exit()
# endif
# FIND GAS IN WHICH EXCITATION OCCURED AND INCREMENT COUNTER
if(I <= IDG1):
NGEXC1=NGEXC1+1
elif(I <= IDG2) :
NGEXC2=NGEXC2+1
elif(I <= IDG3) :
NGEXC3=NGEXC3+1
elif(I <= IDG4) :
NGEXC4=NGEXC4+1
elif(I <= IDG5) :
NGEXC5=NGEXC5+1
elif(I <= IDG6) :
NGEXC6=NGEXC6+1
else:
print(' def STOPPED: BAD GAS ID IN MONTE')
sys.exit()
# endif
NEXCTOT=NEXCTOT+1
NSTEXC=NSTEXC+1
XSTEXC[KEXC]=X
YSTEXC[KEXC]=Y
ZSTEXC[KEXC]=Z
TSTEXC[KEXC]=ST
# endif
# endif
# 6
S2=(S1*S1)/(S1-1.00)
# ANISOTROPIC SCATTERING
R3=DRAND48(RDUM)
if(INDEX[I]== 1):
R31=DRAND48(RDUM)
F3=1.00-R3*ANGCT[IE][I]
if(R31 > PSCT[IE][I]):
F3=-F3
elif(INDEX[I] == 2) :
EPSI=PSCT[IE][I]
F3=1.00-(2.00*R3*(1.00-EPSI)/(1.00+EPSI*(1.00-2.00*R3)))
else:
# ISOTROPIC SCATTERING
F3=1.00-2.00*R3
# endif
THETA0=numpy.arccos(F3)
R4=DRAND48(RDUM)
PHI0=F4*R4
F8=numpy.sin(PHI0)
F9=numpy.cos(PHI0)
if(E < EI):
EI=0.00
ARG1=1.00-S1*EI/E
# NOTE(review): residue of Fortran DMAX1/DMIN1 intrinsics below.
ARG1=DMAX1[ARG1][SMALL]
D=1.00-F3*math.sqrt(ARG1)
E1=E*(1.00-EI/(S1*E)-2.00*D/S2)
E1=DMAX1[E1][SMALL]
Q=math.sqrt((E/E1)*ARG1)/S1
Q=DMIN1[Q][1.00]
THETA=numpy.arcsin(Q*numpy.sin(THETA0))
F6=numpy.cos(THETA)
U=(S1-1.00)*(S1-1.00)/ARG1
CSQD=F3*F3
if(F3 < 0.00 and CSQD > U):
F6=-1.00*F6
F5=numpy.sin(THETA)
DCZ2=DMIN1[DCZ2][1.00]
ARGZ=math.sqrt(DCX2*DCX2+DCY2*DCY2)
if(ARGZ == 0.00):
DCZ1=F6
DCX1=F9*F5
DCY1=F8*F5
if(NTMPFLG == 1):
# USE FREE KINEMATICS FOR IONISATION SECONDARY ANGLES
F5S=F5*math.sqrt(E1/ES[NCLTMP])
if(F5S > 1.0):
F5S=1.0
THSEC=numpy.arcsin(F5S)
F5S=numpy.sin(THSEC)
F6S=numpy.cos(THSEC)
if(F6 < 0.0):
F6S=-F6S
PHIS=PHI0+API
if(PHIS > F4):
PHIS=PHI0-F4
F8S=numpy.sin(PHIS)
F9S=numpy.cos(PHIS)
DCZ[NCLTMP]=F6S
DCX[NCLTMP]=F9S*F5S
DCY[NCLTMP]=F8S*F5S
NTMPFLG=0
# endif
pass
# endif
else:
DCZ1=DCZ2*F6+ARGZ*F5*F8
DCY1=DCY2*F6+(F5/ARGZ)*(DCX2*F9-DCY2*DCZ2*F8)
DCX1=DCX2*F6-(F5/ARGZ)*(DCY2*F9+DCX2*DCZ2*F8)
if(NTMPFLG == 1):
# USE FREE KINEMATICS FOR IONISATION SECONDARY ANGLES
F5S=F5*math.sqrt(E1/ES[NCLTMP])
if(F5S > 1.0):
F5S=1.0
THSEC=numpy.arcsin(F5S)
F5S=numpy.sin(THSEC)
F6S=numpy.cos(THSEC)
if(F6 < 0.0):
F6S=-F6S
PHIS=PHI0+API
if(PHIS > F4):
PHIS=PHI0-F4
F8S=numpy.sin(PHIS)
F9S=numpy.cos(PHIS)
DCZ[NCLTMP]=DCZ2*F6S+ARGZ*F5S*F8S
DCY[NCLTMP]=DCY2*F6S+(F5S/ARGZ)*(DCX2*F9S-DCY2*DCZ2*F8S)
DCX[NCLTMP]=DCX2*F6S-(F5S/ARGZ)*(DCY2*F9S+DCX2*DCZ2*F8S)
NTMPFLG=0
# endif
# 190 CONTINUE
# TEST IF ELECTRON IS THERMALISED
if(E1 > ETHRM):
GOTO1()
# STORE POSITION AND TIME OF ELECTRON AND COLLISION HISTORY
#191
flag191=1
while (flag191):
flag191=0
if(flag335==0):
K1=K1+1
XST[K1]=X
YST[K1]=Y
ZST[K1]=Z
TST[K1]=ST
NFGF[K1]=NFLGFF
NFGPP[K1]=NFLGPPP
NFGBR[K1]=NFLGBRMM
TTIME[K1]=ST-TLAST
NELEC=NELEC+1
NETOT=NETOT+1
#335
if(K1 == 150000):
GOTO889()
# CATCH SINGLE ELECTRON CLUSTER THAT WAS ATTACHED.
# if(NELEC == 1 and NCLUS == 0) GO TO 210
#
if(NELEC == (NCLUS+1)):
# WRITE(6,884) NELEC,NCLUS,NEGION,J11
# 884 print(' NELEC=',I6,' NCLUS=',I6,' NEGION=',I3,' J11=',I6)
# LAST ELECTRON IN CLUSTER DO STATISTICS OVER PRIMARY CLUSTER
STATS(J11,J1)
pass
# endif
else:
if(NELEC < (NCLUS+1)) :
# GET NEW IONISATION ELECTRON FROM STORE
X=XS[NELEC]
Y=YS[NELEC]
Z=ZS[NELEC]
ST=TS[NELEC]
NFLGFF=NFLGFC[NELEC]
NFLGPPP=NFLGPPC[NELEC]
NFLGBRMM=NFLGBRMC[NELEC]
TLAST=TS[NELEC]
E1=ES[NELEC]
DCX1=DCX[NELEC]
DCY1=DCY[NELEC]
DCZ1=DCZ[NELEC]
if(E1 < ETHRM):
flag191=1
else:
GOTO1()
# endif
# MAIN LOOP # end
GOTO1()
# RESET NUMBER OF EVENTS FOR BAD EVENTS
if(IMIP > 2):
NDELTA=NDELTA-IBADTOT
#
print(' EMAX=','%.7f' % EMAX,' NEOVFL=',NEOVFL)
if(EMAX > EFINAL):
print('INCREASE ENERGY LIMIT FROM','%.6f' % EFINAL,' EV TO AT LEAST','%.6f' % EMAX,' EV.')
sys.exit()
# endif
return
# Cluster-store overflow handler (Fortran label 889): report and abort.
# NOTE(review): the recursive GOTO889() call and the return after
# sys.exit() are unreachable dead code left by the translator.
def GOTO889():
NLEFT=NCLUS-NELEC
print('\n\n\n WARNING STOPPED: AFTER NPRIME=',NPRIME,' LAST PRIMARY HASAT LEAST ',NLEFT,' SECONDARIES LEFT TO TRACK OUT OF ',NCLUS,' ELECTRONS ALREADY IN CLUSTER')
sys.exit()
GOTO889()
return
# end
def MONTEF():
# IMPLICIT #real*8 (A-H,O-Z)
# IMPLICIT #integer*8 (I-N)
COMMON/INPT/NGAS,NSTEP,NANISO,EFINAL,ESTEP,AKT,ARY,TEMPC,TORR,IPEN
COMMON/INPT1/NDVEC
COMMON/CNSTS1/CONST1,CONST2,CONST3,CONST4,CONST5
COMMON/SETP/TMAX,SMALL,API,ESTART,THETA,PHI,TCFMAX(10),TCFMAX1,RSTART,EFIELD,ETHRM,ECUT,NDELTA,IMIP,IWRITE
COMMON/BFLD/EOVB,WB,BTHETA,BMAG
COMMON/LARGE/CF(20000,512),EIN(512),TCF(20000),IARRY(512), RGAS(512),IPN(512),WPL(512),IZBR(512),IPLAST,PENFRA[3,512]
COMMON/LARGEN/CFN(20000,60),TCFN(20000),SCLENUL(60),NPLAST
COMMON/OUTPT/ICOLL(30),NETOT,NPRIME,TMAX1,TIME(300),NNULL,NITOT,ICOLN(512),ICOLNN(60),NREAL,NEXCTOT
COMMON/RLTVY/BET[2000],GAM(20000),VC,EMS
COMMON/STTS/XST(150000),YST(150000),ZST(150000),TST(150000),TTIME(150000),NFGF(150000),NFGPP(150000),NFGBR(150000),NELEC,NEGION,EST1,EST2
COMMON/STEXC/XSTEXC(150000),YSTEXC(150000),ZSTEXC(150000),TSTEXC(150000),NSTEXC
COMMON/STEXCNUL/XSTN(150000),YSTN(150000),ZSTN(150000),TSTN(150000),IDNUL(150000),NEXCNUL
COMMON/IONC/DOUBLE(6,20000),CMINIXSC[6],CMINEXSC[6],ECLOSS[6],WPLN[6],ICOUNT,AVPFRAC(3,6)
COMMON/IONFL/NC0(512),EC0(512),NG1(512),EG1(512),NG2(512),EG2(512),WKLM(512),DSTFL(512)
COMMON/IONMOD/ESPLIT(512,20),IONMODEL(512)
COMMON/ANIS/PSCT(20000,512),ANGCT(20000,512),INDEX(512),NISO
COMMON/CASRS/ECAS(400),XCAS(400),YCAS(400),ZCAS(400),DRXS(400),DRYS(400),DRZS(400),TT1(400),NFLGF(400),NFLGPP(400),IEVNTL
COMMON/COMP/LCMP,LCFLG,LRAY,LRFLG,LPAP,LPFLG,LBRM,LBFLG,LPEFLG
COMMON/BREMG/EBRGAM(10),BRDCOSX(10),BRDCOSY(10),BRDCOSZ[10],BRX(10),BRY(10),BRZ[10],BRT(10),EBRTOT[6],NBREM[6]
COMMON/CASRSB/ECASB[400],XCASB[400],YCASB[400],ZCASB[400],DRXB[400],DRYB[400],DRZB[400],TTB1(400),NFLGFB[400],NFLGPPB[400],IEVNTLB
COMMON/CASRSE/ECASE(400),XCASE(400),YCASE(400),ZCASE(400),DRXCE(400),DRYCE(400),DRZCE(400),TCASE(400),NFLGFE(400),NFLGPPE(400),IEVENTE
COMMON/ECASC/NEGAS(512),LEGAS(512),IESHELL(512),IECASC
COMMON/IDEXC/NGEXC1,NGEXC2,NGEXC3,NGEXC4,NGEXC5,NGEXC6,IDG1,IDG2,IDG3,IDG4,IDG5,IDG6
DIMENSION XS(150000),YS(150000),ZS(150000),TS(150000),ES(150000),DCX(150000),DCY(150000),DCZ[150000],NFLGFC(150000),NFLGPPC(150000),NFLGBRMC(150000)
DIMENSION TEMP(20000)
# ----------------------------------------------------------------------
# RELATIVISTIC KINEMATICS
# ELECTRIC AND MAGNETIC FIELDS PARALLEL TO Z-AXIS
# TRACKS DELTA ELECTRONS AND UPDATES ARRAYS CONTAINING POSITION AND
# TIME OF THERMALISED ELECTRONS.
# CALCULATES NUMBER OF PRODUCED ELECTRONS PER PRIMARY DELTA AND OTHER
# HIGHER FANO FACTORS
# RANGE CALCULATION IS ACCURATE ONLY FOR ANISOTROPIC X-SECTIONS.
# ----------------------------------------------------------------------
# VARYING ENERGY STEPS
if(EFINAL <= 140000.):
:
ESTEP1=(EFINAL-16000.0)/float(4000)
else:
ESTEP1=20.0
ESTEP2=(EFINAL-92000.0)/float(4000)
# endif
NPRINT=0
J300=300
J20000=20000
API=numpy.arccos(-1.00)
SMALL=1.0D-20
EMAX=0.00
TMAX1=0.00
RDUM=RSTART
CONST9=CONST3*0.010
DO 25 I=1,300
25 TIME[I]=0.00
DO 26 I=1,30
26 ICOLL[I]=0
DO 27 I=1,512
27 ICOLN[I]=0
NREAL=0
NNULL=0
NETOT=0
NEXCTOT=0
NITOT=0
NMXADD=0
NTMPFLG=0
BP=EFIELD*EFIELD*CONST1
F1=EFIELD*CONST2
F2=EFIELD*CONST3
F4=2.00*API
THETA1=THETA
PHI1=PHI
NEOVFL=0
# CALCULATE MAXIMUM COLLISION FREQUENCY
TLIM=0.0
DO 111 J=1,20000
TEMP[J]=TCFN[J]+TCF[J]
if(TLIM < TEMP[J]:
) TLIM=TEMP[J]
111 CONTINUE
# START OF PRIMARY DELTA LOOP
J1=0
DO 210 J11=1,NDELTA
J1=J1+1
NPRIME=J1
NGEXC1=0
NGEXC2=0
NGEXC3=0
NGEXC4=0
NGEXC5=0
NGEXC6=0
# INITIAL DIRECTION COSINES FOR ELECTRON BEAM
DCZ1=numpy.cos(THETA1)
DCX1=numpy.sin(THETA1)*numpy.cos(PHI1)
DCY1=numpy.sin(THETA1)*numpy.sin(PHI1)
NFLGFF=0
NFLGPPP=0
NFLGBRMM=0
NFLGHIGH=0
EST1=ESTART
# INITIAL VELOCITY
E1=ESTART
# VTOT=CONST9*math.sqrt(E1)
GAM1=(EMS+E1)/EMS
GAM12=GAM1
BET1=math.sqrt(1.00-1.00/(GAM1*GAM1))
VTOT=BET1*VC*1.0D-12
CX1=DCX1*VTOT
CY1=DCY1*VTOT
CZ1=DCZ1*VTOT
X=0.00
Y=0.00
Z=0.00
K1=0
KEXC=0
NSTEXC=0
NEXCNUL=0
NCLUS=0
NELEC=0
NEGION=0
TLAST=0.00
ST=0.00
TDASH=0.00
if(IMIP == 2):
GO TO 1
if(IMIP > 2):
:
# READIN FIRST ELECTRON FROM BETA DECAY OR XRAY UNTHERMALISED CLUSTERS
CALL CASRES(J11,IBADTOT,IBAD1)
# SKIP IF BAD EVENT
if(IBAD1 == 1):
:
J1=J1-1
GO TO 210
# endif
else if(IMIP == 1) :
# READ IN FIRST ELECTRON FROM MIP INTERACTION
CALL CASREM(J11)
EST1=ECAS[1]
EST2=EST1
# endif
X=XCAS[1]
Y=YCAS[1]
Z=ZCAS[1]
ST=TT1[1]
TS[1]=TT1[1]
E1=ECAS[1]
DCZ1=DRZS[1]
DCY1=DRYS[1]
DCX1=DRXS[1]
NFLGFF=NFLGF[1]
NFLGPPP=NFLGPP[1]
NFLGBRMM=0
NFLGHIGH=NFLGFF
GAM1=(EMS+E1)/EMS
BET1=math.sqrt(1.00-1.00/(GAM1*GAM1))
# VTOT=CONST9*math.sqrt(E1)
VTOT=BET1*VC*1.0D-12
#
CX1=DCX1*VTOT
CY1=DCY1*VTOT
CZ1=DCZ1*VTOT
# PUT REMAINDER OF ELECTRONS INTO CLUSTER STORE
ISDUM=0
DO 35 IST=2,IEVNTL
ISDUM=ISDUM+1
XS[ISDUM]=XCAS[IST]
YS[ISDUM]=YCAS[IST]
ZS[ISDUM]=ZCAS[IST]
TS[ISDUM]=TT1[IST]
ES[ISDUM]=ECAS[IST]
DCX[ISDUM]=DRXS[IST]
DCY[ISDUM]=DRYS[IST]
DCZ[ISDUM]=DRZS[IST]
NFLGFC[ISDUM]=NFLGF[IST]
NFLGPPC[ISDUM]=NFLGPP[IST]
NFLGBRMC[ISDUM]=0
NCLUS=ISDUM
if(NFLGF[IST]:
> NFLGHIGH) NFLGHIGH=NFLGF[IST]
35 CONTINUE
GAM12=GAM1
# START OF LOOP FOR NEWLY CREATED ELECTRONS
1 CONTINUE
R1=DRAND48(RDUM)
T=-math.log(R1)/TLIM+TDASH
TDASH=T
# AP=DCZ1*F2*math.sqrt(E1)
GAM1=(EMS+E1)/EMS
BET1=math.sqrt(1.00-1.00/(GAM1*GAM1))
AP=DCZ1*EFIELD*BET1*VC*1.0D-10
BP1=BP/GAM1
913 print(3X,' AFTER STORE NREAL=',I10,' E1=',E12.3,' T=',E12.3,' AP=',E12.3,' BP=',E12.3,' DCZ1=',E12.3)
# E=E1+(AP+BP*T)*T
E=E1+(AP+BP1*T)*T
if(E < 0.00):
:
if(NPRINT == 0):
WRITE(6,913)NREAL,E1,T,AP,BP,DCZ1
NPRINT=1
E=0.0010
# endif
# INSERT NEW ALGORITHM TO FIND IE FOR VARYING ENERGY STEP
if(IMIP == 1):
:
IE=int(E/ESTEP)+1
else:
if(EFINAL <= 20000.):
:
IE=int(E/ESTEP)+1
else if(EFINAL <= 140000.) :
if(E <= 16000.):
:
IE=int(E)+1
else:
IE=16000+int((E-16000.)/ESTEP1)
# endif
else:
if(E <= 12000.):
:
IE=int(E)+1
else if(E <= 92000.) :
IE=12000+int((E-12000.)/ESTEP1)
else:
IE=16000+int((E-92000.)/ESTEP2)
# endif
# endif
# endif
IE=DMIN0(IE,J20000)
#
# TEST FOR #real OR NULL COLLISION
#
R5=DRAND48(RDUM)
TEST1=TCF[IE]/TLIM
if(R5 <= TEST1):
GO TO 137
NNULL=NNULL+1
TEST2=TEMP[IE]/TLIM
if(R5 < TEST2):
:
# TEST FOR NULL LEVELS
if(NPLAST == 0):
GO TO 1
R2=DRAND48(RDUM)
I=0
888 I=I+1
if(CFN[IE][I]:
< R2) GO TO 888
# INCREMENT NULL LEVEL SUM
NEXCNUL=NEXCNUL+1
ICOLNN[I]=ICOLNN[I]+1
# STORE X Y Z T ID FOR MOLECULAR LIGHT EMISSION AND DISSOCIATION FROM
# NULL EXCITATION
# NOTE: SMALL APPROX USED POSITION OF PREVIOUS COLLISION
XSTN[NEXCNUL]=X
YSTN[NEXCNUL]=Y
ZSTN[NEXCNUL]=Z
TSTN[NEXCNUL]=ST
IDNUL[NEXCNUL]=I
GO TO 1
else:
# NULL
GO TO 1
# endif
#
# CALCULATE DIRECTION COSINES AND POSITIONS AT INSTANT BEFORE COLLISION
137 T2=T*T
GAM2=(EMS+E)/EMS
BET2=math.sqrt(1.00-1.00/(GAM2*GAM2))
GAM12=(GAM1+GAM2)/2.00
if(E > EMAX):
EMAX=E
if(T > TMAX1):
TMAX1=T
TDASH=0.00
NREAL=NREAL+1
WBT=WB*T/GAM12
# WBT=WB*T
WBR=WB/GAM12
COSWT=numpy.cos(WBT)
SINWT=numpy.sin(WBT)
# CONST6=math.sqrt(E1/E)
CONST6=BET1/BET2
CX2=CX1*COSWT-CY1*SINWT
CY2=CY1*COSWT+CX1*SINWT
# VTOT=CONST9*math.sqrt(E)
VTOT=VC*BET2*1.0D-12
DCX2=CX2/VTOT
DCY2=CY2/VTOT
# DCZ2=DCZ1*CONST6+EFIELD*T*CONST5/math.sqrt(E)
DCZ2=DCZ1*CONST6+EFIELD*T*2.0D10*CONST1/(VC*BET2)
# CONST7=CONST9*math.sqrt(E1)
CONST7=VC*BET1*1.0D-12
A=T*CONST7
# DX=(CX1*SINWT-CY1*(1.00-COSWT))/WB
DX=(CX1*SINWT-CY1*(1.00-COSWT))/WBR
X=X+DX
# DY=(CY1*SINWT+CX1*(1.00-COSWT))/WB
DY=(CY1*SINWT+CX1*(1.00-COSWT))/WBR
Y=Y+DY
# Z=Z+DCZ1*A+T2*F1
Z=Z+DCZ1*A+T2*F1/GAM12
ST=ST+T
IT=int(T+1.00)
IT=DMIN0[IT][J300]
TIME[IT]=TIME[IT]+1.00
# ---------------------------------------------------------------------
# DETERMINATION OF #real COLLISION TYPE
# ---------------------------------------------------------------------
R2=DRAND48(RDUM)
I=0
140 I=I+1
if(I <= 0 or I > 512):
:
WRITE(6,945) I
945 print(' BAD SELECTION I=',I8)
sys.exit()
# endif
if(CF[IE][I]:
< R2) GO TO 140
#************************************************************
# CHECK IF BREMSSTRAHLUNG
if(IZBR[I]:
!= 0 and LBRM == 1) :
NFLGBRMM=1
IPT=IARRY[I]
ICOLL[IPT]=ICOLL[IPT]+1
ICOLN[I]=ICOLN[I]+1
DO 141 KNGS=1,NGAS
if(IPT == (KNGS*5):
-1) GO TO 142
141 CONTINUE
142 IATOMNO=IZBR[I]
CALL BREMS(IATOMNO,E,DCX2,DCY2,DCZ2,EOUT,EDCX,EDCY,EDCZ,EGAMMA,GDCX,GDCY,GDCZ)
NBREM[KNGS]=NBREM[KNGS]+1
EBRTOT[KNGS]=EBRTOT[KNGS]+EGAMMA
# WRITE(6,668) EGAMMA,J11
# 668 print(' BREM EGAMMA=','%.4f' % ,' EVENT NO=',I5)
# GET NEW DRCOS DRCOSY DRCOSX AND ENERGY OF ELECTRON
E1=EOUT
DCX1=EDCX
DCY1=EDCY
DCZ1=EDCZ
# RUN BREMSSTRAHLUNG GAMMA THROUGH CASCADE : STORE CONVERTED
# ELECTRONS IN COMMON/CASRSB/
#
CALL BREMSCASC(J11,EGAMMA,X,Y,Z,ST,GDCX,GDCY,GDCZ,ILOW)
# BREMSSTRAHLUNG ENERGY TOO LOW TO IONISE
if(ILOW == 1):
GO TO 190
#
# STORE BREMSSTRAHLUNG DATA IN CLUSTER STORE
#
DO 890 KBR=1,IEVNTLB
NCLUS=NCLUS+1
if(NCLUS > 150000):
:
WRITE(6,546) NCLUS,NREAL
sys.exit()
# endif
ES[NCLUS]=ECASB[KBR]
XS[NCLUS]=XCASB[KBR]
YS[NCLUS]=YCASB[KBR]
ZS[NCLUS]=ZCASB[KBR]
TS[NCLUS]=TTB1[KBR]
DCX[NCLUS]=DRXB[KBR]
DCY[NCLUS]=DRYB[KBR]
DCZ[NCLUS]=DRZB[KBR]
NFLGFC[NCLUS]=NFLGFB[KBR]+NFLGHIGH
NFLGPPC[NCLUS]=NFLGPPB[KBR]
NFLGBRMC[NCLUS]=2
890 CONTINUE
if(NFLGFC[NCLUS]:
> NFLGHIGH) NFLGHIGH=NFLGFC[NCLUS]
GO TO 190
# endif
891 CONTINUE
#****************************************************************
# S1=RGAS[I]
S1=1.00+GAM2*(RGAS[I]-1.00)
EI=EIN[I]
if(E < EI):
:
EI=E-0.00010
# endif
if(IPN[I]:
== 0) GO TO 666
# ATTACHMENT
if(IPN[I]:
== -1) :
NETOT=NETOT+1
NITOT=NITOT+1
NELEC=NELEC+1
NEGION=NEGION+1
IPT=IARRY[I]
ICOLL[IPT]=ICOLL[IPT]+1
ICOLN[I]=ICOLN[I]+1
IT=int(T+1.00)
IT=DMIN0[IT][J300]
TIME[IT]=TIME[IT]+1.00
GO TO 335
# endif
EISTR=EI
if(IONMODEL[I]:
> 0) :
# CALCULATE SECONDARY ENERGY,ESEC,IN IONISATION COLLISION USING
# FIVE DIFFERENT MODELS
CALL IONSPLIT(I,E,EI,ESEC)
GO TO 544
# endif
R9=DRAND48(RDUM)
# USE OPAL PETERSON AND BEATY SPLITTING FACTOR.
ESEC=WPL[I]*TAN(R9*ATAN((E-EI)/(2.00*WPL[I])))
ESEC=WPL[I]*(ESEC/WPL[I])**0.9524
544 CONTINUE
EI=ESEC+EI
# STORE POSITION ,ENERGY, DIRECTION COSINES AND TIME OF GENERATION
# OF SECONDARY IONISATION ELECTRON
NCLUS=NCLUS+1
NMXADD=MAX[NCLUS][NMXADD]
if(NCLUS > 150000):
:
WRITE(6,546) NCLUS,NREAL
546 print(2X,' def STOPPED: . NCLUS=',I7,' NREAL =',I10)
sys.exit()
# endif
XS[NCLUS]=X
YS[NCLUS]=Y
ZS[NCLUS]=Z
TS[NCLUS]=ST
ES[NCLUS]=ESEC
NFLGFC[NCLUS]=NFLGFF
NFLGPPC[NCLUS]=NFLGPPP
NFLGBRMC[NCLUS]=NFLGBRMM
NTMPFLG=1
NCLTMP=NCLUS
# RANDOMISE SECONDARY ELECTRON DIRECTION
# R3=drand48(RDUM)
# F3=1.0-2.00*R3
# THETA0=DACOS(F3)
# F6=DCOS(THETA0)
# F5=DSIN(THETA0)
# R4=drand48(RDUM)
# PHI0=F4*R4
# F8=DSIN(PHI0)
# F9=DCOS(PHI0)
# DCX[NCLUS]=F9*F5
# DCY[NCLUS]=F8*F5
# DCZ[NCLUS]=F6
#*********************************************************
if(IECASC == 0):
GO TO 333
if(LEGAS[I]:
== 0) GO TO 333
# USE COMPLETE CASCADE FOR ELECTRON IONISATION
KG1=NEGAS[I]
LG1=LEGAS[I]
IGSHEL=IESHELL[I]
CALL CASCADEE(J11,KG1,LG1,X,Y,Z,ST,ESEC,IGSHEL)
#
# STORE CASCADE IN CLUSTER STORE
#
ETSUM=0.0
DO 844 KBR=1,IEVENTE
NCLUS=NCLUS+1
if(NCLUS > 150000):
:
WRITE(6,546) NCLUS,NREAL
sys.exit()
# endif
ES[NCLUS]=ECASE[KBR]
ETSUM=ETSUM+ES[NCLUS]
XS[NCLUS]=XCASE[KBR]
YS[NCLUS]=YCASE[KBR]
ZS[NCLUS]=ZCASE[KBR]
TS[NCLUS]=TCASE[KBR]
DCX[NCLUS]=DRXCE[KBR]
DCY[NCLUS]=DRYCE[KBR]
DCZ[NCLUS]=DRZCE[KBR]
NFLGFC[NCLUS]=NFLGFE[KBR]+NFLGHIGH
NFLGPPC[NCLUS]=NFLGPPE[KBR]
NFLGBRMC[NCLUS]=NFLGBRMM
844 CONTINUE
if(NFLGFC[NCLUS]:
> NFLGHIGH) NFLGHIGH=NFLGFC[NCLUS]
GO TO 666
#*********************************************************
# STORE POSSIBLE SHELL EMISSSIONS BY AUGER OR FLUORESCENCE
333 if (EISTR > 30.0) :
# TEST IF FLUORESCENCE EMISSION
IFLTST=0:
if(WKLM[I]:
> 0.0) :
R9=DRAND48(RDUM)
if(R9 < WKLM[I]:
) IFLTST=1
# endif
if(IFLTST == 0):
:
# AUGER EMISSION WITHOUT FLUORESCENCE
NAUG=NC0[I]
EAVAUG=EC0[I]/float(NAUG)
DO 700 JFL=1,NC0[I]
NCLUS=NCLUS+1
XS[NCLUS]=X
YS[NCLUS]=Y
ZS[NCLUS]=Z
TS[NCLUS]=ST
NFLGFC[NCLUS]=NFLGFF
NFLGPPC[NCLUS]=NFLGPPP
NFLGBRMC[NCLUS]=NFLGBRMM
ES[NCLUS]=EAVAUG
R3=DRAND48(RDUM)
F3=1.0-2.00*R3
THETA0=numpy.arccos(F3)
F6=numpy.cos(THETA0)
F5=numpy.sin(THETA0)
R4=DRAND48(RDUM)
PHI0=F4*R4
F8=numpy.sin(PHI0)
F9=numpy.cos(PHI0)
DCX[NCLUS]=F9*F5
DCY[NCLUS]=F8*F5
DCZ[NCLUS]=F6
700 CONTINUE
else:
# AUGER EMISSION AND FLUORESCENCE
if(NG2[I]:
== 0) GO TO 702
NAUG=NG2[I]
EAVAUG=EG2[I]/float(NAUG)
DO 701 JFL=1,NG2[I]
NCLUS=NCLUS+1
XS[NCLUS]=X
YS[NCLUS]=Y
ZS[NCLUS]=Z
NFLGFC[NCLUS]=NFLGFF
NFLGPPC[NCLUS]=NFLGPPP
NFLGBRMC[NCLUS]=NFLGBRMM
TS[NCLUS]=ST
ES[NCLUS]=EAVAUG
R3=DRAND48(RDUM)
F3=1.0-2.00*R3
THETA0=numpy.arccos(F3)
F6=numpy.cos(THETA0)
F5=numpy.sin(THETA0)
R4=DRAND48(RDUM)
PHI0=F4*R4
F8=numpy.sin(PHI0)
F9=numpy.cos(PHI0)
DCX[NCLUS]=F9*F5
DCY[NCLUS]=F8*F5
DCZ[NCLUS]=F6
701 CONTINUE
702 if(NG1[I] == 0) GO TO 704
NAUG=NG1[I]
EAVAUG=EG1[I]/float(NAUG)
R9=DRAND48(RDUM)
DFL=-math.log(R9)*DSTFL[I]
DO 703 JFL=1,NG1[I]
NCLUS=NCLUS+1
R3=DRAND48(RDUM)
THEFL=numpy.arccos(1.0-2.00*R3)
R4=DRAND48(RDUM)
PHIFL=F4*R4
XS[NCLUS]=X+DFL*numpy.sin(THEFL)*numpy.cos(PHIFL)
YS[NCLUS]=Y+DFL*numpy.sin(THEFL)*numpy.sin(PHIFL)
ZS[NCLUS]=Z+DFL*numpy.cos(THEFL)
NFLGFC[NCLUS]=NFLGHIGH+1
NFLGPPC[NCLUS]=NFLGPPP
NFLGBRMC[NCLUS]=NFLGBRMM
TS[NCLUS]=ST
ES[NCLUS]=EAVAUG
R3=DRAND48(RDUM)
F3=1.0-2.00*R3
THETA0=numpy.arccos(F3)
F6=numpy.cos(THETA0)
F5=numpy.sin(THETA0)
R4=DRAND48(RDUM)
PHI0=F4*R4
F8=numpy.sin(PHI0)
F9=numpy.cos(PHI0)
DCX[NCLUS]=F9*F5
DCY[NCLUS]=F8*F5
DCZ[NCLUS]=F6
NFLGHIGH=NFLGFC[NCLUS]
703 CONTINUE
704 CONTINUE
# endif
# endif
#
# GENERATE SCATTERING ANGLES AND UPDATE LABORATORY COSINES AFTER
# COLLISION ALSO UPDATE ENERGY OF ELECTRON.
#
666 IPT=IARRY[I]
ICOLL[IPT]=ICOLL[IPT]+1
ICOLN[I]=ICOLN[I]+1
# IF EXCITATION : ADD PROBABILITY,PENFRA(1,I), OF TRANSFER TO GIVE
# IONISATION OF THE OTHER GASES IN MIXTURE
if(IPEN == 0 or NGAS == 1):
GO TO 5
if(PENFRA[1][I] != 0.0):
:
RAN=DRAND48(RDUM)
if(RAN > PENFRA[1][I]):
GO TO 5
NCLUS=NCLUS+1
# ENTER HERE POSSIBLE DELOCALISATION LENGTH FOR PENNING TRANSFER
if(PENFRA[2][I] == 0.0):
:
XS[NCLUS]=X
YS[NCLUS]=Y
ZS[NCLUS]=Z
NFLGFC[NCLUS]=NFLGFF
NFLGPPC[NCLUS]=NFLGPPP
NFLGBRMC[NCLUS]=NFLGBRMM
GO TO 667
# endif
ASIGN=1.0
RAN=DRAND48(RDUM)
RAN1=DRAND48(RDUM)
if(RAN1 < 0.5):
ASIGN=-ASIGN
XS[NCLUS]=X-math.log(RAN)*PENFRA[2][I]*ASIGN
RAN=DRAND48(RDUM)
RAN1=DRAND48(RDUM)
if(RAN1 < 0.5):
ASIGN=-ASIGN
YS[NCLUS]=Y-math.log(RAN)*PENFRA[2][I]*ASIGN
RAN=DRAND48(RDUM)
RAN1=DRAND48(RDUM)
if(RAN1 < 0.5):
ASIGN=-ASIGN
ZS[NCLUS]=Z-math.log(RAN)*PENFRA[2][I]*ASIGN
667 RAN=DRAND48(RDUM)
TS[NCLUS]=ST-math.log(RAN)*PENFRA[3][I]
# ASSIGN EXCESS ENERGY OF 1EV TO PENNING CREATED ELECTRON
ES[NCLUS]=1.0
DCX[NCLUS]=DCX1
DCY[NCLUS]=DCY1
DCZ[NCLUS]=DCZ1
GO TO 6
# endif
# GO TO 6
# CALCULATE SUM OF EXCITATION PER CLUSTER AND STORE EXCITATION X Y Z T
5 if(IPN[I] == 0) :
if((RGAS[I]:
*EIN[I]) > 4.0) :
KEXC=KEXC+1
if(KEXC > 150000):
:
WRITE(6,548) KEXC
548 print(2X,' def STOPPED: . KEXC=',I7)
sys.exit()
# endif
# FIND GAS IN WHICH EXCITATION OCCURED AND INCREMENT COUNTER
if(I <= IDG1):
:
NGEXC1=NGEXC1+1
else if(I <= IDG2) :
NGEXC2=NGEXC2+1
else if(I <= IDG3) :
NGEXC3=NGEXC3+1
else if(I <= IDG4) :
NGEXC4=NGEXC4+1
else if(I <= IDG5) :
NGEXC5=NGEXC5+1
else if(I <= IDG6) :
NGEXC6=NGEXC6+1
else:
WRITE(6,9911)
9911 print(' def STOPPED: BAD GAS ID IN MONTE')
sys.exit()
# endif
NEXCTOT=NEXCTOT+1
NSTEXC=NSTEXC+1
XSTEXC[KEXC]=X
YSTEXC[KEXC]=Y
ZSTEXC[KEXC]=Z
TSTEXC[KEXC]=ST
# endif
# endif
6 S2=(S1*S1)/(S1-1.00)
# ANISOTROPIC SCATTERING
R3=DRAND48(RDUM)
if(INDEX[I]:
== 1) :
R31=DRAND48(RDUM)
F3=1.00-R3*ANGCT[IE][I]
if(R31 > PSCT[IE][I]:
) F3=-F3
else if (INDEX[I] == 2) :
EPSI=PSCT[IE][I]
F3=1.00-(2.00*R3*(1.00-EPSI)/(1.00+EPSI*(1.00-2.00*R3)))
else:
# ISOTROPIC SCATTERING
F3=1.00-2.00*R3
# endif
THETA0=numpy.arccos(F3)
R4=DRAND48(RDUM)
PHI0=F4*R4
F8=numpy.sin(PHI0)
F9=numpy.cos(PHI0)
if(E < EI):
EI=0.00
ARG1=1.00-S1*EI/E
ARG1=DMAX1[ARG1][SMALL]
D=1.00-F3*math.sqrt(ARG1)
E1=E*(1.00-EI/(S1*E)-2.00*D/S2)
E1=DMAX1[E1][SMALL]
Q=math.sqrt((E/E1)*ARG1)/S1
Q=DMIN1[Q][1.00]
THETA=numpy.arcsin(Q*numpy.sin(THETA0))
F6=numpy.cos(THETA)
U=(S1-1.00)*(S1-1.00)/ARG1
CSQD=F3*F3
if(F3 < 0.00 and CSQD > U):
F6=-1.00*F6
F5=numpy.sin(THETA)
DCZ2=DMIN1[DCZ2][1.00]
ARGZ=math.sqrt(DCX2*DCX2+DCY2*DCY2)
if(ARGZ == 0.00):
:
DCZ1=F6
DCX1=F9*F5
DCY1=F8*F5
if(NTMPFLG == 1):
:
# USE FREE KINEMATICS FOR IONISATION SECONDARY ANGLES
F5S=F5*math.sqrt(E1/ES[NCLTMP])
if(F5S >= 1.0):
F5S=0.999
THSEC=numpy.arcsin(F5S)
F5S=numpy.sin(THSEC)
F6S=numpy.cos(THSEC)
if(F6 < 0.0):
F6S=-F6S
PHIS=PHI0+API
if(PHIS > F4):
PHIS=PHI0-F4
F8S=numpy.sin(PHIS)
F9S=numpy.cos(PHIS)
DCZ[NCLTMP]=F6S
DCX[NCLTMP]=F9S*F5S
DCY[NCLTMP]=F8S*F5S
NTMPFLG=0
# endif
GO TO 190
# endif
DCZ1=DCZ2*F6+ARGZ*F5*F8
DCY1=DCY2*F6+(F5/ARGZ)*(DCX2*F9-DCY2*DCZ2*F8)
DCX1=DCX2*F6-(F5/ARGZ)*(DCY2*F9+DCX2*DCZ2*F8)
if(NTMPFLG == 1):
:
# USE FREE KINEMATICS FOR IONISATION SECONDARY ANGLES
F5S=F5*math.sqrt(E1/ES[NCLTMP])
if(F5S >= 1.0):
F5S=0.999
THSEC=numpy.arcsin(F5S)
F5S=numpy.sin(THSEC)
F6S=numpy.cos(THSEC)
if(F6 < 0.0):
F6S=-F6S
PHIS=PHI0+API
if(PHIS > F4):
PHIS=PHI0-F4
F8S=numpy.sin(PHIS)
F9S=numpy.cos(PHIS)
DCZ[NCLTMP]=DCZ2*F6S+ARGZ*F5S*F8S
DCY[NCLTMP]=DCY2*F6S+(F5S/ARGZ)*(DCX2*F9S-DCY2*DCZ2*F8S)
DCX[NCLTMP]=DCX2*F6S-(F5S/ARGZ)*(DCY2*F9S+DCX2*DCZ2*F8S)
NTMPFLG=0
# endif
190 CONTINUE
# VTOT=CONST9*math.sqrt(E1)
GAM1=(EMS+E1)/EMS
BET1=math.sqrt(1.00-1.00/(GAM1*GAM1))
VTOT=BET1*VC*1.0D-12
CX1=DCX1*VTOT
CY1=DCY1*VTOT
CZ1=DCZ1*VTOT
# TEST IF ELECTRON IS THERMALISED
if(E1 > ETHRM):
GO TO 1
# STORE POSITION AND TIME OF THERMALISED ELECTRON
191 CONTINUE
K1=K1+1
XST[K1]=X
YST[K1]=Y
ZST[K1]=Z
TST[K1]=ST
NFGF[K1]=NFLGFF
NFGPP[K1]=NFLGPPP
NFGBR[K1]=NFLGBRMM
TTIME[K1]=ST-TLAST
NELEC=NELEC+1
NETOT=NETOT+1
# WRITE(6,777) XST[K1],YST[K1],ZST[K1],TST[K1],NFGF[K1],NFGPP[K1],
# /NFGBR[K1],NELEC,NETOT,K1
# 777 print(' XST=','%.4f' % ,' YST=','%.4f' % ,' ZST=','%.4f' % ,' TST=','%.4f' % ,/,
# /' NFGF=',I4,' NFGPP=',I4,' NFGBR=',I4,' NELEC=',I4,' NETOT=',I4,
# /' K1=',I4)
335 if(K1 == 150000) GO TO 889
if(NELEC == (NCLUS+1):
) :
# LAST ELECTRON IN CLUSTER, DO STATISTICS ON PRIMARY
CALL STATS(J11,J1)
GO TO 210
# endif
# GET NEW IONISATION ELECTRON FROM STORE
X=XS[NELEC]
Y=YS[NELEC]
Z=ZS[NELEC]
ST=TS[NELEC]
NFLGFF=NFLGFC[NELEC]
NFLGPPP=NFLGPPC[NELEC]
NFLGBRMM=NFLGBRMC[NELEC]
TLAST=TS[NELEC]
E1=ES[NELEC]
DCX1=DCX[NELEC]
DCY1=DCY[NELEC]
DCZ1=DCZ[NELEC]
# IF(NELEC > 94) WRITE(6,766) X,Y,Z,ST,E1,DCX1,DCY1,DCZ1,NELEC
# 766 print(' X=','%.4f' % ,' Y=','%.4f' % ,' Z=','%.4f' % ,' T=','%.4f' % ,/,' E=',
# /'%.4f' % ,' DCX=','%.4f' % ,' DCY=','%.4f' % ,' DCZ=','%.4f' % ,' NELEC=',I6,/)
# STORE ALREADY THERMALISED ELECTRONS
if(E1 < ETHRM):
GO TO 191
GO TO 1
# MAIN LOOP # end
210 CONTINUE
# RESET NUMBER OF EVENTS FOR BAD EVENTS
if(IMIP > 2):
NDELTA=NDELTA-IBADTOT
#
WRITE(6,887) EMAX,NEOVFL
887 print(' EMAX=','%.7f' % ,' NEOVFL =',I5)
if(EMAX > EFINAL):
:
WRITE(6,989) EFINAL,EMAX
989 print('INCREASE ENERGY LIMIT FROM','%.6f' % ,' EV TO AT LEAST','%.6f' % ,' EV.')
sys.exit()
# endif
return
889 NLEFT=NCLUS-NELEC
WRITE(6,992) NPRIME,NLEFT,NCLUS
992 print(3(/),' WARNING STOPPED: AFTER NPRIME=',I6,' LAST PRIMARY HAS AT LEAST ',I6,' SECONDARIES LEFT TO TRACK, OUT OF ',I6,' ELECTRONS ALREADY IN CLUSTER')
sys.exit()
return
# end
def MONTEFB():
# NOTE(review): This routine reads as a mechanical Fortran-to-Python
# translation (Magboltz/Degrad lineage): COMMON blocks, GO TO labels,
# WRITE(6,...) statements, Fortran FORMAT items inside print(...) and
# DO ... CONTINUE loops survive untranslated.  It is documented here
# as-is; it is not executable Python in its present form.
#
# PURPOSE (from the header below and the visible code): relativistic
# Monte Carlo tracking of delta electrons with the electric field along
# the Z-axis and the magnetic field along the X-axis.  Thermalised
# electron positions/times are accumulated in the /STTS/ arrays and
# per-primary statistics are computed via CALL STATS.
# IMPLICIT #real*8 (A-H,O-Z)
# IMPLICIT #integer*8 (I-N)
# SHARED STATE (FORTRAN COMMON BLOCKS): INPUT SETTINGS, CROSS SECTIONS,
# FIELD PARAMETERS, OUTPUT COUNTERS AND CASCADE/CLUSTER STORES.
COMMON/INPT/NGAS,NSTEP,NANISO,EFINAL,ESTEP,AKT,ARY,TEMPC,TORR,IPEN
COMMON/INPT1/NDVEC
COMMON/CNSTS1/CONST1,CONST2,CONST3,CONST4,CONST5
COMMON/SETP/TMAX,SMALL,API,ESTART,THETA,PHI,TCFMAX(10),TCFMAX1,RSTART,EFIELD,ETHRM,ECUT,NDELTA,IMIP,IWRITE
COMMON/BFLD/EOVB,WB,BTHETA,BMAG
COMMON/LARGE/CF(20000,512),EIN(512),TCF(20000),IARRY(512), RGAS(512),IPN(512),WPL(512),IZBR(512),IPLAST,PENFRA[3,512]
COMMON/LARGEN/CFN(20000,60),TCFN(20000),SCLENUL(60),NPLAST
COMMON/OUTPT/ICOLL(30),NETOT,NPRIME,TMAX1,TIME(300),NNULL, NITOT,ICOLN(512),ICOLNN(60),NREAL,NEXCTOT
COMMON/RLTVY/BET[2000],GAM(20000),VC,EMS
COMMON/STTS/XST(150000),YST(150000),ZST(150000),TST(150000),TTIME(150000),NFGF(150000),NFGPP(150000),NFGBR(150000),NELEC,NEGION,EST1,EST2
COMMON/STEXC/XSTEXC(150000),YSTEXC(150000),ZSTEXC(150000),TSTEXC(150000),NSTEXC
COMMON/STEXCNUL/XSTN(150000),YSTN(150000),ZSTN(150000),TSTN(150000),IDNUL(150000),NEXCNUL
COMMON/IONC/DOUBLE(6,20000),CMINIXSC[6],CMINEXSC[6],ECLOSS[6],WPLN[6],ICOUNT,AVPFRAC(3,6)
COMMON/IONFL/NC0(512),EC0(512),NG1(512),EG1(512),NG2(512),EG2(512),WKLM(512),DSTFL(512)
COMMON/IONMOD/ESPLIT(512,20),IONMODEL(512)
COMMON/ANIS/PSCT(20000,512),ANGCT(20000,512),INDEX(512),NISO
COMMON/CASRS/ECAS(400),XCAS(400),YCAS(400),ZCAS(400),DRXS(400),DRYS(400),DRZS(400),TT1(400),NFLGF(400),NFLGPP(400),IEVNTL
COMMON/COMP/LCMP,LCFLG,LRAY,LRFLG,LPAP,LPFLG,LBRM,LBFLG,LPEFLG
COMMON/BREMG/EBRGAM(10),BRDCOSX(10),BRDCOSY(10),BRDCOSZ[10],BRX(10),BRY(10),BRZ[10],BRT(10),EBRTOT[6],NBREM[6]
COMMON/CASRSB/ECASB[400],XCASB[400],YCASB[400],ZCASB[400],DRXB[400],DRYB[400],DRZB[400],TTB1(400),NFLGFB[400],NFLGPPB[400],IEVNTLB
COMMON/CASRSE/ECASE(400),XCASE(400),YCASE(400),ZCASE(400),DRXCE(400),DRYCE(400),DRZCE(400),TCASE(400),NFLGFE(400),NFLGPPE(400),IEVENTE
COMMON/ECASC/NEGAS(512),LEGAS(512),IESHELL(512),IECASC
COMMON/IDEXC/NGEXC1,NGEXC2,NGEXC3,NGEXC4,NGEXC5,NGEXC6,IDG1,IDG2,IDG3,IDG4,IDG5,IDG6
# LOCAL CLUSTER STORE FOR SECONDARY ELECTRONS AWAITING TRACKING
# (PARALLEL ARRAYS: POSITION, TIME, ENERGY, DIRECTION COSINES, FLAGS).
DIMENSION XS(150000),YS(150000),ZS(150000),TS(150000),ES(150000),DCX(150000),DCY(150000),DCZ[150000],NFLGFC(150000),NFLGPPC(150000),NFLGBRMC(150000)
DIMENSION TEMP(20000)
# -------------------------------------------------------------------
# RELATIVISTIC VERSION
# ELECTRIC FIELD ALONG Z-AXIS MAGNETIC FIELD ALONG X-AXIS.
# TRACKS DELTA ELECTRONS AND UPDATES ARRAYS CONTAINING POSITION AND
# TIME OF THERMALISED ELECTRONS.
# CALCULATES NUMBER OF PRODUCED ELECTRONS PER PRIMARY DELTA AND OTHER
# HIGHER FANO FACTORS .
# -------------------------------------------------------------------
# VARYING ENERGY STEPS
# THE ENERGY GRID IS NON-UNIFORM ABOVE 16 KEV (OR 12/92 KEV FOR THE
# HIGHEST RANGE); ESTEP1/ESTEP2 ARE THE COARSE STEP SIZES.
if(EFINAL <= 140000.):
:
ESTEP1=(EFINAL-16000.0)/float(4000)
else:
ESTEP1=20.0
ESTEP2=(EFINAL-92000.0)/float(4000)
# endif
NPRINT=0
J20000=20000
J300=300
API=numpy.arccos(-1.00)
SMALL=1.0D-20
EMAX=0.00
TMAX1=0.00
RDUM=RSTART
CONST9=CONST3*0.010
# ZERO THE TIME HISTOGRAM AND PER-TYPE / PER-LEVEL COLLISION COUNTERS
DO 25 I=1,300
25 TIME[I]=0.00
DO 26 I=1,30
26 ICOLL[I]=0
DO 27 I=1,512
27 ICOLN[I]=0
NREAL=0
NNULL=0
NETOT=0
NEXCTOT=0
NITOT=0
NMXADD=0
NTMPFLG=0
THETA1=THETA
PHI1=PHI
# F4 = 2*PI, USED FOR UNIFORM AZIMUTHAL ANGLE SAMPLING
F4=2.00*API
NEOVFL=0
# CALCULATE MAXIMUM COLLISION FREQUENCY
# TLIM = MAX OVER THE ENERGY GRID OF (REAL + NULL) COLLISION
# FREQUENCY; USED AS THE CONSTANT RATE IN THE NULL-COLLISION METHOD.
TLIM=0.0
DO 111 J=1,20000
TEMP[J]=TCFN[J]+TCF[J]
if(TLIM < TEMP[J]:
) TLIM=TEMP[J]
111 CONTINUE
J1=0
# START OF PRIMARY EVENT LOOP
DO 210 J11=1,NDELTA
J1=J1+1
NPRIME=J1
NGEXC1=0
NGEXC2=0
NGEXC3=0
NGEXC4=0
NGEXC5=0
NGEXC6=0
# INITIAL DIRECTION COSINES
DCZ1=numpy.cos(THETA1)
DCX1=numpy.sin(THETA1)*numpy.cos(PHI1)
DCY1=numpy.sin(THETA1)*numpy.sin(PHI1)
NFLGFF=0
NFLGPPP=0
NFLGBRMM=0
NFLGHIGH=0
EST1=ESTART
# INITIAL VELOCITY,TIME AND POSITION
E1=ESTART
GAM1=(EMS+E1)/EMS
GAM12=GAM1
BET1=math.sqrt(1.00-1.00/(GAM1*GAM1))
# SPEED FROM RELATIVISTIC BETA; 1.0D-12 SCALES TO THE INTERNAL
# TIME UNIT (PICOSECONDS, BY THE LOOK OF THE 1.0D-12 FACTOR -- TODO confirm)
VTOT=BET1*VC*1.0D-12
# VTOT=CONST9*math.sqrt(E1)
CX1=DCX1*VTOT
CY1=DCY1*VTOT
CZ1=DCZ1*VTOT
X=0.00
Y=0.00
Z=0.00
K1=0
KEXC=0
NSTEXC=0
NEXCNUL=0
NCLUS=0
NELEC=0
NEGION=0
TLAST=0.00
ST=0.00
TDASH=0.00
if(IMIP == 2):
GO TO 1
if(IMIP > 2):
:
# READ IN FIRST ELECTRON FROM BETA DECAY OR XRAY UNTHERMALISED CLUSTERS
CALL CASRES(J11,IBADTOT,IBAD1)
# SKIP IF BAD EVENT
if(IBAD1 == 1):
:
J1=J1-1
GO TO 210
# endif
else if(IMIP == 1) :
# READ IN FIRST ELECTRON FROM MIP INTERACTION
CALL CASREM(J11)
EST1=ECAS[1]
EST2=EST1
# endif
# SEED THE TRACKED ELECTRON FROM THE FIRST CASCADE ENTRY
X=XCAS[1]
Y=YCAS[1]
Z=ZCAS[1]
ST=TT1[1]
TS[1]=TT1[1]
E1=ECAS[1]
DCZ1=DRZS[1]
DCY1=DRYS[1]
DCX1=DRXS[1]
NFLGFF=NFLGF[1]
NFLGPPP=NFLGPP[1]
NFLGBRMM=0
NFLGHIGH=NFLGFF
GAM1=(EMS+E1)/EMS
BET1=math.sqrt(1.00-1.00/(GAM1*GAM1))
VTOT=VC*BET1*1.0D-12
# VTOT=CONST9*math.sqrt(E1)
CX1=DCX1*VTOT
CY1=DCY1*VTOT
CZ1=DCZ1*VTOT
# PUT REMAINDER OF ELECTRONS INTO CLUSTER STORE
ISDUM=0
DO 35 IST=2,IEVNTL
ISDUM=ISDUM+1
XS[ISDUM]=XCAS[IST]
YS[ISDUM]=YCAS[IST]
ZS[ISDUM]=ZCAS[IST]
TS[ISDUM]=TT1[IST]
ES[ISDUM]=ECAS[IST]
DCX[ISDUM]=DRXS[IST]
DCY[ISDUM]=DRYS[IST]
DCZ[ISDUM]=DRZS[IST]
NFLGFC[ISDUM]=NFLGF[IST]
NFLGPPC[ISDUM]=NFLGPP[IST]
NFLGBRMC[ISDUM]=0
NCLUS=ISDUM
if(NFLGFC[IST]:
> NFLGHIGH) NFLGHIGH=NFLGFC[IST]
35 CONTINUE
GAM12=GAM1
# START OF LOOP FOR NEWLY CREATED ELECTRONS
1 CONTINUE
# SAMPLE FREE-FLIGHT TIME TO THE NEXT (REAL OR NULL) COLLISION FROM
# AN EXPONENTIAL WITH CONSTANT RATE TLIM (NULL-COLLISION TECHNIQUE).
R1=DRAND48(RDUM)
T=-math.log(R1)/TLIM+TDASH
TDASH=T
# HELICAL MOTION IN THE B-FIELD: WB IS THE CYCLOTRON FREQUENCY,
# DIVIDED BY GAM12 (MEAN LORENTZ FACTOR) FOR THE RELATIVISTIC CASE.
WBT=WB*T/GAM12
# WBT=WB*T
COSWT=numpy.cos(WBT)
SINWT=numpy.sin(WBT)
DZ=GAM12*(CZ1*SINWT+(EOVB-CY1)*(1.00-COSWT))/WB
# DZ=(CZ1*SINWT+(EOVB-CY1)*(1.00-COSWT))/WB
# ENERGY GAIN FROM THE E-FIELD OVER THE DRIFT STEP DZ
# (THE *100.00 LOOKS LIKE A CM->M UNIT CONVERSION -- TODO confirm)
E=E1+DZ*EFIELD*100.00
GAM2=(EMS+E)/EMS
BET2=math.sqrt(1.00-1.00/(GAM2*GAM2))
#913 print(3X,' AFTER STORE NREAL=',I10,' DZ=','%.3f' %,'E1=','%.3f' %,' COS
# /WT=','%.3f' %,' SINWT=','%.3f' %,' WBT=','%.3f' %,' CY1=','%.3f' %)
if(E < 0.00):
:
# IF(NPRINT == 0) WRITE(6,913)NREAL,DZ,E1,COSWT,SINWT,WBT,CY1
# NPRINT=1
# CLAMP UNPHYSICAL NEGATIVE ENERGY TO A SMALL POSITIVE VALUE
E=0.0010
# endif
# INSERT NEW ALGORITHM TO FIND IE FOR VARYING ENERGY STEP
# IE IS THE INDEX INTO THE 20000-POINT ENERGY GRID; FINE (1 EV) STEPS
# AT LOW ENERGY, COARSE ESTEP1/ESTEP2 STEPS ABOVE THE BREAK POINTS.
if(IMIP == 1):
:
IE=int(E/ESTEP)+1
else:
if(EFINAL <= 20000.):
:
IE=int(E/ESTEP)+1
else if(EFINAL <= 140000.) :
if(E <= 16000.):
:
IE=int(E)+1
else:
IE=16000+int((E-16000.)/ESTEP1)
# endif
else:
if(E <= 12000.):
:
IE=int(E)+1
else if(E <= 92000.) :
IE=12000+int((E-12000.)/ESTEP1)
else:
IE=16000+int((E-92000.)/ESTEP2)
# endif
# endif
# endif
IE=DMIN0(IE,J20000)
#
# TEST FOR REAL OR NULL COLLISION
#
R5=DRAND48(RDUM)
TEST1=TCF[IE]/TLIM
if(R5 <= TEST1):
GO TO 137
NNULL=NNULL+1
TEST2=TEMP[IE]/TLIM
if(R5 < TEST2):
:
# TEST FOR NULL LEVELS
if(NPLAST == 0):
GO TO 1
# SELECT WHICH NULL LEVEL FIRED BY LINEAR SEARCH OF THE
# CUMULATIVE FREQUENCY TABLE CFN AT ENERGY INDEX IE.
R2=DRAND48(RDUM)
I=0
888 I=I+1
if(CFN[IE][I]:
< R2) GO TO 888
# INCREMENT NULL LEVEL SUM
NEXCNUL=NEXCNUL+1
ICOLNN[I]=ICOLNN[I]+1
# STORE X Y Z T ID FOR MOLECULAR LIGHT EMISSION AND DISSOCIATION FROM
# NULL EXCITATION
# NOTE: SMALL APPROX USED POSITION OF PREVIOUS REAL COLLISION
XSTN[NEXCNUL]=X
YSTN[NEXCNUL]=Y
ZSTN[NEXCNUL]=Z
TSTN[NEXCNUL]=ST
IDNUL[NEXCNUL]=I
GO TO 1
else:
# NULL COLLISION: NO STATE CHANGE, CONTINUE THE FLIGHT
GO TO 1
# endif
#
# CALCULATE DIRECTION COSINES AND POSITIONS AT INSTANT BEFORE COLLISION
137 T2=T*T
if(E > EMAX):
EMAX=E
if(T > TMAX1):
TMAX1=T
TDASH=0.00
NREAL=NREAL+1
# CALC VELOCITY
# CX2 UNCHANGED: THE B-FIELD IS ALONG X, SO ONLY CY/CZ ROTATE.
CX2=CX1
CY2=(CY1-EOVB)*COSWT+CZ1*SINWT+EOVB
CZ2=CZ1*COSWT-(CY1-EOVB)*SINWT
# CALC DIRECTION COSINES
VTOT=math.sqrt(CX2*CX2+CY2*CY2+CZ2*CZ2)
DCX2=CX2/VTOT
DCY2=CY2/VTOT
DCZ2=CZ2/VTOT
# CALC NEW POSITION
X=X+CX1*T
Y=Y+EOVB*T+GAM12*((CY1-EOVB)*SINWT+CZ1*(1.00-COSWT))/WB
Z=Z+DZ
# UPDATE MEAN LORENTZ FACTOR FOR THE NEXT FLIGHT SEGMENT
GAM12=(GAM1+GAM2)/2.00
ST=ST+T
IT=int(T+1.00)
IT=DMIN0[IT][J300]
TIME[IT]=TIME[IT]+1.00
# ---------------------------------------------------------------------
# DETERMINATION OF REAL COLLISION TYPE
# (LINEAR SEARCH OF THE CUMULATIVE FREQUENCY TABLE CF AT INDEX IE)
# ---------------------------------------------------------------------
R2=DRAND48(RDUM)
I=0
140 I=I+1
if(CF[IE][I]:
< R2) GO TO 140
#************************************************************
# CHECK IF BREMSSTRAHLUNG
if(IZBR[I]:
!= 0 and LBRM == 1) :
NFLGBRMM=1
IPT=IARRY[I]
ICOLL[IPT]=ICOLL[IPT]+1
ICOLN[I]=ICOLN[I]+1
# FIND WHICH GAS (KNGS) THIS COLLISION LEVEL BELONGS TO
DO 141 KNGS=1,NGAS
if(IPT == (KNGS*5):
-1) GO TO 142
141 CONTINUE
142 IATOMNO=IZBR[I]
CALL BREMS(IATOMNO,E,DCX2,DCY2,DCZ2,EOUT,EDCX,EDCY,EDCZ,EGAMMA,GDCX,GDCY,GDCZ)
NBREM[KNGS]=NBREM[KNGS]+1
EBRTOT[KNGS]=EBRTOT[KNGS]+EGAMMA
# WRITE(6,668) EGAMMA,J11
# 668 print(' BREM EGAMMA=','%.4f' % ,' EVENT NO=',I5)
# GET NEW DRCOS DRCOSY DRCOSX AND ENERGY OF ELECTRON
E1=EOUT
DCX1=EDCX
DCY1=EDCY
DCZ1=EDCZ
# RUN BREMSSTRAHLUNG GAMMA THROUGH CASCADE : STORE CONVERTED
# ELECTRONS IN COMMON/CASRSB/
#
CALL BREMSCASC(J11,EGAMMA,X,Y,Z,ST,GDCX,GDCY,GDCZ,ILOW)
# BREMSSTRAHLUNG ENERGY TOO LOW TO IONISE
if(ILOW == 1):
GO TO 190
#
# STORE BREMSSTRAHLUNG DATA IN CLUSTER STORE
#
DO 890 KBR=1,IEVNTLB
NCLUS=NCLUS+1
if(NCLUS > 150000):
:
WRITE(6,546) NCLUS,NREAL
sys.exit()
# endif
ES[NCLUS]=ECASB[KBR]
XS[NCLUS]=XCASB[KBR]
YS[NCLUS]=YCASB[KBR]
ZS[NCLUS]=ZCASB[KBR]
TS[NCLUS]=TTB1[KBR]
DCX[NCLUS]=DRXB[KBR]
DCY[NCLUS]=DRYB[KBR]
DCZ[NCLUS]=DRZB[KBR]
NFLGFC[NCLUS]=NFLGFB[KBR]+NFLGHIGH
NFLGPPC[NCLUS]=NFLGPPB[KBR]
NFLGBRMC[NCLUS]=2
890 CONTINUE
if(NFLGFC[NCLUS]:
> NFLGHIGH) NFLGHIGH=NFLGFC[NCLUS]
GO TO 190
# endif
# FALL-THROUGH: NON-BREMSSTRAHLUNG REAL COLLISION
891 CONTINUE
#****************************************************************
# S1=RGAS[I]
# KINEMATIC MASS-RATIO FACTOR SCALED BY THE LORENTZ FACTOR GAM2
S1=1.00+GAM2*(RGAS[I]-1.00)
EI=EIN[I]
if(E < EI):
:
EI=E-0.00010
# endif
# IPN = 0 : ELASTIC/EXCITATION, NO ELECTRON NUMBER CHANGE
if(IPN[I]:
== 0) GO TO 666
# ATTACHMENT
if(IPN[I]:
== -1) :
# ELECTRON CAPTURED: COUNT IT AS TERMINATED AND FETCH NEXT ONE
NETOT=NETOT+1
NITOT=NITOT+1
NELEC=NELEC+1
NEGION=NEGION+1
IPT=IARRY[I]
ICOLL[IPT]=ICOLL[IPT]+1
ICOLN[I]=ICOLN[I]+1
IT=int(T+1.00)
IT=DMIN0[IT][J300]
TIME[IT]=TIME[IT]+1.00
GO TO 335
# endif
EISTR=EI
if(IONMODEL[I]:
> 0) :
# CALCULATE SECONDARY ENERGY,ESEC,IN IONISATION COLLISION USING
# FIVE DIFFERENT MODELS
CALL IONSPLIT(I,E,EI,ESEC)
GO TO 544
# endif
R9=DRAND48(RDUM)
# USE OPAL PETERSON AND BEATY SPLITTING FACTOR.
ESEC=WPL[I]*TAN(R9*ATAN((E-EI)/(2.00*WPL[I])))
ESEC=WPL[I]*(ESEC/WPL[I])**0.9524
544 CONTINUE
# TOTAL ENERGY LOSS OF THE PRIMARY = IONISATION ENERGY + SECONDARY K.E.
EI=ESEC+EI
# STORE POSITION ,ENERGY, DIRECTION COSINES AND TIME OF GENERATION
# OF SECONDARY IONISATION ELECTRON
NCLUS=NCLUS+1
NMXADD=MAX[NCLUS][NMXADD]
if(NCLUS > 150000):
:
WRITE(6,546) NCLUS,NREAL
546 print(2X,' def STOPPED: . NCLUS=',I7,' NREAL=',I10)
sys.exit()
# endif
XS[NCLUS]=X
YS[NCLUS]=Y
ZS[NCLUS]=Z
TS[NCLUS]=ST
ES[NCLUS]=ESEC
NFLGFC[NCLUS]=NFLGFF
NFLGPPC[NCLUS]=NFLGPPP
NFLGBRMC[NCLUS]=NFLGBRMM
# NTMPFLG/NCLTMP DEFER THE SECONDARY'S DIRECTION ASSIGNMENT TO THE
# FREE-KINEMATICS BLOCK AFTER THE PRIMARY SCATTERING ANGLE IS KNOWN.
NTMPFLG=1
NCLTMP=NCLUS
# RANDOMISE SECONDARY ELECTRON DIRECTION
# R3=drand48(RDUM)
# F3=1.0-2.00*R3
# THETA0=DACOS(F3)
# F6=DCOS(THETA0)
# F5=DSIN(THETA0)
# R4=drand48(RDUM)
# PHI0=F4*R4
# F8=DSIN(PHI0)
# F9=DCOS(PHI0)
# DCX[NCLUS]=F9*F5
# DCY[NCLUS]=F8*F5
# DCZ[NCLUS]=F6
#*********************************************************
if(IECASC == 0):
GO TO 333
if(LEGAS[I]:
== 0) GO TO 333
# USE COMPLETE CASCADE FOR ELECTRON IONISATION
KG1=NEGAS[I]
LG1=LEGAS[I]
IGSHEL=IESHELL[I]
CALL CASCADEE(J11,KG1,LG1,X,Y,Z,ST,ESEC,IGSHEL)
#
# STORE CASCADE IN CLUSTER STORE
#
ETSUM=0.0
DO 844 KBR=1,IEVENTE
NCLUS=NCLUS+1
if(NCLUS > 150000):
:
WRITE(6,546) NCLUS,NREAL
sys.exit()
# endif
ES[NCLUS]=ECASE[KBR]
ETSUM=ETSUM+ES[NCLUS]
XS[NCLUS]=XCASE[KBR]
YS[NCLUS]=YCASE[KBR]
ZS[NCLUS]=ZCASE[KBR]
TS[NCLUS]=TCASE[KBR]
DCX[NCLUS]=DRXCE[KBR]
DCY[NCLUS]=DRYCE[KBR]
DCZ[NCLUS]=DRZCE[KBR]
NFLGFC[NCLUS]=NFLGFE[KBR]+NFLGHIGH
NFLGPPC[NCLUS]=NFLGPPE[KBR]
NFLGBRMC[NCLUS]=NFLGBRMM
844 CONTINUE
if(NFLGFC[NCLUS]:
> NFLGHIGH) NFLGHIGH=NFLGFC[NCLUS]
GO TO 666
#*********************************************************
# STORE POSSIBLE SHELL EMISSIONS AUGER OR FLUORESCENCE
# (ONLY FOR INNER-SHELL IONISATIONS WITH EISTR > 30 EV)
333 if(EISTR > 30.0) :
# TEST IF FLUORESCENCE EMISSION (WKLM = FLUORESCENCE YIELD)
IFLTST=0:
if(WKLM[I]:
> 0.0) :
R9=DRAND48(RDUM)
if(R9 < WKLM[I]:
) IFLTST=1
# endif
if(IFLTST == 0):
:
# AUGER EMISSION WITHOUT FLUORESCENCE
# NC0 AUGER ELECTRONS SHARE EC0 EQUALLY, EMITTED ISOTROPICALLY
# AT THE COLLISION POINT.
NAUG=NC0[I]
EAVAUG=EC0[I]/float(NAUG)
DO 700 JFL=1,NC0[I]
NCLUS=NCLUS+1
XS[NCLUS]=X
YS[NCLUS]=Y
ZS[NCLUS]=Z
TS[NCLUS]=ST
NFLGFC[NCLUS]=NFLGFF
NFLGPPC[NCLUS]=NFLGPPP
NFLGBRMC[NCLUS]=NFLGBRMM
ES[NCLUS]=EAVAUG
R3=DRAND48(RDUM)
F3=1.0-2.00*R3
THETA0=numpy.arccos(F3)
F6=numpy.cos(THETA0)
F5=numpy.sin(THETA0)
R4=DRAND48(RDUM)
PHI0=F4*R4
F8=numpy.sin(PHI0)
F9=numpy.cos(PHI0)
DCX[NCLUS]=F9*F5
DCY[NCLUS]=F8*F5
DCZ[NCLUS]=F6
700 CONTINUE
else:
# AUGER EMISSION AND FLUORESCENCE
if(NG2[I]:
== 0) GO TO 702
# NG2 LOCAL AUGER ELECTRONS SHARING ENERGY EG2
NAUG=NG2[I]
EAVAUG=EG2[I]/float(NAUG)
DO 701 JFL=1,NG2[I]
NCLUS=NCLUS+1
XS[NCLUS]=X
YS[NCLUS]=Y
ZS[NCLUS]=Z
NFLGFC[NCLUS]=NFLGFF
NFLGPPC[NCLUS]=NFLGPPP
NFLGBRMC[NCLUS]=NFLGBRMM
TS[NCLUS]=ST
ES[NCLUS]=EAVAUG
R3=DRAND48(RDUM)
F3=1.0-2.00*R3
THETA0=numpy.arccos(F3)
F6=numpy.cos(THETA0)
F5=numpy.sin(THETA0)
R4=DRAND48(RDUM)
PHI0=F4*R4
F8=numpy.sin(PHI0)
F9=numpy.cos(PHI0)
DCX[NCLUS]=F9*F5
DCY[NCLUS]=F8*F5
DCZ[NCLUS]=F6
701 CONTINUE
702 if(NG1[I] == 0) GO TO 704
# NG1 ELECTRONS FROM ABSORPTION OF THE FLUORESCENCE PHOTON,
# DISPLACED BY AN EXPONENTIALLY SAMPLED FLIGHT DISTANCE DFL
# (MEAN DSTFL) IN AN ISOTROPIC DIRECTION.
NAUG=NG1[I]
EAVAUG=EG1[I]/float(NAUG)
R9=DRAND48(RDUM)
DFL=-math.log(R9)*DSTFL[I]
DO 703 JFL=1,NG1[I]
NCLUS=NCLUS+1
R3=DRAND48(RDUM)
THEFL=numpy.arccos(1.0-2.00*R3)
R4=DRAND48(RDUM)
PHIFL=F4*R4
XS[NCLUS]=X+DFL*numpy.sin(THEFL)*numpy.cos(PHIFL)
YS[NCLUS]=Y+DFL*numpy.sin(THEFL)*numpy.sin(PHIFL)
ZS[NCLUS]=Z+DFL*numpy.cos(THEFL)
NFLGFC[NCLUS]=NFLGHIGH+1
NFLGPPC[NCLUS]=NFLGPPP
NFLGBRMC[NCLUS]=NFLGBRMM
TS[NCLUS]=ST
ES[NCLUS]=EAVAUG
R3=DRAND48(RDUM)
F3=1.0-2.00*R3
THETA0=numpy.arccos(F3)
F6=numpy.cos(THETA0)
F5=numpy.sin(THETA0)
R4=DRAND48(RDUM)
PHI0=F4*R4
F8=numpy.sin(PHI0)
F9=numpy.cos(PHI0)
DCX[NCLUS]=F9*F5
DCY[NCLUS]=F8*F5
DCZ[NCLUS]=F6
NFLGHIGH=NFLGFC[NCLUS]
703 CONTINUE
704 CONTINUE
# endif
# endif
#
# GENERATE SCATTERING ANGLES AND UPDATE LABORATORY COSINES AFTER
# COLLISION ALSO UPDATE ENERGY OF ELECTRON.
#
666 IPT=IARRY[I]
ICOLL[IPT]=ICOLL[IPT]+1
ICOLN[I]=ICOLN[I]+1
# IF EXCITATION : ADD PROBABILITY ,PENFRA(1,I), OF TRANSFER TO GIVE
# IONISATION OF THE OTHER GASES IN MIXTURE
if(IPEN == 0 or NGAS == 1):
GO TO 5
if(PENFRA[1][I] != 0.0):
:
RAN=DRAND48(RDUM)
if(RAN > PENFRA[1][I]):
GO TO 5
NCLUS=NCLUS+1
# ENTER HERE POSSIBLE DELOCALISATION LENGTH FOR PENNING TRANSFER
if(PENFRA[2][I] == 0.0):
:
XS[NCLUS]=X
YS[NCLUS]=Y
ZS[NCLUS]=Z
NFLGFC[NCLUS]=NFLGFF
NFLGPPC[NCLUS]=NFLGPPP
NFLGBRMC[NCLUS]=NFLGBRMM
GO TO 667
# endif
# DISPLACE THE PENNING ELECTRON BY AN EXPONENTIAL DISTANCE
# (MEAN PENFRA(2,I)) WITH A RANDOM SIGN ON EACH AXIS.
ASIGN=1.0
RAN=DRAND48(RDUM)
RAN1=DRAND48(RDUM)
if(RAN1 < 0.5):
ASIGN=-ASIGN
XS[NCLUS]=X-math.log(RAN)*PENFRA[2][I]*ASIGN
RAN=DRAND48(RDUM)
RAN1=DRAND48(RDUM)
if(RAN1 < 0.5):
ASIGN=-ASIGN
YS[NCLUS]=Y-math.log(RAN)*PENFRA[2][I]*ASIGN
RAN=DRAND48(RDUM)
RAN1=DRAND48(RDUM)
if(RAN1 < 0.5):
ASIGN=-ASIGN
ZS[NCLUS]=Z-math.log(RAN)*PENFRA[2][I]*ASIGN
667 RAN=DRAND48(RDUM)
# EXPONENTIAL TIME DELAY WITH MEAN PENFRA(3,I)
TS[NCLUS]=ST-math.log(RAN)*PENFRA[3][I]
# ASSIGN EXCESS ENERGY OF 1EV TO PENNING CREATED ELECTRON
ES[NCLUS]=1.0
DCX[NCLUS]=DCX1
DCY[NCLUS]=DCY1
DCZ[NCLUS]=DCZ1
GO TO 6
# endif
# GO TO 6
# CALCULATE SUM OF EXCITATION PER CLUSTER AND STORE EXCITATION X Y Z T
5 if(IPN[I] == 0) :
if((RGAS[I]:
*EIN[I]) > 4.0) :
KEXC=KEXC+1
if(KEXC > 150000):
:
WRITE(6,548) KEXC
548 print(2X,' def STOPPED: . KEXC=',I7)
sys.exit()
# endif
# FIND GAS IN WHICH EXCITATION OCCURED AND INCREMENT COUNTER
if(I <= IDG1):
:
NGEXC1=NGEXC1+1
else if(I <= IDG2) :
NGEXC2=NGEXC2+1
else if(I <= IDG3) :
NGEXC3=NGEXC3+1
else if(I <= IDG4) :
NGEXC4=NGEXC4+1
else if(I <= IDG5) :
NGEXC5=NGEXC5+1
else if(I <= IDG6) :
NGEXC6=NGEXC6+1
else:
WRITE(6,9911)
9911 print(' def STOPPED: BAD GAS ID IN MONTE')
sys.exit()
# endif
NEXCTOT=NEXCTOT+1
NSTEXC=NSTEXC+1
XSTEXC[KEXC]=X
YSTEXC[KEXC]=Y
ZSTEXC[KEXC]=Z
TSTEXC[KEXC]=ST
# endif
# endif
6 S2=(S1*S1)/(S1-1.00)
# ANISOTROPIC SCATTERING
# INDEX = 1 : CAPITELLI-LONGO STYLE TABLE (ANGCT/PSCT)
# INDEX = 2 : SCREENED-PARAMETER FORMULA WITH EPSI = PSCT
# OTHERWISE : ISOTROPIC  -- TODO confirm model names against setup code
R3=DRAND48(RDUM)
if(INDEX[I]:
== 1) :
R31=DRAND48(RDUM)
F3=1.00-R3*ANGCT[IE][I]
if(R31 > PSCT[IE][I]:
) F3=-F3
else if(INDEX[I] == 2) :
EPSI=PSCT[IE][I]
F3=1.00-(2.00*R3*(1.00-EPSI)/(1.00+EPSI*(1.00-2.00*R3)))
else:
# ISOTROPIC SCATTERING
F3=1.00-2.00*R3
# endif
THETA0=numpy.arccos(F3)
R4=DRAND48(RDUM)
PHI0=F4*R4
F8=numpy.sin(PHI0)
F9=numpy.cos(PHI0)
if(E < EI):
EI=0.00
# POST-COLLISION ENERGY AND SCATTERING KINEMATICS
ARG1=1.00-S1*EI/E
ARG1=DMAX1[ARG1][SMALL]
D=1.00-F3*math.sqrt(ARG1)
E1=E*(1.00-EI/(S1*E)-2.00*D/S2)
E1=DMAX1[E1][SMALL]
Q=math.sqrt((E/E1)*ARG1)/S1
Q=DMIN1[Q][1.00]
THETA=numpy.arcsin(Q*numpy.sin(THETA0))
F6=numpy.cos(THETA)
U=(S1-1.00)*(S1-1.00)/ARG1
CSQD=F3*F3
if(F3 < 0.00 and CSQD > U):
F6=-1.00*F6
F5=numpy.sin(THETA)
DCZ2=DMIN1[DCZ2][1.00]
ARGZ=math.sqrt(DCX2*DCX2+DCY2*DCY2)
# DEGENERATE CASE: INCOMING DIRECTION ALONG Z, ROTATION IS TRIVIAL
if(ARGZ == 0.00):
:
DCZ1=F6
DCX1=F9*F5
DCY1=F8*F5
if(NTMPFLG == 1):
:
# USE FREE KINEMATICS FOR IONISATION SECONDARY ANGLES
F5S=F5*math.sqrt(E1/ES[NCLTMP])
if(F5S > 1.0):
F5S=1.0
THSEC=numpy.arcsin(F5S)
F5S=numpy.sin(THSEC)
F6S=numpy.cos(THSEC)
if(F6 < 0.0):
F6S=-F6S
# SECONDARY EMITTED AT OPPOSITE AZIMUTH TO THE PRIMARY
PHIS=PHI0+API
if(PHIS > F4):
PHIS=PHI0-F4
F8S=numpy.sin(PHIS)
F9S=numpy.cos(PHIS)
DCZ[NCLTMP]=F6S
DCX[NCLTMP]=F9S*F5S
DCY[NCLTMP]=F8S*F5S
NTMPFLG=0
# endif
GO TO 190
# endif
# GENERAL CASE: ROTATE SCATTERING ANGLES INTO THE LAB FRAME
DCZ1=DCZ2*F6+ARGZ*F5*F8
DCY1=DCY2*F6+(F5/ARGZ)*(DCX2*F9-DCY2*DCZ2*F8)
DCX1=DCX2*F6-(F5/ARGZ)*(DCY2*F9+DCX2*DCZ2*F8)
if(NTMPFLG == 1):
:
# USE FREE KINEMATICS FOR IONISATION SECONDARY ANGLES
F5S=F5*math.sqrt(E1/ES[NCLTMP])
if(F5S > 1.0):
F5S=1.0
THSEC=numpy.arcsin(F5S)
F5S=numpy.sin(THSEC)
F6S=numpy.cos(THSEC)
if(F6 < 0.0):
F6S=-F6S
PHIS=PHI0+API
if(PHIS > F4):
PHIS=PHI0-F4
F8S=numpy.sin(PHIS)
F9S=numpy.cos(PHIS)
DCZ[NCLTMP]=DCZ2*F6S+ARGZ*F5S*F8S
DCY[NCLTMP]=DCY2*F6S+(F5S/ARGZ)*(DCX2*F9S-DCY2*DCZ2*F8S)
DCX[NCLTMP]=DCX2*F6S-(F5S/ARGZ)*(DCY2*F9S+DCX2*DCZ2*F8S)
NTMPFLG=0
# endif
190 CONTINUE
# RECOMPUTE SPEED FROM THE POST-COLLISION ENERGY
GAM1=(EMS+E1)/EMS
BET1=math.sqrt(1.00-1.00/(GAM1*GAM1))
VTOT=BET1*VC*1.D-12
# VTOT=CONST9*math.sqrt(E1)
CX1=DCX1*VTOT
CY1=DCY1*VTOT
CZ1=DCZ1*VTOT
# TEST IF ELECTRON IS THERMALISED
if(E1 > ETHRM):
GO TO 1
# STORE POSITION AND TIME OF THERMALISED ELECTRON
191 CONTINUE
K1=K1+1
XST[K1]=X
YST[K1]=Y
ZST[K1]=Z
TST[K1]=ST
NFGF[K1]=NFLGFF
NFGPP[K1]=NFLGPPP
NFGBR[K1]=NFLGBRMM
TTIME[K1]=ST-TLAST
NELEC=NELEC+1
NETOT=NETOT+1
335 if(K1 == 150000) GO TO 889
if(NELEC == (NCLUS+1):
) :
# LAST ELECTRON IN CLUSTER, DO STATISTICS ON CLUSTER
CALL STATS(J11,J1)
GO TO 210
# endif
# GET NEW IONISATION ELECTRON FROM STORE
X=XS[NELEC]
Y=YS[NELEC]
Z=ZS[NELEC]
ST=TS[NELEC]
NFLGFF=NFLGFC[NELEC]
NFLGPPP=NFLGPPC[NELEC]
NFLGBRMM=NFLGBRMC[NELEC]
TLAST=TS[NELEC]
E1=ES[NELEC]
DCX1=DCX[NELEC]
DCY1=DCY[NELEC]
DCZ1=DCZ[NELEC]
# ALREADY BELOW THERMAL CUT: STORE DIRECTLY WITHOUT TRACKING
if(E1 < ETHRM):
GO TO 191
GAM1=(EMS+E1)/EMS
BET1=math.sqrt(1.00-1.00/(GAM1*GAM1))
VTOT=BET1*VC*1.D-12
CX1=DCX1*VTOT
CY1=DCY1*VTOT
CZ1=DCZ1*VTOT
GO TO 1
# MAIN LOOP # end
210 CONTINUE
# RESET NUMBER OF EVENTS FOR BAD EVENTS
if(IMIP > 2):
NDELTA=NDELTA-IBADTOT
#
WRITE(6,887) EMAX,NEOVFL
887 print(' EMAX=','%.7f' % ,' NEOVFL =',I5)
if(EMAX > EFINAL):
:
WRITE(6,989) EFINAL,EMAX
989 print('INCREASE ENERGY LIMIT FROM','%.6f' % ,' EV TO AT LEAST','%.6f' % ,' EV.')
sys.exit()
# endif
return
# CLUSTER STORE OVERFLOW: REPORT HOW MANY SECONDARIES WERE LEFT
889 NLEFT=NCLUS-NELEC
WRITE(6,992) NPRIME,NLEFT,NCLUS
992 print(3(/),' WARNING STOPPED: AFTER NPRIME=',I6,' LAST PRIMARY HAS AT LEAST ',I6,' SECONDARIES LEFT TO TRACK,OUT OF ',I6,' ELECTRONS ALREADY IN CLUSTER')
sys.exit()
return
# end
def MONTEFC():
# IMPLICIT #real*8 (A-H,O-Z)
# IMPLICIT #integer*8 (I-N)
COMMON/INPT/NGAS,NSTEP,NANISO,EFINAL,ESTEP,AKT,ARY,TEMPC,TORR,IPEN
COMMON/INPT1/NDVEC
COMMON/CNSTS1/CONST1,CONST2,CONST3,CONST4,CONST5
COMMON/SETP/TMAX,SMALL,API,ESTART,THETA,PHI,TCFMAX(10),TCFMAX1,RSTART,EFIELD,ETHRM,ECUT,NDELTA,IMIP,IWRITE
COMMON/BFLD/EOVB,WB,BTHETA,BMAG
COMMON/LARGE/CF(20000,512),EIN(512),TCF(20000),IARRY(512), RGAS(512),IPN(512),WPL(512),IZBR(512),IPLAST,PENFRA[3,512]
COMMON/LARGEN/CFN(20000,60),TCFN(20000),SCLENUL(60),NPLAST
COMMON/OUTPT/ICOLL(30),NETOT,NPRIME,TMAX1,TIME(300),NNULL, NITOT,ICOLN(512),ICOLNN(60),NREAL,NEXCTOT
COMMON/RLTVY/BET[2000],GAM(20000),VC,EMS
COMMON/STTS/XST(150000),YST(150000),ZST(150000),TST(150000),TTIME(150000),NFGF(150000),NFGPP(150000),NFGBR(150000),NELEC,NEGION,EST1,EST2
COMMON/STEXC/XSTEXC(150000),YSTEXC(150000),ZSTEXC(150000),TSTEXC(150000),NSTEXC
COMMON/STEXCNUL/XSTN(150000),YSTN(150000),ZSTN(150000),TSTN(150000),IDNUL(150000),NEXCNUL
COMMON/IONC/DOUBLE(6,20000),CMINIXSC[6],CMINEXSC[6],ECLOSS[6],WPLN[6],ICOUNT,AVPFRAC(3,6)
COMMON/IONFL/NC0(512),EC0(512),NG1(512),EG1(512),NG2(512),EG2(512),WKLM(512),DSTFL(512)
COMMON/IONMOD/ESPLIT(512,20),IONMODEL(512)
COMMON/ANIS/PSCT(20000,512),ANGCT(20000,512),INDEX(512),NISO
COMMON/CASRS/ECAS(400),XCAS(400),YCAS(400),ZCAS(400),DRXS(400),DRYS(400),DRZS(400),TT1(400),NFLGF(400),NFLGPP(400),IEVNTL
COMMON/COMP/LCMP,LCFLG,LRAY,LRFLG,LPAP,LPFLG,LBRM,LBFLG,LPEFLG
COMMON/BREMG/EBRGAM(10),BRDCOSX(10),BRDCOSY(10),BRDCOSZ[10],BRX(10),BRY(10),BRZ[10],BRT(10),EBRTOT[6],NBREM[6]
COMMON/CASRSB/ECASB[400],XCASB[400],YCASB[400],ZCASB[400],DRXB[400],DRYB[400],DRZB[400],TTB1(400),NFLGFB[400],NFLGPPB[400],IEVNTLB
COMMON/CASRSE/ECASE(400),XCASE(400),YCASE(400),ZCASE(400),DRXCE(400),DRYCE(400),DRZCE(400),TCASE(400),NFLGFE(400),NFLGPPE(400),IEVENTE
COMMON/ECASC/NEGAS(512),LEGAS(512),IESHELL(512),IECASC
COMMON/IDEXC/NGEXC1,NGEXC2,NGEXC3,NGEXC4,NGEXC5,NGEXC6,IDG1,IDG2,IDG3,IDG4,IDG5,IDG6
DIMENSION XS(150000),YS(150000),ZS(150000),TS(150000),ES(150000),DCX(150000),DCY(150000),DCZ[150000],NFLGFC(150000),NFLGPPC(150000),NFLGBRMC(150000)
DIMENSION TEMP(20000)
# -------------------------------------------------------------------
# RELATIVISTIC VERSION
# CALCULATES COLLISION EVENTS AND UPDATES DIFFUSION AND VELOCITY.
# THIS ROUTINE HANDLES TERMINATIONS AT FIXED DRIFT TIMES.
# SOLVES MOTION IN COORDINATE SYSTEM WITH BFIELD ALIGNED TO X-AXIS
# ELECTRIC FIELD AT AN ANGLE BTHETA IN THE X-Z PLANE.
# THE RESULTS FOR THE VELOCITY VECTORS ARE :
# ROTATED INTO THE STANDARD COORDINATE FRAME WITH THE ELECTRIC FIELD
# ALONG THE Z-AXIS AND THE BFIELD AT AN ANGLE BTHETA TO THE ELECTRIC
# FIELD IN THE X-Z PLANE
# -------------------------------------------------------------------
# VARYING ENERGY STEPS
if(EFINAL <= 140000.):
:
ESTEP1=(EFINAL-16000.0)/float(4000)
else:
ESTEP1=20.0
ESTEP2=(EFINAL-92000.0)/float(4000)
# endif
NPRINT=0
J20000=20000
J300=300
API=numpy.arccos(-1.00)
SMALL=1.0D-20
EMAX=0.00
TMAX1=0.00
RDUM=RSTART
CONST9=CONST3*0.010
DO 25 I=1,300
25 TIME[I]=0.00
DO 26 I=1,30
26 ICOLL[I]=0
DO 27 I=1,512
27 ICOLN[I]=0
NREAL=0
NNULL=0
NETOT=0
NEXCTOT=0
NITOT=0
NMXADD=0
NTMPFLG=0
# CALC ROTATION MATRIX ANGLES
RCS=numpy.cos((BTHETA-90.00)*API/180.00)
RSN=numpy.sin((BTHETA-90.00)*API/180.00)
#
RTHETA=BTHETA*API/180.00
EFZ100=EFIELD*100.00*numpy.sin(RTHETA)
EFX100=EFIELD*100.00*numpy.cos(RTHETA)
F1=EFIELD*CONST2*numpy.cos(RTHETA)
F4=2.00*API
EOVBR=EOVB*numpy.sin(RTHETA)
THETA1=THETA
PHI1=PHI
# CALCULATE MAXIMUM COLLISION FREQUENCY
TLIM=0.0
DO 111 J=1,20000
TEMP[J]=TCFN[J]+TCF[J]
if(TLIM < TEMP[J]:
) TLIM=TEMP[J]
111 CONTINUE
NEOVFL=0
J1=0
# START OF PRIMARY EVENT LOOP
DO 210 J11=1,NDELTA
J1=J1+1
NPRIME=J1
NGEXC1=0
NGEXC2=0
NGEXC3=0
NGEXC4=0
NGEXC5=0
NGEXC6=0
# INITIAL DIRECTION COSINES
if(THETA1 == (API/2.0):
or NDVEC != 1) :
# ONLY ALLOW CASE WHERE DELTA IS ALONG E-FIELD DIRECTION
WRITE(6,22)
22 print(2(/),3X,'def STOPPED: ONLY ALLOWED TO HAVE DELTA ELECTRON PRALLEL TO E-FIELD IN CASE WITH ARBITRARY ANGLE FOR B-FIELD')
sys.exit()
# endif
# FIX DELTA TO E - FIELD DIRECTION
PHI1=0.00
THETA1=(API/2.0)-RTHETA
DCZ1=numpy.cos(THETA1)
DCX1=numpy.sin(THETA1)*numpy.cos(PHI1)
DCY1=numpy.sin(THETA1)*numpy.sin(PHI1)
NFLGFF=0
NFLGPPP=0
NFLGBRMM=0
NFLGHIGH=0
EST1=ESTART
# INITIAL VELOCITY
E1=ESTART
GAM1=(EMS+E1)/EMS
GAM12=GAM1
BET1=math.sqrt(1.00-1.00/(GAM1*GAM1))
VTOT=BET1*VC*1.0D-12
# VTOT=CONST9*math.sqrt(E1)
CX1=DCX1*VTOT
CY1=DCY1*VTOT
CZ1=DCZ1*VTOT
X=0.00
Y=0.00
Z=0.00
K1=0
KEXC=0
NSTEXC=0
NEXCNUL=0
NCLUS=0
NELEC=0
NEGION=0
TLAST=0.00
ST=0.00
TDASH=0.00
if(IMIP == 2):
GO TO 1
if(IMIP > 2):
:
# READIN FIRST ELECTRON FROM BETA DECAY OR X-RAY UNTHERMALISED CLUSTERS
CALL CASRES(J11,IBADTOT,IBAD1)
# SKIP BAD EVENT
if(IBAD1 == 1):
:
J1=J1-1
GO TO 210
# endif
else if(IMIP == 1) :
# READ IN FIRST ELECTRON FROM MIP INTERACTION
CALL CASREM(J11)
EST1=ECAS[1]
EST2=EST1
# endif
X=XCAS[1]
Y=YCAS[1]
Z=ZCAS[1]
ST=TT1[1]
TS[1]=TT1[1]
E1=ECAS[1]
DCZ1=DRZS[1]
DCY1=DRYS[1]
DCX1=DRXS[1]
NFLGFF=NFLGF[1]
NFLGPPP=NFLGPP[1]
NFLGBRMM=0
NFLGHIGH=NFLGFF
GAM1=(EMS+E1)/EMS
BET1=math.sqrt(1.00-1.00/(GAM1*GAM1))
VTOT=BET1*VC*1.0D-12
# VTOT=CONST9*math.sqrt(E1)
CX1=DCX1*VTOT
CY1=DCY1*VTOT
CZ1=DCZ1*VTOT
# PUT REMAINDER OF ELECTRONS INTO CLUSTER STORE
ISDUM=0
DO 35 IST=2,IEVNTL
ISDUM=ISDUM+1
XS[ISDUM]=XCAS[IST]
YS[ISDUM]=YCAS[IST]
ZS[ISDUM]=ZCAS[IST]
TS[ISDUM]=TT1[IST]
ES[ISDUM]=ECAS[IST]
DCX[ISDUM]=DRXS[IST]
DCY[ISDUM]=DRYS[IST]
DCZ[ISDUM]=DRZS[IST]
NFLGFC[ISDUM]=NFLGF[IST]
NFLGPPC[ISDUM]=NFLGPP[IST]
NFLGBRMC[ISDUM]=0
NCLUS=ISDUM
if(NFLGFC[IST]:
> NFLGHIGH) NFLGHIGH=NFLGFC[IST]
35 CONTINUE
GAM12=GAM1
# START OF LOOP FOR NEW ELECTRONS
1 CONTINUE
R1=DRAND48(RDUM)
T=-math.log(R1)/TLIM+TDASH
TDASH=T
WBT=WB*T/GAM12
# WBT=WB*T
COSWT=numpy.cos(WBT)
SINWT=numpy.sin(WBT)
DZ=GAM12*(CZ1*SINWT+(EOVBR-CY1)*(1.00-COSWT))/WB
# DZ=(CZ1*SINWT+(EOVBR-CY1)*(1.00-COSWT))/WB
DX=CX1*T+F1*T*T/GAM12
# DX=CX1*T+F1*T*T
E=E1+DZ*EFZ100+DX*EFX100
GAM2=(EMS+E)/EMS
BET2=math.sqrt(1.00-1.00/(GAM2+GAM2))
if(E < 0.00):
:
E=0.0010
# endif
# INSERT NEW ALGORITHM TO FIND IE FOR VARYING ENERGY STEP
if(IMIP == 1):
:
IE=int(E/ESTEP)+1
else:
if(EFINAL <= 20000.):
:
IE=int(E/ESTEP)+1
else if(EFINAL <= 140000.) :
if(E <= 16000.):
:
IE=int(E)+1
else:
IE=16000+int((E-16000.)/ESTEP1)
# endif
else:
if(E <= 12000.):
:
IE=int(E)+1
else if(E <= 92000.) :
IE=12000+int((E-12000.)/ESTEP1)
else:
IE=16000+int((E-92000.)/ESTEP2)
# endif
# endif
# endif
IE=DMIN0(IE,J20000)
#
# TEST FOR #real OR NULL COLLISION
#
R5=DRAND48(RDUM)
TEST1=TCF[IE]/TLIM
if(R5 <= TEST1):
GO TO 137
NNULL=NNULL+1
TEST2=TEMP[IE]/TLIM
if(R5 < TEST2):
:
# TEST FOR NULL LEVELS
if(NPLAST == 0):
GO TO 1
R2=DRAND48(RDUM)
I=0
888 I=I+1
if(CFN[IE][I]:
< R2) GOTO 888
# INCREMENT NULL LEVEL SUM
NEXCNUL=NEXCNUL+1
ICOLNN[I]=ICOLNN[I]+1
# STORE X Y Z T ID FOR MOLECULAR LIGHT EMISSION AND DISSOCIATION FROM
# NULL EXCITATION
# NOTE: SMALL APPROX USED POSITION OF PREVIOUS #real COLLISION
XSTN[NEXCNUL]=X
YSTN[NEXCNUL]=Y
ZSTN[NEXCNUL]=Z
TSTN[NEXCNUL]=ST
IDNUL[NEXCNUL]=I
GO TO 1
else:
# NULL
GO TO 1
# endif
#
# CALCULATE DIRECTION COSINES AND POSITIONS AT INSTANT BEFORE COLLISION
137 T2=T*T
if(E > EMAX):
EMAX=E
if(T > TMAX1):
TMAX1=T
TDASH=0.00
NREAL=NREAL+1
# CALC VELOCITY
# CX2=CX1+2.0*F1*T
CX2=CX1+2.0*F1*T/GAM12
CY2=(CY1-EOVBR)*COSWT+CZ1*SINWT+EOVBR
CZ2=CZ1*COSWT-(CY1-EOVBR)*SINWT
# CALC DIRECTION COSINES
VTOT=math.sqrt(CX2*CX2+CY2*CY2+CZ2*CZ2)
DCX2=CX2/VTOT
DCY2=CY2/VTOT
DCZ2=CZ2/VTOT
# CALC NEW POSITION
X=X+DX
Y=Y+EOVBR*T+GAM12*((CY1-EOVBR)*SINWT+CZ1*(1.00-COSWT))/WB
# Y=Y+EOVBR*T+((CY1-EOVBR)*SINWT+CZ1*(1.00-COSWT))/WB
Z=Z+DZ
GAM12=(GAM1+GAM2)/2.00
ST=ST+T
IT=int(T+1.00)
IT=DMIN0[IT][J300]
TIME[IT]=TIME[IT]+1.00
# ---------------------------------------------------------------------
# DETERMINATION OF #real COLLISION TYPE
# ---------------------------------------------------------------------
R2=DRAND48(RDUM)
I=0
140 I=I+1
if(CF[IE][I]:
< R2) GO TO 140
#************************************************************
# CHECK IF BREMSSTRAHLUNG
if(IZBR[I]:
!= 0 and LBRM == 1) :
NFLGBRMM=1
IPT=IARRY[I]
ICOLL[IPT]=ICOLL[IPT]+1
ICOLN[I]=ICOLN[I]+1
DO 141 KNGS=1,NGAS
if(IPT == (KNGS*5):
-1) GO TO 142
141 CONTINUE
142 IATOMNO=IZBR[I]
CALL BREMS(IATOMNO,E,DCX2,DCY2,DCZ2,EOUT,EDCX,EDCY,EDCZ,EGAMMA,GDCX,GDCY,GDCZ)
NBREM[KNGS]=NBREM[KNGS]+1
EBRTOT[KNGS]=EBRTOT[KNGS]+EGAMMA
# WRITE(6,668) EGAMMA,J11
# 668 print(' BREM EGAMMA=','%.4f' % ,' EVENT NO=',I5)
# GET NEW DRCOS DRCOSY DRCOSX AND ENERGY OF ELECTRON
E1=EOUT
DCX1=EDCX
DCY1=EDCY
DCZ1=EDCZ
# RUN BREMSSTRAHLUNG GAMMA THROUGH CASCADE : STORE CONVERTED
# ELECTRONS IN COMMON/CASRSB/
#
CALL BREMSCASC(J11,EGAMMA,X,Y,Z,ST,GDCX,GDCY,GDCZ,ILOW)
# BREMSSTRAHLUNG ENERGY TOO LOW TO IONISE
if(ILOW == 1):
GO TO 190
#
# STORE BREMSSTRAHLUNG DATA IN CLUSTER STORE
DO 890 KBR=1,IEVNTLB
NCLUS=NCLUS+1
if(NCLUS > 150000):
:
WRITE(6,546) NCLUS,NREAL
sys.exit()
# endif
ES[NCLUS]=ECASB[KBR]
XS[NCLUS]=XCASB[KBR]
YS[NCLUS]=YCASB[KBR]
ZS[NCLUS]=ZCASB[KBR]
TS[NCLUS]=TTB1[KBR]
DCX[NCLUS]=DRXB[KBR]
DCY[NCLUS]=DRYB[KBR]
DCZ[NCLUS]=DRZB[KBR]
NFLGFC[NCLUS]=NFLGFB[KBR]+NFLGHIGH
NFLGPPC[NCLUS]=NFLGPPB[KBR]
NFLGBRMC[NCLUS]=2
890 CONTINUE
if(NFLGFC[NCLUS]:
> NFLGHIGH) NFLGHIGH=NFLGFC[NCLUS]
GO TO 190
# endif
891 CONTINUE
#****************************************************************
# S1=RGAS[I]
S1=1.00+GAM2*(RGAS[I]-1.00)
EI=EIN[I]
if(E < EI):
:
EI=E-0.00010
# endif
if(IPN[I]:
== 0) GO TO 666
# ATTACHMENT
if(IPN[I]== -1) :
NETOT=NETOT+1
NITOT=NITOT+1
NELEC=NELEC+1
NEGION=NEGION+1
IPT=IARRY[I]
ICOLL[IPT]=ICOLL[IPT]+1
ICOLN[I]=ICOLN[I]+1
IT=int(T+1.00)
IT=DMIN0[IT][J300]
TIME[IT]=TIME[IT]+1.00
GO TO 335
# endif
EISTR=EI
if(IONMODEL[I]> 0):
# CALCULATE SECONDARY ENERGY,ESEC,IN IONISATION COLLISION USING
# FIVE DIFFERENT MODELS
CALL IONSPLIT(I,E,EI,ESEC)
GO TO 544
# endif
R9=DRAND48(RDUM)
# USE OPAL PETERSON AND BEATY SPLITTING FACTOR.
ESEC=WPL[I]*TAN(R9*ATAN((E-EI)/(2.00*WPL[I])))
ESEC=WPL[I]*(ESEC/WPL[I])**0.9524
544 CONTINUE
EI=ESEC+EI
# STORE POSITION ,ENERGY, DIRECTION COSINES AND TIME OF GENERATION
# OF SECONDARY IONISATION ELECTRON
NCLUS=NCLUS+1
NMXADD=MAX[NCLUS][NMXADD]
if(NCLUS > 150000):
:
WRITE(6,546) NCLUS,NREAL
546 print(2X,' def STOPPED: . NCLUS=',I7,' NREAL=',I10)
sys.exit()
# endif
XS[NCLUS]=X
YS[NCLUS]=Y
ZS[NCLUS]=Z
TS[NCLUS]=ST
ES[NCLUS]=ESEC
NFLGFC[NCLUS]=NFLGFF
NFLGPPC[NCLUS]=NFLGPPP
NFLGBRMC[NCLUS]=NFLGBRMM
NTMPFLG=1
NCLTMP=NCLUS
# RANDOMISE SECONDARY ELECTRON DIRECTION
# R3=drand48(RDUM)
# F3=1.0-2.00*R3
# THETA0=DACOS(F3)
# F6=DCOS(THETA0)
# F5=DSIN(THETA0)
# R4=drand48(RDUM)
# PHI0=F4*R4
# F8=DSIN(PHI0)
# F9=DCOS(PHI0)
# DCX[NCLUS]=F9*F5
# DCY[NCLUS]=F8*F5
# DCZ[NCLUS]=F6
#*********************************************************
if(IECASC == 0):
GO TO 333
if(LEGAS[I]:
== 0) GO TO 333
# USE COMPLETE CASCADE FOR ELECTRON IONISATION
KG1=NEGAS[I]
LG1=LEGAS[I]
IGSHEL=IESHELL[I]
CALL CASCADEE(J11,KG1,LG1,X,Y,Z,ST,ESEC,IGSHEL)
#
# STORE CASCADE IN CLUSTER STORE
#
ETSUM=0.0
DO 844 KBR=1,IEVENTE
NCLUS=NCLUS+1
if(NCLUS > 150000):
:
WRITE(6,546) NCLUS,NREAL
sys.exit()
# endif
ES[NCLUS]=ECASE[KBR]
ETSUM=ETSUM+ES[NCLUS]
XS[NCLUS]=XCASE[KBR]
YS[NCLUS]=YCASE[KBR]
ZS[NCLUS]=ZCASE[KBR]
TS[NCLUS]=TCASE[KBR]
DCX[NCLUS]=DRXCE[KBR]
DCY[NCLUS]=DRYCE[KBR]
DCZ[NCLUS]=DRZCE[KBR]
NFLGFC[NCLUS]=NFLGFE[KBR]+NFLGHIGH
NFLGPPC[NCLUS]=NFLGPPE[KBR]
NFLGBRMC[NCLUS]=NFLGBRMM
844 CONTINUE
if(NFLGFC[NCLUS]:
> NFLGHIGH) NFLGHIGH=NFLGFC[NCLUS]
GO TO 666
#*********************************************************
# STORE POSSIBLE SHELL EMISSIONS AUGER OR FLUORESCENCE
333 if(EISTR > 30.0) :
# TEST IF FLUORESCENCE EMISSION
IFLTST=0:
if(WKLM[I]:
> 0.0) :
R9=DRAND48(RDUM)
if(R9 < WKLM[I]:
) IFLTST=1
# endif
if(IFLTST == 0):
:
# AUGER EMISSION WITHOUT FLUORESCENCE
NAUG=NC0[I]
EAVAUG=EC0[I]/float(NAUG)
DO 700 JFL=1,NC0[I]
NCLUS=NCLUS+1
XS[NCLUS]=X
YS[NCLUS]=Y
ZS[NCLUS]=Z
TS[NCLUS]=ST
NFLGFC[NCLUS]=NFLGFF
NFLGPPC[NCLUS]=NFLGPPP
NFLGBRMC[NCLUS]=NFLGBRMM
ES[NCLUS]=EAVAUG
R3=DRAND48(RDUM)
F3=1.0-2.00*R3
THETA0=numpy.arccos(F3)
F6=numpy.cos(THETA0)
F5=numpy.sin(THETA0)
R4=DRAND48(RDUM)
PHI0=F4*R4
F8=numpy.sin(PHI0)
F9=numpy.cos(PHI0)
DCX[NCLUS]=F9*F5
DCY[NCLUS]=F8*F5
DCZ[NCLUS]=F6
700 CONTINUE
else:
# AUGER EMISSION AND FLUORESENCE
if(NG2[I]:
== 0) GO TO 702
NAUG=NG2[I]
EAVAUG=EG2[I]/float(NAUG)
DO 701 JFL=1,NG2[I]
NCLUS=NCLUS+1
XS[NCLUS]=X
YS[NCLUS]=Y
ZS[NCLUS]=Z
NFLGFC[NCLUS]=NFLGFF
NFLGPPC[NCLUS]=NFLGPPP
NFLGBRMC[NCLUS]=NFLGBRMM
TS[NCLUS]=ST
ES[NCLUS]=EAVAUG
R3=DRAND48(RDUM)
F3=1.0-2.00*R3
THETA0=numpy.arccos(F3)
F6=numpy.cos(THETA0)
F5=numpy.sin(THETA0)
R4=DRAND48(RDUM)
PHI0=F4*R4
F8=numpy.sin(PHI0)
F9=numpy.cos(PHI0)
DCX[NCLUS]=F9*F5
DCY[NCLUS]=F8*F5
DCZ[NCLUS]=F6
701 CONTINUE
702 if(NG1[I] == 0) GO TO 704
NAUG=NG1[I]
EAVAUG=EG1[I]/float(NAUG)
R9=DRAND48(RDUM)
DFL=-math.log(R9)*DSTFL[I]
DO 703 JFL=1,NG1[I]
NCLUS=NCLUS+1
R3=DRAND48(RDUM)
THEFL=numpy.arccos(1.0-2.00*R3)
R4=DRAND48(RDUM)
PHIFL=F4*R4
XS[NCLUS]=X+DFL*numpy.sin(THEFL)*numpy.cos(PHIFL)
YS[NCLUS]=Y+DFL*numpy.sin(THEFL)*numpy.sin(PHIFL)
ZS[NCLUS]=Z+DFL*numpy.cos(THEFL)
NFLGFC[NCLUS]=NFLGHIGH+1
NFLGPPC[NCLUS]=NFLGPPP
NFLGBRMC[NCLUS]=NFLGBRMM
TS[NCLUS]=ST
ES[NCLUS]=EAVAUG
R3=DRAND48(RDUM)
F3=1.0-2.00*R3
THETA0=numpy.arccos(F3)
F6=numpy.cos(THETA0)
F5=numpy.sin(THETA0)
R4=DRAND48(RDUM)
PHI0=F4*R4
F8=numpy.sin(PHI0)
F9=numpy.cos(PHI0)
DCX[NCLUS]=F9*F5
DCY[NCLUS]=F8*F5
DCZ[NCLUS]=F6
NFLGHIGH=NFLGFC[NCLUS]
703 CONTINUE
704 CONTINUE
# endif
# endif
#
# GENERATE SCATTERING ANGLES AND UPDATE LABORATORY COSINES AFTER
# COLLISION ALSO UPDATE ENERGY OF ELECTRON.
#
666 IPT=IARRY[I]
ICOLL[IPT]=ICOLL[IPT]+1
ICOLN[I]=ICOLN[I]+1
# IF EXCITATION : ADD PROBABILITY ,PENFRA(1,I), OF TRANSFER TO GIVE
# IONISATION OF THE OTHER GASES IN MIXTURE
if(IPEN == 0 or NGAS == 1):
GO TO 5
if(PENFRA[1][I] != 0.0):
:
RAN=DRAND48(RDUM)
if(RAN > PENFRA[1][I]):
GO TO 5
NCLUS=NCLUS+1
# ENTER HERE POSSIBLE DELOCALISATION LENGTH FOR PENNING TRANSFER
if(PENFRA[2][I] == 0.0):
:
XS[NCLUS]=X
YS[NCLUS]=Y
ZS[NCLUS]=Z
NFLGFC[NCLUS]=NFLGFF
NFLGPPC[NCLUS]=NFLGPPP
NFLGBRMC[NCLUS]=NFLGBRMM
GO TO 667
# endif
ASIGN=1.0
RAN=DRAND48(RDUM)
RAN1=DRAND48(RDUM)
if(RAN1 < 0.5):
ASIGN=-ASIGN
XS[NCLUS]=X-math.log(RAN)*PENFRA[2][I]*ASIGN
RAN=DRAND48(RDUM)
RAN1=DRAND48(RDUM)
if(RAN1 < 0.5):
ASIGN=-ASIGN
YS[NCLUS]=Y-math.log(RAN)*PENFRA[2][I]*ASIGN
RAN=DRAND48(RDUM)
RAN1=DRAND48(RDUM)
if(RAN1 < 0.5):
ASIGN=-ASIGN
ZS[NCLUS]=Z-math.log(RAN)*PENFRA[2][I]*ASIGN
667 RAN=DRAND48(RDUM)
TS[NCLUS]=ST-math.log(RAN)*PENFRA[3][I]
# ASSIGN EXCESS ENERGY OF 1EV TO PENNING CREATED ELECTRON
ES[NCLUS]=1.0
DCX[NCLUS]=DCX1
DCY[NCLUS]=DCY1
DCZ[NCLUS]=DCZ1
GO TO 6
# endif
# GO TO 6
# CALCULATE SUM OF EXCITATION PER CLUSTER AND STORE EXCITATION X Y Z T
5 if(IPN[I] == 0) :
if((RGAS[I]:
*EIN[I]) > 4.0) :
KEXC=KEXC+1
if(KEXC > 150000):
:
WRITE(6,548) KEXC
548 print(2X,' def STOPPED: . KEXC=',I7)
sys.exit()
# endif
# FIND GAS IN WHICH EXCITATION OCCURED AND INCREMENT COUNTER
if(I <= IDG1):
:
NGEXC1=NGEXC1+1
else if(I <= IDG2) :
NGEXC2=NGEXC2+1
else if(I <= IDG3) :
NGEXC3=NGEXC3+1
else if(I <= IDG4) :
NGEXC4=NGEXC4+1
else if(I <= IDG5) :
NGEXC5=NGEXC5+1
else if(I <= IDG6) :
NGEXC6=NGEXC6+1
else:
WRITE(6,9911)
9911 print(' def STOPPED: BAD GAS ID IN MONTE')
sys.exit()
# endif
NEXCTOT=NEXCTOT+1
NSTEXC=NSTEXC+1
XSTEXC[KEXC]=X
YSTEXC[KEXC]=Y
ZSTEXC[KEXC]=Z
TSTEXC[KEXC]=ST
# endif
# endif
6 S2=(S1*S1)/(S1-1.00)
# ANISOTROPIC SCATTERING
R3=DRAND48(RDUM)
if(INDEX[I]:
== 1) :
R31=DRAND48(RDUM)
F3=1.00-R3*ANGCT[IE][I]
if(R31 > PSCT[IE][I]:
) F3=-F3
else if(INDEX[I] == 2) :
EPSI=PSCT[IE][I]
F3=1.00-(2.00*R3*(1.00-EPSI)/(1.00+EPSI*(1.00-2.00*R3)))
else:
# ISOTROPIC SCATTERING
F3=1.00-2.00*R3
# endif
THETA0=numpy.arccos(F3)
R4=DRAND48(RDUM)
PHI0=F4*R4
F8=numpy.sin(PHI0)
F9=numpy.cos(PHI0)
if(E < EI):
EI=0.00
ARG1=1.00-S1*EI/E
ARG1=DMAX1[ARG1][SMALL]
D=1.00-F3*math.sqrt(ARG1)
E1=E*(1.00-EI/(S1*E)-2.00*D/S2)
E1=DMAX1[E1][SMALL]
Q=math.sqrt((E/E1)*ARG1)/S1
Q=DMIN1[Q][1.00]
THETA=numpy.arcsin(Q*numpy.sin(THETA0))
F6=numpy.cos(THETA)
U=(S1-1.00)*(S1-1.00)/ARG1
CSQD=F3*F3
if(F3 < 0.00 and CSQD > U):
F6=-1.00*F6
F5=numpy.sin(THETA)
DCZ2=DMIN1[DCZ2][1.00]
ARGZ=math.sqrt(DCX2*DCX2+DCY2*DCY2)
if(ARGZ == 0.00):
DCZ1=F6
DCX1=F9*F5
DCY1=F8*F5
if(NTMPFLG == 1):
# USE FREE KINEMATICS FOR IONISATION SECONDARY ANGLES
F5S=F5*math.sqrt(E1/ES[NCLTMP])
if(F5S > 1.0):
F5S=1.0
THSEC=numpy.arcsin(F5S)
F5S=numpy.sin(THSEC)
F6S=numpy.cos(THSEC)
if(F6 < 0.0):
F6S=-F6S
PHIS=PHI0+API
if(PHIS > F4):
PHIS=PHI0-F4
F8S=numpy.sin(PHIS)
F9S=numpy.cos(PHIS)
DCZ[NCLTMP]=F6S
DCX[NCLTMP]=F9S*F5S
DCY[NCLTMP]=F8S*F5S
NTMPFLG=0
# endif
pass
# endif
else:
DCZ1=DCZ2*F6+ARGZ*F5*F8
DCY1=DCY2*F6+(F5/ARGZ)*(DCX2*F9-DCY2*DCZ2*F8)
DCX1=DCX2*F6-(F5/ARGZ)*(DCY2*F9+DCX2*DCZ2*F8)
if(NTMPFLG == 1):
# USE FREE KINEMATICS FOR IONISATION SECONDARY ANGLES
F5S=F5*math.sqrt(E1/ES[NCLTMP])
if(F5S > 1.0):
F5S=1.0
THSEC=numpy.arcsin(F5S)
F5S=numpy.sin(THSEC)
F6S=numpy.cos(THSEC)
if(F6 < 0.0):
F6S=-F6S
PHIS=PHI0+API
if(PHIS > F4):
PHIS=PHI0-F4
F8S=numpy.sin(PHIS)
F9S=numpy.cos(PHIS)
DCZ[NCLTMP]=DCZ2*F6S+ARGZ*F5S*F8S
DCY[NCLTMP]=DCY2*F6S+(F5S/ARGZ)*(DCX2*F9S-DCY2*DCZ2*F8S)
DCX[NCLTMP]=DCX2*F6S-(F5S/ARGZ)*(DCY2*F9S+DCX2*DCZ2*F8S)
NTMPFLG=0
# endif
#190 CONTINUE
GAM1=(EMS+E1)/EMS
BET1=math.sqrt(1.00-1.00/(GAM1*GAM1))
VTOT=BET1*VC*1.E-12
# VTOT=CONST9*math.sqrt(E1)
CX1=DCX1*VTOT
CY1=DCY1*VTOT
CZ1=DCZ1*VTOT
# TEST IF ELECTRON IS THERMALISED
if(E1 > ETHRM):
GO TO 1
191 CONTINUE
# STORE POSITION AND TIME OF THERMALISED ELECTRONS
K1=K1+1
# ROTATE INTO COORDINATE SYSTEM WITH EFIELD ALONG Z
ZR=Z*RCS-X*RSN
YR=Y
XR=Z*RSN+X*RCS
XST[K1]=XR
YST[K1]=YR
ZST[K1]=ZR
TST[K1]=ST
NFGF[K1]=NFLGFF
NFGPP[K1]=NFLGPPP
NFGBR[K1]=NFLGBRMM
TTIME[K1]=ST-TLAST
NELEC=NELEC+1
NETOT=NETOT+1
335
if(K1 == 150000):
GO TO 889
if(NELEC == (NCLUS+1)):
# LAST ELECTRON IN CLUSTER. DO STATISTICS ON CLUSTER
STATS(J11,J1)
GO TO 210
# endif
# GET NEW IONISATION ELECTRON FROM STORE
X=XS[NELEC]
Y=YS[NELEC]
Z=ZS[NELEC]
ST=TS[NELEC]
NFLGFF=NFLGFC[NELEC]
NFLGPPP=NFLGPPC[NELEC]
NFLGBRMM=NFLGBRMC[NELEC]
TLAST=TS[NELEC]
E1=ES[NELEC]
DCX1=DCX[NELEC]
DCY1=DCY[NELEC]
DCZ1=DCZ[NELEC]
if(E1 < ETHRM):
GO TO 191
GAM1=(EMS+E1)/EMS
BET1=math.sqrt(1.00-1.00/(GAM1*GAM1))
VTOT=BET1*VC*1.E-12
CX1=DCX1*VTOT
CY1=DCY1*VTOT
CZ1=DCZ1*VTOT
GO TO 1
# MAIN LOOP # end
210 CONTINUE
# RESET NUMBER OF EVENTS FOR BAD EVENTS
if(IMIP > 2):
NDELTA=NDELTA-IBADTOT
print(' EMAX=','%.7f' % EMAX,' NEOVFL =',NEOVFL)
if(EMAX > EFINAL):
print('INCREASE ENERGY LIMIT FROM','%.6f' % EFINAL,' EV TO AT LEAST','%.6f' % EMAX,' EV.')
sys.exit()
# endif
return
889 NLEFT=NCLUS-NELEC
print('\n\n\n WARNING STOPPED: AFTER NPRIME=',NPRIME,' LAST PRIMARY HAS AT LEAST ',NLEFT,' SECONDARIES LEFT TO TRACK. OUT OF ',NCLUS,' ELECTRONS ALREADY IN CLUSTER')
sys.exit()
return
# end
| 30.64421
| 172
| 0.502865
| 15,071
| 113,261
| 3.779112
| 0.051423
| 0.007006
| 0.010113
| 0.018541
| 0.909613
| 0.900781
| 0.89216
| 0.851637
| 0.836415
| 0.826951
| 0
| 0.117442
| 0.348046
| 113,261
| 3,696
| 173
| 30.64421
| 0.653878
| 0.261361
| 0
| 0.881513
| 0
| 0.000357
| 0.016465
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0.00464
| 0
| null | null | 0.01035
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
27de1f1415a967826ef5c397e0c4153d0dfcb6e7
| 1,258
|
py
|
Python
|
008.py
|
wittycoder/project_euler
|
27878bcccf6c1d4cd6e51b220d8575ad398c7762
|
[
"MIT"
] | null | null | null |
008.py
|
wittycoder/project_euler
|
27878bcccf6c1d4cd6e51b220d8575ad398c7762
|
[
"MIT"
] | null | null | null |
008.py
|
wittycoder/project_euler
|
27878bcccf6c1d4cd6e51b220d8575ad398c7762
|
[
"MIT"
] | null | null | null |
from functools import reduce

# Project Euler 8: find the greatest product of `digits` adjacent digits
# in the 1000-digit number below.
num = '7316717653133062491922511967442657474235534919493496983520312774506326239578318016984801869478851843858615607891129494954595017379583319528532088055111254069874715852386305071569329096329522744304355766896648950445244523161731856403098711121722383113622298934233803081353362766142828064444866452387493035890729629049156044077239071381051585930796086670172427121883998797908792274921901699720888093776657273330010533678812202354218097512545405947522435258490771167055601360483958644670632441572215539753697817977846174064955149290862569321978468622482839722413756570560574902614079729686524145351004748216637048440319989000889524345065854122758866688116427171479924442928230863465674813919123162824586178664583591245665294765456828489128831426076900422421902267105562632111110937054421750694165896040807198403850962455444362981230987879927244284909188845801561660979191338754992005240636899125607176060588611646710940507754100225698315520005593572972571636269561882670428252483600823257530420752963450'
digits = 13

# Slide a window of `digits` characters across the number and keep the
# best product seen so far.
max_prod = 0
for i in range(len(num) - digits + 1):
    window = num[i:i + digits]
    # reduce starts with the first character, so each step coerces both
    # operands with int() before multiplying.
    prod = reduce(lambda acc, ch: int(acc) * int(ch), window)
    max_prod = max(max_prod, prod)
print(max_prod)
| 89.857143
| 1,008
| 0.919714
| 48
| 1,258
| 24.020833
| 0.5
| 0.024284
| 0.008673
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.839033
| 0.0469
| 1,258
| 14
| 1,009
| 89.857143
| 0.122602
| 0.013514
| 0
| 0
| 0
| 0
| 0.805802
| 0.805802
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.111111
| 0
| 0.111111
| 0.111111
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
fd77793cec6b95df2b9cf05805f92c4d78e59c96
| 5,752
|
py
|
Python
|
tests/test_unit/test_detect_pattern.py
|
loftwah/CleverCSV
|
c7d0cab7b19d969dbbda2bcafb414ca2973facf5
|
[
"MIT"
] | 989
|
2019-02-22T12:14:17.000Z
|
2022-03-28T01:33:20.000Z
|
tests/test_unit/test_detect_pattern.py
|
loftwah/CleverCSV
|
c7d0cab7b19d969dbbda2bcafb414ca2973facf5
|
[
"MIT"
] | 27
|
2019-12-22T00:09:28.000Z
|
2022-03-30T22:45:50.000Z
|
tests/test_unit/test_detect_pattern.py
|
loftwah/CleverCSV
|
c7d0cab7b19d969dbbda2bcafb414ca2973facf5
|
[
"MIT"
] | 55
|
2019-10-22T13:09:53.000Z
|
2022-01-03T04:28:26.000Z
|
# -*- coding: utf-8 -*-
"""
Unit tests for the pattern score.
Author: Gertjan van den Burg
"""
import unittest
from clevercsv import detect_pattern
from clevercsv.dialect import SimpleDialect
class PatternTestCase(unittest.TestCase):
    """Unit tests for make_abstraction, fill_empties and pattern_score."""

    # Data string shared by the three pattern-score tests (theta_1..theta_3
    # in the paper).
    _SCORE_DATA = (
        "7,5; Mon, Jan 12;6,40\n100; Fri, Mar 21;8,23\n8,2; Thu, Sep 17;"
        '2,71\n538,0;;7,26\n"NA"; Wed, Oct 4;6,93'
    )

    def _check_abstraction(self, text, expected, delimiter=",", quotechar="", escapechar=""):
        """Assert that *text* abstracts to *expected* under the given dialect."""
        dialect = SimpleDialect(
            delimiter=delimiter, quotechar=quotechar, escapechar=escapechar
        )
        self.assertEqual(expected, detect_pattern.make_abstraction(text, dialect))

    # ---- abstraction tests ---------------------------------------------

    def test_abstraction_1(self):
        self._check_abstraction("A,B,C", "CDCDC")

    def test_abstraction_2(self):
        self._check_abstraction("A,\rA,A,A\r", "CDCRCDCDC")

    def test_abstraction_3(self):
        self._check_abstraction("a,a,\n,a,a\ra,a,a\r\n", "CDCDCRCDCDCRCDCDC")

    def test_abstraction_4(self):
        self._check_abstraction('a,"bc""d""e""f""a",\r\n', "CDCDC", quotechar='"')

    def test_abstraction_5(self):
        self._check_abstraction(
            'a,"bc""d"",|"f|""', "CDC", quotechar='"', escapechar="|"
        )

    def test_abstraction_6(self):
        self._check_abstraction(",,,", "CDCDCDC")

    def test_abstraction_7(self):
        self._check_abstraction(',"",,', "CDCDCDC", quotechar='"')

    def test_abstraction_8(self):
        self._check_abstraction(',"",,\r\n', "CDCDCDC", quotechar='"')

    # ---- escape-char tests ---------------------------------------------

    def test_abstraction_9(self):
        self._check_abstraction("A,B|,C", "CDC", escapechar="|")

    def test_abstraction_10(self):
        self._check_abstraction('A,"B,C|"D"', "CDC", quotechar='"', escapechar="|")

    def test_abstraction_11(self):
        self._check_abstraction("a,|b,c", "CDCDC", escapechar="|")

    def test_abstraction_12(self):
        self._check_abstraction("a,b|,c", "CDC", escapechar="|")

    def test_abstraction_13(self):
        self._check_abstraction('a,"b,c|""', "CDC", quotechar='"', escapechar="|")

    def test_abstraction_14(self):
        self._check_abstraction("a,b||c", "CDC", escapechar="|")

    def test_abstraction_15(self):
        self._check_abstraction(
            'a,"b|"c||d|"e"', "CDC", quotechar='"', escapechar="|"
        )

    def test_abstraction_16(self):
        self._check_abstraction(
            'a,"b|"c||d","e"', "CDCDC", quotechar='"', escapechar="|"
        )

    # ---- fill empties --------------------------------------------------

    def test_fill_empties_1(self):
        self.assertEqual("CDCDCDC", detect_pattern.fill_empties("DDD"))

    # ---- pattern score -------------------------------------------------

    def _check_score(self, expected, delimiter, quotechar):
        """Assert the pattern score of the shared data under the given dialect."""
        dialect = SimpleDialect(
            delimiter=delimiter, quotechar=quotechar, escapechar=""
        )
        self.assertAlmostEqual(
            expected, detect_pattern.pattern_score(self._SCORE_DATA, dialect)
        )

    def test_pattern_score_1(self):
        # theta_1 from paper
        self._check_score(7 / 4, ",", "")

    def test_pattern_score_2(self):
        # theta_2 from paper
        self._check_score(10 / 3, ";", "")

    def test_pattern_score_3(self):
        # theta_3 from paper
        self._check_score(10 / 3, ";", '"')
# Allow running this test module directly in addition to test discovery.
if __name__ == "__main__":
    unittest.main()
| 28.76
| 79
| 0.547114
| 627
| 5,752
| 4.866029
| 0.154705
| 0.089479
| 0.104884
| 0.255326
| 0.856441
| 0.831858
| 0.810226
| 0.760406
| 0.722714
| 0.697148
| 0
| 0.034158
| 0.297636
| 5,752
| 199
| 80
| 28.904523
| 0.72104
| 0.02799
| 0
| 0.534247
| 0
| 0.041096
| 0.113021
| 0.021214
| 0
| 0
| 0
| 0
| 0.136986
| 1
| 0.136986
| false
| 0
| 0.020548
| 0
| 0.164384
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
fd7bbce9b5f593c37883d20ef97f1c6d6ce75522
| 6,784
|
py
|
Python
|
tests/test_algorithms/test_transformTextToIndex.py
|
elangovana/kegg-pathway-extractor
|
08e9a28199bb4454e2e1a09c5d833f243f6f5768
|
[
"MIT"
] | 10
|
2019-12-17T01:17:06.000Z
|
2022-02-25T22:08:09.000Z
|
tests/test_algorithms/test_transformTextToIndex.py
|
elangovana/kegg-pathway-extractor
|
08e9a28199bb4454e2e1a09c5d833f243f6f5768
|
[
"MIT"
] | 2
|
2021-03-31T18:40:32.000Z
|
2021-12-13T20:15:20.000Z
|
tests/test_algorithms/test_transformTextToIndex.py
|
elangovana/kegg-pathway-extractor
|
08e9a28199bb4454e2e1a09c5d833f243f6f5768
|
[
"MIT"
] | 2
|
2020-08-25T19:31:33.000Z
|
2021-11-11T15:15:02.000Z
|
from unittest import TestCase
from unittest.mock import MagicMock
from torch.utils.data import DataLoader
from algorithms.transform_text_index import TransformTextToIndex
class TestTransformTextToIndex(TestCase):
    """Tests for TransformTextToIndex: vocabulary construction without an
    initial vocabulary, minimum-document-frequency filtering, merging with an
    initial vocabulary, and pad-token indexing.

    The original four tests each duplicated the mocked-dataset setup and the
    batch-verification loop; those now live in private helpers.
    """

    @staticmethod
    def _mock_data_loader(data, batch_size):
        """Wrap raw [(features, label)] rows in a mocked dataset and DataLoader."""
        mock_dataset = MagicMock()
        mock_dataset.data = data
        mock_dataset.__len__.return_value = len(data)
        mock_dataset.__getitem__.side_effect = lambda i: (
            mock_dataset.data[i][0],
            mock_dataset.data[i][1],
        )
        return DataLoader(mock_dataset, batch_size=batch_size)

    def _collect_unique_indices(self, actual, max_feature_lens):
        """Return the set of index values in the transformed batches, asserting
        that every row of column ci has length max_feature_lens[ci]."""
        unique_items = set()
        for b, y in actual:
            for ci, c_tensor in enumerate(b):
                for r in c_tensor.tolist():
                    unique_items = unique_items.union(r)
                    feature_len = max_feature_lens[ci]
                    self.assertEqual(
                        feature_len,
                        len(r),
                        "The feature length for column {} should match the max_feature_length".format(
                            feature_len
                        ),
                    )
        return unique_items

    def _assert_unique_count(self, expected, unique_items):
        """Assert the number of distinct indices (including padding) matches."""
        self.assertEqual(
            expected,
            len(unique_items),
            "The number of unique words doesnt match to unique indexes including padding{}".format(
                unique_items
            ),
        )

    def test_transform_no_vocab(self):
        data = [
            [["This is sample text", "entity1", "entity2", "phosphorylation"], ["yes"]],
            [["Completey random text2", "entity11", "entity12", "phosphorylation1"], ["no"]],
        ]
        max_feature_lens = [10, 1, 1, 1]
        # Unique words + pad character (labels are ignored)
        expected_unique_item_no = 13 + 1
        data_loader = self._mock_data_loader(data, batch_size=1)
        sut = TransformTextToIndex(max_feature_lens, vocab_dict=None, min_vocab_doc_frequency=1)

        # Act
        actual = list(sut.fit_transform(data_loader))

        # Assert
        unique_items = self._collect_unique_indices(actual, max_feature_lens)
        self._assert_unique_count(expected_unique_item_no, unique_items)

    def test_transform_min_frequnct(self):
        data = [
            [["This is sample sample text", "entity1", "entity2", "phosphorylation"], ["yes"]],
            [["Completey random random sample text2", "entity11", "entity12", "phosphorylation1"], ["no"]],
        ]
        max_feature_lens = [10, 1, 1, 1]
        # Unique words + pad character (ignore labels); extra slot for unknown words
        expected_unique_item_no = 1 + 1 + 1
        data_loader = self._mock_data_loader(data, batch_size=1)
        sut = TransformTextToIndex(max_feature_lens, vocab_dict=None, min_vocab_doc_frequency=2)

        # Act
        actual = list(sut.fit_transform(data_loader))

        # Assert
        unique_items = self._collect_unique_indices(actual, max_feature_lens)
        self._assert_unique_count(expected_unique_item_no, unique_items)

    def test_transform_with_vocab(self):
        initial_vocab_dict = {"random": 0, "initial": 1}
        data = [
            [["This is sample text", "entity1", "entity2", "phosphorylation"], ["yes"]],
            [["Minoritt word random unknown", "entity1", "entity2", "phosphorylation"], ["no"]],
        ]
        max_feature_lens = [10, 1, 1, 1]
        # Unique words + pad character (ignore labels)
        expected_unique_item_no = 12
        data_loader = self._mock_data_loader(data, batch_size=2)
        sut = TransformTextToIndex(
            max_feature_lens,
            vocab_dict=initial_vocab_dict,
            use_dataset_vocab=True,
            min_vocab_doc_frequency=1,
        )

        # Act: build the vocabulary from the dataset first, then transform
        sut.vocab_dict = sut.construct_vocab_dict(data_loader)
        actual = list(sut.fit_transform(data_loader))

        # Assert
        unique_items = self._collect_unique_indices(actual, max_feature_lens)
        self._assert_unique_count(expected_unique_item_no, unique_items)

    def test_transform_pad(self):
        """Test case to make ensure that pad index is zero."""
        data = [
            [["This is sample text", "entity1", "entity2", "phosphorylation"], ["yes"]],
            [["This is sample text2", "entity1", "entity2", "phosphorylation"], ["no"]],
        ]
        max_feature_lens = [10, 1, 1, 1]
        data_loader = self._mock_data_loader(data, batch_size=2)
        sut = TransformTextToIndex(
            max_feature_lens,
            vocab_dict={"random": 0, "initial": 1},
            use_dataset_vocab=True,
            min_vocab_doc_frequency=1,
        )

        # Act
        vocab_dict = sut.construct_vocab_dict(data_loader)

        # Assert
        self.assertEqual(
            vocab_dict[sut.pad_token()],
            0,
            "Index of pas token {} must be zero".format(sut.pad_token()),
        )
| 43.767742
| 118
| 0.597877
| 783
| 6,784
| 4.890166
| 0.155811
| 0.09193
| 0.06268
| 0.033429
| 0.885349
| 0.885349
| 0.885349
| 0.885349
| 0.854792
| 0.854792
| 0
| 0.015927
| 0.305867
| 6,784
| 154
| 119
| 44.051948
| 0.797197
| 0.070018
| 0
| 0.765306
| 0
| 0
| 0.150287
| 0
| 0
| 0
| 0
| 0
| 0.071429
| 1
| 0.040816
| false
| 0
| 0.040816
| 0
| 0.091837
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
fdd70e73afa9c1b5c8e686cc00e57881e761fc19
| 96
|
py
|
Python
|
rbig_jax/density/test_histogram.py
|
jejjohnson/rbig_jax
|
112e064d5b62631aa03b7563c9eb9f115ab23eb0
|
[
"MIT"
] | null | null | null |
rbig_jax/density/test_histogram.py
|
jejjohnson/rbig_jax
|
112e064d5b62631aa03b7563c9eb9f115ab23eb0
|
[
"MIT"
] | null | null | null |
rbig_jax/density/test_histogram.py
|
jejjohnson/rbig_jax
|
112e064d5b62631aa03b7563c9eb9f115ab23eb0
|
[
"MIT"
] | null | null | null |
import pytest
from src.density.histogram import get_bin_edges
def test_bin_edges():
    """Placeholder: behaviour of get_bin_edges is not yet asserted."""
| 12
| 47
| 0.78125
| 15
| 96
| 4.733333
| 0.8
| 0.225352
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.166667
| 96
| 7
| 48
| 13.714286
| 0.8875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| true
| 0.25
| 0.5
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 8
|
e329156c3c46920af5c8cf6aea71ef24c0a1102e
| 25
|
py
|
Python
|
tests/bytecode/mp-tests/slice2.py
|
LabAixBidouille/micropython
|
11aa6ba456287d6c80598a7ebbebd2887ce8f5a2
|
[
"MIT"
] | 303
|
2015-07-11T17:12:55.000Z
|
2018-01-08T03:02:37.000Z
|
tests/bytecode/mp-tests/slice2.py
|
LabAixBidouille/micropython
|
11aa6ba456287d6c80598a7ebbebd2887ce8f5a2
|
[
"MIT"
] | 13
|
2016-05-12T16:51:22.000Z
|
2018-01-10T22:33:25.000Z
|
tests/bytecode/mp-tests/slice2.py
|
LabAixBidouille/micropython
|
11aa6ba456287d6c80598a7ebbebd2887ce8f5a2
|
[
"MIT"
] | 26
|
2018-01-18T09:15:33.000Z
|
2022-02-07T13:09:14.000Z
|
# Tuple-index subscription fixture: first as a load (x[a, b] on the right),
# then as a store (x[a, b] on the left). Names x, a, b are intentionally free.
x = x[a, b]
x[a, b] = x
| 6.25
| 11
| 0.32
| 8
| 25
| 1
| 0.375
| 0.5
| 0.75
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.36
| 25
| 3
| 12
| 8.333333
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
e33044dac4ac02b711b219fcdbe7973fa99da2dc
| 1,805
|
py
|
Python
|
Day 4/Rock_Paper_Scissors.py
|
hamzaoda/100-Days-of-Code---The-Complete-Python-Pro-Bootcamp-for-2021
|
5340007d8405df2e29643b47d3ff9fa4f7af9e10
|
[
"Unlicense"
] | null | null | null |
Day 4/Rock_Paper_Scissors.py
|
hamzaoda/100-Days-of-Code---The-Complete-Python-Pro-Bootcamp-for-2021
|
5340007d8405df2e29643b47d3ff9fa4f7af9e10
|
[
"Unlicense"
] | null | null | null |
Day 4/Rock_Paper_Scissors.py
|
hamzaoda/100-Days-of-Code---The-Complete-Python-Pro-Bootcamp-for-2021
|
5340007d8405df2e29643b47d3ff9fa4f7af9e10
|
[
"Unlicense"
] | null | null | null |
import random

# ASCII art for each hand; list index matches the numeric choice (0/1/2).
rock = '''
    _______
---'   ____)
      (_____)
      (_____)
      (____)
---.__(___)
'''

paper = '''
    _______
---'   ____)____
          ______)
          _______)
         _______)
---.__________)
'''

scissors = '''
    _______
---'   ____)____
          ______)
       __________)
      (____)
---.__(___)
'''

game_images = [rock, paper, scissors]

user_choice = int(input("what is your choice? 0 for Rock, 1 for Paper, 2 for scissors :"))
computer_choice = random.randint(0, 2)

# Validate up front: the original triplicated branch tree crashed with an
# IndexError for choices > 2 and silently wrapped around for negatives.
if user_choice not in (0, 1, 2):
    print("Invalid choice: enter 0, 1 or 2.")
else:
    print(f"You Choosed\n {game_images[user_choice]}")
    print(f"Computer Choose{game_images[computer_choice]}")
    # (user - computer) % 3 encodes the outcome of every pairing:
    # 0 = draw, 1 = user wins, 2 = user loses (matches the original table).
    outcome = (user_choice - computer_choice) % 3
    if outcome == 0:
        print("it's Draw ")
    elif outcome == 1:
        print("You Have Win! :)")
    else:
        print("You Have Lost :(")
| 26.940299
| 88
| 0.601662
| 204
| 1,805
| 4.588235
| 0.171569
| 0.239316
| 0.134615
| 0.192308
| 0.816239
| 0.816239
| 0.816239
| 0.816239
| 0.816239
| 0.816239
| 0
| 0.009601
| 0.249862
| 1,805
| 66
| 89
| 27.348485
| 0.681684
| 0
| 0
| 0.803279
| 0
| 0
| 0.545706
| 0.222715
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.016393
| 0
| 0.016393
| 0.344262
| 0
| 0
| 0
| null | 1
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
8b73433d6edcd43401b52bb9cbcda7f746eef8f1
| 14,185
|
py
|
Python
|
test/comp/comp_aggregate.py
|
bomtuckle/pyrolite
|
c0af0ade14ff26b4e9fdd5a033b27e73df085c55
|
[
"BSD-3-Clause"
] | 69
|
2019-02-25T00:17:53.000Z
|
2022-03-31T17:26:48.000Z
|
test/comp/comp_aggregate.py
|
bomtuckle/pyrolite
|
c0af0ade14ff26b4e9fdd5a033b27e73df085c55
|
[
"BSD-3-Clause"
] | 68
|
2018-07-20T09:01:01.000Z
|
2022-03-31T16:28:36.000Z
|
test/comp/comp_aggregate.py
|
bomtuckle/pyrolite
|
c0af0ade14ff26b4e9fdd5a033b27e73df085c55
|
[
"BSD-3-Clause"
] | 24
|
2018-10-02T04:32:10.000Z
|
2021-11-10T08:24:17.000Z
|
import unittest
import numpy as np
from pyrolite.comp.aggregate import *
from pyrolite.util.synthetic import normal_frame
import logging
class TestCompositionalMean(unittest.TestCase):
    """Tests pandas compositional mean operator."""

    def setUp(self):
        self.cols = ["SiO2", "CaO", "MgO", "FeO", "TiO2"]
        self.df = normal_frame(columns=self.cols)

    def _assert_closed(self, frame):
        """Assert that the compositional mean of *frame* satisfies closure
        (its components sum to one)."""
        mean = compositional_mean(frame)
        self.assertTrue(np.allclose(np.sum(mean.values, axis=-1), 1.0))

    def test_1D(self):
        """Checks results on single records."""
        self._assert_closed(pd.DataFrame(self.df.iloc[:, 0].head(1)))

    def test_single(self):
        """Checks results on single records."""
        self._assert_closed(self.df.head(1).copy())

    def test_multiple(self):
        """Checks results on multiple records."""
        self._assert_closed(self.df.copy())

    @unittest.expectedFailure
    def test_contrasting(self):
        """Checks results on multiple contrasting records."""
        frame = self.df.copy()
        # Inject a few NaN cells to imitate contrasting analysis sets.
        rows = np.random.randint(1, 10, size=2)
        cols = np.random.randint(1, len(self.cols), size=2)
        frame.iloc[rows, cols] = np.nan
        self._assert_closed(frame)

    def test_mean(self):
        """Checks whether the mean is accurate."""
        pass
class TestWeightsFromArray(unittest.TestCase):
    """Tests the numpy array-weight generator for weighted averages."""

    def setUp(self):
        self.cols = ["SiO2", "CaO", "MgO", "FeO", "TiO2"]
        self.df = normal_frame(columns=self.cols)

    def test_single(self):
        """Checks results on single records."""
        weights = weights_from_array(self.df.head(1).copy().values)
        # A single record should yield exactly one weight.
        self.assertTrue(weights.size == 1)

    def test_multiple(self):
        """Checks results on multiple records."""
        frame = self.df.copy()
        weights = weights_from_array(frame.values)
        # One weight per record.
        self.assertTrue(weights.size == frame.index.size)
class TestGetFullColumn(unittest.TestCase):
    """Tests the nan-column checking function for numpy arrays."""

    def setUp(self):
        self.cols = ["SiO2", "CaO", "MgO", "FeO", "TiO2"]
        self.df = normal_frame(columns=self.cols)
        # Scatter NaNs through the frame; column 0 is never touched,
        # so it remains a "full" (nan-free) column.
        nans = 10
        rows = np.random.randint(1, 10, size=nans)
        cols = np.random.randint(1, len(self.cols), size=nans)
        self.df.iloc[rows, cols] = np.nan

    def test_single(self):
        """Checks results on single records."""
        frame = self.df.head(1).copy()
        self.assertTrue(get_full_column(frame.values) == 0)

    def test_multiple(self):
        """Checks results on multiple records."""
        self.assertTrue(get_full_column(self.df.copy().values) == 0)
class TestNANWeightedMean(unittest.TestCase):
    """Tests numpy weighted NaN-mean operator."""

    def setUp(self):
        self.cols = ["SiO2", "CaO", "MgO", "FeO", "TiO2"]
        self.df = normal_frame(columns=self.cols)

    def test_single(self):
        """Checks results on single records."""
        df = self.df.head(1).copy()
        out = nan_weighted_mean(df.values)
        # A single record is its own mean.
        self.assertTrue(np.allclose(out, df.values))

    def test_multiple(self):
        """Checks results on multiple records."""
        df = self.df.copy()
        out = nan_weighted_mean(df.values)
        # With no weights the result equals the plain column mean.
        self.assertTrue(np.allclose(out, np.mean(df.values, axis=0)))

    def test_multiple_equal_weights(self):
        """Checks results on multiple records with equal weights."""
        df = self.df.copy()
        weights = np.array([1.0 / len(df.index)] * len(df.index))
        out = nan_weighted_mean(df.values, weights=weights)
        self.assertTrue(
            np.allclose(out, np.average(df.values, weights=weights, axis=0))
        )

    def test_multiple_unequal_weights(self):
        """Checks results on multiple records with unequal weights."""
        df = self.df.copy()
        weights = np.random.rand(1, df.index.size).squeeze()
        out = nan_weighted_mean(df.values, weights=weights)
        # Note: a previously-computed transposed average was dead code and
        # has been removed; the assertion below is the actual check.
        self.assertTrue(
            np.allclose(out, np.average(df.values, weights=weights, axis=0))
        )

    def test_multiple_unequal_weights_withnan(self):
        """
        Checks results on multiple records with unequal weights,
        where the data includes some null data.
        """
        df = self.df.copy()
        df.iloc[0, :] = np.nan  # make one record nan
        # Some non-negative weights
        weights = np.random.rand(1, df.index.size).squeeze()
        weights = np.array(weights) / np.nansum(weights)
        out = nan_weighted_mean(df.values, weights=weights)
        # The all-nan record should be excluded from the average.
        check = np.average(df.iloc[1:, :].values, weights=weights[1:], axis=0)
        self.assertTrue(np.allclose(out, check))
class TestNANWeightedCompositionalMean(unittest.TestCase):
    """Tests numpy weighted compositonal NaN-mean operator."""

    def setUp(self):
        self.cols = ["SiO2", "CaO", "MgO", "FeO", "TiO2"]
        self.df = normal_frame(columns=self.cols)
        self.df = self.df.apply(lambda x: x / np.sum(x), axis="columns")

    def test_single(self):
        """Checks results on single records."""
        # Once closure is considered, a lone record is returned unchanged.
        frame = self.df.head(1).copy()
        for renorm in [True, False]:
            with self.subTest(renorm=renorm):
                out = nan_weighted_compositional_mean(frame.values, renorm=renorm)
                if renorm:
                    self.assertTrue(np.allclose(np.sum(out, axis=-1), 1.0))
                self.assertTrue(np.allclose(out, frame.values.reshape(out.shape)))

    def test_multiple(self):
        """Checks results on multiple records."""
        frame = self.df.copy()
        for renorm in [True, False]:
            with self.subTest(renorm=renorm):
                out = nan_weighted_compositional_mean(frame.values, renorm=renorm)
                if renorm:
                    self.assertTrue(np.allclose(np.sum(out, axis=-1), 1.0))

    def test_contrasting(self):
        """Checks results on multiple contrasting records."""
        # Unlike the plain compositional mean, this is expected to succeed.
        frame = self.df.copy()
        # Punch random holes to imitate contrasting analysis sets.
        frame.iloc[
            np.random.randint(1, 10, size=2),
            np.random.randint(1, len(self.cols), size=2),
        ] = np.nan
        out = nan_weighted_compositional_mean(frame.values)
        # Closure: the averaged composition must sum to one.
        self.assertTrue(np.allclose(np.sum(out, axis=-1), 1.0))

    def test_mean(self):
        """Checks whether the mean is accurate."""
        pass
class TestCrossRatios(unittest.TestCase):
    """Tests pandas cross ratios utility."""

    def setUp(self):
        self.cols = ["SiO2", "CaO", "MgO", "FeO", "TiO2"]
        self.d = len(self.cols)
        self.n = 10
        self.df = normal_frame(columns=self.cols, size=self.n)

    def _assert_valid(self, out, n):
        # Shared checks: some finite entries exist, every finite ratio is
        # positive, and the output has shape (records, d, d).
        self.assertTrue(np.isfinite(out).any())
        self.assertTrue((out[np.isfinite(out)] > 0).all())
        self.assertTrue(out.shape == (n, self.d, self.d))

    def test_single(self):
        """Checks results on single record."""
        frame = self.df.head(1).copy()
        self._assert_valid(cross_ratios(frame), frame.index.size)

    def test_multiple(self):
        """Checks results on multiple records."""
        frame = self.df.copy()
        self._assert_valid(cross_ratios(frame), frame.index.size)

    def test_contrasting(self):
        """Checks results on multiple contrasting records."""
        frame = self.df.copy()
        count = frame.index.size
        # Punch random holes to imitate contrasting analysis sets.
        frame.iloc[
            np.random.randint(1, self.n, size=2), np.random.randint(1, self.d, size=2)
        ] = np.nan
        self._assert_valid(cross_ratios(frame), count)
class TestNPCrossRatios(unittest.TestCase):
    """Tests numpy cross ratios utility."""

    def setUp(self):
        self.cols = ["SiO2", "CaO", "MgO", "FeO", "TiO2"]
        self.d = len(self.cols)
        self.n = 10
        self.df = normal_frame(columns=self.cols, size=self.n)

    def _assert_valid(self, out, n):
        # Shared checks: some finite entries exist, every finite ratio is
        # positive, and the output has shape (records, d, d).
        self.assertTrue(np.isfinite(out).any())
        self.assertTrue((out[np.isfinite(out)] > 0).all())
        self.assertTrue(out.shape == (n, self.d, self.d))

    def test_single(self):
        """Checks results on single record."""
        frame = self.df.head(1).copy()
        self._assert_valid(np_cross_ratios(frame.values), frame.index.size)

    def test_multiple(self):
        """Checks results on multiple records."""
        frame = self.df.copy()
        self._assert_valid(np_cross_ratios(frame.values), frame.index.size)

    def test_contrasting(self):
        """Checks results on multiple contrasting records."""
        frame = self.df.copy()
        count = frame.index.size
        # Punch random holes to imitate contrasting analysis sets.
        frame.iloc[
            np.random.randint(1, self.n, size=2), np.random.randint(1, self.d, size=2)
        ] = np.nan
        self._assert_valid(np_cross_ratios(frame.values), count)
class TestStandardiseAggregate(unittest.TestCase):
"""Tests pandas internal standardisation aggregation method."""
def setUp(self):
self.mcols = ["SiO2", "CaO", "MgO", "FeO", "TiO2"]
self.mdf = pd.DataFrame(
{k: v for k, v in zip(self.mcols, np.random.rand(len(self.mcols), 10))}
)
self.mdf = self.mdf.apply(lambda x: x / np.sum(x), axis="columns")
self.tcols = ["SiO2", "Ni", "Cr", "Sn"]
self.tdf = pd.DataFrame(
{k: v for k, v in zip(self.tcols, np.random.rand(len(self.tcols), 10))}
)
self.df = self.mdf.append(self.tdf, ignore_index=True, sort=False)
def test_single(self):
"""Checks results on single records."""
df = self.df.head(1).copy()
for renorm in [True, False]:
with self.subTest(renorm=renorm):
out = standardise_aggregate(df, renorm=renorm)
outvals = out.values[~np.isnan(out.values)]
dfvals = df.values[~np.isnan(df.values)]
self.assertTrue(np.allclose(outvals, dfvals))
def test_multiple_with_IS(self):
"""
Checks results on multiple records with internal standard specifed.
"""
df = self.mdf.copy()
fixed_record_idx = 0
int_std = "SiO2"
for renorm in [True, False]:
with self.subTest(renorm=renorm):
out = standardise_aggregate(
df,
int_std=int_std,
renorm=renorm,
fixed_record_idx=fixed_record_idx,
)
if not renorm:
self.assertTrue(
np.allclose(
out[int_std],
df.iloc[fixed_record_idx, df.columns.get_loc(int_std)],
)
)
def test_multiple_without_IS(self):
"""
Checks results on multiple records without internal standard specifed.
"""
df = self.mdf
fixed_record_idx = 0
for renorm in [True, False]:
with self.subTest(renorm=renorm):
out = standardise_aggregate(
df, renorm=renorm, fixed_record_idx=fixed_record_idx
)
if not renorm:
self.assertTrue(
np.isclose(
out.values, df.iloc[fixed_record_idx, :].values
).any()
)
def test_contrasting_with_IS(self):
"""Checks results on multiple contrasting records."""
# This should succeed for records which differ by all-but-one element
df = self.df
fixed_record_idx = 0
int_std = "SiO2"
for renorm in [True, False]:
with self.subTest(renorm=renorm):
out = standardise_aggregate(
df,
int_std=int_std,
renorm=renorm,
fixed_record_idx=fixed_record_idx,
)
if not renorm:
self.assertTrue(
np.allclose(
out[int_std],
df.iloc[fixed_record_idx, df.columns.get_loc(int_std)],
)
)
def test_contrasting_without_IS(self):
"""
Checks results on multiple contrasting records
without internal standard specifed.
"""
df = self.df
fixed_record_idx = 0
for renorm in [True, False]:
with self.subTest(renorm=renorm):
out = standardise_aggregate(
df, renorm=renorm, fixed_record_idx=fixed_record_idx
)
if not renorm:
self.assertTrue(
np.isclose(
out.values, df.iloc[fixed_record_idx, :].values
).any()
)
if __name__ == "__main__":
    # Run this test module's suite when executed directly as a script.
    unittest.main()
| 35.730479
| 86
| 0.567571
| 1,740
| 14,185
| 4.540805
| 0.104598
| 0.070877
| 0.058094
| 0.064928
| 0.830654
| 0.814707
| 0.799646
| 0.757246
| 0.732819
| 0.695228
| 0
| 0.011131
| 0.303349
| 14,185
| 396
| 87
| 35.820707
| 0.788403
| 0.15319
| 0
| 0.747292
| 0
| 0
| 0.015011
| 0
| 0
| 0
| 0
| 0
| 0.144404
| 1
| 0.133574
| false
| 0.00722
| 0.018051
| 0
| 0.180505
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
8bcfde0fabdf44fa913fc966d320025ac02b2f42
| 183
|
py
|
Python
|
data/micro-benchmark/mro/super_call/main.py
|
vitsalis/pycg-evaluation
|
ce37eb5668465b0c17371914e863d699826447ee
|
[
"Apache-2.0"
] | 121
|
2020-12-16T20:31:37.000Z
|
2022-03-21T20:32:43.000Z
|
data/micro-benchmark/mro/super_call/main.py
|
vitsalis/pycg-evaluation
|
ce37eb5668465b0c17371914e863d699826447ee
|
[
"Apache-2.0"
] | 24
|
2021-03-13T00:04:00.000Z
|
2022-03-21T17:28:11.000Z
|
data/micro-benchmark/mro/super_call/main.py
|
vitsalis/pycg-evaluation
|
ce37eb5668465b0c17371914e863d699826447ee
|
[
"Apache-2.0"
] | 19
|
2021-03-23T10:58:47.000Z
|
2022-03-24T19:46:50.000Z
|
class A:
    """Root of the small MRO chain; construction is a no-op."""

    def __init__(self):
        # Intentionally empty — exists only as a super() target for subclasses.
        pass
class B(A):
    """Middle link of the chain; forwards construction to A."""

    def __init__(self):
        # Delegate to the next class in the MRO (A).
        super().__init__()
class C(B):
    """Leaf of the chain; forwards construction to B."""

    def __init__(self):
        # Delegate to the next class in the MRO (B).
        super().__init__()
# Instantiate C; its __init__ chains through B to A via super().
c = C()
| 13.071429
| 26
| 0.530055
| 24
| 183
| 3.208333
| 0.375
| 0.272727
| 0.428571
| 0.311688
| 0.519481
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.311475
| 183
| 13
| 27
| 14.076923
| 0.611111
| 0
| 0
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.3
| false
| 0.1
| 0
| 0
| 0.6
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
| 0
|
0
| 7
|
4750ec1a8164c84d661f4b6fcbda93f93661ece2
| 4,507
|
py
|
Python
|
route_sidewalk/src/planning.py
|
nutorbit/route_sidewalk
|
df32df5a0c637c129efc82d15eef62ac7d0daf13
|
[
"MIT"
] | null | null | null |
route_sidewalk/src/planning.py
|
nutorbit/route_sidewalk
|
df32df5a0c637c129efc82d15eef62ac7d0daf13
|
[
"MIT"
] | null | null | null |
route_sidewalk/src/planning.py
|
nutorbit/route_sidewalk
|
df32df5a0c637c129efc82d15eef62ac7d0daf13
|
[
"MIT"
] | null | null | null |
from collections import deque
from heapq import heappush, heappop
def cal_weight(from_x, from_y, to_x, to_y):
    """
    Euclidean distance between (from_x, from_y) and (to_x, to_y).

    Args:
        from_x: x coordinate of the start point
        from_y: y coordinate of the start point
        to_x: x coordinate of the end point
        to_y: y coordinate of the end point

    Returns:
        distance
    """
    dx = from_x - to_x
    dy = from_y - to_y
    # Manhattan distance (abs(dx) + abs(dy)) was tried before; euclidean kept.
    return (dx * dx + dy * dy) ** 0.5
def find_closest_road(data, from_):
    """
    Find the closest cell whose value differs from the start cell,
    using Breadth First Search (BFS) over the 8-connected grid.

    Args:
        data: 2D array to search (indexed as data[x, y])
        from_: [x, y] starting point for search

    Returns:
        List of (x, y) tuples tracing the path from ``from_`` to the first
        differing cell, or ``None`` when every reachable cell matches.
    """
    # deque gives O(1) popleft; list.pop(0) shifts the whole list each time.
    queue = deque([(from_, [tuple(from_)])])
    visited = {tuple(from_)}
    # The start-cell value is loop-invariant; read it once.
    start_value = data[from_[0], from_[1]]
    while queue:
        # pop position & paths
        current, paths = queue.popleft()
        if data[current[0], current[1]] != start_value:  # found a road cell
            return paths
        # 8-connected neighbourhood.
        for (dix, diy) in [(-1, -1), (-1, 0), (0, -1), (1, 1), (1, 0), (0, 1), (1, -1), (-1, 1)]:
            to_ = (
                current[0] + dix,
                current[1] + diy
            )
            if to_ not in visited and 0 <= to_[0] < data.shape[0] and 0 <= to_[1] < data.shape[1]:
                # add to queue
                queue.append((to_, paths + [to_]))
                visited.add(to_)
def move_point_inside_road(data, from_):
    """
    Find the closest cell with value 255 ("inside the road"),
    using Breadth First Search (BFS) over the 8-connected grid.

    Args:
        data: 2D array to search (indexed as data[x, y])
        from_: [x, y] starting point for search

    Returns:
        List of (x, y) tuples tracing the path from ``from_`` to the first
        cell equal to 255, or ``None`` when no such cell is reachable.
    """
    # deque gives O(1) popleft; list.pop(0) shifts the whole list each time.
    queue = deque([(from_, [tuple(from_)])])
    visited = {tuple(from_)}
    while queue:
        # pop position & paths
        current, paths = queue.popleft()
        if data[current[0], current[1]] == 255:  # if found road then return paths
            return paths
        # 8-connected neighbourhood.
        for (dix, diy) in [(-1, -1), (-1, 0), (0, -1), (1, 1), (1, 0), (0, 1), (1, -1), (-1, 1)]:
            to_ = (
                current[0] + dix,
                current[1] + diy
            )
            if to_ not in visited and 0 <= to_[0] < data.shape[0] and 0 <= to_[1] < data.shape[1]:
                # add to queue
                queue.append((to_, paths + [to_]))
                visited.add(to_)
def route_condition(data, from_, to_, v):
    """
    Best-first route between two points, restricted to cells equal to ``v``.

    Args:
        data: array of map
        from_: (x, y) coordinate
        to_: (x, y) coordinate
        v: value a step is allowed to land on

    Returns:
        list of path to target, or None when the target is unreachable
    """
    frontier = []
    # Heap entries are (weight, x, y, path-so-far).
    heappush(frontier, (0, from_[0], from_[1], [(from_[0], from_[1])]))
    visited = {(from_[0], from_[1])}
    while frontier:
        weight, cx, cy, path = heappop(frontier)
        if cx == to_[0] and cy == to_[1]:  # reached the target
            return path
        for dix in range(-1, 2):
            for diy in range(-1, 2):
                nx = cx + dix
                ny = cy + diy
                inside = 0 <= nx < data.shape[0] and 0 <= ny < data.shape[1]
                if (nx, ny) in visited or not inside or data[nx, ny] != v:
                    continue
                # Distance-to-goal plus a heavy per-step penalty keeps paths short.
                weight = cal_weight(nx, ny, to_[0], to_[1]) + len(path) * 1000
                heappush(frontier, (weight, nx, ny, path + [(nx, ny)]))
                visited.add((nx, ny))
def route(data, from_, to_):
    """
    Best-first route between two points with no cell-value restriction.

    Args:
        data: array of map
        from_: (x, y) coordinate
        to_: (x, y) coordinate

    Returns:
        list of path to target, or None when the target is unreachable
    """
    frontier = []
    # Heap entries are (weight, x, y, path-so-far).
    heappush(frontier, (0, from_[0], from_[1], [(from_[0], from_[1])]))
    visited = {(from_[0], from_[1])}
    while frontier:
        weight, cx, cy, path = heappop(frontier)
        if cx == to_[0] and cy == to_[1]:  # reached the target
            return path
        for dix in range(-1, 2):
            for diy in range(-1, 2):
                nx = cx + dix
                ny = cy + diy
                inside = 0 <= nx < data.shape[0] and 0 <= ny < data.shape[1]
                if (nx, ny) in visited or not inside:
                    continue
                # Distance-to-goal plus a heavy per-step penalty keeps paths short.
                weight = cal_weight(nx, ny, to_[0], to_[1]) + len(path) * 1000
                heappush(frontier, (weight, nx, ny, path + [(nx, ny)]))
                visited.add((nx, ny))
| 30.248322
| 103
| 0.490126
| 623
| 4,507
| 3.351525
| 0.123596
| 0.030172
| 0.028736
| 0.034483
| 0.824713
| 0.824713
| 0.824713
| 0.824713
| 0.824713
| 0.824713
| 0
| 0.038259
| 0.367872
| 4,507
| 148
| 104
| 30.452703
| 0.69463
| 0.22099
| 0
| 0.810811
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.067568
| false
| 0
| 0.013514
| 0
| 0.148649
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
4758f94b26862bbe4d311fbeca5395e4093d6f7b
| 1,282
|
py
|
Python
|
tests/empire/test_empire_scrape.py
|
magnublo/msc-darkweb-scraping
|
7cfb20d9013534d2ad71c388ee59f70b8450773c
|
[
"MIT"
] | null | null | null |
tests/empire/test_empire_scrape.py
|
magnublo/msc-darkweb-scraping
|
7cfb20d9013534d2ad71c388ee59f70b8450773c
|
[
"MIT"
] | null | null | null |
tests/empire/test_empire_scrape.py
|
magnublo/msc-darkweb-scraping
|
7cfb20d9013534d2ad71c388ee59f70b8450773c
|
[
"MIT"
] | null | null | null |
from unittest import TestCase
from src.empire.empire_scrape import _get_final_quantity_in_stock
class TestGetFinalQuantityInStock(TestCase):
    """Tests _get_final_quantity_in_stock over None/int input combinations."""

    def test_should_return_0(self):
        # first=0, second=None
        self.assertEqual(0, _get_final_quantity_in_stock(0, None))

    def test_should_return_12(self):
        # first=None, second=12
        self.assertEqual(12, _get_final_quantity_in_stock(None, 12))

    def test_should_return_6(self):
        # first=6, second=12
        self.assertEqual(6, _get_final_quantity_in_stock(6, 12))

    def test_should_return_none(self):
        # both unknown
        self.assertEqual(None, _get_final_quantity_in_stock(None, None))
| 32.871795
| 113
| 0.780031
| 178
| 1,282
| 5
| 0.146067
| 0.325843
| 0.488764
| 0.292135
| 0.85618
| 0.747191
| 0.747191
| 0.747191
| 0.61573
| 0.61573
| 0
| 0.013245
| 0.175507
| 1,282
| 38
| 114
| 33.736842
| 0.828761
| 0
| 0
| 0.434783
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.173913
| 1
| 0.173913
| false
| 0
| 0.086957
| 0
| 0.304348
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
477402d59483741bc2ba9815642155df7c49a132
| 20,246
|
py
|
Python
|
src/models/srl.py
|
diegma/span-gcn
|
b3abf2950055886d148004ddbc8b7edf71c99420
|
[
"MIT"
] | 9
|
2020-09-28T12:51:22.000Z
|
2021-12-06T03:09:31.000Z
|
src/models/srl.py
|
diegma/span-gcn
|
b3abf2950055886d148004ddbc8b7edf71c99420
|
[
"MIT"
] | 1
|
2021-03-30T08:19:03.000Z
|
2021-03-30T08:19:03.000Z
|
src/models/srl.py
|
diegma/span-gcn
|
b3abf2950055886d148004ddbc8b7edf71c99420
|
[
"MIT"
] | null | null | null |
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
from torch.autograd import Variable
import numpy as np
from models.gcn import GCNLayer
from models.bilinear_scorer import BilinearScorer
from models.custom_allennlp.stacked_alternating_lstm import StackedAlternatingLstm
from models.custom_allennlp.elmo import Elmo
class SRL(nn.Module):
    """Semantic role labelling model.

    Pipeline visible in forward(): (ELMo | BERT | fixed) token embeddings
    concatenated with a predicate-indicator embedding -> stacked alternating
    LSTM -> optional syntax GCN layers over a word/constituent graph (with an
    optional second LSTM on top) -> bilinear predicate/argument scorer that
    produces per-token tag scores.
    """

    def __init__(
        self,
        hidden_dim,
        tagset_size,
        num_layers,
        w_c_vocab_size,
        c_c_vocab_size,
        use_syntax,
        eln,
        num_layers_top,
        use_elmo,
        use_bert,
        params,
        gpu_id=-1,
    ):
        """Build the model.

        Args:
            hidden_dim: LSTM/GCN hidden size.
            tagset_size: number of output role labels.
            num_layers: layers in the base stacked LSTM.
            w_c_vocab_size: edge-label vocabulary size for the word/constituent GCNs.
            c_c_vocab_size: edge-label vocabulary size for the constituent/constituent GCN.
            use_syntax: if truthy, build and apply the three GCN layers.
            eln: if truthy, layer-normalise input embeddings in forward().
            num_layers_top: layers in the optional LSTM applied after the GCNs.
            use_elmo: use ELMo contextual embeddings.
            use_bert: use pre-computed BERT embeddings (checked after use_elmo).
            params: hyper-parameter namespace (gcn_dropout, emb_dropout,
                emb_dim, non_linearity, bilinear_dropout, edge_dropout, ...).
            gpu_id: -1 for CPU; any value > -1 switches forward() to CUDA tensors.
        """
        super(SRL, self).__init__()
        # gpu_id > -1 selects the torch.cuda tensor path in forward().
        if gpu_id > -1:
            self.use_gpu = True
        else:
            self.use_gpu = False
        self.hidden_dim = hidden_dim
        self.num_layers = num_layers
        self.num_layers_top = num_layers_top
        self.eln = eln
        self.use_elmo = use_elmo
        self.use_bert = use_bert
        self.params = params
        self.dropout = nn.Dropout(p=params.gcn_dropout)
        self.embedding_dropout = nn.Dropout(p=params.emb_dropout)
        # fixed_dim is the size of the pre-computed embedding part of the input.
        if self.use_elmo:
            weight_file = "https://s3-us-west-2.amazonaws.com/allennlp/models/elmo/2x4096_512_2048cnn_2xhighway_5.5B/elmo_2x4096_512_2048cnn_2xhighway_5.5B_weights.hdf5"
            options_file = "https://s3-us-west-2.amazonaws.com/allennlp/models/elmo/2x4096_512_2048cnn_2xhighway_5.5B/elmo_2x4096_512_2048cnn_2xhighway_5.5B_options.json"
            fixed_dim = 1024
            self.elmo = Elmo(
                options_file, weight_file, 1, dropout=0, do_layer_norm=False
            )
            # +100 for the fixed embeddings concatenated in forward()
            # (only when params.elmo_proj is falsy — TODO confirm intent).
            fixed_dim += 100
        elif self.use_bert:
            fixed_dim = 768
        else:
            fixed_dim = 100
        embedding_dim = self.params.emb_dim
        # Two-entry embedding for the binary predicate-indicator flag.
        self.indicator_embeddings = nn.Embedding(2, embedding_dim)
        self.tagset_size = tagset_size
        self.use_syntax = use_syntax
        self.num_layers_top = num_layers_top
        gcn_type = GCNLayer
        # Shared non-linearity used by the GCNs and the scorer projections.
        if self.params.non_linearity == "relu":
            self.non_linearity = nn.ReLU()
        elif self.params.non_linearity == "tanh":
            self.non_linearity = nn.Tanh()
        elif self.params.non_linearity == "leakyrelu":
            self.non_linearity = nn.LeakyReLU()
        elif self.params.non_linearity == "celu":
            self.non_linearity = nn.CELU()
        elif self.params.non_linearity == "selu":
            self.non_linearity = nn.SELU()
        else:
            raise NotImplementedError
        self.lstm = StackedAlternatingLstm(
            fixed_dim + embedding_dim,
            hidden_dim,
            num_layers=num_layers,
            recurrent_dropout_probability=0.1,
        )
        if self.use_syntax:
            if num_layers_top > 0:
                # Extra LSTM applied after the GCN block in forward().
                self.lstm_top = StackedAlternatingLstm(
                    hidden_dim,
                    hidden_dim,
                    num_layers=num_layers_top,
                    recurrent_dropout_probability=0.1,
                )
                self.hidden2predicate = nn.Linear(hidden_dim, hidden_dim)
                self.hidden2argument = nn.Linear(hidden_dim, hidden_dim)
                self.bilinear_scorer = BilinearScorer(
                    hidden_dim, tagset_size, params.bilinear_dropout
                )
            else:
                # Same scorer stack, but no top LSTM.
                self.hidden2predicate = nn.Linear(hidden_dim, hidden_dim)
                self.hidden2argument = nn.Linear(hidden_dim, hidden_dim)
                self.bilinear_scorer = BilinearScorer(
                    hidden_dim, tagset_size, params.bilinear_dropout
                )
            # Three gated, residual graph convolutions: word->constituent,
            # constituent->word and constituent->constituent arcs.
            self.gcn_w_c = gcn_type(
                hidden_dim,
                hidden_dim,
                w_c_vocab_size,
                in_arcs=True,
                out_arcs=True,
                use_gates=True,
                batch_first=True,
                residual=True,
                no_loop=True,
                dropout=self.params.gcn_dropout,
                non_linearity=self.non_linearity,
                edge_dropout=self.params.edge_dropout,
            )
            self.gcn_c_w = gcn_type(
                hidden_dim,
                hidden_dim,
                w_c_vocab_size,
                in_arcs=True,
                out_arcs=True,
                use_gates=True,
                batch_first=True,
                residual=True,
                no_loop=True,
                dropout=self.params.gcn_dropout,
                non_linearity=self.non_linearity,
                edge_dropout=self.params.edge_dropout,
            )
            # Note: only this layer keeps self-loops (no_loop=False).
            self.gcn_c_c = gcn_type(
                hidden_dim,
                hidden_dim,
                c_c_vocab_size,
                in_arcs=True,
                out_arcs=True,
                use_gates=True,
                batch_first=True,
                residual=True,
                no_loop=False,
                dropout=self.params.gcn_dropout,
                non_linearity=self.non_linearity,
                edge_dropout=self.params.edge_dropout,
            )
        else:
            self.hidden2predicate = nn.Linear(hidden_dim, hidden_dim)
            self.hidden2argument = nn.Linear(hidden_dim, hidden_dim)
            self.bilinear_scorer = BilinearScorer(
                hidden_dim, tagset_size, params.bilinear_dropout
            )
        if self.eln:
            self.layernorm = nn.LayerNorm(fixed_dim)

    def forward(
        self,
        sentence,
        predicate_flags,
        sent_mask,
        lengths,
        fixed_embs,
        constituents,
        GCN_w_c,
        GCN_c_w,
        GCN_c_c,
        mask_const_batch,
        predicate_index,
        elmo_character_ids,
        bert_embs,
    ):
        """Score role labels for every token of each sentence.

        Args:
            sentence: token ids; not referenced in this method (embeddings
                arrive pre-computed via the other arguments).
            predicate_flags: per-token 0/1 flags, embedded via
                ``indicator_embeddings``.
            sent_mask: per-token mask, also used to mask the layer norm.
            lengths: per-sentence lengths, used to sort/pack for the LSTM.
            fixed_embs: pre-computed fixed embeddings.
            constituents: constituent representations concatenated with the
                LSTM output before the GCNs.
            GCN_w_c, GCN_c_w, GCN_c_c: 7-tuples of (arc-in, arc-out, label-in,
                label-out, mask-in, mask-out, loop-mask) inputs for each GCN.
            mask_const_batch: mask for the constituent positions.
            predicate_index: flat indices into [b*t] used to select one LSTM
                state per (batch, token) slot — presumably each token's
                predicate position; confirm against the caller.
            elmo_character_ids: character ids for ELMo (when use_elmo).
            bert_embs: pre-computed BERT embeddings (when use_bert).

        Returns:
            Tensor of tag scores, shape [b, t, tagset_size].
        """
        # Select the input embedding source.
        if self.use_elmo:
            embeds = self.elmo(elmo_character_ids)["elmo_representations"][0]
            if not self.params.elmo_proj:
                embeds = torch.cat([embeds, fixed_embs], dim=2)
        elif self.use_bert:
            embeds = bert_embs
        else:
            embeds = fixed_embs
        if self.eln:
            # Layer-normalise the masked embeddings.
            embeds = self.layernorm(embeds * sent_mask.unsqueeze(2))
        embeds = self.embedding_dropout(embeds)
        # Append the predicate-indicator embedding to every token.
        embeds = torch.cat(
            (embeds, self.indicator_embeddings(predicate_flags.long())), 2
        )
        b, t, e = embeds.data.shape
        # Sort the batch by decreasing length (required by pack_padded_sequence).
        sent_len = torch.sort(lengths, descending=True)[0]
        idx_sort = torch.argsort(-lengths)
        if self.use_gpu:
            embeds = embeds.index_select(
                0, Variable(torch.cuda.LongTensor(idx_sort.cuda()))
            )
        else:
            embeds = embeds.index_select(0, Variable(torch.LongTensor(idx_sort)))
        packed = pack_padded_sequence(embeds, sent_len, batch_first=True)
        lstm_out, _ = self.lstm(packed)
        lstm_out, _ = pad_packed_sequence(lstm_out, batch_first=True)  # [b, t, h]
        # Un-sort by length
        idx_unsort = torch.argsort(idx_sort)
        if self.use_gpu:
            lstm_out = lstm_out.index_select(
                0, Variable(torch.cuda.LongTensor(idx_unsort.cuda()))
            )
        else:
            lstm_out = lstm_out.index_select(0, Variable(torch.LongTensor(idx_unsort)))
        if self.use_syntax:
            # Here I must concatenate the constituents with the lstm_out
            gcn_in = torch.cat([lstm_out, constituents], dim=1)
            mask_all = torch.cat([sent_mask, mask_const_batch], dim=1)
            # Apply graph conv
            adj_arc_in_w_c, adj_arc_out_w_c, adj_lab_in_w_c, adj_lab_out_w_c, mask_in_w_c, mask_out_w_c, mask_loop_w_c = (
                GCN_w_c
            )
            adj_arc_in_c_w, adj_arc_out_c_w, adj_lab_in_c_w, adj_lab_out_c_w, mask_in_c_w, mask_out_c_w, mask_loop_c_w = (
                GCN_c_w
            )
            adj_arc_in_c_c, adj_arc_out_c_c, adj_lab_in_c_c, adj_lab_out_c_c, mask_in_c_c, mask_out_c_c, mask_loop_c_c = (
                GCN_c_c
            )
            # GCN order: word->constituent, constituent->constituent,
            # constituent->word.
            gcn_out = self.gcn_w_c(
                gcn_in,
                adj_arc_in_w_c,
                adj_arc_out_w_c,
                adj_lab_in_w_c,
                adj_lab_out_w_c,
                mask_in_w_c,
                mask_out_w_c,
                mask_loop_w_c,
                mask_all,
            )
            gcn_out = self.gcn_c_c(
                gcn_out,
                adj_arc_in_c_c,
                adj_arc_out_c_c,
                adj_lab_in_c_c,
                adj_lab_out_c_c,
                mask_in_c_c,
                mask_out_c_c,
                mask_loop_c_c,
                mask_all,
            )
            gcn_out = self.gcn_c_w(
                gcn_out,
                adj_arc_in_c_w,
                adj_arc_out_c_w,
                adj_lab_in_c_w,
                adj_lab_out_c_w,
                mask_in_c_w,
                mask_out_c_w,
                mask_loop_c_w,
                mask_all,
            )
            # Take back the lstm out only
            lstm_out = gcn_out.narrow(1, 0, t)
            if self.num_layers_top > 0:
                # Re-sort, run the top LSTM over the GCN output, un-sort again.
                if self.use_gpu:
                    lstm_out = lstm_out.index_select(
                        0, Variable(torch.cuda.LongTensor(idx_sort.cuda()))
                    )
                else:
                    lstm_out = lstm_out.index_select(
                        0, Variable(torch.LongTensor(idx_sort))
                    )
                packed = pack_padded_sequence(lstm_out, sent_len, batch_first=True)
                lstm_out_, _ = self.lstm_top(packed)
                lstm_out_, _ = pad_packed_sequence(
                    lstm_out_, batch_first=True
                )  # [b, t, h]
                # Un-sort by length
                if self.use_gpu:
                    lstm_out_ = lstm_out_.index_select(
                        0, Variable(torch.cuda.LongTensor(idx_unsort.cuda()))
                    )
                else:
                    lstm_out_ = lstm_out_.index_select(
                        0, Variable(torch.LongTensor(idx_unsort))
                    )
                lstm_out = lstm_out_
        # Select one representation per (b, t) slot from the flattened LSTM
        # output using predicate_index, then score predicate/argument pairs.
        lstm_out_view = lstm_out.contiguous().view(b * t, -1)
        predicate_index = predicate_index.view(b * t)
        predicates_repr = lstm_out_view.index_select(0, predicate_index).view(b, t, -1)
        pred_repr = self.non_linearity(
            self.hidden2predicate(self.dropout(predicates_repr))
        )
        arg_repr = self.non_linearity(self.hidden2argument(self.dropout(lstm_out)))
        tag_scores = self.bilinear_scorer(pred_repr, arg_repr)  # [b*t, label_size]
        return tag_scores.view(b, t, self.tagset_size)
class SRL_Framenet(nn.Module):
def __init__(
self,
hidden_dim,
tagset_size,
num_layers,
w_c_vocab_size,
c_c_vocab_size,
use_syntax,
eln,
num_layers_top,
params,
gpu_id=-1,
):
super(SRL_Framenet, self).__init__()
if gpu_id > -1:
self.use_gpu = True
else:
self.use_gpu = False
self.hidden_dim = hidden_dim
self.num_layers = num_layers
self.num_layers_top = num_layers_top
self.eln = eln
self.params = params
self.dropout = nn.Dropout(p=params.gcn_dropout)
fixed_dim = 100
embedding_dim = self.params.emb_dim
self.indicator_embeddings = nn.Embedding(2, embedding_dim)
self.tagset_size = tagset_size
self.use_syntax = use_syntax
self.num_layers_top = num_layers_top
gcn_type = GCNLayer
self.lstm = StackedAlternatingLstm(
fixed_dim + embedding_dim,
hidden_dim,
num_layers=num_layers,
recurrent_dropout_probability=0.1,
)
if self.use_syntax:
if num_layers_top > 0:
if params.alter_top:
self.lstm_top = StackedAlternatingLstm(
hidden_dim,
hidden_dim,
num_layers=num_layers_top,
recurrent_dropout_probability=0.1,
)
self.hidden2predicate = nn.Linear(hidden_dim, hidden_dim)
self.hidden2argument = nn.Linear(hidden_dim, hidden_dim)
self.bilinear_scorer = BilinearScorer(
hidden_dim, tagset_size, params.bilinear_dropout
)
else:
self.lstm_top = nn.LSTM(
hidden_dim,
hidden_dim,
num_layers=num_layers_top,
batch_first=True,
bidirectional=True,
dropout=self.params.gcn_dropout,
)
self.hidden2predicate = nn.Linear(2 * hidden_dim, hidden_dim)
self.hidden2argument = nn.Linear(2 * hidden_dim, hidden_dim)
self.bilinear_scorer = BilinearScorer(
hidden_dim, tagset_size, params.bilinear_dropout
)
else:
self.hidden2predicate = nn.Linear(2 * hidden_dim, hidden_dim)
self.hidden2argument = nn.Linear(2 * hidden_dim, hidden_dim)
self.bilinear_scorer = BilinearScorer(
hidden_dim, tagset_size, params.bilinear_dropout
)
self.gcn_w_c = gcn_type(
hidden_dim,
hidden_dim,
w_c_vocab_size,
in_arcs=True,
out_arcs=True,
use_gates=True,
batch_first=True,
residual=True,
no_loop=True,
dropout=self.params.gcn_dropout,
)
self.gcn_c_w = gcn_type(
hidden_dim,
hidden_dim,
w_c_vocab_size,
in_arcs=True,
out_arcs=True,
use_gates=True,
batch_first=True,
residual=True,
no_loop=True,
dropout=self.params.gcn_dropout,
)
self.gcn_c_c = gcn_type(
hidden_dim,
hidden_dim,
c_c_vocab_size,
in_arcs=True,
out_arcs=True,
use_gates=True,
batch_first=True,
residual=True,
no_loop=False,
dropout=self.params.gcn_dropout,
)
else:
self.hidden2predicate = nn.Linear(hidden_dim, hidden_dim)
self.hidden2argument = nn.Linear(hidden_dim, hidden_dim)
self.bilinear_scorer = BilinearScorer(
hidden_dim, tagset_size, params.bilinear_dropout
)
if self.eln:
self.layernorm = nn.LayerNorm(fixed_dim)
    def forward(
        self,
        sentence,
        predicate_flags,
        sent_mask,
        lengths,
        fixed_embs,
        constituents,
        GCN_w_c,
        GCN_c_w,
        GCN_c_c,
        mask_const_batch,
        predicate_index,
        softmax_constraints,
        frame_emb_batch,
    ):
        """Score role labels for every (predicate, token) pair of a batch.

        Pipeline: fixed embeddings (optionally layer-normalised and dropped
        out) are concatenated with predicate-indicator embeddings, run
        through the stacked LSTM (packed by descending length), optionally
        refined by word<->constituent GCN passes plus a second LSTM on top,
        and finally scored with the bilinear predicate/argument scorer.

        NOTE(review): ``sentence`` and ``frame_emb_batch`` are never
        referenced in this body — presumably kept for interface
        compatibility with sibling models; confirm before removing.

        Args (shapes are assumptions to confirm against callers):
            predicate_flags: [b, t] 0/1 flags marking each row's predicate.
            sent_mask: [b, t] padding mask over word positions.
            lengths: [b] true sentence lengths, used for packing.
            fixed_embs: [b, t, fixed_dim] pretrained word embeddings.
            constituents: constituent-node representations appended to the
                word sequence for the GCN pass (dim 1 concatenation).
            GCN_w_c / GCN_c_w / GCN_c_c: 7-tuples of arc-index, arc-label
                and mask tensors for word->const, const->word and
                const->const edges respectively.
            mask_const_batch: mask over constituent nodes.
            predicate_index: flat [b*t] indices selecting each row's
                predicate position from the flattened LSTM output.
            softmax_constraints: 0/1 mask over labels; disallowed labels
                are filled with -1e13 before the caller's softmax.

        Returns:
            Tensor [b, t, tagset_size] of label scores.
        """
        embeds = fixed_embs
        if self.eln:
            # Layer-normalise real tokens only (padding zeroed by the mask).
            embeds = self.layernorm(embeds * sent_mask.unsqueeze(2))
        if self.params.emb_dropout:
            embeds = self.dropout(embeds)
        # Append the binary predicate-indicator embedding to every token.
        embeds = torch.cat(
            (embeds, self.indicator_embeddings(predicate_flags.long())), 2
        )
        b, t, e = embeds.data.shape
        # Sort by length (keep idx) — pack_padded_sequence requires the
        # batch ordered by descending length.
        sent_len = torch.sort(lengths, descending=True)[0]
        idx_sort = torch.argsort(-lengths)
        if self.use_gpu:
            embeds = embeds.index_select(
                0, Variable(torch.cuda.LongTensor(idx_sort.cuda()))
            )
        else:
            embeds = embeds.index_select(0, Variable(torch.LongTensor(idx_sort)))
        packed = pack_padded_sequence(embeds, sent_len, batch_first=True)
        lstm_out, _ = self.lstm(packed)
        lstm_out, _ = pad_packed_sequence(lstm_out, batch_first=True)  # [b, t, h]
        # Un-sort by length: argsort of the sort permutation restores the
        # original batch order.
        idx_unsort = np.argsort(idx_sort)
        if self.use_gpu:
            lstm_out = lstm_out.index_select(
                0, Variable(torch.cuda.LongTensor(idx_unsort.cuda()))
            )
        else:
            lstm_out = lstm_out.index_select(0, Variable(torch.LongTensor(idx_unsort)))
        if self.use_syntax:
            # Here I must concatenate the constituents with the lstm_out
            # so words and constituent nodes form one graph over dim 1.
            gcn_in = torch.cat([lstm_out, constituents], dim=1)
            mask_all = torch.cat([sent_mask, mask_const_batch], dim=1)
            # Apply graph conv. Each GCN_* argument unpacks into in/out arc
            # indices, in/out arc labels, in/out masks and a self-loop mask.
            adj_arc_in_w_c, adj_arc_out_w_c, adj_lab_in_w_c, adj_lab_out_w_c, mask_in_w_c, mask_out_w_c, mask_loop_w_c = (
                GCN_w_c
            )
            adj_arc_in_c_w, adj_arc_out_c_w, adj_lab_in_c_w, adj_lab_out_c_w, mask_in_c_w, mask_out_c_w, mask_loop_c_w = (
                GCN_c_w
            )
            adj_arc_in_c_c, adj_arc_out_c_c, adj_lab_in_c_c, adj_lab_out_c_c, mask_in_c_c, mask_out_c_c, mask_loop_c_c = (
                GCN_c_c
            )
            # Message-passing order: word->constituent, then
            # constituent->constituent, then constituent->word.
            gcn_out = self.gcn_w_c(
                gcn_in,
                adj_arc_in_w_c,
                adj_arc_out_w_c,
                adj_lab_in_w_c,
                adj_lab_out_w_c,
                mask_in_w_c,
                mask_out_w_c,
                mask_loop_w_c,
                mask_all,
            )
            gcn_out = self.gcn_c_c(
                gcn_out,
                adj_arc_in_c_c,
                adj_arc_out_c_c,
                adj_lab_in_c_c,
                adj_lab_out_c_c,
                mask_in_c_c,
                mask_out_c_c,
                mask_loop_c_c,
                mask_all,
            )
            gcn_out = self.gcn_c_w(
                gcn_out,
                adj_arc_in_c_w,
                adj_arc_out_c_w,
                adj_lab_in_c_w,
                adj_lab_out_c_w,
                mask_in_c_w,
                mask_out_c_w,
                mask_loop_c_w,
                mask_all,
            )
            # Take back the lstm out only: keep the first t positions
            # (words) and drop the constituent nodes.
            lstm_out = gcn_out.narrow(1, 0, t)
            if self.num_layers_top > 0:
                # Optional second LSTM over the GCN output: re-sort, pack,
                # run, unpack, then restore the original batch order again.
                if self.use_gpu:
                    lstm_out = lstm_out.index_select(
                        0, Variable(torch.cuda.LongTensor(idx_sort.cuda()))
                    )
                else:
                    lstm_out = lstm_out.index_select(
                        0, Variable(torch.LongTensor(idx_sort))
                    )
                packed = pack_padded_sequence(lstm_out, sent_len, batch_first=True)
                lstm_out, _ = self.lstm_top(packed)
                lstm_out, _ = pad_packed_sequence(
                    lstm_out, batch_first=True
                )  # [b, t, h]
                # Un-sort by length
                if self.use_gpu:
                    lstm_out = lstm_out.index_select(
                        0, Variable(torch.cuda.LongTensor(idx_unsort.cuda()))
                    )
                else:
                    lstm_out = lstm_out.index_select(
                        0, Variable(torch.LongTensor(idx_unsort))
                    )
        # Flatten to [b*t, h] so predicate rows can be gathered by flat index.
        lstm_out_view = lstm_out.contiguous().view(b * t, -1)
        predicate_index = predicate_index.view(b * t)
        predicates_repr = lstm_out_view.index_select(0, predicate_index).view(b, t, -1)
        pred_repr = F.relu(self.hidden2predicate(self.dropout(predicates_repr)))
        arg_repr = F.relu(self.hidden2argument(self.dropout(lstm_out)))
        tag_scores = self.bilinear_scorer(pred_repr, arg_repr)  # [b*t, label_size]
        tag_scores = tag_scores.view(b, t, -1)
        # Fill disallowed labels with a large negative score so they get
        # ~zero probability after the caller's softmax.
        tag_scores = tag_scores.masked_fill(
            (1 - softmax_constraints.view(b, 1, -1)).byte(), float("-1e13")
        )  # 1e-13)
        tag_scores = tag_scores.view(b * t, -1)
        return tag_scores.view(b, t, self.tagset_size)
| 32.866883
| 170
| 0.534772
| 2,395
| 20,246
| 4.122756
| 0.08309
| 0.055601
| 0.032813
| 0.045574
| 0.876342
| 0.860036
| 0.855985
| 0.852846
| 0.837249
| 0.82621
| 0
| 0.013941
| 0.390596
| 20,246
| 615
| 171
| 32.920325
| 0.786351
| 0.019164
| 0
| 0.76908
| 0
| 0.003914
| 0.016735
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.007828
| false
| 0
| 0.019569
| 0
| 0.035225
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
4777e199f387ab79fe32bc29c1da1101931c482a
| 104,508
|
py
|
Python
|
msgraph-cli-extensions/v1_0/usersactions_v1_0/azext_usersactions_v1_0/generated/commands.py
|
thewahome/msgraph-cli
|
33127d9efa23a0e5f5303c93242fbdbb73348671
|
[
"MIT"
] | null | null | null |
msgraph-cli-extensions/v1_0/usersactions_v1_0/azext_usersactions_v1_0/generated/commands.py
|
thewahome/msgraph-cli
|
33127d9efa23a0e5f5303c93242fbdbb73348671
|
[
"MIT"
] | null | null | null |
msgraph-cli-extensions/v1_0/usersactions_v1_0/azext_usersactions_v1_0/generated/commands.py
|
thewahome/msgraph-cli
|
33127d9efa23a0e5f5303c93242fbdbb73348671
|
[
"MIT"
] | null | null | null |
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
# pylint: disable=too-many-statements
# pylint: disable=too-many-locals
# pylint: disable=bad-continuation
# pylint: disable=line-too-long
from msgraph.cli.core.commands import CliCommandType
from azext_usersactions_v1_0.generated._client_factory import (
cf_user_calendar_calendar_view_attachment,
cf_user_calendar_calendar_view_calendar,
cf_user_calendar_calendar_view_instance,
cf_user_calendar_calendar_view,
cf_user_calendar_event_attachment,
cf_user_calendar_event_calendar,
cf_user_calendar_event_instance,
cf_user_calendar_event,
cf_user_calendar,
cf_user_calendar_group_calendar_calendar_view_attachment,
cf_user_calendar_group_calendar_calendar_view_calendar,
cf_user_calendar_group_calendar_calendar_view_instance,
cf_user_calendar_group_calendar_calendar_view,
cf_user_calendar_group_calendar_event_attachment,
cf_user_calendar_group_calendar_event_calendar,
cf_user_calendar_group_calendar_event_instance,
cf_user_calendar_group_calendar_event,
cf_user_calendar_group_calendar,
cf_user_calendar_calendar_view_attachment,
cf_user_calendar_calendar_view_calendar,
cf_user_calendar_calendar_view_instance,
cf_user_calendar_calendar_view,
cf_user_calendar_event_attachment,
cf_user_calendar_event_calendar,
cf_user_calendar_event_instance,
cf_user_calendar_event,
cf_user_calendar,
cf_user_calendar_view_attachment,
cf_user_calendar_view_calendar_calendar_view,
cf_user_calendar_view_calendar_event,
cf_user_calendar_view_calendar,
cf_user_calendar_view_instance,
cf_user_calendar_view,
cf_user_event_attachment,
cf_user_event_calendar_calendar_view,
cf_user_event_calendar_event,
cf_user_event_calendar,
cf_user_event_instance,
cf_user_event,
cf_user_mail_folder_child_folder,
cf_user_mail_folder_message_attachment,
cf_user_mail_folder_message,
cf_user_mail_folder,
cf_user_managed_device,
cf_user_message_attachment,
cf_user_message,
cf_user,
cf_user_onenote_notebook,
cf_user_onenote_notebook_section_group_parent_notebook,
cf_user_onenote_notebook_section_group_section,
cf_user_onenote_notebook_section_group_section_page,
cf_user_onenote_notebook_section_group_section_page_parent_notebook,
cf_user_onenote_notebook_section_group_section_page_parent_section,
cf_user_onenote_notebook_section_group_section_parent_notebook,
cf_user_onenote_notebook_section,
cf_user_onenote_notebook_section_page,
cf_user_onenote_notebook_section_page_parent_notebook,
cf_user_onenote_notebook_section_page_parent_section,
cf_user_onenote_notebook_section_parent_notebook,
cf_user_onenote_notebook_section_parent_section_group_parent_notebook,
cf_user_onenote_notebook_section_parent_section_group_section,
cf_user_onenote_page,
cf_user_onenote_page_parent_notebook,
cf_user_onenote_page_parent_notebook_section_group_parent_notebook,
cf_user_onenote_page_parent_notebook_section_group_section,
cf_user_onenote_page_parent_notebook_section_group_section_page,
cf_user_onenote_page_parent_notebook_section_group_section_parent_notebook,
cf_user_onenote_page_parent_notebook_section,
cf_user_onenote_page_parent_notebook_section_page,
cf_user_onenote_page_parent_notebook_section_parent_notebook,
cf_user_onenote_page_parent_notebook_section_parent_section_group_parent_notebook,
cf_user_onenote_page_parent_notebook_section_parent_section_group_section,
cf_user_onenote_page_parent_section,
cf_user_onenote_page_parent_section_page,
cf_user_onenote_page_parent_section_parent_notebook,
cf_user_onenote_page_parent_section_parent_notebook_section_group_parent_notebook,
cf_user_onenote_page_parent_section_parent_notebook_section_group_section,
cf_user_onenote_page_parent_section_parent_notebook_section,
cf_user_onenote_page_parent_section_parent_section_group_parent_notebook,
cf_user_onenote_page_parent_section_parent_section_group_parent_notebook_section,
cf_user_onenote_page_parent_section_parent_section_group_section,
cf_user_onenote_section_group_parent_notebook,
cf_user_onenote_section_group_parent_notebook_section,
cf_user_onenote_section_group_parent_notebook_section_page,
cf_user_onenote_section_group_parent_notebook_section_page_parent_notebook,
cf_user_onenote_section_group_parent_notebook_section_page_parent_section,
cf_user_onenote_section_group_parent_notebook_section_parent_notebook,
cf_user_onenote_section_group_section,
cf_user_onenote_section_group_section_page,
cf_user_onenote_section_group_section_page_parent_notebook,
cf_user_onenote_section_group_section_page_parent_notebook_section,
cf_user_onenote_section_group_section_page_parent_section,
cf_user_onenote_section_group_section_parent_notebook,
cf_user_onenote_section_group_section_parent_notebook_section,
cf_user_onenote_section,
cf_user_onenote_section_page,
cf_user_onenote_section_page_parent_notebook,
cf_user_onenote_section_page_parent_notebook_section_group_parent_notebook,
cf_user_onenote_section_page_parent_notebook_section_group_section,
cf_user_onenote_section_page_parent_notebook_section,
cf_user_onenote_section_page_parent_section,
cf_user_onenote_section_parent_notebook,
cf_user_onenote_section_parent_notebook_section_group_parent_notebook,
cf_user_onenote_section_parent_notebook_section_group_section,
cf_user_onenote_section_parent_notebook_section,
cf_user_onenote_section_parent_section_group_parent_notebook,
cf_user_onenote_section_parent_section_group_parent_notebook_section,
cf_user_onenote_section_parent_section_group_section,
cf_user_online_meeting,
)
# CliCommandType bindings mapping each user-calendar / calendar-group
# resource to its vendored SDK operations class and client factory.
# Auto-generated by AutoRest — do not edit by hand; changes are lost on
# regeneration.
usersactions_v1_0_user_calendar_calendar_view_attachment = CliCommandType(
    operations_tmpl='azext_usersactions_v1_0.vendored_sdks.usersactions.operations._users_calendar_calendar_view_attachments_operations#UsersCalendarCalendarViewAttachmentsOperations.{}',
    client_factory=cf_user_calendar_calendar_view_attachment,
)
usersactions_v1_0_user_calendar_calendar_view_calendar = CliCommandType(
    operations_tmpl='azext_usersactions_v1_0.vendored_sdks.usersactions.operations._users_calendar_calendar_view_calendar_operations#UsersCalendarCalendarViewCalendarOperations.{}',
    client_factory=cf_user_calendar_calendar_view_calendar,
)
usersactions_v1_0_user_calendar_calendar_view_instance = CliCommandType(
    operations_tmpl='azext_usersactions_v1_0.vendored_sdks.usersactions.operations._users_calendar_calendar_view_instances_operations#UsersCalendarCalendarViewInstancesOperations.{}',
    client_factory=cf_user_calendar_calendar_view_instance,
)
usersactions_v1_0_user_calendar_calendar_view = CliCommandType(
    operations_tmpl='azext_usersactions_v1_0.vendored_sdks.usersactions.operations._users_calendar_calendar_view_operations#UsersCalendarCalendarViewOperations.{}',
    client_factory=cf_user_calendar_calendar_view,
)
usersactions_v1_0_user_calendar_event_attachment = CliCommandType(
    operations_tmpl='azext_usersactions_v1_0.vendored_sdks.usersactions.operations._users_calendar_events_attachments_operations#UsersCalendarEventsAttachmentsOperations.{}',
    client_factory=cf_user_calendar_event_attachment,
)
usersactions_v1_0_user_calendar_event_calendar = CliCommandType(
    operations_tmpl='azext_usersactions_v1_0.vendored_sdks.usersactions.operations._users_calendar_events_calendar_operations#UsersCalendarEventsCalendarOperations.{}',
    client_factory=cf_user_calendar_event_calendar,
)
usersactions_v1_0_user_calendar_event_instance = CliCommandType(
    operations_tmpl='azext_usersactions_v1_0.vendored_sdks.usersactions.operations._users_calendar_events_instances_operations#UsersCalendarEventsInstancesOperations.{}',
    client_factory=cf_user_calendar_event_instance,
)
usersactions_v1_0_user_calendar_event = CliCommandType(
    operations_tmpl='azext_usersactions_v1_0.vendored_sdks.usersactions.operations._users_calendar_events_operations#UsersCalendarEventsOperations.{}',
    client_factory=cf_user_calendar_event,
)
usersactions_v1_0_user_calendar = CliCommandType(
    operations_tmpl='azext_usersactions_v1_0.vendored_sdks.usersactions.operations._users_calendar_operations#UsersCalendarOperations.{}',
    client_factory=cf_user_calendar,
)
# Calendar-group variants: same pattern, nested one level deeper
# (calendarGroups/{id}/calendars/{id}/...).
usersactions_v1_0_user_calendar_group_calendar_calendar_view_attachment = CliCommandType(
    operations_tmpl='azext_usersactions_v1_0.vendored_sdks.usersactions.operations._users_calendar_groups_calendars_calendar_view_attachments_operations#UsersCalendarGroupsCalendarsCalendarViewAttachmentsOperations.{}',
    client_factory=cf_user_calendar_group_calendar_calendar_view_attachment,
)
usersactions_v1_0_user_calendar_group_calendar_calendar_view_calendar = CliCommandType(
    operations_tmpl='azext_usersactions_v1_0.vendored_sdks.usersactions.operations._users_calendar_groups_calendars_calendar_view_calendar_operations#UsersCalendarGroupsCalendarsCalendarViewCalendarOperations.{}',
    client_factory=cf_user_calendar_group_calendar_calendar_view_calendar,
)
usersactions_v1_0_user_calendar_group_calendar_calendar_view_instance = CliCommandType(
    operations_tmpl='azext_usersactions_v1_0.vendored_sdks.usersactions.operations._users_calendar_groups_calendars_calendar_view_instances_operations#UsersCalendarGroupsCalendarsCalendarViewInstancesOperations.{}',
    client_factory=cf_user_calendar_group_calendar_calendar_view_instance,
)
usersactions_v1_0_user_calendar_group_calendar_calendar_view = CliCommandType(
    operations_tmpl='azext_usersactions_v1_0.vendored_sdks.usersactions.operations._users_calendar_groups_calendars_calendar_view_operations#UsersCalendarGroupsCalendarsCalendarViewOperations.{}',
    client_factory=cf_user_calendar_group_calendar_calendar_view,
)
usersactions_v1_0_user_calendar_group_calendar_event_attachment = CliCommandType(
    operations_tmpl='azext_usersactions_v1_0.vendored_sdks.usersactions.operations._users_calendar_groups_calendars_events_attachments_operations#UsersCalendarGroupsCalendarsEventsAttachmentsOperations.{}',
    client_factory=cf_user_calendar_group_calendar_event_attachment,
)
usersactions_v1_0_user_calendar_group_calendar_event_calendar = CliCommandType(
    operations_tmpl='azext_usersactions_v1_0.vendored_sdks.usersactions.operations._users_calendar_groups_calendars_events_calendar_operations#UsersCalendarGroupsCalendarsEventsCalendarOperations.{}',
    client_factory=cf_user_calendar_group_calendar_event_calendar,
)
usersactions_v1_0_user_calendar_group_calendar_event_instance = CliCommandType(
    operations_tmpl='azext_usersactions_v1_0.vendored_sdks.usersactions.operations._users_calendar_groups_calendars_events_instances_operations#UsersCalendarGroupsCalendarsEventsInstancesOperations.{}',
    client_factory=cf_user_calendar_group_calendar_event_instance,
)
usersactions_v1_0_user_calendar_group_calendar_event = CliCommandType(
    operations_tmpl='azext_usersactions_v1_0.vendored_sdks.usersactions.operations._users_calendar_groups_calendars_events_operations#UsersCalendarGroupsCalendarsEventsOperations.{}',
    client_factory=cf_user_calendar_group_calendar_event,
)
usersactions_v1_0_user_calendar_group_calendar = CliCommandType(
    operations_tmpl='azext_usersactions_v1_0.vendored_sdks.usersactions.operations._users_calendar_groups_calendars_operations#UsersCalendarGroupsCalendarsOperations.{}',
    client_factory=cf_user_calendar_group_calendar,
)
# NOTE(review): the nine assignments below rebind names already defined
# earlier in this module, pointing them at the plural '_users_calendars_*'
# operations modules instead of '_users_calendar_*'. The later binding
# wins at import time. This is generator output (see file header), so it
# is presumably intentional, but verify on regeneration that the plural
# modules are the ones the command table should use.
usersactions_v1_0_user_calendar_calendar_view_attachment = CliCommandType(
    operations_tmpl='azext_usersactions_v1_0.vendored_sdks.usersactions.operations._users_calendars_calendar_view_attachments_operations#UsersCalendarsCalendarViewAttachmentsOperations.{}',
    client_factory=cf_user_calendar_calendar_view_attachment,
)
usersactions_v1_0_user_calendar_calendar_view_calendar = CliCommandType(
    operations_tmpl='azext_usersactions_v1_0.vendored_sdks.usersactions.operations._users_calendars_calendar_view_calendar_operations#UsersCalendarsCalendarViewCalendarOperations.{}',
    client_factory=cf_user_calendar_calendar_view_calendar,
)
usersactions_v1_0_user_calendar_calendar_view_instance = CliCommandType(
    operations_tmpl='azext_usersactions_v1_0.vendored_sdks.usersactions.operations._users_calendars_calendar_view_instances_operations#UsersCalendarsCalendarViewInstancesOperations.{}',
    client_factory=cf_user_calendar_calendar_view_instance,
)
usersactions_v1_0_user_calendar_calendar_view = CliCommandType(
    operations_tmpl='azext_usersactions_v1_0.vendored_sdks.usersactions.operations._users_calendars_calendar_view_operations#UsersCalendarsCalendarViewOperations.{}',
    client_factory=cf_user_calendar_calendar_view,
)
usersactions_v1_0_user_calendar_event_attachment = CliCommandType(
    operations_tmpl='azext_usersactions_v1_0.vendored_sdks.usersactions.operations._users_calendars_events_attachments_operations#UsersCalendarsEventsAttachmentsOperations.{}',
    client_factory=cf_user_calendar_event_attachment,
)
usersactions_v1_0_user_calendar_event_calendar = CliCommandType(
    operations_tmpl='azext_usersactions_v1_0.vendored_sdks.usersactions.operations._users_calendars_events_calendar_operations#UsersCalendarsEventsCalendarOperations.{}',
    client_factory=cf_user_calendar_event_calendar,
)
usersactions_v1_0_user_calendar_event_instance = CliCommandType(
    operations_tmpl='azext_usersactions_v1_0.vendored_sdks.usersactions.operations._users_calendars_events_instances_operations#UsersCalendarsEventsInstancesOperations.{}',
    client_factory=cf_user_calendar_event_instance,
)
usersactions_v1_0_user_calendar_event = CliCommandType(
    operations_tmpl='azext_usersactions_v1_0.vendored_sdks.usersactions.operations._users_calendars_events_operations#UsersCalendarsEventsOperations.{}',
    client_factory=cf_user_calendar_event,
)
usersactions_v1_0_user_calendar = CliCommandType(
    operations_tmpl='azext_usersactions_v1_0.vendored_sdks.usersactions.operations._users_calendars_operations#UsersCalendarsOperations.{}',
    client_factory=cf_user_calendar,
)
# CliCommandType bindings for the user calendarView and events resources.
# Auto-generated by AutoRest — do not edit by hand.
usersactions_v1_0_user_calendar_view_attachment = CliCommandType(
    operations_tmpl='azext_usersactions_v1_0.vendored_sdks.usersactions.operations._users_calendar_view_attachments_operations#UsersCalendarViewAttachmentsOperations.{}',
    client_factory=cf_user_calendar_view_attachment,
)
usersactions_v1_0_user_calendar_view_calendar_calendar_view = CliCommandType(
    operations_tmpl='azext_usersactions_v1_0.vendored_sdks.usersactions.operations._users_calendar_view_calendar_calendar_view_operations#UsersCalendarViewCalendarCalendarViewOperations.{}',
    client_factory=cf_user_calendar_view_calendar_calendar_view,
)
usersactions_v1_0_user_calendar_view_calendar_event = CliCommandType(
    operations_tmpl='azext_usersactions_v1_0.vendored_sdks.usersactions.operations._users_calendar_view_calendar_events_operations#UsersCalendarViewCalendarEventsOperations.{}',
    client_factory=cf_user_calendar_view_calendar_event,
)
usersactions_v1_0_user_calendar_view_calendar = CliCommandType(
    operations_tmpl='azext_usersactions_v1_0.vendored_sdks.usersactions.operations._users_calendar_view_calendar_operations#UsersCalendarViewCalendarOperations.{}',
    client_factory=cf_user_calendar_view_calendar,
)
usersactions_v1_0_user_calendar_view_instance = CliCommandType(
    operations_tmpl='azext_usersactions_v1_0.vendored_sdks.usersactions.operations._users_calendar_view_instances_operations#UsersCalendarViewInstancesOperations.{}',
    client_factory=cf_user_calendar_view_instance,
)
usersactions_v1_0_user_calendar_view = CliCommandType(
    operations_tmpl='azext_usersactions_v1_0.vendored_sdks.usersactions.operations._users_calendar_view_operations#UsersCalendarViewOperations.{}',
    client_factory=cf_user_calendar_view,
)
usersactions_v1_0_user_event_attachment = CliCommandType(
    operations_tmpl='azext_usersactions_v1_0.vendored_sdks.usersactions.operations._users_events_attachments_operations#UsersEventsAttachmentsOperations.{}',
    client_factory=cf_user_event_attachment,
)
usersactions_v1_0_user_event_calendar_calendar_view = CliCommandType(
    operations_tmpl='azext_usersactions_v1_0.vendored_sdks.usersactions.operations._users_events_calendar_calendar_view_operations#UsersEventsCalendarCalendarViewOperations.{}',
    client_factory=cf_user_event_calendar_calendar_view,
)
usersactions_v1_0_user_event_calendar_event = CliCommandType(
    operations_tmpl='azext_usersactions_v1_0.vendored_sdks.usersactions.operations._users_events_calendar_events_operations#UsersEventsCalendarEventsOperations.{}',
    client_factory=cf_user_event_calendar_event,
)
usersactions_v1_0_user_event_calendar = CliCommandType(
    operations_tmpl='azext_usersactions_v1_0.vendored_sdks.usersactions.operations._users_events_calendar_operations#UsersEventsCalendarOperations.{}',
    client_factory=cf_user_event_calendar,
)
usersactions_v1_0_user_event_instance = CliCommandType(
    operations_tmpl='azext_usersactions_v1_0.vendored_sdks.usersactions.operations._users_events_instances_operations#UsersEventsInstancesOperations.{}',
    client_factory=cf_user_event_instance,
)
usersactions_v1_0_user_event = CliCommandType(
    operations_tmpl='azext_usersactions_v1_0.vendored_sdks.usersactions.operations._users_events_operations#UsersEventsOperations.{}',
    client_factory=cf_user_event,
)
# CliCommandType bindings for mail folders, managed devices, messages and
# the root user resource. Auto-generated by AutoRest — do not edit by hand.
usersactions_v1_0_user_mail_folder_child_folder = CliCommandType(
    operations_tmpl='azext_usersactions_v1_0.vendored_sdks.usersactions.operations._users_mail_folders_child_folders_operations#UsersMailFoldersChildFoldersOperations.{}',
    client_factory=cf_user_mail_folder_child_folder,
)
usersactions_v1_0_user_mail_folder_message_attachment = CliCommandType(
    operations_tmpl='azext_usersactions_v1_0.vendored_sdks.usersactions.operations._users_mail_folders_messages_attachments_operations#UsersMailFoldersMessagesAttachmentsOperations.{}',
    client_factory=cf_user_mail_folder_message_attachment,
)
usersactions_v1_0_user_mail_folder_message = CliCommandType(
    operations_tmpl='azext_usersactions_v1_0.vendored_sdks.usersactions.operations._users_mail_folders_messages_operations#UsersMailFoldersMessagesOperations.{}',
    client_factory=cf_user_mail_folder_message,
)
usersactions_v1_0_user_mail_folder = CliCommandType(
    operations_tmpl='azext_usersactions_v1_0.vendored_sdks.usersactions.operations._users_mail_folders_operations#UsersMailFoldersOperations.{}',
    client_factory=cf_user_mail_folder,
)
usersactions_v1_0_user_managed_device = CliCommandType(
    operations_tmpl='azext_usersactions_v1_0.vendored_sdks.usersactions.operations._users_managed_devices_operations#UsersManagedDevicesOperations.{}',
    client_factory=cf_user_managed_device,
)
usersactions_v1_0_user_message_attachment = CliCommandType(
    operations_tmpl='azext_usersactions_v1_0.vendored_sdks.usersactions.operations._users_messages_attachments_operations#UsersMessagesAttachmentsOperations.{}',
    client_factory=cf_user_message_attachment,
)
usersactions_v1_0_user_message = CliCommandType(
    operations_tmpl='azext_usersactions_v1_0.vendored_sdks.usersactions.operations._users_messages_operations#UsersMessagesOperations.{}',
    client_factory=cf_user_message,
)
usersactions_v1_0_user = CliCommandType(
    operations_tmpl=(
        'azext_usersactions_v1_0.vendored_sdks.usersactions.operations._users_operations#UsersOperations.{}'
    ),
    client_factory=cf_user,
)
# CliCommandType bindings for OneNote notebook resources (sections,
# section groups, pages, and their parent back-references).
# Auto-generated by AutoRest — do not edit by hand.
usersactions_v1_0_user_onenote_notebook = CliCommandType(
    operations_tmpl='azext_usersactions_v1_0.vendored_sdks.usersactions.operations._users_onenote_notebooks_operations#UsersOnenoteNotebooksOperations.{}',
    client_factory=cf_user_onenote_notebook,
)
usersactions_v1_0_user_onenote_notebook_section_group_parent_notebook = CliCommandType(
    operations_tmpl='azext_usersactions_v1_0.vendored_sdks.usersactions.operations._users_onenote_notebooks_section_groups_parent_notebook_operations#UsersOnenoteNotebooksSectionGroupsParentNotebookOperations.{}',
    client_factory=cf_user_onenote_notebook_section_group_parent_notebook,
)
usersactions_v1_0_user_onenote_notebook_section_group_section = CliCommandType(
    operations_tmpl='azext_usersactions_v1_0.vendored_sdks.usersactions.operations._users_onenote_notebooks_section_groups_sections_operations#UsersOnenoteNotebooksSectionGroupsSectionsOperations.{}',
    client_factory=cf_user_onenote_notebook_section_group_section,
)
usersactions_v1_0_user_onenote_notebook_section_group_section_page = CliCommandType(
    operations_tmpl='azext_usersactions_v1_0.vendored_sdks.usersactions.operations._users_onenote_notebooks_section_groups_sections_pages_operations#UsersOnenoteNotebooksSectionGroupsSectionsPagesOperations.{}',
    client_factory=cf_user_onenote_notebook_section_group_section_page,
)
usersactions_v1_0_user_onenote_notebook_section_group_section_page_parent_notebook = CliCommandType(
    operations_tmpl='azext_usersactions_v1_0.vendored_sdks.usersactions.operations._users_onenote_notebooks_section_groups_sections_pages_parent_notebook_operations#UsersOnenoteNotebooksSectionGroupsSectionsPagesParentNotebookOperations.{}',
    client_factory=cf_user_onenote_notebook_section_group_section_page_parent_notebook,
)
usersactions_v1_0_user_onenote_notebook_section_group_section_page_parent_section = CliCommandType(
    operations_tmpl='azext_usersactions_v1_0.vendored_sdks.usersactions.operations._users_onenote_notebooks_section_groups_sections_pages_parent_section_operations#UsersOnenoteNotebooksSectionGroupsSectionsPagesParentSectionOperations.{}',
    client_factory=cf_user_onenote_notebook_section_group_section_page_parent_section,
)
usersactions_v1_0_user_onenote_notebook_section_group_section_parent_notebook = CliCommandType(
    operations_tmpl='azext_usersactions_v1_0.vendored_sdks.usersactions.operations._users_onenote_notebooks_section_groups_sections_parent_notebook_operations#UsersOnenoteNotebooksSectionGroupsSectionsParentNotebookOperations.{}',
    client_factory=cf_user_onenote_notebook_section_group_section_parent_notebook,
)
usersactions_v1_0_user_onenote_notebook_section = CliCommandType(
    operations_tmpl='azext_usersactions_v1_0.vendored_sdks.usersactions.operations._users_onenote_notebooks_sections_operations#UsersOnenoteNotebooksSectionsOperations.{}',
    client_factory=cf_user_onenote_notebook_section,
)
usersactions_v1_0_user_onenote_notebook_section_page = CliCommandType(
    operations_tmpl='azext_usersactions_v1_0.vendored_sdks.usersactions.operations._users_onenote_notebooks_sections_pages_operations#UsersOnenoteNotebooksSectionsPagesOperations.{}',
    client_factory=cf_user_onenote_notebook_section_page,
)
usersactions_v1_0_user_onenote_notebook_section_page_parent_notebook = CliCommandType(
    operations_tmpl='azext_usersactions_v1_0.vendored_sdks.usersactions.operations._users_onenote_notebooks_sections_pages_parent_notebook_operations#UsersOnenoteNotebooksSectionsPagesParentNotebookOperations.{}',
    client_factory=cf_user_onenote_notebook_section_page_parent_notebook,
)
usersactions_v1_0_user_onenote_notebook_section_page_parent_section = CliCommandType(
    operations_tmpl='azext_usersactions_v1_0.vendored_sdks.usersactions.operations._users_onenote_notebooks_sections_pages_parent_section_operations#UsersOnenoteNotebooksSectionsPagesParentSectionOperations.{}',
    client_factory=cf_user_onenote_notebook_section_page_parent_section,
)
usersactions_v1_0_user_onenote_notebook_section_parent_notebook = CliCommandType(
    operations_tmpl='azext_usersactions_v1_0.vendored_sdks.usersactions.operations._users_onenote_notebooks_sections_parent_notebook_operations#UsersOnenoteNotebooksSectionsParentNotebookOperations.{}',
    client_factory=cf_user_onenote_notebook_section_parent_notebook,
)
usersactions_v1_0_user_onenote_notebook_section_parent_section_group_parent_notebook = CliCommandType(
    operations_tmpl='azext_usersactions_v1_0.vendored_sdks.usersactions.operations._users_onenote_notebooks_sections_parent_section_group_parent_notebook_operations#UsersOnenoteNotebooksSectionsParentSectionGroupParentNotebookOperations.{}',
    client_factory=cf_user_onenote_notebook_section_parent_section_group_parent_notebook,
)
usersactions_v1_0_user_onenote_notebook_section_parent_section_group_section = CliCommandType(
    operations_tmpl='azext_usersactions_v1_0.vendored_sdks.usersactions.operations._users_onenote_notebooks_sections_parent_section_group_sections_operations#UsersOnenoteNotebooksSectionsParentSectionGroupSectionsOperations.{}',
    client_factory=cf_user_onenote_notebook_section_parent_section_group_section,
)
# CliCommandType bindings for OneNote page resources and their parent
# notebook/section back-references. Auto-generated by AutoRest — do not
# edit by hand.
usersactions_v1_0_user_onenote_page = CliCommandType(
    operations_tmpl='azext_usersactions_v1_0.vendored_sdks.usersactions.operations._users_onenote_pages_operations#UsersOnenotePagesOperations.{}',
    client_factory=cf_user_onenote_page,
)
usersactions_v1_0_user_onenote_page_parent_notebook = CliCommandType(
    operations_tmpl='azext_usersactions_v1_0.vendored_sdks.usersactions.operations._users_onenote_pages_parent_notebook_operations#UsersOnenotePagesParentNotebookOperations.{}',
    client_factory=cf_user_onenote_page_parent_notebook,
)
usersactions_v1_0_user_onenote_page_parent_notebook_section_group_parent_notebook = CliCommandType(
    operations_tmpl='azext_usersactions_v1_0.vendored_sdks.usersactions.operations._users_onenote_pages_parent_notebook_section_groups_parent_notebook_operations#UsersOnenotePagesParentNotebookSectionGroupsParentNotebookOperations.{}',
    client_factory=cf_user_onenote_page_parent_notebook_section_group_parent_notebook,
)
usersactions_v1_0_user_onenote_page_parent_notebook_section_group_section = CliCommandType(
    operations_tmpl='azext_usersactions_v1_0.vendored_sdks.usersactions.operations._users_onenote_pages_parent_notebook_section_groups_sections_operations#UsersOnenotePagesParentNotebookSectionGroupsSectionsOperations.{}',
    client_factory=cf_user_onenote_page_parent_notebook_section_group_section,
)
usersactions_v1_0_user_onenote_page_parent_notebook_section_group_section_page = CliCommandType(
    operations_tmpl='azext_usersactions_v1_0.vendored_sdks.usersactions.operations._users_onenote_pages_parent_notebook_section_groups_sections_pages_operations#UsersOnenotePagesParentNotebookSectionGroupsSectionsPagesOperations.{}',
    client_factory=cf_user_onenote_page_parent_notebook_section_group_section_page,
)
usersactions_v1_0_user_onenote_page_parent_notebook_section_group_section_parent_notebook = CliCommandType(
    operations_tmpl='azext_usersactions_v1_0.vendored_sdks.usersactions.operations._users_onenote_pages_parent_notebook_section_groups_sections_parent_notebook_operations#UsersOnenotePagesParentNotebookSectionGroupsSectionsParentNotebookOperations.{}',
    client_factory=cf_user_onenote_page_parent_notebook_section_group_section_parent_notebook,
)
usersactions_v1_0_user_onenote_page_parent_notebook_section = CliCommandType(
    operations_tmpl='azext_usersactions_v1_0.vendored_sdks.usersactions.operations._users_onenote_pages_parent_notebook_sections_operations#UsersOnenotePagesParentNotebookSectionsOperations.{}',
    client_factory=cf_user_onenote_page_parent_notebook_section,
)
usersactions_v1_0_user_onenote_page_parent_notebook_section_page = CliCommandType(
    operations_tmpl='azext_usersactions_v1_0.vendored_sdks.usersactions.operations._users_onenote_pages_parent_notebook_sections_pages_operations#UsersOnenotePagesParentNotebookSectionsPagesOperations.{}',
    client_factory=cf_user_onenote_page_parent_notebook_section_page,
)
usersactions_v1_0_user_onenote_page_parent_notebook_section_parent_notebook = CliCommandType(
    operations_tmpl='azext_usersactions_v1_0.vendored_sdks.usersactions.operations._users_onenote_pages_parent_notebook_sections_parent_notebook_operations#UsersOnenotePagesParentNotebookSectionsParentNotebookOperations.{}',
    client_factory=cf_user_onenote_page_parent_notebook_section_parent_notebook,
)
usersactions_v1_0_user_onenote_page_parent_notebook_section_parent_section_group_parent_notebook = CliCommandType(
    operations_tmpl='azext_usersactions_v1_0.vendored_sdks.usersactions.operations._users_onenote_pages_parent_notebook_sections_parent_section_group_parent_notebook_operations#UsersOnenotePagesParentNotebookSectionsParentSectionGroupParentNotebookOperations.{}',
    client_factory=cf_user_onenote_page_parent_notebook_section_parent_section_group_parent_notebook,
)
usersactions_v1_0_user_onenote_page_parent_notebook_section_parent_section_group_section = CliCommandType(
    operations_tmpl='azext_usersactions_v1_0.vendored_sdks.usersactions.operations._users_onenote_pages_parent_notebook_sections_parent_section_group_sections_operations#UsersOnenotePagesParentNotebookSectionsParentSectionGroupSectionsOperations.{}',
    client_factory=cf_user_onenote_page_parent_notebook_section_parent_section_group_section,
)
usersactions_v1_0_user_onenote_page_parent_section = CliCommandType(
    operations_tmpl='azext_usersactions_v1_0.vendored_sdks.usersactions.operations._users_onenote_pages_parent_section_operations#UsersOnenotePagesParentSectionOperations.{}',
    client_factory=cf_user_onenote_page_parent_section,
)
usersactions_v1_0_user_onenote_page_parent_section_page = CliCommandType(
    operations_tmpl='azext_usersactions_v1_0.vendored_sdks.usersactions.operations._users_onenote_pages_parent_section_pages_operations#UsersOnenotePagesParentSectionPagesOperations.{}',
    client_factory=cf_user_onenote_page_parent_section_page,
)
usersactions_v1_0_user_onenote_page_parent_section_parent_notebook = CliCommandType(
    operations_tmpl='azext_usersactions_v1_0.vendored_sdks.usersactions.operations._users_onenote_pages_parent_section_parent_notebook_operations#UsersOnenotePagesParentSectionParentNotebookOperations.{}',
    client_factory=cf_user_onenote_page_parent_section_parent_notebook,
)
usersactions_v1_0_user_onenote_page_parent_section_parent_notebook_section_group_parent_notebook = CliCommandType(
operations_tmpl='azext_usersactions_v1_0.vendored_sdks.usersactions.operations._users_onenote_pages_parent_section_parent_notebook_section_groups_parent_notebook_operations#UsersOnenotePagesParentSectionParentNotebookSectionGroupsParentNotebookOperations.{}',
client_factory=cf_user_onenote_page_parent_section_parent_notebook_section_group_parent_notebook,
)
usersactions_v1_0_user_onenote_page_parent_section_parent_notebook_section_group_section = CliCommandType(
operations_tmpl='azext_usersactions_v1_0.vendored_sdks.usersactions.operations._users_onenote_pages_parent_section_parent_notebook_section_groups_sections_operations#UsersOnenotePagesParentSectionParentNotebookSectionGroupsSectionsOperations.{}',
client_factory=cf_user_onenote_page_parent_section_parent_notebook_section_group_section,
)
usersactions_v1_0_user_onenote_page_parent_section_parent_notebook_section = CliCommandType(
operations_tmpl='azext_usersactions_v1_0.vendored_sdks.usersactions.operations._users_onenote_pages_parent_section_parent_notebook_sections_operations#UsersOnenotePagesParentSectionParentNotebookSectionsOperations.{}',
client_factory=cf_user_onenote_page_parent_section_parent_notebook_section,
)
usersactions_v1_0_user_onenote_page_parent_section_parent_section_group_parent_notebook = CliCommandType(
operations_tmpl='azext_usersactions_v1_0.vendored_sdks.usersactions.operations._users_onenote_pages_parent_section_parent_section_group_parent_notebook_operations#UsersOnenotePagesParentSectionParentSectionGroupParentNotebookOperations.{}',
client_factory=cf_user_onenote_page_parent_section_parent_section_group_parent_notebook,
)
usersactions_v1_0_user_onenote_page_parent_section_parent_section_group_parent_notebook_section = CliCommandType(
operations_tmpl='azext_usersactions_v1_0.vendored_sdks.usersactions.operations._users_onenote_pages_parent_section_parent_section_group_parent_notebook_sections_operations#UsersOnenotePagesParentSectionParentSectionGroupParentNotebookSectionsOperations.{}',
client_factory=cf_user_onenote_page_parent_section_parent_section_group_parent_notebook_section,
)
usersactions_v1_0_user_onenote_page_parent_section_parent_section_group_section = CliCommandType(
operations_tmpl='azext_usersactions_v1_0.vendored_sdks.usersactions.operations._users_onenote_pages_parent_section_parent_section_group_sections_operations#UsersOnenotePagesParentSectionParentSectionGroupSectionsOperations.{}',
client_factory=cf_user_onenote_page_parent_section_parent_section_group_section,
)
usersactions_v1_0_user_onenote_section_group_parent_notebook = CliCommandType(
operations_tmpl='azext_usersactions_v1_0.vendored_sdks.usersactions.operations._users_onenote_section_groups_parent_notebook_operations#UsersOnenoteSectionGroupsParentNotebookOperations.{}',
client_factory=cf_user_onenote_section_group_parent_notebook,
)
usersactions_v1_0_user_onenote_section_group_parent_notebook_section = CliCommandType(
operations_tmpl='azext_usersactions_v1_0.vendored_sdks.usersactions.operations._users_onenote_section_groups_parent_notebook_sections_operations#UsersOnenoteSectionGroupsParentNotebookSectionsOperations.{}',
client_factory=cf_user_onenote_section_group_parent_notebook_section,
)
usersactions_v1_0_user_onenote_section_group_parent_notebook_section_page = CliCommandType(
operations_tmpl='azext_usersactions_v1_0.vendored_sdks.usersactions.operations._users_onenote_section_groups_parent_notebook_sections_pages_operations#UsersOnenoteSectionGroupsParentNotebookSectionsPagesOperations.{}',
client_factory=cf_user_onenote_section_group_parent_notebook_section_page,
)
usersactions_v1_0_user_onenote_section_group_parent_notebook_section_page_parent_notebook = CliCommandType(
operations_tmpl='azext_usersactions_v1_0.vendored_sdks.usersactions.operations._users_onenote_section_groups_parent_notebook_sections_pages_parent_notebook_operations#UsersOnenoteSectionGroupsParentNotebookSectionsPagesParentNotebookOperations.{}',
client_factory=cf_user_onenote_section_group_parent_notebook_section_page_parent_notebook,
)
usersactions_v1_0_user_onenote_section_group_parent_notebook_section_page_parent_section = CliCommandType(
operations_tmpl='azext_usersactions_v1_0.vendored_sdks.usersactions.operations._users_onenote_section_groups_parent_notebook_sections_pages_parent_section_operations#UsersOnenoteSectionGroupsParentNotebookSectionsPagesParentSectionOperations.{}',
client_factory=cf_user_onenote_section_group_parent_notebook_section_page_parent_section,
)
usersactions_v1_0_user_onenote_section_group_parent_notebook_section_parent_notebook = CliCommandType(
operations_tmpl='azext_usersactions_v1_0.vendored_sdks.usersactions.operations._users_onenote_section_groups_parent_notebook_sections_parent_notebook_operations#UsersOnenoteSectionGroupsParentNotebookSectionsParentNotebookOperations.{}',
client_factory=cf_user_onenote_section_group_parent_notebook_section_parent_notebook,
)
usersactions_v1_0_user_onenote_section_group_section = CliCommandType(
operations_tmpl='azext_usersactions_v1_0.vendored_sdks.usersactions.operations._users_onenote_section_groups_sections_operations#UsersOnenoteSectionGroupsSectionsOperations.{}',
client_factory=cf_user_onenote_section_group_section,
)
usersactions_v1_0_user_onenote_section_group_section_page = CliCommandType(
operations_tmpl='azext_usersactions_v1_0.vendored_sdks.usersactions.operations._users_onenote_section_groups_sections_pages_operations#UsersOnenoteSectionGroupsSectionsPagesOperations.{}',
client_factory=cf_user_onenote_section_group_section_page,
)
usersactions_v1_0_user_onenote_section_group_section_page_parent_notebook = CliCommandType(
operations_tmpl='azext_usersactions_v1_0.vendored_sdks.usersactions.operations._users_onenote_section_groups_sections_pages_parent_notebook_operations#UsersOnenoteSectionGroupsSectionsPagesParentNotebookOperations.{}',
client_factory=cf_user_onenote_section_group_section_page_parent_notebook,
)
usersactions_v1_0_user_onenote_section_group_section_page_parent_notebook_section = CliCommandType(
operations_tmpl='azext_usersactions_v1_0.vendored_sdks.usersactions.operations._users_onenote_section_groups_sections_pages_parent_notebook_sections_operations#UsersOnenoteSectionGroupsSectionsPagesParentNotebookSectionsOperations.{}',
client_factory=cf_user_onenote_section_group_section_page_parent_notebook_section,
)
usersactions_v1_0_user_onenote_section_group_section_page_parent_section = CliCommandType(
operations_tmpl='azext_usersactions_v1_0.vendored_sdks.usersactions.operations._users_onenote_section_groups_sections_pages_parent_section_operations#UsersOnenoteSectionGroupsSectionsPagesParentSectionOperations.{}',
client_factory=cf_user_onenote_section_group_section_page_parent_section,
)
usersactions_v1_0_user_onenote_section_group_section_parent_notebook = CliCommandType(
operations_tmpl='azext_usersactions_v1_0.vendored_sdks.usersactions.operations._users_onenote_section_groups_sections_parent_notebook_operations#UsersOnenoteSectionGroupsSectionsParentNotebookOperations.{}',
client_factory=cf_user_onenote_section_group_section_parent_notebook,
)
usersactions_v1_0_user_onenote_section_group_section_parent_notebook_section = CliCommandType(
operations_tmpl='azext_usersactions_v1_0.vendored_sdks.usersactions.operations._users_onenote_section_groups_sections_parent_notebook_sections_operations#UsersOnenoteSectionGroupsSectionsParentNotebookSectionsOperations.{}',
client_factory=cf_user_onenote_section_group_section_parent_notebook_section,
)
usersactions_v1_0_user_onenote_section = CliCommandType(
operations_tmpl='azext_usersactions_v1_0.vendored_sdks.usersactions.operations._users_onenote_sections_operations#UsersOnenoteSectionsOperations.{}',
client_factory=cf_user_onenote_section,
)
usersactions_v1_0_user_onenote_section_page = CliCommandType(
operations_tmpl='azext_usersactions_v1_0.vendored_sdks.usersactions.operations._users_onenote_sections_pages_operations#UsersOnenoteSectionsPagesOperations.{}',
client_factory=cf_user_onenote_section_page,
)
usersactions_v1_0_user_onenote_section_page_parent_notebook = CliCommandType(
operations_tmpl='azext_usersactions_v1_0.vendored_sdks.usersactions.operations._users_onenote_sections_pages_parent_notebook_operations#UsersOnenoteSectionsPagesParentNotebookOperations.{}',
client_factory=cf_user_onenote_section_page_parent_notebook,
)
usersactions_v1_0_user_onenote_section_page_parent_notebook_section_group_parent_notebook = CliCommandType(
operations_tmpl='azext_usersactions_v1_0.vendored_sdks.usersactions.operations._users_onenote_sections_pages_parent_notebook_section_groups_parent_notebook_operations#UsersOnenoteSectionsPagesParentNotebookSectionGroupsParentNotebookOperations.{}',
client_factory=cf_user_onenote_section_page_parent_notebook_section_group_parent_notebook,
)
usersactions_v1_0_user_onenote_section_page_parent_notebook_section_group_section = CliCommandType(
operations_tmpl='azext_usersactions_v1_0.vendored_sdks.usersactions.operations._users_onenote_sections_pages_parent_notebook_section_groups_sections_operations#UsersOnenoteSectionsPagesParentNotebookSectionGroupsSectionsOperations.{}',
client_factory=cf_user_onenote_section_page_parent_notebook_section_group_section,
)
usersactions_v1_0_user_onenote_section_page_parent_notebook_section = CliCommandType(
operations_tmpl='azext_usersactions_v1_0.vendored_sdks.usersactions.operations._users_onenote_sections_pages_parent_notebook_sections_operations#UsersOnenoteSectionsPagesParentNotebookSectionsOperations.{}',
client_factory=cf_user_onenote_section_page_parent_notebook_section,
)
usersactions_v1_0_user_onenote_section_page_parent_section = CliCommandType(
operations_tmpl='azext_usersactions_v1_0.vendored_sdks.usersactions.operations._users_onenote_sections_pages_parent_section_operations#UsersOnenoteSectionsPagesParentSectionOperations.{}',
client_factory=cf_user_onenote_section_page_parent_section,
)
usersactions_v1_0_user_onenote_section_parent_notebook = CliCommandType(
operations_tmpl='azext_usersactions_v1_0.vendored_sdks.usersactions.operations._users_onenote_sections_parent_notebook_operations#UsersOnenoteSectionsParentNotebookOperations.{}',
client_factory=cf_user_onenote_section_parent_notebook,
)
usersactions_v1_0_user_onenote_section_parent_notebook_section_group_parent_notebook = CliCommandType(
operations_tmpl='azext_usersactions_v1_0.vendored_sdks.usersactions.operations._users_onenote_sections_parent_notebook_section_groups_parent_notebook_operations#UsersOnenoteSectionsParentNotebookSectionGroupsParentNotebookOperations.{}',
client_factory=cf_user_onenote_section_parent_notebook_section_group_parent_notebook,
)
usersactions_v1_0_user_onenote_section_parent_notebook_section_group_section = CliCommandType(
operations_tmpl='azext_usersactions_v1_0.vendored_sdks.usersactions.operations._users_onenote_sections_parent_notebook_section_groups_sections_operations#UsersOnenoteSectionsParentNotebookSectionGroupsSectionsOperations.{}',
client_factory=cf_user_onenote_section_parent_notebook_section_group_section,
)
usersactions_v1_0_user_onenote_section_parent_notebook_section = CliCommandType(
operations_tmpl='azext_usersactions_v1_0.vendored_sdks.usersactions.operations._users_onenote_sections_parent_notebook_sections_operations#UsersOnenoteSectionsParentNotebookSectionsOperations.{}',
client_factory=cf_user_onenote_section_parent_notebook_section,
)
usersactions_v1_0_user_onenote_section_parent_section_group_parent_notebook = CliCommandType(
operations_tmpl='azext_usersactions_v1_0.vendored_sdks.usersactions.operations._users_onenote_sections_parent_section_group_parent_notebook_operations#UsersOnenoteSectionsParentSectionGroupParentNotebookOperations.{}',
client_factory=cf_user_onenote_section_parent_section_group_parent_notebook,
)
usersactions_v1_0_user_onenote_section_parent_section_group_parent_notebook_section = CliCommandType(
operations_tmpl='azext_usersactions_v1_0.vendored_sdks.usersactions.operations._users_onenote_sections_parent_section_group_parent_notebook_sections_operations#UsersOnenoteSectionsParentSectionGroupParentNotebookSectionsOperations.{}',
client_factory=cf_user_onenote_section_parent_section_group_parent_notebook_section,
)
usersactions_v1_0_user_onenote_section_parent_section_group_section = CliCommandType(
operations_tmpl='azext_usersactions_v1_0.vendored_sdks.usersactions.operations._users_onenote_sections_parent_section_group_sections_operations#UsersOnenoteSectionsParentSectionGroupSectionsOperations.{}',
client_factory=cf_user_onenote_section_parent_section_group_section,
)
usersactions_v1_0_user_online_meeting = CliCommandType(
operations_tmpl='azext_usersactions_v1_0.vendored_sdks.usersactions.operations._users_online_meetings_operations#UsersOnlineMeetingsOperations.{}',
client_factory=cf_user_online_meeting,
)
def load_command_table(self, _):
with self.command_group(
'usersactions user-calendar-calendar-view-attachment',
usersactions_v1_0_user_calendar_calendar_view_attachment,
client_factory=cf_user_calendar_calendar_view_attachment,
) as g:
g.custom_command(
'create-upload-session', 'usersactions_user_calendar_calendar_view_attachment_create_upload_session'
)
with self.command_group(
'usersactions user-calendar-calendar-view-calendar',
usersactions_v1_0_user_calendar_calendar_view_calendar,
client_factory=cf_user_calendar_calendar_view_calendar,
) as g:
g.custom_command('get-schedule', 'usersactions_user_calendar_calendar_view_calendar_get_schedule')
with self.command_group(
'usersactions user-calendar-calendar-view-instance',
usersactions_v1_0_user_calendar_calendar_view_instance,
client_factory=cf_user_calendar_calendar_view_instance,
) as g:
g.custom_command('accept', 'usersactions_user_calendar_calendar_view_instance_accept')
g.custom_command('decline', 'usersactions_user_calendar_calendar_view_instance_decline')
g.custom_command('dismiss-reminder', 'usersactions_user_calendar_calendar_view_instance_dismiss_reminder')
g.custom_command('snooze-reminder', 'usersactions_user_calendar_calendar_view_instance_snooze_reminder')
g.custom_command('tentatively-accept', 'usersactions_user_calendar_calendar_view_instance_tentatively_accept')
with self.command_group(
'usersactions user-calendar-calendar-view',
usersactions_v1_0_user_calendar_calendar_view,
client_factory=cf_user_calendar_calendar_view,
) as g:
g.custom_command('accept', 'usersactions_user_calendar_calendar_view_accept')
g.custom_command('decline', 'usersactions_user_calendar_calendar_view_decline')
g.custom_command('dismiss-reminder', 'usersactions_user_calendar_calendar_view_dismiss_reminder')
g.custom_command('snooze-reminder', 'usersactions_user_calendar_calendar_view_snooze_reminder')
g.custom_command('tentatively-accept', 'usersactions_user_calendar_calendar_view_tentatively_accept')
with self.command_group(
'usersactions user-calendar-event-attachment',
usersactions_v1_0_user_calendar_event_attachment,
client_factory=cf_user_calendar_event_attachment,
) as g:
g.custom_command('create-upload-session', 'usersactions_user_calendar_event_attachment_create_upload_session')
with self.command_group(
'usersactions user-calendar-event-calendar',
usersactions_v1_0_user_calendar_event_calendar,
client_factory=cf_user_calendar_event_calendar,
) as g:
g.custom_command('get-schedule', 'usersactions_user_calendar_event_calendar_get_schedule')
with self.command_group(
'usersactions user-calendar-event-instance',
usersactions_v1_0_user_calendar_event_instance,
client_factory=cf_user_calendar_event_instance,
) as g:
g.custom_command('accept', 'usersactions_user_calendar_event_instance_accept')
g.custom_command('decline', 'usersactions_user_calendar_event_instance_decline')
g.custom_command('dismiss-reminder', 'usersactions_user_calendar_event_instance_dismiss_reminder')
g.custom_command('snooze-reminder', 'usersactions_user_calendar_event_instance_snooze_reminder')
g.custom_command('tentatively-accept', 'usersactions_user_calendar_event_instance_tentatively_accept')
with self.command_group(
'usersactions user-calendar-event', usersactions_v1_0_user_calendar_event, client_factory=cf_user_calendar_event
) as g:
g.custom_command('accept', 'usersactions_user_calendar_event_accept')
g.custom_command('decline', 'usersactions_user_calendar_event_decline')
g.custom_command('dismiss-reminder', 'usersactions_user_calendar_event_dismiss_reminder')
g.custom_command('snooze-reminder', 'usersactions_user_calendar_event_snooze_reminder')
g.custom_command('tentatively-accept', 'usersactions_user_calendar_event_tentatively_accept')
with self.command_group(
'usersactions user-calendar', usersactions_v1_0_user_calendar, client_factory=cf_user_calendar
) as g:
g.custom_command('get-schedule', 'usersactions_user_calendar_get_schedule')
with self.command_group(
'usersactions user-calendar-group-calendar-calendar-view-attachment',
usersactions_v1_0_user_calendar_group_calendar_calendar_view_attachment,
client_factory=cf_user_calendar_group_calendar_calendar_view_attachment,
) as g:
g.custom_command(
'create-upload-session',
'usersactions_user_calendar_group_calendar_calendar_view_attachment_create_upload_session',
)
with self.command_group(
'usersactions user-calendar-group-calendar-calendar-view-calendar',
usersactions_v1_0_user_calendar_group_calendar_calendar_view_calendar,
client_factory=cf_user_calendar_group_calendar_calendar_view_calendar,
) as g:
g.custom_command(
'get-schedule', 'usersactions_user_calendar_group_calendar_calendar_view_calendar_get_schedule'
)
with self.command_group(
'usersactions user-calendar-group-calendar-calendar-view-instance',
usersactions_v1_0_user_calendar_group_calendar_calendar_view_instance,
client_factory=cf_user_calendar_group_calendar_calendar_view_instance,
) as g:
g.custom_command('accept', 'usersactions_user_calendar_group_calendar_calendar_view_instance_accept')
g.custom_command('decline', 'usersactions_user_calendar_group_calendar_calendar_view_instance_decline')
g.custom_command(
'dismiss-reminder', 'usersactions_user_calendar_group_calendar_calendar_view_instance_dismiss_reminder'
)
g.custom_command(
'snooze-reminder', 'usersactions_user_calendar_group_calendar_calendar_view_instance_snooze_reminder'
)
g.custom_command(
'tentatively-accept', 'usersactions_user_calendar_group_calendar_calendar_view_instance_tentatively_accept'
)
with self.command_group(
'usersactions user-calendar-group-calendar-calendar-view',
usersactions_v1_0_user_calendar_group_calendar_calendar_view,
client_factory=cf_user_calendar_group_calendar_calendar_view,
) as g:
g.custom_command('accept', 'usersactions_user_calendar_group_calendar_calendar_view_accept')
g.custom_command('decline', 'usersactions_user_calendar_group_calendar_calendar_view_decline')
g.custom_command('dismiss-reminder', 'usersactions_user_calendar_group_calendar_calendar_view_dismiss_reminder')
g.custom_command('snooze-reminder', 'usersactions_user_calendar_group_calendar_calendar_view_snooze_reminder')
g.custom_command(
'tentatively-accept', 'usersactions_user_calendar_group_calendar_calendar_view_tentatively_accept'
)
with self.command_group(
'usersactions user-calendar-group-calendar-event-attachment',
usersactions_v1_0_user_calendar_group_calendar_event_attachment,
client_factory=cf_user_calendar_group_calendar_event_attachment,
) as g:
g.custom_command(
'create-upload-session', 'usersactions_user_calendar_group_calendar_event_attachment_create_upload_session'
)
with self.command_group(
'usersactions user-calendar-group-calendar-event-calendar',
usersactions_v1_0_user_calendar_group_calendar_event_calendar,
client_factory=cf_user_calendar_group_calendar_event_calendar,
) as g:
g.custom_command('get-schedule', 'usersactions_user_calendar_group_calendar_event_calendar_get_schedule')
with self.command_group(
'usersactions user-calendar-group-calendar-event-instance',
usersactions_v1_0_user_calendar_group_calendar_event_instance,
client_factory=cf_user_calendar_group_calendar_event_instance,
) as g:
g.custom_command('accept', 'usersactions_user_calendar_group_calendar_event_instance_accept')
g.custom_command('decline', 'usersactions_user_calendar_group_calendar_event_instance_decline')
g.custom_command(
'dismiss-reminder', 'usersactions_user_calendar_group_calendar_event_instance_dismiss_reminder'
)
g.custom_command('snooze-reminder', 'usersactions_user_calendar_group_calendar_event_instance_snooze_reminder')
g.custom_command(
'tentatively-accept', 'usersactions_user_calendar_group_calendar_event_instance_tentatively_accept'
)
with self.command_group(
'usersactions user-calendar-group-calendar-event',
usersactions_v1_0_user_calendar_group_calendar_event,
client_factory=cf_user_calendar_group_calendar_event,
) as g:
g.custom_command('accept', 'usersactions_user_calendar_group_calendar_event_accept')
g.custom_command('decline', 'usersactions_user_calendar_group_calendar_event_decline')
g.custom_command('dismiss-reminder', 'usersactions_user_calendar_group_calendar_event_dismiss_reminder')
g.custom_command('snooze-reminder', 'usersactions_user_calendar_group_calendar_event_snooze_reminder')
g.custom_command('tentatively-accept', 'usersactions_user_calendar_group_calendar_event_tentatively_accept')
with self.command_group(
'usersactions user-calendar-group-calendar',
usersactions_v1_0_user_calendar_group_calendar,
client_factory=cf_user_calendar_group_calendar,
) as g:
g.custom_command('get-schedule', 'usersactions_user_calendar_group_calendar_get_schedule')
with self.command_group(
'usersactions user-calendar-calendar-view-attachment',
usersactions_v1_0_user_calendar_calendar_view_attachment,
client_factory=cf_user_calendar_calendar_view_attachment,
) as g:
g.custom_command(
'create-upload-session', 'usersactions_user_calendar_calendar_view_attachment_create_upload_session'
)
with self.command_group(
'usersactions user-calendar-calendar-view-calendar',
usersactions_v1_0_user_calendar_calendar_view_calendar,
client_factory=cf_user_calendar_calendar_view_calendar,
) as g:
g.custom_command('get-schedule', 'usersactions_user_calendar_calendar_view_calendar_get_schedule')
with self.command_group(
'usersactions user-calendar-calendar-view-instance',
usersactions_v1_0_user_calendar_calendar_view_instance,
client_factory=cf_user_calendar_calendar_view_instance,
) as g:
g.custom_command('accept', 'usersactions_user_calendar_calendar_view_instance_accept')
g.custom_command('decline', 'usersactions_user_calendar_calendar_view_instance_decline')
g.custom_command('dismiss-reminder', 'usersactions_user_calendar_calendar_view_instance_dismiss_reminder')
g.custom_command('snooze-reminder', 'usersactions_user_calendar_calendar_view_instance_snooze_reminder')
g.custom_command('tentatively-accept', 'usersactions_user_calendar_calendar_view_instance_tentatively_accept')
with self.command_group(
'usersactions user-calendar-calendar-view',
usersactions_v1_0_user_calendar_calendar_view,
client_factory=cf_user_calendar_calendar_view,
) as g:
g.custom_command('accept', 'usersactions_user_calendar_calendar_view_accept')
g.custom_command('decline', 'usersactions_user_calendar_calendar_view_decline')
g.custom_command('dismiss-reminder', 'usersactions_user_calendar_calendar_view_dismiss_reminder')
g.custom_command('snooze-reminder', 'usersactions_user_calendar_calendar_view_snooze_reminder')
g.custom_command('tentatively-accept', 'usersactions_user_calendar_calendar_view_tentatively_accept')
with self.command_group(
'usersactions user-calendar-event-attachment',
usersactions_v1_0_user_calendar_event_attachment,
client_factory=cf_user_calendar_event_attachment,
) as g:
g.custom_command('create-upload-session', 'usersactions_user_calendar_event_attachment_create_upload_session')
with self.command_group(
'usersactions user-calendar-event-calendar',
usersactions_v1_0_user_calendar_event_calendar,
client_factory=cf_user_calendar_event_calendar,
) as g:
g.custom_command('get-schedule', 'usersactions_user_calendar_event_calendar_get_schedule')
with self.command_group(
'usersactions user-calendar-event-instance',
usersactions_v1_0_user_calendar_event_instance,
client_factory=cf_user_calendar_event_instance,
) as g:
g.custom_command('accept', 'usersactions_user_calendar_event_instance_accept')
g.custom_command('decline', 'usersactions_user_calendar_event_instance_decline')
g.custom_command('dismiss-reminder', 'usersactions_user_calendar_event_instance_dismiss_reminder')
g.custom_command('snooze-reminder', 'usersactions_user_calendar_event_instance_snooze_reminder')
g.custom_command('tentatively-accept', 'usersactions_user_calendar_event_instance_tentatively_accept')
with self.command_group(
'usersactions user-calendar-event', usersactions_v1_0_user_calendar_event, client_factory=cf_user_calendar_event
) as g:
g.custom_command('accept', 'usersactions_user_calendar_event_accept')
g.custom_command('decline', 'usersactions_user_calendar_event_decline')
g.custom_command('dismiss-reminder', 'usersactions_user_calendar_event_dismiss_reminder')
g.custom_command('snooze-reminder', 'usersactions_user_calendar_event_snooze_reminder')
g.custom_command('tentatively-accept', 'usersactions_user_calendar_event_tentatively_accept')
with self.command_group(
'usersactions user-calendar', usersactions_v1_0_user_calendar, client_factory=cf_user_calendar
) as g:
g.custom_command('get-schedule', 'usersactions_user_calendar_get_schedule')
with self.command_group(
'usersactions user-calendar-view-attachment',
usersactions_v1_0_user_calendar_view_attachment,
client_factory=cf_user_calendar_view_attachment,
) as g:
g.custom_command('create-upload-session', 'usersactions_user_calendar_view_attachment_create_upload_session')
with self.command_group(
'usersactions user-calendar-view-calendar-calendar-view',
usersactions_v1_0_user_calendar_view_calendar_calendar_view,
client_factory=cf_user_calendar_view_calendar_calendar_view,
) as g:
g.custom_command('accept', 'usersactions_user_calendar_view_calendar_calendar_view_accept')
g.custom_command('decline', 'usersactions_user_calendar_view_calendar_calendar_view_decline')
g.custom_command('dismiss-reminder', 'usersactions_user_calendar_view_calendar_calendar_view_dismiss_reminder')
g.custom_command('snooze-reminder', 'usersactions_user_calendar_view_calendar_calendar_view_snooze_reminder')
g.custom_command(
'tentatively-accept', 'usersactions_user_calendar_view_calendar_calendar_view_tentatively_accept'
)
with self.command_group(
'usersactions user-calendar-view-calendar-event',
usersactions_v1_0_user_calendar_view_calendar_event,
client_factory=cf_user_calendar_view_calendar_event,
) as g:
g.custom_command('accept', 'usersactions_user_calendar_view_calendar_event_accept')
g.custom_command('decline', 'usersactions_user_calendar_view_calendar_event_decline')
g.custom_command('dismiss-reminder', 'usersactions_user_calendar_view_calendar_event_dismiss_reminder')
g.custom_command('snooze-reminder', 'usersactions_user_calendar_view_calendar_event_snooze_reminder')
g.custom_command('tentatively-accept', 'usersactions_user_calendar_view_calendar_event_tentatively_accept')
with self.command_group(
'usersactions user-calendar-view-calendar',
usersactions_v1_0_user_calendar_view_calendar,
client_factory=cf_user_calendar_view_calendar,
) as g:
g.custom_command('get-schedule', 'usersactions_user_calendar_view_calendar_get_schedule')
with self.command_group(
'usersactions user-calendar-view-instance',
usersactions_v1_0_user_calendar_view_instance,
client_factory=cf_user_calendar_view_instance,
) as g:
g.custom_command('accept', 'usersactions_user_calendar_view_instance_accept')
g.custom_command('decline', 'usersactions_user_calendar_view_instance_decline')
g.custom_command('dismiss-reminder', 'usersactions_user_calendar_view_instance_dismiss_reminder')
g.custom_command('snooze-reminder', 'usersactions_user_calendar_view_instance_snooze_reminder')
g.custom_command('tentatively-accept', 'usersactions_user_calendar_view_instance_tentatively_accept')
with self.command_group(
'usersactions user-calendar-view', usersactions_v1_0_user_calendar_view, client_factory=cf_user_calendar_view
) as g:
g.custom_command('accept', 'usersactions_user_calendar_view_accept')
g.custom_command('decline', 'usersactions_user_calendar_view_decline')
g.custom_command('dismiss-reminder', 'usersactions_user_calendar_view_dismiss_reminder')
g.custom_command('snooze-reminder', 'usersactions_user_calendar_view_snooze_reminder')
g.custom_command('tentatively-accept', 'usersactions_user_calendar_view_tentatively_accept')
with self.command_group(
'usersactions user-event-attachment',
usersactions_v1_0_user_event_attachment,
client_factory=cf_user_event_attachment,
) as g:
g.custom_command('create-upload-session', 'usersactions_user_event_attachment_create_upload_session')
with self.command_group(
'usersactions user-event-calendar-calendar-view',
usersactions_v1_0_user_event_calendar_calendar_view,
client_factory=cf_user_event_calendar_calendar_view,
) as g:
g.custom_command('accept', 'usersactions_user_event_calendar_calendar_view_accept')
g.custom_command('decline', 'usersactions_user_event_calendar_calendar_view_decline')
g.custom_command('dismiss-reminder', 'usersactions_user_event_calendar_calendar_view_dismiss_reminder')
g.custom_command('snooze-reminder', 'usersactions_user_event_calendar_calendar_view_snooze_reminder')
g.custom_command('tentatively-accept', 'usersactions_user_event_calendar_calendar_view_tentatively_accept')
with self.command_group(
'usersactions user-event-calendar-event',
usersactions_v1_0_user_event_calendar_event,
client_factory=cf_user_event_calendar_event,
) as g:
g.custom_command('accept', 'usersactions_user_event_calendar_event_accept')
g.custom_command('decline', 'usersactions_user_event_calendar_event_decline')
g.custom_command('dismiss-reminder', 'usersactions_user_event_calendar_event_dismiss_reminder')
g.custom_command('snooze-reminder', 'usersactions_user_event_calendar_event_snooze_reminder')
g.custom_command('tentatively-accept', 'usersactions_user_event_calendar_event_tentatively_accept')
with self.command_group(
'usersactions user-event-calendar', usersactions_v1_0_user_event_calendar, client_factory=cf_user_event_calendar
) as g:
g.custom_command('get-schedule', 'usersactions_user_event_calendar_get_schedule')
with self.command_group(
'usersactions user-event-instance', usersactions_v1_0_user_event_instance, client_factory=cf_user_event_instance
) as g:
g.custom_command('accept', 'usersactions_user_event_instance_accept')
g.custom_command('decline', 'usersactions_user_event_instance_decline')
g.custom_command('dismiss-reminder', 'usersactions_user_event_instance_dismiss_reminder')
g.custom_command('snooze-reminder', 'usersactions_user_event_instance_snooze_reminder')
g.custom_command('tentatively-accept', 'usersactions_user_event_instance_tentatively_accept')
with self.command_group('usersactions user-event', usersactions_v1_0_user_event, client_factory=cf_user_event) as g:
g.custom_command('accept', 'usersactions_user_event_accept')
g.custom_command('decline', 'usersactions_user_event_decline')
g.custom_command('dismiss-reminder', 'usersactions_user_event_dismiss_reminder')
g.custom_command('snooze-reminder', 'usersactions_user_event_snooze_reminder')
g.custom_command('tentatively-accept', 'usersactions_user_event_tentatively_accept')
with self.command_group(
'usersactions user-mail-folder-child-folder',
usersactions_v1_0_user_mail_folder_child_folder,
client_factory=cf_user_mail_folder_child_folder,
) as g:
g.custom_command('copy', 'usersactions_user_mail_folder_child_folder_copy')
g.custom_command('move', 'usersactions_user_mail_folder_child_folder_move')
with self.command_group(
'usersactions user-mail-folder-message-attachment',
usersactions_v1_0_user_mail_folder_message_attachment,
client_factory=cf_user_mail_folder_message_attachment,
) as g:
g.custom_command(
'create-upload-session', 'usersactions_user_mail_folder_message_attachment_create_upload_session'
)
with self.command_group(
'usersactions user-mail-folder-message',
usersactions_v1_0_user_mail_folder_message,
client_factory=cf_user_mail_folder_message,
) as g:
g.custom_command('copy', 'usersactions_user_mail_folder_message_copy')
g.custom_command('create-forward', 'usersactions_user_mail_folder_message_create_forward')
g.custom_command('create-reply', 'usersactions_user_mail_folder_message_create_reply')
g.custom_command('create-reply-all', 'usersactions_user_mail_folder_message_create_reply_all')
g.custom_command('forward', 'usersactions_user_mail_folder_message_forward')
g.custom_command('move', 'usersactions_user_mail_folder_message_move')
g.custom_command('reply', 'usersactions_user_mail_folder_message_reply')
g.custom_command('reply-all', 'usersactions_user_mail_folder_message_reply_all')
g.custom_command('send', 'usersactions_user_mail_folder_message_send')
with self.command_group(
'usersactions user-mail-folder', usersactions_v1_0_user_mail_folder, client_factory=cf_user_mail_folder
) as g:
g.custom_command('copy', 'usersactions_user_mail_folder_copy')
g.custom_command('move', 'usersactions_user_mail_folder_move')
with self.command_group(
'usersactions user-managed-device', usersactions_v1_0_user_managed_device, client_factory=cf_user_managed_device
) as g:
g.custom_command('bypass-activation-lock', 'usersactions_user_managed_device_bypass_activation_lock')
g.custom_command('clean-window-device', 'usersactions_user_managed_device_clean_window_device')
g.custom_command(
'delete-user-from-shared-apple-device',
'usersactions_user_managed_device_delete_user_from_shared_apple_device',
)
g.custom_command('disable-lost-mode', 'usersactions_user_managed_device_disable_lost_mode')
g.custom_command('locate-device', 'usersactions_user_managed_device_locate_device')
g.custom_command(
'logout-shared-apple-device-active-user',
'usersactions_user_managed_device_logout_shared_apple_device_active_user',
)
g.custom_command('reboot-now', 'usersactions_user_managed_device_reboot_now')
g.custom_command('recover-passcode', 'usersactions_user_managed_device_recover_passcode')
g.custom_command('remote-lock', 'usersactions_user_managed_device_remote_lock')
g.custom_command('request-remote-assistance', 'usersactions_user_managed_device_request_remote_assistance')
g.custom_command('reset-passcode', 'usersactions_user_managed_device_reset_passcode')
g.custom_command('retire', 'usersactions_user_managed_device_retire')
g.custom_command('shut-down', 'usersactions_user_managed_device_shut_down')
g.custom_command('sync-device', 'usersactions_user_managed_device_sync_device')
g.custom_command(
'update-window-device-account', 'usersactions_user_managed_device_update_window_device_account'
)
g.custom_command('window-defender-scan', 'usersactions_user_managed_device_window_defender_scan')
g.custom_command(
'window-defender-update-signature', 'usersactions_user_managed_device_window_defender_update_signature'
)
g.custom_command('wipe', 'usersactions_user_managed_device_wipe')
with self.command_group(
'usersactions user-message-attachment',
usersactions_v1_0_user_message_attachment,
client_factory=cf_user_message_attachment,
) as g:
g.custom_command('create-upload-session', 'usersactions_user_message_attachment_create_upload_session')
with self.command_group(
'usersactions user-message', usersactions_v1_0_user_message, client_factory=cf_user_message
) as g:
g.custom_command('copy', 'usersactions_user_message_copy')
g.custom_command('create-forward', 'usersactions_user_message_create_forward')
g.custom_command('create-reply', 'usersactions_user_message_create_reply')
g.custom_command('create-reply-all', 'usersactions_user_message_create_reply_all')
g.custom_command('forward', 'usersactions_user_message_forward')
g.custom_command('move', 'usersactions_user_message_move')
g.custom_command('reply', 'usersactions_user_message_reply')
g.custom_command('reply-all', 'usersactions_user_message_reply_all')
g.custom_command('send', 'usersactions_user_message_send')
with self.command_group('usersactions user', usersactions_v1_0_user, client_factory=cf_user) as g:
g.custom_command('assign-license', 'usersactions_user_assign_license')
g.custom_command('change-password', 'usersactions_user_change_password')
g.custom_command('check-member-group', 'usersactions_user_check_member_group')
g.custom_command('check-member-object', 'usersactions_user_check_member_object')
g.custom_command('export-personal-data', 'usersactions_user_export_personal_data')
g.custom_command('find-meeting-time', 'usersactions_user_find_meeting_time')
g.custom_command('get-available-extension-property', 'usersactions_user_get_available_extension_property')
g.custom_command('get-by-id', 'usersactions_user_get_by_id')
g.custom_command('get-mail-tip', 'usersactions_user_get_mail_tip')
g.custom_command('get-member-group', 'usersactions_user_get_member_group')
g.custom_command('get-member-object', 'usersactions_user_get_member_object')
g.custom_command('remove-all-device-from-management', 'usersactions_user_remove_all_device_from_management')
g.custom_command('reprocess-license-assignment', 'usersactions_user_reprocess_license_assignment')
g.custom_command('restore', 'usersactions_user_restore')
g.custom_command('revoke-sign-in-session', 'usersactions_user_revoke_sign_in_session')
g.custom_command('send-mail', 'usersactions_user_send_mail')
g.custom_command('translate-exchange-id', 'usersactions_user_translate_exchange_id')
g.custom_command('validate-property', 'usersactions_user_validate_property')
g.custom_command(
'wipe-managed-app-registration-by-device-tag',
'usersactions_user_wipe_managed_app_registration_by_device_tag',
)
with self.command_group(
'usersactions user-onenote-notebook',
usersactions_v1_0_user_onenote_notebook,
client_factory=cf_user_onenote_notebook,
) as g:
g.custom_command('copy-notebook', 'usersactions_user_onenote_notebook_copy_notebook')
g.custom_command('get-notebook-from-web-url', 'usersactions_user_onenote_notebook_get_notebook_from_web_url')
with self.command_group(
'usersactions user-onenote-notebook-section-group-parent-notebook',
usersactions_v1_0_user_onenote_notebook_section_group_parent_notebook,
client_factory=cf_user_onenote_notebook_section_group_parent_notebook,
) as g:
g.custom_command(
'copy-notebook', 'usersactions_user_onenote_notebook_section_group_parent_notebook_copy_notebook'
)
with self.command_group(
'usersactions user-onenote-notebook-section-group-section',
usersactions_v1_0_user_onenote_notebook_section_group_section,
client_factory=cf_user_onenote_notebook_section_group_section,
) as g:
g.custom_command(
'copy-to-notebook', 'usersactions_user_onenote_notebook_section_group_section_copy_to_notebook'
)
g.custom_command(
'copy-to-section-group', 'usersactions_user_onenote_notebook_section_group_section_copy_to_section_group'
)
with self.command_group(
'usersactions user-onenote-notebook-section-group-section-page',
usersactions_v1_0_user_onenote_notebook_section_group_section_page,
client_factory=cf_user_onenote_notebook_section_group_section_page,
) as g:
g.custom_command(
'copy-to-section', 'usersactions_user_onenote_notebook_section_group_section_page_copy_to_section'
)
g.custom_command(
'onenote-patch-content',
'usersactions_user_onenote_notebook_section_group_section_page_onenote_patch_content',
)
with self.command_group(
'usersactions user-onenote-notebook-section-group-section-page-parent-notebook',
usersactions_v1_0_user_onenote_notebook_section_group_section_page_parent_notebook,
client_factory=cf_user_onenote_notebook_section_group_section_page_parent_notebook,
) as g:
g.custom_command(
'copy-notebook',
'usersactions_user_onenote_notebook_section_group_section_page_parent_notebook_copy_notebook',
)
with self.command_group(
'usersactions user-onenote-notebook-section-group-section-page-parent-section',
usersactions_v1_0_user_onenote_notebook_section_group_section_page_parent_section,
client_factory=cf_user_onenote_notebook_section_group_section_page_parent_section,
) as g:
g.custom_command(
'copy-to-notebook',
'usersactions_user_onenote_notebook_section_group_section_page_parent_section_copy_to_notebook',
)
g.custom_command(
'copy-to-section-group',
'usersactions_user_onenote_notebook_section_group_section_page_parent_section_copy_to_section_group',
)
with self.command_group(
'usersactions user-onenote-notebook-section-group-section-parent-notebook',
usersactions_v1_0_user_onenote_notebook_section_group_section_parent_notebook,
client_factory=cf_user_onenote_notebook_section_group_section_parent_notebook,
) as g:
g.custom_command(
'copy-notebook', 'usersactions_user_onenote_notebook_section_group_section_parent_notebook_copy_notebook'
)
with self.command_group(
'usersactions user-onenote-notebook-section',
usersactions_v1_0_user_onenote_notebook_section,
client_factory=cf_user_onenote_notebook_section,
) as g:
g.custom_command('copy-to-notebook', 'usersactions_user_onenote_notebook_section_copy_to_notebook')
g.custom_command('copy-to-section-group', 'usersactions_user_onenote_notebook_section_copy_to_section_group')
with self.command_group(
'usersactions user-onenote-notebook-section-page',
usersactions_v1_0_user_onenote_notebook_section_page,
client_factory=cf_user_onenote_notebook_section_page,
) as g:
g.custom_command('copy-to-section', 'usersactions_user_onenote_notebook_section_page_copy_to_section')
g.custom_command(
'onenote-patch-content', 'usersactions_user_onenote_notebook_section_page_onenote_patch_content'
)
with self.command_group(
'usersactions user-onenote-notebook-section-page-parent-notebook',
usersactions_v1_0_user_onenote_notebook_section_page_parent_notebook,
client_factory=cf_user_onenote_notebook_section_page_parent_notebook,
) as g:
g.custom_command(
'copy-notebook', 'usersactions_user_onenote_notebook_section_page_parent_notebook_copy_notebook'
)
with self.command_group(
'usersactions user-onenote-notebook-section-page-parent-section',
usersactions_v1_0_user_onenote_notebook_section_page_parent_section,
client_factory=cf_user_onenote_notebook_section_page_parent_section,
) as g:
g.custom_command(
'copy-to-notebook', 'usersactions_user_onenote_notebook_section_page_parent_section_copy_to_notebook'
)
g.custom_command(
'copy-to-section-group',
'usersactions_user_onenote_notebook_section_page_parent_section_copy_to_section_group',
)
with self.command_group(
'usersactions user-onenote-notebook-section-parent-notebook',
usersactions_v1_0_user_onenote_notebook_section_parent_notebook,
client_factory=cf_user_onenote_notebook_section_parent_notebook,
) as g:
g.custom_command('copy-notebook', 'usersactions_user_onenote_notebook_section_parent_notebook_copy_notebook')
with self.command_group(
'usersactions user-onenote-notebook-section-parent-section-group-parent-notebook',
usersactions_v1_0_user_onenote_notebook_section_parent_section_group_parent_notebook,
client_factory=cf_user_onenote_notebook_section_parent_section_group_parent_notebook,
) as g:
g.custom_command(
'copy-notebook',
'usersactions_user_onenote_notebook_section_parent_section_group_parent_notebook_copy_notebook',
)
with self.command_group(
'usersactions user-onenote-notebook-section-parent-section-group-section',
usersactions_v1_0_user_onenote_notebook_section_parent_section_group_section,
client_factory=cf_user_onenote_notebook_section_parent_section_group_section,
) as g:
g.custom_command(
'copy-to-notebook',
'usersactions_user_onenote_notebook_section_parent_section_group_section_copy_to_notebook',
)
g.custom_command(
'copy-to-section-group',
'usersactions_user_onenote_notebook_section_parent_section_group_section_copy_to_section_group',
)
with self.command_group(
'usersactions user-onenote-page', usersactions_v1_0_user_onenote_page, client_factory=cf_user_onenote_page
) as g:
g.custom_command('copy-to-section', 'usersactions_user_onenote_page_copy_to_section')
g.custom_command('onenote-patch-content', 'usersactions_user_onenote_page_onenote_patch_content')
with self.command_group(
'usersactions user-onenote-page-parent-notebook',
usersactions_v1_0_user_onenote_page_parent_notebook,
client_factory=cf_user_onenote_page_parent_notebook,
) as g:
g.custom_command('copy-notebook', 'usersactions_user_onenote_page_parent_notebook_copy_notebook')
with self.command_group(
'usersactions user-onenote-page-parent-notebook-section-group-parent-notebook',
usersactions_v1_0_user_onenote_page_parent_notebook_section_group_parent_notebook,
client_factory=cf_user_onenote_page_parent_notebook_section_group_parent_notebook,
) as g:
g.custom_command(
'copy-notebook',
'usersactions_user_onenote_page_parent_notebook_section_group_parent_notebook_copy_notebook',
)
with self.command_group(
'usersactions user-onenote-page-parent-notebook-section-group-section',
usersactions_v1_0_user_onenote_page_parent_notebook_section_group_section,
client_factory=cf_user_onenote_page_parent_notebook_section_group_section,
) as g:
g.custom_command(
'copy-to-notebook', 'usersactions_user_onenote_page_parent_notebook_section_group_section_copy_to_notebook'
)
g.custom_command(
'copy-to-section-group',
'usersactions_user_onenote_page_parent_notebook_section_group_section_copy_to_section_group',
)
with self.command_group(
'usersactions user-onenote-page-parent-notebook-section-group-section-page',
usersactions_v1_0_user_onenote_page_parent_notebook_section_group_section_page,
client_factory=cf_user_onenote_page_parent_notebook_section_group_section_page,
) as g:
g.custom_command(
'copy-to-section',
'usersactions_user_onenote_page_parent_notebook_section_group_section_page_copy_to_section',
)
g.custom_command(
'onenote-patch-content',
'usersactions_user_onenote_page_parent_notebook_section_group_section_page_onenote_patch_content',
)
with self.command_group(
'usersactions user-onenote-page-parent-notebook-section-group-section-parent-notebook',
usersactions_v1_0_user_onenote_page_parent_notebook_section_group_section_parent_notebook,
client_factory=cf_user_onenote_page_parent_notebook_section_group_section_parent_notebook,
) as g:
g.custom_command(
'copy-notebook',
'usersactions_user_onenote_page_parent_notebook_section_group_section_parent_notebook_copy_notebook',
)
with self.command_group(
'usersactions user-onenote-page-parent-notebook-section',
usersactions_v1_0_user_onenote_page_parent_notebook_section,
client_factory=cf_user_onenote_page_parent_notebook_section,
) as g:
g.custom_command('copy-to-notebook', 'usersactions_user_onenote_page_parent_notebook_section_copy_to_notebook')
g.custom_command(
'copy-to-section-group', 'usersactions_user_onenote_page_parent_notebook_section_copy_to_section_group'
)
with self.command_group(
'usersactions user-onenote-page-parent-notebook-section-page',
usersactions_v1_0_user_onenote_page_parent_notebook_section_page,
client_factory=cf_user_onenote_page_parent_notebook_section_page,
) as g:
g.custom_command(
'copy-to-section', 'usersactions_user_onenote_page_parent_notebook_section_page_copy_to_section'
)
g.custom_command(
'onenote-patch-content', 'usersactions_user_onenote_page_parent_notebook_section_page_onenote_patch_content'
)
with self.command_group(
'usersactions user-onenote-page-parent-notebook-section-parent-notebook',
usersactions_v1_0_user_onenote_page_parent_notebook_section_parent_notebook,
client_factory=cf_user_onenote_page_parent_notebook_section_parent_notebook,
) as g:
g.custom_command(
'copy-notebook', 'usersactions_user_onenote_page_parent_notebook_section_parent_notebook_copy_notebook'
)
with self.command_group(
'usersactions user-onenote-page-parent-notebook-section-parent-section-group-parent-notebook',
usersactions_v1_0_user_onenote_page_parent_notebook_section_parent_section_group_parent_notebook,
client_factory=cf_user_onenote_page_parent_notebook_section_parent_section_group_parent_notebook,
) as g:
g.custom_command(
'copy-notebook',
'usersactions_user_onenote_page_parent_notebook_section_parent_section_group_parent_notebook_copy_notebook',
)
with self.command_group(
'usersactions user-onenote-page-parent-notebook-section-parent-section-group-section',
usersactions_v1_0_user_onenote_page_parent_notebook_section_parent_section_group_section,
client_factory=cf_user_onenote_page_parent_notebook_section_parent_section_group_section,
) as g:
g.custom_command(
'copy-to-notebook',
'usersactions_user_onenote_page_parent_notebook_section_parent_section_group_section_copy_to_notebook',
)
g.custom_command(
'copy-to-section-group',
'usersactions_user_onenote_page_parent_notebook_section_parent_section_group_section_copy_to_section_group',
)
with self.command_group(
'usersactions user-onenote-page-parent-section',
usersactions_v1_0_user_onenote_page_parent_section,
client_factory=cf_user_onenote_page_parent_section,
) as g:
g.custom_command('copy-to-notebook', 'usersactions_user_onenote_page_parent_section_copy_to_notebook')
g.custom_command('copy-to-section-group', 'usersactions_user_onenote_page_parent_section_copy_to_section_group')
with self.command_group(
'usersactions user-onenote-page-parent-section-page',
usersactions_v1_0_user_onenote_page_parent_section_page,
client_factory=cf_user_onenote_page_parent_section_page,
) as g:
g.custom_command('copy-to-section', 'usersactions_user_onenote_page_parent_section_page_copy_to_section')
g.custom_command(
'onenote-patch-content', 'usersactions_user_onenote_page_parent_section_page_onenote_patch_content'
)
with self.command_group(
'usersactions user-onenote-page-parent-section-parent-notebook',
usersactions_v1_0_user_onenote_page_parent_section_parent_notebook,
client_factory=cf_user_onenote_page_parent_section_parent_notebook,
) as g:
g.custom_command('copy-notebook', 'usersactions_user_onenote_page_parent_section_parent_notebook_copy_notebook')
with self.command_group(
'usersactions user-onenote-page-parent-section-parent-notebook-section-group-parent-notebook',
usersactions_v1_0_user_onenote_page_parent_section_parent_notebook_section_group_parent_notebook,
client_factory=cf_user_onenote_page_parent_section_parent_notebook_section_group_parent_notebook,
) as g:
g.custom_command(
'copy-notebook',
'usersactions_user_onenote_page_parent_section_parent_notebook_section_group_parent_notebook_copy_notebook',
)
with self.command_group(
'usersactions user-onenote-page-parent-section-parent-notebook-section-group-section',
usersactions_v1_0_user_onenote_page_parent_section_parent_notebook_section_group_section,
client_factory=cf_user_onenote_page_parent_section_parent_notebook_section_group_section,
) as g:
g.custom_command(
'copy-to-notebook',
'usersactions_user_onenote_page_parent_section_parent_notebook_section_group_section_copy_to_notebook',
)
g.custom_command(
'copy-to-section-group',
'usersactions_user_onenote_page_parent_section_parent_notebook_section_group_section_copy_to_section_group',
)
with self.command_group(
'usersactions user-onenote-page-parent-section-parent-notebook-section',
usersactions_v1_0_user_onenote_page_parent_section_parent_notebook_section,
client_factory=cf_user_onenote_page_parent_section_parent_notebook_section,
) as g:
g.custom_command(
'copy-to-notebook', 'usersactions_user_onenote_page_parent_section_parent_notebook_section_copy_to_notebook'
)
g.custom_command(
'copy-to-section-group',
'usersactions_user_onenote_page_parent_section_parent_notebook_section_copy_to_section_group',
)
with self.command_group(
'usersactions user-onenote-page-parent-section-parent-section-group-parent-notebook',
usersactions_v1_0_user_onenote_page_parent_section_parent_section_group_parent_notebook,
client_factory=cf_user_onenote_page_parent_section_parent_section_group_parent_notebook,
) as g:
g.custom_command(
'copy-notebook',
'usersactions_user_onenote_page_parent_section_parent_section_group_parent_notebook_copy_notebook',
)
with self.command_group(
'usersactions user-onenote-page-parent-section-parent-section-group-parent-notebook-section',
usersactions_v1_0_user_onenote_page_parent_section_parent_section_group_parent_notebook_section,
client_factory=cf_user_onenote_page_parent_section_parent_section_group_parent_notebook_section,
) as g:
g.custom_command(
'copy-to-notebook',
'usersactions_user_onenote_page_parent_section_parent_section_group_parent_notebook_section_copy_to_notebook',
)
g.custom_command(
'copy-to-section-group',
'usersactions_user_onenote_page_parent_section_parent_section_group_parent_notebook_section_copy_to_section_group',
)
with self.command_group(
'usersactions user-onenote-page-parent-section-parent-section-group-section',
usersactions_v1_0_user_onenote_page_parent_section_parent_section_group_section,
client_factory=cf_user_onenote_page_parent_section_parent_section_group_section,
) as g:
g.custom_command(
'copy-to-notebook',
'usersactions_user_onenote_page_parent_section_parent_section_group_section_copy_to_notebook',
)
g.custom_command(
'copy-to-section-group',
'usersactions_user_onenote_page_parent_section_parent_section_group_section_copy_to_section_group',
)
with self.command_group(
'usersactions user-onenote-section-group-parent-notebook',
usersactions_v1_0_user_onenote_section_group_parent_notebook,
client_factory=cf_user_onenote_section_group_parent_notebook,
) as g:
g.custom_command('copy-notebook', 'usersactions_user_onenote_section_group_parent_notebook_copy_notebook')
with self.command_group(
'usersactions user-onenote-section-group-parent-notebook-section',
usersactions_v1_0_user_onenote_section_group_parent_notebook_section,
client_factory=cf_user_onenote_section_group_parent_notebook_section,
) as g:
g.custom_command(
'copy-to-notebook', 'usersactions_user_onenote_section_group_parent_notebook_section_copy_to_notebook'
)
g.custom_command(
'copy-to-section-group',
'usersactions_user_onenote_section_group_parent_notebook_section_copy_to_section_group',
)
with self.command_group(
'usersactions user-onenote-section-group-parent-notebook-section-page',
usersactions_v1_0_user_onenote_section_group_parent_notebook_section_page,
client_factory=cf_user_onenote_section_group_parent_notebook_section_page,
) as g:
g.custom_command(
'copy-to-section', 'usersactions_user_onenote_section_group_parent_notebook_section_page_copy_to_section'
)
g.custom_command(
'onenote-patch-content',
'usersactions_user_onenote_section_group_parent_notebook_section_page_onenote_patch_content',
)
with self.command_group(
'usersactions user-onenote-section-group-parent-notebook-section-page-parent-notebook',
usersactions_v1_0_user_onenote_section_group_parent_notebook_section_page_parent_notebook,
client_factory=cf_user_onenote_section_group_parent_notebook_section_page_parent_notebook,
) as g:
g.custom_command(
'copy-notebook',
'usersactions_user_onenote_section_group_parent_notebook_section_page_parent_notebook_copy_notebook',
)
with self.command_group(
'usersactions user-onenote-section-group-parent-notebook-section-page-parent-section',
usersactions_v1_0_user_onenote_section_group_parent_notebook_section_page_parent_section,
client_factory=cf_user_onenote_section_group_parent_notebook_section_page_parent_section,
) as g:
g.custom_command(
'copy-to-notebook',
'usersactions_user_onenote_section_group_parent_notebook_section_page_parent_section_copy_to_notebook',
)
g.custom_command(
'copy-to-section-group',
'usersactions_user_onenote_section_group_parent_notebook_section_page_parent_section_copy_to_section_group',
)
with self.command_group(
'usersactions user-onenote-section-group-parent-notebook-section-parent-notebook',
usersactions_v1_0_user_onenote_section_group_parent_notebook_section_parent_notebook,
client_factory=cf_user_onenote_section_group_parent_notebook_section_parent_notebook,
) as g:
g.custom_command(
'copy-notebook',
'usersactions_user_onenote_section_group_parent_notebook_section_parent_notebook_copy_notebook',
)
with self.command_group(
'usersactions user-onenote-section-group-section',
usersactions_v1_0_user_onenote_section_group_section,
client_factory=cf_user_onenote_section_group_section,
) as g:
g.custom_command('copy-to-notebook', 'usersactions_user_onenote_section_group_section_copy_to_notebook')
g.custom_command(
'copy-to-section-group', 'usersactions_user_onenote_section_group_section_copy_to_section_group'
)
with self.command_group(
'usersactions user-onenote-section-group-section-page',
usersactions_v1_0_user_onenote_section_group_section_page,
client_factory=cf_user_onenote_section_group_section_page,
) as g:
g.custom_command('copy-to-section', 'usersactions_user_onenote_section_group_section_page_copy_to_section')
g.custom_command(
'onenote-patch-content', 'usersactions_user_onenote_section_group_section_page_onenote_patch_content'
)
with self.command_group(
'usersactions user-onenote-section-group-section-page-parent-notebook',
usersactions_v1_0_user_onenote_section_group_section_page_parent_notebook,
client_factory=cf_user_onenote_section_group_section_page_parent_notebook,
) as g:
g.custom_command(
'copy-notebook', 'usersactions_user_onenote_section_group_section_page_parent_notebook_copy_notebook'
)
with self.command_group(
'usersactions user-onenote-section-group-section-page-parent-notebook-section',
usersactions_v1_0_user_onenote_section_group_section_page_parent_notebook_section,
client_factory=cf_user_onenote_section_group_section_page_parent_notebook_section,
) as g:
g.custom_command(
'copy-to-notebook',
'usersactions_user_onenote_section_group_section_page_parent_notebook_section_copy_to_notebook',
)
g.custom_command(
'copy-to-section-group',
'usersactions_user_onenote_section_group_section_page_parent_notebook_section_copy_to_section_group',
)
with self.command_group(
'usersactions user-onenote-section-group-section-page-parent-section',
usersactions_v1_0_user_onenote_section_group_section_page_parent_section,
client_factory=cf_user_onenote_section_group_section_page_parent_section,
) as g:
g.custom_command(
'copy-to-notebook', 'usersactions_user_onenote_section_group_section_page_parent_section_copy_to_notebook'
)
g.custom_command(
'copy-to-section-group',
'usersactions_user_onenote_section_group_section_page_parent_section_copy_to_section_group',
)
with self.command_group(
'usersactions user-onenote-section-group-section-parent-notebook',
usersactions_v1_0_user_onenote_section_group_section_parent_notebook,
client_factory=cf_user_onenote_section_group_section_parent_notebook,
) as g:
g.custom_command(
'copy-notebook', 'usersactions_user_onenote_section_group_section_parent_notebook_copy_notebook'
)
with self.command_group(
'usersactions user-onenote-section-group-section-parent-notebook-section',
usersactions_v1_0_user_onenote_section_group_section_parent_notebook_section,
client_factory=cf_user_onenote_section_group_section_parent_notebook_section,
) as g:
g.custom_command(
'copy-to-notebook',
'usersactions_user_onenote_section_group_section_parent_notebook_section_copy_to_notebook',
)
g.custom_command(
'copy-to-section-group',
'usersactions_user_onenote_section_group_section_parent_notebook_section_copy_to_section_group',
)
with self.command_group(
'usersactions user-onenote-section',
usersactions_v1_0_user_onenote_section,
client_factory=cf_user_onenote_section,
) as g:
g.custom_command('copy-to-notebook', 'usersactions_user_onenote_section_copy_to_notebook')
g.custom_command('copy-to-section-group', 'usersactions_user_onenote_section_copy_to_section_group')
with self.command_group(
'usersactions user-onenote-section-page',
usersactions_v1_0_user_onenote_section_page,
client_factory=cf_user_onenote_section_page,
) as g:
g.custom_command('copy-to-section', 'usersactions_user_onenote_section_page_copy_to_section')
g.custom_command('onenote-patch-content', 'usersactions_user_onenote_section_page_onenote_patch_content')
with self.command_group(
'usersactions user-onenote-section-page-parent-notebook',
usersactions_v1_0_user_onenote_section_page_parent_notebook,
client_factory=cf_user_onenote_section_page_parent_notebook,
) as g:
g.custom_command('copy-notebook', 'usersactions_user_onenote_section_page_parent_notebook_copy_notebook')
with self.command_group(
'usersactions user-onenote-section-page-parent-notebook-section-group-parent-notebook',
usersactions_v1_0_user_onenote_section_page_parent_notebook_section_group_parent_notebook,
client_factory=cf_user_onenote_section_page_parent_notebook_section_group_parent_notebook,
) as g:
g.custom_command(
'copy-notebook',
'usersactions_user_onenote_section_page_parent_notebook_section_group_parent_notebook_copy_notebook',
)
# NOTE(review): generated Azure CLI command wiring (MS Graph "usersactions"
# v1.0 OneNote copy actions). Each command_group binds a generated operations
# class and its client factory, then registers the custom copy-* commands by
# the name of their implementation function in the custom module.
with self.command_group(
    'usersactions user-onenote-section-page-parent-notebook-section-group-section',
    usersactions_v1_0_user_onenote_section_page_parent_notebook_section_group_section,
    client_factory=cf_user_onenote_section_page_parent_notebook_section_group_section,
) as g:
    g.custom_command(
        'copy-to-notebook',
        'usersactions_user_onenote_section_page_parent_notebook_section_group_section_copy_to_notebook',
    )
    g.custom_command(
        'copy-to-section-group',
        'usersactions_user_onenote_section_page_parent_notebook_section_group_section_copy_to_section_group',
    )
with self.command_group(
    'usersactions user-onenote-section-page-parent-notebook-section',
    usersactions_v1_0_user_onenote_section_page_parent_notebook_section,
    client_factory=cf_user_onenote_section_page_parent_notebook_section,
) as g:
    g.custom_command(
        'copy-to-notebook', 'usersactions_user_onenote_section_page_parent_notebook_section_copy_to_notebook'
    )
    g.custom_command(
        'copy-to-section-group',
        'usersactions_user_onenote_section_page_parent_notebook_section_copy_to_section_group',
    )
with self.command_group(
    'usersactions user-onenote-section-page-parent-section',
    usersactions_v1_0_user_onenote_section_page_parent_section,
    client_factory=cf_user_onenote_section_page_parent_section,
) as g:
    g.custom_command('copy-to-notebook', 'usersactions_user_onenote_section_page_parent_section_copy_to_notebook')
    g.custom_command(
        'copy-to-section-group', 'usersactions_user_onenote_section_page_parent_section_copy_to_section_group'
    )
with self.command_group(
    'usersactions user-onenote-section-parent-notebook',
    usersactions_v1_0_user_onenote_section_parent_notebook,
    client_factory=cf_user_onenote_section_parent_notebook,
) as g:
    g.custom_command('copy-notebook', 'usersactions_user_onenote_section_parent_notebook_copy_notebook')
with self.command_group(
    'usersactions user-onenote-section-parent-notebook-section-group-parent-notebook',
    usersactions_v1_0_user_onenote_section_parent_notebook_section_group_parent_notebook,
    client_factory=cf_user_onenote_section_parent_notebook_section_group_parent_notebook,
) as g:
    g.custom_command(
        'copy-notebook',
        'usersactions_user_onenote_section_parent_notebook_section_group_parent_notebook_copy_notebook',
    )
with self.command_group(
    'usersactions user-onenote-section-parent-notebook-section-group-section',
    usersactions_v1_0_user_onenote_section_parent_notebook_section_group_section,
    client_factory=cf_user_onenote_section_parent_notebook_section_group_section,
) as g:
    g.custom_command(
        'copy-to-notebook',
        'usersactions_user_onenote_section_parent_notebook_section_group_section_copy_to_notebook',
    )
    g.custom_command(
        'copy-to-section-group',
        'usersactions_user_onenote_section_parent_notebook_section_group_section_copy_to_section_group',
    )
with self.command_group(
    'usersactions user-onenote-section-parent-notebook-section',
    usersactions_v1_0_user_onenote_section_parent_notebook_section,
    client_factory=cf_user_onenote_section_parent_notebook_section,
) as g:
    g.custom_command(
        'copy-to-notebook', 'usersactions_user_onenote_section_parent_notebook_section_copy_to_notebook'
    )
    g.custom_command(
        'copy-to-section-group', 'usersactions_user_onenote_section_parent_notebook_section_copy_to_section_group'
    )
with self.command_group(
    'usersactions user-onenote-section-parent-section-group-parent-notebook',
    usersactions_v1_0_user_onenote_section_parent_section_group_parent_notebook,
    client_factory=cf_user_onenote_section_parent_section_group_parent_notebook,
) as g:
    g.custom_command(
        'copy-notebook', 'usersactions_user_onenote_section_parent_section_group_parent_notebook_copy_notebook'
    )
with self.command_group(
    'usersactions user-onenote-section-parent-section-group-parent-notebook-section',
    usersactions_v1_0_user_onenote_section_parent_section_group_parent_notebook_section,
    client_factory=cf_user_onenote_section_parent_section_group_parent_notebook_section,
) as g:
    g.custom_command(
        'copy-to-notebook',
        'usersactions_user_onenote_section_parent_section_group_parent_notebook_section_copy_to_notebook',
    )
    g.custom_command(
        'copy-to-section-group',
        'usersactions_user_onenote_section_parent_section_group_parent_notebook_section_copy_to_section_group',
    )
with self.command_group(
    'usersactions user-onenote-section-parent-section-group-section',
    usersactions_v1_0_user_onenote_section_parent_section_group_section,
    client_factory=cf_user_onenote_section_parent_section_group_section,
) as g:
    g.custom_command(
        'copy-to-notebook', 'usersactions_user_onenote_section_parent_section_group_section_copy_to_notebook'
    )
    g.custom_command(
        'copy-to-section-group',
        'usersactions_user_onenote_section_parent_section_group_section_copy_to_section_group',
    )
with self.command_group(
    'usersactions user-online-meeting', usersactions_v1_0_user_online_meeting, client_factory=cf_user_online_meeting
) as g:
    g.custom_command('create-or-get', 'usersactions_user_online_meeting_create_or_get')
# Empty experimental group: marks the whole 'usersactions_v1_0' namespace
# experimental without registering any commands of its own.
with self.command_group('usersactions_v1_0', is_experimental=True):
    pass
| 54.318087
| 263
| 0.82209
| 12,112
| 104,508
| 6.457645
| 0.024769
| 0.065256
| 0.063095
| 0.052957
| 0.887707
| 0.872045
| 0.8549
| 0.840082
| 0.817541
| 0.790616
| 0
| 0.007124
| 0.116173
| 104,508
| 1,923
| 264
| 54.346334
| 0.839661
| 0.005454
| 0
| 0.419886
| 0
| 0.0019
| 0.447625
| 0.404767
| 0
| 0
| 0
| 0
| 0
| 1
| 0.000633
| false
| 0.003167
| 0.001267
| 0
| 0.0019
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
47ad9c165ce4d425211db91d27aa1bdf0ad1b23b
| 1,356
|
py
|
Python
|
tests/test_quotes.py
|
anterokangas/ManuscriptManagerOld
|
194bc6c7b899bb4ab61966af3ba1e619fc74c20c
|
[
"MIT"
] | null | null | null |
tests/test_quotes.py
|
anterokangas/ManuscriptManagerOld
|
194bc6c7b899bb4ab61966af3ba1e619fc74c20c
|
[
"MIT"
] | null | null | null |
tests/test_quotes.py
|
anterokangas/ManuscriptManagerOld
|
194bc6c7b899bb4ab61966af3ba1e619fc74c20c
|
[
"MIT"
] | null | null | null |
import pytest
from manuscript.tools.quotes import add_quotes
from manuscript.tools.quotes import remove_quotes
def test_add_quotes():
    """add_quotes leaves space-free text alone and wraps text containing a
    space in quotes, choosing a quote character that avoids embedded ones."""
    cases = [
        ("abc", "abc"),
        ("abc def", '"abc def"'),
        ('abc "def"', "'abc \"def\"'"),
        ('abc"def"', 'abc"def"'),
        ('abc" def"', "'abc\" def\"'"),
        ("abc'cde\"fgg", "abc'cde\"fgg"),
        ("abc 'cde\"fgg", '"abc \'cde"fgg"'),
    ]
    for text, expected in cases:
        assert add_quotes(text) == expected
def test_remove_quotes():
    """remove_quotes strips one matching pair of surrounding quotes and
    leaves unquoted or mismatched strings untouched."""
    cases = [
        ("", ""),
        ("a", "a"),
        ("ab", "ab"),
        ("abc", "abc"),
        ("'abc'", "abc"),
        ('"abc"', "abc"),
        ("abc def", "abc def"),
        ("'", "'"),
        ("''", ""),
        ("'''", "'"),
        ("'\"'", '"'),
        ('"', '"'),
        ('""', ""),
        ('"""', '"'),
        ('"\'"', "'"),
        ("'abc \"def\"'", 'abc "def"'),
        ('abc"def"', 'abc"def"'),
        ("'abc\" def\"'", 'abc" def"'),
        ("abc'cde\"fgg", "abc'cde\"fgg"),
    ]
    for text, expected in cases:
        assert remove_quotes(text) == expected
| 39.882353
| 60
| 0.581858
| 164
| 1,356
| 4.615854
| 0.097561
| 0.332893
| 0.451783
| 0.317041
| 0.866579
| 0.784676
| 0.784676
| 0.764861
| 0.701453
| 0.638045
| 0
| 0
| 0.175516
| 1,356
| 34
| 61
| 39.882353
| 0.677102
| 0
| 0
| 0.129032
| 0
| 0
| 0.15549
| 0
| 0
| 0
| 0
| 0
| 0.83871
| 1
| 0.064516
| true
| 0
| 0.096774
| 0
| 0.16129
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 11
|
9a212507394544c0c2e01f0f0d625bbb7d17f307
| 94,208
|
py
|
Python
|
sdk/python/pulumi_oci/marketplace/outputs.py
|
EladGabay/pulumi-oci
|
6841e27d4a1a7e15c672306b769912efbfd3ba99
|
[
"ECL-2.0",
"Apache-2.0"
] | 5
|
2021-08-17T11:14:46.000Z
|
2021-12-31T02:07:03.000Z
|
sdk/python/pulumi_oci/marketplace/outputs.py
|
pulumi-oci/pulumi-oci
|
6841e27d4a1a7e15c672306b769912efbfd3ba99
|
[
"ECL-2.0",
"Apache-2.0"
] | 1
|
2021-09-06T11:21:29.000Z
|
2021-09-06T11:21:29.000Z
|
sdk/python/pulumi_oci/marketplace/outputs.py
|
pulumi-oci/pulumi-oci
|
6841e27d4a1a7e15c672306b769912efbfd3ba99
|
[
"ECL-2.0",
"Apache-2.0"
] | 2
|
2021-08-24T23:31:30.000Z
|
2022-01-02T19:26:54.000Z
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
# Public API of this generated module: Pulumi output wrapper types for the
# OCI Marketplace resources (Publication*) and data sources (Get*Result).
__all__ = [
    'PublicationIcon',
    'PublicationPackageDetails',
    'PublicationPackageDetailsEula',
    'PublicationPackageDetailsOperatingSystem',
    'PublicationSupportContact',
    'PublicationSupportedOperatingSystem',
    'GetAcceptedAgreementsAcceptedAgreementResult',
    'GetAcceptedAgreementsFilterResult',
    'GetCategoriesCategoryResult',
    'GetCategoriesFilterResult',
    'GetListingBannerResult',
    'GetListingDocumentationLinkResult',
    'GetListingIconResult',
    'GetListingLanguageResult',
    'GetListingLinkResult',
    'GetListingPackageAgreementsAgreementResult',
    'GetListingPackageAgreementsFilterResult',
    'GetListingPackageOperatingSystemResult',
    'GetListingPackagePricingResult',
    'GetListingPackageRegionResult',
    'GetListingPackageRegionCountryResult',
    'GetListingPackageVariableResult',
    'GetListingPackagesFilterResult',
    'GetListingPackagesListingPackageResult',
    'GetListingPackagesListingPackageOperatingSystemResult',
    'GetListingPackagesListingPackageRegionResult',
    'GetListingPackagesListingPackageRegionCountryResult',
    'GetListingPublisherResult',
    'GetListingPublisherLinkResult',
    'GetListingPublisherLogoResult',
    'GetListingRegionResult',
    'GetListingRegionCountryResult',
    'GetListingScreenshotResult',
    'GetListingSupportContactResult',
    'GetListingSupportLinkResult',
    'GetListingSupportedOperatingSystemResult',
    'GetListingTaxesFilterResult',
    'GetListingTaxesTaxResult',
    'GetListingVideoResult',
    'GetListingsFilterResult',
    'GetListingsListingResult',
    'GetListingsListingIconResult',
    'GetListingsListingPublisherResult',
    'GetListingsListingRegionResult',
    'GetListingsListingRegionCountryResult',
    'GetListingsListingSupportedOperatingSystemResult',
    'GetPublicationIconResult',
    'GetPublicationPackageDetailsResult',
    'GetPublicationPackageDetailsEulaResult',
    'GetPublicationPackageDetailsOperatingSystemResult',
    'GetPublicationPackageOperatingSystemResult',
    'GetPublicationPackageVariableResult',
    'GetPublicationPackagesFilterResult',
    'GetPublicationPackagesPublicationPackageResult',
    'GetPublicationSupportContactResult',
    'GetPublicationSupportedOperatingSystemResult',
    'GetPublicationsFilterResult',
    'GetPublicationsPublicationResult',
    'GetPublicationsPublicationIconResult',
    'GetPublicationsPublicationPackageDetailsResult',
    'GetPublicationsPublicationPackageDetailsEulaResult',
    'GetPublicationsPublicationPackageDetailsOperatingSystemResult',
    'GetPublicationsPublicationSupportContactResult',
    'GetPublicationsPublicationSupportedOperatingSystemResult',
    'GetPublishersFilterResult',
    'GetPublishersPublisherResult',
]
@pulumi.output_type
class PublicationIcon(dict):
    """Icon attached to a Marketplace publication (generated output type)."""

    @staticmethod
    def __key_warning(key: str):
        # Warn when dict-style access uses the raw camelCase wire key instead
        # of the snake_case property getter.
        suggest = None
        if key == "contentUrl":
            suggest = "content_url"
        elif key == "fileExtension":
            suggest = "file_extension"
        elif key == "mimeType":
            suggest = "mime_type"

        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in PublicationIcon. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        PublicationIcon.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        PublicationIcon.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 content_url: Optional[str] = None,
                 file_extension: Optional[str] = None,
                 mime_type: Optional[str] = None,
                 name: Optional[str] = None):
        """
        :param str content_url: The content URL of the upload data.
        :param str file_extension: The file extension of the upload data.
        :param str mime_type: The MIME type of the upload data.
        :param str name: (Updatable) The name of the contact.
        """
        # Only populate keys that were actually provided.
        if content_url is not None:
            pulumi.set(__self__, "content_url", content_url)
        if file_extension is not None:
            pulumi.set(__self__, "file_extension", file_extension)
        if mime_type is not None:
            pulumi.set(__self__, "mime_type", mime_type)
        if name is not None:
            pulumi.set(__self__, "name", name)

    @property
    @pulumi.getter(name="contentUrl")
    def content_url(self) -> Optional[str]:
        """
        The content URL of the upload data.
        """
        return pulumi.get(self, "content_url")

    @property
    @pulumi.getter(name="fileExtension")
    def file_extension(self) -> Optional[str]:
        """
        The file extension of the upload data.
        """
        return pulumi.get(self, "file_extension")

    @property
    @pulumi.getter(name="mimeType")
    def mime_type(self) -> Optional[str]:
        """
        The MIME type of the upload data.
        """
        return pulumi.get(self, "mime_type")

    @property
    @pulumi.getter
    def name(self) -> Optional[str]:
        """
        (Updatable) The name of the contact.
        """
        return pulumi.get(self, "name")
@pulumi.output_type
class PublicationPackageDetails(dict):
    """Package details for a Marketplace publication (generated output type)."""

    @staticmethod
    def __key_warning(key: str):
        # Warn when dict-style access uses the raw camelCase wire key instead
        # of the snake_case property getter.
        suggest = None
        if key == "operatingSystem":
            suggest = "operating_system"
        elif key == "packageType":
            suggest = "package_type"
        elif key == "packageVersion":
            suggest = "package_version"
        elif key == "imageId":
            suggest = "image_id"

        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in PublicationPackageDetails. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        PublicationPackageDetails.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        PublicationPackageDetails.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 eulas: Sequence['outputs.PublicationPackageDetailsEula'],
                 operating_system: 'outputs.PublicationPackageDetailsOperatingSystem',
                 package_type: str,
                 package_version: str,
                 image_id: Optional[str] = None):
        """
        :param Sequence['PublicationPackageDetailsEulaArgs'] eulas: End User License Agreeement that a consumer of this listing has to accept
        :param 'PublicationPackageDetailsOperatingSystemArgs' operating_system: OS used by the listing.
        :param str package_type: Type of the artifact of the listing
        :param str package_version: The version of the package
        :param str image_id: base image id of the listing
        """
        pulumi.set(__self__, "eulas", eulas)
        pulumi.set(__self__, "operating_system", operating_system)
        pulumi.set(__self__, "package_type", package_type)
        pulumi.set(__self__, "package_version", package_version)
        # image_id is the only optional field; omit the key when unset.
        if image_id is not None:
            pulumi.set(__self__, "image_id", image_id)

    @property
    @pulumi.getter
    def eulas(self) -> Sequence['outputs.PublicationPackageDetailsEula']:
        """
        End User License Agreeement that a consumer of this listing has to accept
        """
        return pulumi.get(self, "eulas")

    @property
    @pulumi.getter(name="operatingSystem")
    def operating_system(self) -> 'outputs.PublicationPackageDetailsOperatingSystem':
        """
        OS used by the listing.
        """
        return pulumi.get(self, "operating_system")

    @property
    @pulumi.getter(name="packageType")
    def package_type(self) -> str:
        """
        Type of the artifact of the listing
        """
        return pulumi.get(self, "package_type")

    @property
    @pulumi.getter(name="packageVersion")
    def package_version(self) -> str:
        """
        The version of the package
        """
        return pulumi.get(self, "package_version")

    @property
    @pulumi.getter(name="imageId")
    def image_id(self) -> Optional[str]:
        """
        base image id of the listing
        """
        return pulumi.get(self, "image_id")
@pulumi.output_type
class PublicationPackageDetailsEula(dict):
    """End-user license agreement of a publication package (generated output type)."""

    @staticmethod
    def __key_warning(key: str):
        # Warn when dict-style access uses the raw camelCase wire key instead
        # of the snake_case property getter.
        suggest = None
        if key == "eulaType":
            suggest = "eula_type"
        elif key == "licenseText":
            suggest = "license_text"

        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in PublicationPackageDetailsEula. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        PublicationPackageDetailsEula.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        PublicationPackageDetailsEula.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 eula_type: str,
                 license_text: Optional[str] = None):
        """
        :param str eula_type: the specified eula's type
        :param str license_text: text of the eula
        """
        pulumi.set(__self__, "eula_type", eula_type)
        if license_text is not None:
            pulumi.set(__self__, "license_text", license_text)

    @property
    @pulumi.getter(name="eulaType")
    def eula_type(self) -> str:
        """
        the specified eula's type
        """
        return pulumi.get(self, "eula_type")

    @property
    @pulumi.getter(name="licenseText")
    def license_text(self) -> Optional[str]:
        """
        text of the eula
        """
        return pulumi.get(self, "license_text")
@pulumi.output_type
class PublicationPackageDetailsOperatingSystem(dict):
    """Operating system of a publication package (generated output type)."""

    def __init__(__self__, *,
                 name: Optional[str] = None):
        """
        :param str name: (Updatable) The name of the contact.
        """
        if name is not None:
            pulumi.set(__self__, "name", name)

    @property
    @pulumi.getter
    def name(self) -> Optional[str]:
        """
        (Updatable) The name of the contact.
        """
        return pulumi.get(self, "name")
@pulumi.output_type
class PublicationSupportContact(dict):
    """Support contact for a Marketplace publication (generated output type)."""

    def __init__(__self__, *,
                 email: Optional[str] = None,
                 name: Optional[str] = None,
                 phone: Optional[str] = None,
                 subject: Optional[str] = None):
        """
        :param str email: (Updatable) The email of the contact.
        :param str name: (Updatable) The name of the contact.
        :param str phone: (Updatable) The phone number of the contact.
        :param str subject: (Updatable) The email subject line to use when contacting support.
        """
        # All fields are optional; only provided ones become keys.
        if email is not None:
            pulumi.set(__self__, "email", email)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if phone is not None:
            pulumi.set(__self__, "phone", phone)
        if subject is not None:
            pulumi.set(__self__, "subject", subject)

    @property
    @pulumi.getter
    def email(self) -> Optional[str]:
        """
        (Updatable) The email of the contact.
        """
        return pulumi.get(self, "email")

    @property
    @pulumi.getter
    def name(self) -> Optional[str]:
        """
        (Updatable) The name of the contact.
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter
    def phone(self) -> Optional[str]:
        """
        (Updatable) The phone number of the contact.
        """
        return pulumi.get(self, "phone")

    @property
    @pulumi.getter
    def subject(self) -> Optional[str]:
        """
        (Updatable) The email subject line to use when contacting support.
        """
        return pulumi.get(self, "subject")
@pulumi.output_type
class PublicationSupportedOperatingSystem(dict):
    """Operating system supported by a publication (generated output type)."""

    def __init__(__self__, *,
                 name: Optional[str] = None):
        """
        :param str name: (Updatable) The name of the contact.
        """
        if name is not None:
            pulumi.set(__self__, "name", name)

    @property
    @pulumi.getter
    def name(self) -> Optional[str]:
        """
        (Updatable) The name of the contact.
        """
        return pulumi.get(self, "name")
@pulumi.output_type
class GetAcceptedAgreementsAcceptedAgreementResult(dict):
    """One accepted terms-of-use agreement returned by the GetAcceptedAgreements data source."""

    def __init__(__self__, *,
                 agreement_id: str,
                 compartment_id: str,
                 defined_tags: Mapping[str, Any],
                 display_name: str,
                 freeform_tags: Mapping[str, Any],
                 id: str,
                 listing_id: str,
                 package_version: str,
                 signature: str,
                 time_accepted: str):
        """
        :param str agreement_id: The unique identifier for the terms of use agreement itself.
        :param str compartment_id: The unique identifier for the compartment.
        :param Mapping[str, Any] defined_tags: The defined tags associated with this resource, if any. Each key is predefined and scoped to namespaces. For more information, see [Resource Tags](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm). Example: `{"Operations.CostCenter": "42"}`
        :param str display_name: The display name of the resource.
        :param Mapping[str, Any] freeform_tags: The freeform tags associated with this resource, if any. Each tag is a simple key-value pair with no predefined name, type, or namespace. For more information, see [Resource Tags](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm). Example: `{"Department": "Finance"}`
        :param str id: The unique identifier for the acceptance of the agreement within a specific compartment.
        :param str listing_id: The unique identifier for the listing.
        :param str package_version: The version of the package. Package versions are unique within a listing.
        :param str time_accepted: The time the agreement was accepted.
        """
        pulumi.set(__self__, "agreement_id", agreement_id)
        pulumi.set(__self__, "compartment_id", compartment_id)
        pulumi.set(__self__, "defined_tags", defined_tags)
        pulumi.set(__self__, "display_name", display_name)
        pulumi.set(__self__, "freeform_tags", freeform_tags)
        pulumi.set(__self__, "id", id)
        pulumi.set(__self__, "listing_id", listing_id)
        pulumi.set(__self__, "package_version", package_version)
        pulumi.set(__self__, "signature", signature)
        pulumi.set(__self__, "time_accepted", time_accepted)

    @property
    @pulumi.getter(name="agreementId")
    def agreement_id(self) -> str:
        """
        The unique identifier for the terms of use agreement itself.
        """
        return pulumi.get(self, "agreement_id")

    @property
    @pulumi.getter(name="compartmentId")
    def compartment_id(self) -> str:
        """
        The unique identifier for the compartment.
        """
        return pulumi.get(self, "compartment_id")

    @property
    @pulumi.getter(name="definedTags")
    def defined_tags(self) -> Mapping[str, Any]:
        """
        The defined tags associated with this resource, if any. Each key is predefined and scoped to namespaces. For more information, see [Resource Tags](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm). Example: `{"Operations.CostCenter": "42"}`
        """
        return pulumi.get(self, "defined_tags")

    @property
    @pulumi.getter(name="displayName")
    def display_name(self) -> str:
        """
        The display name of the resource.
        """
        return pulumi.get(self, "display_name")

    @property
    @pulumi.getter(name="freeformTags")
    def freeform_tags(self) -> Mapping[str, Any]:
        """
        The freeform tags associated with this resource, if any. Each tag is a simple key-value pair with no predefined name, type, or namespace. For more information, see [Resource Tags](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm). Example: `{"Department": "Finance"}`
        """
        return pulumi.get(self, "freeform_tags")

    @property
    @pulumi.getter
    def id(self) -> str:
        """
        The unique identifier for the acceptance of the agreement within a specific compartment.
        """
        return pulumi.get(self, "id")

    @property
    @pulumi.getter(name="listingId")
    def listing_id(self) -> str:
        """
        The unique identifier for the listing.
        """
        return pulumi.get(self, "listing_id")

    @property
    @pulumi.getter(name="packageVersion")
    def package_version(self) -> str:
        """
        The version of the package. Package versions are unique within a listing.
        """
        return pulumi.get(self, "package_version")

    @property
    @pulumi.getter
    def signature(self) -> str:
        # NOTE(review): undocumented upstream; presumably the acceptance
        # signature recorded by the service — confirm against the OCI API docs.
        return pulumi.get(self, "signature")

    @property
    @pulumi.getter(name="timeAccepted")
    def time_accepted(self) -> str:
        """
        The time the agreement was accepted.
        """
        return pulumi.get(self, "time_accepted")
@pulumi.output_type
class GetAcceptedAgreementsFilterResult(dict):
    """Name/values filter applied to the GetAcceptedAgreements data source."""

    def __init__(__self__, *,
                 name: str,
                 values: Sequence[str],
                 regex: Optional[bool] = None):
        pulumi.set(__self__, "name", name)
        pulumi.set(__self__, "values", values)
        # regex toggles regex matching of the filter values; omitted when unset.
        if regex is not None:
            pulumi.set(__self__, "regex", regex)

    @property
    @pulumi.getter
    def name(self) -> str:
        return pulumi.get(self, "name")

    @property
    @pulumi.getter
    def values(self) -> Sequence[str]:
        return pulumi.get(self, "values")

    @property
    @pulumi.getter
    def regex(self) -> Optional[bool]:
        return pulumi.get(self, "regex")
@pulumi.output_type
class GetCategoriesCategoryResult(dict):
    """One product category returned by the GetCategories data source."""

    def __init__(__self__, *,
                 name: str):
        """
        :param str name: Name of the product category.
        """
        pulumi.set(__self__, "name", name)

    @property
    @pulumi.getter
    def name(self) -> str:
        """
        Name of the product category.
        """
        return pulumi.get(self, "name")
@pulumi.output_type
class GetCategoriesFilterResult(dict):
    """Name/values filter applied to the GetCategories data source."""

    def __init__(__self__, *,
                 name: str,
                 values: Sequence[str],
                 regex: Optional[bool] = None):
        """
        :param str name: Name of the product category.
        """
        pulumi.set(__self__, "name", name)
        pulumi.set(__self__, "values", values)
        if regex is not None:
            pulumi.set(__self__, "regex", regex)

    @property
    @pulumi.getter
    def name(self) -> str:
        """
        Name of the product category.
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter
    def values(self) -> Sequence[str]:
        return pulumi.get(self, "values")

    @property
    @pulumi.getter
    def regex(self) -> Optional[bool]:
        return pulumi.get(self, "regex")
@pulumi.output_type
class GetListingBannerResult(dict):
    """Banner image metadata returned by the GetListing data source."""

    def __init__(__self__, *,
                 content_url: str,
                 file_extension: str,
                 mime_type: str,
                 name: str):
        """
        :param str content_url: The content URL of the screenshot.
        :param str file_extension: The file extension of the screenshot.
        :param str mime_type: The MIME type of the screenshot.
        :param str name: Text that describes the resource.
        """
        pulumi.set(__self__, "content_url", content_url)
        pulumi.set(__self__, "file_extension", file_extension)
        pulumi.set(__self__, "mime_type", mime_type)
        pulumi.set(__self__, "name", name)

    @property
    @pulumi.getter(name="contentUrl")
    def content_url(self) -> str:
        """
        The content URL of the screenshot.
        """
        return pulumi.get(self, "content_url")

    @property
    @pulumi.getter(name="fileExtension")
    def file_extension(self) -> str:
        """
        The file extension of the screenshot.
        """
        return pulumi.get(self, "file_extension")

    @property
    @pulumi.getter(name="mimeType")
    def mime_type(self) -> str:
        """
        The MIME type of the screenshot.
        """
        return pulumi.get(self, "mime_type")

    @property
    @pulumi.getter
    def name(self) -> str:
        """
        Text that describes the resource.
        """
        return pulumi.get(self, "name")
@pulumi.output_type
class GetListingDocumentationLinkResult(dict):
    """Documentation link returned by the GetListing data source."""

    def __init__(__self__, *,
                 document_category: str,
                 name: str,
                 url: str):
        """
        :param str document_category: The category that the document belongs to.
        :param str name: Text that describes the resource.
        :param str url: The URL of the resource.
        """
        pulumi.set(__self__, "document_category", document_category)
        pulumi.set(__self__, "name", name)
        pulumi.set(__self__, "url", url)

    @property
    @pulumi.getter(name="documentCategory")
    def document_category(self) -> str:
        """
        The category that the document belongs to.
        """
        return pulumi.get(self, "document_category")

    @property
    @pulumi.getter
    def name(self) -> str:
        """
        Text that describes the resource.
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter
    def url(self) -> str:
        """
        The URL of the resource.
        """
        return pulumi.get(self, "url")
@pulumi.output_type
class GetListingIconResult(dict):
    """Icon metadata returned by the GetListing data source."""

    def __init__(__self__, *,
                 content_url: str,
                 file_extension: str,
                 mime_type: str,
                 name: str):
        """
        :param str content_url: The content URL of the screenshot.
        :param str file_extension: The file extension of the screenshot.
        :param str mime_type: The MIME type of the screenshot.
        :param str name: Text that describes the resource.
        """
        pulumi.set(__self__, "content_url", content_url)
        pulumi.set(__self__, "file_extension", file_extension)
        pulumi.set(__self__, "mime_type", mime_type)
        pulumi.set(__self__, "name", name)

    @property
    @pulumi.getter(name="contentUrl")
    def content_url(self) -> str:
        """
        The content URL of the screenshot.
        """
        return pulumi.get(self, "content_url")

    @property
    @pulumi.getter(name="fileExtension")
    def file_extension(self) -> str:
        """
        The file extension of the screenshot.
        """
        return pulumi.get(self, "file_extension")

    @property
    @pulumi.getter(name="mimeType")
    def mime_type(self) -> str:
        """
        The MIME type of the screenshot.
        """
        return pulumi.get(self, "mime_type")

    @property
    @pulumi.getter
    def name(self) -> str:
        """
        Text that describes the resource.
        """
        return pulumi.get(self, "name")
@pulumi.output_type
class GetListingLanguageResult(dict):
    """Language descriptor returned by the GetListing data source."""

    def __init__(__self__, *,
                 code: str,
                 name: str):
        """
        :param str code: A code assigned to the item.
        :param str name: Text that describes the resource.
        """
        pulumi.set(__self__, "code", code)
        pulumi.set(__self__, "name", name)

    @property
    @pulumi.getter
    def code(self) -> str:
        """
        A code assigned to the item.
        """
        return pulumi.get(self, "code")

    @property
    @pulumi.getter
    def name(self) -> str:
        """
        Text that describes the resource.
        """
        return pulumi.get(self, "name")
@pulumi.output_type
class GetListingLinkResult(dict):
    """Hypermedia link returned by the GetListing data source."""

    def __init__(__self__, *,
                 href: str,
                 rel: str):
        """
        :param str href: The anchor tag.
        :param str rel: Reference links to the previous page, next page, and other pages.
        """
        pulumi.set(__self__, "href", href)
        pulumi.set(__self__, "rel", rel)

    @property
    @pulumi.getter
    def href(self) -> str:
        """
        The anchor tag.
        """
        return pulumi.get(self, "href")

    @property
    @pulumi.getter
    def rel(self) -> str:
        """
        Reference links to the previous page, next page, and other pages.
        """
        return pulumi.get(self, "rel")
@pulumi.output_type
class GetListingPackageAgreementsAgreementResult(dict):
    """One agreement returned by the GetListingPackageAgreements data source."""

    def __init__(__self__, *,
                 author: str,
                 content_url: str,
                 id: str,
                 prompt: str):
        """
        :param str author: Who authored the agreement.
        :param str content_url: The content URL of the agreement.
        :param str id: The unique identifier for the agreement.
        :param str prompt: Textual prompt to read and accept the agreement.
        """
        pulumi.set(__self__, "author", author)
        pulumi.set(__self__, "content_url", content_url)
        pulumi.set(__self__, "id", id)
        pulumi.set(__self__, "prompt", prompt)

    @property
    @pulumi.getter
    def author(self) -> str:
        """
        Who authored the agreement.
        """
        return pulumi.get(self, "author")

    @property
    @pulumi.getter(name="contentUrl")
    def content_url(self) -> str:
        """
        The content URL of the agreement.
        """
        return pulumi.get(self, "content_url")

    @property
    @pulumi.getter
    def id(self) -> str:
        """
        The unique identifier for the agreement.
        """
        return pulumi.get(self, "id")

    @property
    @pulumi.getter
    def prompt(self) -> str:
        """
        Textual prompt to read and accept the agreement.
        """
        return pulumi.get(self, "prompt")
@pulumi.output_type
class GetListingPackageAgreementsFilterResult(dict):
    """Name/values filter applied to the GetListingPackageAgreements data source."""

    def __init__(__self__, *,
                 name: str,
                 values: Sequence[str],
                 regex: Optional[bool] = None):
        pulumi.set(__self__, "name", name)
        pulumi.set(__self__, "values", values)
        # regex toggles regex matching of the filter values; omitted when unset.
        if regex is not None:
            pulumi.set(__self__, "regex", regex)

    @property
    @pulumi.getter
    def name(self) -> str:
        return pulumi.get(self, "name")

    @property
    @pulumi.getter
    def values(self) -> Sequence[str]:
        return pulumi.get(self, "values")

    @property
    @pulumi.getter
    def regex(self) -> Optional[bool]:
        return pulumi.get(self, "regex")
@pulumi.output_type
class GetListingPackageOperatingSystemResult(dict):
    """Operating system descriptor returned by the GetListingPackage data source."""

    def __init__(__self__, *,
                 name: str):
        """
        :param str name: The name of the variable.
        """
        pulumi.set(__self__, "name", name)

    @property
    @pulumi.getter
    def name(self) -> str:
        """
        The name of the variable.
        """
        return pulumi.get(self, "name")
@pulumi.output_type
class GetListingPackagePricingResult(dict):
    """Pricing model returned by the GetListingPackage data source."""

    def __init__(__self__, *,
                 currency: str,
                 pay_go_strategy: str,
                 rate: float,
                 type: str):
        """
        :param str currency: The currency of the pricing model.
        :param str pay_go_strategy: The type of pricing for a PAYGO model, eg PER_OCPU_LINEAR, PER_OCPU_MIN_BILLING, PER_INSTANCE. Null if type is not PAYGO.
        :param float rate: The pricing rate.
        :param str type: The type of the pricing model.
        """
        pulumi.set(__self__, "currency", currency)
        pulumi.set(__self__, "pay_go_strategy", pay_go_strategy)
        pulumi.set(__self__, "rate", rate)
        pulumi.set(__self__, "type", type)

    @property
    @pulumi.getter
    def currency(self) -> str:
        """
        The currency of the pricing model.
        """
        return pulumi.get(self, "currency")

    @property
    @pulumi.getter(name="payGoStrategy")
    def pay_go_strategy(self) -> str:
        """
        The type of pricing for a PAYGO model, eg PER_OCPU_LINEAR, PER_OCPU_MIN_BILLING, PER_INSTANCE. Null if type is not PAYGO.
        """
        return pulumi.get(self, "pay_go_strategy")

    @property
    @pulumi.getter
    def rate(self) -> float:
        """
        The pricing rate.
        """
        return pulumi.get(self, "rate")

    @property
    @pulumi.getter
    def type(self) -> str:
        """
        The type of the pricing model.
        """
        return pulumi.get(self, "type")
@pulumi.output_type
class GetListingPackageRegionResult(dict):
    """Region (with its countries) returned by the GetListingPackage data source."""

    def __init__(__self__, *,
                 code: str,
                 countries: Sequence['outputs.GetListingPackageRegionCountryResult'],
                 name: str):
        """
        :param str code: A code assigned to the item.
        :param Sequence['GetListingPackageRegionCountryArgs'] countries: Countries in the region.
        :param str name: The name of the variable.
        """
        pulumi.set(__self__, "code", code)
        pulumi.set(__self__, "countries", countries)
        pulumi.set(__self__, "name", name)

    @property
    @pulumi.getter
    def code(self) -> str:
        """
        A code assigned to the item.
        """
        return pulumi.get(self, "code")

    @property
    @pulumi.getter
    def countries(self) -> Sequence['outputs.GetListingPackageRegionCountryResult']:
        """
        Countries in the region.
        """
        return pulumi.get(self, "countries")

    @property
    @pulumi.getter
    def name(self) -> str:
        """
        The name of the variable.
        """
        return pulumi.get(self, "name")
@pulumi.output_type
class GetListingPackageRegionCountryResult(dict):
    """Country within a region, returned by the GetListingPackage data source."""

    def __init__(__self__, *,
                 code: str,
                 name: str):
        """
        :param str code: A code assigned to the item.
        :param str name: The name of the variable.
        """
        pulumi.set(__self__, "code", code)
        pulumi.set(__self__, "name", name)

    @property
    @pulumi.getter
    def code(self) -> str:
        """
        A code assigned to the item.
        """
        return pulumi.get(self, "code")

    @property
    @pulumi.getter
    def name(self) -> str:
        """
        The name of the variable.
        """
        return pulumi.get(self, "name")
@pulumi.output_type
class GetListingPackageVariableResult(dict):
    """Configuration variable of a listing package, returned by the GetListingPackage data source."""

    def __init__(__self__, *,
                 data_type: str,
                 default_value: str,
                 description: str,
                 hint_message: str,
                 is_mandatory: bool,
                 name: str):
        """
        :param str data_type: The data type of the variable.
        :param str default_value: The variable's default value.
        :param str description: A description of the variable.
        :param str hint_message: A brief textual description that helps to explain the variable.
        :param bool is_mandatory: Whether the variable is mandatory.
        :param str name: The name of the variable.
        """
        pulumi.set(__self__, "data_type", data_type)
        pulumi.set(__self__, "default_value", default_value)
        pulumi.set(__self__, "description", description)
        pulumi.set(__self__, "hint_message", hint_message)
        pulumi.set(__self__, "is_mandatory", is_mandatory)
        pulumi.set(__self__, "name", name)

    @property
    @pulumi.getter(name="dataType")
    def data_type(self) -> str:
        """
        The data type of the variable.
        """
        return pulumi.get(self, "data_type")

    @property
    @pulumi.getter(name="defaultValue")
    def default_value(self) -> str:
        """
        The variable's default value.
        """
        return pulumi.get(self, "default_value")

    @property
    @pulumi.getter
    def description(self) -> str:
        """
        A description of the variable.
        """
        return pulumi.get(self, "description")

    @property
    @pulumi.getter(name="hintMessage")
    def hint_message(self) -> str:
        """
        A brief textual description that helps to explain the variable.
        """
        return pulumi.get(self, "hint_message")

    @property
    @pulumi.getter(name="isMandatory")
    def is_mandatory(self) -> bool:
        """
        Whether the variable is mandatory.
        """
        return pulumi.get(self, "is_mandatory")

    @property
    @pulumi.getter
    def name(self) -> str:
        """
        The name of the variable.
        """
        return pulumi.get(self, "name")
@pulumi.output_type
class GetListingPackagesFilterResult(dict):
    """A name/values filter applied to the listing-packages data source."""

    def __init__(__self__, *,
                 name: str,
                 values: Sequence[str],
                 regex: Optional[bool] = None):
        """
        :param str name: The name of the variable.
        """
        # Register each field with the pulumi runtime; "regex" is optional
        # and only stored when supplied.
        for attr, value in (("name", name), ("values", values)):
            pulumi.set(__self__, attr, value)
        if regex is not None:
            pulumi.set(__self__, "regex", regex)

    @property
    @pulumi.getter
    def name(self) -> str:
        """The name of the variable."""
        return pulumi.get(self, "name")

    @property
    @pulumi.getter
    def values(self) -> Sequence[str]:
        return pulumi.get(self, "values")

    @property
    @pulumi.getter
    def regex(self) -> Optional[bool]:
        return pulumi.get(self, "regex")
@pulumi.output_type
class GetListingPackagesListingPackageResult(dict):
    """Summary of a single package returned by the listing-packages data source."""

    def __init__(__self__, *,
                 listing_id: str,
                 operating_system: 'outputs.GetListingPackagesListingPackageOperatingSystemResult',
                 package_type: str,
                 package_version: str,
                 regions: Sequence['outputs.GetListingPackagesListingPackageRegionResult'],
                 resource_id: str,
                 time_created: str):
        """
        :param str listing_id: The unique identifier for the listing.
        :param 'GetListingPackagesListingPackageOperatingSystemArgs' operating_system: OS used by the listing.
        :param str package_type: A filter to return only packages that match the given package type exactly.
        :param str package_version: The version of the package. Package versions are unique within a listing.
        :param Sequence['GetListingPackagesListingPackageRegionArgs'] regions: The regions where the listing is available.
        :param str resource_id: The unique identifier for the package resource.
        :param str time_created: The date and time this listing package was created, expressed in [RFC 3339](https://tools.ietf.org/html/rfc3339) timestamp format. Example: `2016-08-25T21:10:29.600Z`
        """
        # Register each field with the pulumi runtime.
        for attr, value in (("listing_id", listing_id),
                            ("operating_system", operating_system),
                            ("package_type", package_type),
                            ("package_version", package_version),
                            ("regions", regions),
                            ("resource_id", resource_id),
                            ("time_created", time_created)):
            pulumi.set(__self__, attr, value)

    @property
    @pulumi.getter(name="listingId")
    def listing_id(self) -> str:
        """The unique identifier for the listing."""
        return pulumi.get(self, "listing_id")

    @property
    @pulumi.getter(name="operatingSystem")
    def operating_system(self) -> 'outputs.GetListingPackagesListingPackageOperatingSystemResult':
        """OS used by the listing."""
        return pulumi.get(self, "operating_system")

    @property
    @pulumi.getter(name="packageType")
    def package_type(self) -> str:
        """A filter to return only packages that match the given package type exactly."""
        return pulumi.get(self, "package_type")

    @property
    @pulumi.getter(name="packageVersion")
    def package_version(self) -> str:
        """The version of the package. Package versions are unique within a listing."""
        return pulumi.get(self, "package_version")

    @property
    @pulumi.getter
    def regions(self) -> Sequence['outputs.GetListingPackagesListingPackageRegionResult']:
        """The regions where the listing is available."""
        return pulumi.get(self, "regions")

    @property
    @pulumi.getter(name="resourceId")
    def resource_id(self) -> str:
        """The unique identifier for the package resource."""
        return pulumi.get(self, "resource_id")

    @property
    @pulumi.getter(name="timeCreated")
    def time_created(self) -> str:
        """The date and time this listing package was created, expressed in [RFC 3339](https://tools.ietf.org/html/rfc3339) timestamp format. Example: `2016-08-25T21:10:29.600Z`"""
        return pulumi.get(self, "time_created")
@pulumi.output_type
class GetListingPackagesListingPackageOperatingSystemResult(dict):
    """Operating system associated with a listing package."""

    def __init__(__self__, *,
                 name: str):
        """
        :param str name: The name of the variable.
        """
        pulumi.set(__self__, "name", name)

    @property
    @pulumi.getter
    def name(self) -> str:
        """The name of the variable."""
        return pulumi.get(self, "name")
@pulumi.output_type
class GetListingPackagesListingPackageRegionResult(dict):
    """A region in which a listing package is available."""

    def __init__(__self__, *,
                 code: str,
                 countries: Sequence['outputs.GetListingPackagesListingPackageRegionCountryResult'],
                 name: str):
        """
        :param str code: A code assigned to the item.
        :param Sequence['GetListingPackagesListingPackageRegionCountryArgs'] countries: Countries in the region.
        :param str name: The name of the variable.
        """
        # Register each field with the pulumi runtime.
        for attr, value in (("code", code), ("countries", countries), ("name", name)):
            pulumi.set(__self__, attr, value)

    @property
    @pulumi.getter
    def code(self) -> str:
        """A code assigned to the item."""
        return pulumi.get(self, "code")

    @property
    @pulumi.getter
    def countries(self) -> Sequence['outputs.GetListingPackagesListingPackageRegionCountryResult']:
        """Countries in the region."""
        return pulumi.get(self, "countries")

    @property
    @pulumi.getter
    def name(self) -> str:
        """The name of the variable."""
        return pulumi.get(self, "name")
@pulumi.output_type
class GetListingPackagesListingPackageRegionCountryResult(dict):
    """A country entry inside a listing-package region."""

    def __init__(__self__, *,
                 code: str,
                 name: str):
        """
        :param str code: A code assigned to the item.
        :param str name: The name of the variable.
        """
        # Register each field with the pulumi runtime.
        for attr, value in (("code", code), ("name", name)):
            pulumi.set(__self__, attr, value)

    @property
    @pulumi.getter
    def code(self) -> str:
        """A code assigned to the item."""
        return pulumi.get(self, "code")

    @property
    @pulumi.getter
    def name(self) -> str:
        """The name of the variable."""
        return pulumi.get(self, "name")
@pulumi.output_type
class GetListingPublisherResult(dict):
    """Details about the publisher of a listing."""

    def __init__(__self__, *,
                 contact_email: str,
                 contact_phone: str,
                 description: str,
                 hq_address: str,
                 id: str,
                 links: Sequence['outputs.GetListingPublisherLinkResult'],
                 logo: 'outputs.GetListingPublisherLogoResult',
                 name: str,
                 website_url: str,
                 year_founded: str):
        """
        :param str contact_email: The email address of the publisher.
        :param str contact_phone: The phone number of the publisher.
        :param str description: A description of the screenshot.
        :param str hq_address: The address of the publisher's headquarters.
        :param str id: Unique identifier for the publisher.
        :param Sequence['GetListingPublisherLinkArgs'] links: Reference links.
        :param 'GetListingPublisherLogoArgs' logo: The model for upload data for images and icons.
        :param str name: Text that describes the resource.
        :param str website_url: The publisher's website.
        :param str year_founded: The year the publisher's company or organization was founded.
        """
        # Register each field with the pulumi runtime.
        for attr, value in (("contact_email", contact_email),
                            ("contact_phone", contact_phone),
                            ("description", description),
                            ("hq_address", hq_address),
                            ("id", id),
                            ("links", links),
                            ("logo", logo),
                            ("name", name),
                            ("website_url", website_url),
                            ("year_founded", year_founded)):
            pulumi.set(__self__, attr, value)

    @property
    @pulumi.getter(name="contactEmail")
    def contact_email(self) -> str:
        """The email address of the publisher."""
        return pulumi.get(self, "contact_email")

    @property
    @pulumi.getter(name="contactPhone")
    def contact_phone(self) -> str:
        """The phone number of the publisher."""
        return pulumi.get(self, "contact_phone")

    @property
    @pulumi.getter
    def description(self) -> str:
        """A description of the screenshot."""
        return pulumi.get(self, "description")

    @property
    @pulumi.getter(name="hqAddress")
    def hq_address(self) -> str:
        """The address of the publisher's headquarters."""
        return pulumi.get(self, "hq_address")

    @property
    @pulumi.getter
    def id(self) -> str:
        """Unique identifier for the publisher."""
        return pulumi.get(self, "id")

    @property
    @pulumi.getter
    def links(self) -> Sequence['outputs.GetListingPublisherLinkResult']:
        """Reference links."""
        return pulumi.get(self, "links")

    @property
    @pulumi.getter
    def logo(self) -> 'outputs.GetListingPublisherLogoResult':
        """The model for upload data for images and icons."""
        return pulumi.get(self, "logo")

    @property
    @pulumi.getter
    def name(self) -> str:
        """Text that describes the resource."""
        return pulumi.get(self, "name")

    @property
    @pulumi.getter(name="websiteUrl")
    def website_url(self) -> str:
        """The publisher's website."""
        return pulumi.get(self, "website_url")

    @property
    @pulumi.getter(name="yearFounded")
    def year_founded(self) -> str:
        """The year the publisher's company or organization was founded."""
        return pulumi.get(self, "year_founded")
@pulumi.output_type
class GetListingPublisherLinkResult(dict):
    """A reference link published alongside a publisher profile."""

    def __init__(__self__, *,
                 href: str,
                 rel: str):
        """
        :param str href: The anchor tag.
        :param str rel: Reference links to the previous page, next page, and other pages.
        """
        # Register each field with the pulumi runtime.
        for attr, value in (("href", href), ("rel", rel)):
            pulumi.set(__self__, attr, value)

    @property
    @pulumi.getter
    def href(self) -> str:
        """The anchor tag."""
        return pulumi.get(self, "href")

    @property
    @pulumi.getter
    def rel(self) -> str:
        """Reference links to the previous page, next page, and other pages."""
        return pulumi.get(self, "rel")
@pulumi.output_type
class GetListingPublisherLogoResult(dict):
    """Upload data describing a publisher's logo image."""

    def __init__(__self__, *,
                 content_url: str,
                 file_extension: str,
                 mime_type: str,
                 name: str):
        """
        :param str content_url: The content URL of the screenshot.
        :param str file_extension: The file extension of the screenshot.
        :param str mime_type: The MIME type of the screenshot.
        :param str name: Text that describes the resource.
        """
        # Register each field with the pulumi runtime.
        for attr, value in (("content_url", content_url),
                            ("file_extension", file_extension),
                            ("mime_type", mime_type),
                            ("name", name)):
            pulumi.set(__self__, attr, value)

    @property
    @pulumi.getter(name="contentUrl")
    def content_url(self) -> str:
        """The content URL of the screenshot."""
        return pulumi.get(self, "content_url")

    @property
    @pulumi.getter(name="fileExtension")
    def file_extension(self) -> str:
        """The file extension of the screenshot."""
        return pulumi.get(self, "file_extension")

    @property
    @pulumi.getter(name="mimeType")
    def mime_type(self) -> str:
        """The MIME type of the screenshot."""
        return pulumi.get(self, "mime_type")

    @property
    @pulumi.getter
    def name(self) -> str:
        """Text that describes the resource."""
        return pulumi.get(self, "name")
@pulumi.output_type
class GetListingRegionResult(dict):
    """A region associated with a listing."""

    def __init__(__self__, *,
                 code: str,
                 countries: Sequence['outputs.GetListingRegionCountryResult'],
                 name: str):
        """
        :param str code: A code assigned to the item.
        :param Sequence['GetListingRegionCountryArgs'] countries: Countries in the region.
        :param str name: Text that describes the resource.
        """
        # Register each field with the pulumi runtime.
        for attr, value in (("code", code), ("countries", countries), ("name", name)):
            pulumi.set(__self__, attr, value)

    @property
    @pulumi.getter
    def code(self) -> str:
        """A code assigned to the item."""
        return pulumi.get(self, "code")

    @property
    @pulumi.getter
    def countries(self) -> Sequence['outputs.GetListingRegionCountryResult']:
        """Countries in the region."""
        return pulumi.get(self, "countries")

    @property
    @pulumi.getter
    def name(self) -> str:
        """Text that describes the resource."""
        return pulumi.get(self, "name")
@pulumi.output_type
class GetListingRegionCountryResult(dict):
    """A country entry inside a listing region."""

    def __init__(__self__, *,
                 code: str,
                 name: str):
        """
        :param str code: A code assigned to the item.
        :param str name: Text that describes the resource.
        """
        # Register each field with the pulumi runtime.
        for attr, value in (("code", code), ("name", name)):
            pulumi.set(__self__, attr, value)

    @property
    @pulumi.getter
    def code(self) -> str:
        """A code assigned to the item."""
        return pulumi.get(self, "code")

    @property
    @pulumi.getter
    def name(self) -> str:
        """Text that describes the resource."""
        return pulumi.get(self, "name")
@pulumi.output_type
class GetListingScreenshotResult(dict):
    """A screenshot attached to a listing."""

    def __init__(__self__, *,
                 content_url: str,
                 description: str,
                 file_extension: str,
                 mime_type: str,
                 name: str):
        """
        :param str content_url: The content URL of the screenshot.
        :param str description: A description of the screenshot.
        :param str file_extension: The file extension of the screenshot.
        :param str mime_type: The MIME type of the screenshot.
        :param str name: Text that describes the resource.
        """
        # Register each field with the pulumi runtime.
        for attr, value in (("content_url", content_url),
                            ("description", description),
                            ("file_extension", file_extension),
                            ("mime_type", mime_type),
                            ("name", name)):
            pulumi.set(__self__, attr, value)

    @property
    @pulumi.getter(name="contentUrl")
    def content_url(self) -> str:
        """The content URL of the screenshot."""
        return pulumi.get(self, "content_url")

    @property
    @pulumi.getter
    def description(self) -> str:
        """A description of the screenshot."""
        return pulumi.get(self, "description")

    @property
    @pulumi.getter(name="fileExtension")
    def file_extension(self) -> str:
        """The file extension of the screenshot."""
        return pulumi.get(self, "file_extension")

    @property
    @pulumi.getter(name="mimeType")
    def mime_type(self) -> str:
        """The MIME type of the screenshot."""
        return pulumi.get(self, "mime_type")

    @property
    @pulumi.getter
    def name(self) -> str:
        """Text that describes the resource."""
        return pulumi.get(self, "name")
@pulumi.output_type
class GetListingSupportContactResult(dict):
    """A support contact for a listing."""

    def __init__(__self__, *,
                 email: str,
                 name: str,
                 phone: str,
                 subject: str):
        """
        :param str email: The email of the contact.
        :param str name: Text that describes the resource.
        :param str phone: The phone number of the contact.
        :param str subject: The email subject line to use when contacting support.
        """
        # Register each field with the pulumi runtime.
        for attr, value in (("email", email),
                            ("name", name),
                            ("phone", phone),
                            ("subject", subject)):
            pulumi.set(__self__, attr, value)

    @property
    @pulumi.getter
    def email(self) -> str:
        """The email of the contact."""
        return pulumi.get(self, "email")

    @property
    @pulumi.getter
    def name(self) -> str:
        """Text that describes the resource."""
        return pulumi.get(self, "name")

    @property
    @pulumi.getter
    def phone(self) -> str:
        """The phone number of the contact."""
        return pulumi.get(self, "phone")

    @property
    @pulumi.getter
    def subject(self) -> str:
        """The email subject line to use when contacting support."""
        return pulumi.get(self, "subject")
@pulumi.output_type
class GetListingSupportLinkResult(dict):
    """A support link published with a listing."""

    def __init__(__self__, *,
                 name: str,
                 url: str):
        """
        :param str name: Text that describes the resource.
        :param str url: The URL of the resource.
        """
        # Register each field with the pulumi runtime.
        for attr, value in (("name", name), ("url", url)):
            pulumi.set(__self__, attr, value)

    @property
    @pulumi.getter
    def name(self) -> str:
        """Text that describes the resource."""
        return pulumi.get(self, "name")

    @property
    @pulumi.getter
    def url(self) -> str:
        """The URL of the resource."""
        return pulumi.get(self, "url")
@pulumi.output_type
class GetListingSupportedOperatingSystemResult(dict):
    """An operating system supported by a listing."""

    def __init__(__self__, *,
                 name: str):
        """
        :param str name: Text that describes the resource.
        """
        pulumi.set(__self__, "name", name)

    @property
    @pulumi.getter
    def name(self) -> str:
        """Text that describes the resource."""
        return pulumi.get(self, "name")
@pulumi.output_type
class GetListingTaxesFilterResult(dict):
    """A name/values filter applied to the listing-taxes data source."""

    def __init__(__self__, *,
                 name: str,
                 values: Sequence[str],
                 regex: Optional[bool] = None):
        """
        :param str name: Name of the tax code.
        """
        # Register each field with the pulumi runtime; "regex" is optional
        # and only stored when supplied.
        for attr, value in (("name", name), ("values", values)):
            pulumi.set(__self__, attr, value)
        if regex is not None:
            pulumi.set(__self__, "regex", regex)

    @property
    @pulumi.getter
    def name(self) -> str:
        """Name of the tax code."""
        return pulumi.get(self, "name")

    @property
    @pulumi.getter
    def values(self) -> Sequence[str]:
        return pulumi.get(self, "values")

    @property
    @pulumi.getter
    def regex(self) -> Optional[bool]:
        return pulumi.get(self, "regex")
@pulumi.output_type
class GetListingTaxesTaxResult(dict):
    """A tax that applies to a listing."""

    def __init__(__self__, *,
                 code: str,
                 country: str,
                 name: str,
                 url: str):
        """
        :param str code: Unique code for the tax.
        :param str country: Country, which imposes the tax.
        :param str name: Name of the tax code.
        :param str url: The URL with more details about this tax.
        """
        # Register each field with the pulumi runtime.
        for attr, value in (("code", code),
                            ("country", country),
                            ("name", name),
                            ("url", url)):
            pulumi.set(__self__, attr, value)

    @property
    @pulumi.getter
    def code(self) -> str:
        """Unique code for the tax."""
        return pulumi.get(self, "code")

    @property
    @pulumi.getter
    def country(self) -> str:
        """Country, which imposes the tax."""
        return pulumi.get(self, "country")

    @property
    @pulumi.getter
    def name(self) -> str:
        """Name of the tax code."""
        return pulumi.get(self, "name")

    @property
    @pulumi.getter
    def url(self) -> str:
        """The URL with more details about this tax."""
        return pulumi.get(self, "url")
@pulumi.output_type
class GetListingVideoResult(dict):
    """A video attached to a listing."""

    def __init__(__self__, *,
                 name: str,
                 url: str):
        """
        :param str name: Text that describes the resource.
        :param str url: The URL of the resource.
        """
        # Register each field with the pulumi runtime.
        for attr, value in (("name", name), ("url", url)):
            pulumi.set(__self__, attr, value)

    @property
    @pulumi.getter
    def name(self) -> str:
        """Text that describes the resource."""
        return pulumi.get(self, "name")

    @property
    @pulumi.getter
    def url(self) -> str:
        """The URL of the resource."""
        return pulumi.get(self, "url")
@pulumi.output_type
class GetListingsFilterResult(dict):
    """A name/values filter applied to the listings data source."""

    def __init__(__self__, *,
                 name: str,
                 values: Sequence[str],
                 regex: Optional[bool] = None):
        """
        :param str name: The name of the listing.
        """
        # Register each field with the pulumi runtime; "regex" is optional
        # and only stored when supplied.
        for attr, value in (("name", name), ("values", values)):
            pulumi.set(__self__, attr, value)
        if regex is not None:
            pulumi.set(__self__, "regex", regex)

    @property
    @pulumi.getter
    def name(self) -> str:
        """The name of the listing."""
        return pulumi.get(self, "name")

    @property
    @pulumi.getter
    def values(self) -> Sequence[str]:
        return pulumi.get(self, "values")

    @property
    @pulumi.getter
    def regex(self) -> Optional[bool]:
        return pulumi.get(self, "regex")
@pulumi.output_type
class GetListingsListingResult(dict):
    """Summary of a single listing returned by the listings data source."""

    def __init__(__self__, *,
                 categories: Sequence[str],
                 icon: 'outputs.GetListingsListingIconResult',
                 id: str,
                 is_featured: bool,
                 listing_type: str,
                 name: str,
                 package_type: str,
                 pricing_types: Sequence[str],
                 publishers: Sequence['outputs.GetListingsListingPublisherResult'],
                 regions: Sequence['outputs.GetListingsListingRegionResult'],
                 short_description: str,
                 supported_operating_systems: Sequence['outputs.GetListingsListingSupportedOperatingSystemResult']):
        """
        :param Sequence[str] categories: Product categories that the listing belongs to.
        :param 'GetListingsListingIconArgs' icon: The model for upload data for images and icons.
        :param str id: Unique identifier for the publisher.
        :param bool is_featured: Indicates whether to show only featured listings. If this is set to `false` or is omitted, then all listings will be returned.
        :param str listing_type: In which catalog the listing should exist.
        :param str name: The name of the listing.
        :param str package_type: A filter to return only packages that match the given package type exactly.
        :param Sequence['GetListingsListingPublisherArgs'] publishers: Summary details about the publisher of the listing.
        :param Sequence['GetListingsListingRegionArgs'] regions: The regions where the listing is eligible to be deployed.
        :param str short_description: A short description of the listing.
        :param Sequence['GetListingsListingSupportedOperatingSystemArgs'] supported_operating_systems: List of operating systems supported.
        """
        # Register each field with the pulumi runtime.
        for attr, value in (("categories", categories),
                            ("icon", icon),
                            ("id", id),
                            ("is_featured", is_featured),
                            ("listing_type", listing_type),
                            ("name", name),
                            ("package_type", package_type),
                            ("pricing_types", pricing_types),
                            ("publishers", publishers),
                            ("regions", regions),
                            ("short_description", short_description),
                            ("supported_operating_systems", supported_operating_systems)):
            pulumi.set(__self__, attr, value)

    @property
    @pulumi.getter
    def categories(self) -> Sequence[str]:
        """Product categories that the listing belongs to."""
        return pulumi.get(self, "categories")

    @property
    @pulumi.getter
    def icon(self) -> 'outputs.GetListingsListingIconResult':
        """The model for upload data for images and icons."""
        return pulumi.get(self, "icon")

    @property
    @pulumi.getter
    def id(self) -> str:
        """Unique identifier for the publisher."""
        return pulumi.get(self, "id")

    @property
    @pulumi.getter(name="isFeatured")
    def is_featured(self) -> bool:
        """Indicates whether to show only featured listings. If this is set to `false` or is omitted, then all listings will be returned."""
        return pulumi.get(self, "is_featured")

    @property
    @pulumi.getter(name="listingType")
    def listing_type(self) -> str:
        """In which catalog the listing should exist."""
        return pulumi.get(self, "listing_type")

    @property
    @pulumi.getter
    def name(self) -> str:
        """The name of the listing."""
        return pulumi.get(self, "name")

    @property
    @pulumi.getter(name="packageType")
    def package_type(self) -> str:
        """A filter to return only packages that match the given package type exactly."""
        return pulumi.get(self, "package_type")

    @property
    @pulumi.getter(name="pricingTypes")
    def pricing_types(self) -> Sequence[str]:
        return pulumi.get(self, "pricing_types")

    @property
    @pulumi.getter
    def publishers(self) -> Sequence['outputs.GetListingsListingPublisherResult']:
        """Summary details about the publisher of the listing."""
        return pulumi.get(self, "publishers")

    @property
    @pulumi.getter
    def regions(self) -> Sequence['outputs.GetListingsListingRegionResult']:
        """The regions where the listing is eligible to be deployed."""
        return pulumi.get(self, "regions")

    @property
    @pulumi.getter(name="shortDescription")
    def short_description(self) -> str:
        """A short description of the listing."""
        return pulumi.get(self, "short_description")

    @property
    @pulumi.getter(name="supportedOperatingSystems")
    def supported_operating_systems(self) -> Sequence['outputs.GetListingsListingSupportedOperatingSystemResult']:
        """List of operating systems supported."""
        return pulumi.get(self, "supported_operating_systems")
@pulumi.output_type
class GetListingsListingIconResult(dict):
    """Upload data describing a listing's icon image."""

    def __init__(__self__, *,
                 content_url: str,
                 file_extension: str,
                 mime_type: str,
                 name: str):
        """
        :param str content_url: The content URL of the screenshot.
        :param str file_extension: The file extension of the screenshot.
        :param str mime_type: The MIME type of the screenshot.
        :param str name: The name of the listing.
        """
        # Register each field with the pulumi runtime.
        for attr, value in (("content_url", content_url),
                            ("file_extension", file_extension),
                            ("mime_type", mime_type),
                            ("name", name)):
            pulumi.set(__self__, attr, value)

    @property
    @pulumi.getter(name="contentUrl")
    def content_url(self) -> str:
        """The content URL of the screenshot."""
        return pulumi.get(self, "content_url")

    @property
    @pulumi.getter(name="fileExtension")
    def file_extension(self) -> str:
        """The file extension of the screenshot."""
        return pulumi.get(self, "file_extension")

    @property
    @pulumi.getter(name="mimeType")
    def mime_type(self) -> str:
        """The MIME type of the screenshot."""
        return pulumi.get(self, "mime_type")

    @property
    @pulumi.getter
    def name(self) -> str:
        """The name of the listing."""
        return pulumi.get(self, "name")
@pulumi.output_type
class GetListingsListingPublisherResult(dict):
    """Summary details about a listing's publisher."""

    def __init__(__self__, *,
                 description: str,
                 id: str,
                 name: str):
        """
        :param str description: A description of the screenshot.
        :param str id: Unique identifier for the publisher.
        :param str name: The name of the listing.
        """
        # Register each field with the pulumi runtime.
        for attr, value in (("description", description), ("id", id), ("name", name)):
            pulumi.set(__self__, attr, value)

    @property
    @pulumi.getter
    def description(self) -> str:
        """A description of the screenshot."""
        return pulumi.get(self, "description")

    @property
    @pulumi.getter
    def id(self) -> str:
        """Unique identifier for the publisher."""
        return pulumi.get(self, "id")

    @property
    @pulumi.getter
    def name(self) -> str:
        """The name of the listing."""
        return pulumi.get(self, "name")
@pulumi.output_type
class GetListingsListingRegionResult(dict):
    """A region in which a listing is available."""

    def __init__(__self__, *,
                 code: str,
                 countries: Sequence['outputs.GetListingsListingRegionCountryResult'],
                 name: str):
        """
        :param str code: A code assigned to the item.
        :param Sequence['GetListingsListingRegionCountryArgs'] countries: Countries in the region.
        :param str name: The name of the listing.
        """
        # Register each field with the pulumi runtime.
        for attr, value in (("code", code), ("countries", countries), ("name", name)):
            pulumi.set(__self__, attr, value)

    @property
    @pulumi.getter
    def code(self) -> str:
        """A code assigned to the item."""
        return pulumi.get(self, "code")

    @property
    @pulumi.getter
    def countries(self) -> Sequence['outputs.GetListingsListingRegionCountryResult']:
        """Countries in the region."""
        return pulumi.get(self, "countries")

    @property
    @pulumi.getter
    def name(self) -> str:
        """The name of the listing."""
        return pulumi.get(self, "name")
@pulumi.output_type
class GetListingsListingRegionCountryResult(dict):
    """A country entry inside a listing region."""

    def __init__(__self__, *,
                 code: str,
                 name: str):
        """
        :param str code: A code assigned to the item.
        :param str name: The name of the listing.
        """
        # Register each field with the pulumi runtime.
        for attr, value in (("code", code), ("name", name)):
            pulumi.set(__self__, attr, value)

    @property
    @pulumi.getter
    def code(self) -> str:
        """A code assigned to the item."""
        return pulumi.get(self, "code")

    @property
    @pulumi.getter
    def name(self) -> str:
        """The name of the listing."""
        return pulumi.get(self, "name")
@pulumi.output_type
class GetListingsListingSupportedOperatingSystemResult(dict):
    """An operating system supported by a listing."""

    def __init__(__self__, *,
                 name: str):
        """
        :param str name: The name of the listing.
        """
        pulumi.set(__self__, "name", name)

    @property
    @pulumi.getter
    def name(self) -> str:
        """The name of the listing."""
        return pulumi.get(self, "name")
@pulumi.output_type
class GetPublicationIconResult(dict):
    """Upload data describing a publication's icon image."""

    def __init__(__self__, *,
                 content_url: str,
                 file_extension: str,
                 mime_type: str,
                 name: str):
        """
        :param str content_url: The content URL of the upload data.
        :param str file_extension: The file extension of the upload data.
        :param str mime_type: The MIME type of the upload data.
        :param str name: name of the operating system
        """
        # Register each field with the pulumi runtime.
        for attr, value in (("content_url", content_url),
                            ("file_extension", file_extension),
                            ("mime_type", mime_type),
                            ("name", name)):
            pulumi.set(__self__, attr, value)

    @property
    @pulumi.getter(name="contentUrl")
    def content_url(self) -> str:
        """The content URL of the upload data."""
        return pulumi.get(self, "content_url")

    @property
    @pulumi.getter(name="fileExtension")
    def file_extension(self) -> str:
        """The file extension of the upload data."""
        return pulumi.get(self, "file_extension")

    @property
    @pulumi.getter(name="mimeType")
    def mime_type(self) -> str:
        """The MIME type of the upload data."""
        return pulumi.get(self, "mime_type")

    @property
    @pulumi.getter
    def name(self) -> str:
        """name of the operating system"""
        return pulumi.get(self, "name")
@pulumi.output_type
class GetPublicationPackageDetailsResult(dict):
    """Details of the package attached to a publication."""

    def __init__(__self__, *,
                 eulas: Sequence['outputs.GetPublicationPackageDetailsEulaResult'],
                 image_id: str,
                 operating_system: 'outputs.GetPublicationPackageDetailsOperatingSystemResult',
                 package_type: str,
                 package_version: str):
        """
        :param str package_type: The listing's package type.
        """
        # Register each field with the pulumi runtime.
        for attr, value in (("eulas", eulas),
                            ("image_id", image_id),
                            ("operating_system", operating_system),
                            ("package_type", package_type),
                            ("package_version", package_version)):
            pulumi.set(__self__, attr, value)

    @property
    @pulumi.getter
    def eulas(self) -> Sequence['outputs.GetPublicationPackageDetailsEulaResult']:
        return pulumi.get(self, "eulas")

    @property
    @pulumi.getter(name="imageId")
    def image_id(self) -> str:
        return pulumi.get(self, "image_id")

    @property
    @pulumi.getter(name="operatingSystem")
    def operating_system(self) -> 'outputs.GetPublicationPackageDetailsOperatingSystemResult':
        return pulumi.get(self, "operating_system")

    @property
    @pulumi.getter(name="packageType")
    def package_type(self) -> str:
        """The listing's package type."""
        return pulumi.get(self, "package_type")

    @property
    @pulumi.getter(name="packageVersion")
    def package_version(self) -> str:
        return pulumi.get(self, "package_version")
@pulumi.output_type
class GetPublicationPackageDetailsEulaResult(dict):
    """An end-user license agreement attached to publication package details."""

    def __init__(__self__, *,
                 eula_type: str,
                 license_text: str):
        # Register each field with the pulumi runtime.
        for attr, value in (("eula_type", eula_type), ("license_text", license_text)):
            pulumi.set(__self__, attr, value)

    @property
    @pulumi.getter(name="eulaType")
    def eula_type(self) -> str:
        return pulumi.get(self, "eula_type")

    @property
    @pulumi.getter(name="licenseText")
    def license_text(self) -> str:
        return pulumi.get(self, "license_text")
@pulumi.output_type
class GetPublicationPackageDetailsOperatingSystemResult(dict):
    """Operating system referenced by publication package details."""

    def __init__(__self__, *,
                 name: str):
        """
        :param str name: name of the operating system
        """
        pulumi.set(__self__, "name", name)

    @property
    @pulumi.getter
    def name(self) -> str:
        """name of the operating system"""
        return pulumi.get(self, "name")
@pulumi.output_type
class GetPublicationPackageOperatingSystemResult(dict):
    """Operating system associated with a publication package."""

    def __init__(__self__, *,
                 name: str):
        """
        :param str name: The name of the variable.
        """
        pulumi.set(__self__, "name", name)

    @property
    @pulumi.getter
    def name(self) -> str:
        """The name of the variable."""
        return pulumi.get(self, "name")
@pulumi.output_type
class GetPublicationPackageVariableResult(dict):
    """A user-configurable variable declared by a publication package."""

    def __init__(__self__, *,
                 data_type: str,
                 default_value: str,
                 description: str,
                 hint_message: str,
                 is_mandatory: bool,
                 name: str):
        """
        :param str data_type: The data type of the variable.
        :param str default_value: The variable's default value.
        :param str description: A description of the variable.
        :param str hint_message: A brief textual description that helps to explain the variable.
        :param bool is_mandatory: Whether the variable is mandatory.
        :param str name: The name of the variable.
        """
        # Register each field with the pulumi runtime.
        for attr, value in (("data_type", data_type),
                            ("default_value", default_value),
                            ("description", description),
                            ("hint_message", hint_message),
                            ("is_mandatory", is_mandatory),
                            ("name", name)):
            pulumi.set(__self__, attr, value)

    @property
    @pulumi.getter(name="dataType")
    def data_type(self) -> str:
        """The data type of the variable."""
        return pulumi.get(self, "data_type")

    @property
    @pulumi.getter(name="defaultValue")
    def default_value(self) -> str:
        """The variable's default value."""
        return pulumi.get(self, "default_value")

    @property
    @pulumi.getter
    def description(self) -> str:
        """A description of the variable."""
        return pulumi.get(self, "description")

    @property
    @pulumi.getter(name="hintMessage")
    def hint_message(self) -> str:
        """A brief textual description that helps to explain the variable."""
        return pulumi.get(self, "hint_message")

    @property
    @pulumi.getter(name="isMandatory")
    def is_mandatory(self) -> bool:
        """Whether the variable is mandatory."""
        return pulumi.get(self, "is_mandatory")

    @property
    @pulumi.getter
    def name(self) -> str:
        """The name of the variable."""
        return pulumi.get(self, "name")
@pulumi.output_type
class GetPublicationPackagesFilterResult(dict):
    """Name/values filter applied to the publication-packages data source."""

    def __init__(self, *,
                 name: str,
                 values: Sequence[str],
                 regex: Optional[bool] = None):
        """
        :param str name: The name of the variable.
        """
        pulumi.set(self, "name", name)
        pulumi.set(self, "values", values)
        if regex is not None:
            pulumi.set(self, "regex", regex)

    @property
    @pulumi.getter
    def name(self) -> str:
        """The name of the variable."""
        return pulumi.get(self, "name")

    @property
    @pulumi.getter
    def values(self) -> Sequence[str]:
        return pulumi.get(self, "values")

    @property
    @pulumi.getter
    def regex(self) -> Optional[bool]:
        return pulumi.get(self, "regex")
@pulumi.output_type
class GetPublicationPackagesPublicationPackageResult(dict):
    """Summary of one package returned by the publication-packages data source."""

    def __init__(self, *,
                 listing_id: str,
                 package_type: str,
                 package_version: str,
                 resource_id: str,
                 time_created: str):
        """
        :param str listing_id: The ID of the listing that the specified package belongs to.
        :param str package_type: A filter to return only packages that match the given package type exactly.
        :param str package_version: The version of the package. Package versions are unique within a listing.
        :param str resource_id: The unique identifier for the package resource.
        :param str time_created: The date and time this listing package was created, expressed in [RFC 3339](https://tools.ietf.org/html/rfc3339) timestamp format. Example: `2016-08-25T21:10:29.600Z`
        """
        fields = {
            "listing_id": listing_id,
            "package_type": package_type,
            "package_version": package_version,
            "resource_id": resource_id,
            "time_created": time_created,
        }
        for attr, value in fields.items():
            pulumi.set(self, attr, value)

    @property
    @pulumi.getter(name="listingId")
    def listing_id(self) -> str:
        """The ID of the listing that the specified package belongs to."""
        return pulumi.get(self, "listing_id")

    @property
    @pulumi.getter(name="packageType")
    def package_type(self) -> str:
        """A filter to return only packages that match the given package type exactly."""
        return pulumi.get(self, "package_type")

    @property
    @pulumi.getter(name="packageVersion")
    def package_version(self) -> str:
        """The version of the package. Package versions are unique within a listing."""
        return pulumi.get(self, "package_version")

    @property
    @pulumi.getter(name="resourceId")
    def resource_id(self) -> str:
        """The unique identifier for the package resource."""
        return pulumi.get(self, "resource_id")

    @property
    @pulumi.getter(name="timeCreated")
    def time_created(self) -> str:
        """The date and time this listing package was created, expressed in [RFC 3339](https://tools.ietf.org/html/rfc3339) timestamp format. Example: `2016-08-25T21:10:29.600Z`"""
        return pulumi.get(self, "time_created")
@pulumi.output_type
class GetPublicationSupportContactResult(dict):
    """Support contact information attached to a publication.

    NOTE(review): the generated docs described ``name`` as "name of the
    operating system" — a cross-copied description from the operating-system
    type. It is corrected here to describe the contact.
    """

    def __init__(__self__, *,
                 email: str,
                 name: str,
                 phone: str,
                 subject: str):
        """
        :param str email: The email of the contact.
        :param str name: The name of the contact.
        :param str phone: The phone number of the contact.
        :param str subject: The email subject line to use when contacting support.
        """
        pulumi.set(__self__, "email", email)
        pulumi.set(__self__, "name", name)
        pulumi.set(__self__, "phone", phone)
        pulumi.set(__self__, "subject", subject)

    @property
    @pulumi.getter
    def email(self) -> str:
        """
        The email of the contact.
        """
        return pulumi.get(self, "email")

    @property
    @pulumi.getter
    def name(self) -> str:
        """
        The name of the contact.
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter
    def phone(self) -> str:
        """
        The phone number of the contact.
        """
        return pulumi.get(self, "phone")

    @property
    @pulumi.getter
    def subject(self) -> str:
        """
        The email subject line to use when contacting support.
        """
        return pulumi.get(self, "subject")
@pulumi.output_type
class GetPublicationSupportedOperatingSystemResult(dict):
    """Operating system supported by a publication."""

    def __init__(self, *, name: str):
        """
        :param str name: name of the operating system
        """
        pulumi.set(self, "name", name)

    @property
    @pulumi.getter
    def name(self) -> str:
        """name of the operating system"""
        return pulumi.get(self, "name")
@pulumi.output_type
class GetPublicationsFilterResult(dict):
    """Name/values filter applied to the publications data source."""

    def __init__(self, *,
                 name: str,
                 values: Sequence[str],
                 regex: Optional[bool] = None):
        """
        :param str name: The name of the listing.
        """
        pulumi.set(self, "name", name)
        pulumi.set(self, "values", values)
        if regex is not None:
            pulumi.set(self, "regex", regex)

    @property
    @pulumi.getter
    def name(self) -> str:
        """The name of the listing."""
        return pulumi.get(self, "name")

    @property
    @pulumi.getter
    def values(self) -> Sequence[str]:
        return pulumi.get(self, "values")

    @property
    @pulumi.getter
    def regex(self) -> Optional[bool]:
        return pulumi.get(self, "regex")
@pulumi.output_type
class GetPublicationsPublicationResult(dict):
    """One publication returned by the publications data source.

    Fix: the original docs spelled "supported" as "supprted" in the
    ``supported_operating_systems`` descriptions (twice).
    """

    def __init__(__self__, *,
                 compartment_id: str,
                 defined_tags: Mapping[str, Any],
                 freeform_tags: Mapping[str, Any],
                 icon: 'outputs.GetPublicationsPublicationIconResult',
                 id: str,
                 is_agreement_acknowledged: bool,
                 listing_type: str,
                 long_description: str,
                 name: str,
                 package_details: 'outputs.GetPublicationsPublicationPackageDetailsResult',
                 package_type: str,
                 short_description: str,
                 state: str,
                 support_contacts: Sequence['outputs.GetPublicationsPublicationSupportContactResult'],
                 supported_operating_systems: Sequence['outputs.GetPublicationsPublicationSupportedOperatingSystemResult'],
                 time_created: str):
        """
        :param str compartment_id: The unique identifier for the compartment.
        :param Mapping[str, Any] defined_tags: The defined tags associated with this resource, if any. Each key is predefined and scoped to namespaces. For more information, see [Resource Tags](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm). Example: `{"Operations.CostCenter": "42"}`
        :param Mapping[str, Any] freeform_tags: The freeform tags associated with this resource, if any. Each tag is a simple key-value pair with no predefined name, type, or namespace. For more information, see [Resource Tags](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm). Example: `{"Department": "Finance"}`
        :param 'GetPublicationsPublicationIconArgs' icon: The model for upload data for images and icons.
        :param str id: The unique identifier for the listing in Marketplace.
        :param str listing_type: The type of the listing
        :param str long_description: A long description of the listing.
        :param str name: The name of the listing.
        :param str package_type: The listing's package type.
        :param str short_description: A short description of the listing.
        :param str state: The state of the listing in its lifecycle
        :param Sequence['GetPublicationsPublicationSupportContactArgs'] support_contacts: Contact information to use to get support from the publisher for the listing.
        :param Sequence['GetPublicationsPublicationSupportedOperatingSystemArgs'] supported_operating_systems: List of operating systems supported.
        :param str time_created: The date and time this publication was created, expressed in [RFC 3339](https://tools.ietf.org/html/rfc3339) timestamp format. Example: `2016-08-25T21:10:29.600Z`
        """
        pulumi.set(__self__, "compartment_id", compartment_id)
        pulumi.set(__self__, "defined_tags", defined_tags)
        pulumi.set(__self__, "freeform_tags", freeform_tags)
        pulumi.set(__self__, "icon", icon)
        pulumi.set(__self__, "id", id)
        pulumi.set(__self__, "is_agreement_acknowledged", is_agreement_acknowledged)
        pulumi.set(__self__, "listing_type", listing_type)
        pulumi.set(__self__, "long_description", long_description)
        pulumi.set(__self__, "name", name)
        pulumi.set(__self__, "package_details", package_details)
        pulumi.set(__self__, "package_type", package_type)
        pulumi.set(__self__, "short_description", short_description)
        pulumi.set(__self__, "state", state)
        pulumi.set(__self__, "support_contacts", support_contacts)
        pulumi.set(__self__, "supported_operating_systems", supported_operating_systems)
        pulumi.set(__self__, "time_created", time_created)

    @property
    @pulumi.getter(name="compartmentId")
    def compartment_id(self) -> str:
        """
        The unique identifier for the compartment.
        """
        return pulumi.get(self, "compartment_id")

    @property
    @pulumi.getter(name="definedTags")
    def defined_tags(self) -> Mapping[str, Any]:
        """
        The defined tags associated with this resource, if any. Each key is predefined and scoped to namespaces. For more information, see [Resource Tags](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm). Example: `{"Operations.CostCenter": "42"}`
        """
        return pulumi.get(self, "defined_tags")

    @property
    @pulumi.getter(name="freeformTags")
    def freeform_tags(self) -> Mapping[str, Any]:
        """
        The freeform tags associated with this resource, if any. Each tag is a simple key-value pair with no predefined name, type, or namespace. For more information, see [Resource Tags](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm). Example: `{"Department": "Finance"}`
        """
        return pulumi.get(self, "freeform_tags")

    @property
    @pulumi.getter
    def icon(self) -> 'outputs.GetPublicationsPublicationIconResult':
        """
        The model for upload data for images and icons.
        """
        return pulumi.get(self, "icon")

    @property
    @pulumi.getter
    def id(self) -> str:
        """
        The unique identifier for the listing in Marketplace.
        """
        return pulumi.get(self, "id")

    @property
    @pulumi.getter(name="isAgreementAcknowledged")
    def is_agreement_acknowledged(self) -> bool:
        return pulumi.get(self, "is_agreement_acknowledged")

    @property
    @pulumi.getter(name="listingType")
    def listing_type(self) -> str:
        """
        The type of the listing
        """
        return pulumi.get(self, "listing_type")

    @property
    @pulumi.getter(name="longDescription")
    def long_description(self) -> str:
        """
        A long description of the listing.
        """
        return pulumi.get(self, "long_description")

    @property
    @pulumi.getter
    def name(self) -> str:
        """
        The name of the listing.
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter(name="packageDetails")
    def package_details(self) -> 'outputs.GetPublicationsPublicationPackageDetailsResult':
        return pulumi.get(self, "package_details")

    @property
    @pulumi.getter(name="packageType")
    def package_type(self) -> str:
        """
        The listing's package type.
        """
        return pulumi.get(self, "package_type")

    @property
    @pulumi.getter(name="shortDescription")
    def short_description(self) -> str:
        """
        A short description of the listing.
        """
        return pulumi.get(self, "short_description")

    @property
    @pulumi.getter
    def state(self) -> str:
        """
        The state of the listing in its lifecycle
        """
        return pulumi.get(self, "state")

    @property
    @pulumi.getter(name="supportContacts")
    def support_contacts(self) -> Sequence['outputs.GetPublicationsPublicationSupportContactResult']:
        """
        Contact information to use to get support from the publisher for the listing.
        """
        return pulumi.get(self, "support_contacts")

    @property
    @pulumi.getter(name="supportedOperatingSystems")
    def supported_operating_systems(self) -> Sequence['outputs.GetPublicationsPublicationSupportedOperatingSystemResult']:
        """
        List of operating systems supported.
        """
        return pulumi.get(self, "supported_operating_systems")

    @property
    @pulumi.getter(name="timeCreated")
    def time_created(self) -> str:
        """
        The date and time this publication was created, expressed in [RFC 3339](https://tools.ietf.org/html/rfc3339) timestamp format. Example: `2016-08-25T21:10:29.600Z`
        """
        return pulumi.get(self, "time_created")
@pulumi.output_type
class GetPublicationsPublicationIconResult(dict):
    """Upload data (icon/image) attached to a publication listing."""

    def __init__(self, *,
                 content_url: str,
                 file_extension: str,
                 mime_type: str,
                 name: str):
        """
        :param str content_url: The content URL of the upload data.
        :param str file_extension: The file extension of the upload data.
        :param str mime_type: The MIME type of the upload data.
        :param str name: The name of the listing.
        """
        fields = {
            "content_url": content_url,
            "file_extension": file_extension,
            "mime_type": mime_type,
            "name": name,
        }
        for attr, value in fields.items():
            pulumi.set(self, attr, value)

    @property
    @pulumi.getter(name="contentUrl")
    def content_url(self) -> str:
        """The content URL of the upload data."""
        return pulumi.get(self, "content_url")

    @property
    @pulumi.getter(name="fileExtension")
    def file_extension(self) -> str:
        """The file extension of the upload data."""
        return pulumi.get(self, "file_extension")

    @property
    @pulumi.getter(name="mimeType")
    def mime_type(self) -> str:
        """The MIME type of the upload data."""
        return pulumi.get(self, "mime_type")

    @property
    @pulumi.getter
    def name(self) -> str:
        """The name of the listing."""
        return pulumi.get(self, "name")
@pulumi.output_type
class GetPublicationsPublicationPackageDetailsResult(dict):
    """Package details embedded in a publication result."""

    def __init__(self, *,
                 eulas: Sequence['outputs.GetPublicationsPublicationPackageDetailsEulaResult'],
                 image_id: str,
                 operating_system: 'outputs.GetPublicationsPublicationPackageDetailsOperatingSystemResult',
                 package_type: str,
                 package_version: str):
        """
        :param str package_type: The listing's package type.
        """
        fields = {
            "eulas": eulas,
            "image_id": image_id,
            "operating_system": operating_system,
            "package_type": package_type,
            "package_version": package_version,
        }
        for attr, value in fields.items():
            pulumi.set(self, attr, value)

    @property
    @pulumi.getter
    def eulas(self) -> Sequence['outputs.GetPublicationsPublicationPackageDetailsEulaResult']:
        return pulumi.get(self, "eulas")

    @property
    @pulumi.getter(name="imageId")
    def image_id(self) -> str:
        return pulumi.get(self, "image_id")

    @property
    @pulumi.getter(name="operatingSystem")
    def operating_system(self) -> 'outputs.GetPublicationsPublicationPackageDetailsOperatingSystemResult':
        return pulumi.get(self, "operating_system")

    @property
    @pulumi.getter(name="packageType")
    def package_type(self) -> str:
        """The listing's package type."""
        return pulumi.get(self, "package_type")

    @property
    @pulumi.getter(name="packageVersion")
    def package_version(self) -> str:
        return pulumi.get(self, "package_version")
@pulumi.output_type
class GetPublicationsPublicationPackageDetailsEulaResult(dict):
    """End-user license agreement entry of a publication package."""

    def __init__(self, *, eula_type: str, license_text: str):
        pulumi.set(self, "eula_type", eula_type)
        pulumi.set(self, "license_text", license_text)

    @property
    @pulumi.getter(name="eulaType")
    def eula_type(self) -> str:
        return pulumi.get(self, "eula_type")

    @property
    @pulumi.getter(name="licenseText")
    def license_text(self) -> str:
        return pulumi.get(self, "license_text")
@pulumi.output_type
class GetPublicationsPublicationPackageDetailsOperatingSystemResult(dict):
    """Operating system descriptor of a publication package.

    Fix: the generated docs described ``name`` as "The name of the listing."
    — a cross-copied description; this type models an operating system.
    """

    def __init__(__self__, *,
                 name: str):
        """
        :param str name: The name of the operating system.
        """
        pulumi.set(__self__, "name", name)

    @property
    @pulumi.getter
    def name(self) -> str:
        """
        The name of the operating system.
        """
        return pulumi.get(self, "name")
@pulumi.output_type
class GetPublicationsPublicationSupportContactResult(dict):
    """Support contact information of a publication.

    Fix: the generated docs described ``name`` as "The name of the listing."
    — a cross-copied description; this field is the contact's name.
    """

    def __init__(__self__, *,
                 email: str,
                 name: str,
                 phone: str,
                 subject: str):
        """
        :param str email: The email of the contact.
        :param str name: The name of the contact.
        :param str phone: The phone number of the contact.
        :param str subject: The email subject line to use when contacting support.
        """
        pulumi.set(__self__, "email", email)
        pulumi.set(__self__, "name", name)
        pulumi.set(__self__, "phone", phone)
        pulumi.set(__self__, "subject", subject)

    @property
    @pulumi.getter
    def email(self) -> str:
        """
        The email of the contact.
        """
        return pulumi.get(self, "email")

    @property
    @pulumi.getter
    def name(self) -> str:
        """
        The name of the contact.
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter
    def phone(self) -> str:
        """
        The phone number of the contact.
        """
        return pulumi.get(self, "phone")

    @property
    @pulumi.getter
    def subject(self) -> str:
        """
        The email subject line to use when contacting support.
        """
        return pulumi.get(self, "subject")
@pulumi.output_type
class GetPublicationsPublicationSupportedOperatingSystemResult(dict):
    """Operating system supported by a publication.

    Fix: the generated docs described ``name`` as "The name of the listing."
    — a cross-copied description; this type models an operating system.
    """

    def __init__(__self__, *,
                 name: str):
        """
        :param str name: The name of the operating system.
        """
        pulumi.set(__self__, "name", name)

    @property
    @pulumi.getter
    def name(self) -> str:
        """
        The name of the operating system.
        """
        return pulumi.get(self, "name")
@pulumi.output_type
class GetPublishersFilterResult(dict):
    """Name/values filter applied to the publishers data source."""

    def __init__(self, *,
                 name: str,
                 values: Sequence[str],
                 regex: Optional[bool] = None):
        """
        :param str name: The name of the publisher.
        """
        pulumi.set(self, "name", name)
        pulumi.set(self, "values", values)
        if regex is not None:
            pulumi.set(self, "regex", regex)

    @property
    @pulumi.getter
    def name(self) -> str:
        """The name of the publisher."""
        return pulumi.get(self, "name")

    @property
    @pulumi.getter
    def values(self) -> Sequence[str]:
        return pulumi.get(self, "values")

    @property
    @pulumi.getter
    def regex(self) -> Optional[bool]:
        return pulumi.get(self, "regex")
@pulumi.output_type
class GetPublishersPublisherResult(dict):
    """One publisher returned by the publishers data source."""

    def __init__(self, *,
                 description: str,
                 id: str,
                 name: str):
        """
        :param str description: A description of the publisher.
        :param str id: Unique identifier for the publisher.
        :param str name: The name of the publisher.
        """
        pulumi.set(self, "description", description)
        pulumi.set(self, "id", id)
        pulumi.set(self, "name", name)

    @property
    @pulumi.getter
    def description(self) -> str:
        """A description of the publisher."""
        return pulumi.get(self, "description")

    @property
    @pulumi.getter
    def id(self) -> str:
        """Unique identifier for the publisher."""
        return pulumi.get(self, "id")

    @property
    @pulumi.getter
    def name(self) -> str:
        """The name of the publisher."""
        return pulumi.get(self, "name")
| 31.122564
| 343
| 0.603112
| 9,925
| 94,208
| 5.504685
| 0.042821
| 0.030109
| 0.055204
| 0.080682
| 0.79068
| 0.776732
| 0.761028
| 0.734744
| 0.71267
| 0.693927
| 0
| 0.002374
| 0.289041
| 94,208
| 3,026
| 344
| 31.132849
| 0.813327
| 0.238164
| 0
| 0.799539
| 1
| 0.001728
| 0.15071
| 0.070985
| 0
| 0
| 0
| 0
| 0
| 1
| 0.176843
| false
| 0
| 0.003456
| 0.020737
| 0.355415
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
9a6e49cee57ef1529a3df692eac1133d101cf3e1
| 5,657
|
py
|
Python
|
overseed_tests/test_create_plant.py
|
Raihanbook/overseed
|
73d7bfdd967d4c43b88d8aadd0cd0abbbd5f2842
|
[
"MIT"
] | null | null | null |
overseed_tests/test_create_plant.py
|
Raihanbook/overseed
|
73d7bfdd967d4c43b88d8aadd0cd0abbbd5f2842
|
[
"MIT"
] | null | null | null |
overseed_tests/test_create_plant.py
|
Raihanbook/overseed
|
73d7bfdd967d4c43b88d8aadd0cd0abbbd5f2842
|
[
"MIT"
] | null | null | null |
from overseed_tests.overseed_test_case import OverseedTestCase
# Create Plant test
# ---------------
# This test case covers all the Create Plant pages (both Admin and Supervisor)
class TestCreatePlant(OverseedTestCase):
    """Covers the Create Plant pages: admin and supervisor success paths,
    the three invalid-input paths, and permission denial for plain users
    and anonymous visitors."""

    def _login(self, email, password):
        # Authenticate through the login form, following redirects.
        return self.client.post("/login",
                                data=dict(email=email, password=password, remember=False),
                                follow_redirects=True)

    def _create_plant(self, plant_type, icon, company):
        # Submit the create-plant form, following redirects.
        return self.client.post("create/plant",
                                data=dict(type=plant_type,
                                          icon=icon,
                                          company=company),
                                follow_redirects=True)

    def test_create_plant_admin(self):
        self._login('admin@admin.com', 'admin')
        result = self._create_plant('Aloe Vera', 'bush_healthy.png', 'Company X')
        self.assert_template_used('plants_list.html')
        self.assert_message_flashed('The new plant has been created.', 'success')
        self.assertIn(b'bush_healthy.png', result.data)

    def test_create_plant_supervisor(self):
        self._login('supervisor@supervisor.com', 'supervisor')
        result = self._create_plant('Aloe Vera', 'bush_healthy.png', 'Company X')
        self.assert_template_used('plants_list.html')
        self.assert_message_flashed('The new plant has been created.', 'success')
        self.assertIn(b'bush_healthy.png', result.data)

    def test_create_plant_admin_invalid_type(self):
        self._login('admin@admin.com', 'admin')
        # Invalid plant type: the form should be re-rendered, nothing saved.
        self._create_plant('Fictional Plant', 'pilea_healthy.png', 'Company X')
        self.assert_template_used('create_plant.html')
        result = self.client.get("/plants")
        self.assertNotIn(b'Fictional Plant', result.data)

    def test_create_plant_admin_invalid_icon(self):
        self._login('admin@admin.com', 'admin')
        # Invalid icon: the form should be re-rendered, nothing saved.
        self._create_plant('Aloe Vera', 'fictional.png', 'Company X')
        self.assert_template_used('create_plant.html')
        result = self.client.get("/plants")
        self.assertNotIn(b'fictional.png', result.data)

    def test_create_plant_admin_invalid_company(self):
        self._login('admin@admin.com', 'admin')
        # Invalid company: the form should be re-rendered, nothing saved.
        self._create_plant('Aloe Vera', 'tree_healthy.png', 'Fictional Company')
        self.assert_template_used('create_plant.html')
        result = self.client.get("/plants")
        self.assertNotIn(b'Fictional Company', result.data)

    def test_create_plant_user(self):
        # Regular users may not create plants.
        self._login('user@user.com', 'user')
        result = self._create_plant('Aloe Vera', 'bush_healthy.png', 'Company X')
        self.assert403(result)

    def test_create_plant_logged_out(self):
        # Deliberately NOT logged in: the endpoint must deny access.
        result = self._create_plant('Aloe Vera', 'bush_healthy.png', 'Company X')
        self.assert403(result)
| 46.368852
| 109
| 0.501326
| 555
| 5,657
| 4.981982
| 0.145946
| 0.103436
| 0.065823
| 0.04557
| 0.85642
| 0.848101
| 0.837975
| 0.837975
| 0.797468
| 0.777577
| 0
| 0.001793
| 0.408344
| 5,657
| 121
| 110
| 46.752066
| 0.82432
| 0.126746
| 0
| 0.759494
| 0
| 0
| 0.154018
| 0.005073
| 0
| 0
| 0
| 0
| 0.177215
| 1
| 0.088608
| false
| 0.075949
| 0.012658
| 0
| 0.113924
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 8
|
9a6e97bb736bd52130b81168ba7495160de773c2
| 30,540
|
py
|
Python
|
bin/ML/model.py
|
ArtaSeify/BOSS-SC2
|
a86db4bcb1cd0a9cfd71c4583ccb9bd87c0cb415
|
[
"MIT"
] | null | null | null |
bin/ML/model.py
|
ArtaSeify/BOSS-SC2
|
a86db4bcb1cd0a9cfd71c4583ccb9bd87c0cb415
|
[
"MIT"
] | null | null | null |
bin/ML/model.py
|
ArtaSeify/BOSS-SC2
|
a86db4bcb1cd0a9cfd71c4583ccb9bd87c0cb415
|
[
"MIT"
] | null | null | null |
import tensorflow as tf
from tensorflow.keras import layers
import os
import math
import numpy as np
from scipy.special import softmax
from datetime import datetime
# from data_loader import DataLoader
class CustomTensorBoard(tf.keras.callbacks.TensorBoard):
    """TensorBoard callback that also records the optimizer's current
    learning rate in the epoch logs."""

    def __init__(self, model, *args, **kwargs):
        super(CustomTensorBoard, self).__init__(*args, **kwargs)
        self.model = model

    def get_lr(self):
        """Return the optimizer's current learning rate as a Python value."""
        return tf.keras.backend.eval(self.model.optimizer.lr)

    def on_epoch_end(self, epoch, logs=None):
        # BUG FIX: `logs` defaults to None, but the original called
        # logs.update() unconditionally, raising AttributeError whenever the
        # framework invoked the callback without a logs dict.
        if logs is None:
            logs = {}
        logs.update({"learning rate": np.float_(self.get_lr())})
        super(CustomTensorBoard, self).on_epoch_end(epoch, logs)
class Model:
    """Minimal common base class for the network wrappers below."""

    def __init__(self):
        pass
class IntegralValueNN(Model):
    """Fully-connected regression network (MSE loss) over a flat state vector."""

    def __init__(self, input_shape, output_shape, model_name, batch_size, learning_rate, model_path, create_network=True):
        self.model_name = model_name
        self.model_path = model_path
        self.feature_shape = input_shape
        self.prediction_shape = output_shape
        self.batch_size = batch_size
        self.epochs = 0
        # Checkpoints: best-so-far (by training loss) plus every epoch.
        best_path = self.model_path.split(".")[0] + "_best.h5"
        self.checkpoint_best = tf.keras.callbacks.ModelCheckpoint(best_path, monitor='loss', save_best_only=True, mode='min')
        self.checkpoint = tf.keras.callbacks.ModelCheckpoint(self.model_path)
        if create_network:
            self.create(input_shape, output_shape, model_name, batch_size, learning_rate)

    def percent_error(self, y_true, y_pred):
        """Absolute error as a percentage of y_true (denominator clamped to >= 1)."""
        abs_diff = tf.math.abs(tf.math.subtract(y_true, y_pred))
        denominator = tf.math.maximum(y_true, 1)
        return tf.math.multiply(tf.math.divide(abs_diff, denominator), 100)

    def exponential_decay(self, epoch, lr):
        """Step-wise exponential LR schedule: multiply by 0.70 each epoch."""
        decay_rate = 0.70
        reduce_every_epochs = 1.0
        return lr * pow(decay_rate, math.floor((epoch + 1) / reduce_every_epochs))

    def create(self, input_shape, output_shape, model_name, batch_size, learning_rate):
        """Build and compile the 2048-1024-1024-512-512 elu MLP with a linear value head."""
        inputs = tf.keras.Input(shape=(input_shape, ), name="state")
        layer = inputs
        for width in (2048, 1024, 1024, 512, 512):
            layer = layers.Dense(width, activation='elu')(layer)
        prediction = layers.Dense(output_shape, name="value")(layer)
        self.model = tf.keras.Model(inputs=inputs, outputs=prediction)
        self.model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate),
                           loss='mse',
                           metrics=['mae', self.percent_error])

    def train(self, iterator, epochs, steps_per_epoch, verbose, class_weight=None):
        log_dir = os.path.join(os.getcwd(), os.path.join("logs", self.model_name))
        tensorboard = CustomTensorBoard(self.model, log_dir=log_dir, write_graph=False, batch_size=self.batch_size)
        return self.model.fit(iterator, epochs=epochs, steps_per_epoch=steps_per_epoch, verbose=verbose,
                              callbacks=[tensorboard, self.checkpoint, self.checkpoint_best],
                              class_weight=class_weight)

    def evaluate(self, iterator, steps, verbose):
        return self.model.evaluate(iterator, steps=steps, verbose=verbose)

    def predict(self, nn_input, batch_size=None, steps=1, verbose=0):
        return self.model.predict(nn_input, batch_size=batch_size, steps=steps, verbose=verbose)

    def predict_on_batch(self, nn_input):
        """Predict on one batch and return a plain Python list (never a bare float)."""
        squeezed = np.squeeze(self.model.predict_on_batch(nn_input))
        values = squeezed.tolist()
        return [values] if isinstance(values, float) else values

    def save(self, path):
        tf.keras.models.save_model(self.model, path)

    def load(self, path):
        self.model = tf.keras.models.load_model(path, custom_objects={"percent_error": self.percent_error})
class PolicyNetwork(Model):
    """Fully-connected policy network trained with categorical cross-entropy on logits."""

    def __init__(self, input_shape, output_shape, model_name, batch_size, learning_rate, model_path, create_network=True):
        self.model_name = model_name
        self.model_path = model_path
        self.feature_shape = input_shape
        self.prediction_shape = output_shape
        self.batch_size = batch_size
        self.epochs = 0
        # Checkpoints: best-so-far (by categorical accuracy) plus every epoch.
        best_path = self.model_path.split(".")[0] + "_best.h5"
        self.checkpoint_best = tf.keras.callbacks.ModelCheckpoint(best_path, monitor='categorical_accuracy', save_best_only=True, mode='max')
        self.checkpoint = tf.keras.callbacks.ModelCheckpoint(self.model_path)
        if create_network:
            self.create(input_shape, output_shape, model_name, batch_size, learning_rate)

    def exponential_decay(self, epoch, lr):
        """Step-wise exponential LR schedule: multiply by 0.70 each epoch."""
        decay_rate = 0.70
        reduce_every_epochs = 1.0
        return lr * pow(decay_rate, math.floor((epoch + 1) / reduce_every_epochs))

    def top_2_accuracy(self, y_true, y_pred):
        """Top-2 categorical accuracy metric."""
        return tf.keras.metrics.top_k_categorical_accuracy(y_true, y_pred, k=2)

    def CCELogits(self, y_true, y_pred):
        """Categorical cross-entropy computed directly on logits."""
        return tf.keras.backend.categorical_crossentropy(y_true, y_pred, from_logits=True)

    def accuracy(self, y_true, y_pred):
        """Fraction of the batch whose argmax prediction hits a nonzero target entry."""
        row_ids = tf.convert_to_tensor([[i] for i in range(self.batch_size)], dtype=tf.int64)
        top_choice = tf.expand_dims(tf.keras.backend.argmax(y_pred, axis=-1), 1)
        gather_indices = tf.concat([row_ids, top_choice], 1)
        hits = tf.math.count_nonzero(tf.gather_nd(y_true, gather_indices))
        return tf.math.divide(hits, self.batch_size)

    def create(self, input_shape, output_shape, model_name, batch_size, learning_rate):
        """Build and compile the 512-256-256-128-128 elu MLP with a linear policy head
        (logits; softmax is applied at prediction time)."""
        inputs = tf.keras.Input(shape=(input_shape, ), name="state")
        layer = inputs
        for width in (512, 256, 256, 128, 128):
            layer = layers.Dense(width, activation='elu')(layer)
        prediction = layers.Dense(output_shape, activation='linear', name="policy")(layer)
        self.model = tf.keras.Model(inputs=inputs, outputs=prediction)
        self.model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate),
                           loss=self.CCELogits,
                           metrics=['categorical_accuracy', self.top_2_accuracy, self.accuracy])

    def train(self, iterator, epochs, steps_per_epoch, verbose, class_weight=None):
        log_dir = os.path.join(os.getcwd(), os.path.join("logs", self.model_name))
        tensorboard = CustomTensorBoard(self.model, log_dir=log_dir, write_graph=False, batch_size=self.batch_size)
        return self.model.fit(iterator, epochs=epochs, steps_per_epoch=steps_per_epoch, verbose=verbose,
                              callbacks=[tensorboard, self.checkpoint, self.checkpoint_best],
                              class_weight=class_weight)

    def evaluate(self, iterator, steps, verbose):
        return self.model.evaluate(iterator, steps=steps, verbose=verbose)

    def predict(self, nn_input, batch_size=None, steps=1, verbose=0):
        # Logits -> probabilities.
        return softmax(self.model.predict(nn_input, batch_size=batch_size, steps=steps, verbose=verbose))

    def predict_on_batch(self, nn_input):
        # Logits -> probabilities -> plain Python list.
        return np.ndarray.tolist(np.squeeze(softmax(self.model.predict_on_batch(nn_input))))

    def save(self, path):
        tf.keras.models.save_model(self.model, path)

    def load(self, path):
        self.model = tf.keras.models.load_model(path,
                                                custom_objects={"top_2_accuracy": self.top_2_accuracy, "CCELogits": self.CCELogits, "accuracy": self.accuracy})
class PolicyAndValueNetwork(Model):
    """Dense two-headed network over a flat feature vector.

    A shared dense trunk feeds two named outputs:
      * "policy" — linear logits of size `policy_shape`; softmax is applied
        at prediction time, and training uses cross-entropy from logits.
      * "value"  — linear output of size `value_shape`, trained with MSE.
    """
    def __init__(self, input_shape, policy_shape, value_shape, model_name, batch_size, learning_rate, model_path, create_network=True):
        self.model_name = model_name
        self.model_path = model_path
        self.feature_shape = input_shape
        self.policy_shape = policy_shape
        self.value_shape = value_shape
        self.batch_size = batch_size
        self.learning_rate = learning_rate
        self.epochs = 0
        # BUGFIX: the previous `model_path.split(".")[0]` truncated at the
        # FIRST dot, mangling paths such as "./models/run.1/model.h5" (it
        # produced "" + "_best.h5").  os.path.splitext strips only the final
        # extension and is byte-identical for simple "name.h5" paths.
        # NOTE(review): with named outputs Keras usually logs this metric as
        # 'policy_categorical_accuracy' — confirm the monitor key matches.
        self.checkpoint_best = tf.keras.callbacks.ModelCheckpoint(os.path.splitext(self.model_path)[0] + "_best.h5", monitor='categorical_accuracy', save_best_only=True, mode='max')
        self.checkpoint = tf.keras.callbacks.ModelCheckpoint(self.model_path)
        if create_network:
            self.create()
    def exponential_decay(self, epoch, lr):
        """Step-decay schedule: multiply lr by 0.70 every epoch (unused by
        default; see the commented-out LearningRateScheduler in create())."""
        decay_rate = 0.70
        reduce_every_epochs = 1.0
        return lr * pow(decay_rate, math.floor((epoch+1) / reduce_every_epochs))
    def top_2_accuracy(self, y_true, y_pred):
        """Fraction of samples whose true class is in the top-2 predictions."""
        return tf.keras.metrics.top_k_categorical_accuracy(y_true, y_pred, k=2)
    def CCELogits(self, y_true, y_pred):
        """Categorical cross-entropy on raw logits (policy head is linear)."""
        return tf.keras.backend.categorical_crossentropy(y_true, y_pred, from_logits=True)
    def accuracy(self, y_true, y_pred):
        """Fraction of the batch where y_true is nonzero at argmax(y_pred).

        Builds (row, argmax) index pairs, gathers the corresponding y_true
        entries, and counts hits — a top-1 hit-rate that also works for
        multi-hot / soft label vectors.
        """
        indices = tf.concat([tf.convert_to_tensor([[i] for i in range(self.batch_size)], dtype=tf.int64),
                             tf.expand_dims(tf.keras.backend.argmax(y_pred, axis=-1), 1)], 1)
        nonzeros = tf.math.divide(tf.math.count_nonzero(tf.gather_nd(y_true, indices)), self.batch_size)
        return nonzeros
    def percent_error(self, y_true, y_pred):
        """Absolute percent error; the max(y_true, 1) guard avoids div-by-0."""
        return tf.math.multiply(tf.math.divide(tf.math.abs(tf.math.subtract(y_true, y_pred)), tf.math.maximum(y_true, 1)), 100)
    def create(self):
        """Build and compile the shared-trunk, two-headed dense model."""
        inputs = tf.keras.Input(shape=(self.feature_shape, ), name="state")
        layer = layers.Dense(512, activation='elu')(inputs)
        layer = layers.Dense(512, activation='elu')(layer)
        layer = layers.Dense(256, activation='elu')(layer)
        layer = layers.Dense(128, activation='elu')(layer)
        layer = layers.Dense(128, activation='elu')(layer)
        # Policy head stays linear: CCELogits expects logits, and predict()
        # applies softmax itself.
        policy = layers.Dense(self.policy_shape, activation='linear', name="policy")(layer)
        value = layers.Dense(self.value_shape, name="value")(layer)
        self.model = tf.keras.Model(inputs=inputs, outputs=[policy, value])
        #self.lrs = tf.keras.callbacks.LearningRateScheduler(self.exponential_decay)
        self.model.compile(optimizer=tf.keras.optimizers.Adam(self.learning_rate),
                           loss = {"policy": self.CCELogits,
                                   "value" : 'mse'},
                           metrics={"policy": ['categorical_accuracy', self.top_2_accuracy, self.accuracy],
                                    "value" : ['mae', self.percent_error]})
    def train(self, iterator, epochs, steps_per_epoch, verbose, class_weight=None):
        """Fit with TensorBoard logging plus per-epoch and best-metric
        checkpoints; returns the Keras History object."""
        return self.model.fit(iterator, epochs=epochs, steps_per_epoch=steps_per_epoch, verbose=verbose,
                              callbacks=[CustomTensorBoard(self.model, log_dir=os.path.join(os.getcwd(), os.path.join("logs", self.model_name)), write_graph=False, batch_size=self.batch_size),
                                         self.checkpoint, self.checkpoint_best],
                              class_weight = class_weight)
    def evaluate(self, iterator, steps, verbose):
        # Thin wrapper; returns loss/metric values for `steps` batches.
        return self.model.evaluate(iterator, steps=steps, verbose=verbose)
    def predict(self, nn_input, batch_size=None, steps=1, verbose=0):
        # Softmax is applied here because the heads emit raw logits.
        return softmax(self.model.predict(nn_input, batch_size=batch_size, steps=steps, verbose=verbose))
    def predict_on_batch(self, nn_input):
        """Return [policy, value] as plain Python lists for one batch."""
        prediction = self.model.predict_on_batch(nn_input)
        value = np.ndarray.tolist(np.squeeze(prediction[1]))
        # np.squeeze collapses a batch of one to a bare float; re-wrap it so
        # callers always receive a list.
        if isinstance(value, float):
            value = [value]
        policy = softmax(prediction[0])
        policy = np.ndarray.tolist(policy)
        return [policy, value]
    def save(self, path):
        # Persist the full model (architecture + weights + optimizer state).
        tf.keras.models.save_model(self.model, path)
    def load(self, path):
        # Custom callables must be re-supplied to deserialize the model.
        self.model = tf.keras.models.load_model(path,
            custom_objects={"top_2_accuracy": self.top_2_accuracy, "CCELogits": self.CCELogits, "accuracy": self.accuracy,
                            "percent_error": self.percent_error})
class RelationsPolicyNetwork(Model):
    """Policy-only network over a variable-length set of per-unit feature
    vectors plus a fixed-size vector of global ("extra") features.

    Each unit is encoded by a shared dense stack and the encodings are
    mean-pooled, so a sample may contain any number of units.  The policy
    head emits raw logits; prediction methods apply softmax.
    """
    def __init__(self, units_features_size, extra_features_size, policy_shape, model_name, batch_size, learning_rate, model_path, create_network=True):
        self.model_name = model_name
        self.model_path = model_path
        self.units_features_size = units_features_size
        self.extra_features_size = extra_features_size
        self.policy_shape = policy_shape
        self.batch_size = batch_size
        self.learning_rate = learning_rate
        self.epochs = 0
        #self.checkpoint_best = tf.keras.callbacks.ModelCheckpoint(self.model_path.split(".")[0] + "_bestCA.h5", monitor='categorical_accuracy', save_best_only=True, mode='max')
        # Per-epoch checkpoint plus early stopping on validation loss; the
        # best weights are restored when training stops.
        self.checkpoint = tf.keras.callbacks.ModelCheckpoint(self.model_path)
        self.early_stop = tf.keras.callbacks.EarlyStopping(monitor="val_loss", patience=7, mode="min", restore_best_weights=True)
        if create_network:
            self.create()
    #def exponential_decay(self, epoch, lr):
    #    decay_rate = 0.70
    #    reduce_every_epochs = 1.0
    #    return lr * pow(decay_rate, math.floor((epoch+1) / reduce_every_epochs))
    def top_3_accuracy(self, y_true, y_pred):
        """Fraction of samples whose true class is in the top-3 predictions."""
        return tf.keras.metrics.top_k_categorical_accuracy(y_true, y_pred, k=3)
    def CCELogits(self, y_true, y_pred):
        """Categorical cross-entropy on raw logits (policy head is linear)."""
        return tf.keras.backend.categorical_crossentropy(y_true, y_pred, from_logits=True)
    def accuracy(self, y_true, y_pred):
        """Fraction of the batch where y_true is nonzero at argmax(y_pred):
        a top-1 hit-rate that also works for multi-hot / soft labels."""
        indices = tf.concat([tf.convert_to_tensor([[i] for i in range(self.batch_size)], dtype=tf.int64),
                tf.expand_dims(tf.keras.backend.argmax(y_pred, axis=-1), 1)], 1)
        #indices = tf.keras.backend.argmax(y_pred, axis=-1)
        nonzeros = tf.math.divide(tf.math.count_nonzero(tf.gather_nd(y_true, indices)),self.batch_size)
        return nonzeros
    def create(self):
        """Build and compile the model: shared per-unit encoder, mean pool,
        concat with extra features, deep dense trunk, linear policy head."""
        units_output_size = 512
        # shape=(None, F): variable number of units, F features each.
        units_input = tf.keras.Input(shape=(None, self.units_features_size), name="units_input")
        layer_units = layers.Dense(1024, activation='elu', name="units_layer1")(units_input)
        layer_units = layers.Dense(512, activation='elu', name="units_layer2")(layer_units)
        layer_units = layers.Dense(512, activation='elu', name="units_layer3")(layer_units)
        units_output = layers.Dense(units_output_size, activation='elu', name="units_output")(layer_units)
        # Mean over the units axis gives an order-invariant fixed-size summary.
        units_output = layers.Lambda(lambda x: tf.keras.backend.mean(x, axis=1), name="average_units_output")(units_output)
        extra_features_input = tf.keras.Input(shape=(self.extra_features_size, ), name="extra_features_input")
        concatenate_layer = layers.Concatenate()([units_output, extra_features_input])
        layer = layers.Dense(2048, activation='elu', name="state_layer1")(concatenate_layer)
        layer = layers.Dense(1024, activation='elu', name="state_layer2")(layer)
        layer = layers.Dense(512, activation='elu', name="state_layer3")(layer)
        layer = layers.Dense(512, activation='elu', name="state_layer4")(layer)
        layer = layers.Dense(256, activation='elu', name="state_layer5")(layer)
        layer = layers.Dense(256, activation='elu', name="state_layer6")(layer)
        layer = layers.Dense(256, activation='elu', name="state_layer7")(layer)
        layer = layers.Dense(128, activation='elu', name="state_layer8")(layer)
        layer = layers.Dense(128, activation='elu', name="state_layer9")(layer)
        layer = layers.Dense(128, activation='elu', name="state_layer10")(layer)
        policy = layers.Dense(self.policy_shape, activation='linear', name="policy")(layer)
        self.model = tf.keras.Model(inputs=[units_input, extra_features_input], outputs=policy)
        #self.lrs = tf.keras.callbacks.LearningRateScheduler(self.exponential_decay)
        self.model.compile(optimizer=tf.keras.optimizers.Nadam(self.learning_rate),
                loss = {"policy" : self.CCELogits},
                metrics= {"policy" : ['categorical_accuracy', self.top_3_accuracy, self.accuracy]})
    def train(self, iterator, epochs, steps_per_epoch, verbose, validation_iterator, validation_steps):
        """Fit with TensorBoard logging, per-epoch checkpoints and early
        stopping; returns the Keras History object."""
        return self.model.fit(iterator, epochs=epochs, steps_per_epoch=steps_per_epoch, verbose=verbose, validation_data=validation_iterator, validation_steps=validation_steps,
                callbacks=[CustomTensorBoard(self.model, log_dir=os.path.join(os.getcwd(), os.path.join("logs", self.model_name)), write_graph=False, batch_size=self.batch_size),
                self.checkpoint, self.early_stop]) #self.checkpoint_best])
    def evaluate(self, iterator, steps, verbose):
        # Thin wrapper; returns loss/metric values for `steps` batches.
        return self.model.evaluate(iterator, steps=steps, verbose=verbose)
    def predict(self, nn_input, batch_size=None, steps=1, verbose=0):
        # Row-wise softmax (axis=1) over the logits; converted to plain lists.
        predictions = softmax(self.model.predict(nn_input, batch_size=batch_size, steps=steps, verbose=verbose), axis=1)
        output = []
        for i in range(len(predictions)):
            output.append(np.ndarray.tolist(predictions[i]))
        return output
    def predict_on_batch(self, nn_input):
        # NOTE(review): unlike predict(), no axis is passed to softmax here —
        # confirm the default axis gives the intended per-row normalization.
        return np.ndarray.tolist(np.squeeze(softmax(self.model.predict_on_batch(nn_input))))
    def save(self, path):
        # Persist the full model (architecture + weights + optimizer state).
        tf.keras.models.save_model(self.model, path,)
    def load(self, path):
        # compile=False: the model is loaded uncompiled; call compile() below
        # before training or evaluating.
        self.model = tf.keras.models.load_model(path, compile=False,
            custom_objects={"top_3_accuracy": self.top_3_accuracy, "CCELogits": self.CCELogits, "accuracy": self.accuracy})
    def compile(self):
        # NOTE(review): uses Adam here but Nadam in create() — confirm the
        # optimizer switch after reload is intentional.
        self.model.compile(optimizer=tf.keras.optimizers.Adam(self.learning_rate),
                loss = {"policy" : self.CCELogits},
                metrics= {"policy" : ['categorical_accuracy', self.top_3_accuracy, self.accuracy]})
class RelationsValueNetwork(Model):
    """Value-only network: shared per-unit encoder, mean pool, concat with
    global features, deep dense trunk, single ReLU value output trained
    with MAE.  Accepts a variable number of units per sample."""
    def __init__(self, units_features_size, extra_features_size, value_normalize_factor, model_name, batch_size, learning_rate, model_path, create_network=True):
        # NOTE(review): value_normalize_factor is accepted but never stored
        # or used in this class — confirm whether that is intentional.
        self.model_name = model_name
        self.model_path = model_path
        self.units_features_size = units_features_size
        self.extra_features_size = extra_features_size
        self.batch_size = batch_size
        self.learning_rate = learning_rate
        self.epochs = 0
        #self.checkpoint_best = tf.keras.callbacks.ModelCheckpoint(self.model_path.split(".")[0] + "_bestCA.h5", monitor='categorical_accuracy', save_best_only=True, mode='max')
        self.checkpoint = tf.keras.callbacks.ModelCheckpoint(self.model_path)
        self.early_stop = tf.keras.callbacks.EarlyStopping(monitor="val_loss", patience=7, mode="min", restore_best_weights=True)
        if create_network:
            self.create()
    def create(self):
        """Build and compile the value model (MAE loss, Nadam optimizer)."""
        units_output_size = 512
        # shape=(None, F): variable number of units, F features each.
        units_input = tf.keras.Input(shape=(None, self.units_features_size), name="units_input")
        layer_units = layers.Dense(1024, activation='elu', name="units_layer1")(units_input)
        layer_units = layers.Dense(512, activation='elu', name="units_layer2")(layer_units)
        layer_units = layers.Dense(512, activation='elu', name="units_layer3")(layer_units)
        units_output = layers.Dense(units_output_size, activation='elu', name="units_output")(layer_units)
        # Mean over the units axis gives an order-invariant fixed-size summary.
        units_output = layers.Lambda(lambda x: tf.keras.backend.mean(x, axis=1), name="average_units_output")(units_output)
        extra_features_input = tf.keras.Input(shape=(self.extra_features_size, ), name="extra_features_input")
        concatenate_layer = layers.Concatenate()([units_output, extra_features_input])
        layer = layers.Dense(2048, activation='elu', name="state_layer1")(concatenate_layer)
        layer = layers.Dense(1024, activation='elu', name="state_layer2")(layer)
        layer = layers.Dense(512, activation='elu', name="state_layer3")(layer)
        layer = layers.Dense(512, activation='elu', name="state_layer4")(layer)
        layer = layers.Dense(256, activation='elu', name="state_layer5")(layer)
        layer = layers.Dense(256, activation='elu', name="state_layer6")(layer)
        layer = layers.Dense(256, activation='elu', name="state_layer7")(layer)
        layer = layers.Dense(128, activation='elu', name="state_layer8")(layer)
        layer = layers.Dense(128, activation='elu', name="state_layer9")(layer)
        layer = layers.Dense(128, activation='elu', name="state_layer10")(layer)
        # ReLU keeps the predicted value non-negative.
        value = layers.Dense(1, activation='relu', name="value")(layer)
        self.model = tf.keras.Model(inputs=[units_input, extra_features_input], outputs=value)
        #self.lrs = tf.keras.callbacks.LearningRateScheduler(self.exponential_decay)
        self.model.compile(optimizer=tf.keras.optimizers.Nadam(self.learning_rate),
                loss = 'mae',
                metrics = ['mae'])
    def train(self, iterator, epochs, steps_per_epoch, verbose, validation_iterator, validation_steps):
        """Fit with TensorBoard logging, per-epoch checkpoints and early
        stopping; returns the Keras History object."""
        return self.model.fit(iterator, epochs=epochs, steps_per_epoch=steps_per_epoch, verbose=verbose, validation_data=validation_iterator, validation_steps=validation_steps,
                callbacks=[CustomTensorBoard(self.model, log_dir=os.path.join(os.getcwd(), os.path.join("logs", self.model_name)), write_graph=False, batch_size=self.batch_size),
                self.checkpoint, self.early_stop]) #self.checkpoint_best])
    def evaluate(self, iterator, steps, verbose):
        # Thin wrapper; returns loss/metric values for `steps` batches.
        return self.model.evaluate(iterator, steps=steps, verbose=verbose)
    def predict(self, nn_input, batch_size=None, steps=1, verbose=0):
        # NOTE(review): softmax over a (batch, 1) value output yields all
        # ones — this looks copied from the policy network; confirm intent.
        predictions = softmax(self.model.predict(nn_input, batch_size=batch_size, steps=steps, verbose=verbose), axis=1)
        output = []
        for i in range(len(predictions)):
            output.append(np.ndarray.tolist(predictions[i]))
        return output
    def predict_on_batch(self, nn_input):
        # NOTE(review): same softmax-on-value concern as predict() above.
        return np.ndarray.tolist(np.squeeze(softmax(self.model.predict_on_batch(nn_input))))
    def save(self, path):
        # Persist the full model (architecture + weights + optimizer state).
        tf.keras.models.save_model(self.model, path,)
    def load(self, path):
        # compile=False: call compile() below before training or evaluating.
        self.model = tf.keras.models.load_model(path, compile=False)
    def compile(self):
        # NOTE(review): Adam here vs Nadam in create(), and metrics is a bare
        # string rather than a list — confirm both are intentional.
        self.model.compile(optimizer=tf.keras.optimizers.Adam(self.learning_rate),
                loss = 'mae',
                metrics = 'mae')
class RelationsPolicyAndValueNetwork(Model):
    """Two-headed (policy + value) network over per-unit features plus
    global features.  Shares the per-unit encoder / mean-pool / dense trunk
    of the single-head Relations networks, then splits into a linear policy
    head (logits) and a ReLU value head."""
    def __init__(self, units_features_size, extra_features_size, policy_shape, value_normalize_factor, model_name, batch_size, learning_rate, model_path, create_network=True):
        self.model_name = model_name
        self.model_path = model_path
        self.units_features_size = units_features_size
        self.extra_features_size = extra_features_size
        self.policy_shape = policy_shape
        self.batch_size = batch_size
        self.learning_rate = learning_rate
        self.epochs = 0
        #self.checkpoint_best = tf.keras.callbacks.ModelCheckpoint(self.model_path.split(".")[0] + "_bestCA.h5", monitor='categorical_accuracy', save_best_only=True, mode='max')
        self.checkpoint = tf.keras.callbacks.ModelCheckpoint(self.model_path)
        self.early_stop = tf.keras.callbacks.EarlyStopping(monitor="val_loss", patience=7, mode="min", restore_best_weights=True)
        # Divisor used by MAEWithScalar to bring the value loss onto a scale
        # comparable with the policy loss.
        self.value_loss_scale = tf.keras.backend.variable(value_normalize_factor, dtype=tf.float32)
        if create_network:
            self.create()
    def top_3_accuracy(self, y_true, y_pred):
        """Fraction of samples whose true class is in the top-3 predictions."""
        return tf.keras.metrics.top_k_categorical_accuracy(y_true, y_pred, k=3)
    def CCELogits(self, y_true, y_pred):
        """Categorical cross-entropy on raw logits (policy head is linear)."""
        return tf.keras.backend.categorical_crossentropy(y_true, y_pred, from_logits=True)
    def accuracy(self, y_true, y_pred):
        """Fraction of the batch where y_true is nonzero at argmax(y_pred):
        a top-1 hit-rate that also works for multi-hot / soft labels."""
        indices = tf.concat([tf.convert_to_tensor([[i] for i in range(self.batch_size)], dtype=tf.int64),
                tf.expand_dims(tf.keras.backend.argmax(y_pred, axis=-1), 1)], 1)
        #indices = tf.keras.backend.argmax(y_pred, axis=-1)
        nonzeros = tf.math.divide(tf.math.count_nonzero(tf.gather_nd(y_true, indices)),self.batch_size)
        return nonzeros
    def MAEWithScalar(self, y_true, y_pred):
        """MAE divided by value_loss_scale — keeps the value-head loss on a
        scale comparable with the policy loss."""
        return tf.math.divide(tf.keras.losses.MAE(y_true, y_pred), self.value_loss_scale)
    def MSEWithScaling(self, y_true, y_pred):
        """MSE with both sides scaled down by 30000 (unused in create()).
        NOTE(review): 30000 is a hard-coded scale — confirm its origin."""
        return tf.keras.losses.MSE(tf.math.divide(y_true, 30000),
                tf.math.divide(y_pred, 30000))
    def create(self):
        """Build and compile the two-headed model."""
        units_output_size = 512
        # shape=(None, F): variable number of units, F features each.
        units_input = tf.keras.Input(shape=(None, self.units_features_size), name="units_input")
        layer_units = layers.Dense(1024, activation='elu', name="units_layer1")(units_input)
        layer_units = layers.Dense(512, activation='elu', name="units_layer2")(layer_units)
        layer_units = layers.Dense(512, activation='elu', name="units_layer3")(layer_units)
        units_output = layers.Dense(units_output_size, activation='elu', name="units_output")(layer_units)
        # Mean over the units axis gives an order-invariant fixed-size summary.
        units_output = layers.Lambda(lambda x: tf.keras.backend.mean(x, axis=1), name="average_units_output")(units_output)
        extra_features_input = tf.keras.Input(shape=(self.extra_features_size, ), name="extra_features_input")
        concatenate_layer = layers.Concatenate()([units_output, extra_features_input])
        layer = layers.Dense(2048, activation='elu', name="state_layer1")(concatenate_layer)
        layer = layers.Dense(1024, activation='elu', name="state_layer2")(layer)
        layer = layers.Dense(512, activation='elu', name="state_layer3")(layer)
        layer = layers.Dense(512, activation='elu', name="state_layer4")(layer)
        layer = layers.Dense(256, activation='elu', name="state_layer5")(layer)
        layer = layers.Dense(256, activation='elu', name="state_layer6")(layer)
        layer = layers.Dense(256, activation='elu', name="state_layer7")(layer)
        layer = layers.Dense(128, activation='elu', name="state_layer8")(layer)
        layer = layers.Dense(128, activation='elu', name="state_layer9")(layer)
        layer = layers.Dense(128, activation='elu', name="state_layer10")(layer)
        # Separate small towers per head on top of the shared trunk.
        policy_layer = layers.Dense(128, activation='elu', name="policy_layer1")(layer)
        policy_layer = layers.Dense(128, activation='elu', name="policy_layer2")(policy_layer)
        policy = layers.Dense(self.policy_shape, activation='linear', name="policy")(policy_layer)
        value_layer = layers.Dense(64, activation='elu', name="value_layer1")(layer)
        value_layer = layers.Dense(64, activation='elu', name="value_layer2")(value_layer)
        # ReLU keeps the predicted value non-negative.
        value = layers.Dense(1, activation='relu', name="value")(value_layer)
        self.model = tf.keras.Model(inputs=[units_input, extra_features_input], outputs=[policy, value])
        #self.lrs = tf.keras.callbacks.LearningRateScheduler(self.exponential_decay)
        self.model.compile(optimizer=tf.keras.optimizers.Nadam(self.learning_rate),
                loss = {"policy" : self.CCELogits,
                    "value" : self.MAEWithScalar},
                metrics= {"policy" : ['categorical_accuracy', self.top_3_accuracy, self.accuracy],
                    "value" : ['mae', 'msle']})
    def train(self, iterator, epochs, steps_per_epoch, verbose, validation_iterator, validation_steps):
        """Fit with TensorBoard logging, per-epoch checkpoints and early
        stopping; returns the Keras History object."""
        return self.model.fit(iterator, epochs=epochs, steps_per_epoch=steps_per_epoch, verbose=verbose, validation_data=validation_iterator, validation_steps=validation_steps,
                callbacks=[CustomTensorBoard(self.model, log_dir=os.path.join(os.getcwd(), os.path.join("logs", self.model_name)), write_graph=False, batch_size=self.batch_size),
                self.checkpoint, self.early_stop]) #self.checkpoint_best])
    def evaluate(self, iterator, steps, verbose):
        # Thin wrapper; returns loss/metric values for `steps` batches.
        return self.model.evaluate(iterator, steps=steps, verbose=verbose)
    def predict(self, nn_input, batch_size=None, steps=1, verbose=0):
        """Return, per sample, [softmaxed policy, raw value] as lists."""
        prediction = self.model.predict(nn_input, batch_size=batch_size, steps=steps, verbose=verbose)
        softmaxed_values = softmax(prediction[0], axis=1)
        output = []
        for i in range(len(prediction[0])):
            output.append([np.ndarray.tolist(softmaxed_values[i]), np.ndarray.tolist(prediction[1][i])])
        return output
    def predict_on_batch(self, nn_input):
        # NOTE(review): this model has TWO outputs, so predict_on_batch
        # returns a [policy, value] pair; applying softmax to that pair looks
        # wrong (predict() above handles the pair correctly) — confirm this
        # method is actually used.
        return np.ndarray.tolist(np.squeeze(softmax(self.model.predict_on_batch(nn_input))))
    def save(self, path):
        # Persist the full model (architecture + weights + optimizer state).
        tf.keras.models.save_model(self.model, path,)
    def load(self, path):
        # compile=False: call compile() below before training or evaluating.
        self.model = tf.keras.models.load_model(path, compile=False,
            custom_objects={"top_3_accuracy": self.top_3_accuracy, "CCELogits": self.CCELogits, "accuracy": self.accuracy
                , "MAEWithScalar": self.MAEWithScalar})
    def compile(self):
        """Re-compile after a compile=False load, mirroring create()."""
        self.model.compile(optimizer=tf.keras.optimizers.Nadam(self.learning_rate),
                loss = {"policy" : self.CCELogits,
                    "value" : self.MAEWithScalar},
                metrics= {"policy" : ['categorical_accuracy', self.top_3_accuracy, self.accuracy],
                    "value" : ['mae', 'msle']})
| 53.298429
| 195
| 0.674034
| 3,907
| 30,540
| 5.051446
| 0.060148
| 0.042866
| 0.043778
| 0.041498
| 0.918069
| 0.910418
| 0.900841
| 0.89243
| 0.878851
| 0.868312
| 0
| 0.016217
| 0.200426
| 30,540
| 572
| 196
| 53.391608
| 0.792006
| 0.075115
| 0
| 0.786765
| 0
| 0
| 0.054774
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.181373
| false
| 0
| 0.017157
| 0.080882
| 0.328431
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
7be05eb610b6b1b99369bc0bb423c4e716a229c7
| 104
|
py
|
Python
|
tests/test_polyshape.py
|
abey79/lines
|
09fbd84f9eaaba40d24b07227e8c95c0493a75c2
|
[
"MIT"
] | 39
|
2019-10-23T09:19:34.000Z
|
2022-02-16T21:44:12.000Z
|
tests/test_polyshape.py
|
abey79/lines
|
09fbd84f9eaaba40d24b07227e8c95c0493a75c2
|
[
"MIT"
] | 2
|
2020-11-13T14:06:02.000Z
|
2021-09-29T08:18:44.000Z
|
tests/test_polyshape.py
|
abey79/lines
|
09fbd84f9eaaba40d24b07227e8c95c0493a75c2
|
[
"MIT"
] | 2
|
2020-11-06T22:21:00.000Z
|
2021-06-09T18:40:02.000Z
|
def test_transform_identity():
    """Placeholder: identity-transform behaviour is not covered yet."""
    # TODO: implement
def test_transform_rotation():
    """Placeholder: rotation-transform behaviour is not covered yet."""
    # TODO: implement
| 11.555556
| 30
| 0.653846
| 12
| 104
| 5.333333
| 0.583333
| 0.21875
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.269231
| 104
| 8
| 31
| 13
| 0.842105
| 0.086538
| 0
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.125
| 0
| 1
| 0.5
| true
| 0.5
| 0
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 8
|
d035f09f377a472f6d08cceaf7e1debfa748e7ee
| 1,551
|
py
|
Python
|
netbox_aws/choices.py
|
lampwins/interop2020-netbox-plugins
|
e983b1875c58230228448deca6b129be9fc40c1e
|
[
"Apache-2.0"
] | 4
|
2020-10-06T19:00:01.000Z
|
2021-04-26T19:37:00.000Z
|
netbox_aws/choices.py
|
lampwins/interop2020-netbox-plugins
|
e983b1875c58230228448deca6b129be9fc40c1e
|
[
"Apache-2.0"
] | null | null | null |
netbox_aws/choices.py
|
lampwins/interop2020-netbox-plugins
|
e983b1875c58230228448deca6b129be9fc40c1e
|
[
"Apache-2.0"
] | 1
|
2021-04-16T15:24:24.000Z
|
2021-04-16T15:24:24.000Z
|
from utilities.choices import ChoiceSet
class VPCRegionChoices(ChoiceSet):
    """NetBox ChoiceSet of AWS VPC regions.

    Each constant's stored value doubles as its display label, so CHOICES
    pairs every constant with the same string.  Values are persisted in the
    database — do not change existing strings.
    """
    REGION_US_EAST_2 = "US-East(Ohio)"
    REGION_US_EAST_1 = "US-East(N-Virginia)"
    REGION_US_WEST_1 = "US-West(N-California)"
    REGION_US_WEST_2 = "US-West-(Oregon)"
    REGION_AP_NORTHEAST_1 = "Asia-Pacific-(Tokyo)"
    REGION_AP_NORTHEAST_2 = "Asia-Pacific-(Seoul)"
    REGION_AP_SOUTH_1 = "Asia-Pacific-(Mumbai)"
    REGION_AP_SOUTHEAST_1 = "Asia-Pacific-(Singapore)"
    REGION_AP_SOUTHEAST_2 = "Asia-Pacific-(Sydney)"
    REGION_CA_CENTRAL_1 = "Canada-(Central)"
    REGION_EU_CENTRAL_1 = "EU-(Frankfurt)"
    REGION_EU_WEST_1 = "EU-(Ireland)"
    REGION_EU_WEST_2 = "EU-(London)"
    REGION_EU_WEST_3 = "EU-(Paris)"
    REGION_SA_EAST_1 = "South-America-(São-Paulo)"
    # (value, label) pairs consumed by NetBox form/model fields.
    CHOICES = (
        (REGION_US_EAST_2, "US-East(Ohio)"),
        (REGION_US_EAST_1, "US-East(N-Virginia)"),
        (REGION_US_WEST_1, "US-West(N-California)"),
        (REGION_US_WEST_2, "US-West-(Oregon)"),
        (REGION_AP_NORTHEAST_1, "Asia-Pacific-(Tokyo)"),
        (REGION_AP_NORTHEAST_2, "Asia-Pacific-(Seoul)"),
        (REGION_AP_SOUTH_1, "Asia-Pacific-(Mumbai)"),
        (REGION_AP_SOUTHEAST_1, "Asia-Pacific-(Singapore)"),
        (REGION_AP_SOUTHEAST_2, "Asia-Pacific-(Sydney)"),
        (REGION_CA_CENTRAL_1, "Canada-(Central)"),
        (REGION_EU_CENTRAL_1, "EU-(Frankfurt)"),
        (REGION_EU_WEST_1, "EU-(Ireland)"),
        (REGION_EU_WEST_2, "EU-(London)"),
        (REGION_EU_WEST_3, "EU-(Paris)"),
        (REGION_SA_EAST_1, "South-America-(São-Paulo)"),
    )
| 39.769231
| 60
| 0.65893
| 215
| 1,551
| 4.334884
| 0.195349
| 0.085837
| 0.077253
| 0.027897
| 0.922747
| 0.922747
| 0.922747
| 0.922747
| 0.922747
| 0.922747
| 0
| 0.023548
| 0.178594
| 1,551
| 38
| 61
| 40.815789
| 0.708006
| 0
| 0
| 0
| 0
| 0
| 0.339136
| 0.144423
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.029412
| 0
| 0.529412
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 7
|
d0a7be0a378d5903371dc911d082c2ade426a942
| 3,637
|
py
|
Python
|
Arase/Tools/FitKappaDist.py
|
mattkjames7/Arase
|
996167be35a13bbb1fdddfbe75e3a06d124b1d25
|
[
"MIT"
] | null | null | null |
Arase/Tools/FitKappaDist.py
|
mattkjames7/Arase
|
996167be35a13bbb1fdddfbe75e3a06d124b1d25
|
[
"MIT"
] | 1
|
2021-06-10T22:51:09.000Z
|
2021-06-10T22:51:09.000Z
|
Arase/Tools/FitKappaDist.py
|
mattkjames7/Arase
|
996167be35a13bbb1fdddfbe75e3a06d124b1d25
|
[
"MIT"
] | null | null | null |
import numpy as np
from .KappaDist import KappaDist,KappaDistCts
from .KappaDist import KappaDistE,KappaDistCtsE
from scipy.optimize import minimize
def _GetMisfitFunc(v,f,mass):
    """Return a closure computing the RMS misfit, in log10 space, between
    measured phase-space density f(v) and a kappa distribution with
    parameters (n, T, K), suitable for scipy.optimize.minimize."""
    def _misfit(params):
        dens, temp, kappa = params
        model = KappaDist(dens, v, temp, mass, kappa)
        residual = np.log10(f) - np.log10(model)
        return np.sqrt(np.sum(residual**2)/f.size)
    return _misfit
def FitKappaDist(v,f,n0,T0,mass,Verbose=False,MaxIter=None):
    """Fit a kappa distribution to f(v) with Nelder-Mead, starting from
    density n0, temperature T0 and kappa=5.0.

    Returns (n, T, kappa, success); (-1, -1, -1, False) when fewer than 3
    finite, positive samples are available.
    """
    # Keep only finite, strictly positive densities (log10 is taken later).
    usable = np.where(np.isfinite(f) & (f > 0))[0]
    if usable.size < 3.0:
        return -1, -1, -1, False
    misfit = _GetMisfitFunc(v[usable], f[usable], mass)
    opt = {} if MaxIter is None else {'maxiter': MaxIter}
    res = minimize(misfit, [n0, T0, 5.0], method='nelder-mead', options=opt)
    n, t, k = res.x
    if Verbose and not res.success:
        print('Warning - potentially bad Kappa fit')
        print(res.message)
    return n, t, k, res.success
def _GetMisfitFuncCts(v,C,mass,dOmega=1.0,Eff=1.0,nSpec=1.0,Tau=1.0,g=1.0):
    """Return a closure computing the RMS misfit between measured counts C
    and the counts predicted by KappaDistCts at speeds v, for minimize()."""
    def _misfit(params):
        dens, temp, kappa = params
        predicted = KappaDistCts(dens, v, temp, mass, kappa, Eff, dOmega, nSpec, Tau, g)
        residual = C - predicted
        return np.sqrt(np.sum(residual**2)/C.size)
    return _misfit
def FitKappaDistCts(v,Counts,n0,T0,mass,dOmega=1.0,Eff=1.0,nSpec=1.0,Tau=1.0,g=1.0,Verbose=False,MaxIter=None):
    """Fit a kappa distribution directly to instrument counts with
    Nelder-Mead, starting from (n0, T0, kappa=5.0).

    Returns (n, T, kappa, success); (-1, -1, -1, False) when fewer than 3
    usable bins exist.
    """
    # Zero-fill non-finite counts so they cannot poison the fit.
    Cs = np.copy(Counts)
    Cs[np.where(np.isfinite(Counts) == False)[0]] = 0.0
    # NOTE(review): after zero-filling, Cs >= 0 holds everywhere, so this
    # mask keeps every bin (including the zero-filled ones) — confirm
    # whether a strict > 0 was intended.
    usable = np.where((Cs >= 0.0))[0]
    if usable.size < 3.0:
        return -1, -1, -1, False
    misfit = _GetMisfitFuncCts(v[usable], Cs[usable], mass, dOmega, Eff, nSpec, Tau, g)
    opt = {} if MaxIter is None else {'maxiter': MaxIter}
    res = minimize(misfit, [n0, T0, 5.0], method='nelder-mead', options=opt)
    n, t, k = res.x
    if Verbose and not res.success:
        print('Warning - potentially bad Kappa fit')
        print(res.message)
    return n, t, k, res.success
def _GetMisfitFuncE(E,f,mass):
    """Energy-domain counterpart of _GetMisfitFunc: returns a closure
    computing the RMS log10-space misfit against KappaDistE(E)."""
    def _misfit(params):
        dens, temp, kappa = params
        model = KappaDistE(dens, E, temp, mass, kappa)
        residual = np.log10(f) - np.log10(model)
        return np.sqrt(np.sum(residual**2)/f.size)
    return _misfit
def FitKappaDistE(E,f,n0,T0,mass,Verbose=False,MaxIter=None):
    """Energy-domain counterpart of FitKappaDist: fit a kappa distribution
    to f(E) with Nelder-Mead from (n0, T0, kappa=5.0).

    Returns (n, T, kappa, success); (-1, -1, -1, False) when fewer than 3
    finite, positive samples are available.
    """
    usable = np.where(np.isfinite(f) & (f > 0))[0]
    if usable.size < 3.0:
        return -1, -1, -1, False
    misfit = _GetMisfitFuncE(E[usable], f[usable], mass)
    opt = {} if MaxIter is None else {'maxiter': MaxIter}
    res = minimize(misfit, [n0, T0, 5.0], method='nelder-mead', options=opt)
    n, t, k = res.x
    if Verbose and not res.success:
        print('Warning - potentially bad Kappa fit')
        print(res.message)
    return n, t, k, res.success
def _GetMisfitFuncCtsE(E,C,mass,dOmega=1.0,Eff=1.0,nSpec=1.0,Tau=1.0,g=1.0):
    """Energy-domain counterpart of _GetMisfitFuncCts: RMS misfit closure
    against KappaDistCtsE(E)."""
    def _misfit(params):
        dens, temp, kappa = params
        predicted = KappaDistCtsE(dens, E, temp, mass, kappa, Eff, dOmega, nSpec, Tau, g)
        residual = C - predicted
        return np.sqrt(np.sum(residual**2)/C.size)
    return _misfit
def FitKappaDistCtsE(E,Counts,n0,T0,mass,dOmega=1.0,Eff=1.0,nSpec=1.0,Tau=1.0,g=1.0,Verbose=False,MaxIter=None):
    """Energy-domain counterpart of FitKappaDistCts: fit a kappa
    distribution to counts vs energy with Nelder-Mead.

    Returns (n, T, kappa, success); (-1, -1, -1, False) when fewer than 3
    usable bins exist.
    """
    # Zero-fill non-finite counts so they cannot poison the fit.
    Cs = np.copy(Counts)
    Cs[np.where(np.isfinite(Counts) == False)[0]] = 0.0
    # NOTE(review): Cs >= 0 keeps every bin after zero-filling — confirm
    # whether a strict > 0 was intended.
    usable = np.where((Cs >= 0.0))[0]
    if usable.size < 3.0:
        return -1, -1, -1, False
    misfit = _GetMisfitFuncCtsE(E[usable], Cs[usable], mass, dOmega, Eff, nSpec, Tau, g)
    opt = {} if MaxIter is None else {'maxiter': MaxIter}
    res = minimize(misfit, [n0, T0, 5.0], method='nelder-mead', options=opt)
    n, t, k = res.x
    if Verbose and not res.success:
        print('Warning - potentially bad Kappa fit')
        print(res.message)
    return n, t, k, res.success
| 24.086093
| 112
| 0.649986
| 679
| 3,637
| 3.469809
| 0.129602
| 0.016978
| 0.017827
| 0.020374
| 0.845925
| 0.84253
| 0.84253
| 0.84253
| 0.841256
| 0.841256
| 0
| 0.037587
| 0.166071
| 3,637
| 150
| 113
| 24.246667
| 0.739202
| 0.078086
| 0
| 0.803922
| 0
| 0
| 0.063454
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.117647
| false
| 0
| 0.039216
| 0
| 0.313725
| 0.078431
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
190517b24d3ead7752c44ae48efcf168ac4cbb3d
| 86
|
py
|
Python
|
hyperspherical_vae_pytorch/__init__.py
|
pimdh/svae-temp
|
49d3974e66abc761312432f28ae57fe714d17451
|
[
"MIT"
] | 3
|
2018-06-10T00:15:50.000Z
|
2021-12-08T11:07:59.000Z
|
hyperspherical_vae_pytorch/__init__.py
|
pimdh/svae-temp
|
49d3974e66abc761312432f28ae57fe714d17451
|
[
"MIT"
] | null | null | null |
hyperspherical_vae_pytorch/__init__.py
|
pimdh/svae-temp
|
49d3974e66abc761312432f28ae57fe714d17451
|
[
"MIT"
] | null | null | null |
import hyperspherical_vae_pytorch.ops
import hyperspherical_vae_pytorch.distributions
| 28.666667
| 47
| 0.930233
| 10
| 86
| 7.6
| 0.6
| 0.526316
| 0.605263
| 0.789474
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.046512
| 86
| 2
| 48
| 43
| 0.926829
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
efb37752dd7214c7300db5ebcde51f141fa275eb
| 79,605
|
py
|
Python
|
GR/bssnUtils.py
|
paralab/SymPyGR
|
3aa4164a64773b9015b83744cd104550ae465e8a
|
[
"MIT"
] | 7
|
2019-08-29T20:41:39.000Z
|
2022-03-26T17:47:16.000Z
|
GR/bssnUtils.py
|
paralab/SymPyGR
|
3aa4164a64773b9015b83744cd104550ae465e8a
|
[
"MIT"
] | 2
|
2019-02-01T22:20:48.000Z
|
2019-05-24T20:39:33.000Z
|
GR/bssnUtils.py
|
paralab/SymPyGR
|
3aa4164a64773b9015b83744cd104550ae465e8a
|
[
"MIT"
] | 1
|
2018-12-18T19:36:13.000Z
|
2018-12-18T19:36:13.000Z
|
##########################################################################
# author: Milinda Fernando
# email: milinda@cs.utah.edu,
# date: 08/13/2018
#
# python module to generate bssn derivative calls and support function
# calls to call the generated code by bssn.py
# (python code for perl script written by David)
#
##########################################################################
# Note: gbx, gby, gbz are not needed for the RHS, but the derivatives
# are needed for the boundary conditions. The allocation of derivatives
# and calls to derivative routines for the boundaries uses the functions
# required for the rhs, so I include them here.
from collections import namedtuple
from datetime import datetime
from time import strftime
import dendro as dendro
import math as math
#import bssn_stages as bssn_stages
import bssn as bssn
import sympy as sympy
import re as re
import os as os
import cudaSharedMemManager as SharedMemManager
## ==== BSSN GPU code generation paramerters
D = ["alpha", "beta0", "beta1", "beta2",
"B0", "B1", "B2",
"chi", "Gt0", "Gt1", "Gt2", "K",
"gt0", "gt1", "gt2", "gt3", "gt4", "gt5",
"At0", "At1", "At2", "At3", "At4", "At5" ]
# variable names, to access the 2D array.
VAR_ENUM=["cuda::VAR::U_ALPHA",
"cuda::VAR::U_BETA0",
"cuda::VAR::U_BETA1",
"cuda::VAR::U_BETA2",
"cuda::VAR::U_B0",
"cuda::VAR::U_B1",
"cuda::VAR::U_B2",
"cuda::VAR::U_CHI",
"cuda::VAR::U_GT0",
"cuda::VAR::U_GT1",
"cuda::VAR::U_GT2",
"cuda::VAR::U_K",
"cuda::VAR::U_SYMGT0",
"cuda::VAR::U_SYMGT1",
"cuda::VAR::U_SYMGT2",
"cuda::VAR::U_SYMGT3",
"cuda::VAR::U_SYMGT4",
"cuda::VAR::U_SYMGT5",
"cuda::VAR::U_SYMAT0",
"cuda::VAR::U_SYMAT1",
"cuda::VAR::U_SYMAT2",
"cuda::VAR::U_SYMAT3",
"cuda::VAR::U_SYMAT4",
"cuda::VAR::U_SYMAT5"]
# enum to symbolic input vars dictionary
VAR_ENUM_TO_INPUT_SYM = { "alpha" : "cuda::VAR::U_ALPHA",
"beta0" : "cuda::VAR::U_BETA0",
"beta1" : "cuda::VAR::U_BETA1",
"beta2" : "cuda::VAR::U_BETA2",
"B0" : "cuda::VAR::U_B0",
"B1" : "cuda::VAR::U_B1",
"B2" : "cuda::VAR::U_B2",
"chi" : "cuda::VAR::U_CHI",
"Gt0" : "cuda::VAR::U_GT0",
"Gt1" : "cuda::VAR::U_GT1",
"Gt2" : "cuda::VAR::U_GT2",
"K" : "cuda::VAR::U_K",
"gt0" : "cuda::VAR::U_SYMGT0",
"gt1" : "cuda::VAR::U_SYMGT1",
"gt2" : "cuda::VAR::U_SYMGT2",
"gt3" : "cuda::VAR::U_SYMGT3",
"gt4" : "cuda::VAR::U_SYMGT4",
"gt5" : "cuda::VAR::U_SYMGT5",
"At0" :"cuda::VAR::U_SYMAT0",
"At1" :"cuda::VAR::U_SYMAT1",
"At2" :"cuda::VAR::U_SYMAT2",
"At3" :"cuda::VAR::U_SYMAT3",
"At4" :"cuda::VAR::U_SYMAT4",
"At5" :"cuda::VAR::U_SYMAT5"
}
# enum to symbolic output vars dictionary
VAR_ENUM_TO_OUTPUT_SYM={ "a_rhs" : "cuda::VAR::U_ALPHA",
"b_rhs0" : "cuda::VAR::U_BETA0",
"b_rhs1" : "cuda::VAR::U_BETA1",
"b_rhs2" : "cuda::VAR::U_BETA2",
"B_rhs0" : "cuda::VAR::U_B0",
"B_rhs1" : "cuda::VAR::U_B1",
"B_rhs2" : "cuda::VAR::U_B2",
"chi_rhs" : "cuda::VAR::U_CHI",
"Gt_rhs0" : "cuda::VAR::U_GT0",
"Gt_rhs1" : "cuda::VAR::U_GT1",
"Gt_rhs2" : "cuda::VAR::U_GT2",
"K_rhs" : "cuda::VAR::U_K",
"gt_rhs00" : "cuda::VAR::U_SYMGT0",
"gt_rhs01" : "cuda::VAR::U_SYMGT1",
"gt_rhs02" : "cuda::VAR::U_SYMGT2",
"gt_rhs11" : "cuda::VAR::U_SYMGT3",
"gt_rhs12" : "cuda::VAR::U_SYMGT4",
"gt_rhs22" : "cuda::VAR::U_SYMGT5",
"At_rhs00" :"cuda::VAR::U_SYMAT0",
"At_rhs01" :"cuda::VAR::U_SYMAT1",
"At_rhs02" :"cuda::VAR::U_SYMAT2",
"At_rhs11" :"cuda::VAR::U_SYMAT3",
"At_rhs12" :"cuda::VAR::U_SYMAT4",
"At_rhs22" :"cuda::VAR::U_SYMAT5"
}
# custom functions for code generation in cse.
custom_functions = {'grad': 'grad', 'grad2': 'grad2', 'agrad': 'agrad', 'kograd': 'kograd'}
# second derivs required for RHS
DD = ["gt0", "gt1", "gt2", "gt3", "gt4", "gt5", "chi",
"alpha", "beta0", "beta1", "beta2" ]
# advective derivatives
AD = ["gt0", "gt1", "gt2", "gt3", "gt4", "gt5",
"At0", "At1", "At2", "At3", "At4", "At5",
"alpha", "beta0", "beta1", "beta2", "chi", "Gt0", "Gt1", "Gt2", "K",
"B0", "B1", "B2"]
KO=AD
# first derivs required for constraints--no gauge variables
CONSTRAINT_D = [ "chi", "Gt0", "Gt1", "Gt2", "K",
"gt0", "gt1", "gt2", "gt3", "gt4", "gt5",
"At0", "At1", "At2", "At3", "At4", "At5" ]
# second derivs required for constraints--no gauge variables
CONSTRAINT_DD = ["gt0", "gt1", "gt2", "gt3", "gt4", "gt5", "chi"]
PREFIX_D = ["grad_0_", "grad_1_", "grad_2_"]
PREFIX_AD = ["agrad_0_", "agrad_1_", "agrad_2_"]
PREFIX_KOD = ["kograd_0_", "kograd_1_", "kograd_2_"]
PREFIX_DD = ["grad2_0_0_", "grad2_0_1_", "grad2_0_2_", "grad2_1_1_", "grad2_1_2_", "grad2_2_2_"]
# first derivative in i direction
FUNC_D_I=[]
for f in D:
for p in PREFIX_D:
FUNC_D_I.append(p+f)
# second derivative in ij direction
FUNC_D_IJ=[]
for f in DD:
for p in PREFIX_DD:
FUNC_D_IJ.append(p+f)
#advective derivative in i direction
FUNC_AD_I=[]
for f in AD:
for p in PREFIX_AD:
FUNC_AD_I.append(p+f)
#Kriess-Oliger derivative in i direction
FUNC_KOD_I=[]
for f in D:
for p in PREFIX_KOD:
FUNC_KOD_I.append(p+f)
# cuda utility functions
## Note all the device vars which is global starts with __
FUNC_LOAD_VAR="cuda::__loadGlobalToShared3D<double>"
FUNC_STORE_VAR="cuda::__storeSharedToGlobal3D<double>"
FUNC_SIGN_EXT="cuda::__extractSign3D<double>"
VAR_UNZIP_IN="__unzipInVar"
VAR_UNZIP_OUT="__unzipOutVar"
## shift vector block shared variables to compute advective derivs
VAR_BETA0="beta0"
VAR_BETA1="beta1"
VAR_BETA2="beta2"
VAR_BETA0_BOOL="beta0_bool"
VAR_BETA1_BOOL="beta1_bool"
VAR_BETA2_BOOL="beta2_bool"
# shared input variable name for derivative kernels
VAR_IN_SHARED="unzipVarInShared"
# shared output variable name for derivative kernels
VAR_OUT_SHARED_0="unzipVarOutShared0"
VAR_OUT_SHARED_1="unzipVarOutShared1"
# block ids
VAR_BLK_ID_X="blockIdx.x"
VAR_BLK_ID_Y="blockIdx.y"
VAR_BLK_ID_Z="blockIdx.z"
# thread ids
VAR_TRD_ID_X="threadIdx.x"
VAR_TRD_ID_Y="threadIdx.y"
VAR_TRD_ID_Z="threadIdx.z"
# block dim
VAR_BLK_DIM_X="blockDim.x"
VAR_BLK_DIM_Y="blockDim.y"
VAR_BLK_DIM_Z="blockDim.z"
# x,y,z bounds of the time i_lm[0] is the min and i_lm[1] is the max.
VAR_TILE_SZ="tile_sz"
VAR_DENDRO_BLK_ALIGNED_SZ="alignedSz"
VAR_TILE_LIMITS="ijk_lm"
VAR_TILE_LIMITS_STORE="tile_lm"
TYPE_DERIV_STRUCT="MemoryDerivs"
TYPE_BLK_CU="cuda::_Block"
TYPE_BSSN_COMP_PARS="BSSNComputeParams"
##
# generate the code to allocate derivative memory variables (allocated size unzip_dof)
##
def cudaDerivAllocDeallocHeader(fname, headers=None):
    """Generate the C++ header declaring the derivative workspace struct.

    Writes ``fname`` containing the ``cuda::MemoryDerivs`` struct with one
    ``double*`` member per generated derivative variable (first, second,
    advective and Kreiss-Oliger derivatives), plus declarations of the
    allocate/deallocate member functions.

    Args:
        fname: output header path; its basename builds the include guard.
        headers: optional list of extra header names to ``#include``.
    """
    # avoid the mutable-default-argument pitfall of the original headers=[]
    if headers is None:
        headers = []
    # all four derivative families get an identically-declared pointer member;
    # concatenation preserves the original declaration order
    deriv_vars = FUNC_D_I + FUNC_D_IJ + FUNC_AD_I + FUNC_KOD_I
    with open(fname, 'w') as ofile:
        ofile.write("// generated by Dendro-GR SymPyGR code gernation framework\n")
        ofile.write("//date: "+str(datetime.now().strftime('%Y-%m-%d %H:%M:%S'))+"\n")
        # include guard derived from the file name, e.g. FOO_H for foo.h
        fileName, fileExt = os.path.splitext(os.path.basename(fname))
        ofile.write("#ifndef "+fileName.upper()+"_"+fileExt[1:].upper()+" \n")
        ofile.write("#define "+fileName.upper()+"_"+fileExt[1:].upper()+" \n")
        ofile.write("\n")
        ofile.write("#include <iostream>\n")
        ofile.write("#include \"cuda_runtime.h\"\n")
        for header in headers:
            ofile.write("#include \""+header+"\"\n")
        ofile.write("\n")
        ofile.write("namespace cuda {\n")
        ofile.write("\tstruct "+TYPE_DERIV_STRUCT+"{\n\n")
        ofile.write("/**@brief upper bound of the block size processed by the GPU*/\n")
        ofile.write("\t unsigned int __maxBlkSz;\n")
        ofile.write("/**@brief number of streams the kernel get executed*/\n")
        ofile.write("\t unsigned int __numStream;\n")
        ofile.write("/**@brief size per stream*/\n")
        ofile.write("\t unsigned int __szPerStream;\n")
        for deriv in deriv_vars:
            ofile.write("\t double* __"+deriv+";\n")
        ofile.write("\n")
        ofile.write("\n")
        ofile.write("\t/**@brief memory allocation for deriv variables */\n")
        ofile.write("\tvoid allocateDerivMemory(unsigned int maxBlkSz, unsigned int numSM,unsigned int numStreams=1); \n")
        ofile.write("\n")
        ofile.write("\n")
        ofile.write("\t/**@brief memory deallocation for deriv variables */\n")
        ofile.write("\tvoid deallocateDerivMemory(); \n")
        ofile.write("\n")
        ofile.write("\n")
        ofile.write("\t};\n\n")
        ofile.write("}// end of namespace cuda\n")
        ofile.write("\n")
        ofile.write("#endif\n")
    # the `with` block closes the file; the original's explicit close() was redundant
##
# generate the code to allocate derivative memory variables (allocated size unzip_dof)
##
def cudaDerivAllocDeallocSource(fname, headers=None):
    """Generate the source file defining the workspace alloc/dealloc methods.

    Writes ``fname`` with the definitions of
    ``cuda::MemoryDerivs::allocateDerivMemory`` (one ``cudaMalloc`` per
    derivative variable, each of size ``sizeof(double)*numSM*maxBlkSz*numStreams``)
    and ``cuda::MemoryDerivs::deallocateDerivMemory`` (matching ``cudaFree``
    calls).

    Args:
        fname: output source file path.
        headers: optional list of extra header names to ``#include``.
    """
    # avoid the mutable-default-argument pitfall of the original headers=[]
    if headers is None:
        headers = []
    # all four derivative families are allocated/freed identically;
    # concatenation preserves the original emission order
    deriv_vars = FUNC_D_I + FUNC_D_IJ + FUNC_AD_I + FUNC_KOD_I
    with open(fname, 'w') as ofile:
        ofile.write("// generated by Dendro-GR SymPyGR code gernation framework\n")
        ofile.write("//date: "+str(datetime.now().strftime('%Y-%m-%d %H:%M:%S'))+"\n")
        for header in headers:
            ofile.write("#include \""+header+"\"\n")
        ofile.write("\n")
        ofile.write("namespace cuda {\n")
        ofile.write("\n")
        ofile.write("\n")
        ofile.write("\t/**@brief memory allocation for deriv variables */\n")
        # NOTE(review): the emitted definition qualifies the method with
        # `cuda::` while already inside `namespace cuda` — legal but
        # redundant C++; preserved byte-for-byte.
        ofile.write("\tvoid cuda::"+TYPE_DERIV_STRUCT+"::allocateDerivMemory(unsigned int maxBlkSz, unsigned int numSM,unsigned int numStreams){ \n")
        ofile.write("\t\t __maxBlkSz=maxBlkSz;\n")
        ofile.write("\t\t __numStream=numStreams;\n")
        ofile.write("\t\t __szPerStream=numSM*maxBlkSz;\n")
        ofile.write("\t\t const size_t bytes=sizeof(double)*numSM*maxBlkSz*numStreams;\n")
        for deriv in deriv_vars:
            ofile.write("\t\t cudaMalloc((void **)&__"+deriv+",bytes);\n")
            #ofile.write("\t\tCUDA_CHECK_ERROR();\n")
        ofile.write("\n")
        ofile.write("\n")
        ofile.write("} \n")
        ofile.write("\n")
        ofile.write("\n")
        ofile.write("\t/**@brief memory deallocation for deriv variables */\n")
        ofile.write("\tvoid cuda::"+TYPE_DERIV_STRUCT+"::deallocateDerivMemory(){ \n")
        for deriv in deriv_vars:
            ofile.write("\t\t cudaFree((void *)__"+deriv+");\n")
            #ofile.write("\t\tCUDA_CHECK_ERROR()\n")
        ofile.write("\n")
        ofile.write("\n")
        ofile.write("} \n")
        ofile.write("\n")
        ofile.write("\n")
        ofile.write("}// end of namespace cuda\n")
    # the `with` block closes the file; the original's explicit close() was redundant
def computeTileStore(dir,out,padWidth=3):
    # Emit C++ statements into `out` that fill the store bounds array
    # (VAR_TILE_LIMITS_STORE, "tile_lm") from the load bounds array
    # (VAR_TILE_LIMITS, "ijk_lm") for one tile of a derivative sweep.
    #
    # `dir` selects the derivative direction ("x", "y" or "z"). Along that
    # axis the store region always starts at `padWidth` and spans the full
    # loaded extent; along each of the other two axes, the emitted code keeps
    # the pad offset only on interior iterations (`iter_*` nonzero) and trims
    # `padWidth` points from the upper end except on the last iteration
    # (`iter_* == BLK_ITERATIONS_* - 1`), so overlapping tiles do not store
    # the same points twice.
    #
    # NOTE(review): `dir` shadows the builtin of the same name, and any value
    # other than "x"/"y"/"z" silently emits only blank lines — confirm callers
    # never pass anything else.
    padWidth=str(padWidth)  # concatenated into the emitted C++ text below
    out.write("\n")
    if(dir=="x"):
        # x is the derivative direction: full extent past the pad
        out.write("\t\t"+VAR_TILE_LIMITS_STORE+"[0]="+padWidth+";\n")
        out.write("\t\t"+VAR_TILE_LIMITS_STORE+"[1]=("+VAR_TILE_LIMITS+"[1]-"+VAR_TILE_LIMITS+"[0]);\n")
        out.write("\n")
        # y axis: pad-aware bounds depending on the tile iteration
        out.write("\t\t"+VAR_TILE_LIMITS_STORE+"[2]=(iter_y)? "+padWidth+": 0;\n")
        out.write("\t\t"+VAR_TILE_LIMITS_STORE+"[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? ("+VAR_TILE_LIMITS+"[3]-"+VAR_TILE_LIMITS+"[2]) : "+"("+VAR_TILE_LIMITS+"[3]-"+VAR_TILE_LIMITS+"[2]-"+padWidth+")" +";\n")
        out.write("\n")
        # z axis: pad-aware bounds depending on the tile iteration
        out.write("\t\t"+VAR_TILE_LIMITS_STORE+"[4]=(iter_z)? "+padWidth+": 0;\n")
        out.write("\t\t"+VAR_TILE_LIMITS_STORE+"[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? ("+VAR_TILE_LIMITS+"[5]-"+VAR_TILE_LIMITS+"[4]) : "+"("+VAR_TILE_LIMITS+"[5]-"+VAR_TILE_LIMITS+"[4]-"+padWidth+")" +";\n")
        out.write("\n")
    elif(dir=="y"):
        # x axis: pad-aware bounds depending on the tile iteration
        out.write("\t\t"+VAR_TILE_LIMITS_STORE+"[0]=(iter_x)? "+padWidth+": 0;\n")
        out.write("\t\t"+VAR_TILE_LIMITS_STORE+"[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? ("+VAR_TILE_LIMITS+"[1]-"+VAR_TILE_LIMITS+"[0]) : "+"("+VAR_TILE_LIMITS+"[1]-"+VAR_TILE_LIMITS+"[0]-"+padWidth+")" +";\n")
        out.write("\n")
        # y is the derivative direction: full extent past the pad
        out.write("\t\t"+VAR_TILE_LIMITS_STORE+"[2]="+padWidth+";\n")
        out.write("\t\t"+VAR_TILE_LIMITS_STORE+"[3]=("+VAR_TILE_LIMITS+"[3]-"+VAR_TILE_LIMITS+"[2]);\n")
        out.write("\n")
        # z axis: pad-aware bounds depending on the tile iteration
        out.write("\t\t"+VAR_TILE_LIMITS_STORE+"[4]=(iter_z)? "+padWidth+": 0;\n")
        out.write("\t\t"+VAR_TILE_LIMITS_STORE+"[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? ("+VAR_TILE_LIMITS+"[5]-"+VAR_TILE_LIMITS+"[4]) : "+"("+VAR_TILE_LIMITS+"[5]-"+VAR_TILE_LIMITS+"[4]-"+padWidth+")" +";\n")
        out.write("\n")
    elif(dir=="z"):
        # x axis: pad-aware bounds depending on the tile iteration
        out.write("\t\t"+VAR_TILE_LIMITS_STORE+"[0]=(iter_x)? "+padWidth+": 0;\n")
        out.write("\t\t"+VAR_TILE_LIMITS_STORE+"[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? ("+VAR_TILE_LIMITS+"[1]-"+VAR_TILE_LIMITS+"[0]) : "+"("+VAR_TILE_LIMITS+"[1]-"+VAR_TILE_LIMITS+"[0]-"+padWidth+")" +";\n")
        out.write("\n")
        # y axis: pad-aware bounds depending on the tile iteration
        out.write("\t\t"+VAR_TILE_LIMITS_STORE+"[2]=(iter_y)? "+padWidth+": 0;\n")
        out.write("\t\t"+VAR_TILE_LIMITS_STORE+"[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? ("+VAR_TILE_LIMITS+"[3]-"+VAR_TILE_LIMITS+"[2]) : "+"("+VAR_TILE_LIMITS+"[3]-"+VAR_TILE_LIMITS+"[2]-"+padWidth+")" +";\n")
        out.write("\n")
        # z is the derivative direction: full extent past the pad
        out.write("\t\t"+VAR_TILE_LIMITS_STORE+"[4]="+padWidth+";\n")
        out.write("\t\t"+VAR_TILE_LIMITS_STORE+"[5]=("+VAR_TILE_LIMITS+"[5]-"+VAR_TILE_LIMITS+"[4]);\n")
        out.write("\n")
    # trailing separator blank line in the emitted code
    out.write("\n")
def cudaCompute(fname_cuh,fname_cu,derivs,outs,varnames,kernelName,headers=[]):
# cuda device properties
VAR_CUDA_DEVICE="__deviceProperties"
# dendro block list parameters
VAR_DENDRO_BLK_LIST="__dendroBlkList"
VAR_NUM_BLOCKS="cuda::__DENDRO_NUM_BLOCKS"
VAR_DERIV_WORKSPACE="__derivWorkspace"
VAR_GPU_BLOCK_MAP="__gpuBlockMap"
VAR_MAX_DENDRO_BLK_SZ=VAR_DERIV_WORKSPACE+"->__maxBlkSz"
VAR_DW_SZ_PER_STREAM=VAR_DERIV_WORKSPACE+"->__szPerStream"
VAR_BSSN_PARAMS="__bssnParams"
TYPE_BSSN_PARAMS="cuda::BSSNComputeParams"
FUNC_DERIV_COMP="__compute_derivatives"
FUNC_COMP_RHS_PRE="__compute"
FUNC_KO_DISS="__ko_dissipation"
VAR_SHARED_MEM="__sm_base"
TYPE_SHARED_MEM="double"
VAR_ADV_COMPRESS_0=VAR_BETA0_BOOL
VAR_ADV_COMPRESS_1=VAR_BETA1_BOOL
VAR_ADV_COMPRESS_2=VAR_BETA2_BOOL
TYPE_ADV_COMPRESS="bool"
VAR_DBLOCK="dblock"
VAR_STREAM_ID="stream_id"
VAR_DERIV_WORKSPACE_OFFSET=VAR_STREAM_ID+"*("+VAR_DW_SZ_PER_STREAM+") + SM_ID*("+VAR_MAX_DENDRO_BLK_SZ+")"
######################################################
# Writing the header
######################################################
with open(fname_cuh, 'w') as ofile:
ofile.write("// generated by Dendro-GR SymPyGR code gernation framework\n")
ofile.write("//date: "+str(datetime.now().strftime('%Y-%m-%d %H:%M:%S'))+"\n")
fileName,fileExt=os.path.splitext(os.path.basename(fname_cuh))
ofile.write("#ifndef "+fileName.upper()+"_"+fileExt[1:].upper()+" \n")
ofile.write("#define "+fileName.upper()+"_"+fileExt[1:].upper()+" \n")
ofile.write("#include<iostream>\n")
ofile.write("#include\"cuda_runtime.h\"\n")
ofile.write("#include<device_launch_parameters.h>\n")
for header in headers:
ofile.write("#include \""+header+"\"\n")
ofile.write("namespace cuda {\n")
ofile.write("\n")
ofile.write("/**@brief compute derivs \n")
ofile.write(" @param[in] "+VAR_UNZIP_IN+": unzipped input array (global memory) \n")
ofile.write(" @param[in] "+TYPE_DERIV_STRUCT+": allocated workspace for derivative computations \n")
ofile.write(" @param[in] "+VAR_DENDRO_BLK_LIST+": dendro block list \n")
ofile.write(" @param[in] "+VAR_GPU_BLOCK_MAP+": gpu block map \n")
ofile.write(" @param[in] "+VAR_CUDA_DEVICE+": cuda device properties \n")
#ofile.write(" @param[out] "+VAR_UNZIP_OUT+": unzipped output computed rhs \n")
ofile.write("*/ \n")
ofile.write("__device__ void "+FUNC_DERIV_COMP+"(const double**"+VAR_UNZIP_IN+","+TYPE_DERIV_STRUCT+"* __derivWorkspace, const "+ TYPE_BLK_CU+ "* dblock, const unsigned int * "+VAR_GPU_BLOCK_MAP+",const "+TYPE_BSSN_PARAMS+" * "+VAR_BSSN_PARAMS+",const cudaDeviceProp* "+VAR_CUDA_DEVICE+", "+TYPE_SHARED_MEM+"* "+VAR_SHARED_MEM+", "+TYPE_ADV_COMPRESS+"* "+VAR_ADV_COMPRESS_0+", "+TYPE_ADV_COMPRESS+"* "+VAR_ADV_COMPRESS_1+", "+TYPE_ADV_COMPRESS+"* "+VAR_ADV_COMPRESS_2+",unsigned int "+VAR_STREAM_ID+");\n")
ofile.write("\n")
for var in varnames:
ofile.write("/**@brief compute "+var+" \n")
ofile.write(" @param[in] "+VAR_UNZIP_IN+": unzipped input array (global memory) \n")
ofile.write(" @param[in] "+TYPE_DERIV_STRUCT+": allocated workspace for derivative computations \n")
ofile.write(" @param[in] "+VAR_DENDRO_BLK_LIST+": dendro block list \n")
ofile.write(" @param[in] "+VAR_GPU_BLOCK_MAP+": gpu block map \n")
ofile.write(" @param[in] "+VAR_CUDA_DEVICE+": cuda device properties \n")
ofile.write(" @param[out] "+VAR_UNZIP_OUT+": unzipped output computed rhs \n")
ofile.write("*/ \n")
ofile.write("__device__ void "+FUNC_COMP_RHS_PRE+"_"+var+"(double **"+VAR_UNZIP_OUT+", const double**"+VAR_UNZIP_IN+","+TYPE_DERIV_STRUCT+"* __derivWorkspace, const "+ TYPE_BLK_CU+ "* dblock, const unsigned int * "+VAR_GPU_BLOCK_MAP+",const "+TYPE_BSSN_PARAMS+" * "+VAR_BSSN_PARAMS+",const cudaDeviceProp* "+VAR_CUDA_DEVICE+", "+TYPE_SHARED_MEM+"* "+VAR_SHARED_MEM+", unsigned int "+VAR_STREAM_ID+");\n")
ofile.write("\n")
ofile.write("/**@brief apply KO dissipation \n")
ofile.write(" @param[in] "+VAR_UNZIP_IN+": unzipped input array (global memory) \n")
ofile.write(" @param[in] "+TYPE_DERIV_STRUCT+": allocated workspace for derivative computations \n")
ofile.write(" @param[in] "+VAR_DENDRO_BLK_LIST+": dendro block list \n")
ofile.write(" @param[in] "+VAR_GPU_BLOCK_MAP+": gpu block map \n")
ofile.write(" @param[in] "+VAR_CUDA_DEVICE+": cuda device properties \n")
ofile.write(" @param[out] "+VAR_UNZIP_OUT+": unzipped output computed rhs \n")
ofile.write("*/ \n")
ofile.write("__device__ void "+FUNC_KO_DISS+"(double **"+VAR_UNZIP_OUT+", const double**"+VAR_UNZIP_IN+","+TYPE_DERIV_STRUCT+"* __derivWorkspace, const "+ TYPE_BLK_CU+ "* dblock, const unsigned int * "+VAR_GPU_BLOCK_MAP+",const "+TYPE_BSSN_PARAMS+" * "+VAR_BSSN_PARAMS+",const cudaDeviceProp* "+VAR_CUDA_DEVICE+", "+TYPE_SHARED_MEM+"* "+VAR_SHARED_MEM+", unsigned int "+VAR_STREAM_ID+");\n")
ofile.write("\n")
ofile.write("/**@brief compute RHS \n")
ofile.write(" @param[in] "+VAR_UNZIP_IN+": unzipped input array (global memory) \n")
ofile.write(" @param[in] "+TYPE_DERIV_STRUCT+": allocated workspace for derivative computations \n")
ofile.write(" @param[in] "+VAR_DENDRO_BLK_LIST+": dendro block list \n")
ofile.write(" @param[in] "+VAR_GPU_BLOCK_MAP+": gpu block map \n")
ofile.write(" @param[in] "+VAR_CUDA_DEVICE+": cuda device properties \n")
ofile.write(" @param[out] "+VAR_UNZIP_OUT+": unzipped output computed rhs \n")
ofile.write("*/ \n")
ofile.write("__global__ void "+kernelName+"(double **"+VAR_UNZIP_OUT+", const double**"+VAR_UNZIP_IN+","+TYPE_DERIV_STRUCT+"* __derivWorkspace, const "+ TYPE_BLK_CU+ "* "+VAR_DENDRO_BLK_LIST+", const unsigned int * "+VAR_GPU_BLOCK_MAP+",const "+TYPE_BSSN_PARAMS+" * "+VAR_BSSN_PARAMS+",const cudaDeviceProp* "+VAR_CUDA_DEVICE+", unsigned int "+VAR_STREAM_ID+");\n")
ofile.write("\n")
ofile.write("}// end of namespace cuda\n")
ofile.write("\n")
ofile.write("\n")
ofile.write("#endif\n")
ofile.close()
######################################################
# Writing the source
######################################################
with open(fname_cu, 'w') as ofile:
ofile.write("// generated by Dendro-GR SymPyGR code gernation framework\n")
ofile.write("//date: "+str(datetime.now().strftime('%Y-%m-%d %H:%M:%S'))+"\n")
ofile.write("#include \""+os.path.basename(fname_cuh)+"\"\n")
# namespace begin
ofile.write("namespace cuda {\n\n")
ofile.write("/**@brief compute RHS \n")
ofile.write(" @param[in] "+VAR_UNZIP_IN+": unzipped input array (global memory) \n")
ofile.write(" @param[in] "+TYPE_DERIV_STRUCT+": allocated workspace for derivative computations \n")
ofile.write(" @param[in] "+VAR_DENDRO_BLK_LIST+": dendro block list \n")
ofile.write(" @param[in] "+VAR_GPU_BLOCK_MAP+": gpu block map \n")
ofile.write(" @param[in] "+VAR_CUDA_DEVICE+": cuda device properties \n")
ofile.write(" @param[out] "+VAR_UNZIP_OUT+": unzipped output computed rhs \n")
ofile.write("*/ \n")
# function begin
ofile.write("__global__ void "+kernelName+"(double **"+VAR_UNZIP_OUT+", const double**"+VAR_UNZIP_IN+","+TYPE_DERIV_STRUCT+"* __derivWorkspace, const "+ TYPE_BLK_CU+ "* "+VAR_DENDRO_BLK_LIST+", const unsigned int * "+VAR_GPU_BLOCK_MAP+",const "+TYPE_BSSN_PARAMS+" * "+VAR_BSSN_PARAMS+" ,const cudaDeviceProp* "+VAR_CUDA_DEVICE+", unsigned int "+VAR_STREAM_ID+"){\n\n")
ofile.write("// shared memory allocation for deriv and rhs computation\n")
memManager=SharedMemManager.MemoryManager(maxMemSz=48*1024,memUsable=41*1024,cout=ofile,baseName=VAR_SHARED_MEM,varType=TYPE_SHARED_MEM)
deriv_tile_sz_1d=0
deriv_req_pad=0
deriv_max_pad=3
for deriv in derivs:
if deriv_tile_sz_1d <deriv.DerivTile1D :
deriv_tile_sz_1d=deriv.DerivTile1D
if deriv_req_pad < deriv.padWidth :
deriv_req_pad =deriv.padWidth
deriv_tile_sz=deriv_tile_sz_1d**3
ofile.write("\t__shared__ bool "+VAR_BETA0_BOOL+ "["+str(deriv_tile_sz)+"];\n")
ofile.write("\t__shared__ bool "+VAR_BETA1_BOOL+ "["+str(deriv_tile_sz)+"];\n")
ofile.write("\t__shared__ bool "+VAR_BETA2_BOOL+ "["+str(deriv_tile_sz)+"];\n\n")
ofile.write("\tfor(unsigned int blk="+VAR_GPU_BLOCK_MAP+"[2*"+VAR_BLK_ID_X+"];blk<"+VAR_GPU_BLOCK_MAP+"[2*"+VAR_BLK_ID_X+"+1];++blk){\n\n\n")
ofile.write("\t// blocks assigned to each gpu block \n")
ofile.write("\tconst _Block * "+VAR_DBLOCK+"=&"+VAR_DENDRO_BLK_LIST+"[blk];\n")
ofile.write("\t// compute the derivatives\n")
ofile.write("\t"+FUNC_DERIV_COMP+"("+VAR_UNZIP_IN+","+VAR_DERIV_WORKSPACE+","+VAR_DBLOCK+","+VAR_GPU_BLOCK_MAP+","+VAR_BSSN_PARAMS+","+VAR_CUDA_DEVICE+","+VAR_SHARED_MEM+","+VAR_ADV_COMPRESS_0+","+VAR_ADV_COMPRESS_1+","+VAR_ADV_COMPRESS_2+","+VAR_STREAM_ID+");\n")
ofile.write("\t__syncthreads();\n")
ofile.write("\t// compute the RHS\n")
for var in varnames:
ofile.write("\t"+FUNC_COMP_RHS_PRE+"_"+var+"("+VAR_UNZIP_OUT+","+VAR_UNZIP_IN+","+VAR_DERIV_WORKSPACE+","+VAR_DBLOCK+","+VAR_GPU_BLOCK_MAP+","+VAR_BSSN_PARAMS+","+VAR_CUDA_DEVICE+","+VAR_SHARED_MEM+","+VAR_STREAM_ID+");\n")
ofile.write("\t__syncthreads();\n")
ofile.write("\t"+FUNC_KO_DISS+"("+VAR_UNZIP_OUT+","+VAR_UNZIP_IN+","+VAR_DERIV_WORKSPACE+","+VAR_DBLOCK+","+VAR_GPU_BLOCK_MAP+","+VAR_BSSN_PARAMS+","+VAR_CUDA_DEVICE+","+VAR_SHARED_MEM+","+VAR_STREAM_ID+");\n")
ofile.write("\t__syncthreads();\n")
ofile.write("\t}// end of the block loop\n")
ofile.write("} // end of kernel \n\n")
ofile.write("\n")
ofile.write("/**@brief compute derivs \n")
ofile.write(" @param[in] "+VAR_UNZIP_IN+": unzipped input array (global memory) \n")
ofile.write(" @param[in] "+TYPE_DERIV_STRUCT+": allocated workspace for derivative computations \n")
ofile.write(" @param[in] "+VAR_DENDRO_BLK_LIST+": dendro block list \n")
ofile.write(" @param[in] "+VAR_GPU_BLOCK_MAP+": gpu block map \n")
ofile.write(" @param[in] "+VAR_CUDA_DEVICE+": cuda device properties \n")
#ofile.write(" @param[out] "+VAR_UNZIP_OUT+": unzipped output computed rhs \n")
ofile.write("*/ \n")
ofile.write("__device__ void "+FUNC_DERIV_COMP+"(const double**"+VAR_UNZIP_IN+","+TYPE_DERIV_STRUCT+"* __derivWorkspace, const "+ TYPE_BLK_CU+ "* dblock, const unsigned int * "+VAR_GPU_BLOCK_MAP+",const "+TYPE_BSSN_PARAMS+" * "+VAR_BSSN_PARAMS+",const cudaDeviceProp* "+VAR_CUDA_DEVICE+", "+TYPE_SHARED_MEM+"* "+VAR_SHARED_MEM+", "+TYPE_ADV_COMPRESS+"* "+VAR_ADV_COMPRESS_0+", "+TYPE_ADV_COMPRESS+"* "+VAR_ADV_COMPRESS_1+", "+TYPE_ADV_COMPRESS+"* "+VAR_ADV_COMPRESS_2+", unsigned int "+VAR_STREAM_ID+"){\n")
ofile.write("\n")
ofile.write("\tconst unsigned int NUM_SM_UNITS="+VAR_CUDA_DEVICE+"->multiProcessorCount;\n")
ofile.write("\tconst unsigned int SM_ID=get_smid();//"+VAR_BLK_ID_X+"%NUM_SM_UNITS;\n")
ofile.write("\tconst unsigned int offset=dblock->getOffset();\n")
ofile.write("\tconst unsigned int *sz=dblock->getSz();\n")
ofile.write("\tconst unsigned int *"+VAR_DENDRO_BLK_ALIGNED_SZ+"=dblock->getAlignedSz();\n")
ofile.write("\tconst double* hx=dblock->getDx();\n")
ofile.write("\tconst double dx=hx[0];\n")
ofile.write("\tconst double dy=hx[1];\n")
ofile.write("\tconst double dz=hx[2];\n")
ofile.write("\tconst double* ptmin=dblock->getPtMin();\n")
ofile.write("\tconst double* ptmax=dblock->getPtMax();\n")
ofile.write("\tconst unsigned int bflag=dblock->getBFlag();\n")
ofile.write("\n")
if(deriv_req_pad>deriv_max_pad):
print("code generation error : maxPadwith for derivatives is larger than the dendro block pad width\n")
os.sys.exit(0)
ofile.write("\tconst unsigned int "+VAR_TILE_SZ+"[3]={"+str(deriv_tile_sz_1d)+","+str(deriv_tile_sz_1d)+","+str(deriv_tile_sz_1d)+"};\n")
memManager.malloc(VAR_IN_SHARED,deriv_tile_sz,ofile,prefix="\t")
memManager.malloc(VAR_OUT_SHARED_0,deriv_tile_sz,ofile,prefix="\t")
memManager.malloc(VAR_OUT_SHARED_1,deriv_tile_sz,ofile,prefix="\t")
ofile.write("\tconst unsigned int Lb = "+str(deriv_max_pad-deriv_req_pad)+";// load begin bound\n")
ofile.write("\tconst unsigned int Le = sz[0]-"+str(deriv_max_pad-deriv_req_pad)+";// load end bound\n")
# !! Note that we assume tile size are cubic.
ofile.write("//!! Note that we assume tile size are cubic.\n")
ofile.write("\tconst unsigned int BLK_ITERATIONS_X = ((Le-Lb)<"+VAR_TILE_SZ+"[0])? 1: ((int)ceil((double)(Le-Lb-"+VAR_TILE_SZ+"[0])/("+VAR_TILE_SZ+"[0]-2*" +str(deriv_req_pad)+")))+1;\n")
ofile.write("\tconst unsigned int BLK_ITERATIONS_Y = BLK_ITERATIONS_X;\n")
ofile.write("\tconst unsigned int BLK_ITERATIONS_Z = BLK_ITERATIONS_X;\n")
ofile.write("\n")
ofile.write("\tunsigned int "+VAR_TILE_LIMITS+"[3*2];\n")
ofile.write("\tunsigned int "+VAR_TILE_LIMITS_STORE+"[3*2];\n")
ofile.write("\tfor(unsigned int iter_z=0;iter_z<BLK_ITERATIONS_Z;iter_z++){\n\n")
ofile.write("\t\t "+VAR_TILE_LIMITS+"[2*2+0]=max("+str(deriv_max_pad-deriv_req_pad)+",(int)("+str(deriv_max_pad-deriv_req_pad)+" + "+VAR_TILE_SZ+"[2]*iter_z -2*iter_z*"+str(deriv_req_pad)+"));\n")
ofile.write("\t\t "+VAR_TILE_LIMITS+"[2*2+1]=min("+VAR_TILE_LIMITS+"[2*2+0]+"+VAR_TILE_SZ+"[2]-1,sz[2]-"+str(deriv_max_pad-deriv_req_pad)+ "-1);\n")
ofile.write("\n")
ofile.write("\n")
ofile.write("\t\t if(("+VAR_TILE_LIMITS+"[5]-"+VAR_TILE_LIMITS+"[4]+1)<="+str(2*deriv_req_pad+3)+") \n\t\t "+VAR_TILE_LIMITS+"[4]="+VAR_TILE_LIMITS+"[4]-("+str(2*deriv_req_pad+3)+"-("+VAR_TILE_LIMITS+"[5]-"+VAR_TILE_LIMITS+"[4]+1)) ; \n ")
ofile.write("\n")
ofile.write("\t for(unsigned int iter_y=0;iter_y<BLK_ITERATIONS_Y;iter_y++){\n\n")
ofile.write("\t\t "+VAR_TILE_LIMITS+"[2*1+0]=max("+str(deriv_max_pad-deriv_req_pad)+",(int)("+str(deriv_max_pad-deriv_req_pad)+" + "+VAR_TILE_SZ+"[1]*iter_y -2*iter_y*"+str(deriv_req_pad)+"));\n")
ofile.write("\t\t "+VAR_TILE_LIMITS+"[2*1+1]=min("+VAR_TILE_LIMITS+"[2*1+0]+"+VAR_TILE_SZ+"[1]-1,sz[1]-"+str(deriv_max_pad-deriv_req_pad)+ "-1);\n")
ofile.write("\n")
ofile.write("\t\t if(("+VAR_TILE_LIMITS+"[3]-"+VAR_TILE_LIMITS+"[2]+1)<="+str(2*deriv_req_pad+3)+") \n\t\t "+VAR_TILE_LIMITS+"[2]="+VAR_TILE_LIMITS+"[2]-("+str(2*deriv_req_pad+3)+"-("+VAR_TILE_LIMITS+"[3]-"+VAR_TILE_LIMITS+"[2]+1)) ; \n ")
ofile.write("\n")
ofile.write("\t for(unsigned int iter_x=0;iter_x<BLK_ITERATIONS_X;iter_x++){\n")
ofile.write("\t\t "+VAR_TILE_LIMITS+"[2*0+0]=max("+str(deriv_max_pad-deriv_req_pad)+",(int)("+str(deriv_max_pad-deriv_req_pad)+" + "+VAR_TILE_SZ+"[0]*iter_x -2*iter_x*"+str(deriv_req_pad)+"));\n")
ofile.write("\t\t "+VAR_TILE_LIMITS+"[2*0+1]=min("+VAR_TILE_LIMITS+"[2*0+0]+"+VAR_TILE_SZ+"[0]-1,sz[0]-"+str(deriv_max_pad-deriv_req_pad)+ "-1);\n")
ofile.write("\n")
ofile.write("\t\t if(("+VAR_TILE_LIMITS+"[1]-"+VAR_TILE_LIMITS+"[0]+1)<="+str(2*deriv_req_pad+3)+") \n\t\t "+VAR_TILE_LIMITS+"[0]="+VAR_TILE_LIMITS+"[0]-("+str(2*deriv_req_pad+3)+"-("+VAR_TILE_LIMITS+"[1]-"+VAR_TILE_LIMITS+"[0]+1)) ; \n ")
ofile.write("\n")
ofile.write("\n")
ofile.write("\t\t //if(threadIdx.x ==0 && threadIdx.y==0 && threadIdx.z==0)\n")
ofile.write("\t\t //printf(\" iter %d %d %d : threadid (%d,%d,%d) tile begin: (%d,%d,%d) tile end: (%d,%d,%d) \\n\",iter_x,iter_y,iter_z, threadIdx.x,threadIdx.y,threadIdx.z,ijk_lm[0],ijk_lm[2],ijk_lm[4],ijk_lm[1],ijk_lm[3],ijk_lm[5]);\n\n")
ofile.write("\n")
ofile.write("\t\t"+FUNC_LOAD_VAR+"(&"+VAR_UNZIP_IN+"[cuda::VAR::U_BETA0][offset],(double *) "+VAR_IN_SHARED+",(const unsigned int *) "+VAR_TILE_LIMITS+",(const unsigned int *) "+VAR_DENDRO_BLK_ALIGNED_SZ+",(const unsigned int *) "+VAR_TILE_SZ+");\n")
ofile.write("\t\t"+FUNC_LOAD_VAR+"(&"+VAR_UNZIP_IN+"[cuda::VAR::U_BETA1][offset],(double *) "+VAR_OUT_SHARED_0+",(const unsigned int *) "+VAR_TILE_LIMITS+",(const unsigned int *) "+VAR_DENDRO_BLK_ALIGNED_SZ+",(const unsigned int *) "+VAR_TILE_SZ+");\n")
ofile.write("\t\t"+FUNC_LOAD_VAR+"(&"+VAR_UNZIP_IN+"[cuda::VAR::U_BETA2][offset],(double *) "+VAR_OUT_SHARED_1+",(const unsigned int *) "+VAR_TILE_LIMITS+",(const unsigned int *) "+VAR_DENDRO_BLK_ALIGNED_SZ+",(const unsigned int *) "+VAR_TILE_SZ+");\n")
ofile.write("\t\t__syncthreads();\n")
ofile.write("\t\t"+FUNC_SIGN_EXT+"((double *)"+VAR_IN_SHARED+",(bool *) "+VAR_BETA0_BOOL+",(const unsigned int *) "+VAR_TILE_LIMITS+",(const unsigned int *) "+VAR_DENDRO_BLK_ALIGNED_SZ+",(const unsigned int *) "+VAR_TILE_SZ+");\n")
ofile.write("\t\t"+FUNC_SIGN_EXT+"((double *)"+VAR_OUT_SHARED_0+",(bool *) "+VAR_BETA1_BOOL+",(const unsigned int *) "+VAR_TILE_LIMITS+",(const unsigned int *) "+VAR_DENDRO_BLK_ALIGNED_SZ+",(const unsigned int *) "+VAR_TILE_SZ+");\n")
ofile.write("\t\t"+FUNC_SIGN_EXT+"((double *)"+VAR_OUT_SHARED_1+",(bool *) "+VAR_BETA2_BOOL+",(const unsigned int *) "+VAR_TILE_LIMITS+",(const unsigned int *) "+VAR_DENDRO_BLK_ALIGNED_SZ+",(const unsigned int *) "+VAR_TILE_SZ+");\n")
ofile.write("\t\t__syncthreads();\n")
for e in D:
enumStr=VAR_ENUM[D.index(e)]
ofile.write("\n")
ofile.write("\t\t//load input data from global to shared memory\n")
ofile.write("\t\t"+FUNC_LOAD_VAR+"(&"+VAR_UNZIP_IN+"["+enumStr+"][offset],(double *) "+VAR_IN_SHARED+",(const unsigned int *) "+VAR_TILE_LIMITS+",(const unsigned int *) "+VAR_DENDRO_BLK_ALIGNED_SZ+",(const unsigned int *) "+VAR_TILE_SZ+");\n")
ofile.write("\t\t__syncthreads();\n")
ofile.write("\t\t//sync to make sure all the data is loaded\n")
for deriv in derivs:
if((deriv.DerivType=="d") and (deriv.DerivDir=="x")):
ofile.write("\t\t// computing deriv "+deriv.DerivDir+" for variable "+e+" \n")
ofile.write("\t\t"+deriv.DerivFuncCall+"\n")
ofile.write("\t\t__syncthreads();\n") # not essential if each thread writes only the points it has computed in the block.
for deriv1 in derivs:
if((e in DD) and (deriv1.DerivType=="dd") and ((deriv1.DerivDir=="xy") or (deriv1.DerivDir=="xz"))):
ofile.write("\t\t// computing deriv "+deriv1.DerivDir+" for variable "+e+" \n")
ofile.write("\t\t"+deriv1.DerivFuncCall+"\n")
ofile.write("\t\t__syncthreads();\n") # not essential if each thread writes only the points it has computed in the block.
if(deriv1.DerivDir=="xy"):
computeTileStore("y",ofile,deriv_req_pad)
elif(deriv1.DerivDir=="xz"):
computeTileStore("z",ofile,deriv_req_pad)
#!!!! NOTE that for mixed derivs you need to store the padding region as well.
ofile.write("\t\t"+FUNC_STORE_VAR+"((double *) "+VAR_OUT_SHARED_1+",&("+VAR_DERIV_WORKSPACE+"->__"+deriv1.DerivOutput+"_"+ e +"[("+VAR_DERIV_WORKSPACE_OFFSET+")]),(const unsigned int *) "+VAR_TILE_LIMITS+",(const unsigned int *) "+VAR_DENDRO_BLK_ALIGNED_SZ+", (const unsigned int *) "+VAR_TILE_LIMITS_STORE+",(const unsigned int *) "+VAR_TILE_SZ+");\n")
if((deriv1.DerivDir=="xy")):
ofile.write("\t\t__syncthreads();\n")
ofile.write("\n")
computeTileStore("x",ofile,deriv_req_pad)
ofile.write("\t\t"+FUNC_STORE_VAR+"((double *) "+VAR_OUT_SHARED_0+",&("+VAR_DERIV_WORKSPACE+"->__"+deriv.DerivOutput+"_"+ e +"[("+VAR_DERIV_WORKSPACE_OFFSET+")]),(const unsigned int *) "+VAR_TILE_LIMITS+",(const unsigned int *) "+VAR_DENDRO_BLK_ALIGNED_SZ+", (const unsigned int *) "+VAR_TILE_LIMITS_STORE+",(const unsigned int *) "+VAR_TILE_SZ+");\n")
ofile.write("\t\t__syncthreads();\n")
ofile.write("\n")
if((deriv.DerivType=="d") and (deriv.DerivDir=="y")):
ofile.write("\t\t// computing deriv "+deriv.DerivDir+" for variable "+e+" \n")
ofile.write("\t\t"+deriv.DerivFuncCall+"\n")
ofile.write("\t\t__syncthreads();\n") # not essential if each thread writes only the points it has computed in the block.
for deriv1 in derivs:
if((e in DD) and (deriv1.DerivType=="dd") and (deriv1.DerivDir=="yz")):
ofile.write("\t\t// computing deriv "+deriv1.DerivDir+" for variable "+e+" \n")
ofile.write("\t\t"+deriv1.DerivFuncCall+"\n")
ofile.write("\t\t__syncthreads();\n") # not essential if each thread writes only the points it has computed in the block.
computeTileStore("z",ofile,deriv_req_pad)
ofile.write("\t\t"+FUNC_STORE_VAR+"((double *) "+VAR_OUT_SHARED_1+",&("+VAR_DERIV_WORKSPACE+"->__"+deriv1.DerivOutput+"_"+ e +"[("+VAR_DERIV_WORKSPACE_OFFSET+")]),(const unsigned int *) "+VAR_TILE_LIMITS+",(const unsigned int *) "+VAR_DENDRO_BLK_ALIGNED_SZ+", (const unsigned int *) "+VAR_TILE_LIMITS_STORE+",(const unsigned int *) "+VAR_TILE_SZ+");\n")
#ofile.write("\t\t__syncthreads();\n")
ofile.write("\n")
#write the x, y,z derivs.
computeTileStore("y",ofile,deriv_req_pad)
ofile.write("\t\t"+FUNC_STORE_VAR+"((double *) "+VAR_OUT_SHARED_0+",&("+VAR_DERIV_WORKSPACE+"->__"+deriv.DerivOutput+"_"+ e +"[("+VAR_DERIV_WORKSPACE_OFFSET+")]),(const unsigned int *) "+VAR_TILE_LIMITS+",(const unsigned int *) "+VAR_DENDRO_BLK_ALIGNED_SZ+", (const unsigned int *) "+VAR_TILE_LIMITS_STORE+",(const unsigned int *) "+VAR_TILE_SZ+");\n")
ofile.write("\t\t__syncthreads();\n")
ofile.write("\n")
if((deriv.DerivType=="d") and (deriv.DerivDir=="z")):
ofile.write("\t\t// computing deriv "+deriv.DerivDir+" for variable "+e+" \n")
ofile.write("\t\t"+deriv.DerivFuncCall+"\n")
ofile.write("\t\t__syncthreads();\n") # not essential if each thread writes only the points it has computed in the block.
computeTileStore("z",ofile,deriv_req_pad)
ofile.write("\t\t"+FUNC_STORE_VAR+"((double *) "+VAR_OUT_SHARED_0+",&("+VAR_DERIV_WORKSPACE+"->__"+deriv.DerivOutput+"_"+ e +"[("+VAR_DERIV_WORKSPACE_OFFSET+")]),(const unsigned int *) "+VAR_TILE_LIMITS+",(const unsigned int *) "+VAR_DENDRO_BLK_ALIGNED_SZ+", (const unsigned int *) "+VAR_TILE_LIMITS_STORE+",(const unsigned int *) "+VAR_TILE_SZ+");\n")
ofile.write("\t\t__syncthreads();\n")
ofile.write("\n")
derivCount=0
for deriv in derivs:
if((e in DD) and (deriv.DerivType=="dd") and ((deriv.DerivDir=="xx") or (deriv.DerivDir=="yy") or (deriv.DerivDir=="zz"))):
ofile.write("\t\t"+deriv.DerivFuncCall+"\n")
ofile.write("\t\t__syncthreads();\n") # not essential if each thread writes only the points it has computed in the block.
if(deriv.DerivDir=="xx"):
computeTileStore("x",ofile,deriv_req_pad)
elif(deriv.DerivDir=="yy"):
computeTileStore("y",ofile,deriv_req_pad)
elif(deriv.DerivDir=="zz"):
computeTileStore("z",ofile,deriv_req_pad)
ofile.write("\t\t"+FUNC_STORE_VAR+"((double *) "+VAR_OUT_SHARED_0+",&("+VAR_DERIV_WORKSPACE+"->__"+deriv.DerivOutput+"_"+ e +"[("+VAR_DERIV_WORKSPACE_OFFSET+")]),(const unsigned int *) "+VAR_TILE_LIMITS+",(const unsigned int *) "+VAR_DENDRO_BLK_ALIGNED_SZ+", (const unsigned int *) "+VAR_TILE_LIMITS_STORE+",(const unsigned int *) "+VAR_TILE_SZ+");\n")
ofile.write("\t\t__syncthreads();\n")
ofile.write("\n")
for deriv in derivs:
if( (e in KO) and (deriv.DerivType=="ko")):
ofile.write("\t\t"+deriv.DerivFuncCall+"\n")
ofile.write("\t\t__syncthreads();\n") # not essential if each thread writes only the points it has computed in the block.
if(deriv.DerivDir=="x"):
computeTileStore("x",ofile,deriv_req_pad)
elif(deriv.DerivDir=="y"):
computeTileStore("y",ofile,deriv_req_pad)
elif(deriv.DerivDir=="z"):
computeTileStore("z",ofile,deriv_req_pad)
ofile.write("\t\t"+FUNC_STORE_VAR+"((double *) "+VAR_OUT_SHARED_0+",&("+VAR_DERIV_WORKSPACE+"->__"+deriv.DerivOutput+"_"+ e +"[("+VAR_DERIV_WORKSPACE_OFFSET+")]),(const unsigned int *) "+VAR_TILE_LIMITS+",(const unsigned int *) "+VAR_DENDRO_BLK_ALIGNED_SZ+", (const unsigned int *) "+VAR_TILE_LIMITS_STORE+",(const unsigned int *) "+VAR_TILE_SZ+");\n")
ofile.write("\t\t__syncthreads();\n")
for deriv in derivs:
if( (e in AD) and (deriv.DerivType=="ad")):
ofile.write("\t\t"+deriv.DerivFuncCall+"\n")
ofile.write("\t\t__syncthreads();\n") # not essential if each thread writes only the points it has computed in the block.
if(deriv.DerivDir=="x"):
computeTileStore("x",ofile,deriv_req_pad)
elif(deriv.DerivDir=="y"):
computeTileStore("y",ofile,deriv_req_pad)
elif(deriv.DerivDir=="z"):
computeTileStore("z",ofile,deriv_req_pad)
ofile.write("\t\t"+FUNC_STORE_VAR+"((double *) "+VAR_OUT_SHARED_0+",&("+VAR_DERIV_WORKSPACE+"->__"+deriv.DerivOutput+"_"+ e +"[("+VAR_DERIV_WORKSPACE_OFFSET+")]),(const unsigned int *) "+VAR_TILE_LIMITS+",(const unsigned int *) "+VAR_DENDRO_BLK_ALIGNED_SZ+", (const unsigned int *) "+VAR_TILE_LIMITS_STORE+",(const unsigned int *) "+VAR_TILE_SZ+");\n")
ofile.write("\t\t__syncthreads();\n")
ofile.write("\t\t } // end of block tile loop x\n")
ofile.write("\t\t } // end of block tile loop y\n")
ofile.write("\t\t} // end of block tile loop z\n\n")
ofile.write("} // end of function "+FUNC_DERIV_COMP+"\n\n")
##############################################################################
## RHS code generation
##############################################################################
for var_id in range(0,len(varnames)):
memManager.deallocAll()
memManager.clearScopeVariables()
ofile.write("/**@brief compute "+varnames[var_id]+" \n")
ofile.write(" @param[in] "+VAR_UNZIP_IN+": unzipped input array (global memory) \n")
ofile.write(" @param[in] "+TYPE_DERIV_STRUCT+": allocated workspace for derivative computations \n")
ofile.write(" @param[in] "+VAR_DENDRO_BLK_LIST+": dendro block list \n")
ofile.write(" @param[in] "+VAR_GPU_BLOCK_MAP+": gpu block map \n")
ofile.write(" @param[in] "+VAR_CUDA_DEVICE+": cuda device properties \n")
ofile.write(" @param[out] "+VAR_UNZIP_OUT+": unzipped output computed rhs \n")
ofile.write("*/ \n")
ofile.write("__device__ void "+FUNC_COMP_RHS_PRE+"_"+varnames[var_id]+"(double **"+VAR_UNZIP_OUT+", const double**"+VAR_UNZIP_IN+","+TYPE_DERIV_STRUCT+"* __derivWorkspace, const "+ TYPE_BLK_CU+ "* dblock, const unsigned int * "+VAR_GPU_BLOCK_MAP+",const "+TYPE_BSSN_PARAMS+" * "+VAR_BSSN_PARAMS+",const cudaDeviceProp* "+VAR_CUDA_DEVICE+", "+TYPE_SHARED_MEM+"* "+VAR_SHARED_MEM+", unsigned int "+VAR_STREAM_ID+"){\n")
ofile.write("\n\n")
mi = [0, 1, 2, 4, 5, 8]
midx = ['00', '01', '02', '11', '12', '22']
ofile.write("\n")
idx="[pp]"
ofile.write("\t///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////\n")
ofile.write("\t// generated code for "+varnames[var_id]+" begin \n")
ofile.write("\t///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////\n")
varOut=varnames[var_id]
exp=outs[var_id]
ofile.write("\t// bssn compute parameters \n")
ofile.write("\tconst double lambda[4]={"+VAR_BSSN_PARAMS+"->BSSN_LAMBDA[0],"+VAR_BSSN_PARAMS+"->BSSN_LAMBDA[1],"+VAR_BSSN_PARAMS+"->BSSN_LAMBDA[2],"+VAR_BSSN_PARAMS+"->BSSN_LAMBDA[3]};\n")
ofile.write("\tconst double lambda_f[2]={"+VAR_BSSN_PARAMS+"->BSSN_LAMBDA_F[0],"+VAR_BSSN_PARAMS+"->BSSN_LAMBDA_F[1]};\n")
ofile.write("\tconst double kosigma="+VAR_BSSN_PARAMS+"->KO_DISS_SIGMA;\n")
ofile.write("\tconst double ETA_R0="+VAR_BSSN_PARAMS+"->ETA_R0;\n")
ofile.write("\tconst double R0="+VAR_BSSN_PARAMS+"->ETA_R0;\n")
ofile.write("\tconst double ETA_DAMPING="+VAR_BSSN_PARAMS+"->ETA_DAMPING;\n")
ofile.write("\tconst double ETA_DAMPING_EXP="+VAR_BSSN_PARAMS+"->ETA_DAMPING_EXP;\n")
ofile.write("\tconst double ETA_CONST="+VAR_BSSN_PARAMS+"->ETA_CONST;\n")
ofile.write("\tconst double eta_power[2]={"+VAR_BSSN_PARAMS+"->BSSN_ETA_POWER[0],"+VAR_BSSN_PARAMS+"->BSSN_ETA_POWER[1]};\n")
print("code generation for : "+varOut)
ofile.write("\tconst unsigned int NUM_SM_UNITS="+VAR_CUDA_DEVICE+"->multiProcessorCount;\n")
ofile.write("\tconst unsigned int SM_ID=get_smid();//"+VAR_BLK_ID_X+"%NUM_SM_UNITS;\n")
ofile.write("\tconst unsigned int offset=dblock->getOffset();\n")
ofile.write("\tconst unsigned int *sz=dblock->getSz();\n")
ofile.write("\tconst unsigned int *"+VAR_DENDRO_BLK_ALIGNED_SZ+"=dblock->getAlignedSz();\n")
ofile.write("\tconst double* hx=dblock->getDx();\n")
ofile.write("\tconst double dx=hx[0];\n")
ofile.write("\tconst double dy=hx[1];\n")
ofile.write("\tconst double dz=hx[2];\n")
ofile.write("\tconst double* ptmin=dblock->getPtMin();\n")
ofile.write("\tconst double* ptmax=dblock->getPtMax();\n")
ofile.write("\tconst unsigned int bflag=dblock->getBFlag();\n")
ofile.write("\n")
num_e = 0
lexp = []
lname = []
if type(exp) == list:
num_e = num_e + len(exp)
for j, ev in enumerate(exp):
lexp.append(ev)
lname.append(varOut+repr(j)+idx)
elif type(exp) == sympy.Matrix:
num_e = num_e + len(exp)
for j, k in enumerate(mi):
lexp.append(exp[k])
lname.append(varOut+midx[j]+idx)
else:
num_e = num_e + 1
lexp.append(exp)
lname.append(varOut+idx)
print("cse tree build begin")
ee_name = 'DENDRO_' #''.join(random.choice(string.ascii_uppercase) for _ in range(5))
ee_syms = sympy.utilities.numbered_symbols(prefix=ee_name)
_v = sympy.cse(lexp, symbols=ee_syms, optimizations='basic')
print("cse tree build completed")
# bssn variables needed for rhs computation.
bssnInputVars=[]
# bssn variables output
bssnOutputVars=[]
# derivative variables needed for rhs computation
derivVars=[]
# staged bssn variables.
bssnStagedVars=[]
if type(exp) == list:
#print("list \n")
for j, ev in enumerate(exp):
regm=re.findall(re.compile(r"([A-Z,a-z,0-9,_]*\[pp\])"),dendro.change_deriv_names(str(ev)))
for varDep in regm:
if varDep[0:-4] in VAR_ENUM_TO_INPUT_SYM.keys():
bssnInputVars.append(varDep[0:-4])
elif varDep[0:-4] in VAR_ENUM_TO_OUTPUT_SYM.keys():
bssnOutputVars.append(varDep[0:-4])
else:
for key,value in custom_functions.items():
if value in varDep[0:-4]:
derivVars.append(varDep[0:-4])
break
elif type(exp)==sympy.Matrix:
#print(dendro.change_deriv_names(str(exp)))
#print(exp.free_symbols)
regm=re.findall(re.compile(r"([A-Z,a-z,0-9,_]*\[pp\])"),dendro.change_deriv_names(str(exp)))
for varDep in regm:
if varDep[0:-4] in VAR_ENUM_TO_INPUT_SYM.keys():
bssnInputVars.append(varDep[0:-4])
elif varDep[0:-4] in VAR_ENUM_TO_OUTPUT_SYM.keys():
bssnOutputVars.append(varDep[0:-4])
else:
for key,value in custom_functions.items():
if value in varDep[0:-4]:
derivVars.append(varDep[0:-4])
break
else:
#print(dendro.change_deriv_names(str(exp)))
regm=re.findall(re.compile(r"([A-Z,a-z,0-9,_]*\[pp\])"),dendro.change_deriv_names(str(exp)))
for varDep in regm:
#print (varDep[0:-4])
if varDep[0:-4] in VAR_ENUM_TO_INPUT_SYM.keys():
bssnInputVars.append(varDep[0:-4])
elif varDep[0:-4] in VAR_ENUM_TO_OUTPUT_SYM.keys():
bssnOutputVars.append(varDep[0:-4])
else:
for key,value in custom_functions.items():
if value in varDep[0:-4]:
derivVars.append(varDep[0:-4])
break
for lvar in lname:
if lvar[0:-4] in VAR_ENUM_TO_OUTPUT_SYM.keys():
bssnOutputVars.append(lvar[0:-4])
else:
bssnStagedVars.append(lvar[0:-4])
bssnInputVars=list(set(bssnInputVars))
bssnOutputVars=list(set(bssnOutputVars))
bssnStagedVars=list(set(bssnStagedVars))
derivVars=list(set(derivVars))
total_dep=len(bssnInputVars)+len(bssnStagedVars)+len(derivVars)+len(bssnOutputVars)
rhs_tile_size_1d=math.floor(((memManager.getMemUsable())/(total_dep*8))**(1.0/3.0))
ofile.write("\tconst unsigned int "+VAR_TILE_SZ+"[3]={"+str(rhs_tile_size_1d)+","+str(rhs_tile_size_1d)+","+str(rhs_tile_size_1d)+"};\n")
rhs_tile_size=rhs_tile_size_1d**3
ofile.write("\t\n")
# no padding region required for rhs computation
rhs_req_pad=0
ofile.write("\t //input vars begin\n")
for var in bssnInputVars:
memManager.malloc(var,rhs_tile_size,ofile,prefix="\t")
ofile.write("\t //input vars end\n")
ofile.write("\t // staged vars begin\n")
for var in bssnStagedVars:
memManager.malloc(var,rhs_tile_size,ofile,prefix="\t")
ofile.write("\t // staged vars end\n")
ofile.write("\t // deriv vars begin\n")
for var in derivVars:
memManager.malloc(var,rhs_tile_size,ofile,prefix="\t")
ofile.write("\t // deriv vars end\n")
ofile.write("\t // output vars begin\n")
for var in bssnOutputVars:
memManager.malloc(var,rhs_tile_size,ofile,prefix="\t")
ofile.write("\t // output vars end\n")
ofile.write("\tconst unsigned int Lb = "+str(deriv_max_pad-rhs_req_pad)+";// load begin bound\n")
ofile.write("\tconst unsigned int Le = sz[0]-"+str(deriv_max_pad-rhs_req_pad)+";// load end bound\n")
# !! Note that we assume tile size are cubic.
ofile.write("//!! Note that we assume tile size are cubic.\n")
ofile.write("\tconst unsigned int BLK_ITERATIONS_X = ((Le-Lb)<"+VAR_TILE_SZ+"[0])? 1: ((int)ceil((double)(Le-Lb-"+VAR_TILE_SZ+"[0])/("+VAR_TILE_SZ+"[0]-2*" +str(rhs_req_pad)+")))+1;\n")
ofile.write("\tconst unsigned int BLK_ITERATIONS_Y = BLK_ITERATIONS_X;\n")
ofile.write("\tconst unsigned int BLK_ITERATIONS_Z = BLK_ITERATIONS_X;\n")
ofile.write("\n")
ofile.write("\tunsigned int "+VAR_TILE_LIMITS+"[3*2];\n")
ofile.write("\tunsigned int "+VAR_TILE_LIMITS_STORE+"[3*2];\n")
ofile.write("\tfor(unsigned int iter_z=0;iter_z<BLK_ITERATIONS_Z;iter_z++){\n\n")
ofile.write("\t\t "+VAR_TILE_LIMITS+"[2*2+0]=max("+str(deriv_max_pad-rhs_req_pad)+",(int)("+str(deriv_max_pad-rhs_req_pad)+" + "+VAR_TILE_SZ+"[2]*iter_z -2*iter_z*"+str(rhs_req_pad)+"));\n")
ofile.write("\t\t "+VAR_TILE_LIMITS+"[2*2+1]=min("+VAR_TILE_LIMITS+"[2*2+0]+"+VAR_TILE_SZ+"[2]-1,sz[2]-"+str(deriv_max_pad-rhs_req_pad)+"-1);\n")
ofile.write("\n")
ofile.write("\t for(unsigned int iter_y=0;iter_y<BLK_ITERATIONS_Y;iter_y++){\n\n")
ofile.write("\t\t "+VAR_TILE_LIMITS+"[2*1+0]=max("+str(deriv_max_pad-rhs_req_pad)+",(int)("+str(deriv_max_pad-rhs_req_pad)+" + "+VAR_TILE_SZ+"[1]*iter_y -2*iter_y*"+str(rhs_req_pad)+"));\n")
ofile.write("\t\t "+VAR_TILE_LIMITS+"[2*1+1]=min("+VAR_TILE_LIMITS+"[2*1+0]+"+VAR_TILE_SZ+"[1]-1,sz[1]-"+str(deriv_max_pad-rhs_req_pad)+"-1);\n")
ofile.write("\n")
ofile.write("\t for(unsigned int iter_x=0;iter_x<BLK_ITERATIONS_X;iter_x++){\n")
ofile.write("\t\t "+VAR_TILE_LIMITS+"[2*0+0]=max("+str(deriv_max_pad-rhs_req_pad)+",(int)("+str(deriv_max_pad-rhs_req_pad)+" + "+VAR_TILE_SZ+"[0]*iter_x -2*iter_x*"+str(rhs_req_pad)+"));\n")
ofile.write("\t\t "+VAR_TILE_LIMITS+"[2*0+1]=min("+VAR_TILE_LIMITS+"[2*0+0]+"+VAR_TILE_SZ+"[0]-1,sz[0]-"+str(deriv_max_pad-rhs_req_pad)+"-1);\n")
ofile.write("\n")
ofile.write("\n")
ofile.write("\t\t"+VAR_TILE_LIMITS_STORE+"[0]=0;\n")
ofile.write("\t\t"+VAR_TILE_LIMITS_STORE+"[1]="+VAR_TILE_LIMITS+"[1] - "+VAR_TILE_LIMITS+"[0];\n")
ofile.write("\n")
ofile.write("\t\t"+VAR_TILE_LIMITS_STORE+"[2]=0;\n")
ofile.write("\t\t"+VAR_TILE_LIMITS_STORE+"[3]="+VAR_TILE_LIMITS+"[3] - "+VAR_TILE_LIMITS+"[2];\n")
ofile.write("\n")
ofile.write("\t\t"+VAR_TILE_LIMITS_STORE+"[4]=0;\n")
ofile.write("\t\t"+VAR_TILE_LIMITS_STORE+"[5]="+VAR_TILE_LIMITS+"[5] - "+VAR_TILE_LIMITS+"[4];\n")
ofile.write("\t\t //if(threadIdx.x ==0 && threadIdx.y==0 && threadIdx.z==0)\n")
ofile.write("\t\t //printf(\" iter %d %d %d : threadid (%d,%d,%d) tile begin: (%d,%d,%d) tile end: (%d,%d,%d) \\n\",iter_x,iter_y,iter_z, threadIdx.x,threadIdx.y,threadIdx.z,ijk_lm[0],ijk_lm[2],ijk_lm[4],ijk_lm[1],ijk_lm[3],ijk_lm[5]);\n\n")
ofile.write("\n\n")
ofile.write("\t\t //load data from global to shared memory\n")
for var in bssnInputVars:
ofile.write("\t\t "+FUNC_LOAD_VAR+"(&"+VAR_UNZIP_IN+"["+VAR_ENUM_TO_INPUT_SYM[var]+"][offset],(double *) "+var+",(const unsigned int *) "+VAR_TILE_LIMITS+",(const unsigned int *) "+VAR_DENDRO_BLK_ALIGNED_SZ+",(const unsigned int *) "+VAR_TILE_SZ+");\n")
for var in derivVars:
ofile.write("\t\t "+FUNC_LOAD_VAR+"(&("+VAR_DERIV_WORKSPACE+"->__"+var+"[("+VAR_DERIV_WORKSPACE_OFFSET+")]),(double *) "+var+",(const unsigned int *) "+VAR_TILE_LIMITS+",(const unsigned int *) "+VAR_DENDRO_BLK_ALIGNED_SZ+",(const unsigned int *) "+VAR_TILE_SZ+");\n")
ofile.write("\t\t __syncthreads();\n\n")
ofile.write("\n\n")
#/*|| ("+VAR_TRD_ID_Z+">=("+VAR_TILE_LIMITS+"[5]-"+VAR_TILE_LIMITS+"[4]))*/
ofile.write("\tif(!(("+VAR_TRD_ID_X+">("+VAR_TILE_LIMITS+"[1]-"+VAR_TILE_LIMITS+"[0])) || ("+VAR_TRD_ID_Y+">("+VAR_TILE_LIMITS+"[3]-"+VAR_TILE_LIMITS+"[2]))) ){ \n\n")
ofile.write("\t\t double x,y,z,r_coord,eta;\n")
ofile.write("\t\t unsigned int pp=0*"+VAR_TILE_SZ+"[0]*"+VAR_TILE_SZ+"[1]+"+VAR_TRD_ID_Y+"*"+VAR_TILE_SZ+"[1]+"+VAR_TRD_ID_X+";\n")
ofile.write("\t\t for(unsigned int k=0;k<=(ijk_lm[5]-ijk_lm[4]);++k,pp+="+VAR_TILE_SZ+"[0]*"+VAR_TILE_SZ+"[1]){\n")
ofile.write("\t\t\t z = ptmin[2] + (k+"+VAR_TILE_LIMITS+"[4])*dz;\n")
ofile.write("\t\t\t y = ptmin[1] + ("+VAR_TRD_ID_Y+"+"+VAR_TILE_LIMITS+"[2])*dy;\n")
ofile.write("\t\t\t x = ptmin[0] + ("+VAR_TRD_ID_X+"+"+VAR_TILE_LIMITS+"[0])*dx;\n")
ofile.write("\t\t\t r_coord = sqrt(x*x + y*y + z*z);\n")
ofile.write("\t\t\t eta=ETA_CONST;\n")
ofile.write("\t\t\t if (r_coord >= ETA_R0) {\n")
ofile.write("\t\t\t eta *= pow( (ETA_R0/r_coord), ETA_DAMPING_EXP);\n")
ofile.write("\t\t\t }\n\n")
ofile.write("\t\t\t // Dendro: {{{ \n")
ofile.write("\t\t\t // Dendro: original ops: "+str(sympy.count_ops(lexp))+"\n")
rops=0
ofile.write("\t\t\t // Dendro: printing temp variables\n")
for (v1, v2) in _v[0]:
ofile.write("\t\t const double ")
ofile.write(dendro.change_deriv_names(sympy.ccode(v2, assign_to=v1, user_functions=custom_functions))+"\n")
rops = rops + sympy.count_ops(v2)
ofile.write("\t\t\t // Dendro: printing variables\n\n")
for i, e in enumerate(_v[1]):
ofile.write("\t\t "+dendro.change_deriv_names(sympy.ccode(e, assign_to=lname[i], user_functions=custom_functions))+"\n")
rops = rops + sympy.count_ops(e)
ofile.write("\t\t\t // Dendro: reduced ops: "+str(rops)+"\n")
ofile.write("\t\t\t // Dendro: }}} \n")
ofile.write("\t\t\t } //loop z end \n")
ofile.write("\t}// end of the if for the thread idx \n")
ofile.write("\t\t\t__syncthreads();\n\n")
ofile.write("\t\t\t// sotre computed variables\n\n")
for var in bssnOutputVars:
ofile.write("\t\t"+FUNC_STORE_VAR+"("+var+", &"+VAR_UNZIP_OUT+"["+VAR_ENUM_TO_OUTPUT_SYM[var]+"][offset],(const unsigned int *) "+VAR_TILE_LIMITS+",(const unsigned int *) "+VAR_DENDRO_BLK_ALIGNED_SZ+", (const unsigned int *) "+VAR_TILE_LIMITS_STORE+",(const unsigned int *) "+VAR_TILE_SZ+");\n")
ofile.write("\t\t __syncthreads();\n")
ofile.write("\t } // end of block assigned to gpu block loop x \n\n")
ofile.write("\t } // end of block assigned to gpu block loop y \n\n")
ofile.write("\t} // end of block assigned to gpu block loop z \n\n")
ofile.write("} // end of function" +FUNC_COMP_RHS_PRE+"_"+varnames[var_id]+" \n\n")
memManager.deallocAll()
memManager.clearScopeVariables()
ofile.write("/**@brief apply KO dissipation \n")
ofile.write(" @param[in] "+VAR_UNZIP_IN+": unzipped input array (global memory) \n")
ofile.write(" @param[in] "+TYPE_DERIV_STRUCT+": allocated workspace for derivative computations \n")
ofile.write(" @param[in] "+VAR_DENDRO_BLK_LIST+": dendro block list \n")
ofile.write(" @param[in] "+VAR_GPU_BLOCK_MAP+": gpu block map \n")
ofile.write(" @param[in] "+VAR_CUDA_DEVICE+": cuda device properties \n")
ofile.write(" @param[out] "+VAR_UNZIP_OUT+": unzipped output computed rhs \n")
ofile.write("*/ \n")
ofile.write("__device__ void "+FUNC_KO_DISS+"(double **"+VAR_UNZIP_OUT+", const double**"+VAR_UNZIP_IN+","+TYPE_DERIV_STRUCT+"* __derivWorkspace, const "+ TYPE_BLK_CU+ "* dblock, const unsigned int * "+VAR_GPU_BLOCK_MAP+",const "+TYPE_BSSN_PARAMS+" * "+VAR_BSSN_PARAMS+",const cudaDeviceProp* "+VAR_CUDA_DEVICE+", "+TYPE_SHARED_MEM+"* "+VAR_SHARED_MEM+", unsigned int "+VAR_STREAM_ID+"){\n")
ofile.write("\n")
ofile.write("\t// bssn compute parameters \n")
ofile.write("\tconst double lambda[4]={"+VAR_BSSN_PARAMS+"->BSSN_LAMBDA[0],"+VAR_BSSN_PARAMS+"->BSSN_LAMBDA[1],"+VAR_BSSN_PARAMS+"->BSSN_LAMBDA[2],"+VAR_BSSN_PARAMS+"->BSSN_LAMBDA[3]};\n")
ofile.write("\tconst double lambda_f[2]={"+VAR_BSSN_PARAMS+"->BSSN_LAMBDA_F[0],"+VAR_BSSN_PARAMS+"->BSSN_LAMBDA_F[1]};\n")
ofile.write("\tconst double kosigma="+VAR_BSSN_PARAMS+"->KO_DISS_SIGMA;\n")
ofile.write("\tconst double ETA_R0="+VAR_BSSN_PARAMS+"->ETA_R0;\n")
ofile.write("\tconst double R0="+VAR_BSSN_PARAMS+"->ETA_R0;\n")
ofile.write("\tconst double ETA_DAMPING="+VAR_BSSN_PARAMS+"->ETA_DAMPING;\n")
ofile.write("\tconst double ETA_DAMPING_EXP="+VAR_BSSN_PARAMS+"->ETA_DAMPING_EXP;\n")
ofile.write("\tconst double ETA_CONST="+VAR_BSSN_PARAMS+"->ETA_CONST;\n")
ofile.write("\tconst double eta_power[2]={"+VAR_BSSN_PARAMS+"->BSSN_ETA_POWER[0],"+VAR_BSSN_PARAMS+"->BSSN_ETA_POWER[1]};\n")
ofile.write("\tconst unsigned int NUM_SM_UNITS="+VAR_CUDA_DEVICE+"->multiProcessorCount;\n")
ofile.write("\tconst unsigned int SM_ID=get_smid();//"+VAR_BLK_ID_X+"%NUM_SM_UNITS;\n")
ofile.write("\tconst unsigned int offset=dblock->getOffset();\n")
ofile.write("\tconst unsigned int *sz=dblock->getSz();\n")
ofile.write("\tconst unsigned int *"+VAR_DENDRO_BLK_ALIGNED_SZ+"=dblock->getAlignedSz();\n")
ofile.write("\tconst double* hx=dblock->getDx();\n")
ofile.write("\tconst double dx=hx[0];\n")
ofile.write("\tconst double dy=hx[1];\n")
ofile.write("\tconst double dz=hx[2];\n")
ofile.write("\tconst double* ptmin=dblock->getPtMin();\n")
ofile.write("\tconst double* ptmax=dblock->getPtMax();\n")
ofile.write("\tconst unsigned int bflag=dblock->getBFlag();\n")
total_dep=4
rhs_req_pad=0
rhs_tile_size_1d=math.floor(((memManager.getMemUsable())/(total_dep*8))**(1.0/3.0))
ofile.write("\tconst unsigned int "+VAR_TILE_SZ+"[3]={"+str(rhs_tile_size_1d)+","+str(rhs_tile_size_1d)+","+str(rhs_tile_size_1d)+"};\n")
rhs_tile_size=rhs_tile_size_1d**3
VAR_KO_TEMP=["kograd_0","kograd_1","kograd_2"]
VAR_KO_TEMP_RHS=["unZipSharedOut"]
for var in VAR_KO_TEMP:
memManager.malloc(var,rhs_tile_size,ofile,prefix="\t")
for var in VAR_KO_TEMP_RHS:
memManager.malloc(var,rhs_tile_size,ofile,prefix="\t")
ofile.write("\tconst unsigned int Lb = "+str(deriv_max_pad-rhs_req_pad)+";// load begin bound\n")
ofile.write("\tconst unsigned int Le = sz[0]-"+str(deriv_max_pad-rhs_req_pad)+";// load end bound\n")
# !! Note that we assume tile size are cubic.
ofile.write("//!! Note that we assume tile size are cubic.\n")
ofile.write("\tconst unsigned int BLK_ITERATIONS_X = ((Le-Lb)<"+VAR_TILE_SZ+"[0])? 1: ((int)ceil((double)(Le-Lb-"+VAR_TILE_SZ+"[0])/("+VAR_TILE_SZ+"[0]-2*" +str(rhs_req_pad)+")))+1;\n")
ofile.write("\tconst unsigned int BLK_ITERATIONS_Y = BLK_ITERATIONS_X;\n")
ofile.write("\tconst unsigned int BLK_ITERATIONS_Z = BLK_ITERATIONS_X;\n")
ofile.write("\n")
ofile.write("\tunsigned int "+VAR_TILE_LIMITS+"[3*2];\n")
ofile.write("\tunsigned int "+VAR_TILE_LIMITS_STORE+"[3*2];\n")
ofile.write("\tfor(unsigned int iter_z=0;iter_z<BLK_ITERATIONS_Z;iter_z++){\n\n")
ofile.write("\t\t "+VAR_TILE_LIMITS+"[2*2+0]=max("+str(deriv_max_pad-rhs_req_pad)+",(int)("+str(deriv_max_pad-rhs_req_pad)+" + "+VAR_TILE_SZ+"[2]*iter_z -2*iter_z*"+str(rhs_req_pad)+"));\n")
ofile.write("\t\t "+VAR_TILE_LIMITS+"[2*2+1]=min("+VAR_TILE_LIMITS+"[2*2+0]+"+VAR_TILE_SZ+"[2]-1,sz[2]-"+str(deriv_max_pad-rhs_req_pad)+"-1);\n")
ofile.write("\n")
ofile.write("\t for(unsigned int iter_y=0;iter_y<BLK_ITERATIONS_Y;iter_y++){\n\n")
ofile.write("\t\t "+VAR_TILE_LIMITS+"[2*1+0]=max("+str(deriv_max_pad-rhs_req_pad)+",(int)("+str(deriv_max_pad-rhs_req_pad)+" + "+VAR_TILE_SZ+"[1]*iter_y -2*iter_y*"+str(rhs_req_pad)+"));\n")
ofile.write("\t\t "+VAR_TILE_LIMITS+"[2*1+1]=min("+VAR_TILE_LIMITS+"[2*1+0]+"+VAR_TILE_SZ+"[1]-1,sz[1]-"+str(deriv_max_pad-rhs_req_pad)+"-1);\n")
ofile.write("\n")
ofile.write("\t for(unsigned int iter_x=0;iter_x<BLK_ITERATIONS_X;iter_x++){\n")
ofile.write("\t\t "+VAR_TILE_LIMITS+"[2*0+0]=max("+str(deriv_max_pad-rhs_req_pad)+",(int)("+str(deriv_max_pad-rhs_req_pad)+" + "+VAR_TILE_SZ+"[0]*iter_x -2*iter_x*"+str(rhs_req_pad)+"));\n")
ofile.write("\t\t "+VAR_TILE_LIMITS+"[2*0+1]=min("+VAR_TILE_LIMITS+"[2*0+0]+"+VAR_TILE_SZ+"[0]-1,sz[0]-"+str(deriv_max_pad-rhs_req_pad)+"-1);\n")
ofile.write("\n")
ofile.write("\n")
ofile.write("\t\t"+VAR_TILE_LIMITS_STORE+"[0]=0;\n")
ofile.write("\t\t"+VAR_TILE_LIMITS_STORE+"[1]="+VAR_TILE_LIMITS+"[1] - "+VAR_TILE_LIMITS+"[0];\n")
ofile.write("\n")
ofile.write("\t\t"+VAR_TILE_LIMITS_STORE+"[2]=0;\n")
ofile.write("\t\t"+VAR_TILE_LIMITS_STORE+"[3]="+VAR_TILE_LIMITS+"[3] - "+VAR_TILE_LIMITS+"[2];\n")
ofile.write("\n")
ofile.write("\t\t"+VAR_TILE_LIMITS_STORE+"[4]=0;\n")
ofile.write("\t\t"+VAR_TILE_LIMITS_STORE+"[5]="+VAR_TILE_LIMITS+"[5] - "+VAR_TILE_LIMITS+"[4];\n")
ofile.write("\t\t //if(threadIdx.x ==0 && threadIdx.y==0 && threadIdx.z==0)\n")
ofile.write("\t\t //printf(\" iter %d %d %d : threadid (%d,%d,%d) tile begin: (%d,%d,%d) tile end: (%d,%d,%d) \\n\",iter_x,iter_y,iter_z, threadIdx.x,threadIdx.y,threadIdx.z,ijk_lm[0],ijk_lm[2],ijk_lm[4],ijk_lm[1],ijk_lm[3],ijk_lm[5]);\n\n")
ofile.write("\n\n")
ofile.write("\t\t unsigned int pp;\n")
for var_id in range(0,len(varnames)):
mi = [0, 1, 2, 4, 5, 8]
midx = ['00', '01', '02', '11', '12', '22']
varOut=varnames[var_id]
exp=outs[var_id]
num_e = 0
lexp = []
lname = []
idx=""
if type(exp) == list:
num_e = num_e + len(exp)
for j, ev in enumerate(exp):
lexp.append(ev)
lname.append(varOut+repr(j)+idx)
elif type(exp) == sympy.Matrix:
num_e = num_e + len(exp)
for j, k in enumerate(mi):
lexp.append(exp[k])
lname.append(varOut+midx[j]+idx)
else:
num_e = num_e + 1
lexp.append(exp)
lname.append(varOut+idx)
for rhs in lname:
var_d=list(VAR_ENUM_TO_INPUT_SYM.keys())[list(VAR_ENUM_TO_INPUT_SYM.values()).index(VAR_ENUM_TO_OUTPUT_SYM[rhs])]
ofile.write("\t\t //ko dissipation for variable "+var_d+"\n\n")
for var in VAR_KO_TEMP:
ofile.write("\t\t "+FUNC_LOAD_VAR+"(&("+VAR_DERIV_WORKSPACE+"->__"+var+"_"+var_d+"[("+VAR_DERIV_WORKSPACE_OFFSET+")]),(double *) "+var+",(const unsigned int *) "+VAR_TILE_LIMITS+",(const unsigned int *) "+VAR_DENDRO_BLK_ALIGNED_SZ+",(const unsigned int *) "+VAR_TILE_SZ+");\n")
#ofile.write("\t\t "+FUNC_LOAD_VAR+"(&"+VAR_UNZIP_IN+"["+VAR_ENUM_TO_INPUT_SYM[var_d]+"][offset],(double *) "+VAR_KO_TEMP_RHS[0]+",(const unsigned int *) "+VAR_TILE_LIMITS+",(const unsigned int *) "+VAR_DENDRO_BLK_ALIGNED_SZ+",(const unsigned int *) "+VAR_TILE_SZ+");\n")
ofile.write("\t\t "+FUNC_LOAD_VAR+"(&"+VAR_UNZIP_OUT+"["+VAR_ENUM_TO_OUTPUT_SYM[rhs]+"][offset],(double *) "+VAR_KO_TEMP_RHS[0]+",(const unsigned int *) "+VAR_TILE_LIMITS+",(const unsigned int *) "+VAR_DENDRO_BLK_ALIGNED_SZ+",(const unsigned int *) "+VAR_TILE_SZ+");\n")
ofile.write("\t\t __syncthreads();\n\n")
ofile.write("\t\tif(!(("+VAR_TRD_ID_X+">("+VAR_TILE_LIMITS+"[1]-"+VAR_TILE_LIMITS+"[0])) || ("+VAR_TRD_ID_Y+">("+VAR_TILE_LIMITS+"[3]-"+VAR_TILE_LIMITS+"[2]))) ){ \n\n")
ofile.write("\t\t pp=0*"+VAR_TILE_SZ+"[0]*"+VAR_TILE_SZ+"[1]+"+VAR_TRD_ID_Y+"*"+VAR_TILE_SZ+"[1]+"+VAR_TRD_ID_X+";\n")
ofile.write("\t\t for(unsigned int k=0;k<=(ijk_lm[5]-ijk_lm[4]);++k,pp+="+VAR_TILE_SZ+"[0]*"+VAR_TILE_SZ+"[1]){\n")
ofile.write("\t\t "+VAR_KO_TEMP_RHS[0]+"[pp] += kosigma * ("+VAR_KO_TEMP[0]+"[pp] +"+VAR_KO_TEMP[1]+"[pp] + "+VAR_KO_TEMP[2]+"[pp]);\n")
ofile.write("\t\t } //loop z end \n")
ofile.write("\t\t}// end of the if for the thread idx \n")
ofile.write("\t\t__syncthreads();\n\n")
ofile.write("\t\t// sotre computed variables\n\n")
ofile.write("\t\t"+FUNC_STORE_VAR+"("+VAR_KO_TEMP_RHS[0]+", &"+VAR_UNZIP_OUT+"["+VAR_ENUM_TO_OUTPUT_SYM[rhs]+"][offset],(const unsigned int *) "+VAR_TILE_LIMITS+",(const unsigned int *) "+VAR_DENDRO_BLK_ALIGNED_SZ+", (const unsigned int *) "+VAR_TILE_LIMITS_STORE+",(const unsigned int *) "+VAR_TILE_SZ+");\n")
ofile.write("\t\t__syncthreads();\n\n")
ofile.write("\t } // end of block assigned to gpu block loop x \n\n")
ofile.write("\t } // end of block assigned to gpu block loop y \n\n")
ofile.write("\t} // end of block assigned to gpu block loop z \n\n")
ofile.write("}// end of function "+FUNC_KO_DISS+"\n")
ofile.write("}// end of namespace cuda\n")
ofile.close()
def main():
    """Drive CUDA code generation for the BSSN right-hand side.

    Builds the derivative-call strings and the list of `Derivative`
    descriptors used by the generator, then invokes `cudaCompute` to emit
    the header/source pair for the GPU RHS kernels.  All names such as
    VAR_OUT_SHARED_0, VAR_TILE_LIMITS, cudaCompute, bssn, etc. are
    module-level definitions outside this function.
    """
    # shared derivs
    '''
    dxn = "grad_0"
    dxxn = "grad2_0_0"
    dyn = "grad_1"
    dyyn = "grad2_1_1"
    dzn = "grad_2"
    dzzn = "grad2_2_2"
    dxyn = "grad2_0_1"
    dxzn = "grad2_0_2"
    dyzn = "grad2_1_2"
    adxn = "agrad_0"
    adyn = "agrad_1"
    adzn = "agrad_2"
    kodxn = "kograd_0"
    kodyn = "kograd_1"
    kodzn = "kograd_2"
    '''
    # All first/second/advective/KO derivative outputs are routed to the
    # first shared-memory buffer; mixed second derivatives (xy, xz, yz) go
    # to the second buffer because they consume the first buffer (dxn/dyn)
    # as their input — see func_dxy/func_dxz/func_dyz below.
    dxn = VAR_OUT_SHARED_0
    dxxn = VAR_OUT_SHARED_0
    dyn = VAR_OUT_SHARED_0
    dyyn = VAR_OUT_SHARED_0
    dzn = VAR_OUT_SHARED_0
    dzzn = VAR_OUT_SHARED_0
    dxyn = VAR_OUT_SHARED_1
    dxzn = VAR_OUT_SHARED_1
    dyzn = VAR_OUT_SHARED_1
    adxn = VAR_OUT_SHARED_0
    adyn = VAR_OUT_SHARED_0
    adzn = VAR_OUT_SHARED_0
    kodxn = VAR_OUT_SHARED_0
    kodyn = VAR_OUT_SHARED_0
    kodzn = VAR_OUT_SHARED_0
    # C call strings for the 4th-order (42) finite-difference stencils.
    # These are emitted verbatim into the generated CUDA source, so every
    # character matters.  First derivatives read the shared input buffer.
    func_dx="deriv42_x((double *) "+dxn+",(const double *) "+VAR_IN_SHARED+",dx, (const unsigned int *) "+VAR_TILE_LIMITS+" , (const unsigned int *) "+VAR_DENDRO_BLK_ALIGNED_SZ+" , (const unsigned int *) "+VAR_TILE_SZ+", 3, bflag);"
    func_dy="deriv42_y((double *) "+dyn+",(const double *) "+VAR_IN_SHARED+",dy, (const unsigned int *) "+VAR_TILE_LIMITS+" , (const unsigned int *) "+VAR_DENDRO_BLK_ALIGNED_SZ+" , (const unsigned int *) "+VAR_TILE_SZ+", 3, bflag);"
    func_dz="deriv42_z((double *) "+dzn+",(const double *) "+VAR_IN_SHARED+",dz, (const unsigned int *) "+VAR_TILE_LIMITS+" , (const unsigned int *) "+VAR_DENDRO_BLK_ALIGNED_SZ+" , (const unsigned int *) "+VAR_TILE_SZ+", 3, bflag);"
    # Pure second derivatives also read the shared input buffer.
    func_dxx="deriv42_xx((double *) "+dxxn+",(const double *) "+VAR_IN_SHARED+",dx, (const unsigned int *) "+VAR_TILE_LIMITS+" , (const unsigned int *) "+VAR_DENDRO_BLK_ALIGNED_SZ+" , (const unsigned int *) "+VAR_TILE_SZ+", 3, bflag);"
    # Mixed second derivatives read a previously computed first derivative
    # (dxn or dyn) instead of the raw input.
    func_dxy="deriv42_y((double *) "+dxyn+",(const double *) "+dxn+",dy, (const unsigned int *) "+VAR_TILE_LIMITS+" , (const unsigned int *) "+VAR_DENDRO_BLK_ALIGNED_SZ+" , (const unsigned int *) "+VAR_TILE_SZ+", 3, bflag);"
    func_dxz="deriv42_z((double *) "+dxzn+",(const double *) "+dxn+",dz, (const unsigned int *) "+VAR_TILE_LIMITS+" , (const unsigned int *) "+VAR_DENDRO_BLK_ALIGNED_SZ+" , (const unsigned int *) "+VAR_TILE_SZ+", 3, bflag);"
    func_dyy="deriv42_yy((double *) "+dyyn+",(const double *) "+VAR_IN_SHARED+",dy, (const unsigned int *) "+VAR_TILE_LIMITS+" , (const unsigned int *) "+VAR_DENDRO_BLK_ALIGNED_SZ+" , (const unsigned int *) "+VAR_TILE_SZ+", 3, bflag);"
    func_dyz="deriv42_z((double *) " +dyzn+",(const double *) "+dyn+",dz, (const unsigned int *) "+VAR_TILE_LIMITS+" , (const unsigned int *) "+VAR_DENDRO_BLK_ALIGNED_SZ+" , (const unsigned int *) "+VAR_TILE_SZ+", 3, bflag);"
    func_dzz="deriv42_zz((double *) "+dzzn+",(const double *) "+VAR_IN_SHARED+",dz, (const unsigned int *) "+VAR_TILE_LIMITS+" , (const unsigned int *) "+VAR_DENDRO_BLK_ALIGNED_SZ+" , (const unsigned int *) "+VAR_TILE_SZ+", 3, bflag);"
    # Advective (upwinded) derivatives take an extra per-direction shift
    # direction flag array (VAR_BETA*_BOOL).
    func_adx="deriv42adv_x((double *) "+adxn+",(const double *) "+VAR_IN_SHARED+",dx, (const unsigned int *) "+VAR_TILE_LIMITS+" , (const unsigned int *) "+VAR_DENDRO_BLK_ALIGNED_SZ+" , (const unsigned int *) "+VAR_TILE_SZ+", (const bool*) "+ VAR_BETA0_BOOL+" , 3, bflag);"
    func_ady="deriv42adv_y((double *) "+adyn+",(const double *) "+VAR_IN_SHARED+",dy, (const unsigned int *) "+VAR_TILE_LIMITS+" , (const unsigned int *) "+VAR_DENDRO_BLK_ALIGNED_SZ+" , (const unsigned int *) "+VAR_TILE_SZ+", (const bool*) "+ VAR_BETA1_BOOL+" , 3, bflag);"
    func_adz="deriv42adv_z((double *) "+adzn+",(const double *) "+VAR_IN_SHARED+",dz, (const unsigned int *) "+VAR_TILE_LIMITS+" , (const unsigned int *) "+VAR_DENDRO_BLK_ALIGNED_SZ+" , (const unsigned int *) "+VAR_TILE_SZ+", (const bool*) "+ VAR_BETA2_BOOL+" , 3, bflag);"
    # Kreiss-Oliger dissipation derivatives.
    func_kodx="ko_deriv42_x((double *) "+kodxn+",(const double *) "+VAR_IN_SHARED+",dx,(const unsigned int *) "+VAR_TILE_LIMITS+" , (const unsigned int *) "+VAR_DENDRO_BLK_ALIGNED_SZ+" , (const unsigned int *) "+VAR_TILE_SZ+", 3, bflag);"
    func_kody="ko_deriv42_y((double *) "+kodyn+",(const double *) "+VAR_IN_SHARED+",dy,(const unsigned int *) "+VAR_TILE_LIMITS+" , (const unsigned int *) "+VAR_DENDRO_BLK_ALIGNED_SZ+" , (const unsigned int *) "+VAR_TILE_SZ+", 3, bflag);"
    func_kodz="ko_deriv42_z((double *) "+kodzn+",(const double *) "+VAR_IN_SHARED+",dz,(const unsigned int *) "+VAR_TILE_LIMITS+" , (const unsigned int *) "+VAR_DENDRO_BLK_ALIGNED_SZ+" , (const unsigned int *) " +VAR_TILE_SZ+", 3, bflag);"
    ## number of passes for cuda derivatives.
    # Descriptor for one derivative pass: type ("d"/"dd"/"ad"/"ko"),
    # direction, generated-name fields, tile extent, input/output buffer
    # names, loop bounds (IB..KE), pad width, and the C call string.
    Derivative = namedtuple("Derivative", "DerivType DerivDir DerivName DerivTile1D DerivInput DerivOutput IB IE JB JE KB KE padWidth DerivFuncCall")
    ####
    ## Since the block shared memory is not enough to compute the all the derivs (15) for a given variable,
    ## we use multiple passes of deriv computations.
    ##
    ## We assume that the deriv TILE is cubic, for simplicity !!!
    ##
    ### !!!!!!! NOTE: WHEN SPECIFYING THE TILE SZ MAKE SURE YOU HAVE 5 POINTS FOR ONE SIDED DERIVS, WHEN THE TILE LOAD THE BLOCK IN THE ITERATIONS
    TileSz1D=12
    # The 15 derivative passes needed per evolved variable.  Mixed second
    # derivatives come last because they consume first-derivative outputs.
    bssn_derivs=[
        Derivative(DerivType="d",DerivDir="x",DerivName="deriv_x",DerivTile1D=TileSz1D,DerivInput=VAR_IN_SHARED,DerivOutput="grad_0",IB=3,IE=-3,JB=1,JE=-1,KB=1,KE=-1,padWidth=3,DerivFuncCall="_RSWS_"+func_dx),
        Derivative(DerivType="d",DerivDir="y",DerivName="deriv_y",DerivTile1D=TileSz1D,DerivInput=VAR_IN_SHARED,DerivOutput="grad_1",IB=3,IE=-3,JB=3,JE=-3,KB=1,KE=-1,padWidth=3,DerivFuncCall="_RSWS_"+func_dy),
        Derivative(DerivType="d",DerivDir="z",DerivName="deriv_z",DerivTile1D=TileSz1D,DerivInput=VAR_IN_SHARED,DerivOutput="grad_2",IB=3,IE=-3,JB=3,JE=-3,KB=3,KE=-3,padWidth=3,DerivFuncCall="_RSWS_"+func_dz),
        Derivative(DerivType="dd",DerivDir="xx",DerivName="deriv_xx",DerivTile1D=TileSz1D,DerivInput=VAR_IN_SHARED,DerivOutput="grad2_0_0",IB=3,IE=-3,JB=3,JE=-3,KB=3,KE=-3,padWidth=3,DerivFuncCall="_RSWS_"+func_dxx),
        Derivative(DerivType="dd",DerivDir="yy",DerivName="deriv_yy",DerivTile1D=TileSz1D,DerivInput=VAR_IN_SHARED,DerivOutput="grad2_1_1",IB=3,IE=-3,JB=3,JE=-3,KB=3,KE=-3,padWidth=3,DerivFuncCall="_RSWS_"+func_dyy),
        Derivative(DerivType="dd",DerivDir="zz",DerivName="deriv_zz",DerivTile1D=TileSz1D,DerivInput=VAR_IN_SHARED,DerivOutput="grad2_2_2",IB=3,IE=-3,JB=3,JE=-3,KB=3,KE=-3,padWidth=3,DerivFuncCall="_RSWS_"+func_dzz),
        Derivative(DerivType="ko",DerivDir="x",DerivName="ko_deriv_x",DerivTile1D=TileSz1D,DerivInput=VAR_IN_SHARED,DerivOutput="kograd_0",IB=3,IE=-3,JB=3,JE=-3,KB=3,KE=-3,padWidth=3,DerivFuncCall="_RSWS_"+func_kodx),
        Derivative(DerivType="ko",DerivDir="y",DerivName="ko_deriv_y",DerivTile1D=TileSz1D,DerivInput=VAR_IN_SHARED,DerivOutput="kograd_1",IB=3,IE=-3,JB=3,JE=-3,KB=3,KE=-3,padWidth=3,DerivFuncCall="_RSWS_"+func_kody),
        Derivative(DerivType="ko",DerivDir="z",DerivName="ko_deriv_z",DerivTile1D=TileSz1D,DerivInput=VAR_IN_SHARED,DerivOutput="kograd_2",IB=3,IE=-3,JB=3,JE=-3,KB=3,KE=-3,padWidth=3,DerivFuncCall="_RSWS_"+func_kodz),
        Derivative(DerivType="ad",DerivDir="x",DerivName="adv_deriv_x",DerivTile1D=TileSz1D,DerivInput=VAR_IN_SHARED,DerivOutput="agrad_0",IB=3,IE=-3,JB=3,JE=-3,KB=3,KE=-3,padWidth=3,DerivFuncCall="_RSWS_"+func_adx),
        Derivative(DerivType="ad",DerivDir="y",DerivName="adv_deriv_y",DerivTile1D=TileSz1D,DerivInput=VAR_IN_SHARED,DerivOutput="agrad_1",IB=3,IE=-3,JB=3,JE=-3,KB=3,KE=-3,padWidth=3,DerivFuncCall="_RSWS_"+func_ady),
        Derivative(DerivType="ad",DerivDir="z",DerivName="adv_deriv_z",DerivTile1D=TileSz1D,DerivInput=VAR_IN_SHARED,DerivOutput="agrad_2",IB=3,IE=-3,JB=3,JE=-3,KB=3,KE=-3,padWidth=3,DerivFuncCall="_RSWS_"+func_adz),
        Derivative(DerivType="dd",DerivDir="xy",DerivName="deriv_xy",DerivTile1D=TileSz1D,DerivInput=dxn,DerivOutput="grad2_0_1",IB=3,IE=-3,JB=3,JE=-3,KB=1,KE=-1,padWidth=3,DerivFuncCall="_RSWS_"+func_dxy),
        Derivative(DerivType="dd",DerivDir="xz",DerivName="deriv_xz",DerivTile1D=TileSz1D,DerivInput=dxn,DerivOutput="grad2_0_2",IB=3,IE=-3,JB=3,JE=-3,KB=3,KE=-3,padWidth=3,DerivFuncCall="_RSWS_"+func_dxz),
        Derivative(DerivType="dd",DerivDir="yz",DerivName="deriv_yz",DerivTile1D=TileSz1D,DerivInput=dyn,DerivOutput="grad2_1_2",IB=3,IE=-3,JB=3,JE=-3,KB=3,KE=-3,padWidth=3,DerivFuncCall="_RSWS_"+func_dyz)
        ]
    #cudaDerivAllocDeallocHeader("../bssn/cuda_gr/include/bssn_rhs_deriv_mem_cuda.h")
    #cudaDerivAllocDeallocSource("../bssn/cuda_gr/src/bssn_rhs_deriv_mem_cuda.cpp",["bssn_rhs_deriv_mem_cuda.h"])
    # Full expression/variable sets from the bssn module; the commented
    # slices were used to restrict generation while debugging.
    subset_exp=bssn.outs#[0:4]
    subset_var=bssn.vnames#[0:4]
    # Emit the CUDA header and source for the BSSN RHS kernels.
    cudaCompute("../bssn/cuda_gr/include/rhs_bssn.cuh","../bssn/cuda_gr/src/rhs_bssn.cu",bssn_derivs,subset_exp,subset_var,"__computeBSSNRHS",["block_cu.h","params_cu.h","bssn_rhs_deriv_mem_cuda.h","cudaUtils.cuh","derivs.cuh","cudaUtils.h"])
if __name__ == "__main__":
main()
| 56.178546
| 515
| 0.593531
| 11,219
| 79,605
| 3.94126
| 0.052768
| 0.108782
| 0.100753
| 0.042879
| 0.801592
| 0.777439
| 0.752018
| 0.732886
| 0.718231
| 0.692132
| 0
| 0.018192
| 0.209359
| 79,605
| 1,416
| 516
| 56.21822
| 0.684345
| 0.059029
| 0
| 0.560496
| 1
| 0.009307
| 0.330175
| 0.04515
| 0
| 0
| 0
| 0
| 0
| 1
| 0.005171
| false
| 0
| 0.010341
| 0
| 0.015512
| 0.009307
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
eff4b6fabb2796ca12858b6ff5d04ad67c5cc7b5
| 200
|
py
|
Python
|
src/UQpy/dimension_reduction/__init__.py
|
SURGroup/UncertaintyQuantification
|
a94c8db47d07134ea2b3b0a3ca53ca818532c3e6
|
[
"MIT"
] | null | null | null |
src/UQpy/dimension_reduction/__init__.py
|
SURGroup/UncertaintyQuantification
|
a94c8db47d07134ea2b3b0a3ca53ca818532c3e6
|
[
"MIT"
] | null | null | null |
src/UQpy/dimension_reduction/__init__.py
|
SURGroup/UncertaintyQuantification
|
a94c8db47d07134ea2b3b0a3ca53ca818532c3e6
|
[
"MIT"
] | null | null | null |
from UQpy.dimension_reduction.grassmann_manifold import *
from UQpy.dimension_reduction.pod import *
from UQpy.dimension_reduction.hosvd import *
from UQpy.dimension_reduction.diffusion_maps import *
| 40
| 57
| 0.86
| 26
| 200
| 6.384615
| 0.423077
| 0.192771
| 0.409639
| 0.626506
| 0.578313
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.08
| 200
| 4
| 58
| 50
| 0.902174
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
4bfcd4d8d46c263e7d31a85670b6c126d12503ac
| 7,290
|
py
|
Python
|
babilim/model/layers/convolution.py
|
penguinmenac3/babilim
|
d3b1dd7c38a9de8f1e553cc5c0b2dfa62fe25c27
|
[
"MIT"
] | 1
|
2020-05-04T15:20:55.000Z
|
2020-05-04T15:20:55.000Z
|
babilim/model/layers/convolution.py
|
penguinmenac3/babilim
|
d3b1dd7c38a9de8f1e553cc5c0b2dfa62fe25c27
|
[
"MIT"
] | 1
|
2019-11-28T09:03:20.000Z
|
2019-11-28T09:03:20.000Z
|
babilim/model/layers/convolution.py
|
penguinmenac3/babilim
|
d3b1dd7c38a9de8f1e553cc5c0b2dfa62fe25c27
|
[
"MIT"
] | 1
|
2019-11-28T08:30:13.000Z
|
2019-11-28T08:30:13.000Z
|
# AUTOGENERATED FROM: babilim/model/layers/convolution.ipynb
# Cell: 0
"""doc
# babilim.model.layers.convolution
> Convolution for 1d and 2d.
"""
# Cell: 1
from typing import Optional, Any, Tuple
from babilim.core.annotations import RunOnlyOnce
from babilim.core.module_native import ModuleNative
from babilim.model.layers.activation import Activation
# Cell: 2
class Conv1D(ModuleNative):
def __init__(self, filters: int, kernel_size: int, padding: Optional[str] = None, stride: int = 1, dilation_rate: int = 1, kernel_initializer: Optional[Any] = None, activation=None):
"""
A 1d convolution layer.
:param filters: The number of filters in the convolution. Defines the number of output channels.
:param kernel_size: The kernel size of the convolution. Defines the area over which is convolved. Typically 1, 3 or 5 are recommended.
:param padding: What type of padding should be applied. The string "none" means no padding is applied, None or "same" means the input is padded in a way that the output stays the same size if no stride is applied.
:param stride: The offset between two convolutions that are applied. Typically 1. Stride affects also the resolution of the output feature map. A stride 2 halves the resolution, since convolutions are only applied every odd pixel.
:param dilation_rate: The dilation rate for a convolution.
:param kernel_initializer: A kernel initializer function. By default orthonormal weight initialization is used.
:param activation: The activation function that should be added after the dense layer.
"""
super().__init__()
self.filters = filters
self.kernel_size = kernel_size
self.padding = padding
self.dilation = dilation_rate
self.stride = stride
self.kernel_initializer = kernel_initializer
self.activation = Activation(activation, axis=1)
@RunOnlyOnce
def _build_pytorch(self, features):
import torch
from torch.nn import Conv1d as _Conv1d
if self.kernel_initializer is None:
from torch.nn.init import orthogonal_
self.kernel_initializer = orthogonal_
if self.padding == "same" or self.padding is None:
self.padding = int((self.kernel_size - 1) / 2)
elif self.padding == "none":
self.padding = 0
else:
raise NotImplementedError("Padding {} is not implemented.".format(padding))
in_channels = features.shape[1]
self.conv = _Conv1d(in_channels, self.filters, self.kernel_size, self.stride, self.padding, self.dilation)
self.conv.weight.data = self.kernel_initializer(self.conv.weight.data)
if torch.cuda.is_available():
self.conv = self.conv.to(torch.device("cuda")) # TODO move to correct device
from babilim.core.tensor_pt import Tensor as _Tensor
self.weight = _Tensor(data=None, trainable=True, native=self.conv.weight)
self.bias = _Tensor(data=None, trainable=True, native=self.conv.bias)
def _call_pytorch(self, features):
return self.activation(self.conv(features))
@RunOnlyOnce
def _build_tf(self, features):
#TODO Implement
raise NotImplementedError()
def _call_tf(self, features):
#TODO Implement
raise NotImplementedError()
# Cell: 3
class Conv2D(ModuleNative):
def __init__(self, filters: int, kernel_size: Tuple[int, int], padding: Optional[str] = None, strides: Tuple[int, int] = (1, 1), dilation_rate: Tuple[int, int] = (1, 1), kernel_initializer: Optional[Any] = None, activation=None):
"""
A 2d convolution layer.
:param filters: The number of filters in the convolution. Defines the number of output channels.
:param kernel_size: The kernel size of the convolution. Defines the area over which is convolved. Typically (1,1) (3,3) or (5,5) are recommended.
:param padding: What type of padding should be applied. The string "none" means no padding is applied, None or "same" means the input is padded in a way that the output stays the same size if no stride is applied.
:param stride: The offset between two convolutions that are applied. Typically (1, 1). Stride affects also the resolution of the output feature map. A stride 2 halves the resolution, since convolutions are only applied every odd pixel.
:param dilation_rate: The dilation rate for a convolution.
:param kernel_initializer: A kernel initializer function. By default orthonormal weight initialization is used.
:param activation: The activation function that should be added after the dense layer.
"""
super().__init__()
self.filters = filters
self.kernel_size = kernel_size
self.padding = padding
self.dilation = dilation_rate
self.stride = strides
self.kernel_initializer = kernel_initializer
self.activation = Activation(activation)
@RunOnlyOnce
def _build_pytorch(self, features):
import torch
from torch.nn import Conv2d as _Conv2d
if self.kernel_initializer is None:
from torch.nn.init import orthogonal_
self.kernel_initializer = orthogonal_
if self.padding == "same" or self.padding is None:
px = int((self.kernel_size[0] - 1) / 2)
py = int((self.kernel_size[1] - 1) / 2)
self.padding = (px, py)
elif self.padding == "none":
self.padding = (0, 0)
else:
raise NotImplementedError("Padding {} is not implemented.".format(padding))
in_channels = features.shape[1]
self.conv = _Conv2d(in_channels, self.filters, self.kernel_size, self.stride, self.padding, self.dilation)
self.conv.weight.data = self.kernel_initializer(self.conv.weight.data)
if torch.cuda.is_available():
self.conv = self.conv.to(torch.device("cuda")) # TODO move to correct device
from babilim.core.tensor_pt import Tensor as _Tensor
self.weight = _Tensor(data=None, trainable=True, native=self.conv.weight)
self.bias = _Tensor(data=None, trainable=True, native=self.conv.bias)
def _call_pytorch(self, features):
return self.activation(self.conv(features))
@RunOnlyOnce
def _build_tf(self, features):
from tensorflow.keras.layers import Conv2D as _Conv2D
if self.kernel_initializer is None:
from tensorflow.keras.initializers import Orthogonal
self.kernel_initializer = Orthogonal()
if self.padding is None:
self.padding = "same"
self.conv = _Conv2D(filters=self.filters, kernel_size=self.kernel_size, strides=self.stride, dilation_rate=self.dilation_rate, padding=self.padding, activation=None, kernel_initializer=self.kernel_initializer)
self.conv.build(features.shape)
from babilim.core.tensor_tf import Tensor as _Tensor
self.weight = _Tensor(data=None, trainable=True, native=self.conv.kernel)
self.bias = _Tensor(data=None, trainable=True, native=self.conv.bias)
def _call_tf(self, features):
raise self.activation(self.conv(features))
| 51.702128
| 243
| 0.684499
| 947
| 7,290
| 5.161563
| 0.155227
| 0.03437
| 0.047259
| 0.028232
| 0.852087
| 0.813011
| 0.807283
| 0.7768
| 0.759206
| 0.697831
| 0
| 0.009454
| 0.231001
| 7,290
| 140
| 244
| 52.071429
| 0.862469
| 0.29904
| 0
| 0.652174
| 1
| 0
| 0.017854
| 0
| 0
| 0
| 0
| 0.014286
| 0
| 1
| 0.108696
| false
| 0
| 0.163043
| 0.021739
| 0.315217
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
ef2f05ba7a7c491439a6a7b86b89eb5f2087bf95
| 795
|
py
|
Python
|
tests/test_provider_bltavares_zerotier.py
|
mjuenema/python-terrascript
|
6d8bb0273a14bfeb8ff8e950fe36f97f7c6e7b1d
|
[
"BSD-2-Clause"
] | 507
|
2017-07-26T02:58:38.000Z
|
2022-01-21T12:35:13.000Z
|
tests/test_provider_bltavares_zerotier.py
|
mjuenema/python-terrascript
|
6d8bb0273a14bfeb8ff8e950fe36f97f7c6e7b1d
|
[
"BSD-2-Clause"
] | 135
|
2017-07-20T12:01:59.000Z
|
2021-10-04T22:25:40.000Z
|
tests/test_provider_bltavares_zerotier.py
|
mjuenema/python-terrascript
|
6d8bb0273a14bfeb8ff8e950fe36f97f7c6e7b1d
|
[
"BSD-2-Clause"
] | 81
|
2018-02-20T17:55:28.000Z
|
2022-01-31T07:08:40.000Z
|
# tests/test_provider_bltavares_zerotier.py
# Automatically generated by tools/makecode.py (24-Sep-2021 15:31:25 UTC)
def test_provider_import():
import terrascript.provider.bltavares.zerotier
def test_resource_import():
from terrascript.resource.bltavares.zerotier import zerotier_member
from terrascript.resource.bltavares.zerotier import zerotier_network
# TODO: Shortcut imports without namespace for official and supported providers.
# TODO: This has to be moved into a required_providers block.
# def test_version_source():
#
# import terrascript.provider.bltavares.zerotier
#
# t = terrascript.provider.bltavares.zerotier.zerotier()
# s = str(t)
#
# assert 'https://github.com/bltavares/terraform-provider-zerotier' in s
# assert '0.3.0' in s
| 29.444444
| 80
| 0.761006
| 103
| 795
| 5.757282
| 0.553398
| 0.172007
| 0.168634
| 0.182125
| 0.323777
| 0.182125
| 0.182125
| 0
| 0
| 0
| 0
| 0.022157
| 0.148428
| 795
| 26
| 81
| 30.576923
| 0.853767
| 0.638994
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0.038462
| 0
| 1
| 0.4
| true
| 0
| 1
| 0
| 1.4
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
ef4e9fc07b282bbcf421d54c15af9310733a3b4b
| 4,578
|
py
|
Python
|
tests/terraform/checks/resource/azure/test_AzureFrontDoorEnablesWAF.py
|
kylelaker/checkov
|
6eada26030a87f397a6bf1831827b3dc6c5dad2d
|
[
"Apache-2.0"
] | 4,013
|
2019-12-09T13:16:54.000Z
|
2022-03-31T14:31:01.000Z
|
tests/terraform/checks/resource/azure/test_AzureFrontDoorEnablesWAF.py
|
kylelaker/checkov
|
6eada26030a87f397a6bf1831827b3dc6c5dad2d
|
[
"Apache-2.0"
] | 1,258
|
2019-12-17T09:55:51.000Z
|
2022-03-31T19:17:17.000Z
|
tests/terraform/checks/resource/azure/test_AzureFrontDoorEnablesWAF.py
|
kylelaker/checkov
|
6eada26030a87f397a6bf1831827b3dc6c5dad2d
|
[
"Apache-2.0"
] | 638
|
2019-12-19T08:57:38.000Z
|
2022-03-30T21:38:37.000Z
|
import unittest
import hcl2
from checkov.terraform.checks.resource.azure.AzureFrontDoorEnablesWAF import check
from checkov.common.models.enums import CheckResult
class TestAzureFrontDoorEnablesWAF(unittest.TestCase):
def test_failure(self):
hcl_res = hcl2.loads("""
resource "azurerm_frontdoor" "example" {
name = "example-FrontDoor"
location = "EastUS2"
resource_group_name = azurerm_resource_group.example.name
enforce_backend_pools_certificate_name_check = false
routing_rule {
name = "exampleRoutingRule1"
accepted_protocols = ["Http", "Https"]
patterns_to_match = ["/*"]
frontend_endpoints = ["exampleFrontendEndpoint1"]
forwarding_configuration {
forwarding_protocol = "MatchRequest"
backend_pool_name = "exampleBackendBing"
}
}
backend_pool_load_balancing {
name = "exampleLoadBalancingSettings1"
}
backend_pool_health_probe {
name = "exampleHealthProbeSetting1"
}
backend_pool {
name = "exampleBackendBing"
backend {
host_header = "www.bing.com"
address = "www.bing.com"
http_port = 80
https_port = 443
}
load_balancing_name = "exampleLoadBalancingSettings1"
health_probe_name = "exampleHealthProbeSetting1"
}
frontend_endpoint {
name = "exampleFrontendEndpoint1"
host_name = "example-FrontDoor.azurefd.net"
custom_https_provisioning_enabled = false
}
}
""")
resource_conf = hcl_res['resource'][0]['azurerm_frontdoor']['example']
scan_result = check.scan_resource_conf(conf=resource_conf)
self.assertEqual(CheckResult.FAILED, scan_result)
def test_success(self):
hcl_res = hcl2.loads("""
resource "azurerm_frontdoor" "example" {
name = "example-FrontDoor"
location = "EastUS2"
resource_group_name = azurerm_resource_group.example.name
enforce_backend_pools_certificate_name_check = false
web_application_firewall_policy_link_id = "this_is_id"
routing_rule {
name = "exampleRoutingRule1"
accepted_protocols = ["Http", "Https"]
patterns_to_match = ["/*"]
frontend_endpoints = ["exampleFrontendEndpoint1"]
forwarding_configuration {
forwarding_protocol = "MatchRequest"
backend_pool_name = "exampleBackendBing"
}
}
backend_pool_load_balancing {
name = "exampleLoadBalancingSettings1"
}
backend_pool_health_probe {
name = "exampleHealthProbeSetting1"
}
backend_pool {
name = "exampleBackendBing"
backend {
host_header = "www.bing.com"
address = "www.bing.com"
http_port = 80
https_port = 443
}
load_balancing_name = "exampleLoadBalancingSettings1"
health_probe_name = "exampleHealthProbeSetting1"
}
frontend_endpoint {
name = "exampleFrontendEndpoint1"
host_name = "example-FrontDoor.azurefd.net"
custom_https_provisioning_enabled = false
}
}
""")
resource_conf = hcl_res['resource'][0]['azurerm_frontdoor']['example']
scan_result = check.scan_resource_conf(conf=resource_conf)
self.assertEqual(CheckResult.PASSED, scan_result)
if __name__ == '__main__':
unittest.main()
| 39.128205
| 96
| 0.490826
| 306
| 4,578
| 6.980392
| 0.310458
| 0.041199
| 0.043071
| 0.061798
| 0.85206
| 0.85206
| 0.85206
| 0.85206
| 0.85206
| 0.85206
| 0
| 0.012205
| 0.445173
| 4,578
| 116
| 97
| 39.465517
| 0.82874
| 0
| 0
| 0.708333
| 0
| 0
| 0.838576
| 0.18851
| 0
| 0
| 0
| 0
| 0.020833
| 1
| 0.020833
| false
| 0.010417
| 0.041667
| 0
| 0.072917
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
322d79aad7f5d87cfa794f7179d95c58d29e0df4
| 3,296
|
py
|
Python
|
plot_creation_scripts/set_making_weight_non_negative_before_cen_cal/set_making_weights_non_negative_before_cen_cal_dis_histograms.py
|
andrewjh9/CenBench
|
afd960b77ade05be2d2368bed3b47d54f7e229b6
|
[
"MIT"
] | null | null | null |
plot_creation_scripts/set_making_weight_non_negative_before_cen_cal/set_making_weights_non_negative_before_cen_cal_dis_histograms.py
|
andrewjh9/CenBench
|
afd960b77ade05be2d2368bed3b47d54f7e229b6
|
[
"MIT"
] | null | null | null |
plot_creation_scripts/set_making_weight_non_negative_before_cen_cal/set_making_weights_non_negative_before_cen_cal_dis_histograms.py
|
andrewjh9/CenBench
|
afd960b77ade05be2d2368bed3b47d54f7e229b6
|
[
"MIT"
] | null | null | null |
import matplotlib.pyplot as plt
import numpy as np
import tikzplotlib
read_dataset_0_fminst = np.genfromtxt('results/set_making_weight_non_negative_before_cen_cal/SET__fashion_mnist_for_200_epochs_20210604-081818_num_sd_None_cen_dis_lap_epoch_0__testing_set_make_weights_non_negative_before_lap_cen_cal.csv',delimiter='')
read_dataset_25_fminst = np.genfromtxt('results/set_making_weight_non_negative_before_cen_cal/SET__fashion_mnist_for_200_epochs_20210604-081818_num_sd_None_cen_dis_lap_epoch_25__testing_set_make_weights_non_negative_before_lap_cen_cal.csv',delimiter='')
read_dataset_50_fminst = np.genfromtxt('results/set_making_weight_non_negative_before_cen_cal/SET__fashion_mnist_for_200_epochs_20210604-081818_num_sd_None_cen_dis_lap_epoch_50__testing_set_make_weights_non_negative_before_lap_cen_cal.csv',delimiter='')
read_dataset_75_fminst = np.genfromtxt('results/set_making_weight_non_negative_before_cen_cal/SET__fashion_mnist_for_200_epochs_20210604-081818_num_sd_None_cen_dis_lap_epoch_75__testing_set_make_weights_non_negative_before_lap_cen_cal.csv',delimiter='')
read_dataset_100_fminst = np.genfromtxt('results/set_making_weight_non_negative_before_cen_cal/SET__fashion_mnist_for_200_epochs_20210604-081818_num_sd_None_cen_dis_lap_epoch_100__testing_set_make_weights_non_negative_before_lap_cen_cal.csv',delimiter='')
read_dataset_150_fminst = np.genfromtxt('results/set_making_weight_non_negative_before_cen_cal/SET__fashion_mnist_for_200_epochs_20210604-081818_num_sd_None_cen_dis_lap_epoch_150__testing_set_make_weights_non_negative_before_lap_cen_cal.csv',delimiter='')
read_dataset_125_fminst = np.genfromtxt('results/set_making_weight_non_negative_before_cen_cal/SET__fashion_mnist_for_200_epochs_20210604-081818_num_sd_None_cen_dis_lap_epoch_125__testing_set_make_weights_non_negative_before_lap_cen_cal.csv',delimiter='')
read_dataset_175_fminst = np.genfromtxt('results/set_making_weight_non_negative_before_cen_cal/SET__fashion_mnist_for_200_epochs_20210604-081818_num_sd_None_cen_dis_lap_epoch_175__testing_set_make_weights_non_negative_before_lap_cen_cal.csv',delimiter='')
# plt.hist(read_dataset_175_fminst , bins= np.arange(int(min(read_dataset_175_fminst)), max(read_dataset_175_fminst) + 0.5, 0.5), label="175")
# plt.hist(read_dataset_150_fminst , bins= np.arange(int(min(read_dataset_175_fminst)), max(read_dataset_175_fminst) + 0.5, 0.5), label="150")
# plt.hist(read_dataset_125_fminst , bins= np.arange(int(min(read_dataset_175_fminst)), int(max(read_dataset_125_fminst)) + 0.5, 0.5), label="125")
# plt.hist(read_dataset_100_fminst , bins= np.arange(int(min(read_dataset_175_fminst)), max(read_dataset_175_fminst) + 0.5, 0.5), label="100")
# plt.hist(read_dataset_75_fminst , bins= np.arange(int(min(read_dataset_175_fminst)), max(read_dataset_175_fminst) + 0.5, 0.5), label="75")
# plt.hist(read_dataset_50_fminst , bins= np.arange(int(min(read_dataset_175_fminst)), max(read_dataset_175_fminst) + 0.5, 0.5), label="50")
plt.hist(read_dataset_0_fminst , bins=10, label="0")
plt.legend( title="At Epoch[#]")
plt.xlabel("Laplacian centrality")
plt.ylabel("Frequency")
plt.title("Frequency Distribution of Centrality of Nodes with Making weight non negative")
plt.show()
# tikzplotlib.save("plots/tex/histogram_lap/cifar_250_epochs.tex")
| 99.878788
| 256
| 0.867112
| 559
| 3,296
| 4.490161
| 0.134168
| 0.118327
| 0.108367
| 0.103586
| 0.798406
| 0.798406
| 0.79243
| 0.79243
| 0.79243
| 0.79243
| 0
| 0.08631
| 0.040352
| 3,296
| 32
| 257
| 103
| 0.70724
| 0.276396
| 0
| 0
| 0
| 0
| 0.717291
| 0.667648
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.176471
| 0
| 0.176471
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
329fedc21fa62b0a53eeb32beb376d82f2851b0d
| 122
|
py
|
Python
|
axial_positional_embedding/__init__.py
|
lucidrains/axial-positional-embedding
|
fa6bee65ae45ce373004e33eea40a3625a126787
|
[
"MIT"
] | 43
|
2020-06-08T09:38:19.000Z
|
2022-03-17T02:58:26.000Z
|
axial_positional_embedding/__init__.py
|
lucidrains/axial-positional-embedding
|
fa6bee65ae45ce373004e33eea40a3625a126787
|
[
"MIT"
] | 2
|
2020-08-12T00:18:29.000Z
|
2021-05-02T02:42:35.000Z
|
axial_positional_embedding/__init__.py
|
lucidrains/axial-positional-embedding
|
fa6bee65ae45ce373004e33eea40a3625a126787
|
[
"MIT"
] | 5
|
2021-07-10T05:02:50.000Z
|
2021-12-14T15:00:03.000Z
|
from axial_positional_embedding.axial_positional_embedding import AxialPositionalEmbedding, AxialPositionalEmbeddingImage
| 61
| 121
| 0.942623
| 10
| 122
| 11.1
| 0.7
| 0.27027
| 0.432432
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.040984
| 122
| 1
| 122
| 122
| 0.948718
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 1
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 9
|
088e0dea06148c771774b461fd3d1aa5ea5bc774
| 22,830
|
py
|
Python
|
yyds/jd_scripts_check_dependence.py
|
demo2099/js
|
f0b3850fcc386e55241e2b9dc79c91032cbebadd
|
[
"MIT"
] | 15
|
2022-02-08T06:56:31.000Z
|
2022-03-23T05:21:27.000Z
|
yyds/jd_scripts_check_dependence.py
|
demo2099/js
|
f0b3850fcc386e55241e2b9dc79c91032cbebadd
|
[
"MIT"
] | null | null | null |
yyds/jd_scripts_check_dependence.py
|
demo2099/js
|
f0b3850fcc386e55241e2b9dc79c91032cbebadd
|
[
"MIT"
] | 33
|
2022-02-07T12:31:03.000Z
|
2022-03-21T06:42:33.000Z
|
# -*- coding:utf-8 -*-
# 作者仓库:https://jihulab.com/spiritlhl/qinglong_auto_tools.git
# 觉得不错麻烦点个star谢谢
# 频道:https://t.me/qinglong_auto_tools
'''
cron: 1
new Env('单容器 二叉树修复脚本依赖文件');
'''
import os, requests
import os.path
import time
# from os import popen
# 版本号 2.10.9 ,其他环境自测
# 只修复依赖文件(jdCookie.js那种)!!不修复环境依赖(pip install aiohttp)!!
# 默认不做任何操作只查询依赖脚本存在与否,有需求请在配置文件中配置对应变量进行操作,更新不会增加缺失文件
# 如果你有发现更多的脚本依赖文件没有新增,欢迎提交issues到https://jihulab.com/spiritlhl/dependence_scripts
# 增加缺失依赖文件(推荐)
# export ec_fix_dep="true"
# 更新老旧依赖文件(慎填,默认的依赖我使用的魔改版本,非必要别选)
# export ec_ref_dep="true"
# 2021.11.27 支持新版本仓库拉取的脚本目录结构,针对各个仓库进行依赖检索
txtx = "青龙配置文件中的config中填写下列变量启用对应功能\n\n增加缺失依赖文件(推荐)\n填写export ec_fix_dep=\"true\"\n更新老旧依赖文件(日常使用别填,默认的依赖我使用的魔改版本,非必要别选)\n如果选择使用请使用对应code文件等相关文件:https://jihulab.com/spiritlhl/dependence_config \n填写export ec_ref_dep=\"true\"\n"
print(txtx)
try:
if os.environ["ec_fix_dep"] == "true":
print("已配置依赖文件缺失修复\n")
fix = 1
else:
fix = 0
except:
fix = 0
print("#默认不修复缺失依赖文件,有需求")
print("#请在配置文件中配置\nexport ec_fix_dep=\"true\" \n#开启脚本依赖文件缺失修复\n")
try:
if os.environ["ec_ref_dep"] == "true":
print("已配置依赖文件老旧更新\n")
ref = 1
else:
ref = 0
except:
ref = 0
print("#默认不更新老旧依赖文件,有需求")
print("#请在配置文件中配置\nexport ec_re_dep=\"true\" #开启脚本依赖文件更新\n")
def traversalDir_FirstDir(path):
list = []
if (os.path.exists(path)):
files = os.listdir(path)
for file in files:
m = os.path.join(path, file)
if (os.path.isdir(m)):
h = os.path.split(m)
list.append(h[1])
print("文件夹名字有:")
print(list)
return list
def check_dependence(file_path):
try:
res = requests.get("https://jihulab.com/spiritlhl/dependence_scripts/-/raw/master/contents.json").json()
except:
print("网络波动,稍后尝试")
time.sleep(5)
try:
res = requests.get("https://jihulab.com/spiritlhl/dependence_scripts/-/raw/master/contents.json").json()
except:
print("网络问题无法获取仓库文件列表,终止检索")
return
dependence_scripts_name = []
for i in res:
dependence_scripts_name.append(i["name"])
if "db" in os.listdir("../"):
dir_list = os.listdir(file_path)
else:
dir_list = os.listdir("." + file_path)
# 查询
for i in dependence_scripts_name:
if i not in dir_list and i != "utils" and i != "function":
print("缺失文件 {}{}".format(file_path, i))
# 修补
try:
if fix == 1:
print("增加文件 {}{}".format(file_path, i))
r = requests.get(
"https://jihulab.com/spiritlhl/dependence_scripts/-/raw/master/" + i).text
if "db" in os.listdir("../"):
with open(file_path + i, "w", encoding="utf-8") as fe:
fe.write(r)
else:
with open("." + file_path + i, "w", encoding="utf-8") as fe:
fe.write(r)
except:
temp = 1
try:
if temp == 1:
print("未配置ec_fix_dep,默认不修复增加缺失的依赖文件")
except:
pass
# 更新
try:
if ref == 1:
for i in dependence_scripts_name:
if i != "utils" and i != "function":
if "db" in os.listdir("../"):
with open(i, "r", encoding="utf-8") as f:
r = requests.get(
"https://jihulab.com/spiritlhl/dependence_scripts/-/raw/master/" + i).text
d = f.read()
if r == d:
print("无需修改 {}".format(i))
else:
print("更新文件 {}".format(i))
with open(file_path + i, "w", encoding="utf-8") as fe:
fe.write(r)
else:
with open(i, "r", encoding="utf-8") as f:
r = requests.get(
"https://jihulab.com/spiritlhl/dependence_scripts/-/raw/master/" + i).text
d = f.read()
if r == d:
print("无需修改 {}".format(i))
else:
print("更新文件 {}".format(i))
with open("." + file_path + i, "w", encoding="utf-8") as fe:
fe.write(r)
except:
print("未配置ec_ref_dep,默认不更新依赖文件")
#########################################################################################################
# utils
try:
res = requests.get("https://jihulab.com/spiritlhl/dependence_scripts/-/raw/master/utils.json").json()
except:
print("网络波动,稍后尝试")
time.sleep(5)
try:
res = requests.get("https://jihulab.com/spiritlhl/dependence_scripts/-/raw/master/utils.json").json()
except:
print("网络问题无法获取仓库文件列表,终止检索")
return
dependence_scripts_utils = []
for i in res:
dependence_scripts_utils.append(i["name"])
try:
if "db" in os.listdir("../"):
utils_list = os.listdir(file_path + "utils")
else:
utils_list = os.listdir("." + file_path + "utils")
except:
if "db" in os.listdir("../"):
os.makedirs(file_path + "utils")
utils_list = os.listdir(file_path + "utils")
else:
os.makedirs("." + file_path + "utils")
utils_list = os.listdir("." + file_path + "utils")
# 查询
for i in dependence_scripts_utils:
if i not in utils_list and i != "utils" and i != "function":
print("缺失文件 {}utils/{}".format(file_path, i))
# 修补
try:
if fix == 1:
print("增加文件 {}utils/{}".format(file_path, i))
r = requests.get(
"https://jihulab.com/spiritlhl/dependence_scripts/-/raw/master/utils/" + i).text
if "db" in os.listdir("../"):
with open(file_path + "utils/" + i, "w", encoding="utf-8") as fe:
fe.write(r)
else:
with open("." + file_path + "utils/" + i, "w", encoding="utf-8") as fe:
fe.write(r)
except:
temp = 1
try:
if temp == 1:
print("未配置ec_fix_dep,默认不修复增加缺失的依赖文件")
except:
pass
# 更新
try:
if ref == 1:
for i in dependence_scripts_utils:
if i != "utils" and i != "function":
if "db" in os.listdir("../"):
with open(file_path + "utils/" + i, "r", encoding="utf-8") as f:
r = requests.get(
"https://jihulab.com/spiritlhl/dependence_scripts/-/raw/master/utils/" + i).text
d = f.read()
if r == d:
print("已存在文件 {}utils/{}".format(file_path, i))
else:
print("更新文件 {}utils/{}".format(file_path, i))
with open(file_path + "utils/" + i, "w", encoding="utf-8") as fe:
fe.write(r)
else:
with open("." + file_path + "utils/" + i, "r", encoding="utf-8") as f:
r = requests.get(
"https://jihulab.com/spiritlhl/dependence_scripts/-/raw/master/utils/" + i).text
d = f.read()
if r == d:
print("已存在文件 {}utils/{}".format(file_path, i))
else:
print("更新文件 {}utils/{}".format(file_path, i))
with open("." + file_path + "utils/" + i, "w", encoding="utf-8") as fe:
fe.write(r)
except:
print("未配置ec_ref_dep,默认不更新依赖文件")
####################################################################################################
# function
try:
res = requests.get("https://jihulab.com/spiritlhl/dependence_scripts/-/raw/master/function.json").json()
except:
print("网络波动,稍后尝试")
time.sleep(5)
try:
res = requests.get("https://jihulab.com/spiritlhl/dependence_scripts/-/raw/master/function.json").json()
except:
print("网络问题无法获取仓库文件列表,终止检索")
return
dependence_scripts_function = []
for i in res:
dependence_scripts_function.append(i["name"])
try:
if "db" in os.listdir("../"):
function_list = os.listdir(file_path + "function")
else:
function_list = os.listdir("." + file_path + "function")
except:
if "db" in os.listdir("../"):
os.makedirs(file_path + "function")
function_list = os.listdir(file_path + "function")
else:
os.makedirs("." + file_path + "function")
function_list = os.listdir("." + file_path + "function")
# 查询
for i in dependence_scripts_function:
if i not in function_list and i != "utils" and i != "function":
print("缺失文件 {}function/{}".format(file_path, i))
# 修补
try:
if fix == 1:
print("增加文件 {}function/{}".format(file_path, i))
r = requests.get(
"https://jihulab.com/spiritlhl/dependence_scripts/-/raw/master/function/" + i).text
if "db" in os.listdir("../"):
with open(file_path + "function/" + i, "w", encoding="utf-8") as fe:
fe.write(r)
else:
with open("." + file_path + "function/" + i, "w", encoding="utf-8") as fe:
fe.write(r)
except:
temp = 1
try:
if temp == 1:
print("未配置ec_fix_dep,默认不修复增加缺失的依赖文件")
except:
pass
# 更新
try:
if ref == 1:
for i in dependence_scripts_function:
if i != "utils" and i != "function":
if "db" in os.listdir("../"):
with open(file_path + "function/" + i, "r", encoding="utf-8") as f:
r = requests.get(
"https://jihulab.com/spiritlhl/dependence_scripts/-/raw/master/function/" + i).text
d = f.read()
if r == d:
print("已存在文件 {}function/{}".format(file_path, i))
else:
print("更新文件 {}function/{}".format(file_path, i))
with open(file_path + "function/" + i, "w", encoding="utf-8") as fe:
fe.write(r)
else:
with open("." + file_path + "function/" + i, "r", encoding="utf-8") as f:
r = requests.get(
"https://jihulab.com/spiritlhl/dependence_scripts/-/raw/master/function/" + i).text
d = f.read()
if r == d:
print("已存在文件 {}function/{}".format(file_path, i))
else:
print("更新文件 {}function/{}".format(file_path, i))
with open('.' + file_path + "function/" + i, "w", encoding="utf-8") as fe:
fe.write(r)
except:
print("未配置ec_ref_dep,默认不更新依赖文件")
def check_root():
    """Check the repo root, utils/ and function/ against the upstream
    jihulab manifest, reporting missing files and optionally repairing
    (``fix == 1``) or refreshing (``ref == 1``) them.

    ``fix`` and ``ref`` are module-level config names; when either is
    undefined the NameError is deliberately swallowed by the broad
    ``except`` blocks below and that step is skipped with a notice.
    """
    # Fetch the root-level manifest; retry once after a short pause.
    try:
        res = requests.get("https://jihulab.com/spiritlhl/dependence_scripts/-/raw/master/contents.json").json()
    except:
        print("网络波动,稍后尝试")
        time.sleep(5)
        try:
            res = requests.get("https://jihulab.com/spiritlhl/dependence_scripts/-/raw/master/contents.json").json()
        except:
            print("网络问题无法获取仓库文件列表,终止检索")
            return
    dependence_scripts_name = []
    for i in res:
        dependence_scripts_name.append(i["name"])
    # A sibling "db" dir means we already run from the scripts root;
    # otherwise the root is one level up.  TODO confirm layout assumption.
    if "db" in os.listdir("../"):
        dir_list = os.listdir("./")
    else:
        dir_list = os.listdir("../")
    # Query: report manifest files missing locally.
    for i in dependence_scripts_name:
        if i not in dir_list and i != "utils" and i != "function":
            print("缺失文件 {}".format(i))
            # Repair: download the missing file when fix == 1.
            try:
                if fix == 1:
                    print("增加文件 {}".format(i))
                    r = requests.get(
                        "https://jihulab.com/spiritlhl/dependence_scripts/-/raw/master/" + i).text
                    if "db" in os.listdir("../"):
                        with open(i, "w", encoding="utf-8") as fe:
                            fe.write(r)
                    else:
                        with open("../" + i, "w", encoding="utf-8") as fe:
                            fe.write(r)
            except:
                # NameError when ``fix`` is unset lands here; flag it once.
                temp = 1
            try:
                if temp == 1:
                    print("未配置ec_fix_dep,默认不修复增加缺失的依赖文件")
            except:
                # ``temp`` never assigned (repair path ran cleanly) -> ignore.
                pass
    # Update: refresh root-level files when ref == 1.
    try:
        if ref == 1:
            for i in dependence_scripts_name:
                if i != "utils" and i != "function":
                    if "db" in os.listdir("../"):
                        with open(i, "r", encoding="utf-8") as f:
                            r = requests.get(
                                "https://jihulab.com/spiritlhl/dependence_scripts/-/raw/master/" + i).text
                            d = f.read()
                            if r == d:
                                print("无需修改 {}".format(i))
                            else:
                                print("更新文件 {}".format(i))
                                with open(i, "w", encoding="utf-8") as fe:
                                    fe.write(r)
                    else:
                        with open("../" + i, "r", encoding="utf-8") as f:
                            r = requests.get(
                                "https://jihulab.com/spiritlhl/dependence_scripts/-/raw/master/" + i).text
                            d = f.read()
                            if r == d:
                                print("无需修改 {}".format(i))
                            else:
                                print("更新文件 {}".format(i))
                                with open("../" + i, "w", encoding="utf-8") as fe:
                                    fe.write(r)
    except:
        # ``ref`` unset -> NameError caught here; updating is opt-in.
        print("未配置ec_ref_dep,默认不更新依赖文件")
    #########################################################################################################
    # utils: same check/repair/update cycle for the utils/ directory.
    try:
        res = requests.get("https://jihulab.com/spiritlhl/dependence_scripts/-/raw/master/utils.json").json()
    except:
        print("网络波动,稍后尝试")
        time.sleep(5)
        try:
            res = requests.get("https://jihulab.com/spiritlhl/dependence_scripts/-/raw/master/utils.json").json()
        except:
            print("网络问题无法获取仓库文件列表,终止检索")
            return
    dependence_scripts_utils = []
    for i in res:
        dependence_scripts_utils.append(i["name"])
    # Create utils/ on demand when listing it fails (FileNotFoundError).
    try:
        if "db" in os.listdir("../"):
            utils_list = os.listdir("./utils")
        else:
            utils_list = os.listdir("../utils")
    except:
        if "db" in os.listdir("../"):
            os.makedirs("utils")
            utils_list = os.listdir("./utils")
        else:
            os.makedirs("../utils")
            utils_list = os.listdir("../utils")
    # Query
    for i in dependence_scripts_utils:
        if i not in utils_list and i != "utils" and i != "function":
            print("缺失文件 utils/{}".format(i))
            # Repair
            try:
                if fix == 1:
                    print("增加文件 utils/{}".format(i))
                    r = requests.get(
                        "https://jihulab.com/spiritlhl/dependence_scripts/-/raw/master/utils/" + i).text
                    if "db" in os.listdir("../"):
                        with open("./utils/" + i, "w", encoding="utf-8") as fe:
                            fe.write(r)
                    else:
                        with open("../utils/" + i, "w", encoding="utf-8") as fe:
                            fe.write(r)
            except:
                temp = 1
            try:
                if temp == 1:
                    print("未配置ec_fix_dep,默认不修复增加缺失的依赖文件")
            except:
                pass
    # Update
    try:
        if ref == 1:
            for i in dependence_scripts_utils:
                if i != "utils" and i != "function":
                    if "db" in os.listdir("../"):
                        with open("./utils/" + i, "r", encoding="utf-8") as f:
                            r = requests.get(
                                "https://jihulab.com/spiritlhl/dependence_scripts/-/raw/master/utils/" + i).text
                            d = f.read()
                            if r == d:
                                print("已存在文件 utils/{}".format(i))
                            else:
                                print("更新文件 utils/{}".format(i))
                                with open("./utils/" + i, "w", encoding="utf-8") as fe:
                                    fe.write(r)
                    else:
                        with open("../utils/" + i, "r", encoding="utf-8") as f:
                            r = requests.get(
                                "https://jihulab.com/spiritlhl/dependence_scripts/-/raw/master/utils/" + i).text
                            d = f.read()
                            if r == d:
                                print("已存在文件 utils/{}".format(i))
                            else:
                                print("更新文件 utils/{}".format(i))
                                with open("../utils/" + i, "w", encoding="utf-8") as fe:
                                    fe.write(r)
    except:
        print("未配置ec_ref_dep,默认不更新依赖文件")
    ####################################################################################################
    # function: same cycle again for the function/ directory.
    try:
        res = requests.get("https://jihulab.com/spiritlhl/dependence_scripts/-/raw/master/function.json").json()
    except:
        print("网络波动,稍后尝试")
        time.sleep(5)
        try:
            res = requests.get("https://jihulab.com/spiritlhl/dependence_scripts/-/raw/master/function.json").json()
        except:
            print("网络问题无法获取仓库文件列表,终止检索")
            return
    dependence_scripts_function = []
    for i in res:
        dependence_scripts_function.append(i["name"])
    # Create function/ on demand when listing it fails.
    try:
        if "db" in os.listdir("../"):
            function_list = os.listdir("./function")
        else:
            function_list = os.listdir("../function")
    except:
        if "db" in os.listdir("../"):
            os.makedirs("function")
            function_list = os.listdir("./function")
        else:
            os.makedirs("../function")
            function_list = os.listdir("../function")
    # Query
    for i in dependence_scripts_function:
        if i not in function_list and i != "utils" and i != "function":
            print("缺失文件 function/{}".format(i))
            # Repair
            try:
                if fix == 1:
                    print("增加文件 function/{}".format(i))
                    r = requests.get(
                        "https://jihulab.com/spiritlhl/dependence_scripts/-/raw/master/function/" + i).text
                    if "db" in os.listdir("../"):
                        with open("./function/" + i, "w", encoding="utf-8") as fe:
                            fe.write(r)
                    else:
                        with open("../function/" + i, "w", encoding="utf-8") as fe:
                            fe.write(r)
            except:
                temp = 1
            try:
                if temp == 1:
                    print("未配置ec_fix_dep,默认不修复增加缺失的依赖文件")
            except:
                pass
    # Update
    try:
        if ref == 1:
            for i in dependence_scripts_function:
                if i != "utils" and i != "function":
                    if "db" in os.listdir("../"):
                        with open("./function/" + i, "r", encoding="utf-8") as f:
                            r = requests.get(
                                "https://jihulab.com/spiritlhl/dependence_scripts/-/raw/master/function/" + i).text
                            d = f.read()
                            if r == d:
                                print("已存在文件 function/{}".format(i))
                            else:
                                print("更新文件 function/{}".format(i))
                                with open("./function/" + i, "w", encoding="utf-8") as fe:
                                    fe.write(r)
                    else:
                        with open("../function/" + i, "r", encoding="utf-8") as f:
                            r = requests.get(
                                "https://jihulab.com/spiritlhl/dependence_scripts/-/raw/master/function/" + i).text
                            d = f.read()
                            if r == d:
                                print("已存在文件 function/{}".format(i))
                            else:
                                print("更新文件 function/{}".format(i))
                                with open("../function/" + i, "w", encoding="utf-8") as fe:
                                    fe.write(r)
    except:
        print("未配置ec_ref_dep,默认不更新依赖文件")
if __name__ == '__main__':
    # For the Qinglong layout where every pulled repo gets its own folder,
    # check each first-level directory; comment out the begin..end section
    # if that is not needed.
    ### begin
    if "db" in os.listdir("../"):
        dirs_ls = traversalDir_FirstDir("./")
    else:
        dirs_ls = traversalDir_FirstDir("../")
    # Folders that exist by default under the scripts root; anything listed
    # here is skipped when scanning for dependency completeness.
    or_list = ['node_modules', '__pycache__', 'utils', '.pnpm-store', 'function', 'tools', 'backUp', '.git', '.idea', '.github']
    print()
    for i in dirs_ls:
        if i not in or_list:
            file_path = "./" + i + "/"
            print("检测依赖文件是否完整路径 {}".format(file_path))
            check_dependence(file_path)
            print()
    ### end
    # Check the root directory itself; older single-folder layouts only
    # need this call -- comment it out if unwanted.
    check_root()
    print("检测完毕")
    # NOTE(review): unlike the guarded uses inside check_root(), this
    # reference raises NameError if ``fix`` was never defined -- confirm
    # that ``fix`` is always set before reaching here.
    if fix == 1:
        print("修复完毕后脚本无法运行,显示缺依赖文件,大概率库里没有或者依赖文件同名但内容不一样,请另寻他法\n")
        print("修复完毕后缺依赖环境导致的脚本无法运行,这种无法修复,请自行在依赖管理中添加\n")
        print("前者缺文件(如 Error: Cannot find module './utils/magic'),后者缺依赖(如 Error: Cannot find module 'date-fns' ),本脚本只修复前一种")
| 37.611203
| 225
| 0.436662
| 2,293
| 22,830
| 4.250327
| 0.087222
| 0.095937
| 0.044326
| 0.051714
| 0.848656
| 0.824338
| 0.814078
| 0.808434
| 0.78976
| 0.778576
| 0
| 0.006516
| 0.408454
| 22,830
| 606
| 226
| 37.673267
| 0.715143
| 0.032501
| 0
| 0.839506
| 0
| 0.004115
| 0.215574
| 0.021826
| 0
| 0
| 0
| 0
| 0
| 1
| 0.006173
| false
| 0.012346
| 0.006173
| 0
| 0.026749
| 0.156379
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
08a4a133f22acabb5fc30c41887ea93c6245bbe9
| 1,747
|
py
|
Python
|
nicos_virt_mlz/reseda/setups/static_flippers.py
|
jkrueger1/nicos
|
5f4ce66c312dedd78995f9d91e8a6e3c891b262b
|
[
"CC-BY-3.0",
"Apache-2.0",
"CC-BY-4.0"
] | 12
|
2019-11-06T15:40:36.000Z
|
2022-01-01T16:23:00.000Z
|
nicos_virt_mlz/reseda/setups/static_flippers.py
|
jkrueger1/nicos
|
5f4ce66c312dedd78995f9d91e8a6e3c891b262b
|
[
"CC-BY-3.0",
"Apache-2.0",
"CC-BY-4.0"
] | 91
|
2020-08-18T09:20:26.000Z
|
2022-02-01T11:07:14.000Z
|
nicos_virt_mlz/reseda/setups/static_flippers.py
|
jkrueger1/nicos
|
5f4ce66c312dedd78995f9d91e8a6e3c891b262b
|
[
"CC-BY-3.0",
"Apache-2.0",
"CC-BY-4.0"
] | 6
|
2020-01-11T10:52:30.000Z
|
2022-02-25T12:35:23.000Z
|
# -*- coding: utf-8 -*-

description = 'Static flippers'
group = 'lowlevel'

display_order = 22

#abslimits are defined in .res file!

# Every flipper current shares the exact same ManualMove configuration;
# only the device name and its description differ, so the device dict is
# generated from a (name, description) table instead of six copies.
devices = {
    name: device('nicos.devices.generic.ManualMove',
                 description = desc,
                 fmtstr = '%.3f',
                 pollinterval = 60,
                 maxage = 120,
                 abslimits = (0, 5),
                 # precision = 0.01,
                 unit = 'A',
                )
    for name, desc in (
        ('sf_0a', 'Static flipper arm 0 - A'),
        ('sf_0b', 'Static flipper arm 0 - B'),
        ('sf_1', 'Static flipper arm 1'),
        ('hsf_0a', 'Helmholtz mezei flipper arm 0 - A'),
        ('hsf_0b', 'Helmholtz mezei flipper arm 0 - B'),
        ('hsf_1', 'Helmholtz mezei flipper arm 1'),
    )
}
| 26.074627
| 58
| 0.511162
| 182
| 1,747
| 4.868132
| 0.247253
| 0.074492
| 0.121896
| 0.1693
| 0.883747
| 0.883747
| 0.883747
| 0.872461
| 0.872461
| 0.81377
| 0
| 0.07099
| 0.34688
| 1,747
| 66
| 59
| 26.469697
| 0.705521
| 0.093875
| 0
| 0.679245
| 0
| 0
| 0.259377
| 0.12206
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
eb4364b6130f86e7de569e5e14b6293555530b5a
| 11,034
|
py
|
Python
|
dataloaders.py
|
roatienza/agmax
|
2a7299cc506605aeaaf64b6155b5c826c71d5786
|
[
"Apache-2.0"
] | 2
|
2021-11-05T13:09:12.000Z
|
2022-03-04T05:07:33.000Z
|
dataloaders.py
|
roatienza/agmax
|
2a7299cc506605aeaaf64b6155b5c826c71d5786
|
[
"Apache-2.0"
] | 1
|
2021-11-04T10:06:57.000Z
|
2021-11-07T08:35:39.000Z
|
dataloaders.py
|
roatienza/agmax
|
2a7299cc506605aeaaf64b6155b5c826c71d5786
|
[
"Apache-2.0"
] | null | null | null |
'''
A single dataloader is the typical one input - one label mapping. This is the classical supervised learning.
A double dataloader creates 2 data points for the same label and used in training with AgMax.
Copyright 2021 Rowel Atienza
'''
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch
import torchvision.transforms as transforms
import torchvision.datasets as datasets
from torch.utils.data import ConcatDataset
import os
class SingleLoader:
    """Classical supervised dataloader pair: one input -> one label.

    Builds ``self.train`` and ``self.test`` DataLoaders for the requested
    dataset family, plus ``self.val`` for "speech_commands"; for every
    other dataset ``self.val`` is now ``None`` instead of missing
    entirely (previously ``loader.val`` raised AttributeError).
    Subclasses such as DoubleLoader override ``_build``.
    """

    def __init__(self,
                 root='./data',
                 batch_size=128,
                 dataset=datasets.CIFAR10,
                 transform={'train':transforms.ToTensor(), 'test':transforms.ToTensor()},
                 device=None,
                 dataset_name="cifar10",
                 shuffle_test=False,
                 corruption=None,
                 num_workers=16):
        # NOTE: the mutable ``transform`` default dict is shared across
        # calls; it is only read here, never mutated, so this is safe.
        super(SingleLoader, self).__init__()
        # Pre-declare every loader attribute so callers can probe them.
        self.test = None
        self.train = None
        self.val = None
        self._build(root,
                    batch_size,
                    dataset,
                    transform,
                    device,
                    dataset_name,
                    shuffle_test,
                    corruption,
                    num_workers)

    def _build(self,
               root,
               batch_size,
               dataset,
               transform,
               device,
               dataset_name,
               shuffle_test,
               corruption,
               num_workers):
        """Instantiate the datasets for *dataset_name* and wrap them in
        DataLoaders stored on ``self.train`` / ``self.val`` / ``self.test``."""
        DataLoader = torch.utils.data.DataLoader
        # Worker processes + pinned memory only pay off on a CUDA device.
        if "cuda" in str(device):
            print("num_workers: ", num_workers)
            kwargs = {'num_workers': num_workers, 'pin_memory': True}
        else:
            kwargs = {}
        if dataset_name == "svhn" or dataset_name == "svhn-core":
            # "svhn" = train + extra splits concatenated; "svhn-core" = train only.
            x_train = dataset(root=root,
                              split='train',
                              download=True,
                              transform=transform['train'])
            if dataset_name == "svhn":
                x_extra = dataset(root=root,
                                  split='extra',
                                  download=True,
                                  transform=transform['train'])
                x_train = ConcatDataset([x_train, x_extra])
            x_test = dataset(root=root,
                             split='test',
                             download=True,
                             transform=transform['test'])
        elif dataset_name == "imagenet":
            x_train = dataset(root=root,
                              split='train',
                              transform=transform['train'])
            if corruption is None:
                x_test = dataset(root=root,
                                 split='val',
                                 transform=transform['test'])
            else:
                # ImageNet-C style layout: <root>/<corruption>/<severity 1..5>/
                # all five severity levels are concatenated into one test set.
                root = os.path.join(root, corruption)
                corrupt_test = []
                for i in range(1, 6):
                    folder = os.path.join(root, str(i))
                    x_test = datasets.ImageFolder(root=folder,
                                                  transform=transform['test'])
                    corrupt_test.append(x_test)
                x_test = ConcatDataset(corrupt_test)
        elif dataset_name == "speech_commands":
            x_train = dataset(root=root,
                              split='train',
                              transform=transform['train'])
            x_val = dataset(root=root,
                            split='valid',
                            transform=transform['test'])
            x_test = dataset(root=root,
                             split='test',
                             transform=transform['test'])
            self.val = DataLoader(x_val,
                                  shuffle=False,
                                  batch_size=batch_size,
                                  **kwargs)
        else:
            # torchvision-style datasets selected via a train=True/False flag.
            x_train = dataset(root=root,
                              train=True,
                              download=True,
                              transform=transform['train'])
            x_test = dataset(root=root,
                             train=False,
                             download=True,
                             transform=transform['test'])
        self.train = DataLoader(x_train,
                                shuffle=True,
                                batch_size=batch_size,
                                **kwargs)
        self.test = DataLoader(x_test,
                               shuffle=shuffle_test,
                               batch_size=batch_size,
                               **kwargs)
class DoubleLoader(SingleLoader):
    """Two-view dataloader for AgMax training.

    ``dataset`` is a pair ``[train_cls, test_cls]``: the train class
    receives an extra ``siamese_transform`` so each sample yields two
    augmented views for the same label; the test class is a plain
    single-view dataset.  All DataLoader wiring is inherited from
    SingleLoader.__init__, which calls the overridden ``_build`` below.
    """
    def __init__(self,
                 root='./data',
                 batch_size=128,
                 dataset=[None, None],
                 transform={'train':transforms.ToTensor(), 'test':transforms.ToTensor()},
                 device=None,
                 dataset_name="cifar10",
                 shuffle_test=False,
                 corruption=None,
                 num_workers=16):
        # NOTE(review): the ``dataset`` list and ``transform`` dict defaults
        # are mutable but only ever read here, so sharing them is safe.
        super(DoubleLoader, self).__init__(root=root,
                                           batch_size=batch_size,
                                           dataset=dataset,
                                           transform=transform,
                                           device=device,
                                           dataset_name=dataset_name,
                                           shuffle_test=shuffle_test,
                                           corruption=corruption,
                                           num_workers=num_workers)

    def _build(self,
               root,
               batch_size,
               dataset,
               transform,
               device,
               dataset_name,
               shuffle_test,
               corruption,
               num_workers):
        """Same branching as SingleLoader._build, but train datasets are
        built from ``dataset[0]`` with a ``siamese_transform`` (second
        augmented view) and test datasets from ``dataset[1]``."""
        print(self.__class__.__name__)
        DataLoader = torch.utils.data.DataLoader
        #workers = torch.cuda.device_count() * 4
        # Worker processes + pinned memory only pay off on a CUDA device.
        if "cuda" in str(device):
            print("num_workers: ", num_workers)
            kwargs = {'num_workers': num_workers, 'pin_memory': True}
        else:
            kwargs = {}
        if dataset_name == "svhn" or dataset_name == "svhn-core":
            # "svhn" = train + extra splits concatenated; "svhn-core" = train only.
            x_train = dataset[0](root=root,
                              split='train',
                              download=True,
                              transform=transform['train'],
                              siamese_transform=transform['train'])
            if dataset_name == "svhn":
                x_extra = dataset[0](root=root,
                                  split='extra',
                                  download=True,
                                  transform=transform['train'],
                                  siamese_transform=transform['train'])
                x_train = ConcatDataset([x_train, x_extra])
            x_test = dataset[1](root=root,
                              split='test',
                              download=True,
                              transform=transform['test'])
        elif dataset_name == "imagenet":
            x_train = dataset[0](root=root,
                              split='train',
                              transform=transform['train'],
                              siamese_transform=transform['train'])
            if corruption is None:
                x_test = dataset[1](root=root,
                                  split='val',
                                  transform=transform['test'])
            else:
                # ImageNet-C style layout: <root>/<corruption>/<severity 1..5>/
                root = os.path.join(root, corruption)
                corrupt_test = []
                for i in range(1, 6):
                    folder = os.path.join(root, str(i))
                    x_test = datasets.ImageFolder(root=folder,
                                                  transform=transform['test'])
                    corrupt_test.append(x_test)
                x_test = ConcatDataset(corrupt_test)
        elif dataset_name == "speech_commands":
            x_train = dataset[0](root=root,
                              split='train',
                              transform=transform['train'],
                              siamese_transform=transform['train'])
            x_val = dataset[1](root=root,
                             split='valid',
                             transform=transform['test'])
            x_test = dataset[1](root=root,
                              split='test',
                              transform=transform['test'])
            self.val = DataLoader(x_val,
                                  shuffle=False,
                                  batch_size=batch_size,
                                  **kwargs)
            #from torch.utils.data.sampler import WeightedRandomSampler
            #weights = x_train.make_weights_for_balanced_classes()
            #sampler = WeightedRandomSampler(weights, len(weights))
            # sampler=sampler,
            #self.train = DataLoader(x_train,
            #                        shuffle=True,
            #                        batch_size=batch_size,
            #                        **kwargs)
            #self.test = DataLoader(x_test,
            #                       shuffle=False,
            #                       batch_size=batch_size,
            #                       **kwargs)
            #return
        else:
            # torchvision-style datasets selected via a train=True/False flag.
            x_train = dataset[0](root=root,
                              train=True,
                              download=True,
                              transform=transform['train'],
                              siamese_transform=transform['train'])
            x_test = dataset[1](root=root,
                              train=False,
                              download=True,
                              transform=transform['test'])
        self.train = DataLoader(x_train,
                                shuffle=True,
                                batch_size=batch_size,
                                **kwargs)
        self.test = DataLoader(x_test,
                               shuffle=shuffle_test,
                               batch_size=batch_size,
                               **kwargs)
| 39.548387
| 108
| 0.414537
| 838
| 11,034
| 5.25179
| 0.138425
| 0.114519
| 0.047262
| 0.04499
| 0.810043
| 0.809589
| 0.799137
| 0.798909
| 0.798
| 0.750966
| 0
| 0.00678
| 0.505438
| 11,034
| 278
| 109
| 39.690647
| 0.799707
| 0.100779
| 0
| 0.860465
| 0
| 0
| 0.039939
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.018605
| false
| 0
| 0.037209
| 0
| 0.065116
| 0.018605
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
de9263734b9754ee203f6c5649cafb29458ac3d8
| 7,497
|
py
|
Python
|
test_add_edit_delete.py
|
Sonny-skyez/Selenium_test_-_Polls_app
|
5e166edbe6dcd9c23e6e75f349e0a249be1dfa39
|
[
"MIT"
] | null | null | null |
test_add_edit_delete.py
|
Sonny-skyez/Selenium_test_-_Polls_app
|
5e166edbe6dcd9c23e6e75f349e0a249be1dfa39
|
[
"MIT"
] | 1
|
2021-06-01T23:51:28.000Z
|
2021-06-01T23:51:28.000Z
|
test_add_edit_delete.py
|
Sonny-skyez/Selenium_test_-_Polls_app
|
5e166edbe6dcd9c23e6e75f349e0a249be1dfa39
|
[
"MIT"
] | null | null | null |
import unittest

from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
from selenium.common.exceptions import NoAlertPresentException
from selenium.webdriver.support.ui import Select
'''Test suite: add edit & delete questions in admin panel.
This test suite contains 3 test cases.
Tested URL: https://polls-application.herokuapp.com/polls/'''
class Test_1_add_question(unittest.TestCase):
    '''Test case: an admin user adds a new poll question
    (question text, publication date/time and one choice).'''
    def setUp(self):
        # Fresh Chrome session per test; the implicit wait makes every
        # find_element_* call poll up to 30 s before failing.
        self.driver = webdriver.Chrome()
        self.driver.implicitly_wait(30)
        self.verificationErrors = []
        self.accept_next_alert = True
    def test_app(self):
        driver = self.driver
        driver.get("https://polls-application.herokuapp.com/polls/")
        # Log into the Django admin.
        # NOTE(review): "XXXXX" looks like a scrubbed placeholder password;
        # supply a real credential (ideally via an env var) before running.
        driver.find_element_by_link_text("Admin").click()
        driver.find_element_by_id("id_username").click()
        driver.find_element_by_id("id_username").clear()
        driver.find_element_by_id("id_username").send_keys("Sonny")
        driver.find_element_by_id("login-form").submit()
        driver.find_element_by_id("id_password").clear()
        driver.find_element_by_id("id_password").send_keys("XXXXX")
        driver.find_element_by_id("login-form").submit()
        # Follow the "Add" link next to the Questions heading.
        driver.find_element_by_xpath(
            "(.//*[normalize-space(text()) and normalize-space(.)='Questions'])[1]/following::a[1]").click()
        driver.find_element_by_id("id_question_text").clear()
        driver.find_element_by_id("id_question_text").send_keys("Test")
        driver.find_element_by_id("question_form").submit()
        # Fill in publication date/time and the first answer choice.
        driver.find_element_by_link_text("Today").click()
        driver.find_element_by_link_text("Now").click()
        driver.find_element_by_id("id_choice_set-0-choice_text").click()
        driver.find_element_by_id("id_choice_set-0-choice_text").clear()
        driver.find_element_by_id("id_choice_set-0-choice_text").send_keys("Test")
        driver.find_element_by_id("question_form").submit()
    def is_element_present(self, how, what):
        # Selenium-IDE helper: True when the locator matches an element.
        try:
            self.driver.find_element(by=how, value=what)
        except NoSuchElementException as e:
            return False
        return True
    def is_alert_present(self):
        # Selenium-IDE helper: True when a JS alert is currently open.
        # NOTE(review): switch_to_alert() is the deprecated Selenium-2 API.
        try:
            self.driver.switch_to_alert()
        except NoAlertPresentException as e:
            return False
        return True
    def close_alert_and_get_its_text(self):
        # Selenium-IDE helper: accept/dismiss the open alert, return its text.
        try:
            alert = self.driver.switch_to_alert()
            alert_text = alert.text
            if self.accept_next_alert:
                alert.accept()
            else:
                alert.dismiss()
            return alert_text
        finally:
            self.accept_next_alert = True
    def tearDown(self):
        # Close the browser (previously each test leaked a Chrome process)
        # and fail the test if any verification errors were collected.
        self.driver.quit()
        self.assertEqual([], self.verificationErrors)
class Test_2_edit_question(unittest.TestCase):
    '''Test case: edit the previously created poll question --
    question text, publication date/time and answer choice.'''
    def setUp(self):
        # Fresh Chrome session per test; implicit wait covers slow pages.
        self.driver = webdriver.Chrome()
        self.driver.implicitly_wait(30)
        self.verificationErrors = []
        self.accept_next_alert = True
    def test_app(self):
        driver = self.driver
        driver.get("https://polls-application.herokuapp.com/polls/")
        # Log into the Django admin (see Test_1 for the credential caveat).
        driver.find_element_by_link_text("Admin").click()
        driver.find_element_by_id("id_username").click()
        driver.find_element_by_id("id_username").clear()
        driver.find_element_by_id("id_username").send_keys("Sonny")
        driver.find_element_by_id("login-form").submit()
        driver.find_element_by_id("id_password").clear()
        driver.find_element_by_id("id_password").send_keys("XXXXX")
        driver.find_element_by_id("login-form").submit()
        # Open the question created by Test_1 and rename it to "Test_2".
        driver.find_element_by_link_text("Questions").click()
        driver.find_element_by_link_text("Test").click()
        driver.find_element_by_id("id_question_text").click()
        driver.find_element_by_id("id_question_text").clear()
        driver.find_element_by_id("id_question_text").send_keys("Test_2")
        driver.find_element_by_link_text("Today").click()
        driver.find_element_by_link_text("Now").click()
        driver.find_element_by_id("id_choice_set-0-choice_text").click()
        driver.find_element_by_id("id_choice_set-0-choice_text").clear()
        driver.find_element_by_id("id_choice_set-0-choice_text").send_keys("Test_2")
        driver.find_element_by_id("question_form").submit()
    def is_element_present(self, how, what):
        # Selenium-IDE helper: True when the locator matches an element.
        try:
            self.driver.find_element(by=how, value=what)
        except NoSuchElementException as e:
            return False
        return True
    def is_alert_present(self):
        # Selenium-IDE helper: True when a JS alert is currently open.
        try:
            self.driver.switch_to_alert()
        except NoAlertPresentException as e:
            return False
        return True
    def close_alert_and_get_its_text(self):
        # Selenium-IDE helper: accept/dismiss the open alert, return its text.
        try:
            alert = self.driver.switch_to_alert()
            alert_text = alert.text
            if self.accept_next_alert:
                alert.accept()
            else:
                alert.dismiss()
            return alert_text
        finally:
            self.accept_next_alert = True
    def tearDown(self):
        # Close the browser (previously each test leaked a Chrome process).
        self.driver.quit()
        self.assertEqual([], self.verificationErrors)
class Test_3_delete_question(unittest.TestCase):
    '''Test the 'delete question' bulk action in the admin panel.'''
    def setUp(self):
        # Fresh Chrome session per test; implicit wait covers slow pages.
        self.driver = webdriver.Chrome()
        self.driver.implicitly_wait(30)
        self.verificationErrors = []
        self.accept_next_alert = True
    def test_app(self):
        driver = self.driver
        driver.get("https://polls-application.herokuapp.com/polls/")
        # Log into the Django admin (see Test_1 for the credential caveat).
        driver.find_element_by_link_text("Admin").click()
        driver.find_element_by_id("id_username").click()
        driver.find_element_by_id("id_username").clear()
        driver.find_element_by_id("id_username").send_keys("Sonny")
        driver.find_element_by_id("login-form").submit()
        driver.find_element_by_id("id_password").clear()
        driver.find_element_by_id("id_password").send_keys("XXXXX")
        driver.find_element_by_id("login-form").submit()
        driver.find_element_by_link_text("Questions").click()
        # Tick the question's checkbox and run the bulk "delete" action;
        # Select drives the admin action <select> dropdown.
        driver.find_element_by_name("_selected_action").click()
        driver.find_element_by_name("action").click()
        Select(driver.find_element_by_name("action")).select_by_visible_text("Delete selected questions")
        driver.find_element_by_name("index").click()
        # Confirm deletion on the follow-up "are you sure?" page.
        driver.find_element_by_xpath(
            "(.//*[normalize-space(text()) and normalize-space(.)='Choice: Test_2'])[1]/following::input[5]").click()
    def is_element_present(self, how, what):
        # Selenium-IDE helper: True when the locator matches an element.
        try:
            self.driver.find_element(by=how, value=what)
        except NoSuchElementException as e:
            return False
        return True
    def is_alert_present(self):
        # Selenium-IDE helper: True when a JS alert is currently open.
        try:
            self.driver.switch_to_alert()
        except NoAlertPresentException as e:
            return False
        return True
    def close_alert_and_get_its_text(self):
        # Selenium-IDE helper: accept/dismiss the open alert, return its text.
        try:
            alert = self.driver.switch_to_alert()
            alert_text = alert.text
            if self.accept_next_alert:
                alert.accept()
            else:
                alert.dismiss()
            return alert_text
        finally:
            self.accept_next_alert = True
    def tearDown(self):
        # Close the browser (previously each test leaked a Chrome process).
        self.driver.quit()
        self.assertEqual([], self.verificationErrors)
# Allow running this module directly: discover and run all three test cases.
if __name__ == "__main__":
    unittest.main()
| 36.043269
| 117
| 0.656663
| 945
| 7,497
| 4.875132
| 0.129101
| 0.117213
| 0.199262
| 0.222705
| 0.880399
| 0.859562
| 0.83677
| 0.835034
| 0.835034
| 0.825917
| 0
| 0.003985
| 0.230225
| 7,497
| 208
| 118
| 36.043269
| 0.794316
| 0.022276
| 0
| 0.869565
| 0
| 0.012422
| 0.138877
| 0.046199
| 0
| 0
| 0
| 0
| 0.018634
| 1
| 0.111801
| false
| 0.037267
| 0.024845
| 0
| 0.248447
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
deb0fd1a9266b883920f2fb3c483e4a2249c666e
| 10,920
|
py
|
Python
|
data/archive/download_cwat_atmos.py
|
Skye777/transformer
|
177834bcb55e59f8ea0fbe666734c148effbec8d
|
[
"Apache-2.0"
] | null | null | null |
data/archive/download_cwat_atmos.py
|
Skye777/transformer
|
177834bcb55e59f8ea0fbe666734c148effbec8d
|
[
"Apache-2.0"
] | null | null | null |
data/archive/download_cwat_atmos.py
|
Skye777/transformer
|
177834bcb55e59f8ea0fbe666734c148effbec8d
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
#################################################################
# Python Script to retrieve 164 online Data files of 'ds131.2',
# total 3.02G. This script uses 'requests' to download data.
#
# Highlight this script by Select All, Copy and Paste it into a file;
# make the file executable and run it on command line.
#
# You need pass in your password as a parameter to execute
# this script; or you can set an environment variable RDAPSWD
# if your Operating System supports it.
#
# Contact rpconroy@ucar.edu (Riley Conroy) for further assistance.
#################################################################
import sys, os
import requests
def check_file_status(filepath, filesize):
    """Render an in-place download progress line for *filepath*.

    *filesize* is the expected total size in bytes; the current on-disk
    size is compared against it and printed as a percentage on the same
    terminal line (carriage return, no trailing newline).
    """
    sys.stdout.write('\r')
    sys.stdout.flush()
    downloaded = int(os.stat(filepath).st_size)
    progress = (downloaded / filesize) * 100
    sys.stdout.write('{:.3f} % Completed'.format(progress))
    sys.stdout.flush()
# Try to get password: fall back through getpass prompt -> Python 2
# raw_input -> plain builtin input.
if len(sys.argv) < 2 and not 'RDAPSWD' in os.environ:
    try:
        import getpass
        # Shadow input() so the prompt below hides what is typed.
        input = getpass.getpass
    except:
        try:
            # Python 2 fallback; on Python 3 the NameError is swallowed
            # and the builtin input() is used as-is.
            input = raw_input
        except:
            pass
    pswd = input('Password: ')
else:
    # Prefer the command-line argument, else the RDAPSWD env variable.
    try:
        pswd = sys.argv[1]
    except:
        pswd = os.environ['RDAPSWD']
url = 'https://rda.ucar.edu/cgi-bin/login'
values = {'email': '1811017@tongji.edu.cn', 'passwd': pswd, 'action': 'login'}
# Authenticate: any non-200 response is treated as a failed login.
ret = requests.post(url, data=values)
if ret.status_code != 200:
    print('Bad Authentication')
    print(ret.text)
    exit(1)
# Base URL of the ds131.2 dataset on the RDA server.
dspath = 'https://rda.ucar.edu/data/ds131.2/'
# Annual-mean CWAT (atmosphere column) GRIB files, one per year from
# 1851 through 2014 inclusive — 164 files total, matching the header
# comment. Generated instead of hand-listing 164 identical-pattern paths.
filelist = [
    'pgrbanl/pgrbanl_mean_%d_CWAT_atmos-col.grib' % year
    for year in range(1851, 2015)
]
# Stream each file in `filelist` into ../meta-data/cwat/, printing a
# progress line while the download is in flight.
target_dir = '../meta-data/cwat'
# Create the destination directory up front; the original open() call
# would otherwise fail with FileNotFoundError on a fresh checkout.
os.makedirs(target_dir, exist_ok=True)
for remote_file in filelist:  # renamed from `file` to avoid shadowing the builtin
    file_url = dspath + remote_file
    file_base = os.path.join(target_dir, os.path.basename(remote_file))
    print('Downloading', file_base)
    # stream=True keeps the multi-GB payload off the heap; the session
    # cookies from the login above grant access.
    req = requests.get(file_url, cookies=ret.cookies, allow_redirects=True, stream=True)
    filesize = int(req.headers['Content-length'])
    with open(file_base, 'wb') as outfile:
        chunk_size = 1048576  # 1 MiB per chunk
        for chunk in req.iter_content(chunk_size=chunk_size):
            outfile.write(chunk)
            # Only show incremental progress for files larger than one chunk.
            if chunk_size < filesize:
                check_file_status(file_base, filesize)
    # Final status line (100%) once the file is fully written.
    check_file_status(file_base, filesize)
    print()
| 46.271186
| 88
| 0.757784
| 1,604
| 10,920
| 4.736284
| 0.198878
| 0.302225
| 0.388574
| 0.3454
| 0.738713
| 0.738713
| 0.738713
| 0
| 0
| 0
| 0
| 0.071992
| 0.117216
| 10,920
| 235
| 89
| 46.468085
| 0.716079
| 0.047161
| 0
| 0.047393
| 0
| 0
| 0.740474
| 0.721275
| 0
| 0
| 0
| 0
| 0
| 1
| 0.004739
| false
| 0.023697
| 0.014218
| 0
| 0.018957
| 0.018957
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
def23a372ae272d0f6c26d81ebf53f371871b669
| 136
|
py
|
Python
|
flask_gtts/__init__.py
|
mrf345/flask_gtts
|
70c572b431a2be25e46572f99acc2eb14ade9a5b
|
[
"MIT"
] | 9
|
2018-04-09T16:35:13.000Z
|
2021-05-05T16:39:27.000Z
|
flask_gtts/__init__.py
|
mrf345/flask_gtts
|
70c572b431a2be25e46572f99acc2eb14ade9a5b
|
[
"MIT"
] | 2
|
2018-06-14T07:04:53.000Z
|
2020-06-25T18:00:12.000Z
|
flask_gtts/__init__.py
|
mrf345/flask_gtts
|
70c572b431a2be25e46572f99acc2eb14ade9a5b
|
[
"MIT"
] | 1
|
2019-01-09T17:46:04.000Z
|
2019-01-09T17:46:04.000Z
|
from flask_gtts.main import gtts # noqa
from flask_gtts.about import (__license__, __version__, __author__, __doc__, __email__) # noqa
| 45.333333
| 94
| 0.801471
| 18
| 136
| 4.833333
| 0.666667
| 0.206897
| 0.298851
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.125
| 136
| 2
| 95
| 68
| 0.731092
| 0.066176
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
7222268fe3dca6ae3c608469b772c758558949b2
| 65,412
|
py
|
Python
|
userbot/modules/gdrive.py
|
oxyda-fox/XBot-Remix
|
3d97bea5395b223fc89a8cc6cb699cc624ccc967
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null |
userbot/modules/gdrive.py
|
oxyda-fox/XBot-Remix
|
3d97bea5395b223fc89a8cc6cb699cc624ccc967
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null |
userbot/modules/gdrive.py
|
oxyda-fox/XBot-Remix
|
3d97bea5395b223fc89a8cc6cb699cc624ccc967
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null |
#Encript Marshal By XVenom
#https://github.com/xvenom15
import marshal
exec(marshal.loads(b'\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0e\x00\x00\x00@\x00\x00\x00s\x98\x03\x00\x00d\x00Z\x00d\x01d\x02l\x01Z\x01d\x01d\x02l\x02Z\x02d\x01d\x02l\x03Z\x03d\x01d\x02l\x04Z\x04d\x01d\x02l\x05Z\x05d\x01d\x02l\x06Z\x06d\x01d\x02l\x07Z\x07d\x01d\x02l\x08Z\x08d\x01d\x02l\tZ\td\x01d\x02l\nZ\nd\x01d\x02l\x0bZ\x0bd\x01d\x02l\x0cm\r\x02\x00\x01\x00m\x0e\x02\x00\x01\x00m\x0fZ\x10\x01\x00d\x01d\x03l\x11m\x12Z\x12\x01\x00d\x01d\x04l\x13m\x14Z\x14m\x15Z\x15m\x16Z\x16\x01\x00d\x01d\x05l\x17m\x18Z\x18\x01\x00d\x01d\x06l\x19m\x1aZ\x1a\x01\x00d\x01d\x07l\x1bm\x1cZ\x1c\x01\x00d\x01d\x08l\x1dm\x1eZ\x1e\x01\x00d\x01d\tl\x1fm Z \x01\x00d\x01d\nl!m"Z"\x01\x00d\x01d\x0bl#m$Z$m%Z%\x01\x00d\x01d\x0cl&m\'Z\'m(Z(m)Z)m*Z*m+Z+m,Z,m-Z-m.Z.\x01\x00d\x01d\rl/m0Z0\x01\x00d\x01d\x0el1m2Z2m3Z3m4Z4m5Z5\x01\x00d\x01d\x0fl6m7Z7\x01\x00d\x01d\x10l8m9Z9m:Z:\x01\x00d\x11Z;d\x12Z<d\x13d\x14g\x02Z=d\x15Z>e*Z?e?d\x02k\t\x90\x02r\x82d\x16e*k\x06\x90\x01r\x8ee.\xa0@d\x17\xa1\x01\x01\x00d\x02Z*z\x12e?\xa0Ad\x18\xa1\x01d\x19\x19\x00Z*W\x00n\xe0\x04\x00eBk\n\x90\x02r\x80\x01\x00\x01\x00\x01\x00z\x12e?\xa0Ad\x1a\xa1\x01d\x19\x19\x00Z*W\x00n\xb6\x04\x00eBk\n\x90\x02rz\x01\x00\x01\x00\x01\x00d\x1be?k\x06\x90\x01r\xf0e?\xa0Ad\x1c\xa1\x01d\x1d\x19\x00Z*n\x86z\x12e?\xa0Ad\x1e\xa1\x01d\x19\x19\x00Z*W\x00nr\x04\x00eBk\n\x90\x02rt\x01\x00\x01\x00\x01\x00eCeDeEjFe?\x83\x02\x83\x01\x90\x02r,d\x1fZGn\x04d ZGd!e?k\x06\x90\x02sDd"e?k\x06\x90\x02rJd\x1fZHn\x04d 
ZHd\x1feG\x90\x02pXeHf\x01k\x06\x90\x02rbn\x0ee.\xa0@d#\xa1\x01\x01\x00d\x02Z*Y\x00n\x02X\x00Y\x00n\x02X\x00Y\x00n\x02X\x00e\x0b\xa0Id$\xa1\x01ZJeJ\xa0Ke\x0bjL\xa1\x01\x01\x00e0d%d\x1fd&\x8d\x02d\'d(\x84\x00\x83\x01ZMd)d*\x84\x00ZNe0d+d\x1fd&\x8d\x02d,d-\x84\x00\x83\x01ZOd.d/\x84\x00ZPd0d1\x84\x00ZQdUd2d3\x84\x01ZRd4d5\x84\x00ZSd6d7\x84\x00ZTd8d9\x84\x00ZUd:d;\x84\x00ZVd<d=\x84\x00ZWd>d?\x84\x00ZXd@dA\x84\x00ZYe0dBd\x1fd&\x8d\x02dCdD\x84\x00\x83\x01ZZe0dEd\x1fd&\x8d\x02dFdG\x84\x00\x83\x01Z[e0dHd\x1fd&\x8d\x02dIdJ\x84\x00\x83\x01Z\\e0dKd\x1fd&\x8d\x02dLdM\x84\x00\x83\x01Z]e0dNd\x1fd&\x8d\x02dOdP\x84\x00\x83\x01Z^dQdR\x84\x00Z_e-\xa0`dSdTi\x01\xa1\x01\x01\x00d\x02S\x00)Vz&\n Google Drive manager for Userbot\n\xe9\x00\x00\x00\x00N)\x01\xda\rBeautifulSoup)\x03\xda\x06isfile\xda\x05isdir\xda\x04join\xa9\x01\xda\nguess_type)\x01\xda\x06events)\x01\xda\x10InstalledAppFlow)\x01\xda\x05build)\x01\xda\tHttpError)\x01\xda\x07Request)\x02\xda\x0fMediaFileUpload\xda\x13MediaIoBaseDownload)\x08\xda\x0cG_DRIVE_DATA\xda\x11G_DRIVE_CLIENT_ID\xda\x15G_DRIVE_CLIENT_SECRET\xda\x11G_DRIVE_FOLDER_ID\xda\rBOTLOG_CHATID\xda\x17TEMP_DOWNLOAD_DIRECTORY\xda\x08CMD_HELP\xda\x04LOGS)\x01\xda\x08register)\x04\xda\x08progress\xda\nhumanbytes\xda\x0etime_formatter\xda\x0ehuman_to_bytes)\x01\xda\rCancelProcess)\x02\xda\x05aria2\xda\x0echeck_metadataz)https://accounts.google.com/o/oauth2/authz#https://oauth2.googleapis.com/tokenz%https://www.googleapis.com/auth/drivez.https://www.googleapis.com/auth/drive.metadataz\x19urn:ietf:wg:oauth:2.0:oob\xfa\x06uc?id=z-G_DRIVE_FOLDER_ID is not a valid folderURL...\xfa\x08folders/\xe9\x01\x00\x00\x00\xfa\x08open?id=\xfa\x05/view\xfa\x01/\xe9\xfe\xff\xff\xff\xfa\x0efolderview?id=TF\xfa\x01-\xda\x01_z\'G_DRIVE_FOLDER_ID not a valid ID/URL...z\x19googleapiclient.discoveryz\x0f^.gdauth(?: 
|$))\x02Z\x07pattern\xda\x08outgoingc\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0b\x00\x00\x00\n\x00\x00\x00\xc3\x00\x00\x00s\x06\x02\x00\x00t\x00\xa0\x01t\x02|\x00j\x03\x83\x01\xa1\x01d\x01k\trF|\x00\xa0\x04d\x02\xa1\x01I\x00d\x01H\x00\x01\x00t\x05\xa0\x06d\x03\xa1\x01I\x00d\x01H\x00\x01\x00|\x00\xa0\x07\xa1\x00I\x00d\x01H\x00\x01\x00d\x04S\x00t\x08d\x01k\tr\x88z\x0et\t\xa0\nt\x08\xa1\x01}\x01W\x00q\xbe\x04\x00t\tj\x0bk\nr\x84\x01\x00\x01\x00\x01\x00|\x00\xa0\x04d\x05\xa1\x01I\x00d\x01H\x00\x01\x00Y\x00d\x04S\x00X\x00n6t\x0cd\x01k\x08r\xact\rd\x01k\x08r\xac|\x00\xa0\x04d\x06\xa1\x01I\x00d\x01H\x00\x01\x00d\x04S\x00d\x07t\x0ct\rt\x0et\x0fd\x08\x9c\x04i\x01}\x01|\x00\xa0\x04d\t\xa1\x01I\x00d\x01H\x00\x01\x00t\x10j\x11|\x01t\x12t\x13d\n\x8d\x03}\x02|\x02j\x14d\x0bd\x0cd\r\x8d\x02\\\x02}\x03}\x04|\x00\xa0\x15d\x0e\xa1\x01I\x00d\x01H\x00}\x05|\x00j\x16\xa0\x17t\x18\xa1\x014\x00I\x00d\x01H\x00\x9a\xc2}\x06|\x06\xa0\x19d\x0f|\x03\x9b\x00d\x10\x9d\x03\xa1\x01I\x00d\x01H\x00}\x07|\x06\xa0\x1at\x1bj\x1cd\x11t\x18d\x12\x8d\x02\xa1\x01}\x08|\x08I\x00d\x01H\x00}\x08|\x08j\x1dj\x1d\xa0\x1e\xa1\x00}\t|\x02j\x1f|\td\x13\x8d\x01\x01\x00|\x02j }\nt\x05\xa0\x06d\x14\xa1\x01I\x00d\x01H\x00\x01\x00|\x00j\x16\xa0!|\x00j"|\x05j#\xa1\x02I\x00d\x01H\x00\x01\x00|\x00j\x16\xa0!t\x18|\x07j#|\x08j#g\x02\xa1\x02I\x00d\x01H\x00\x01\x00t$\xa0%t&\xa0\'|\n\xa1\x01\xa1\x01\xa0(\xa1\x00}\n|\x00\xa0\x04d\x15\xa1\x01I\x00d\x01H\x00\x01\x00W\x005\x00Q\x00I\x00d\x01H\x00R\x00X\x00t\x00\xa0)t\x02|\x00j\x03\x83\x01|\n\xa1\x02\x01\x00|\x00\xa0\x07\xa1\x00I\x00d\x01H\x00\x01\x00d\x01S\x00)\x16z% - Only generate once for long run - Nz!`You already authorized token...`g\x00\x00\x00\x00\x00\x00\xf8?Fz]`[AUTHENTICATE - ERROR]`\n\n`Status` : **BAD**\n`Reason` : **G_DRIVE_DATA** entity is not valid!z\x93`[AUTHENTICATE - ERROR]`\n\n`Status` : **BAD**\n`Reason` : please get your **G_DRIVE_DATA** 
[here](https://telegra.ph/How-To-Setup-Google-Drive-04-03)Z\tinstalled)\x04Z\tclient_idZ\rclient_secretZ\x08auth_uriZ\ttoken_uriz\x19`Creating credentials...`)\x01Z\x0credirect_uriZ\x07offlineZ\x07consent)\x02Z\x0baccess_type\xda\x06promptz2`Go to your BOTLOG group to authenticate token...`z\x17Please go to this URL:\nz\x1e\nauthorize then reply the codeT\xa9\x02r)\x00\x00\x00Z\x05chats)\x01\xda\x04code\xe7\x00\x00\x00\x00\x00\x00\x0c@z\x18`Credentials created...`)*\xda\x06helper\xda\x0fget_credentials\xda\x03str\xda\x07from_id\xda\x04edit\xda\x07asyncio\xda\x05sleep\xda\x06deleter\x0f\x00\x00\x00\xda\x04json\xda\x05loadsZ\x0fJSONDecodeErrorr\x10\x00\x00\x00r\x11\x00\x00\x00\xda\x0fGOOGLE_AUTH_URI\xda\x10GOOGLE_TOKEN_URIr\t\x00\x00\x00Z\x12from_client_config\xda\x06SCOPES\xda\x0cREDIRECT_URIZ\x11authorization_url\xda\x07respond\xda\x06client\xda\x0cconversationr\x13\x00\x00\x00\xda\x0csend_message\xda\nwait_eventr\x08\x00\x00\x00\xda\nNewMessage\xda\x07message\xda\x05stripZ\x0bfetch_token\xda\x0bcredentials\xda\x0fdelete_messages\xda\x07chat_id\xda\x02id\xda\x06base64\xda\tb64encode\xda\x06pickle\xda\x05dumps\xda\x06decode\xda\x10save_credentials)\x0b\xda\x06gdriveZ\x07configsZ\x04flowZ\x08auth_urlr(\x00\x00\x00\xda\x03msg\xda\x04convZ\x07url_msg\xda\x01rr,\x00\x00\x00\xda\x05creds\xa9\x00rS\x00\x00\x00\xda\x00\xda\x14generate_credentialsg\x00\x00\x00sr\x00\x00\x00\x00\x03\x14\x01\x10\x01\x10\x01\x0e\x01\x04\x02\x08\x01\x02\x01\x0e\x01\x10\x01\x04\x01\x02\xff\n\x05\n\x03\x10\x01\x04\x01\x02\xff\n\x06\x04\x02\x02\x01\x02\x01\x02\x01\x02\x01\x02\xfc\x04\xff\x04\x08\x10\x01\x04\x01\x02\x00\x02\x00\x02\xff\x06\x02\x04\x01\x02\x00\x02\xff\n\x02\x04\x01\x02\xff\n\x03\x16\x01\x04\x01\n\xff\n\x04\x04\x01\x0c\xff\x04\x02\n\x01\x0c\x01\x0c\x01\x06\x01\x10\x01\x18\x01\x1c\x02\x14\x01 
\x01\x12\x01\x0e\x01rU\x00\x00\x00c\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x03\x00\x00\x00\x08\x00\x00\x00\xc3\x00\x00\x00s\xae\x00\x00\x00t\x00\xa0\x01t\x02|\x00j\x03\x83\x01\xa1\x01}\x01|\x01d\x01k\tr,t\x04\xa0\x05t\x06\xa0\x07|\x01\xa0\x08\xa1\x00\xa1\x01\xa1\x01}\x01|\x01r6|\x01j\ts\x9a|\x01r\x86|\x01j\nr\x86|\x01j\x0br\x86|\x00\xa0\x0cd\x02\xa1\x01I\x00d\x01H\x00\x01\x00|\x01\xa0\rt\x0e\x83\x00\xa1\x01\x01\x00t\x00\xa0\x0ft\x02|\x00j\x03\x83\x01t\x06\xa0\x10t\x04\xa0\x11|\x01\xa1\x01\xa1\x01\xa0\x12\xa1\x00\xa1\x02\x01\x00n\x14|\x00\xa0\x0cd\x03\xa1\x01I\x00d\x01H\x00\x01\x00d\x04S\x00t\x13d\x05d\x06|\x01d\x04d\x07\x8d\x04}\x02|\x02S\x00)\x08z% - Create google drive service app - Nz\x1b`Refreshing credentials...`z-`Credentials is empty, please generate it...`F\xda\x05driveZ\x02v3)\x02rD\x00\x00\x00Z\x0fcache_discovery)\x14r.\x00\x00\x00r/\x00\x00\x00r0\x00\x00\x00r1\x00\x00\x00rJ\x00\x00\x00r7\x00\x00\x00rH\x00\x00\x00Z\tb64decode\xda\x06encodeZ\x05validZ\x07expiredZ\rrefresh_tokenr2\x00\x00\x00Z\x07refreshr\x0c\x00\x00\x00rM\x00\x00\x00rI\x00\x00\x00rK\x00\x00\x00rL\x00\x00\x00r\n\x00\x00\x00)\x03rN\x00\x00\x00rR\x00\x00\x00\xda\x07servicerS\x00\x00\x00rS\x00\x00\x00rT\x00\x00\x00\xda\ncreate_app\xaa\x00\x00\x00s \x00\x00\x00\x00\x02\x10\x01\x08\x02\x04\x01\x0c\xff\x04\x02\n\x01\x10\x01\x10\x02\x0c\x01\x0c\x01\x12\xff\x06\x03\x10\x01\x04\x01\x10\x01rY\x00\x00\x00z\x10^.gdreset(?: |$)c\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x04\x00\x00\x00\xc3\x00\x00\x00sR\x00\x00\x00|\x00\xa0\x00d\x01\xa1\x01I\x00d\x02H\x00\x01\x00t\x01\xa0\x02t\x03|\x00j\x04\x83\x01\xa1\x01\x01\x00|\x00\xa0\x00d\x03\xa1\x01I\x00d\x02H\x00\x01\x00t\x05\xa0\x06d\x04\xa1\x01I\x00d\x02H\x00\x01\x00|\x00\xa0\x07\xa1\x00I\x00d\x02H\x00\x01\x00d\x02S\x00)\x05z) - Reset credentials or change account - z\x1a`Resetting 
information...`Nz\t`Done...`r!\x00\x00\x00)\x08r2\x00\x00\x00r.\x00\x00\x00Z\x11clear_credentialsr0\x00\x00\x00r1\x00\x00\x00r3\x00\x00\x00r4\x00\x00\x00r5\x00\x00\x00)\x01rN\x00\x00\x00rS\x00\x00\x00rS\x00\x00\x00rT\x00\x00\x00\xda\x11reset_credentials\xbf\x00\x00\x00s\x0c\x00\x00\x00\x00\x03\x10\x01\x10\x01\x10\x01\x10\x01\x0e\x01rZ\x00\x00\x00c\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x03\x00\x00\x00\xc3\x00\x00\x00s\x0e\x00\x00\x00|\x00\xa0\x00d\x01\xa1\x01d\x02\x19\x00S\x00)\x03z" - Get file_name from file_path - r$\x00\x00\x00\xe9\xff\xff\xff\xff)\x01\xda\x05split)\x01\xda\tfile_pathrS\x00\x00\x00rS\x00\x00\x00rT\x00\x00\x00\xda\x0cget_raw_name\xca\x00\x00\x00s\x02\x00\x00\x00\x00\x02r^\x00\x00\x00c\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x02\x00\x00\x00\xc3\x00\x00\x00s\x18\x00\x00\x00t\x00|\x00\x83\x01d\x01\x19\x00}\x01|\x01s\x14d\x02}\x01|\x01S\x00)\x03z\x1f - Check mimeType given file - r\x01\x00\x00\x00z\ntext/plainr\x06\x00\x00\x00\xa9\x02\xda\x04name\xda\x08mimeTyperS\x00\x00\x00rS\x00\x00\x00rT\x00\x00\x00\xda\x0cget_mimeType\xcf\x00\x00\x00s\x08\x00\x00\x00\x00\x02\x0c\x01\x04\x01\x04\x01rb\x00\x00\x00c\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x13\x00\x00\x00\x0c\x00\x00\x00\x83\x00\x00\x00sF\x03\x00\x00d\x01}\x03t\x00t\x01\x83\x01s\x1at\x02\xa0\x03t\x01\xa1\x01\x01\x00d\x00}\x04|\x02r\xf2t\x02\xa0\x04\xa1\x00t\x01\xa0\x05d\x02\xa1\x01\x17\x00}\x05t\x06|\x02\x83\x01rZ|\x02\xa0\x07d\x03\xa1\x01rZt\x08j\t|\x02d\x00d\x04|\x05i\x01d\x00d\x05\x8d\x04}\x06n\x1a|\x02g\x01}\x02t\x08j\n|\x02d\x04|\x05i\x01d\x00d\x06\x8d\x03}\x06|\x06j\x0b}\x07t\x0c\x88\x01|\x07d\x00d\x07\x8d\x03I\x00d\x00H\x00\x01\x00t\x08\xa0\r|\x07\xa1\x01}\x08|\x08j\x0e}\t|\x08j\x0fr\xc6t\x10|\x07\x83\x01I\x00d\x00H\x00}\nt\x0c\x88\x01|\nd\x00d\x07\x8d\x03I\x00d\x00H\x00\x01\x00z\x0ct\x01t\x11\x17\x00}\x04W\x00n\x1c\x04\x00t\x12k\nr\xee\x01\x00\x01\x00\x01\x00t\x01|\t\x17\x00}\x04Y\x00n\x02X\x00ndz<t\x13\xa0\x13\xa1\x00\x89\x00d\x08a\x14
\x88\x01j\x15j\x16\x88\x01\xa0\x17\xa1\x00I\x00d\x00H\x00t\x01\x87\x00\x87\x01f\x02d\td\n\x84\x08d\x0b\x8d\x03I\x00d\x00H\x00}\x0bW\x00n"\x04\x00t\x18k\n\x90\x01rP\x01\x00\x01\x00\x01\x00|\x03d\x0c7\x00}\x03|\x03\x06\x00Y\x00S\x00X\x00|\x0b}\x04z\x12t\x19|\x04\x83\x01I\x00d\x00H\x00}\x0cW\x00n"\x04\x00t\x1ak\n\x90\x01r\x8a\x01\x00\x01\x00\x01\x00|\x03d\r7\x00}\x03|\x03\x06\x00Y\x00S\x00X\x00t\x1b|\x04\x83\x01I\x00d\x00H\x00}\r\x90\x01zRd\x0e}\x0et\x06|\x04\x83\x01\x90\x02r(z\x1at\x1c\x88\x01|\x01|\x04|\x0c|\r\x83\x05I\x00d\x00H\x00}\x0fW\x00n$\x04\x00t\x18k\n\x90\x01r\xea\x01\x00\x01\x00\x01\x00|\x03d\x0c7\x00}\x03|\x03\x06\x00Y\x00W\x00S\x00X\x00|\x03d\x0f|\x0e\x9b\x00d\x10|\x0c\x9b\x00d\x11t\x1d|\x0fd\x12\x19\x00\x83\x01\x9b\x00d\x13|\x0c\x9b\x00d\x14|\x0fd\x15\x19\x00\x9b\x00d\x16\x9d\x0b7\x00}\x03|\x03W\x00S\x00n\xc4|\x0e\xa0\x1ed\x17d\x18\xa1\x02}\x0et\x1f|\x01|\x0c\x83\x02I\x00d\x00H\x00}\x10|\x10\xa0 d\x19\xa1\x01a!d\x1at!\x17\x00}\x11z\x16t"\x88\x01|\x01|\x04\x83\x03I\x00d\x00H\x00\x01\x00W\x00nP\x04\x00t\x18k\n\x90\x02r\x9c\x01\x00\x01\x00\x01\x00|\x03d\x1b7\x00}\x03t#\x83\x00I\x00d\x00H\x00\x01\x00|\x03\x06\x00Y\x00W\x00S\x00\x04\x00t\x12k\n\x90\x02r\xbc\x01\x00\x01\x00\x01\x00t#\x83\x00I\x00d\x00H\x00\x01\x00Y\x00n0X\x00|\x03d\x0f|\x0e\x9b\x00d\x1c|\x0c\x9b\x00d\x14|\x11\x9b\x00d\x1d\x9d\x077\x00}\x03t#\x83\x00I\x00d\x00H\x00\x01\x00|\x03W\x00S\x00W\x00nR\x04\x00t\x12k\n\x90\x03r@\x01\x00}\x12\x01\x00z2|\x0e\xa0\x1ed\x1ed\x1f\xa1\x02}\x0e|\x03d\x0f|\x0e\x9b\x00d t$|\x12\x83\x01\x9b\x00d!\x9d\x057\x00}\x03|\x03\x06\x00W\x00Y\x00\xa2\x02S\x00d\x00}\x12~\x12X\x00Y\x00n\x02X\x00d\x00S\x00)"NrT\x00\x00\x00\xda\x01.\xfa\x08.torrent\xda\x03dir)\x03Z\x04uris\xda\x07options\xda\x08position)\x02rf\x00\x00\x00rg\x00\x00\x00)\x01\xda\x08previousFc\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\n\x00\x00\x00\x13\x00\x00\x00s\x1e\x00\x00\x00t\x00\xa0\x01\xa1\x00\xa0\x02t\x03|\x00|\x01\x88\x01\x88\x00d\x01t\x04d\x02\x8d\x06\xa1\x01S\x00)\x03Nz\x11[FILE 
- DOWNLOAD])\x01\xda\x0cis_cancelled)\x05r3\x00\x00\x00Z\x0eget_event_loopZ\x0bcreate_taskr\x18\x00\x00\x00ri\x00\x00\x00)\x02\xda\x01d\xda\x01t\xa9\x02\xda\x0ccurrent_timerN\x00\x00\x00rS\x00\x00\x00rT\x00\x00\x00\xda\x08<lambda>\xfe\x00\x00\x00s\n\x00\x00\x00\x08\x01\n\x01\x02\x01\x02\xfe\x04\xffz\x1adownload.<locals>.<lambda>)\x01Z\x11progress_callback\xfaD`[FILE - CANCELLED]`\n\n`Status` : **OK** - received signal cancelled.z&`[ENTRY - ERROR]`\n\n`Status` : **BAD**\nz\x0f[FILE - UPLOAD]\xfa\x01`z\x0f`\n\n`Name :` `\xfa\x0e`\n`Size :` `r\x01\x00\x00\x00\xfa\x0e`\n`Link :` [\xfa\x02](r!\x00\x00\x00z.)\n`Status :` **OK** - Successfully uploaded.\n\nz\x05[FILEz\x07[FOLDERrG\x00\x00\x00\xfa\'https://drive.google.com/drive/folders/\xfaF`[FOLDER - CANCELLED]`\n\n`Status` : **OK** - received signal cancelled.z\x04`\n\n[\xfa.)\n`Status` : **OK** - Successfully uploaded.\n\nz\tDOWNLOAD]\xfa\x06ERROR]z%`\n\n`Status` : **failed**\n`Reason` : `\xfa\x03`\n\n)%r\x04\x00\x00\x00r\x14\x00\x00\x00\xda\x02os\xda\x08makedirs\xda\x06getcwdrC\x00\x00\x00r\x03\x00\x00\x00\xda\x08endswithr\x1d\x00\x00\x00Z\x0badd_torrentZ\x08add_uris\xda\x03gid\xda\x15check_progress_for_dl\xda\x0cget_downloadr`\x00\x00\x00Z\x0ffollowed_by_idsr\x1e\x00\x00\x00\xda\tfilenames\xda\tException\xda\x04timeri\x00\x00\x00r=\x00\x00\x00Z\x0edownload_mediaZ\x11get_reply_messager\x1c\x00\x00\x00r^\x00\x00\x00\xda\x0eAttributeErrorrb\x00\x00\x00\xda\x06uploadr\x19\x00\x00\x00\xda\x07replace\xda\ncreate_dir\xda\x03get\xda\tparent_Id\xda\x0etask_directory\xda\x0ereset_parentIdr0\x00\x00\x00)\x13rN\x00\x00\x00rX\x00\x00\x00\xda\x03uri\xda\x05replyZ\x12required_file_name\xda\tfull_path\xda\tdownloadsr}\x00\x00\x00\xda\x04file\xda\x08filenameZ\x07new_gidZ\x14downloaded_file_name\xda\tfile_namera\x00\x00\x00\xda\x06status\xda\x06result\xda\x06folder\xda\nwebViewURL\xda\x01erS\x00\x00\x00rl\x00\x00\x00rT\x00\x00\x00\xda\x08download\xd7\x00\x00\x00s\xca\x00\x00\x00\x00\x02\x04\x02\x08\x01\n\x01\x04\x01\x04\x01\x12\x01\x12
\x01\x04\x01\x02\x01\x02\x01\x06\x01\x02\xfc\x08\x06\x06\x01\x04\x01\x02\x01\x06\x01\x02\xfd\x06\x04\x06\x01\x14\x01\n\x01\x06\x01\x06\x01\x0e\x01\x14\x01\x02\x01\x0c\x01\x0e\x01\x10\x02\x02\x01\x08\x01\x04\x01\x06\x01\x0c\x01\x02\x01\x0c\xfd\x10\x07\x10\x01\x02\x01\x02\xff\x04\x04\n\x02\x04\x01\x02\x01\x12\x01\x10\x01\x02\x01\x02\xff\x04\x04\n\x01\x0e\x01\x04\x01\x04\x01\n\x01\x02\x01\x02\x01\x02\x00\x02\x00\x02\x00\x02\x00\x02\xff\x0e\x02\x10\x01\x02\x01\x02\xff\x04\x04\x0c\x02\x02\x01.\xff\x04\x07\x08\x02\x0c\x02\x10\x01\n\x02\x02\x01\x02\xff\x02\xff\x02\x04\x02\x01\x16\x01\x10\x01\x02\x01\x02\xff\x04\x04\x0c\x01\n\x01\x10\x01\x12\x02\x02\x01\x16\xff\x04\x05\x0c\x01\n\x01\x12\x01\x0c\x01\x02\x01\x14\xff\x04\x05\x1a\x01r\x97\x00\x00\x00c\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00*\x00\x00\x00\x12\x00\x00\x00\xc3\x00\x00\x00s|\x07\x00\x00d\x01}\x03t\x00t\x01\x83\x01s\x16t\x02\xa0\x03t\x01\xa1\x01\x01\x00d\x02|\x02k\x06r.|\x02\xa0\x04d\x02\xa1\x01d\x03\x19\x00}\x02n\x1ed\x04|\x02k\x06rLd\x05|\x02k\x06rL|\x02\xa0\x04d\x06\xa1\x01d\x03\x19\x00}\x02z\x12|\x02\xa0\x04d\x07\xa1\x01d\x08\x19\x00}\x04W\x00n\x8a\x04\x00t\x05k\nr\xe8\x01\x00\x01\x00\x01\x00z\x12|\x02\xa0\x04d\t\xa1\x01d\x08\x19\x00}\x04W\x00nb\x04\x00t\x05k\nr\xe2\x01\x00\x01\x00\x01\x00d\x05|\x02k\x06r\xa8|\x02\xa0\x04d\n\xa1\x01d\x0b\x19\x00}\x04n6z\x1c|\x02\xa0\x04d\x0c\xa1\x01d\x08\x19\x00\xa0\x04d\r\xa1\x01d\x08\x19\x00}\x04W\x00n\x18\x04\x00t\x05k\nr\xdc\x01\x00\x01\x00\x01\x00|\x02}\x04Y\x00n\x02X\x00Y\x00n\x02X\x00Y\x00n\x02X\x00z\x16t\x06|\x01|\x04\x83\x02I\x00d\x00H\x00}\x05W\x00\x90\x03n\x1c\x04\x00t\x07k\n\x90\x04r\x1c\x01\x00}\x06\x01\x00\x90\x02z\xf8d\x0et\x08|\x06\x83\x01k\x06\x90\x04r\nd\x0f}\x07|\x07\x9b\x00d\x10|\x04\x9b\x00\x9d\x03}\x08t\t\xa0\n\xa1\x00}\t|\tj\x0b|\x08d\x11d\x12\x8d\x02}\nz\x0e|\nj\x0cd\x13\x19\x00\x01\x00W\x00n\xf6\x04\x00t\rk\n\x90\x02rP\x01\x00\x01\x00\x01\x00t\x0e|\nj\x0fd\x14\x83\x02}\x0bz\x1e|\x07|\x0b\xa0\x10d\x15d\x16d\x17i\x01\xa1\x02\xa0\x0bd\x18\xa1\x01\x17
\x00}\x0cW\x00n\x82\x04\x00t\x11k\n\x90\x02r\x18\x01\x00\x01\x00\x01\x00z,|\x0b\xa0\x10d\x19d\x1ad\x1bi\x01\xa1\x02j\x12d\x1c\x17\x00|\x0b\xa0\x10d\x19d\x1ad\x1di\x01\xa1\x02j\x12\x17\x00}\rW\x00n\x1e\x04\x00t\x13k\n\x90\x01r\xf2\x01\x00\x01\x00\x01\x00|\x03d\x1e7\x00}\x03Y\x00n\x10X\x00|\x03d\x1f|\r\x9b\x00\x9d\x027\x00}\x03|\x03\x06\x00Y\x00\x06\x00Y\x00\x06\x00W\x00Y\x00\x90\x01\xa2\xf8S\x00X\x00|\tj\x0b|\x0cd\x11d\x12\x8d\x02}\nt\x14|\x0b\xa0\x10d d\x1ad!i\x01\xa1\x02j\x12\xa0\x04\xa1\x00d"\x19\x00\xa0\x15d#\xa1\x01\x83\x01}\x0eY\x00n\x10X\x00t\x16|\nj\x0cd$\x19\x00\x83\x01}\x0et\x17\xa0\x18d%|\nj\x0cd\x13\x19\x00\xa1\x02\xa0\x19d\x08\xa1\x01}\x0ft\x01|\x0f\x17\x00}\x10t\x1a\xa0\x1b|\x10d&\xa1\x02\x90\x01\x8fv}\x11d\x00}\x12t\x1c\xa0\x1c\xa1\x00}\x13d\x00}\x14d\x11}\x15d\'a\x1d|\n\xa0\x1e|\x12\xa1\x01D\x00\x90\x01]J}\x16t\x1dd\x11k\x08\x90\x02r\xc6t\x1f\x82\x01|\x16\x90\x02s\xd2\x01\x00\x90\x04q\x00t\x1c\xa0\x1c\xa1\x00|\x13\x18\x00}\x17|\x15d\x11k\x08\x90\x02r\xf6t |\x16\x83\x01}\x18d\'}\x15n\x0c|\x18t 
|\x16\x83\x017\x00}\x18|\x18|\x0e\x1b\x00d(\x14\x00}\x19t!|\x18|\x17\x1b\x00d)\x83\x02}\x1at!|\x0e|\x18\x18\x00|\x1a\x1b\x00\x83\x01}\x1bd*\xa0"d\x01\xa0#d+d,\x84\x00t$t%\xa0&|\x19d-\x1b\x00\xa1\x01\x83\x01D\x00\x83\x01\xa1\x01d\x01\xa0#d.d,\x84\x00t$d-t%\xa0&|\x19d-\x1b\x00\xa1\x01\x18\x00\x83\x01D\x00\x83\x01\xa1\x01t!|\x19d)\x83\x02\xa1\x03}\x1cd/|\x0f\x9b\x00d0|\x1c\x9b\x00d1t\'|\x18\x83\x01\x9b\x00d2t\'|\x0e\x83\x01\x9b\x00d3t\'|\x1a\x83\x01\x9b\x00d4t(|\x1b\x83\x01\x9b\x00\x9d\x0c}\x1dt!|\x17d5\x16\x00\x83\x01d\x03k\x02\x90\x03r\xd4|\x14|\x1dk\x03\x90\x03s\xde|\x18|\x0ek\x02\x90\x03r\xf2|\x00\xa0)|\x1d\xa1\x01I\x00d\x00H\x00\x01\x00|\x1d}\x14|\x11\xa0*|\x16\xa1\x01\x01\x00\x90\x02q\xb2W\x005\x00Q\x00R\x00X\x00W\x005\x00d\x00}\x06~\x06X\x00Y\x00\x90\x01n\xbcX\x00|\x05\xa0\x0bd6\xa1\x01}\x0f|\x05\xa0\x0bd7\xa1\x01}\x1e|\x1ed8k\x02\x90\x04rP|\x00\xa0)d9\xa1\x01I\x00d\x00H\x00\x01\x00d\'S\x00t\x01|\x0f\x17\x00}\x10|\x01\xa0+\xa1\x00j,|\x04d\x11d:\x8d\x02}\x1ft\x1a\xa0\x1b|\x10d&\xa1\x02\x90\x01\x8fZ} t-| 
|\x1f\x83\x02}!d\'}"d\'a\x1dt\x1c\xa0\x1c\xa1\x00}\x13d\x00}\x14|"d\'k\x08\x90\x05r\xcet\x1dd\x11k\x08\x90\x04r\xb0t\x1f\x82\x01|!\xa0.\xa1\x00\\\x02}#}"|#\x90\x04r\x98|#j/}\x0et\x1c\xa0\x1c\xa1\x00|\x13\x18\x00}\x17|#j0}\x18|\x18|\x0e\x1b\x00d(\x14\x00}\x19t!|\x18|\x17\x1b\x00d)\x83\x02}\x1at!|\x0e|\x18\x18\x00|\x1a\x1b\x00\x83\x01}\x1bd*\xa0"d\x01\xa0#d;d,\x84\x00t$t%\xa0&|\x19d-\x1b\x00\xa1\x01\x83\x01D\x00\x83\x01\xa1\x01d\x01\xa0#d<d,\x84\x00t$d-t%\xa0&|\x19d-\x1b\x00\xa1\x01\x18\x00\x83\x01D\x00\x83\x01\xa1\x01t!|\x19d)\x83\x02\xa1\x03}\x1cd/|\x0f\x9b\x00d0|\x1c\x9b\x00d1t\'|\x18\x83\x01\x9b\x00d2t\'|\x0e\x83\x01\x9b\x00d3t\'|\x1a\x83\x01\x9b\x00d4t(|\x1b\x83\x01\x9b\x00\x9d\x0c}\x1dt!|\x17d5\x16\x00\x83\x01d\x03k\x02\x90\x05r\xac|\x14|\x1dk\x03\x90\x05s\xb6|\x18|\x0ek\x02\x90\x04r\x98|\x00\xa0)|\x1d\xa1\x01I\x00d\x00H\x00\x01\x00|\x1d}\x14\x90\x04q\x98W\x005\x00Q\x00R\x00X\x00|\x00\xa0)d=|\x0f\x9b\x00d>t\'|\x0e\x83\x01\x9b\x00d?|\x10\x9b\x00d@\x9d\x07\xa1\x01I\x00d\x00H\x00\x01\x00|\x00\xa01dA\xa1\x01I\x00d\x00H\x00}$|\x00j2\xa03t4\xa1\x014\x00I\x00d\x00H\x00\x9a\xa4}%|%\xa05dB\xa1\x01I\x00d\x00H\x00}&z"|%\xa06t7j8d\x11t4dC\x8d\x02\xa1\x01}\'|\'I\x00d\x00H\x00}\'W\x00n\x1a\x04\x00t\x13k\n\x90\x06rr\x01\x00\x01\x00\x01\x00dD}(Y\x00n$X\x00|\'j9j9\xa0\x15\xa1\x00}(|\x00j2\xa0:t4|\'j;\xa1\x02I\x00d\x00H\x00\x01\x00|\x00j2\xa0:|\x00j<|$j;\xa1\x02I\x00d\x00H\x00\x01\x00|\x00j2\xa0:t4|&j;\xa1\x02I\x00d\x00H\x00\x01\x00W\x005\x00Q\x00I\x00d\x00H\x00R\x00X\x00|(\xa0=\xa1\x00dDk\x02\x90\x06r\xe6|\x03S\x00|(\xa0=\xa1\x00dEk\x02\x90\x07r`z\x1at>|\x00|\x01|\x10|\x0f|\x1e\x83\x05I\x00d\x00H\x00})W\x00n\x1e\x04\x00t\x1fk\n\x90\x07r,\x01\x00\x01\x00\x01\x00|\x03dF7\x00}\x03Y\x00n0X\x00|\x03dG|\x0f\x9b\x00d>t\'|)d\x03\x19\x00\x83\x01\x9b\x00dH|\x0f\x9b\x00dI|)d\x08\x19\x00\x9b\x00dJ\x9d\t7\x00}\x03|\x03S\x00|\x00j2\xa05t4dK\xa1\x02I\x00d\x00H\x00\x01\x00|\x03S\x00d\x00S\x00)LNrT\x00\x00\x00z\x10&export=downloadr\x01\x00\x00\x00z\x07file/d/r#\x00\x00\x00z\r?usp=drivesdkr\x1f\x
00\x00\x00r!\x00\x00\x00r"\x00\x00\x00r$\x00\x00\x00r%\x00\x00\x00z\x1buc?export=download&confirm=z\x03id=Z\x03404z\x18https://drive.google.comz\x17/uc?export=download&id=T)\x01\xda\x06streamz\x13Content-DispositionZ\x04lxml\xda\x01arG\x00\x00\x00z\x10uc-download-linkZ\x04href\xda\x01pZ\x05classz\x10uc-error-caption\xda\x01\nz\x13uc-error-subcaptionzS`[FILE - ERROR]`\n\n`Status` : **BAD** - failed to download.\n`Reason` : uncaught err.zF`[FILE - ERROR]`\n\n`Status` : **BAD** - failed to download.\n`Reason` : \xda\x04spanz\x0cuc-name-sizer[\x00\x00\x00z\x02()z\x0eContent-Lengthz\x0ffilename="(.*)"\xda\x02wbF\xe9d\x00\x00\x00\xe9\x02\x00\x00\x00z\x1f`Downloading` | [{0}{1}] `{2}%`c\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x03\x00\x00\x00S\x00\x00\x00s\x10\x00\x00\x00g\x00|\x00]\x08}\x01d\x00\x91\x02q\x04S\x00\xa9\x01u\x03\x00\x00\x00\xe2\x96\xa0rS\x00\x00\x00\xa9\x02\xda\x02.0\xda\x01irS\x00\x00\x00rS\x00\x00\x00rT\x00\x00\x00\xda\n<listcomp>\xb0\x01\x00\x00s\x04\x00\x00\x00\x06\x00\x02\x00z#download_gdrive.<locals>.<listcomp>\xe9\n\x00\x00\x00c\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x03\x00\x00\x00S\x00\x00\x00s\x10\x00\x00\x00g\x00|\x00]\x08}\x01d\x00\x91\x02q\x04S\x00\xa9\x01u\x03\x00\x00\x00\xe2\x96\xa8rS\x00\x00\x00r\xa1\x00\x00\x00rS\x00\x00\x00rS\x00\x00\x00rT\x00\x00\x00r\xa4\x00\x00\x00\xb2\x01\x00\x00s\x04\x00\x00\x00\x06\x00\x02\x00z\x16`[FILE - DOWNLOAD]`\n\n`\xfa\x0b`\n`Status`\n\xfa\x02\n`\xfa\x04 of \xfa\x03 @ \xfa\x0b`\n`ETA` -> \xe7\x00\x00\x00\x00\x00\x00.@r`\x00\x00\x00ra\x00\x00\x00\xfa"application/vnd.google-apps.folderz*`Aborting, folder download not 
support...`\xa9\x02\xda\x06fileId\xda\x11supportsAllDrivesc\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x03\x00\x00\x00S\x00\x00\x00s\x10\x00\x00\x00g\x00|\x00]\x08}\x01d\x00\x91\x02q\x04S\x00r\xa0\x00\x00\x00rS\x00\x00\x00r\xa1\x00\x00\x00rS\x00\x00\x00rS\x00\x00\x00rT\x00\x00\x00r\xa4\x00\x00\x00\xe0\x01\x00\x00s\x04\x00\x00\x00\x06\x00\x02\x00c\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x03\x00\x00\x00S\x00\x00\x00s\x10\x00\x00\x00g\x00|\x00]\x08}\x01d\x00\x91\x02q\x04S\x00r\xa6\x00\x00\x00rS\x00\x00\x00r\xa1\x00\x00\x00rS\x00\x00\x00rS\x00\x00\x00rT\x00\x00\x00r\xa4\x00\x00\x00\xe2\x01\x00\x00s\x04\x00\x00\x00\x06\x00\x02\x00z!`[FILE - DOWNLOAD]`\n\n`Name :` `rq\x00\x00\x00z\x0e`\n`Path :` `z.`\n`Status :` **OK** - Successfully downloaded.z*`Answer the question in your BOTLOG group`z\x1f`Proceed with mirroring? [y/N]`r+\x00\x00\x00\xda\x01N\xda\x01Yro\x00\x00\x00\xfa\x1f`[FILE - UPLOAD]`\n\n`Name :` `rr\x00\x00\x00rs\x00\x00\x00z\x15)\n`Status :` **OK**\n\nz#`Invalid answer type [Y/N] 
only...`)?r\x04\x00\x00\x00r\x14\x00\x00\x00ry\x00\x00\x00\xda\x05mkdirr\\\x00\x00\x00\xda\nIndexError\xda\x0fget_informationr\x0b\x00\x00\x00r0\x00\x00\x00\xda\x08requests\xda\x07sessionr\x87\x00\x00\x00Z\x07headers\xda\x08KeyErrorr\x02\x00\x00\x00Z\x07content\xda\x04findr\x83\x00\x00\x00\xda\x04textr\x81\x00\x00\x00r\x1b\x00\x00\x00rC\x00\x00\x00\xda\x03int\xda\x02re\xda\x06search\xda\x05group\xda\x02io\xda\x06FileIOr\x82\x00\x00\x00ri\x00\x00\x00Z\x0citer_contentr\x1c\x00\x00\x00\xda\x03len\xda\x05round\xda\x06formatr\x05\x00\x00\x00\xda\x05range\xda\x04math\xda\x05floorr\x19\x00\x00\x00r\x1a\x00\x00\x00r2\x00\x00\x00\xda\x05write\xda\x05filesZ\tget_mediar\x0e\x00\x00\x00\xda\nnext_chunk\xda\ntotal_size\xda\x12resumable_progressr<\x00\x00\x00r=\x00\x00\x00r>\x00\x00\x00r\x13\x00\x00\x00r?\x00\x00\x00r@\x00\x00\x00r\x08\x00\x00\x00rA\x00\x00\x00rB\x00\x00\x00rE\x00\x00\x00rG\x00\x00\x00rF\x00\x00\x00\xda\ncapitalizer\x84\x00\x00\x00)*rN\x00\x00\x00rX\x00\x00\x00r\x8b\x00\x00\x00r\x8c\x00\x00\x00Z\x07file_Idr\x8f\x00\x00\x00r\x96\x00\x00\x00rV\x00\x00\x00Z\x03urlr\xb8\x00\x00\x00r\x97\x00\x00\x00Z\x04pageZ\x06export\xda\x05error\xda\tfile_sizer\x91\x00\x00\x00r]\x00\x00\x00r\xc9\x00\x00\x00Z\nCHUNK_SIZErm\x00\x00\x00\xda\x0fdisplay_messageZ\x05firstZ\x05chunk\xda\x04diff\xda\ndownloaded\xda\npercentage\xda\x05speed\xda\x03eta\xda\x08prog_str\xda\x0fcurrent_messagera\x00\x00\x00Z\x07requestZ\x02dfZ\ndownloader\xda\x08completer\x92\x00\x00\x00rO\x00\x00\x00rP\x00\x00\x00Z\x03askrQ\x00\x00\x00Z\x03ansr\x93\x00\x00\x00rS\x00\x00\x00rS\x00\x00\x00rT\x00\x00\x00\xda\x0fdownload_gdriveO\x01\x00\x00s\xa0\x01\x00\x00\x00\x01\x04\x03\x08\x01\n\x01\x08\x01\x10\x01\x10\x01\x0e\x01\x02\x01\x12\x01\x0e\x01\x02\x01\x12\x01\x0e\x01\x08\x01\x10\x02\x02\x01\x08\x01\x02\xff\x04\x01\x02\xff\x02\x01\x02\xff\x08\x02\x0e\x02\x16\x01\x02\x01\x16\x01\x14\x01\x0e\x01\x04\x01\x0e\x02\x08\x01\x0e\x02\x02\x01\x0e\x01\x10\x01\x0c\x01\x02\x01\x12\x01\x02\xff\n\x02\x10\x01\x02\x02\x10\x01\x02\xff
\x02\x02\x10\xfe\x02\xff\x06\x06\x10\x01\x02\x01\x02\xff\n\x06\x02\x01\x08\xff\x04\x05\x18\x01\x0e\x01\x02\x01\x14\x01\x02\xff\x04\x01\x02\xff\x02\xff\n\x04\x0e\x02\x04\x01\x02\x00\x08\xff\x04\x02\x02\xfe\x04\x03\x08\x01\x10\x01\x04\x01\x08\x01\x04\x01\x04\x01\x04\x01\x10\x01\n\x01\x04\x02\x06\x01\x06\x02\x0c\x01\n\x01\x08\x01\x06\x02\x0c\x01\x0c\x01\x0e\x01\x10\x01\x04\x01\x0c\x01\x0c\xff\x08\x02\x0c\x01\x10\xff\x08\x02\x08\xfb\x04\x076\xff\x02\x08\x02\x01\x06\xff\x02\x01\x02\xff\x06\x01\x02\x01\x02\xff\x02\xff\x04\x03\x06\xfd\x04\x04\x10\x01\x04\x01,\x02\n\x01\n\x01\n\x01\x10\x01\x04\x01\x08\x01\n\x01\x02\xff\x06\x02\x10\x01\n\x01\x04\x01\x04\x01\x08\x01\x04\x01\n\x01\n\x01\x04\x02\x0c\x01\x06\x01\x06\x01\x0c\x01\x06\x01\x0c\x01\x0e\x01\x10\x01\x04\x01\x0c\x01\x0c\xff\x08\x02\x0c\x01\x10\xff\x08\x02\x08\xfb\x04\x076\xff\x02\x08\x02\x01\x06\xff\x02\x01\x02\xff\x06\x01\x02\x01\x02\xff\x02\xff\x04\x03\x06\xfd\x04\x04\x10\x01\x12\x01\x04\x01\x1a\xff\n\x07\x10\x01\x16\x01\x10\x01\x02\x01\x04\x01\x0c\xff\x04\x02\x0e\x01\x10\x01\n\x02\x0c\x01\x16\x01\x18\x01&\x01\x0e\x01\x04\x01\x0e\x01\x02\x01\x02\x01\x02\x00\x02\x00\x02\x00\x02\x00\x02\xff\x0e\x02\x10\x01\x02\x01\x02\xff\n\x05\x02\x01(\xff\x04\x07\x04\x02\x06\x01\x02\x01\x02\xfe\n\x04r\xd9\x00\x00\x00c\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\n\x00\x00\x00\xc3\x00\x00\x00sz\x00\x00\x00d\x01d\x02d\x03\x9c\x02}\x02z\x1a|\x00\xa0\x00\xa1\x00j\x01|\x01|\x02d\x04\x8d\x02\xa0\x02\xa1\x00\x01\x00W\x00nP\x04\x00t\x03k\nrt\x01\x00}\x03\x01\x00z2d\x05|\x01\x9b\x00d\x06\x9d\x03t\x04|\x03\x83\x01k\x06sVd\x07t\x04|\x03\x83\x01k\x06r`W\x00Y\x00\xa2\x0cd\x00S\x00|\x03\x82\x01W\x005\x00d\x00}\x03~\x03X\x00Y\x00n\x02X\x00d\x00S\x00)\x08N\xda\x06readerZ\x06anyone)\x02Z\x04role\xda\x04type)\x02r\xaf\x00\x00\x00\xda\x04bodyz\x11"File not found: z\x02."zB"Sharing folders that are inside a shared drive is not 
supported.")\x05Z\x0bpermissions\xda\x06create\xda\x07executer\x0b\x00\x00\x00r0\x00\x00\x00)\x04rX\x00\x00\x00\xda\x02IdZ\npermissionr\x96\x00\x00\x00rS\x00\x00\x00rS\x00\x00\x00rT\x00\x00\x00\xda\x11change_permission$\x02\x00\x00s\x1c\x00\x00\x00\x00\x02\x02\x01\x02\xfe\x06\x04\x02\x01\x1a\x01\x10\x02\x14\x01\x02\x01\x06\xff\x02\xff\x02\x03\n\x02\x16\x01r\xe0\x00\x00\x00c\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x03\x00\x00\x00\x05\x00\x00\x00\xc3\x00\x00\x00s\x1c\x00\x00\x00|\x00\xa0\x00\xa1\x00j\x01|\x01d\x01d\x02d\x03\x8d\x03\xa0\x02\xa1\x00}\x02|\x02S\x00)\x04NzAname, id, size, mimeType, webViewLink, webContentLink,descriptionT)\x03r\xaf\x00\x00\x00\xda\x06fieldsr\xb0\x00\x00\x00)\x03r\xc9\x00\x00\x00r\x87\x00\x00\x00r\xde\x00\x00\x00)\x03rX\x00\x00\x00r\xdf\x00\x00\x00rQ\x00\x00\x00rS\x00\x00\x00rS\x00\x00\x00rT\x00\x00\x00r\xb6\x00\x00\x006\x02\x00\x00s\x08\x00\x00\x00\x00\x01\x0c\x02\x02\xfe\n\x03r\xb6\x00\x00\x00c\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x08\x00\x00\x00\xc3\x00\x00\x00sz\x00\x00\x00|\x01d\x01d\x02\x9c\x02}\x02z\x0ct\x00d\x00k\tr\x14W\x00n&\x04\x00t\x01k\nr<\x01\x00\x01\x00\x01\x00t\x02d\x00k\tr8t\x02g\x01|\x02d\x03<\x00Y\x00n\x0cX\x00t\x00g\x01|\x02d\x03<\x00|\x00\xa0\x03\xa1\x00j\x04|\x02d\x04d\x05d\x06\x8d\x03\xa0\x05\xa1\x00}\x03t\x06|\x00|\x03\xa0\x07d\x07\xa1\x01\x83\x02I\x00d\x00H\x00\x01\x00|\x03S\x00)\x08Nr\xad\x00\x00\x00r_\x00\x00\x00\xda\x07parentsz\x0fid, 
webViewLinkT)\x03r\xdc\x00\x00\x00r\xe1\x00\x00\x00r\xb0\x00\x00\x00rG\x00\x00\x00)\x08r\x88\x00\x00\x00\xda\tNameErrorr\x12\x00\x00\x00r\xc9\x00\x00\x00r\xdd\x00\x00\x00r\xde\x00\x00\x00r\xe0\x00\x00\x00r\x87\x00\x00\x00)\x04rX\x00\x00\x00\xda\x0bfolder_name\xda\x08metadatar\x94\x00\x00\x00rS\x00\x00\x00rS\x00\x00\x00rT\x00\x00\x00r\x86\x00\x00\x00=\x02\x00\x00s"\x00\x00\x00\x00\x02\x02\x01\x02\xfe\x06\x04\x02\x01\x08\x01\x04\x01\x0e\x02\x08\x01\x10\x03\n\x01\x08\x01\x02\x00\x02\x00\x02\xff\n\x03\x16\x01r\x86\x00\x00\x00c\x05\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x16\x00\x00\x00\r\x00\x00\x00\xc3\x00\x00\x00s\x14\x02\x00\x00z\x14|\x00\xa0\x00d\x01\xa1\x01I\x00d\x00H\x00\x01\x00W\x00n\x14\x04\x00t\x01k\nr(\x01\x00\x01\x00\x01\x00Y\x00n\x02X\x00|\x03d\x02|\x04d\x03\x9c\x03}\x05z\x0ct\x02d\x00k\tr@W\x00n&\x04\x00t\x03k\nrh\x01\x00\x01\x00\x01\x00t\x04d\x00k\trdt\x04g\x01|\x05d\x04<\x00Y\x00n\x0cX\x00t\x02g\x01|\x05d\x04<\x00t\x05|\x02|\x04d\x05d\x06\x8d\x03}\x06|\x01\xa0\x06\xa1\x00j\x07|\x05|\x06d\x07d\x05d\x08\x8d\x04}\x07t\x08\xa0\x08\xa1\x00}\x08d\x00}\td\x00}\nd\ta\t|\td\x00k\x08\x90\x01r\xdat\td\x05k\x08r\xc2t\n\x82\x01|\x07\xa0\x0b\xa1\x00\\\x02}\x0b}\t|\x0br\xac|\x0bj\x0c}\x0ct\x08\xa0\x08\xa1\x00|\x08\x18\x00}\r|\x0bj\r}\x0e|\x0e|\x0c\x1b\x00d\n\x14\x00}\x0ft\x0e|\x0e|\r\x1b\x00d\x0b\x83\x02}\x10t\x0e|\x0c|\x0e\x18\x00|\x10\x1b\x00\x83\x01}\x11d\x0c\xa0\x0fd\r\xa0\x10d\x0ed\x0f\x84\x00t\x11t\x12\xa0\x13|\x0fd\x10\x1b\x00\xa1\x01\x83\x01D\x00\x83\x01\xa1\x01d\r\xa0\x10d\x11d\x0f\x84\x00t\x11d\x10t\x12\xa0\x13|\x0fd\x10\x1b\x00\xa1\x01\x18\x00\x83\x01D\x00\x83\x01\xa1\x01t\x0e|\x0fd\x0b\x83\x02\xa1\x03}\x12d\x12|\x03\x9b\x00d\x13|\x12\x9b\x00d\x14t\x14|\x0e\x83\x01\x9b\x00d\x15t\x14|\x0c\x83\x01\x9b\x00d\x16t\x14|\x10\x83\x01\x9b\x00d\x17t\x15|\x11\x83\x01\x9b\x00\x9d\x0c}\x13t\x0e|\rd\x18\x16\x00\x83\x01d\x19k\x02\x90\x01r\xbc|\n|\x13k\x03\x90\x01s\xc4|\x0e|\x0ck\x02r\xac|\x00\xa0\x00|\x13\xa1\x01I\x00d\x00H\x00\x01\x00|\x13}\nq\xac|\t\xa0\x16d\x1a\xa1\x
01}\x14|\t\xa0\x16d\x1b\xa1\x01}\x0c|\t\xa0\x16d\x1c\xa1\x01}\x15t\x17|\x01|\x14\x83\x02I\x00d\x00H\x00\x01\x00t\x18|\x0c\x83\x01|\x15f\x02S\x00)\x1dNz\x16`Processing upload...`z1Uploaded from Telegram using ProjectBish userbot.)\x03r`\x00\x00\x00\xda\x0bdescriptionra\x00\x00\x00r\xe2\x00\x00\x00T)\x02Z\x08mimetypeZ\tresumablez\x18id, size, webContentLink)\x04r\xdc\x00\x00\x00\xda\nmedia_bodyr\xe1\x00\x00\x00r\xb0\x00\x00\x00Fr\x9e\x00\x00\x00r\x9f\x00\x00\x00z\x1d`Uploading` | [{0}{1}] `{2}%`rT\x00\x00\x00c\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x03\x00\x00\x00S\x00\x00\x00s\x10\x00\x00\x00g\x00|\x00]\x08}\x01d\x00\x91\x02q\x04S\x00r\xa0\x00\x00\x00rS\x00\x00\x00r\xa1\x00\x00\x00rS\x00\x00\x00rS\x00\x00\x00rT\x00\x00\x00r\xa4\x00\x00\x00\x82\x02\x00\x00s\x04\x00\x00\x00\x06\x00\x02\x00z\x1aupload.<locals>.<listcomp>r\xa5\x00\x00\x00c\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x03\x00\x00\x00S\x00\x00\x00s\x10\x00\x00\x00g\x00|\x00]\x08}\x01d\x00\x91\x02q\x04S\x00r\xa6\x00\x00\x00rS\x00\x00\x00r\xa1\x00\x00\x00rS\x00\x00\x00rS\x00\x00\x00rT\x00\x00\x00r\xa4\x00\x00\x00\x84\x02\x00\x00s\x04\x00\x00\x00\x06\x00\x02\x00z\x14`[FILE - 
UPLOAD]`\n\n`r\xa7\x00\x00\x00r\xa8\x00\x00\x00r\xa9\x00\x00\x00r\xaa\x00\x00\x00r\xab\x00\x00\x00r\xac\x00\x00\x00r\x01\x00\x00\x00rG\x00\x00\x00\xda\x04size\xda\x0ewebContentLink)\x19r2\x00\x00\x00r\x81\x00\x00\x00r\x88\x00\x00\x00r\xe3\x00\x00\x00r\x12\x00\x00\x00r\r\x00\x00\x00r\xc9\x00\x00\x00r\xdd\x00\x00\x00r\x82\x00\x00\x00ri\x00\x00\x00r\x1c\x00\x00\x00r\xca\x00\x00\x00r\xcb\x00\x00\x00r\xcc\x00\x00\x00r\xc3\x00\x00\x00r\xc4\x00\x00\x00r\x05\x00\x00\x00r\xc5\x00\x00\x00r\xc6\x00\x00\x00r\xc7\x00\x00\x00r\x19\x00\x00\x00r\x1a\x00\x00\x00r\x87\x00\x00\x00r\xe0\x00\x00\x00r\xbc\x00\x00\x00)\x16rN\x00\x00\x00rX\x00\x00\x00r]\x00\x00\x00r\x91\x00\x00\x00ra\x00\x00\x00r\xdc\x00\x00\x00r\xe7\x00\x00\x00r\x8f\x00\x00\x00rm\x00\x00\x00\xda\x08responser\xd0\x00\x00\x00r\x92\x00\x00\x00r\xcf\x00\x00\x00r\xd1\x00\x00\x00Z\x08uploadedr\xd3\x00\x00\x00r\xd4\x00\x00\x00r\xd5\x00\x00\x00r\xd6\x00\x00\x00r\xd7\x00\x00\x00Z\x07file_id\xda\x0bdownloadURLrS\x00\x00\x00rS\x00\x00\x00rT\x00\x00\x00r\x84\x00\x00\x00S\x02\x00\x00s|\x00\x00\x00\x00\x01\x02\x01\x14\x01\x0e\x01\x06\x02\x02\x01\x02\x01\x02\xfd\x06\x05\x02\x01\x08\x01\x04\x01\x0e\x02\x08\x01\x10\x03\n\x01\x02\x01\x02\x01\x02\x01\x02\xfd\x06\x06\x0c\x01\x02\x01\x02\xfe\x06\x04\x08\x01\x04\x01\x04\x01\x04\x01\n\x01\x08\x01\x04\x02\x0c\x01\x04\x01\x06\x01\x0c\x01\x06\x01\x0c\x01\x0e\x01\x10\x01\x04\x01\x0c\x01\x0c\xff\x08\x02\x0c\x01\x10\xff\x08\x02\x08\xfb\x04\x076\xff\x02\x08\x12\x01\x06\xff\x04\x02\x06\xfe\x02\x03\x10\x01\x06\x01\n\x01\n\x01\n\x02\x10\x01r\x84\x00\x00\x00c\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\n\x00\x00\x00\x07\x00\x00\x00\xc3\x00\x00\x00s\xb2\x00\x00\x00d\x01a\x00t\x01\xa0\x02|\x02\xa1\x01}\x03t\x03|\x03\x83\x01d\x02k\x02r\x1et\x04S\x00d\x00}\x04|\x03D\x00]\x86}\x05t\x00d\x03k\x08r6t\x05\x82\x01t\x06|\x02|\x05\x83\x02}\x06t\x07|\x06\x83\x01rvt\x08|\x01|\x05\x83\x02I\x00d\x00H\x00}\x07|\x07\xa0\td\x04\xa1\x01a\x04t\n|\x00|\x01|\x06\x83\x03I\x00d\x00H\x00}\x04q&t\x0b|\x06\x83\x01I\x00d\x00H\x0
0}\x08t\x0c|\x06\x83\x01I\x00d\x00H\x00}\tt\r|\x00|\x01|\x06|\x08|\t\x83\x05I\x00d\x00H\x00\x01\x00t\x04}\x04q&|\x04S\x00)\x05NFr\x01\x00\x00\x00TrG\x00\x00\x00)\x0eri\x00\x00\x00ry\x00\x00\x00\xda\x07listdirr\xc2\x00\x00\x00r\x88\x00\x00\x00r\x1c\x00\x00\x00r\x05\x00\x00\x00r\x04\x00\x00\x00r\x86\x00\x00\x00r\x87\x00\x00\x00r\x89\x00\x00\x00r^\x00\x00\x00rb\x00\x00\x00r\x84\x00\x00\x00)\nrN\x00\x00\x00rX\x00\x00\x00\xda\x0bfolder_path\xda\x05listsZ\x0eroot_parent_Id\xda\x01fZ\x0ecurrent_f_namer\x94\x00\x00\x00r\x91\x00\x00\x00ra\x00\x00\x00rS\x00\x00\x00rS\x00\x00\x00rT\x00\x00\x00r\x89\x00\x00\x00\x9c\x02\x00\x00s*\x00\x00\x00\x00\x03\x04\x01\n\x01\x0c\x01\x04\x01\x04\x01\x08\x01\x08\x01\x04\x02\n\x01\x08\x01\x10\x01\n\x01\x04\x01\x02\x00\x02\xff\x0c\x03\x0e\x01\x0e\x01\x16\x01\x06\x01r\x89\x00\x00\x00c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\xc3\x00\x00\x00s4\x00\x00\x00z\x0ct\x00d\x00k\tr\nW\x00n \x04\x00t\x01k\nr,\x01\x00\x01\x00\x01\x00t\x02d\x00k\tr(t\x02a\x00Y\x00n\x04X\x00b\x00d\x00S\x00)\x01N)\x03r\x88\x00\x00\x00r\xe3\x00\x00\x00r\x12\x00\x00\x00rS\x00\x00\x00rS\x00\x00\x00rS\x00\x00\x00rT\x00\x00\x00r\x8a\x00\x00\x00\xb6\x02\x00\x00s\x10\x00\x00\x00\x00\x02\x02\x01\x08\x01\x04\x01\x0e\x01\x08\x01\n\x02\x02\x01r\x8a\x00\x00\x00z,^.gdlist(?: |$)(-l \\d+)?(?: |$)?(.*)?(?: |$)c\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x11\x00\x00\x00\x0b\x00\x00\x00\xc3\x00\x00\x00s \x03\x00\x00|\x00\xa0\x00d\x01\xa1\x01I\x00d\x00H\x00\x01\x00|\x00j\x01\xa0\x02d\x02\xa1\x01}\x01|\x01d\x00k\trXt\x03|\x00j\x01\xa0\x02d\x02\xa1\x01\xa0\x04d\x03\xa1\x01\x83\x01}\x02|\x02d\x04k\x04r\\|\x00\xa0\x00d\x05\xa1\x01I\x00d\x00H\x00\x01\x00d\x00S\x00n\x04d\x06}\x02|\x00j\x01\xa0\x02d\x07\xa1\x01}\x01|\x01d\x08k\x03\x90\x01r.|\x01\xa0\x05d\t\xa1\x01r\xd6|\x01\xa0\x06d\x00d\x07\xa1\x02d\x02\x19\x00}\x03z\x14|\x01\xa0\x06d\x00d\x07\xa1\x02d\x07\x19\x00}\x04W\x00n 
\x04\x00t\x07k\nr\xc0\x01\x00\x01\x00\x01\x00d\n|\x03\x9b\x00d\x0b\x9d\x03}\x05Y\x00n\x14X\x00d\n|\x03\x9b\x00d\x0c|\x04\x9b\x00d\r\x9d\x05}\x05nVt\x08\xa0\td\x0e|\x01\xa1\x02\x90\x01r\x1ct\x08\xa0\td\x0e|\x01\xa1\x02\xa0\x02d\x02\xa1\x01}\x03|\x01\xa0\x06d\t\xa1\x01d\x0f\x19\x00\xa0\x04\xa1\x00}\x04d\n|\x03\x9b\x00d\x0c|\x04\x9b\x00d\r\x9d\x05}\x05n\x10|\x01}\x04d\x10|\x04\x9b\x00d\n\x9d\x03}\x05n\x04d\x08}\x05t\n|\x00\x83\x01I\x00d\x00H\x00}\x06|\x06d\x11k\x08\x90\x01rNd\x11S\x00d\x08}\x07d\x12}\x08d\x00}\tg\x00}\nz(|\x06\xa0\x0b\xa1\x00j\x0cd\x13d\x13|\x05d\x14d\x15|\x08|\x02d\x16|\td\x17\x8d\t\xa0\r\xa1\x00}\x0bW\x00nD\x04\x00t\x0ek\n\x90\x01r\xca\x01\x00}\x0c\x01\x00z$|\x00\xa0\x00d\x18t\x0f|\x0c\x83\x01\x9b\x00\x9d\x02\xa1\x01I\x00d\x00H\x00\x01\x00W\x00Y\x00\xa2\x04d\x00S\x00d\x00}\x0c~\x0cX\x00Y\x00n\x02X\x00|\x0b\xa0\x10d\x19g\x00\xa1\x02D\x00]\x80}\rt\x11|\n\x83\x01|\x02k\x05\x90\x01r\xf0\x01\x00\x90\x02qZ|\r\xa0\x10d\x1a\xa1\x01}\x0e|\r\xa0\x10d\x1b\xa1\x01d\x1ck\x02\x90\x02r,|\r\xa0\x10d\x1d\xa1\x01}\x0f|\x07d\x1e|\x0e\x9b\x00d\x1f|\x0f\x9b\x00d \x9d\x057\x00}\x07n |\r\xa0\x10d!\xa1\x01}\x0f|\x07d"|\x0e\x9b\x00d\x1f|\x0f\x9b\x00d \x9d\x057\x00}\x07|\n\xa0\x12|\r\xa1\x01\x01\x00\x90\x01q\xd8t\x11|\n\x83\x01|\x02k\x05\x90\x02rl\x90\x02q\x8a|\x0b\xa0\x10d#d\x00\xa1\x02}\t|\td\x00k\x08\x90\x01r^\x90\x02q\x8a\x90\x01q^~\n|\x05d\x08k\x02\x90\x02r\x9ad$}\x05t\x11|\x07\x83\x01d%k\x04\x90\x03r\x00|\x00\xa0\x00d&\xa1\x01I\x00d\x00H\x00\x01\x00t\x13d\'d(\x83\x02\x8f\x1c}\x10|\x10\xa0\x14d)|\x05\x9b\x00d*|\x07\x9b\x00\x9d\x04\xa1\x01\x01\x00W\x005\x00Q\x00R\x00X\x00|\x00j\x15j\x16|\x00j\x17d\'d+d,\x8d\x03I\x00d\x00H\x00\x01\x00n\x1c|\x00\xa0\x00d-|\x05\x9b\x00d.|\x07\x9b\x00\x9d\x04\xa1\x01I\x00d\x00H\x00\x01\x00d\x00S\x00)/Nz\x18`Getting information...`r!\x00\x00\x00z\x03-l i\xe8\x03\x00\x00zX`[GDRIVE - LIST]`\n\n`Status` : **BAD**\n`Reason` : can\'t get list if limit more than 1000.\xe92\x00\x00\x00r\x9f\x00\x00\x00rT\x00\x00\x00z\x02-p\xfa\x01\'z$\' in parents 
and (name contains \'*\')z!\' in parents and (name contains \'z\x02\')z\x07-p (.*)r\x01\x00\x00\x00z\x0fname contains \'FzEnextPageToken, files(name, id, mimeType, webViewLink, webContentLink)TrV\x00\x00\x00Z\tallDrivesz\x19modifiedTime desc, folder)\tr\xb0\x00\x00\x00Z\x15includeTeamDriveItems\xda\x01q\xda\x06spacesZ\x07corporar\xe1\x00\x00\x00Z\x08pageSizeZ\x07orderBy\xda\tpageTokenz1`[GDRIVE - LIST]`\n\n`Status` : **BAD**\n`Reason` : r\xc9\x00\x00\x00r`\x00\x00\x00ra\x00\x00\x00r\xad\x00\x00\x00\xda\x0bwebViewLinkz\x0c`[FOLDER]`\n[rs\x00\x00\x00\xfa\x03)\n\nr\xe9\x00\x00\x00z\n`[FILE]`\n[\xda\rnextPageTokenz\rNot specifiedi\x00\x10\x00\x00z*`Result is too big, sending it as file...`z\nresult.txt\xda\x01wz\x14Google Drive Query:\nz\x0b\n\nResults\n\nz\x18Google Drive Query List.)\x01Z\x07captionz\x19**Google Drive Query**:\n`z\x10`\n\n**Results**\n\n)\x18r2\x00\x00\x00\xda\rpattern_matchr\xbf\x00\x00\x00r\xbc\x00\x00\x00rC\x00\x00\x00\xda\nstartswithr\\\x00\x00\x00r\xb5\x00\x00\x00r\xbd\x00\x00\x00r\xbe\x00\x00\x00rY\x00\x00\x00r\xc9\x00\x00\x00\xda\x04listr\xde\x00\x00\x00r\x0b\x00\x00\x00r0\x00\x00\x00r\x87\x00\x00\x00r\xc2\x00\x00\x00\xda\x06append\xda\x04openr\xc8\x00\x00\x00r=\x00\x00\x00Z\tsend_filerF\x00\x00\x00)\x11rN\x00\x00\x00Z\x07checkerZ\tpage_sizer\xe2\x00\x00\x00r`\x00\x00\x00Z\x05queryrX\x00\x00\x00rB\x00\x00\x00r\xe1\x00\x00\x00\xda\npage_tokenr\x93\x00\x00\x00r\xea\x00\x00\x00r\x96\x00\x00\x00r\xc9\x00\x00\x00r\x91\x00\x00\x00Z\x04linkrQ\x00\x00\x00rS\x00\x00\x00rS\x00\x00\x00rT\x00\x00\x00r\xee\x00\x00\x00\xc3\x02\x00\x00s\xae\x00\x00\x00\x00\x03\x10\x01\x0c\x01\x08\x01\x16\x01\x08\x01\x04\x01\x02\xff\n\x05\x06\x02\x04\x01\x0c\x01\n\x01\n\x01\x10\x01\x02\x01\x14\x01\x0e\x01\x12\x02\x14\x02\x0e\x01\x12\x01\x12\x01\x14\x02\x04\x01\x0e\x02\x04\x01\x0e\x01\n\x01\x04\x01\x04\x01\x04\x02\x04\x01\x04\x02\x02\x01\x08\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\xf7\x0e\x0b\x12\x01\x04\x01\x0c\xff\n\x05\x18\x01\x10\x01\x0e\x01\
x06\x02\n\x01\x10\x01\n\x01\x02\x01\x10\xff\x06\x05\n\x01\x02\x01\x10\xff\x04\x04\x0e\x01\x0e\x01\x04\x02\x0c\x01\n\x01\x08\x02\x02\x01\n\x01\x04\x01\x0e\x01\x10\x01\x0c\x01\x04\x01\x0e\xff\x0e\x02\x06\x01\x04\x01\x02\x01\x02\xfd\x0e\x06\x04\x01\x0e\xff\n\x03r\xee\x00\x00\x00z\x1b^.gdf (mkdir|rm|check) (.*)c\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x17\x00\x00\x00\x0e\x00\x00\x00\xc3\x00\x00\x00sN\x04\x00\x00|\x00\xa0\x00d\x01\xa1\x01I\x00d\x02H\x00\x01\x00t\x01|\x00\x83\x01I\x00d\x02H\x00}\x01|\x01d\x03k\x08r*d\x02S\x00|\x00j\x02\xa0\x03d\x04\xa1\x01\xa0\x04d\x05\xa1\x01}\x02|\x00j\x02\xa0\x03d\x06\xa1\x01}\x03d\x07}\x04|\x02D\x00\x90\x03]\xe6}\x05|\x05\xa0\x05\xa1\x00}\x05|\x05d\x08d\t\x9c\x02}\x06z\x0ct\x06d\x02k\trrW\x00n&\x04\x00t\x07k\nr\x9a\x01\x00\x01\x00\x01\x00t\x08d\x02k\tr\x96t\x08g\x01|\x06d\n<\x00Y\x00n\x0cX\x00t\x06g\x01|\x06d\n<\x00d\x02}\x07|\x01\xa0\t\xa1\x00j\nd\x0b|\x05\x9b\x00d\x0c\x9d\x03d\rd\x0ed\x0f|\x07d\x10\x8d\x05\xa0\x0b\xa1\x00}\x08|\x03d\x11k\x02\x90\x01rxd\x12}\tz\x14|\x08\xa0\x0cd\x13g\x00\xa1\x02d\x14\x19\x00}\nW\x00n2\x04\x00t\rk\n\x90\x01r"\x01\x00\x01\x00\x01\x00t\x0e|\x01|\x05\x83\x02I\x00d\x02H\x00}\n|\t\xa0\x0fd\x15d\x16\xa1\x02}\tY\x00n\x02X\x00|\n\xa0\x0cd\x17\xa1\x01}\x0b|\n\xa0\x0cd\x18\xa1\x01}\x0cd\x19|\tk\x06\x90\x01rRt\x10|\x01|\x0b\x83\x02I\x00d\x02H\x00\x01\x00|\x04d\x1a|\t\x9b\x00d\x1b|\x05\x9b\x00d\x1c|\x0b\x9b\x00d\x1d|\x0c\x9b\x00d\x1e\x9d\t7\x00}\x04\x90\x02n\xb4|\x03d\x1fk\x02\x90\x02r\xc4z\x1e|\x08\xa0\x0cd\x13g\x00\xa1\x02d\x14\x19\x00}\r|\r\xa0\x0cd\x17\xa1\x01}\x0eW\x00nr\x04\x00t\rk\n\x90\x02r\x12\x01\x00\x01\x00\x01\x00|\x05}\x0ez\x14t\x11|\x01|\x0e\x83\x02I\x00d\x02H\x00}\rW\x00nB\x04\x00t\x12k\n\x90\x02r\x0c\x01\x00}\x0f\x01\x00z"|\x04d 
t\x13|\x0f\x83\x01\x9b\x00d!\x9d\x037\x00}\x04W\x00Y\x00\xa2\x08Y\x00qPW\x005\x00d\x02}\x0f~\x0fX\x00Y\x00n\x02X\x00Y\x00n\x02X\x00|\r\xa0\x0cd"\xa1\x01}\x10|\r\xa0\x0cd#\xa1\x01}\x11|\x11d\x08k\x02\x90\x02r8d$}\tn\x04d%}\tz\x1a|\x01\xa0\t\xa1\x00j\x14|\x0ed\x0fd&\x8d\x02\xa0\x0b\xa1\x00\x01\x00W\x00nR\x04\x00t\x15k\n\x90\x02r\xa8\x01\x00}\x0f\x01\x00z2|\t\xa0\x0fd\'d(\xa1\x02\x01\x00|\x04d\x1a|\t\x9b\x00d)t\x13|\x0f\x83\x01\x9b\x00d*\x9d\x057\x00}\x04W\x00Y\x00\xa2\x06qPW\x005\x00d\x02}\x0f~\x0fX\x00Y\x00n\x18X\x00|\x04d\x1a|\t\x9b\x00d\x1b|\x10\x9b\x00d+\x9d\x057\x00}\x04\x90\x01nh|\x03d,k\x02\x90\x04r,z\x14|\x08\xa0\x0cd\x13g\x00\xa1\x02d\x14\x19\x00}\rW\x00nr\x04\x00t\rk\n\x90\x03rT\x01\x00\x01\x00\x01\x00|\x05}\x0ez\x14t\x11|\x01|\x0e\x83\x02I\x00d\x02H\x00}\rW\x00nB\x04\x00t\x12k\n\x90\x03rN\x01\x00}\x0f\x01\x00z"|\x04d-t\x13|\x0f\x83\x01\x9b\x00d!\x9d\x037\x00}\x04W\x00Y\x00\xa2\x08Y\x00qPW\x005\x00d\x02}\x0f~\x0fX\x00Y\x00n\x02X\x00Y\x00n\x02X\x00|\r\xa0\x0cd"\xa1\x01}\x05|\r\xa0\x0cd\x17\xa1\x01}\x0e|\r\xa0\x0cd.\xa1\x01}\x12|\r\xa0\x0cd#\xa1\x01}\x11|\r\xa0\x0cd\x18\xa1\x01}\x13|\r\xa0\x0cd/\xa1\x01}\x14|\r\xa0\x0cd0\xa1\x01}\x15|\x11d\x08k\x02\x90\x03r\xacd\x12}\tn\x04d1}\td\x1a|\t\x9b\x00d2|\x05\x9b\x00d3|\x0e\x9b\x00d4\x9d\x07}\x16|\x11d\x08k\x03\x90\x03r\xfe|\x16d5t\x16|\x12\x83\x01\x9b\x00d4\x9d\x037\x00}\x16|\x16d6|\x05\x9b\x00d7|\x14\x9b\x00d\x1e\x9d\x057\x00}\x16n\x10|\x16d8|\x13\x9b\x00d\x1e\x9d\x037\x00}\x16|\x15\x90\x04r$|\x16d9|\x15\x9b\x00d!\x9d\x037\x00}\x16|\x04|\x167\x00}\x04|\x08\xa0\x0cd:d\x02\xa1\x02}\x07qP|\x00\xa0\x00|\x04\xa1\x01I\x00d\x02H\x00\x01\x00d\x02S\x00);z) - Google Drive folder/file management - \xfa\x18`Sending information...`NFr\x9f\x00\x00\x00\xfa\x01;r!\x00\x00\x00rT\x00\x00\x00r\xad\x00\x00\x00r_\x00\x00\x00r\xe2\x00\x00\x00z\x06name="\xfa\x01"rV\x00\x00\x00zanextPageToken, files(parents, name, id, size, mimeType, webViewLink, webContentLink, 
description)T)\x05r\xf2\x00\x00\x00r\xf3\x00\x00\x00r\xe1\x00\x00\x00r\xb0\x00\x00\x00r\xf4\x00\x00\x00r\xb4\x00\x00\x00z\x10[FOLDER - EXIST]r\xc9\x00\x00\x00r\x01\x00\x00\x00z\x06EXIST]z\x08CREATED]rG\x00\x00\x00r\xf5\x00\x00\x00Z\x07CREATEDrp\x00\x00\x00z\x04`\n\n`z\x0b`\n`ID :` `z\x11`\n`URL :` [Open](r\xf6\x00\x00\x00\xda\x02rmz7`[FILE/FOLDER - ERROR]`\n\n`Status` : **BAD**`Reason` : `rx\x00\x00\x00r`\x00\x00\x00ra\x00\x00\x00z\x11[FOLDER - DELETE]z\x0f[FILE - DELETE]r\xae\x00\x00\x00z\x07DELETE]rw\x00\x00\x00z `\n\n`Status` : **BAD**`Reason` : \xfa\x02\n\nz\x15`\n`Status` : **OK**\n\nZ\x05checkz8`[FILE/FOLDER - ERROR]`\n\n`Status` : **BAD**\n`Reason` : `r\xe8\x00\x00\x00r\xe9\x00\x00\x00r\xe6\x00\x00\x00z\x0e[FILE - EXIST]z\x0e`\n\n`Name :` `z\r`\n`ID :` `z\x02`\nz\x0b`Size :` `z\x0b`Link :` [rs\x00\x00\x00z\x11`URL :` [Open](z\x0b`About :`\n`r\xf7\x00\x00\x00)\x17r2\x00\x00\x00rY\x00\x00\x00r\xf9\x00\x00\x00r\xbf\x00\x00\x00r\\\x00\x00\x00rC\x00\x00\x00r\x88\x00\x00\x00r\xe3\x00\x00\x00r\x12\x00\x00\x00r\xc9\x00\x00\x00r\xfb\x00\x00\x00r\xde\x00\x00\x00r\x87\x00\x00\x00r\xb5\x00\x00\x00r\x86\x00\x00\x00r\x85\x00\x00\x00r\xe0\x00\x00\x00r\xb6\x00\x00\x00r\x81\x00\x00\x00r0\x00\x00\x00r5\x00\x00\x00r\x0b\x00\x00\x00r\x19\x00\x00\x00)\x17rN\x00\x00\x00rX\x00\x00\x00Z\x06f_name\xda\x03exer\x8c\x00\x00\x00Z\nname_or_idr\xe5\x00\x00\x00r\xfe\x00\x00\x00r\x93\x00\x00\x00r\x92\x00\x00\x00r\x94\x00\x00\x00Z\tfolder_idr\x95\x00\x00\x00r\xef\x00\x00\x00Z\x04f_idr\x96\x00\x00\x00r`\x00\x00\x00ra\x00\x00\x00Z\x06f_sizer\xf5\x00\x00\x00r\xeb\x00\x00\x00r\xe6\x00\x00\x00rO\x00\x00\x00rS\x00\x00\x00rS\x00\x00\x00rT\x00\x00\x00\xda\x15google_drive_managers0\x03\x00\x00s\xd0\x00\x00\x00\x00\x03\x10\x01\x0e\x01\x08\x01\x04\x02\x12\x01\x0c\x01\x04\x01\n\x02\x08\x02\x02\x01\x02\xfe\x06\x04\x02\x01\x08\x01\x04\x01\x0e\x02\x08\x01\x10\x03\n\x01\x04\x01\x08\x01\n\x01\x02\x02\x02\x03\x02\x01\x02\xf8\n\n\n\x04\x04\x01\x02\x01\x14\x01\x10\x01\x10\x01\x12\x01\n\x01\n\x01\n\x02\x10\x01\x0
2\x01\x1c\xff\x08\x06\n\x02\x02\x02\x10\x01\x0e\x01\x10\x02\x04\x01\x02\x01\x14\x01\x12\x01\x02\x01\x0e\xff\x04\x05"\x01\n\x01\n\x01\n\x01\x06\x02\x04\x01\x02\x01\x1a\x02\x12\x01\x0c\x01\x02\x01\x14\xff\x04\x05\x1a\x02\x02\x01\x10\xff\x08\x05\n\x02\x02\x01\x14\x01\x10\x02\x04\x01\x02\x01\x14\x01\x12\x01\x02\x01\x0e\xff\x04\x05"\x02\n\x01\n\x01\n\x01\n\x01\n\x01\n\x01\n\x01\n\x01\x06\x02\x04\x02\x16\xff\x02\x05\n\x01\x14\x01\x18\x02\x10\x01\x06\x01\x10\x01\x08\x01\x0e\x01\x10\x01r\x05\x01\x00\x00z\x10^.gdabort(?: |$)c\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x03\x00\x00\x00\xc3\x00\x00\x00s^\x00\x00\x00t\x00\xa0\x01\xa1\x00}\x01|\x00\xa0\x02d\x01\xa1\x01I\x00d\x02H\x00\x01\x00t\x03|\x01\x83\x01d\x03k\x03r8t\x00j\x04d\x04d\x05\x8d\x01\x01\x00t\x00\xa0\x05\xa1\x00\x01\x00d\x04a\x06t\x07\xa0\x08d\x06\xa1\x01I\x00d\x02H\x00\x01\x00|\x00\xa0\t\xa1\x00I\x00d\x02H\x00\x01\x00d\x02S\x00)\x07z2\n Abort process for download and upload\n z\x0f`Cancelling...`Nr\x01\x00\x00\x00T\xa9\x01Z\x05forcer-\x00\x00\x00)\nr\x1d\x00\x00\x00Z\rget_downloadsr2\x00\x00\x00r\xc2\x00\x00\x00Z\nremove_allZ\tautopurgeri\x00\x00\x00r3\x00\x00\x00r4\x00\x00\x00r5\x00\x00\x00)\x02rN\x00\x00\x00r\x8e\x00\x00\x00rS\x00\x00\x00rS\x00\x00\x00rT\x00\x00\x00\xda\x0ecancel_process\xc3\x03\x00\x00s\x10\x00\x00\x00\x00\x06\x08\x01\x10\x01\x0c\x01\x0c\x01\x08\x01\x04\x01\x10\x01r\x07\x01\x00\x00z\x0f^.gd(?: 
|$)(.*)c\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x12\x00\x00\x00\x0b\x00\x00\x00\xc3\x00\x00\x00s\xa4\x05\x00\x00d\x01}\x01|\x00j\x00\xa0\x01d\x02\xa1\x01}\x02d\x00}\x03d\x00}\x04|\x02s&|\x00j\x02s&d\x00S\x00|\x02rD|\x00j\x02rD|\x00\xa0\x03d\x03\xa1\x01I\x00d\x00H\x00\x01\x00d\x00S\x00t\x04|\x00\x83\x01I\x00d\x00H\x00}\x05|\x05d\x04k\x08r^d\x00S\x00t\x05|\x02\x83\x01r\x82|\x02}\x03|\x03\xa0\x06d\x05\xa1\x01r~|\x03g\x01}\x04d\x00}\x03\x90\x03n\x92t\x07|\x02\x83\x01\x90\x01r\xa0|\x02}\x06t\x08|\x06\x83\x01I\x00d\x00H\x00}\x07t\t|\x05|\x07\x83\x02I\x00d\x00H\x00}\x08|\x08\xa0\nd\x06\xa1\x01a\x0bd\x07t\x0b\x17\x00}\tz\x16t\x0c|\x00|\x05|\x06\x83\x03I\x00d\x00H\x00\x01\x00W\x00n\x96\x04\x00t\rk\n\x90\x01r\x18\x01\x00\x01\x00\x01\x00|\x00\xa0\x0ed\x08\xa1\x01I\x00d\x00H\x00\x01\x00t\x0f\x83\x00I\x00d\x00H\x00\x01\x00|\x00\xa0\x10\xa1\x00I\x00d\x00H\x00\x01\x00Y\x00d\tS\x00\x04\x00t\x11k\n\x90\x01rl\x01\x00}\n\x01\x00z6|\x00\xa0\x03d\n|\x07\x9b\x00d\x0bt\x12|\n\x83\x01\x9b\x00\x9d\x04\xa1\x01I\x00d\x00H\x00\x01\x00t\x0f\x83\x00I\x00d\x00H\x00\x01\x00W\x00Y\x00\xa2\x04d\x04S\x00d\x00}\n~\nX\x00Y\x00n0X\x00|\x00\xa0\x03d\x0c|\x07\x9b\x00d\r|\t\x9b\x00d\x0e\x9d\x05\xa1\x01I\x00d\x00H\x00\x01\x00t\x0f\x83\x00I\x00d\x00H\x00\x01\x00d\tS\x00\x90\x02nt|\x02\x90\x01s\xe4|\x00j\x02\x90\x01r\xe4|\x01t\x13|\x00|\x05\x83\x02I\x00d\x00H\x007\x00}\x01|\x00\xa0\x0e|\x01\xa1\x01I\x00d\x00H\x00\x01\x00|\x00\xa0\x10\xa1\x00I\x00d\x00H\x00\x01\x00d\x00S\x00t\x14\xa0\x15d\x0f|\x02\xa1\x02\x90\x02r\xc0t\x14\xa0\x15d\x0f|\x02\xa1\x02}\x02|\x02D\x00]\x88}\x04z\x1a|\x01t\x16|\x00|\x05|\x04\x83\x03I\x00d\x00H\x007\x00}\x01W\x00nf\x04\x00t\rk\n\x90\x02rF\x01\x00\x01\x00\x01\x00|\x01d\x107\x00}\x01Y\x00\x01\x00\x90\x02q\x8cY\x00nB\x04\x00t\x11k\n\x90\x02r\x86\x01\x00}\n\x01\x00z"|\x01d\x11t\x12|\n\x83\x01\x9b\x00d\x12\x9d\x037\x00}\x01W\x00Y\x00\xa2\x08\x90\x02q\x02W\x005\x00d\x00}\n~\nX\x00Y\x00n\x02X\x00\x90\x02q\x02|\x01\x90\x02r\xb8|\x00j\x0e|\x01d\x04d\x13\x8d\x02I\x00d\x00H\x00\x01\x00
|\x00\xa0\x10\xa1\x00I\x00d\x00H\x00\x01\x00d\tS\x00d\x00S\x00\x90\x01n2t\x14\xa0\x15d\x14|\x02\xa1\x02\x90\x02s\xd8d\x15|\x02k\x06\x90\x02r\xe4|\x02\xa0\x17\xa1\x00}\x04\x90\x01n\x0e|\x02\xa0\x17\xa1\x00D\x00]\xd4}\x0bt\x18t\x19t\x12j\x1a|\x0b\x83\x02\x83\x01\x90\x03r\x08d\t}\x0cn\x04d\x04}\x0cd\x16|\x0bk\x06\x90\x03s d\x17|\x0bk\x06\x90\x03r&d\t}\rn\x04d\x04}\rd\t|\x0c\x90\x03p4|\rf\x01k\x06\x90\x02r\xecz\x1a|\x01t\x16|\x00|\x05|\x0b\x83\x03I\x00d\x00H\x007\x00}\x01W\x00nf\x04\x00t\rk\n\x90\x03r|\x01\x00\x01\x00\x01\x00|\x01d\x107\x00}\x01Y\x00\x01\x00\x90\x03q\xc2Y\x00nB\x04\x00t\x11k\n\x90\x03r\xbc\x01\x00}\n\x01\x00z"|\x01d\x11t\x12|\n\x83\x01\x9b\x00d\x12\x9d\x037\x00}\x01W\x00Y\x00\xa2\x08\x90\x02q\xecW\x005\x00d\x00}\n~\nX\x00Y\x00n\x02X\x00\x90\x02q\xec|\x01\x90\x03r\xee|\x00j\x0e|\x01d\x04d\x13\x8d\x02I\x00d\x00H\x00\x01\x00|\x00\xa0\x10\xa1\x00I\x00d\x00H\x00\x01\x00d\tS\x00d\x00S\x00|\x04\x90\x04s\x14|\x00j\x02\x90\x04s\x14|\x00\xa0\x03d\x18\xa1\x01I\x00d\x00H\x00\x01\x00d\x04S\x00|\x04\x90\x04r\xfa|\x00j\x02\x90\x04s\xfa|\x04D\x00]\xac}\x0ez\x1a|\x01t\x13|\x00|\x05|\x0e\x83\x03I\x00d\x00H\x007\x00}\x01W\x00n\x8a\x04\x00t\x11k\n\x90\x04r\xce\x01\x00}\n\x01\x00zjd\x19t\x12|\n\x83\x01k\x06\x90\x04std\x1at\x12|\n\x83\x01k\x06\x90\x04r\x9a|\x01d\x107\x00}\x01t\x1b\xa0\x1cd\x1b\xa1\x01I\x00d\x00H\x00\x01\x00W\x00Y\x00\xa20\x01\x00\x90\x04q\xd4n$|\x01d\x1c|\x0e\x9b\x00d\x1dt\x12|\n\x83\x01\x9b\x00d\x1e\x9d\x057\x00}\x01W\x00Y\x00\xa2\x08\x90\x04q&W\x005\x00d\x00}\n~\nX\x00Y\x00n\x02X\x00\x90\x04q&|\x00j\x0e|\x01d\x04d\x13\x8d\x02I\x00d\x00H\x00\x01\x00|\x00\xa0\x10\xa1\x00I\x00d\x00H\x00\x01\x00d\x00S\x00t\x1d|\x03\x83\x01I\x00d\x00H\x00}\x0ft\x08|\x03\x83\x01I\x00d\x00H\x00}\x10z\x1at\x1e|\x00|\x05|\x03|\x10|\x0f\x83\x05I\x00d\x00H\x00}\x11W\x00n \x04\x00t\rk\n\x90\x05rP\x01\x00\x01\x00\x01\x00|\x00\xa0\x0ed\x10\xa1\x01\x01\x00Y\x00n\x02X\x00|\x11\x90\x05r\x92|\x00j\x0ed\x1f|\x10\x9b\x00d 
t\x1f|\x11d!\x19\x00\x83\x01\x9b\x00d"|\x10\x9b\x00d\r|\x11d\x02\x19\x00\x9b\x00d#\x9d\td\x04d\x13\x8d\x02I\x00d\x00H\x00\x01\x00|\x00\xa0\x10\xa1\x00I\x00d\x00H\x00\x01\x00d\x00S\x00)$NrT\x00\x00\x00r!\x00\x00\x00zk`[UNKNOWN - ERROR]`\n\n`Status` : **failed**\n`Reason` : Confused to upload file or the replied message/media.Frd\x00\x00\x00rG\x00\x00\x00rt\x00\x00\x00ru\x00\x00\x00Tz\x16`[FOLDER - UPLOAD]`\n\n`z `\n`Status` : **BAD**\n`Reason` : z\x16`[FOLDER - UPLOAD]`\n\n[rs\x00\x00\x00rv\x00\x00\x00\xfa \\bhttps?://drive\\.google\\.com\\S+ro\x00\x00\x00z0`[FILE - ERROR]`\n\n`Status` : **BAD**\n`Reason` : r\x03\x01\x00\x00)\x01Z\x0clink_previewz\x12\\bhttps?://.*\\.\\S+z\x08magnet:?r\'\x00\x00\x00r(\x00\x00\x00a\x0f\x01\x00\x00`[VALUE - ERROR]`\n\n`Status` : **BAD**\n`Reason` : given value is not URL nor file/folder path. If you think this is wrong, maybe you use .gd with multiple value of files/folders, e.g `.gd <filename1> <filename2>` for upload from files/folders path this doesn\'t support it.z\n not foundz\x06\'file\'g\x00\x00\x00\x00\x00\x00\x04@z4`[UNKNOWN - ERROR]`\n\n`Status` : **BAD**\n`Reason` : `z\x05` | `rx\x00\x00\x00r\xb3\x00\x00\x00rq\x00\x00\x00r\x01\x00\x00\x00rr\x00\x00\x00z-)\n`Status :` **OK** - Successfully uploaded.\n) 
r\xf9\x00\x00\x00r\xbf\x00\x00\x00Z\x0freply_to_msg_idr2\x00\x00\x00rY\x00\x00\x00r\x03\x00\x00\x00r|\x00\x00\x00r\x04\x00\x00\x00r^\x00\x00\x00r\x86\x00\x00\x00r\x87\x00\x00\x00r\x88\x00\x00\x00r\x89\x00\x00\x00r\x1c\x00\x00\x00r<\x00\x00\x00r\x8a\x00\x00\x00r5\x00\x00\x00r\x81\x00\x00\x00r0\x00\x00\x00r\x97\x00\x00\x00r\xbd\x00\x00\x00\xda\x07findallr\xd9\x00\x00\x00r\\\x00\x00\x00\xda\x03any\xda\x03map\xda\x07isdigitr3\x00\x00\x00r4\x00\x00\x00rb\x00\x00\x00r\x84\x00\x00\x00r\x19\x00\x00\x00)\x12rN\x00\x00\x00r\x8c\x00\x00\x00\xda\x05valuer]\x00\x00\x00r\x8b\x00\x00\x00rX\x00\x00\x00r\xed\x00\x00\x00r\xe4\x00\x00\x00r\x94\x00\x00\x00r\x95\x00\x00\x00r\x96\x00\x00\x00r\xaf\x00\x00\x00Z\x03oneZ\x03twoZ\x02dlra\x00\x00\x00r\x91\x00\x00\x00r\x93\x00\x00\x00rS\x00\x00\x00rS\x00\x00\x00rT\x00\x00\x00\xda\x0cgoogle_drive\xd3\x03\x00\x00s\x16\x01\x00\x00\x00\x02\x04\x02\x0c\x01\x04\x01\x04\x01\n\x01\x04\x01\n\x01\x04\x01\x02\xff\n\x05\x04\x01\x0e\x01\x08\x01\x04\x01\x08\x01\x04\x01\n\x01\x06\x01\x08\x01\n\x01\x04\x02\x0e\x01\x10\x01\n\x01\x08\x01\x02\x01\x16\x01\x10\x01\x04\x01\x02\xff\n\x04\x0c\x01\x0e\x01\x06\x01\x12\x01\x04\x01\x12\xff\n\x06\x0c\x01\x18\x02\x04\x01\x10\xff\n\x05\x0c\x01\x08\x01\x0e\x01\x14\x01\x10\x01\x0e\x01\x04\x02\x0e\x02\x0c\x01\x08\x01\x02\x01\x1a\x01\x10\x01\x02\x01\x02\xff\x04\x04\x0c\x01\x12\x01\x02\x01\x0e\xff\x04\x05 \x01\x06\x01\x14\x01\x0e\x01\x04\x02\x08\x01\x18\x01\x0c\x02\x0c\x01\x12\x01\x06\x02\x04\x01\x14\x01\x06\x02\x04\x01\x12\x01\x02\x01\x1a\x01\x10\x01\x02\x01\x02\xff\x04\x04\x0c\x01\x12\x01\x02\x01\x0e\xff\x04\x05 \x01\x06\x01\x14\x01\x0e\x01\x04\x02\x04\x01\x0e\x01\x04\x01\x02\xff\n\x08\x04\x01\x0e\x01\x08\x01\x02\x01\x1a\x01\x12\x01\x1c\x01\x02\x01\x02\xff\x04\x04\x10\x01\x0e\x03\x02\x01\x14\xff\x04\x05 \x01\x14\x01\x0e\x01\x04\x01\x0e\x01\x0e\x01\x02\x01\x06\x01\x02\x00\x02\x00\x02\xff\x0e\x02\x10\x01\x04\x01\x02\xff\n\x04\x06\x01\x04\x01(\x05\x02\xfa\x0c\x08\x0e\x01r\x0e\x01\x00\x00z\x1c^.gdfset (put|rm)(?: 
|$)(.*)c\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x00\x00\x0e\x00\x00\x00\xc3\x00\x00\x00s.\x02\x00\x00|\x00\xa0\x00d\x01\xa1\x01I\x00d\x02H\x00\x01\x00|\x00j\x01\xa0\x02d\x03\xa1\x01}\x01|\x01d\x04k\x02r\x86t\x03d\x02k\trDt\x03a\x04|\x00\xa0\x00d\x05\xa1\x01I\x00d\x02H\x00\x01\x00d\x02S\x00z\x06b\x04W\x00n&\x04\x00t\x05k\nrp\x01\x00\x01\x00\x01\x00|\x00\xa0\x00d\x06\xa1\x01I\x00d\x02H\x00\x01\x00Y\x00d\x07S\x00X\x00|\x00\xa0\x00d\x08\xa1\x01I\x00d\x02H\x00\x01\x00d\x02S\x00|\x00j\x01\xa0\x02d\t\xa1\x01}\x02|\x02s\xaa|\x00\xa0\x00d\n\xa1\x01I\x00d\x02H\x00\x01\x00d\x02S\x00z\x14t\x06\xa0\x07d\x0b|\x02\xa1\x02d\x0c\x19\x00}\x03W\x00n\x8e\x04\x00t\x08k\n\x90\x01rL\x01\x00\x01\x00\x01\x00t\tt\nt\x0bj\x0c|\x02\x83\x02\x83\x01r\xe6d\r}\x04n\x04d\x07}\x04d\x0e|\x02k\x06\x90\x00s\xfed\x0f|\x02k\x06\x90\x01r\x04d\r}\x05n\x04d\x07}\x05d\r|\x04\x90\x01p\x12|\x05f\x01k\x06\x90\x01r4|\x02a\x04|\x00\xa0\x00d\x10\xa1\x01I\x00d\x02H\x00\x01\x00Y\x00d\x02S\x00|\x00\xa0\x00d\x11\xa1\x01I\x00d\x02H\x00\x01\x00|\x02a\x04Y\x00n\xdeX\x00d\x12|\x03k\x06\x90\x01rl|\x00\xa0\x00d\x13\xa1\x01I\x00d\x02H\x00\x01\x00d\x02S\x00z\x12|\x03\xa0\rd\x14\xa1\x01d\x03\x19\x00a\x04W\x00n\x9a\x04\x00t\x08k\n\x90\x02r\x18\x01\x00\x01\x00\x01\x00z\x12|\x03\xa0\rd\x15\xa1\x01d\x03\x19\x00a\x04W\x00np\x04\x00t\x08k\n\x90\x02r\x12\x01\x00\x01\x00\x01\x00d\x16|\x03k\x06\x90\x01r\xce|\x03\xa0\rd\x17\xa1\x01d\x18\x19\x00a\x04n@z\x12|\x03\xa0\rd\x19\xa1\x01d\x03\x19\x00a\x04W\x00n,\x04\x00t\x08k\n\x90\x02r\x0c\x01\x00\x01\x00\x01\x00|\x00\xa0\x00d\x13\xa1\x01I\x00d\x02H\x00\x01\x00Y\x00Y\x00Y\x00d\x02S\x00X\x00Y\x00n\x02X\x00Y\x00n\x02X\x00|\x00\xa0\x00d\x10\xa1\x01I\x00d\x02H\x00\x01\x00d\x02S\x00)\x1az5 - Set parents dir for upload/check/makedir/remove - r\xff\x00\x00\x00Nr!\x00\x00\x00r\x02\x01\x00\x00zD`[FOLDER - SET]`\n\n`Status` : **OK** - using `G_DRIVE_FOLDER_ID` now.z;`[FOLDER - SET]`\n\n`Status` : **BAD** - No parent_Id is set.FzO`[FOLDER - SET]`\n\n`Status` : **OK** - 
`G_DRIVE_FOLDER_ID` empty, will use root.r\x9f\x00\x00\x00z#>`.gdfset put <folderURL/folderID>`r\x08\x01\x00\x00r\x01\x00\x00\x00Tr\'\x00\x00\x00r(\x00\x00\x00z>`[PARENT - FOLDER]`\n\n`Status` : **OK** - Successfully changed.z<`[PARENT - FOLDER]`\n\n`Status` : **WARNING** - forcing use...r\x1f\x00\x00\x00z<`[URL - ERROR]`\n\n`Status` : **BAD** - Not a valid folderURL.r \x00\x00\x00r"\x00\x00\x00r#\x00\x00\x00r$\x00\x00\x00r%\x00\x00\x00r&\x00\x00\x00)\x0er2\x00\x00\x00r\xf9\x00\x00\x00r\xbf\x00\x00\x00r\x12\x00\x00\x00r\x88\x00\x00\x00r\xe3\x00\x00\x00r\xbd\x00\x00\x00r\t\x01\x00\x00r\xb5\x00\x00\x00r\n\x01\x00\x00r\x0b\x01\x00\x00r0\x00\x00\x00r\x0c\x01\x00\x00r\\\x00\x00\x00)\x06rN\x00\x00\x00r\x04\x01\x00\x00Z\x03inpZ\x06ext_idZ\x02c1Z\x02c2rS\x00\x00\x00rS\x00\x00\x00rT\x00\x00\x00\xda\x11set_upload_folder\x86\x04\x00\x00s\x86\x00\x00\x00\x00\x03\x10\x02\x0c\x01\x08\x01\x08\x01\x04\x01\x04\x01\x02\xff\n\x04\x04\x02\x02\x01\x06\x01\x0e\x01\x04\x01\x02\xff\n\x04\x08\x02\x04\x01\x02\xff\n\x05\x04\x01\x0c\x01\x04\x01\x10\x01\x04\x02\x02\x01\x14\x01\x10\x02\x10\x01\x06\x02\x04\x01\x14\x01\x06\x02\x04\x01\x12\x01\x04\x01\x04\x01\x02\xff\n\x04\x06\x02\x04\x01\x02\xff\n\x04\n\x02\n\x01\x04\x01\x02\xff\n\x04\x04\x01\x02\x01\x12\x01\x10\x02\x02\x01\x12\x01\x10\x01\n\x01\x10\x02\x02\x01\x12\x01\x10\x01\x04\x01\x02\xff\n\x04\x18\x01\x04\x01\x02\xff\n\x04r\x0f\x01\x00\x00c\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\n\x00\x00\x00\x0f\x00\x00\x00\xc3\x00\x00\x00s.\x02\x00\x00d\x00}\x03d\x01a\x00|\x03\x90\x02s*t\x00d\x02k\x08r\x1at\x01\x82\x01t\x02\xa0\x03|\x01\xa1\x01}\x04|\x04j\x04}\x03z\n|\x04j\x05a\x06W\x00n\x14\x04\x00t\x07k\nrH\x01\x00\x01\x00\x01\x00Y\x00n\x02X\x00\x90\x01zf|\x03\x90\x01s@|\x04j\x08\x90\x01s@t\t|\x04j\n\x83\x01}\x05|\x05t\t|\x04j\x0b\x83\x01\x14\x00d\x03\x1b\x00}\x06d\x04\xa0\x0cd\x05\xa0\rd\x06d\x07\x84\x00t\x0et\x0f\xa0\x10|\x05d\x08\x1b\x00\xa1\x01\x83\x01D\x00\x83\x01\xa1\x01d\x05\xa0\rd\td\x07\x84\x00t\x0ed\x08t\x0f\xa0\x10|\x05d\x08\x1b\x00\xa
1\x01\x18\x00\x83\x01D\x00\x83\x01\xa1\x01|\x04\xa0\x11\xa1\x00\xa1\x03}\x07d\n|\x04j\x05\x9b\x00d\x0b|\x04j\x12\xa0\x13\xa1\x00\x9b\x00d\x0c|\x07\x9b\x00d\rt\x14|\x06\x83\x01\x9b\x00d\x0e|\x04\xa0\x15\xa1\x00\x9b\x00d\x0f|\x04\xa0\x16\xa1\x00\x9b\x00d\x10|\x04\xa0\x17\xa1\x00\x9b\x00d\x11\x9d\x0f}\x08|\x08|\x02k\x03\x90\x01s*|\x06|\x04\xa0\x15\xa1\x00k\x02\x90\x01rX|\x00\xa0\x18|\x08\xa1\x01I\x00d\x00H\x00\x01\x00|\x02}\x08n\x18|\x00\xa0\x18d\x12|\x08\x9b\x00d\x12\x9d\x03\xa1\x01I\x00d\x00H\x00\x01\x00t\x19\xa0\x1ad\x13\xa1\x01I\x00d\x00H\x00\x01\x00t\x1b|\x00|\x01|\x02\x83\x03I\x00d\x00H\x00\x01\x00t\x02\xa0\x03|\x01\xa1\x01}\x04|\x04j\x04}\x03|\x03\x90\x01r\xb0|\x00\xa0\x18d\x12|\x04j\x05\x9b\x00d\x14\x9d\x03\xa1\x01I\x00d\x00H\x00\x01\x00W\x00d\x02S\x00W\x00q\x08\x04\x00t\x1ck\n\x90\x02r&\x01\x00}\t\x01\x00zTd\x15t\x1d|\t\x83\x01k\x06\x90\x02r\x16|\x04j\x1ed\x02d\x16\x8d\x01\x01\x00z\x1e|\x00\xa0\x18d\n|\x04j\x05\x9b\x00d\x17\x9d\x03\xa1\x01I\x00d\x00H\x00\x01\x00W\x00n\x16\x04\x00t\x1ck\n\x90\x02r\x14\x01\x00\x01\x00\x01\x00Y\x00n\x02X\x00W\x005\x00d\x00}\t~\tX\x00Y\x00q\x08X\x00q\x08d\x00S\x00)\x18NFTr\x9e\x00\x00\x00z\x1e`Downloading` | [{0}{1}] `{2}`rT\x00\x00\x00c\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x03\x00\x00\x00S\x00\x00\x00s\x10\x00\x00\x00g\x00|\x00]\x08}\x01d\x00\x91\x02q\x04S\x00r\xa0\x00\x00\x00rS\x00\x00\x00r\xa1\x00\x00\x00rS\x00\x00\x00rS\x00\x00\x00rT\x00\x00\x00r\xa4\x00\x00\x00\xf6\x04\x00\x00s\x04\x00\x00\x00\x06\x00\x02\x00z)check_progress_for_dl.<locals>.<listcomp>r\xa5\x00\x00\x00c\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x03\x00\x00\x00S\x00\x00\x00s\x10\x00\x00\x00g\x00|\x00]\x08}\x01d\x00\x91\x02q\x04S\x00r\xa6\x00\x00\x00rS\x00\x00\x00r\xa1\x00\x00\x00rS\x00\x00\x00rS\x00\x00\x00rT\x00\x00\x00r\xa4\x00\x00\x00\xf8\x04\x00\x00s\x04\x00\x00\x00\x06\x00\x02\x00z\x15`[URI - DOWNLOAD]`\n\n`z\x10`\n`Status` -> 
**z\x03**\nr\xa8\x00\x00\x00r\xa9\x00\x00\x00r\xaa\x00\x00\x00r\xab\x00\x00\x00r\x9b\x00\x00\x00rp\x00\x00\x00\xe9\x0f\x00\x00\x00z\x1d`\n\nSuccessfully downloaded...z\x0f depth exceededr\x06\x01\x00\x00zM`\n`Status` : **failed**\n`Reason` : Auto cancelled download, URI/Torrent dead.)\x1fri\x00\x00\x00r\x1c\x00\x00\x00r\x1d\x00\x00\x00r\x7f\x00\x00\x00Z\x0bis_completer`\x00\x00\x00r\x80\x00\x00\x00r\xb5\x00\x00\x00Z\rerror_messager\xbc\x00\x00\x00r\x18\x00\x00\x00Z\x0ctotal_lengthr\xc4\x00\x00\x00r\x05\x00\x00\x00r\xc5\x00\x00\x00r\xc6\x00\x00\x00r\xc7\x00\x00\x00Z\x0fprogress_stringr\x92\x00\x00\x00r\xcd\x00\x00\x00r\x19\x00\x00\x00Z\x13total_length_stringZ\x15download_speed_stringZ\neta_stringr2\x00\x00\x00r3\x00\x00\x00r4\x00\x00\x00r~\x00\x00\x00r\x81\x00\x00\x00r0\x00\x00\x00\xda\x06remove)\nrN\x00\x00\x00r}\x00\x00\x00rh\x00\x00\x00r\xd8\x00\x00\x00r\x8f\x00\x00\x00r\xd3\x00\x00\x00r\xd2\x00\x00\x00r\xd6\x00\x00\x00rO\x00\x00\x00r\x96\x00\x00\x00rS\x00\x00\x00rS\x00\x00\x00rT\x00\x00\x00r~\x00\x00\x00\xe2\x04\x00\x00s\\\x00\x00\x00\x00\x01\x04\x03\x04\x01\x06\x01\x08\x01\x04\x02\n\x01\x06\x01\x02\x01\n\x01\x0e\x01\x06\x01\x04\x01\x0e\x01\n\x01\x12\x01\x04\x01\x0c\x01\x0c\xff\x08\x02\x0c\x01\x10\xff\x08\x02\x06\xfb\x04\x07F\xff\x02\n\x18\x01\x10\x01\x06\x02\x18\x01\x10\x01\x12\x01\n\x01\x06\x01\x06\x01\x1a\x02\n\x01\x12\x01\x0e\x01\x0c\x01\x02\x01\x04\x01\x0c\xff\x0e\x06\x10\x01r~\x00\x00\x00rN\x00\x00\x00a\xa2\x04\x00\x00`.gdauth`\nUsage: generate token to enable all cmd google drive service.\nThis only need to run once in life time.\n\n`.gdreset`\nUsage: reset your token if something bad happened or change drive acc.\n\n`.gd`\nUsage: Upload file from local or uri/url/drivelink into google drive.\nfor drivelink it\'s upload only if you want to.\n\n`.gdabort`\nUsage: Abort process uploading or downloading.\n\n`.gdlist`\nUsage: Get list of folders and files with default size 50.\nUse flags `-l range[1-1000]` for limit output.\nUse flags `-p parents-folder_id` 
for lists given folder in gdrive.\n\n`.gdf mkdir`\nUsage: Create gdrive folder.\n\n`.gdf check`\nUsage: Check file/folder in gdrive.\n\n`.gdf rm`<file/folder>name\nUsage: Delete files/folders in gdrive.\nCan\'t be undone, this method skipping file trash, so be caution...\n\n`.gdfset put`\nUsage: Change upload directory in gdrive.\n\n`.gdfset rm`\nUsage: remove set parentId from cmd\n`.gdfset put` into **G_DRIVE_FOLDER_ID** and if empty upload will go to root.\n\nNOTE:\nfor `.gdlist` you can combine -l and -p flags with or without name at the same time, it must be `-l` flags first before use `-p` flags.\nAnd by default it lists from latest \'modifiedTime\' and then folders.)\x01N)a\xda\x07__doc__r\xc0\x00\x00\x00ry\x00\x00\x00rJ\x00\x00\x00rH\x00\x00\x00r6\x00\x00\x00r3\x00\x00\x00r\xc6\x00\x00\x00r\x82\x00\x00\x00r\xbd\x00\x00\x00r\xb7\x00\x00\x00Z\x07loggingZ+userbot.modules.sql_helper.google_drive_sql\xda\x07modulesZ\nsql_helperZ\x10google_drive_sqlr.\x00\x00\x00Z\x03bs4r\x02\x00\x00\x00Z\x07os.pathr\x03\x00\x00\x00r\x04\x00\x00\x00r\x05\x00\x00\x00Z\tmimetypesr\x07\x00\x00\x00Z\x08telethonr\x08\x00\x00\x00Z\x19google_auth_oauthlib.flowr\t\x00\x00\x00Z\x19googleapiclient.discoveryr\n\x00\x00\x00Z\x16googleapiclient.errorsr\x0b\x00\x00\x00Z\x1egoogle.auth.transport.requestsr\x0c\x00\x00\x00Z\x14googleapiclient.httpr\r\x00\x00\x00r\x0e\x00\x00\x00Z\x07userbotr\x0f\x00\x00\x00r\x10\x00\x00\x00r\x11\x00\x00\x00r\x12\x00\x00\x00r\x13\x00\x00\x00r\x14\x00\x00\x00r\x15\x00\x00\x00r\x16\x00\x00\x00Z\x0euserbot.eventsr\x17\x00\x00\x00Z\ruserbot.utilsr\x18\x00\x00\x00r\x19\x00\x00\x00r\x1a\x00\x00\x00r\x1b\x00\x00\x00Z\x18userbot.utils.exceptionsr\x1c\x00\x00\x00Z\x14userbot.modules.ariar\x1d\x00\x00\x00r\x1e\x00\x00\x00r8\x00\x00\x00r9\x00\x00\x00r:\x00\x00\x00r;\x00\x00\x00Z\x02__\xda\x04infor\\\x00\x00\x00r\xb5\x00\x00\x00r\n\x01\x00\x00r\x0b\x01\x00\x00r0\x00\x00\x00r\x0c\x01\x00\x00Z\x02_1Z\x02_2Z\tgetLoggerZ\x06loggerZ\x08setLevelZ\x05ERRORrU\x00\x00\x00rY\x00\x00\x00
rZ\x00\x00\x00r^\x00\x00\x00rb\x00\x00\x00r\x97\x00\x00\x00r\xd9\x00\x00\x00r\xe0\x00\x00\x00r\xb6\x00\x00\x00r\x86\x00\x00\x00r\x84\x00\x00\x00r\x89\x00\x00\x00r\x8a\x00\x00\x00r\xee\x00\x00\x00r\x05\x01\x00\x00r\x07\x01\x00\x00r\x0e\x01\x00\x00r\x0f\x01\x00\x00r~\x00\x00\x00\xda\x06updaterS\x00\x00\x00rS\x00\x00\x00rS\x00\x00\x00rT\x00\x00\x00\xda\x08<module>\x08\x00\x00\x00s\xce\x00\x00\x00\x04\x04\x08\x01\x08\x01\x08\x01\x08\x01\x08\x01\x08\x01\x08\x01\x08\x01\x08\x01\x08\x01\x08\x02\x18\x02\x0c\x01\x14\x01\x0c\x02\x0c\x02\x0c\x01\x0c\x01\x0c\x01\x0c\x01\x10\x02(\x04\x0c\x01\x18\x01\x0c\x01\x10\x04\x04\x01\x04\x02\x02\x01\x02\xfe\x04\x04\x04\x04\x04\x01\n\x01\n\x01\x04\x01\x02\xff\x04\x02\x04\x01\x02\x01\x12\x01\x10\x01\x02\x01\x12\x01\x10\x01\n\x01\x10\x02\x02\x01\x04\x01\x02\xff\x02\x01\x02\xff\x08\x02\x10\x01\x12\x01\x06\x02\x04\x01\x14\x01\x06\x02\x04\x01\x12\x01\x02\x02\x04\x01\x02\xff\x04\x03\x16\x04\n\x01\x0c\x06\n\x01\nB\x08\x15\n\x01\n\n\x08\x05\x08\x08\nx\x08\x7f\x00V\x08\x12\x08\x07\x08\x16\x08I\x08\x1a\x08\r\x04\x01\x02\xff\x04\x02\nk\n\x01\n\x7f\x00\x13\n\x01\n\x0f\n\x01\n\x7f\x003\n\x01\n[\x08?\x04\x01\x02\x01\x02\xfe'))
| 16,353
| 65,341
| 0.748165
| 14,087
| 65,412
| 3.461063
| 0.106339
| 0.175363
| 0.094881
| 0.068668
| 0.512655
| 0.443125
| 0.369637
| 0.285257
| 0.253015
| 0.215009
| 0
| 0.336546
| 0.011726
| 65,412
| 4
| 65,341
| 16,353
| 0.417666
| 0.000795
| 0
| 0
| 0
| 9.5
| 0.582659
| 0.543368
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 9
|
723492ba33c1b129438603a6ea6b609e25092c5f
| 10,411
|
py
|
Python
|
inventory/migrations/0001_initial.py
|
common1/newassetcms
|
65eee3c2ed9dac4cc56bfff863a6cbaff9830d26
|
[
"MIT"
] | null | null | null |
inventory/migrations/0001_initial.py
|
common1/newassetcms
|
65eee3c2ed9dac4cc56bfff863a6cbaff9830d26
|
[
"MIT"
] | 7
|
2020-06-05T20:43:46.000Z
|
2022-01-13T01:14:21.000Z
|
inventory/migrations/0001_initial.py
|
common1/newassetcms
|
65eee3c2ed9dac4cc56bfff863a6cbaff9830d26
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.1.8 on 2019-05-27 22:42
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django_userforeignkey.models.fields
class Migration(migrations.Migration):
    """Initial schema for the ``inventory`` app.

    Auto-generated by Django 2.1.8 (see header comment). Do NOT hand-edit the
    operations below once this migration has been applied anywhere: the
    recorded migration state must match the database schema.

    Models created: Asset, AssetType, LoanedAsset, Reservation,
    ReservedAsset, ReturnedAsset, plus trailing AddField operations that
    break the circular FK dependencies between them.

    NOTE(review): the ``last_modified_by`` fields use related_name prefixed
    ``inventorys_`` (with an extra ``s``) while ``created_by`` uses
    ``inventory_``. This looks like a typo in the models, but it is baked
    into the applied schema/state — changing it requires a new migration,
    not an edit here.
    """

    # First migration for this app.
    initial = True

    # Only external dependency: the (swappable) user model for all FKs below.
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Asset',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('last_modified_at', models.DateTimeField(auto_now=True)),
                ('name', models.CharField(max_length=64, unique=True)),
                ('code', models.CharField(blank=True, max_length=12)),
            ],
            options={
                'verbose_name_plural': 'Assets',
                'ordering': ('name',),
            },
        ),
        migrations.CreateModel(
            name='AssetType',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('last_modified_at', models.DateTimeField(auto_now=True)),
                ('shortcut', models.CharField(max_length=12, unique=True)),
                ('name', models.CharField(max_length=64, unique=True)),
                ('info', models.TextField(blank=True)),
                ('created_by', django_userforeignkey.models.fields.UserForeignKey(blank=True, editable=False, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='inventory_assettype_related', related_query_name='inventory_assettypes', to=settings.AUTH_USER_MODEL)),
                ('last_modified_by', django_userforeignkey.models.fields.UserForeignKey(blank=True, editable=False, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='inventorys_assettype_related', related_query_name='inventory_assettypes', to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'verbose_name_plural': 'Asset Types',
                'ordering': ('name',),
            },
        ),
        migrations.CreateModel(
            name='LoanedAsset',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('last_modified_at', models.DateTimeField(auto_now=True)),
                ('pickup_date', models.DateField(blank=True, null=True)),
                ('pickup_time', models.TimeField(blank=True, null=True)),
                ('info', models.TextField(blank=True)),
                ('active', models.BooleanField(default=False)),
                ('created_by', django_userforeignkey.models.fields.UserForeignKey(blank=True, editable=False, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='inventory_loanedasset_related', related_query_name='inventory_loanedassets', to=settings.AUTH_USER_MODEL)),
                ('last_modified_by', django_userforeignkey.models.fields.UserForeignKey(blank=True, editable=False, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='inventorys_loanedasset_related', related_query_name='inventory_loanedassets', to=settings.AUTH_USER_MODEL)),
                ('receiver_out', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='loanedasset_receiver_out', to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'verbose_name_plural': 'Loaned Assets',
            },
        ),
        migrations.CreateModel(
            name='Reservation',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('last_modified_at', models.DateTimeField(auto_now=True)),
                ('name', models.TextField()),
                ('active', models.BooleanField(default=True)),
                ('title', models.CharField(blank=True, max_length=128, null=True)),
                ('start_date', models.DateField(blank=True, null=True)),
                ('start_time', models.TimeField(blank=True, null=True)),
                ('end_date', models.DateField(blank=True, null=True)),
                ('end_time', models.TimeField(blank=True, null=True)),
                ('consumer', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='reservation_consumer', to=settings.AUTH_USER_MODEL)),
                ('created_by', django_userforeignkey.models.fields.UserForeignKey(blank=True, editable=False, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='inventory_reservation_related', related_query_name='inventory_reservations', to=settings.AUTH_USER_MODEL)),
                ('last_modified_by', django_userforeignkey.models.fields.UserForeignKey(blank=True, editable=False, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='inventorys_reservation_related', related_query_name='inventory_reservations', to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'verbose_name_plural': 'Reservations',
                'ordering': ('-start_date',),
            },
        ),
        migrations.CreateModel(
            name='ReservedAsset',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('last_modified_at', models.DateTimeField(auto_now=True)),
                ('name', models.TextField()),
                ('info', models.TextField(blank=True)),
                ('active', models.BooleanField(default=True)),
                ('asset', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='inventory.Asset')),
                ('created_by', django_userforeignkey.models.fields.UserForeignKey(blank=True, editable=False, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='inventory_reservedasset_related', related_query_name='inventory_reservedassets', to=settings.AUTH_USER_MODEL)),
                ('last_modified_by', django_userforeignkey.models.fields.UserForeignKey(blank=True, editable=False, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='inventorys_reservedasset_related', related_query_name='inventory_reservedassets', to=settings.AUTH_USER_MODEL)),
                ('reservation', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='inventory.Reservation')),
            ],
            options={
                'verbose_name_plural': 'Reserved Assets',
                'ordering': ('name',),
            },
        ),
        migrations.CreateModel(
            name='ReturnedAsset',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('last_modified_at', models.DateTimeField(auto_now=True)),
                ('deliver_date', models.DateField(blank=True, null=True)),
                ('deliver_time', models.TimeField(blank=True, null=True)),
                ('info', models.TextField(blank=True)),
                ('active', models.BooleanField(default=False)),
                ('created_by', django_userforeignkey.models.fields.UserForeignKey(blank=True, editable=False, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='inventory_returnedasset_related', related_query_name='inventory_returnedassets', to=settings.AUTH_USER_MODEL)),
                ('last_modified_by', django_userforeignkey.models.fields.UserForeignKey(blank=True, editable=False, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='inventorys_returnedasset_related', related_query_name='inventory_returnedassets', to=settings.AUTH_USER_MODEL)),
                ('loanedasset', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='inventory.LoanedAsset')),
                ('receiver_in', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='returnedasset_receiver_in', to=settings.AUTH_USER_MODEL)),
                ('supplier_in', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='returnedasset_supplier_in', to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'verbose_name_plural': 'Returned Assets',
            },
        ),
        # Fields added after CreateModel because they reference models
        # created above (avoids forward-reference ordering problems).
        migrations.AddField(
            model_name='loanedasset',
            name='reservedasset',
            field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='inventory.ReservedAsset'),
        ),
        migrations.AddField(
            model_name='loanedasset',
            name='supplier_out',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='loanedasset_supplier_out', to=settings.AUTH_USER_MODEL),
        ),
        migrations.AddField(
            model_name='asset',
            name='assettype',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='asset_type', to='inventory.AssetType'),
        ),
        migrations.AddField(
            model_name='asset',
            name='created_by',
            field=django_userforeignkey.models.fields.UserForeignKey(blank=True, editable=False, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='inventory_asset_related', related_query_name='inventory_assets', to=settings.AUTH_USER_MODEL),
        ),
        migrations.AddField(
            model_name='asset',
            name='last_modified_by',
            field=django_userforeignkey.models.fields.UserForeignKey(blank=True, editable=False, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='inventorys_asset_related', related_query_name='inventory_assets', to=settings.AUTH_USER_MODEL),
        ),
    ]
| 67.603896
| 299
| 0.658726
| 1,118
| 10,411
| 5.884615
| 0.101073
| 0.042408
| 0.048944
| 0.076911
| 0.865785
| 0.849825
| 0.79921
| 0.766378
| 0.766378
| 0.71485
| 0
| 0.00317
| 0.212275
| 10,411
| 153
| 300
| 68.045752
| 0.799049
| 0.004322
| 0
| 0.534247
| 1
| 0
| 0.170108
| 0.066866
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.027397
| 0
| 0.054795
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
a0d6f0683b2ef6c0c1059b0213f13a62e8f01e67
| 12,423
|
py
|
Python
|
tests/test_operators.py
|
NJDFan/fxpmath
|
a4d67e421c351c3901d62e22c60a5c81d427811d
|
[
"MIT"
] | 97
|
2020-06-08T13:09:04.000Z
|
2022-03-30T23:15:56.000Z
|
tests/test_operators.py
|
NJDFan/fxpmath
|
a4d67e421c351c3901d62e22c60a5c81d427811d
|
[
"MIT"
] | 48
|
2020-06-08T15:12:20.000Z
|
2022-03-10T13:40:29.000Z
|
tests/test_operators.py
|
NJDFan/fxpmath
|
a4d67e421c351c3901d62e22c60a5c81d427811d
|
[
"MIT"
] | 22
|
2020-05-20T15:30:08.000Z
|
2022-03-04T23:46:13.000Z
|
import os
import sys
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
import fxpmath as fxp
from fxpmath.objects import Fxp
from fxpmath import utils
import numpy as np
def test_shift_bitwise():
    """Shift operators on Fxp: << / >> scale the value by powers of two.

    Covers integer and fractional values, signed and unsigned formats, and
    the 'trunc' shifting mode (which saturates/truncates instead of growing
    the word — see the `x.upper` assertions below).
    """
    # integer val
    x = Fxp(32, True, 8, 0)
    # left shift multiplies by 2**k; note the word grows to hold the result
    assert (x << 1)() == 64
    assert (x << 2)() == 128
    assert (x << 2).n_word == 9
    assert (x << 3)() == 256
    assert (x << 10)() == 32*(2**10)
    # right shift divides by 2**k, extending into fractional bits at the end
    assert (x >> 1)() == 16
    assert (x >> 2)() == 8
    assert (x >> 3)() == 4
    assert (x >> 5)() == 1
    assert (x >> 6)() == 0.5
    # float val
    x = Fxp(24.25, True, 8, 2)
    #left
    assert (x << 1)() == 48.5
    assert (x << 4)() == 388.0
    #right
    x = Fxp(24.5, True, 8, 2)
    assert (x >> 1)() == 12.25
    assert (x >> 2)() == 6.125
    # negative
    x = Fxp(-24.25, True, 8, 2)
    #left
    assert (x << 1)() == -48.5
    assert (x << 4)() == -388.0
    #right
    x = Fxp(-24.5, True, 8, 2)
    assert (x >> 1)() == -12.25
    assert (x >> 2)() == -6.125
    # trunc shift: word size is fixed, so shifts clip at the format limits
    # left
    x = Fxp(32, True, 8, 0, shifting='trunc')
    assert (x << 1)() == 64
    assert (x << 2)() == x.upper
    # right
    assert (x >> 3)() == 4
    assert (x >> 5)() == 1
    assert (x >> 6)() == 0
    # unsigned
    x = Fxp(32, False, 8, 0)
    # left
    assert (x << 1)() == 64
    assert (x << 2)() == 128
    assert (x << 3)() == 256
    assert (x << 3).n_word == 9
    assert (x << 10)() == 32*(2**10)
    # right
    assert (x >> 1)() == 16
    assert (x >> 2)() == 8
    assert (x >> 3)() == 4
    assert (x >> 5)() == 1
    assert (x >> 6)() == 0.5
    # float val
    x = Fxp(24.25, False, 8, 2)
    #left
    assert (x << 1)() == 48.5
    assert (x << 4)() == 388.0
    #right
    x = Fxp(24.5, False, 8, 2)
    assert (x >> 1)() == 12.25
    assert (x >> 2)() == 6.125
    # trunc left shift
    x = Fxp(64, False, 8, 0, shifting='trunc')
    assert (x << 1)() == 128
    assert (x << 2)() == x.upper
def test_invert():
    """Bitwise NOT (~) flips every raw bit, regardless of signedness."""
    x = Fxp(None, True, 8, 4)
    xu = Fxp(None, False, 8, 4)

    # first case checked through an intermediate to exercise the operator result
    x('0b 0010 1100')
    y = ~x
    assert y.bin() == '11010011'

    # remaining 8-bit patterns, applied to both signed and unsigned formats
    for raw, flipped in [
        ('0b0000 0000', '11111111'),
        ('0b 1111 1111', '00000000'),
        ('0b 1000 0000', '01111111'),
    ]:
        x(raw)
        assert (~x).bin() == flipped
        xu(raw)
        assert (~xu).bin() == flipped

    # 32-bit word: still a pure bit flip
    w = Fxp(None, True, 32, 0)
    wu = Fxp(None, False, 32, 0)
    val_str = '10100000111101011100001100110101'
    inv_str = '01011111000010100011110011001010'
    w('0b'+val_str)
    assert (~w).bin() == inv_str
    wu('0b'+val_str)
    assert (~wu).bin() == inv_str
def test_and():
    """'&' between two Fxp, and between Fxp and a raw int, ANDs the raw bits."""
    x = Fxp(None, True, 8, 4)
    xu = Fxp(None, False, 8, 4)
    y = Fxp(None, True, 8, 4)
    yu = Fxp(None, False, 8, 4)

    # (value bits, mask bits, expected AND bits)
    for val_str, mks_str, and_str in [
        ('00110101', '11110000', '00110000'),
        ('10101100', '11001100', '10001100'),
    ]:
        x('0b'+val_str)
        xu('0b'+val_str)
        y('0b'+mks_str)
        yu('0b'+mks_str)
        mask_int = utils.str2num('0b'+mks_str)
        for a in (x, xu):
            # Fxp & Fxp, every signedness combination
            for b in (y, yu):
                assert (a & b).bin() == and_str
            # Fxp & int, both operand orders
            assert (a & mask_int).bin() == and_str
            assert (mask_int & a).bin() == and_str
def test_or():
    """'|' between two Fxp, and between Fxp and a raw int, ORs the raw bits."""
    x = Fxp(None, True, 8, 4)
    xu = Fxp(None, False, 8, 4)
    y = Fxp(None, True, 8, 4)
    yu = Fxp(None, False, 8, 4)

    # (value bits, mask bits, expected OR bits)
    for val_str, mks_str, or_str in [
        ('00110101', '11110000', '11110101'),
        ('10101100', '11001100', '11101100'),
    ]:
        x('0b'+val_str)
        xu('0b'+val_str)
        y('0b'+mks_str)
        yu('0b'+mks_str)
        mask_int = utils.str2num('0b'+mks_str)
        for a in (x, xu):
            # Fxp | Fxp, every signedness combination
            for b in (y, yu):
                assert (a | b).bin() == or_str
            # Fxp | int, both operand orders
            assert (a | mask_int).bin() == or_str
            assert (mask_int | a).bin() == or_str
def test_xor():
    """'^' between two Fxp, and between Fxp and a raw int, XORs the raw bits."""
    x = Fxp(None, True, 8, 4)
    xu = Fxp(None, False, 8, 4)
    y = Fxp(None, True, 8, 4)
    yu = Fxp(None, False, 8, 4)

    # (value bits, mask bits, expected XOR bits)
    for val_str, mks_str, xor_str in [
        ('00110101', '11110000', '11000101'),
        ('10101100', '11001100', '01100000'),
    ]:
        x('0b'+val_str)
        xu('0b'+val_str)
        y('0b'+mks_str)
        yu('0b'+mks_str)
        mask_int = utils.str2num('0b'+mks_str)
        for a in (x, xu):
            # Fxp ^ Fxp, every signedness combination
            for b in (y, yu):
                assert (a ^ b).bin() == xor_str
            # Fxp ^ int, both operand orders
            assert (a ^ mask_int).bin() == xor_str
            assert (mask_int ^ a).bin() == xor_str
def test_arrays():
    """Bitwise '&' broadcasts element-wise over an Fxp holding an array."""
    vec = Fxp(None, True, 8, 4)
    mask = Fxp(None, True, 8, 4)
    vec(['0b00110101', '0b10101100'])
    mask('0b11110000')
    masked = vec & mask
    assert masked.bin()[0] == '00110000'
    assert masked.bin()[1] == '10100000'
def test_operations_with_combinations():
    """Fxp arithmetic matches exact float arithmetic over value pairs.

    All chosen values are exactly representable in binary fixed point, so the
    Fxp result must equal the plain-Python result bit for bit.
    """
    vals = [-256, -64, -16, -4.75, -3.75, -3.25, -1, -0.75, -0.125, 0.0,
            0.125, 0.75, 1, 1.5, 3.75, 4.0, 8.0, 32, 128]
    for vx in vals:
        for vy in vals:
            x = Fxp(vx)
            y = Fxp(vy)
            assert (vx + vy) == (x + y)()
            assert (vy + vx) == (y + x)()
            assert (vx - vy) == (x - y)()
            assert -(vy - vx) == -(y - x)()
            assert (vx * vy) == (x * y)()
            assert (vy * vx) == (y * x)()

    # division-like ops: divisors restricted so quotients stay exact
    nums = [-256, -64, -16, -4.75, -4.25, -1, -0.75, -0.125, 0.125, 0.75,
            1, 1.5, 2.75, 4.0, 8.0, 32, 128]
    dens = [-256, -64, -16, -1, -0.5, -0.125, 0.125, 0.5, 1, 2, 4.0, 8.0, 32, 128]
    for vx in nums:
        for vy in dens:
            x = Fxp(vx)
            y = Fxp(vy)
            assert (vx / vy) == (x / y)()
            assert (vx // vy) == (x // y)()
            assert (vx % vy) == (x % y)()
def test_operations_with_constants_with_combinations():
    """Mixing Fxp operands with plain Python constants gives identical results.

    For each pair, all spellings (fxp op const, const op fxp, fxp op fxp,
    plain arithmetic) must agree exactly.
    """
    vals = [-256, -64, -16, -4.75, -3.75, -3.25, -1, -0.75, -0.125, 0.0,
            0.125, 0.75, 1, 1.5, 3.75, 4.0, 8.0, 32, 128]

    # addition / subtraction
    for vx in vals:
        for vy in vals:
            x = Fxp(vx, True, 16, 3)
            y = Fxp(vy, True, 16, 3)
            assert (x + vy)() == (vx + vy) == (vx + y)() == (x + y)()
            assert (vy + x)() == (vy + vx) == (y + vx)() == (y + x)()
            assert (x - vy)() == (vx - vy) == (vx - y)() == (x - y)()
            assert -(vy - x)() == -(vy - vx) == -(y - vx)() == -(y - x)()

    # multiplication (wider format so products stay exact)
    for vx in vals:
        for vy in vals:
            x = Fxp(vx, True, 24, 6)
            y = Fxp(vy, True, 24, 6)
            assert (x * vy)() == (vx * vy) == (vx * y)() == (x * y)()
            assert (vy * x)() == (vy * vx) == (y * vx)() == (y * x)()

    # division-like ops; reverse (const op fxp-divisor) forms are known
    # not to hold and stay commented out, as in the original.
    nums = [-256, -64, -16, -4.75, -4.25, -1, -0.75, -0.125, 0.125, 0.75,
            1, 1.5, 2.75, 4.0, 8.0, 32, 128]
    dens = [-256, -64, -16, -1, -0.5, -0.125, 0.125, 0.5, 1, 2, 4.0, 8.0, 32, 128]
    for vx in nums:
        for vy in dens:
            x = Fxp(vx, True, 32, 12)
            y = Fxp(vy, True, 32, 12)
            assert (x / vy)() == (vx / vy) == (vx / y)() == (x / y)()
            # assert (vy / x)() == (vy / vx) == (y / vx)() == (y / x)()
            assert (x // vy)() == (vx // vy) == (vx // y)() == (x // y)()
            # assert (vy // x)() == (vy // vx) == (y // vx)() == (y // x)()
            assert (x % vy)() == (vx % vy) == (vx % y)() == (x % y)()
            # assert (vy % x)() == (vy % vx) == (y % vx)() == (y % x)()
def test_pow():
    """Exponentiation (**) on Fxp: scalar, array, and Fxp exponents.

    Walks through integer/negative/fractional exponents and array-valued
    bases/exponents; some cases mutate `config.op_sizing`, so the statement
    order below matters.
    """
    # Fxp ** Fxp with a negative exponent
    x = Fxp(16, True, n_int=14, n_frac=8)
    n = Fxp(-1, True, n_int=14, n_frac=8)
    assert(x**n)() == 1/16
    # non-negative integer exponents, signed and unsigned base
    v = 15
    n_vals = [0, 1, 2, 3]
    x = Fxp(v, signed=True, n_int=12, n_frac=0)
    xu = Fxp(v, signed=False, n_int=12, n_frac=0)
    for n in n_vals:
        assert (x**n)() == v**n
        assert (xu**n)() == v**n
    # negative base
    v = -16
    x = Fxp(v, signed=True, n_int=12, n_frac=0)
    for n in n_vals:
        assert (x**n)() == v**n
    # float base, exponents including negatives
    v = 16.0
    n_vals = [-2, -1, 0, 1, 2, 3]
    x = Fxp(v, signed=True, n_int=14, n_frac=8)
    # xu = Fxp(v, signed=False, n_int=12, n_frac=0)
    for n in n_vals:
        assert (x**n)() == v**n
        # assert (xu**n)() == v**n
    v = -16.0
    x = Fxp(v, signed=True, n_int=14, n_frac=8)
    for n in n_vals:
        assert (x**n)() == (v)**n
    # fractional exponents (roots) of 81
    v = 81
    n_vals = [0, 0.25, 0.5]
    x = Fxp(v, signed=True, n_int=14, n_frac=8)
    xu = Fxp(v, signed=False, n_int=14, n_frac=8)
    for n in n_vals:
        assert (x**n)() == v**n
        assert (xu**n)() == v**n
    # Fxp exponent objects; results compared against numpy
    v = 16.
    n = 2
    v_vals = [-4, -2, -1, 0, 1, 2, 4]
    n_vals = [-2, -1, 0, 1, 2]
    x = Fxp(v, signed=True, n_int=12, n_frac=0)
    xu = Fxp(v, signed=False, n_int=12, n_frac=0)
    p = Fxp(n, signed=True, n_int=8, n_frac=0)
    assert ((x**p)() == np.power(v, n)).all()
    assert ((xu**p)() == np.power(v, n)).all()
    # scalar base ** array exponent, with and without op_sizing='same'
    x = Fxp(v, signed=True, n_int=12, n_frac=8)
    p_vals = Fxp(n_vals, signed=True, n_int=8, n_frac=0)
    x.config.op_sizing = 'same'
    assert ((x**p_vals)() == np.power(v, n_vals)).all()
    p_vals = Fxp(n_vals, signed=True, n_int=8, n_frac=0)
    assert ((x**p_vals)() == np.power(v, n_vals)).all()
    # array base ** scalar exponent
    x_vals = Fxp(v_vals, signed=True, n_int=12, n_frac=8)
    p = Fxp(n, signed=True, n_int=8, n_frac=0)
    x_vals.config.op_sizing = 'same'
    assert ((x_vals**p)() == np.power(v_vals, n)).all()
    p = Fxp(n, signed=True, n_int=8, n_frac=2)
    assert ((x_vals**p)() == np.power(v_vals, n)).all()
    # array base ** array exponent (element-wise)
    v_vals = [-1, 1, 2, 3, 4]
    n_vals = [-2, -1, 0, 1, 2]
    x_vals = Fxp(v_vals, signed=True, n_int=12, n_frac=8)
    p_vals = Fxp(n_vals, signed=True, n_int=8, n_frac=0)
    x_vals.config.op_sizing = 'same'
    assert ((x_vals**p_vals)() == np.array([vi**ni for vi, ni in zip(v_vals, n_vals)])).all()
    p_vals = Fxp(n_vals, signed=True, n_int=8, n_frac=2)
    assert ((x_vals**p_vals)() == np.array([vi**ni for vi, ni in zip(v_vals, n_vals)])).all()
    # 2-D arrays
    v_vals = [[1, 2],[3, 4]]
    n_vals = [[1, 2],[3, 4]]
    x_vals = Fxp(v_vals, signed=True, n_int=12, n_frac=8)
    p_vals = Fxp(n_vals, signed=True, n_int=8, n_frac=0)
    x_vals.config.op_sizing = 'same'
    assert ((x_vals**p_vals)() == np.power(v_vals, n_vals)).all()
def test_scaled():
    """Arithmetic on an Fxp with scale=2/bias=1 behaves like plain floats."""
    scaled = Fxp(10.5, True, 16, 8, scale=2, bias=1)
    assert scaled() == 10.5
    assert scaled + 2 == 12.5
    assert scaled - 2.5 == 8.0
    assert scaled * 3 == 31.5
    assert scaled / 2 == 5.25
| 29.368794
| 115
| 0.48539
| 2,132
| 12,423
| 2.712008
| 0.064728
| 0.104116
| 0.04981
| 0.070564
| 0.81477
| 0.797475
| 0.777932
| 0.746973
| 0.744206
| 0.740401
| 0
| 0.123251
| 0.286807
| 12,423
| 422
| 116
| 29.438389
| 0.529345
| 0.031152
| 0
| 0.615625
| 0
| 0
| 0.043294
| 0.005328
| 0
| 0
| 0
| 0
| 0.43125
| 1
| 0.03125
| false
| 0
| 0.01875
| 0
| 0.05
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
19e7540196ad90163feb7018771ccb4ab2bcb873
| 10,175
|
py
|
Python
|
cpm_kernels/kernels/arith.py
|
Achazwl/cpm_kernels
|
926d06461ad460dc8e80a66239328739eed16618
|
[
"Apache-2.0"
] | 1
|
2022-03-04T11:04:09.000Z
|
2022-03-04T11:04:09.000Z
|
cpm_kernels/kernels/arith.py
|
Achazwl/cpm_kernels
|
926d06461ad460dc8e80a66239328739eed16618
|
[
"Apache-2.0"
] | 1
|
2022-03-07T03:45:00.000Z
|
2022-03-19T06:16:37.000Z
|
cpm_kernels/kernels/arith.py
|
Achazwl/cpm_kernels
|
926d06461ad460dc8e80a66239328739eed16618
|
[
"Apache-2.0"
] | 1
|
2022-03-04T16:52:08.000Z
|
2022-03-04T16:52:08.000Z
|
from .base import Kernel, DevicePointer, CUDAStream, round_up
import ctypes
# Handle to the compiled "arith" CUDA module. Each name listed below is a
# kernel entry point that becomes callable as a method on this object
# (e.g. arith_kernel.cu_arith_element_add) — see Kernel in .base.
arith_kernel = Kernel(
    "arith",
    [
        "cu_arith_global_scale",
        "cu_arith_element_add",
        "cu_arith_element_mul",
        "cu_arith_batch_add_forward",
        "cu_arith_batch_add_backward",
        "cu_arith_ln_mul_add",
        "cu_arith_ln_add",
        "cu_arith_ln_mul",
        "cu_arith_ln_div",
        "cu_arith_ln_sub_div",
        "cu_arith_ln_mul_backward",
        "cu_arith_ln_add_backward",
        "cu_arith_batch_mul_add",
        "cu_arith_batch_mul"
    ]
)
def arith_global_scale(
    n : int,
    inp : DevicePointer,    # (n,) fp16
    scale : float,
    out : DevicePointer,    # (n,) fp16
    stream : CUDAStream
):
    """out = inp * scale, launched asynchronously on `stream`."""
    block = min(round_up(n, 32), 1024)       # warp-aligned block, capped at 1024
    blocks_x = round_up(n, block) // block   # enough blocks to cover all n
    arith_kernel.cu_arith_global_scale(
        (blocks_x, 1, 1),
        (block, 1, 1),
        0, stream,
        [
            ctypes.c_int64(n),
            ctypes.c_void_p(inp),
            ctypes.c_float(scale),
            ctypes.c_void_p(out),
        ]
    )
def arith_element_add(
    batch : int, n : int,
    x : DevicePointer,      # (batch, n) fp16
    y : DevicePointer,      # (batch, n) fp16
    out : DevicePointer,    # (batch, n) fp16
    stream : CUDAStream
):
    """
    out = x + y
    """
    # n must be even: the kernel is passed n/2, i.e. it consumes the row in
    # pairs of fp16 values (presumably half2 vectorization).
    assert n % 2 == 0
    half = n // 2
    block = min(round_up(half, 32), 1024)
    arith_kernel.cu_arith_element_add(
        (batch, round_up(half, block) // block, 1),
        (block, 1, 1),
        0, stream,
        [
            ctypes.c_int64(batch),
            ctypes.c_int64(half),
            ctypes.c_void_p(x),
            ctypes.c_void_p(y),
            ctypes.c_void_p(out),
        ]
    )
def arith_element_mul(
    batch : int, n : int,
    x : DevicePointer,      # (batch, n) fp16
    y : DevicePointer,      # (batch, n) fp16
    out : DevicePointer,    # (batch, n) fp16
    stream : CUDAStream
):
    """
    out = x * y
    """
    # n must be even: the kernel receives n/2 and reads fp16 values in pairs.
    assert n % 2 == 0
    half = n // 2
    block = min(round_up(half, 32), 1024)
    arith_kernel.cu_arith_element_mul(
        (batch, round_up(half, block) // block, 1),
        (block, 1, 1),
        0, stream,
        [
            ctypes.c_int64(batch),
            ctypes.c_int64(half),
            ctypes.c_void_p(x),
            ctypes.c_void_p(y),
            ctypes.c_void_p(out),
        ]
    )
def arith_batch_add_forward(
    batch : int, n : int,
    x : DevicePointer,      # (batch, n) fp16
    y : DevicePointer,      # (n) fp16
    out : DevicePointer,    # (batch, n) fp16
    stream : CUDAStream
):
    """
    out = x + y[None, :]
    """
    # n must be even: the kernel receives n/2 and reads fp16 values in pairs.
    assert n % 2 == 0
    half = n // 2
    block = min(round_up(half, 32), 1024)
    arith_kernel.cu_arith_batch_add_forward(
        (batch, round_up(half, block) // block, 1),
        (block, 1, 1),
        0, stream,
        [
            ctypes.c_int64(batch),
            ctypes.c_int64(half),
            ctypes.c_void_p(x),
            ctypes.c_void_p(y),
            ctypes.c_void_p(out),
        ]
    )
def arith_batch_add_backward(
    batch : int, n : int,
    grad_out : DevicePointer,   # (batch, n) fp16
    grad : DevicePointer,       # (n) fp16
    stream : CUDAStream
):
    """Backward of arith_batch_add_forward — presumably reduces grad_out over
    the batch axis into grad (n,); confirm against the CUDA source."""
    # One block per group of 32 columns; the 32x32 thread tile layout is
    # fixed by the kernel implementation.
    blocks_x = round_up(n, 32) // 32
    arith_kernel.cu_arith_batch_add_backward(
        (blocks_x, 1, 1),
        (32, 32, 1),
        0, stream,
        [
            ctypes.c_int64(batch),
            ctypes.c_int64(n),
            ctypes.c_void_p(grad_out),
            ctypes.c_void_p(grad),
        ]
    )
def arith_ln_mul_add(
    batch : int, n : int, m : int,
    inp : DevicePointer,    # (batch, n, m) fp16
    alpha : DevicePointer,  # (n) fp16
    beta : DevicePointer,   # (n) fp16
    out : DevicePointer,    # (batch, n, m) fp16
    stream : CUDAStream
):
    """
    out = inp * alpha[None, :, None] + beta[None, :, None]
    """
    # m must be even: the kernel receives m/2 and reads the innermost
    # dimension in fp16 pairs.
    assert m % 2 == 0
    half = m // 2
    block = min(round_up(half, 32), 1024)
    arith_kernel.cu_arith_ln_mul_add(
        (batch, n, round_up(half, block) // block),
        (block, 1, 1),
        0, stream,
        [
            ctypes.c_int64(batch),
            ctypes.c_int64(n),
            ctypes.c_int64(half),
            ctypes.c_void_p(inp),
            ctypes.c_void_p(alpha),
            ctypes.c_void_p(beta),
            ctypes.c_void_p(out),
        ]
    )
def arith_ln_add(
    batch : int, n : int, m : int,
    inp : DevicePointer,    # (batch, n, m) fp16
    beta : DevicePointer,   # (n) fp16
    out : DevicePointer,    # (batch, n, m) fp16
    stream : CUDAStream
):
    """
    out = inp + beta[None, :, None]
    """
    # m must be even: the kernel receives m/2 (fp16 values read in pairs).
    assert m % 2 == 0
    half = m // 2
    block = min(round_up(half, 32), 1024)
    arith_kernel.cu_arith_ln_add(
        (batch, n, round_up(half, block) // block),
        (block, 1, 1),
        0, stream,
        [
            ctypes.c_int64(batch),
            ctypes.c_int64(n),
            ctypes.c_int64(half),
            ctypes.c_void_p(inp),
            ctypes.c_void_p(beta),
            ctypes.c_void_p(out),
        ]
    )
def arith_ln_mul(
    batch : int, n : int, m : int,
    inp : DevicePointer,    # (batch, n, m) fp16
    alpha : DevicePointer,  # (n) fp16
    out : DevicePointer,    # (batch, n, m) fp16
    stream : CUDAStream
):
    """
    out = inp * alpha[None, :, None]
    """
    # m must be even: the kernel receives m/2 (fp16 values read in pairs).
    assert m % 2 == 0
    half = m // 2
    block = min(round_up(half, 32), 1024)
    arith_kernel.cu_arith_ln_mul(
        (batch, n, round_up(half, block) // block),
        (block, 1, 1),
        0, stream,
        [
            ctypes.c_int64(batch),
            ctypes.c_int64(n),
            ctypes.c_int64(half),
            ctypes.c_void_p(inp),
            ctypes.c_void_p(alpha),
            ctypes.c_void_p(out),
        ]
    )
def arith_ln_div(
    batch : int, n : int, m : int,
    inp : DevicePointer,    # (batch, n, m) fp16
    alpha : DevicePointer,  # (n) fp16
    out : DevicePointer,    # (batch, n, m) fp16
    stream : CUDAStream
):
    """
    out = inp / alpha[None, :, None]
    """
    # m must be even: the kernel receives m/2 (fp16 values read in pairs).
    assert m % 2 == 0
    half = m // 2
    block = min(round_up(half, 32), 1024)
    arith_kernel.cu_arith_ln_div(
        (batch, n, round_up(half, block) // block),
        (block, 1, 1),
        0, stream,
        [
            ctypes.c_int64(batch),
            ctypes.c_int64(n),
            ctypes.c_int64(half),
            ctypes.c_void_p(inp),
            ctypes.c_void_p(alpha),
            ctypes.c_void_p(out),
        ]
    )
def arith_ln_sub_div(
    batch : int, n : int, m : int,
    inp : DevicePointer,    # (batch, n, m) fp16
    alpha : DevicePointer,  # (n) fp16
    beta : DevicePointer,   # (n) fp16
    out : DevicePointer,    # (batch, n, m) fp16
    stream : CUDAStream
):
    """
    out = (inp - beta[None, :, None]) / alpha[None, :, None]
    """
    # m must be even: the kernel receives m/2 (fp16 values read in pairs).
    assert m % 2 == 0
    half = m // 2
    block = min(round_up(half, 32), 1024)
    arith_kernel.cu_arith_ln_sub_div(
        (batch, n, round_up(half, block) // block),
        (block, 1, 1),
        0, stream,
        [
            ctypes.c_int64(batch),
            ctypes.c_int64(n),
            ctypes.c_int64(half),
            ctypes.c_void_p(inp),
            ctypes.c_void_p(alpha),
            ctypes.c_void_p(beta),
            ctypes.c_void_p(out),
        ]
    )
def arith_ln_mul_backward(
    batch : int, n : int, m : int,
    inp : DevicePointer,        # (batch, n, m) fp16
    grad_out : DevicePointer,   # (batch, n, m) fp16
    grad : DevicePointer,       # (n) fp16
    stream : CUDAStream
):
    """Backward of arith_ln_mul w.r.t. alpha — presumably
    grad[j] = sum over (batch, m) of inp * grad_out; confirm against the
    CUDA source. Note: m is passed unhalved here, unlike the forward pass."""
    # One block per row j; the 32x32 thread tile layout is fixed by the kernel.
    arith_kernel.cu_arith_ln_mul_backward(
        (n, 1, 1),
        (32, 32, 1),
        0, stream,
        [
            ctypes.c_int64(batch),
            ctypes.c_int64(n),
            ctypes.c_int64(m),
            ctypes.c_void_p(inp),
            ctypes.c_void_p(grad_out),
            ctypes.c_void_p(grad),
        ]
    )
def arith_ln_add_backward(
    batch : int, n : int, m : int,
    grad_out : DevicePointer,   # (batch, n, m) fp16
    grad : DevicePointer,       # (n) fp16
    stream : CUDAStream
):
    """Backward of arith_ln_add w.r.t. beta — presumably
    grad[j] = sum over (batch, m) of grad_out; confirm against the CUDA
    source. Note: m is passed unhalved here, unlike the forward pass."""
    # One block per row j; the 32x32 thread tile layout is fixed by the kernel.
    arith_kernel.cu_arith_ln_add_backward(
        (n, 1, 1),
        (32, 32, 1),
        0, stream,
        [
            ctypes.c_int64(batch),
            ctypes.c_int64(n),
            ctypes.c_int64(m),
            ctypes.c_void_p(grad_out),
            ctypes.c_void_p(grad),
        ]
    )
def arith_batch_mul_add(
    batch : int, n : int,
    x : DevicePointer,      # (batch, n)
    alpha : DevicePointer,  # (n)
    beta : DevicePointer,   # (n)
    out : DevicePointer,    # (batch, n)
    stream : CUDAStream
):
    """Presumably out = x * alpha[None, :] + beta[None, :] (per-column affine);
    confirm against the CUDA source."""
    # n must be even: the kernel receives n/2 (values read in pairs).
    assert n % 2 == 0
    half = n // 2
    block = min(round_up(half, 32), 1024)
    arith_kernel.cu_arith_batch_mul_add(
        (batch, round_up(half, block) // block, 1),
        (block, 1, 1),
        0, stream,
        [
            ctypes.c_int64(batch),
            ctypes.c_int64(half),
            ctypes.c_void_p(x),
            ctypes.c_void_p(alpha),
            ctypes.c_void_p(beta),
            ctypes.c_void_p(out),
        ]
    )
def arith_batch_mul(
    batch : int, n : int,
    x : DevicePointer,      # (batch, n)
    alpha : DevicePointer,  # (n)
    out : DevicePointer,    # (batch, n)
    stream : CUDAStream
):
    """Presumably out = x * alpha[None, :] (per-column scaling); confirm
    against the CUDA source."""
    # n must be even: the kernel receives n/2 (values read in pairs).
    assert n % 2 == 0
    half = n // 2
    block = min(round_up(half, 32), 1024)
    arith_kernel.cu_arith_batch_mul(
        (batch, round_up(half, block) // block, 1),
        (block, 1, 1),
        0, stream,
        [
            ctypes.c_int64(batch),
            ctypes.c_int64(half),
            ctypes.c_void_p(x),
            ctypes.c_void_p(alpha),
            ctypes.c_void_p(out),
        ]
    )
| 28.661972
| 61
| 0.52226
| 1,289
| 10,175
| 3.892164
| 0.039566
| 0.107435
| 0.092087
| 0.100458
| 0.940203
| 0.904923
| 0.889376
| 0.874427
| 0.845127
| 0.835559
| 0
| 0.045702
| 0.350565
| 10,175
| 354
| 62
| 28.742938
| 0.713529
| 0.087469
| 0
| 0.737179
| 0
| 0
| 0.031791
| 0.015786
| 0
| 0
| 0
| 0
| 0.032051
| 1
| 0.044872
| false
| 0
| 0.00641
| 0
| 0.051282
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
df9d237726d5d359f7bdb462a59f3b344116b9ca
| 212
|
py
|
Python
|
sisvac_appointments/services/__init__.py
|
opticrd/sisvac-odoo-modules
|
8ca71ec8f116a04416d62a780acc1ff5784acd3a
|
[
"MIT"
] | 3
|
2021-03-16T17:14:25.000Z
|
2021-08-15T17:40:04.000Z
|
sisvac_appointments/services/__init__.py
|
opticrd/sisvac-odoo-modules
|
8ca71ec8f116a04416d62a780acc1ff5784acd3a
|
[
"MIT"
] | 3
|
2021-03-19T01:37:40.000Z
|
2021-04-14T12:27:20.000Z
|
sisvac_appointments/services/__init__.py
|
opticrd/sisvac-odoo-modules
|
8ca71ec8f116a04416d62a780acc1ff5784acd3a
|
[
"MIT"
] | null | null | null |
from . import common
from . import appointments_service
from . import application_services
from . import symptom_services
from . import consent_services
from . import location_services
from . import lot_services
| 26.5
| 34
| 0.834906
| 27
| 212
| 6.333333
| 0.407407
| 0.409357
| 0.421053
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.132075
| 212
| 7
| 35
| 30.285714
| 0.929348
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
261166e77197b46a8c6f5cd00a6a1e19768d8823
| 4,015
|
py
|
Python
|
flaskie/api/v1/tests/test_logout.py
|
asheuh/flaskie
|
290fd2a6602abdf3b11434e2a72a3428acc0c0f4
|
[
"MIT"
] | 7
|
2018-06-20T19:06:05.000Z
|
2019-11-03T02:23:20.000Z
|
flaskie/api/v1/tests/test_logout.py
|
asheux/flaskie
|
290fd2a6602abdf3b11434e2a72a3428acc0c0f4
|
[
"MIT"
] | 23
|
2018-07-09T13:00:22.000Z
|
2018-08-04T10:48:42.000Z
|
flaskie/api/v1/tests/test_logout.py
|
asheux/flaskie
|
290fd2a6602abdf3b11434e2a72a3428acc0c0f4
|
[
"MIT"
] | 1
|
2018-09-22T15:39:20.000Z
|
2018-09-22T15:39:20.000Z
|
import json
from .base_test import BaseTestCase
class TestLogout(BaseTestCase):
    """Integration tests for the logout and token-refresh endpoints.

    Each test logs in as the seeded user 'paulla', then exercises one
    token-revocation/refresh endpoint with the appropriate bearer token.
    """

    def _login(self):
        """Log in as the seeded test user and return the parsed JSON payload.

        Also asserts the login response itself is well-formed: success
        status, expected message, an access token present, and HTTP 201.
        """
        response = self.client.post(
            '/api/v1/auth/login',
            data=json.dumps(dict(
                username='paulla',
                password='mermaid'
            )),
            content_type='application/json'
        )
        payload = json.loads(response.data.decode())
        self.assertTrue(payload['status'] == 'success')
        self.assertTrue(payload['message'] == 'Successfully logged in as Paulla Mboya')
        self.assertTrue(payload['Authorization']['access_token'])
        self.assertEqual(response.status_code, 201)
        return payload

    def _post_with_bearer(self, endpoint, token):
        """POST to *endpoint* with the given token as a Bearer Authorization header."""
        return self.client.post(
            endpoint,
            headers=dict(Authorization='Bearer ' + token)
        )

    def test_logout(self):
        """Revoking the access token logs the user out."""
        with self.client:
            tokens = self._login()['Authorization']
            response = self._post_with_bearer(
                '/api/v1/auth/logout_access', tokens['access_token']
            )
            data = json.loads(response.data.decode())
            self.assertTrue(data['status'] == 'success')
            self.assertTrue(data['message'] == 'Access token has been revoked, you are now logged out')
            self.assertEqual(response.status_code, 200)

    def test_logout_refresh(self):
        """Revoking the refresh token succeeds."""
        with self.client:
            tokens = self._login()['Authorization']
            response = self._post_with_bearer(
                '/api/v1/auth/logout_refresh', tokens['refresh_token']
            )
            data = json.loads(response.data.decode())
            self.assertTrue(data['status'] == 'success')
            self.assertTrue(data['message'] == 'Refresh token has been revoked')
            self.assertEqual(response.status_code, 200)

    def test_token_refresh(self):
        """A valid refresh token yields a new access token (HTTP 201)."""
        with self.client:
            tokens = self._login()['Authorization']
            response = self._post_with_bearer(
                '/api/v1/auth/refresh_token', tokens['refresh_token']
            )
            data = json.loads(response.data.decode())
            self.assertTrue(data['status'] == 'success')
            self.assertTrue(data['message'] == 'token refreshed successfully')
            self.assertEqual(response.status_code, 201)
| 43.172043
| 103
| 0.539726
| 366
| 4,015
| 5.789617
| 0.161202
| 0.084946
| 0.072204
| 0.110429
| 0.896177
| 0.880604
| 0.880604
| 0.880604
| 0.840019
| 0.840019
| 0
| 0.009143
| 0.346202
| 4,015
| 93
| 104
| 43.172043
| 0.798095
| 0.009465
| 0
| 0.72619
| 0
| 0
| 0.185707
| 0.019879
| 0
| 0
| 0
| 0
| 0.25
| 1
| 0.035714
| false
| 0.035714
| 0.02381
| 0
| 0.071429
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
2622545d6da9d4e0b483916a9e3466b892e652a8
| 2,299
|
py
|
Python
|
tests/unit/butterfree/transform/transformations/user_defined_functions/conftest.py
|
fossabot/butterfree
|
8a7da8c540b51c6560b2825cb926c40a351f202b
|
[
"Apache-2.0"
] | 208
|
2020-07-17T18:46:10.000Z
|
2022-03-21T12:44:12.000Z
|
tests/unit/butterfree/transform/transformations/user_defined_functions/conftest.py
|
fossabot/butterfree
|
8a7da8c540b51c6560b2825cb926c40a351f202b
|
[
"Apache-2.0"
] | 124
|
2020-07-17T19:42:47.000Z
|
2021-07-21T00:38:05.000Z
|
tests/unit/butterfree/transform/transformations/user_defined_functions/conftest.py
|
fossabot/butterfree
|
8a7da8c540b51c6560b2825cb926c40a351f202b
|
[
"Apache-2.0"
] | 30
|
2020-07-17T20:24:09.000Z
|
2022-03-17T00:50:37.000Z
|
from pytest import fixture
@fixture
def feature_set_dataframe(spark_context, spark_session):
    """Numeric fixture: ids 1 and 2 each carry feature1 = 100 x2, 200 x3, 300 x5."""
    values = [100] * 2 + [200] * 3 + [300] * 5
    data = [
        {"id": key, "feature1": value}
        for key in (1, 2)
        for value in values
    ]
    return spark_session.read.json(spark_context.parallelize(data, 1))
@fixture
def feature_set_custom_dataframe(spark_context, spark_session):
    """String fixture: id 1 gets 'abc' x3 + 'def' x2, id 2 the mirror image."""
    per_id = {
        1: ["abc"] * 3 + ["def"] * 2,
        2: ["def"] * 3 + ["abc"] * 2,
    }
    data = [
        {"id": key, "feature1": value}
        for key, values in per_id.items()
        for value in values
    ]
    return spark_session.read.json(spark_context.parallelize(data, 1))
@fixture
def mode_target_df(spark_context, spark_session):
    """Expected mode output: '300' for both ids."""
    data = [{"id": key, "mode(feature1)": "300"} for key in (1, 2)]
    return spark_session.read.json(spark_context.parallelize(data, 1))
@fixture
def most_frequent_set_target_df(spark_context, spark_session):
    """Expected most_frequent_set output: the same ordered list for both ids."""
    ranking = ["300", "200", "100"]
    data = [{"id": key, "most_frequent_set(feature1)": ranking} for key in (1, 2)]
    return spark_session.read.json(spark_context.parallelize(data, 1))
@fixture
def most_frequent_set_str_target_df(spark_context, spark_session):
    """Expected string most_frequent_set output: per-id orderings differ."""
    expected = {1: ["abc", "def"], 2: ["def", "abc"]}
    data = [
        {"id": key, "most_frequent_set(feature1)": value}
        for key, value in expected.items()
    ]
    return spark_session.read.json(spark_context.parallelize(data, 1))
| 31.493151
| 72
| 0.531535
| 274
| 2,299
| 4.29927
| 0.113139
| 0.04584
| 0.140068
| 0.071307
| 0.905773
| 0.900679
| 0.876061
| 0.804754
| 0.804754
| 0.699491
| 0
| 0.092956
| 0.246629
| 2,299
| 72
| 73
| 31.930556
| 0.587182
| 0
| 0
| 0.725806
| 0
| 0
| 0.223575
| 0.046977
| 0
| 0
| 0
| 0
| 0
| 1
| 0.080645
| false
| 0
| 0.016129
| 0
| 0.177419
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
26b40eaaa82b9f2ed4027bee64eeace23b2d04e5
| 174
|
py
|
Python
|
tests/test_punters_client.py
|
predictive-punter/punters_client
|
87fa0ba29f716937ae2a602b6946662f79b3452d
|
[
"MIT"
] | 4
|
2019-11-03T06:07:53.000Z
|
2021-04-20T17:33:24.000Z
|
tests/test_punters_client.py
|
justjasongreen/punters_client
|
87fa0ba29f716937ae2a602b6946662f79b3452d
|
[
"MIT"
] | 50
|
2016-07-20T05:14:40.000Z
|
2016-07-27T07:12:12.000Z
|
tests/test_punters_client.py
|
predictive-punter/punters_client
|
87fa0ba29f716937ae2a602b6946662f79b3452d
|
[
"MIT"
] | 5
|
2016-12-15T06:04:46.000Z
|
2020-09-15T07:02:58.000Z
|
import punters_client
def test_version():
    """punters_client.__version__ should return the correct version string"""
    expected_version = '1.0.0b8'
    actual_version = punters_client.__version__
    assert actual_version == expected_version
| 21.75
| 77
| 0.752874
| 22
| 174
| 5.409091
| 0.681818
| 0.327731
| 0.336134
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.027027
| 0.149425
| 174
| 7
| 78
| 24.857143
| 0.777027
| 0.385057
| 0
| 0
| 0
| 0
| 0.069307
| 0
| 0
| 0
| 0
| 0
| 0.333333
| 1
| 0.333333
| true
| 0
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
f803ecd26c75201d8b091079b451b29c2f847fca
| 5,372
|
py
|
Python
|
src/go/outcome.py
|
sadakatsu/strawman
|
b374f2bb6268ebe9aa25da8578fb0f0f25af5b87
|
[
"MIT"
] | null | null | null |
src/go/outcome.py
|
sadakatsu/strawman
|
b374f2bb6268ebe9aa25da8578fb0f0f25af5b87
|
[
"MIT"
] | null | null | null |
src/go/outcome.py
|
sadakatsu/strawman
|
b374f2bb6268ebe9aa25da8578fb0f0f25af5b87
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
from enum import auto, Enum
from math import isclose
from .color import Color
class Outcome:
    """Abstract interface for the result of a Go game.

    Concrete subclasses (visible in this file: CompleteButNotScored, Draw,
    InProgress, Invalidated, Win) override each property; every accessor
    here raises NotImplementedError.
    """

    @property
    def over(self) -> bool:
        """Whether the game has ended."""
        raise NotImplementedError()

    @property
    def black_points_on_board(self) -> int:
        """Black's on-board point count (None in subclasses without a score)."""
        raise NotImplementedError()

    @property
    def black_score(self) -> float:
        """Black's final score including adjustments."""
        raise NotImplementedError()

    @property
    def white_points_on_board(self) -> int:
        """White's on-board point count (None in subclasses without a score)."""
        raise NotImplementedError()

    @property
    def white_score(self) -> float:
        """White's final score including adjustments."""
        raise NotImplementedError()

    @property
    def margin(self) -> float:
        """Absolute score difference between the players."""
        raise NotImplementedError()

    @property
    def winner(self) -> Color:
        """The winning Color, or None when there is no winner."""
        raise NotImplementedError()
class CompleteButNotScored(Outcome, Enum):
    """Singleton outcome: the game is over but has not been scored.

    `over` is True; every score/winner accessor returns None because no
    scoring has been performed yet.
    """

    # Enum with a single member makes this a singleton.
    INSTANCE = auto()

    @property
    def over(self) -> bool:
        return True

    @property
    def black_points_on_board(self) -> int:
        return None

    @property
    def black_score(self) -> float:
        return None

    @property
    def white_points_on_board(self) -> int:
        return None

    @property
    def white_score(self) -> float:
        return None

    @property
    def margin(self) -> float:
        return None

    @property
    def winner(self) -> Color:
        return None

    def __str__(self):
        return 'Complete but not Scored'
class Draw(Outcome):
    """Terminal outcome in which both players finish with equal scores."""

    def __init__(
        self,
        black_points_on_board: int,
        black_score: float,
        white_points_on_board: int,
        white_score: float
    ):
        self._b_board = black_points_on_board
        self._b_score = black_score
        self._w_board = white_points_on_board
        self._w_score = white_score

    @property
    def over(self) -> bool:
        # A draw is always a finished game.
        return True

    @property
    def black_points_on_board(self) -> int:
        return self._b_board

    @property
    def black_score(self) -> float:
        return self._b_score

    @property
    def white_points_on_board(self) -> int:
        return self._w_board

    @property
    def white_score(self) -> float:
        return self._w_score

    @property
    def margin(self) -> float:
        # Scores are equal in a draw, so the margin is always zero.
        return 0.0

    @property
    def winner(self) -> Color:
        # Nobody wins a draw.
        return None

    def __str__(self):
        return 'Draw'
class InProgress(Outcome, Enum):
    """Singleton outcome: the game is still being played.

    `over` is False; every score/winner accessor returns None.
    """

    # Enum with a single member makes this a singleton.
    INSTANCE = auto()

    @property
    def over(self) -> bool:
        return False

    @property
    def black_points_on_board(self) -> int:
        return None

    @property
    def black_score(self) -> float:
        return None

    @property
    def white_points_on_board(self) -> int:
        return None

    @property
    def white_score(self) -> float:
        return None

    @property
    def margin(self) -> float:
        return None

    @property
    def winner(self) -> Color:
        return None

    def __str__(self):
        return 'In Progress'
class Invalidated(Outcome, Enum):
    """Singleton outcome: the game ended but its result does not count.

    `over` is True; every score/winner accessor returns None.
    """

    # Enum with a single member makes this a singleton.
    INSTANCE = auto()

    @property
    def over(self) -> bool:
        return True

    @property
    def black_points_on_board(self) -> int:
        return None

    @property
    def black_score(self) -> float:
        return None

    @property
    def white_points_on_board(self) -> int:
        return None

    @property
    def white_score(self) -> float:
        return None

    @property
    def margin(self) -> float:
        return None

    @property
    def winner(self) -> Color:
        return None

    def __str__(self):
        return 'Invalidated'
class Win(Outcome):
    """Terminal outcome in which one player scored more than the other."""

    def __init__(
        self,
        black_points_on_board: int,
        black_score: float,
        white_points_on_board: int,
        white_score: float
    ):
        self._b_board = black_points_on_board
        self._b_score = black_score
        self._w_board = white_points_on_board
        self._w_score = white_score
        self._gap = abs(black_score - white_score)
        # Whoever holds the higher score takes the game (White on ties,
        # exactly as the strict > comparison dictates).
        if black_score > white_score:
            self._victor = Color.BLACK
        else:
            self._victor = Color.WHITE

    @property
    def over(self) -> bool:
        return True

    @property
    def black_points_on_board(self) -> int:
        return self._b_board

    @property
    def black_score(self) -> float:
        return self._b_score

    @property
    def white_points_on_board(self) -> int:
        return self._w_board

    @property
    def white_score(self) -> float:
        return self._w_score

    @property
    def margin(self) -> float:
        return self._gap

    @property
    def winner(self) -> Color:
        return self._victor

    def __str__(self):
        if self._victor is Color.BLACK:
            winner = 'Black'
        else:
            winner = 'White'
        return f'{winner} won by {self._gap} points'
def calculate_outcome(
    black_points_on_board: int,
    black_point_adjustment: float,
    white_points_on_board: int,
    white_point_adjustment: float,
):
    """Build the Outcome for a scored game.

    Adds each player's adjustment to their board points; a Draw is
    produced when the totals are numerically close (math.isclose),
    otherwise a Win.
    """
    black_total = black_points_on_board + black_point_adjustment
    white_total = white_points_on_board + white_point_adjustment
    result_type = Draw if isclose(black_total, white_total) else Win
    return result_type(black_points_on_board, black_total, white_points_on_board, white_total)
| 21.488
| 94
| 0.638496
| 645
| 5,372
| 4.996899
| 0.093023
| 0.143345
| 0.145206
| 0.100527
| 0.811666
| 0.776295
| 0.741545
| 0.704933
| 0.695005
| 0.660875
| 0
| 0.000518
| 0.281646
| 5,372
| 249
| 95
| 21.574297
| 0.834672
| 0.003909
| 0
| 0.833333
| 0
| 0
| 0.017944
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.268817
| false
| 0
| 0.016129
| 0.209677
| 0.553763
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 9
|
f80e74da52382a01276f3649b5a1d96628728874
| 1,782
|
py
|
Python
|
common/src/main/python/gov/nasa/jpl/edrn/labcas/preprocess/utils.py
|
EDRN/labcas-backend
|
87a0a4bfbb98782b3ed58270aebcc8ba6a392106
|
[
"Apache-2.0"
] | null | null | null |
common/src/main/python/gov/nasa/jpl/edrn/labcas/preprocess/utils.py
|
EDRN/labcas-backend
|
87a0a4bfbb98782b3ed58270aebcc8ba6a392106
|
[
"Apache-2.0"
] | 3
|
2020-01-30T01:02:14.000Z
|
2021-01-01T00:55:59.000Z
|
common/src/main/python/gov/nasa/jpl/edrn/labcas/preprocess/utils.py
|
EDRN/labcas-backend
|
87a0a4bfbb98782b3ed58270aebcc8ba6a392106
|
[
"Apache-2.0"
] | null | null | null |
# Collection of Python utilities for LabCAS operations
def write_description(metadata_filepath, description):
    '''Writes the file description to the ancillary metadata file.

    Emits an OODT CAS metadata XML document containing a single
    "_File_Description" vector key whose value is *description*.
    '''
    # Parenthesized print works identically on Python 2 and 3
    # (the original bare print statement is a syntax error on Python 3).
    print("Writing metadata file: %s" % metadata_filepath)
    # 'f' rather than 'file' so the builtin is not shadowed.
    with open(metadata_filepath, 'w') as f:
        f.write('<cas:metadata xmlns:cas="http://oodt.jpl.nasa.gov/1.0/cas">\n')
        f.write('\t<keyval type="vector">\n')
        f.write('\t\t<key>_File_Description</key>\n')
        f.write('\t\t<val>%s</val>\n' % description)
        f.write('\t</keyval>\n')
        f.write('</cas:metadata>\n')
def write_file_metadata(metadata_filepath, metadata):
    '''Writes file metadata to the ancillary file.

    Each (key, value) pair in *metadata* becomes a vector keyval whose
    key is prefixed with "_File_".
    '''
    # Parenthesized print works identically on Python 2 and 3
    # (the original bare print statement is a syntax error on Python 3).
    print("Writing metadata file: %s" % metadata_filepath)
    # 'f' rather than 'file' so the builtin is not shadowed.
    with open(metadata_filepath, 'w') as f:
        f.write('<cas:metadata xmlns:cas="http://oodt.jpl.nasa.gov/1.0/cas">\n')
        for key, value in metadata.items():
            f.write('\t<keyval type="vector">\n')
            f.write('\t\t<key>_File_%s</key>\n' % key)
            f.write('\t\t<val>%s</val>\n' % value)
            f.write('\t</keyval>\n')
        f.write('</cas:metadata>\n')
def write_dataset_metadata(filepath, metadata):
    '''Writes dataset metadata to the ancillary file.

    Like write_file_metadata, but keys are written verbatim with no
    "_File_" prefix. (The original docstring said "file metadata" —
    a copy-paste slip.)
    '''
    # Parenthesized print works identically on Python 2 and 3
    # (the original bare print statement is a syntax error on Python 3).
    print("Writing metadata file: %s" % filepath)
    # 'f' rather than 'file' so the builtin is not shadowed.
    with open(filepath, 'w') as f:
        f.write('<cas:metadata xmlns:cas="http://oodt.jpl.nasa.gov/1.0/cas">\n')
        for key, value in metadata.items():
            f.write('\t<keyval type="vector">\n')
            f.write('\t\t<key>%s</key>\n' % key)
            f.write('\t\t<val>%s</val>\n' % value)
            f.write('\t</keyval>\n')
        f.write('</cas:metadata>\n')
| 45.692308
| 83
| 0.602694
| 252
| 1,782
| 4.202381
| 0.174603
| 0.152975
| 0.113314
| 0.113314
| 0.813031
| 0.812087
| 0.812087
| 0.812087
| 0.794145
| 0.794145
| 0
| 0.004283
| 0.213805
| 1,782
| 39
| 84
| 45.692308
| 0.751606
| 0.029181
| 0
| 0.689655
| 0
| 0.103448
| 0.360153
| 0.037676
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0.103448
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
f81b7fadf51f8e70075bb54b3346cb40132c7496
| 1,578
|
py
|
Python
|
Blinker/BlinkerDebug.py
|
zhcong/blinker-mpy
|
b52538497a5e734c63d00a72c3f8ed28ff749a86
|
[
"MIT"
] | 17
|
2019-07-30T08:43:32.000Z
|
2021-12-08T21:47:10.000Z
|
Blinker/BlinkerDebug.py
|
zhcong/blinker-mpy
|
b52538497a5e734c63d00a72c3f8ed28ff749a86
|
[
"MIT"
] | 3
|
2020-05-05T10:51:57.000Z
|
2021-04-21T03:06:20.000Z
|
Blinker/BlinkerDebug.py
|
zhcong/blinker-mpy
|
b52538497a5e734c63d00a72c3f8ed28ff749a86
|
[
"MIT"
] | 10
|
2019-08-10T16:01:02.000Z
|
2021-12-13T08:43:22.000Z
|
from BlinkerUtility.BlinkerUtility import *
class BlinkerDebug():
    """Holds the two global debug-verbosity flags for Blinker logging."""

    def __init__(self):
        # Both debug levels start disabled.
        self.isDebug = False
        self.isDebugAll = False

    def debug(self):
        """Enable normal debug output only."""
        self.isDebug, self.isDebugAll = True, False

    def debugAll(self):
        """Enable both normal and verbose debug output."""
        self.isDebug, self.isDebugAll = True, True


# Module-wide singleton consulted by the BLINKER_*LOG helpers below.
BLINKER_DEBUG = BlinkerDebug()
def BLINKER_LOG(arg1, *vartuple):
    """Print a '[millis] message' debug line when normal debug is enabled.

    arg1 and every extra positional argument are stringified and
    concatenated to form the message; output is suppressed unless
    BLINKER_DEBUG.isDebug is set.
    """
    if not BLINKER_DEBUG.isDebug:
        return
    # join() builds the message in one pass instead of repeated +=.
    data = str(arg1) + ''.join(str(var) for var in vartuple)
    print('[' + str(millis()) + '] ' + data)
def BLINKER_ERR_LOG(arg1, *vartuple):
    """Print a '[millis] Error: message' line when normal debug is enabled.

    Same formatting as BLINKER_LOG but with an 'Error: ' marker after
    the timestamp; suppressed unless BLINKER_DEBUG.isDebug is set.
    """
    if not BLINKER_DEBUG.isDebug:
        return
    # join() builds the message in one pass instead of repeated +=.
    data = str(arg1) + ''.join(str(var) for var in vartuple)
    print('[' + str(millis()) + '] Error: ' + data)
def BLINKER_LOG_ALL(arg1, *vartuple):
    """Print a '[millis] message' line when verbose debug is enabled.

    Identical formatting to BLINKER_LOG, but gated on
    BLINKER_DEBUG.isDebugAll instead of isDebug.
    """
    if not BLINKER_DEBUG.isDebugAll:
        return
    # join() builds the message in one pass instead of repeated +=.
    data = str(arg1) + ''.join(str(var) for var in vartuple)
    print('[' + str(millis()) + '] ' + data)
def BLINKER_ERR_LOG_ALL(arg1, *vartuple):
    """Print a '[millis] Error: message' line when verbose debug is enabled.

    Identical formatting to BLINKER_ERR_LOG, but gated on
    BLINKER_DEBUG.isDebugAll instead of isDebug.
    """
    if not BLINKER_DEBUG.isDebugAll:
        return
    # join() builds the message in one pass instead of repeated +=.
    data = str(arg1) + ''.join(str(var) for var in vartuple)
    print('[' + str(millis()) + '] Error: ' + data)
| 25.868852
| 63
| 0.575412
| 190
| 1,578
| 4.689474
| 0.2
| 0.094276
| 0.089787
| 0.107744
| 0.817059
| 0.805836
| 0.731762
| 0.731762
| 0.731762
| 0.731762
| 0
| 0.007024
| 0.2782
| 1,578
| 60
| 64
| 26.3
| 0.775241
| 0.146388
| 0
| 0.727273
| 0
| 0
| 0.019374
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.159091
| false
| 0
| 0.022727
| 0
| 0.295455
| 0.090909
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
f8613fdae4ef7c8224f173f11a7de0c6dc0d49f3
| 1,925
|
py
|
Python
|
backend/wod_board/tests/models/test_user.py
|
GuillaumeOj/P13-WOD-Board
|
36df7979e63c354507edb56eabdfc548b1964d08
|
[
"MIT"
] | null | null | null |
backend/wod_board/tests/models/test_user.py
|
GuillaumeOj/P13-WOD-Board
|
36df7979e63c354507edb56eabdfc548b1964d08
|
[
"MIT"
] | 82
|
2021-01-17T18:12:23.000Z
|
2021-06-12T21:46:49.000Z
|
backend/wod_board/tests/models/test_user.py
|
GuillaumeOj/WodBoard
|
1ac12404f6094909c9bf116bcaf6ccd60e85bc00
|
[
"MIT"
] | null | null | null |
import pytest
import sqlalchemy.exc
from wod_board.models import user
def test_user(db):
    """User model: __str__ format, plus NOT NULL enforcement on every required column.

    First creates a fully-populated user and checks its string form, then
    for each required column re-creates the user with that column omitted
    and expects an IntegrityError naming it.
    """
    required = {
        "email": "foo@bar.com",
        "password": "foo-password",
        "username": "foo-username",
        "is_admin": False,
    }
    new_user = user.User(
        first_name="foo",
        last_name="bar",
        **required,
    )
    db.add(new_user)
    db.commit()
    db.refresh(new_user)
    assert str(new_user) == f"<User {new_user.email}>"

    # Each required column must reject NULL (same four cases as before,
    # deduplicated into one loop).
    for column in required:
        kwargs = {key: value for key, value in required.items() if key != column}
        new_user = user.User(**kwargs)
        db.add(new_user)
        with pytest.raises(sqlalchemy.exc.IntegrityError) as error:
            db.commit()
        db.rollback()
        assert (
            f'null value in column "{column}" of relation "user" '
            "violates not-null constraint" in str(error)
        )
| 25.328947
| 63
| 0.598961
| 241
| 1,925
| 4.692946
| 0.190871
| 0.08046
| 0.04863
| 0.066313
| 0.827586
| 0.827586
| 0.810787
| 0.789567
| 0.789567
| 0.789567
| 0
| 0
| 0.277922
| 1,925
| 75
| 64
| 25.666667
| 0.813669
| 0
| 0
| 0.691176
| 0
| 0
| 0.25039
| 0
| 0
| 0
| 0
| 0
| 0.073529
| 1
| 0.014706
| false
| 0.073529
| 0.044118
| 0
| 0.058824
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 7
|
3e29f0eb4ce004dc76ef6dd0876aa930cd9d76f6
| 40,996
|
py
|
Python
|
o/soft_robot/prev/kinematics.py
|
YoshimitsuMatsutaIe/ctrlab2021_soudan
|
7841c981e6804cc92d34715a00e7c3efce41d1d0
|
[
"MIT"
] | null | null | null |
o/soft_robot/prev/kinematics.py
|
YoshimitsuMatsutaIe/ctrlab2021_soudan
|
7841c981e6804cc92d34715a00e7c3efce41d1d0
|
[
"MIT"
] | null | null | null |
o/soft_robot/prev/kinematics.py
|
YoshimitsuMatsutaIe/ctrlab2021_soudan
|
7841c981e6804cc92d34715a00e7c3efce41d1d0
|
[
"MIT"
] | null | null | null |
import numpy as np
from math import sin, cos, sqrt
from math_utils import *
class Base:
    """Base segment of the soft-robot kinematics model. (Original docstring: Japanese for "base".)"""

    # Coefficients of the modal homogeneous transformation matrix.
    # NOTE(review): these look like fixed denominators of a truncated
    # series expansion of the exact kinematics — confirm against the
    # symbolic derivation.
    c1 = 837019575
    c2 = 4133430
    c3 = 32805
    c4 = 486
    c5 = 18
    c6 = 55801305
    c7 = 688905
    c8 = 3645
    c9 = 81
    c10 = 279006525
    c11 = 1377810
    c12 = 10935
    c13 = 162
    c14 = 243
    c15 = 2066715
    r = 0.0125  # presumably the actuator placement radius [m] — TODO confirm
    L0 = 0.15  # presumably the natural segment length [m] — TODO confirm
    sq3 = sqrt(3)  # cached sqrt(3), used throughout the formulas below
    def calc_P(self, q, xi):
        """Linearized map from actuator space to task space (forward kinematics).

        (Original docstring in Japanese; translated.)
        q:  3x1 array of actuator lengths; l1, l2, l3 are read from q[i,0].
        xi: expansion parameter along the segment.
        Returns the position as a 3x1 np.array([[x, y, z]]).T.
        """
        l1 = q[0,0]
        l2 = q[1,0]
        l3 = q[2,0]
        # Common sub-expressions of the series expansion.
        A1 = l1**2 + l2**2 + l3**2 - l1*l2 - l1*l3 - l2*l3
        A2 = 2*l1 - l2 - l3
        A3 = l2 - l3
        A4 = 3*self.L0 + l1 + l2 + l3
        x = -(A2 * A1**4 * A4 * xi**10) / ((self.c1 * self.r**9)) + \
            (A2 * A1**3 * A4 * xi**8) / (self.c2 * self.r**7) - \
            (A2 * A1**2 * A4 * xi**6) / (self.c3 * self.r**5) + \
            (A2 * A1 * A4 * xi**4) / (self.c4 * self.r**3) - \
            (A2 * A4 * xi**2) / (self.c5 * self.r)
        y = -(self.sq3 * A4 * A3 * A1**4 * xi**10) / (self.c1 * self.r**9) + \
            (self.sq3 * A4 * A3 * A1**3 * xi**8) / (self.c2 * self.r**7) - \
            (self.sq3 * A4 * A3 * A1**2 * xi**6) / (self.c3 * self.r**5) + \
            (self.sq3 * A4 * A1 * A2 * xi**4) / (self.c4 * self.r**3) - \
            (self.sq3 * A4 * A3 * xi**2) / (self.c5 * self.r)
        z = (2 * A1**4 * A4 * xi**9) / (self.c6 * self.r**8) - \
            (4 * A1**3 * A4 * xi**7) / (self.c7 * self.r**6) + \
            (2 * A1**2 * A4 * xi**5) / (self.c8 * self.r**4) - \
            (2 * A1 *A4 * xi**3) / (self.c9 * self.r**2) + \
            (A4 * xi) / 3
        return np.array([[x, y, z]]).T
    def calc_R(self, q, xi):
        """Linearized rotation matrix of the segment frame.

        (Original docstring in Japanese; translated.)
        q:  3x1 array of actuator lengths; l1, l2, l3 are read from q[i,0].
        xi: expansion parameter along the segment.
        Returns a 3x3 np.array; the lower triangle is filled from the
        upper one (R21 = R12, R31 = -R13, R32 = -R23).
        """
        l1 = q[0,0]
        l2 = q[1,0]
        l3 = q[2,0]
        # Common sub-expressions of the series expansion.
        A1 = l1**2 + l2**2 + l3**2 - l1*l2 - l1*l3 - l2*l3
        A2 = 2*l1 - l2 - l3
        A3 = l2 - l3
        A4 = 3*self.L0 + l1 + l2 + l3
        # NOTE(review): the xi**8 term below divides by c1 while the
        # analogous terms in R12/calc_P use c2 — possibly a transcription
        # slip in the generated expression; confirm against the derivation.
        R11 = 1 - (A2**2 * A1**4 * xi**10) / (self.c1 * self.r**10) + \
            (A2**2 * A1**3 * xi**8) / (self.c1 * self.r**8) - \
            (A2**2 * A1**2 * xi**6) / (self.c3 * self.r**6) + \
            (A1 * A2**2 * xi**4) / (self.c4 * self.r**4) - \
            (A2**2 * xi**2) / (self.c5 * self.r**2)
        R12 = (self.sq3 * A2 * A3 * A1**4 * xi**10) / (self.c1 * self.r**10) + \
            (self.sq3 * A2 * A3 * A1**3 * xi**8) / (self.c2 * self.r**8) - \
            (self.sq3 * A2 * A3 * A1**2 * xi**6) / (self.c3 * self.r**6) + \
            (self.sq3 * A2 * A3 * A1 * xi**4) / (self.c4 * self.r**4) - \
            (self.sq3 * A2 * A3 * xi**2) / (self.c5 * self.r**2)
        R13 = -(2 * A2 * A1**4 * xi**9) / (self.c6 * self.r**9) + \
            (4 * A2 * A1**3 * xi**7) / (self.c7 * self.r**7) - \
            (2 * A2 * A1**2 * xi**5) / (self.c8 * self.r**5) + \
            (2 * A2 * A1 * xi**3) / (self.c9 * self.r**3) - \
            (A2 * xi) / (3 * self.r)
        R22 = 1 - (A3**2 * A1**4 * xi**10) / (self.c10 * self.r**10) + \
            (A3**2 * A1**3 * xi**8) / (self.c11 * self.r**8) - \
            (A3**2 * A1**2 * xi**6) / (self.c12 * self.r**6) + \
            (A3**2 * A1 * xi**4) / (self.c13 * self.r**4) - \
            (A3**2 * xi**2) / (6 * self.r**2)
        R23 = -(2*self.sq3 * A3 * A1**4 * xi**9) / (self.c6 * self.r**9) + \
            (4*self.sq3 * A3 * A1**3 * xi**7) / (self.c7 * self.r**7) - \
            (2*self.sq3 * A3 * A1**2 * xi**5) / (self.c8 * self.r**5) + \
            (2*self.sq3 * A3 * A1 * xi**3) / (self.c9 * self.r**3) - \
            (self.sq3 * A3 * xi) / (3 * self.r)
        R33 = 1 - (2 * xi**2 * A1) / (9 * self.r**2) + \
            (2 * xi**4 * A1**2) / (self.c14 * self.r**4) - \
            (4 * xi**6 * A1**3) / (self.c3 * self.r**6) + \
            (2 * xi**8 * A1**4) / (self.c15 * self.r**8) - \
            (4 * xi**10 * A1**5) / (self.c1 * self.r**10)
        # Remaining entries follow from the (anti)symmetry of the matrix.
        R21 = R12
        R31 = -R13
        R32 = -R23
        return np.array([
            [R11, R12, R13],
            [R21, R22, R23],
            [R31, R32, R33],
        ])
    def calc_MHTM(self, q, xi):
        """Modal homogeneous transformation matrix.

        (Original docstring in Japanese; translated: the linearized
        Homogeneous Transformation Matrix.)
        Assembles the 4x4 HTM from the linearized rotation calc_R and
        position calc_P, with the standard [0 0 0 1] bottom row.
        """
        return np.block([
            [self.calc_R(q, xi), self.calc_P(q, xi)],
            [np.zeros((1, 3)), np.eye(1)],
        ])
    def calc_dPdl1(self, q, xi):
        """Presumably the partial derivative of the position P w.r.t. l1
        (matches the naming and the 3x1 shape of calc_P) — confirm against
        the symbolic derivation. The expressions below look machine-
        generated and are left verbatim.

        q:  3x1 array of actuator lengths; l1, l2, l3 are read from q[i,0].
        xi: expansion parameter along the segment.
        Returns a 3x1 np.array (column vector via .T).
        """
        l1 = q[0,0]
        l2 = q[1,0]
        l3 = q[2,0]
        return np.array([[
            -xi**2*(2*l1 - l2 - l3)/(self.c5*self.r) - 2*xi**2*(3*self.L0 + l1 + l2 + l3)/(self.c5*self.r) + xi**4*(2*l1 - l2 - l3)**2*(3*self.L0 + l1 + l2 + l3)/(self.c4*self.r**3) + xi**4*(2*l1 - l2 - l3)*(l1**2 - l1*l2 - l1*l3 + 2*l2**2 - l2*l3)/(self.c4*self.r**3) + 2*xi**4*(3*self.L0 + l1 + l2 + l3)*(l1**2 - l1*l2 - l1*l3 + 2*l2**2 - l2*l3)/(self.c4*self.r**3) - xi**6*(2*l1 - l2 - l3)*(4*l1 - 2*l2 - 2*l3)*(3*self.L0 + l1 + l2 + l3)*(l1**2 - l1*l2 - l1*l3 + 2*l2**2 - l2*l3)/(self.c3*self.r**5) - xi**6*(2*l1 - l2 - l3)*(l1**2 - l1*l2 - l1*l3 + 2*l2**2 - l2*l3)**2/(self.c3*self.r**5) - 2*xi**6*(3*self.L0 + l1 + l2 + l3)*(l1**2 - l1*l2 - l1*l3 + 2*l2**2 - l2*l3)**2/(self.c3*self.r**5) + xi**8*(2*l1 - l2 - l3)*(6*l1 - 3*l2 - 3*l3)*(3*self.L0 + l1 + l2 + l3)*(l1**2 - l1*l2 - l1*l3 + 2*l2**2 - l2*l3)**2/(self.c2*self.r**7) + xi**8*(2*l1 - l2 - l3)*(l1**2 - l1*l2 - l1*l3 + 2*l2**2 - l2*l3)**3/(self.c2*self.r**7) + 2*xi**8*(3*self.L0 + l1 + l2 + l3)*(l1**2 - l1*l2 - l1*l3 + 2*l2**2 - l2*l3)**3/(self.c2*self.r**7) - xi**10*(2*l1 - l2 - l3)*(8*l1 - 4*l2 - 4*l3)*(3*self.L0 + l1 + l2 + l3)*(l1**2 - l1*l2 - l1*l3 + 2*l2**2 - l2*l3)**3/(self.c1*self.r**9) - xi**10*(2*l1 - l2 - l3)*(l1**2 - l1*l2 - l1*l3 + 2*l2**2 - l2*l3)**4/(self.c1*self.r**9) - 2*xi**10*(3*self.L0 + l1 + l2 + l3)*(l1**2 - l1*l2 - l1*l3 + 2*l2**2 - l2*l3)**4/(self.c1*self.r**9),
            -self.sq3*xi**2*(l2 - l3)/(self.c5*self.r) + self.sq3*xi**4*(2*l1 - l2 - l3)**2*(3*self.L0 + l1 + l2 + l3)/(self.c4*self.r**3) + self.sq3*xi**4*(2*l1 - l2 - l3)*(l1**2 - l1*l2 - l1*l3 + 2*l2**2 - l2*l3)/(self.c4*self.r**3) + 2*self.sq3*xi**4*(3*self.L0 + l1 + l2 + l3)*(l1**2 - l1*l2 - l1*l3 + 2*l2**2 - l2*l3)/(self.c4*self.r**3) - self.sq3*xi**6*(l2 - l3)*(4*l1 - 2*l2 - 2*l3)*(3*self.L0 + l1 + l2 + l3)*(l1**2 - l1*l2 - l1*l3 + 2*l2**2 - l2*l3)/(self.c3*self.r**5) - self.sq3*xi**6*(l2 - l3)*(l1**2 - l1*l2 - l1*l3 + 2*l2**2 - l2*l3)**2/(self.c3*self.r**5) + self.sq3*xi**8*(l2 - l3)*(6*l1 - 3*l2 - 3*l3)*(3*self.L0 + l1 + l2 + l3)*(l1**2 - l1*l2 - l1*l3 + 2*l2**2 - l2*l3)**2/(self.c2*self.r**7) + self.sq3*xi**8*(l2 - l3)*(l1**2 - l1*l2 - l1*l3 + 2*l2**2 - l2*l3)**3/(self.c2*self.r**7) - self.sq3*xi**10*(l2 - l3)*(8*l1 - 4*l2 - 4*l3)*(3*self.L0 + l1 + l2 + l3)*(l1**2 - l1*l2 - l1*l3 + 2*l2**2 - l2*l3)**3/(self.c1*self.r**9) - self.sq3*xi**10*(l2 - l3)*(l1**2 - l1*l2 - l1*l3 + 2*l2**2 - l2*l3)**4/(self.c1*self.r**9),
            xi/3 - xi**3*(4*l1 - 2*l2 - 2*l3)*(3*self.L0 + l1 + l2 + l3)/(self.c9*self.r**2) - xi**3*(2*l1**2 - 2*l1*l2 - 2*l1*l3 + 4*l2**2 - 2*l2*l3)/(self.c9*self.r**2) + 2*xi**5*(4*l1 - 2*l2 - 2*l3)*(3*self.L0 + l1 + l2 + l3)*(l1**2 - l1*l2 - l1*l3 + 2*l2**2 - l2*l3)/(self.c8*self.r**4) + 2*xi**5*(l1**2 - l1*l2 - l1*l3 + 2*l2**2 - l2*l3)**2/(self.c8*self.r**4) - 4*xi**7*(6*l1 - 3*l2 - 3*l3)*(3*self.L0 + l1 + l2 + l3)*(l1**2 - l1*l2 - l1*l3 + 2*l2**2 - l2*l3)**2/(self.c7*self.r**6) - 4*xi**7*(l1**2 - l1*l2 - l1*l3 + 2*l2**2 - l2*l3)**3/(self.c7*self.r**6) + 2*xi**9*(8*l1 - 4*l2 - 4*l3)*(3*self.L0 + l1 + l2 + l3)*(l1**2 - l1*l2 - l1*l3 + 2*l2**2 - l2*l3)**3/(self.c6*self.r**8) + 2*xi**9*(l1**2 - l1*l2 - l1*l3 + 2*l2**2 - l2*l3)**4/(self.c6*self.r**8),
        ]]).T
    def calc_dPdl2(self, q, xi):
        """Presumably the partial derivative of the position P w.r.t. l2
        (matches the naming and the 3x1 shape of calc_P) — confirm against
        the symbolic derivation. The expressions below look machine-
        generated and are left verbatim.

        q:  3x1 array of actuator lengths; l1, l2, l3 are read from q[i,0].
        xi: expansion parameter along the segment.
        Returns a 3x1 np.array (column vector via .T).
        """
        l1 = q[0,0]
        l2 = q[1,0]
        l3 = q[2,0]
        return np.array([[
            -xi**2*(2*l1 - l2 - l3)/(self.c5*self.r) + xi**2*(3*self.L0 + l1 + l2 + l3)/(self.c5*self.r) + xi**4*(-l1 + 4*l2 - l3)*(2*l1 - l2 - l3)*(3*self.L0 + l1 + l2 + l3)/(self.c4*self.r**3) + xi**4*(2*l1 - l2 - l3)*(l1**2 - l1*l2 - l1*l3 + 2*l2**2 - l2*l3)/(self.c4*self.r**3) - xi**4*(3*self.L0 + l1 + l2 + l3)*(l1**2 - l1*l2 - l1*l3 + 2*l2**2 - l2*l3)/(self.c4*self.r**3) - xi**6*(-2*l1 + 8*l2 - 2*l3)*(2*l1 - l2 - l3)*(3*self.L0 + l1 + l2 + l3)*(l1**2 - l1*l2 - l1*l3 + 2*l2**2 - l2*l3)/(self.c3*self.r**5) - xi**6*(2*l1 - l2 - l3)*(l1**2 - l1*l2 - l1*l3 + 2*l2**2 - l2*l3)**2/(self.c3*self.r**5) + xi**6*(3*self.L0 + l1 + l2 + l3)*(l1**2 - l1*l2 - l1*l3 + 2*l2**2 - l2*l3)**2/(self.c3*self.r**5) + xi**8*(-3*l1 + 12*l2 - 3*l3)*(2*l1 - l2 - l3)*(3*self.L0 + l1 + l2 + l3)*(l1**2 - l1*l2 - l1*l3 + 2*l2**2 - l2*l3)**2/(self.c2*self.r**7) + xi**8*(2*l1 - l2 - l3)*(l1**2 - l1*l2 - l1*l3 + 2*l2**2 - l2*l3)**3/(self.c2*self.r**7) - xi**8*(3*self.L0 + l1 + l2 + l3)*(l1**2 - l1*l2 - l1*l3 + 2*l2**2 - l2*l3)**3/(self.c2*self.r**7) - xi**10*(-4*l1 + 16*l2 - 4*l3)*(2*l1 - l2 - l3)*(3*self.L0 + l1 + l2 + l3)*(l1**2 - l1*l2 - l1*l3 + 2*l2**2 - l2*l3)**3/(self.c1*self.r**9) - xi**10*(2*l1 - l2 - l3)*(l1**2 - l1*l2 - l1*l3 + 2*l2**2 - l2*l3)**4/(self.c1*self.r**9) + xi**10*(3*self.L0 + l1 + l2 + l3)*(l1**2 - l1*l2 - l1*l3 + 2*l2**2 - l2*l3)**4/(self.c1*self.r**9),
            -self.sq3*xi**2*(l2 - l3)/(self.c5*self.r) - self.sq3*xi**2*(3*self.L0 + l1 + l2 + l3)/(self.c5*self.r) + self.sq3*xi**4*(-l1 + 4*l2 - l3)*(2*l1 - l2 - l3)*(3*self.L0 + l1 + l2 + l3)/(self.c4*self.r**3) + self.sq3*xi**4*(2*l1 - l2 - l3)*(l1**2 - l1*l2 - l1*l3 + 2*l2**2 - l2*l3)/(self.c4*self.r**3) - self.sq3*xi**4*(3*self.L0 + l1 + l2 + l3)*(l1**2 - l1*l2 - l1*l3 + 2*l2**2 - l2*l3)/(self.c4*self.r**3) - self.sq3*xi**6*(l2 - l3)*(-2*l1 + 8*l2 - 2*l3)*(3*self.L0 + l1 + l2 + l3)*(l1**2 - l1*l2 - l1*l3 + 2*l2**2 - l2*l3)/(self.c3*self.r**5) - self.sq3*xi**6*(l2 - l3)*(l1**2 - l1*l2 - l1*l3 + 2*l2**2 - l2*l3)**2/(self.c3*self.r**5) - self.sq3*xi**6*(3*self.L0 + l1 + l2 + l3)*(l1**2 - l1*l2 - l1*l3 + 2*l2**2 - l2*l3)**2/(self.c3*self.r**5) + self.sq3*xi**8*(l2 - l3)*(-3*l1 + 12*l2 - 3*l3)*(3*self.L0 + l1 + l2 + l3)*(l1**2 - l1*l2 - l1*l3 + 2*l2**2 - l2*l3)**2/(self.c2*self.r**7) + self.sq3*xi**8*(l2 - l3)*(l1**2 - l1*l2 - l1*l3 + 2*l2**2 - l2*l3)**3/(self.c2*self.r**7) + self.sq3*xi**8*(3*self.L0 + l1 + l2 + l3)*(l1**2 - l1*l2 - l1*l3 + 2*l2**2 - l2*l3)**3/(self.c2*self.r**7) - self.sq3*xi**10*(l2 - l3)*(-4*l1 + 16*l2 - 4*l3)*(3*self.L0 + l1 + l2 + l3)*(l1**2 - l1*l2 - l1*l3 + 2*l2**2 - l2*l3)**3/(self.c1*self.r**9) - self.sq3*xi**10*(l2 - l3)*(l1**2 - l1*l2 - l1*l3 + 2*l2**2 - l2*l3)**4/(self.c1*self.r**9) - self.sq3*xi**10*(3*self.L0 + l1 + l2 + l3)*(l1**2 - l1*l2 - l1*l3 + 2*l2**2 - l2*l3)**4/(self.c1*self.r**9),
            xi/3 - xi**3*(-2*l1 + 8*l2 - 2*l3)*(3*self.L0 + l1 + l2 + l3)/(self.c9*self.r**2) - xi**3*(2*l1**2 - 2*l1*l2 - 2*l1*l3 + 4*l2**2 - 2*l2*l3)/(self.c9*self.r**2) + 2*xi**5*(-2*l1 + 8*l2 - 2*l3)*(3*self.L0 + l1 + l2 + l3)*(l1**2 - l1*l2 - l1*l3 + 2*l2**2 - l2*l3)/(self.c8*self.r**4) + 2*xi**5*(l1**2 - l1*l2 - l1*l3 + 2*l2**2 - l2*l3)**2/(self.c8*self.r**4) - 4*xi**7*(-3*l1 + 12*l2 - 3*l3)*(3*self.L0 + l1 + l2 + l3)*(l1**2 - l1*l2 - l1*l3 + 2*l2**2 - l2*l3)**2/(self.c7*self.r**6) - 4*xi**7*(l1**2 - l1*l2 - l1*l3 + 2*l2**2 - l2*l3)**3/(self.c7*self.r**6) + 2*xi**9*(-4*l1 + 16*l2 - 4*l3)*(3*self.L0 + l1 + l2 + l3)*(l1**2 - l1*l2 - l1*l3 + 2*l2**2 - l2*l3)**3/(self.c6*self.r**8) + 2*xi**9*(l1**2 - l1*l2 - l1*l3 + 2*l2**2 - l2*l3)**4/(self.c6*self.r**8),
        ]]).T
    def calc_dPdl3(self, q, xi):
        """Presumably the partial derivative of the position P w.r.t. l3
        (matches the naming and the 3x1 shape of calc_P) — confirm against
        the symbolic derivation. The expressions below look machine-
        generated and are left verbatim.

        q:  3x1 array of actuator lengths; l1, l2, l3 are read from q[i,0].
        xi: expansion parameter along the segment.
        Returns a 3x1 np.array (column vector via .T).
        """
        l1 = q[0,0]
        l2 = q[1,0]
        l3 = q[2,0]
        return np.array([[
            -xi**2*(2*l1 - l2 - l3)/(self.c5*self.r) + xi**2*(3*self.L0 + l1 + l2 + l3)/(self.c5*self.r) + xi**4*(-l1 - l2)*(2*l1 - l2 - l3)*(3*self.L0 + l1 + l2 + l3)/(self.c4*self.r**3) + xi**4*(2*l1 - l2 - l3)*(l1**2 - l1*l2 - l1*l3 + 2*l2**2 - l2*l3)/(self.c4*self.r**3) - xi**4*(3*self.L0 + l1 + l2 + l3)*(l1**2 - l1*l2 - l1*l3 + 2*l2**2 - l2*l3)/(self.c4*self.r**3) - xi**6*(-2*l1 - 2*l2)*(2*l1 - l2 - l3)*(3*self.L0 + l1 + l2 + l3)*(l1**2 - l1*l2 - l1*l3 + 2*l2**2 - l2*l3)/(self.c3*self.r**5) - xi**6*(2*l1 - l2 - l3)*(l1**2 - l1*l2 - l1*l3 + 2*l2**2 - l2*l3)**2/(self.c3*self.r**5) + xi**6*(3*self.L0 + l1 + l2 + l3)*(l1**2 - l1*l2 - l1*l3 + 2*l2**2 - l2*l3)**2/(self.c3*self.r**5) + xi**8*(-3*l1 - 3*l2)*(2*l1 - l2 - l3)*(3*self.L0 + l1 + l2 + l3)*(l1**2 - l1*l2 - l1*l3 + 2*l2**2 - l2*l3)**2/(self.c2*self.r**7) + xi**8*(2*l1 - l2 - l3)*(l1**2 - l1*l2 - l1*l3 + 2*l2**2 - l2*l3)**3/(self.c2*self.r**7) - xi**8*(3*self.L0 + l1 + l2 + l3)*(l1**2 - l1*l2 - l1*l3 + 2*l2**2 - l2*l3)**3/(self.c2*self.r**7) - xi**10*(-4*l1 - 4*l2)*(2*l1 - l2 - l3)*(3*self.L0 + l1 + l2 + l3)*(l1**2 - l1*l2 - l1*l3 + 2*l2**2 - l2*l3)**3/(self.c1*self.r**9) - xi**10*(2*l1 - l2 - l3)*(l1**2 - l1*l2 - l1*l3 + 2*l2**2 - l2*l3)**4/(self.c1*self.r**9) + xi**10*(3*self.L0 + l1 + l2 + l3)*(l1**2 - l1*l2 - l1*l3 + 2*l2**2 - l2*l3)**4/(self.c1*self.r**9),
            -self.sq3*xi**2*(l2 - l3)/(self.c5*self.r) + self.sq3*xi**2*(3*self.L0 + l1 + l2 + l3)/(self.c5*self.r) + self.sq3*xi**4*(-l1 - l2)*(2*l1 - l2 - l3)*(3*self.L0 + l1 + l2 + l3)/(self.c4*self.r**3) + self.sq3*xi**4*(2*l1 - l2 - l3)*(l1**2 - l1*l2 - l1*l3 + 2*l2**2 - l2*l3)/(self.c4*self.r**3) - self.sq3*xi**4*(3*self.L0 + l1 + l2 + l3)*(l1**2 - l1*l2 - l1*l3 + 2*l2**2 - l2*l3)/(self.c4*self.r**3) - self.sq3*xi**6*(-2*l1 - 2*l2)*(l2 - l3)*(3*self.L0 + l1 + l2 + l3)*(l1**2 - l1*l2 - l1*l3 + 2*l2**2 - l2*l3)/(self.c3*self.r**5) - self.sq3*xi**6*(l2 - l3)*(l1**2 - l1*l2 - l1*l3 + 2*l2**2 - l2*l3)**2/(self.c3*self.r**5) + self.sq3*xi**6*(3*self.L0 + l1 + l2 + l3)*(l1**2 - l1*l2 - l1*l3 + 2*l2**2 - l2*l3)**2/(self.c3*self.r**5) + self.sq3*xi**8*(-3*l1 - 3*l2)*(l2 - l3)*(3*self.L0 + l1 + l2 + l3)*(l1**2 - l1*l2 - l1*l3 + 2*l2**2 - l2*l3)**2/(self.c2*self.r**7) + self.sq3*xi**8*(l2 - l3)*(l1**2 - l1*l2 - l1*l3 + 2*l2**2 - l2*l3)**3/(self.c2*self.r**7) - self.sq3*xi**8*(3*self.L0 + l1 + l2 + l3)*(l1**2 - l1*l2 - l1*l3 + 2*l2**2 - l2*l3)**3/(self.c2*self.r**7) - self.sq3*xi**10*(-4*l1 - 4*l2)*(l2 - l3)*(3*self.L0 + l1 + l2 + l3)*(l1**2 - l1*l2 - l1*l3 + 2*l2**2 - l2*l3)**3/(self.c1*self.r**9) - self.sq3*xi**10*(l2 - l3)*(l1**2 - l1*l2 - l1*l3 + 2*l2**2 - l2*l3)**4/(self.c1*self.r**9) + self.sq3*xi**10*(3*self.L0 + l1 + l2 + l3)*(l1**2 - l1*l2 - l1*l3 + 2*l2**2 - l2*l3)**4/(self.c1*self.r**9),
            xi/3 - xi**3*(-2*l1 - 2*l2)*(3*self.L0 + l1 + l2 + l3)/(self.c9*self.r**2) - xi**3*(2*l1**2 - 2*l1*l2 - 2*l1*l3 + 4*l2**2 - 2*l2*l3)/(self.c9*self.r**2) + 2*xi**5*(-2*l1 - 2*l2)*(3*self.L0 + l1 + l2 + l3)*(l1**2 - l1*l2 - l1*l3 + 2*l2**2 - l2*l3)/(self.c8*self.r**4) + 2*xi**5*(l1**2 - l1*l2 - l1*l3 + 2*l2**2 - l2*l3)**2/(self.c8*self.r**4) - 4*xi**7*(-3*l1 - 3*l2)*(3*self.L0 + l1 + l2 + l3)*(l1**2 - l1*l2 - l1*l3 + 2*l2**2 - l2*l3)**2/(self.c7*self.r**6) - 4*xi**7*(l1**2 - l1*l2 - l1*l3 + 2*l2**2 - l2*l3)**3/(self.c7*self.r**6) + 2*xi**9*(-4*l1 - 4*l2)*(3*self.L0 + l1 + l2 + l3)*(l1**2 - l1*l2 - l1*l3 + 2*l2**2 - l2*l3)**3/(self.c6*self.r**8) + 2*xi**9*(l1**2 - l1*l2 - l1*l3 + 2*l2**2 - l2*l3)**4/(self.c6*self.r**8)
        ]]).T
def calc_dRdl1(self, q, xi):
    """Partial derivative of the section rotation matrix R w.r.t. l1.

    Closed-form, machine-generated (sympy-style) expression; do not edit
    the polynomial terms by hand -- regenerate them instead.

    Parameters
    ----------
    q : np.ndarray
        (3, 1) column vector of the section's actuator lengths [l1, l2, l3].
    xi : float
        Normalized arc-length coordinate along the section.

    Returns
    -------
    np.ndarray
        3x3 matrix dR/dl1 evaluated at (q, xi); uses the series
        coefficients self.c1..self.c15, radius self.r and self.sq3.
    """
    l1 = q[0,0]
    l2 = q[1,0]
    l3 = q[2,0]
    # NOTE(review): in the first row the xi**8 terms divide by self.c1 while
    # the symmetric off-diagonal entries use self.c2 at the same power --
    # confirm against the generating script.
    return np.array([
        [
            -xi**2*(8*l1 - 4*l2 - 4*l3)/(self.c5*self.r**2) + xi**4*(2*l1 - l2 - l3)**3/(self.c4*self.r**4) + xi**4*(8*l1 - 4*l2 - 4*l3)*(l1**2 - l1*l2 - l1*l3 + l2**2 - l2*l3 + l3**2)/(self.c4*self.r**4) - xi**6*(2*l1 - l2 - l3)**2*(4*l1 - 2*l2 - 2*l3)*(l1**2 - l1*l2 - l1*l3 + l2**2 - l2*l3 + l3**2)/(self.c3*self.r**6) - xi**6*(8*l1 - 4*l2 - 4*l3)*(l1**2 - l1*l2 - l1*l3 + l2**2 - l2*l3 + l3**2)**2/(self.c3*self.r**6) + xi**8*(2*l1 - l2 - l3)**2*(6*l1 - 3*l2 - 3*l3)*(l1**2 - l1*l2 - l1*l3 + l2**2 - l2*l3 + l3**2)**2/(self.c1*self.r**8) + xi**8*(8*l1 - 4*l2 - 4*l3)*(l1**2 - l1*l2 - l1*l3 + l2**2 - l2*l3 + l3**2)**3/(self.c1*self.r**8) - xi**10*(2*l1 - l2 - l3)**2*(8*l1 - 4*l2 - 4*l3)*(l1**2 - l1*l2 - l1*l3 + l2**2 - l2*l3 + l3**2)**3/(self.c1*self.r**10) - xi**10*(8*l1 - 4*l2 - 4*l3)*(l1**2 - l1*l2 - l1*l3 + l2**2 - l2*l3 + l3**2)**4/(self.c1*self.r**10),
            -2*self.sq3*xi**2*(l2 - l3)/(self.c5*self.r**2) + self.sq3*xi**4*(l2 - l3)*(2*l1 - l2 - l3)**2/(self.c4*self.r**4) + 2*self.sq3*xi**4*(l2 - l3)*(l1**2 - l1*l2 - l1*l3 + l2**2 - l2*l3 + l3**2)/(self.c4*self.r**4) - self.sq3*xi**6*(l2 - l3)*(2*l1 - l2 - l3)*(4*l1 - 2*l2 - 2*l3)*(l1**2 - l1*l2 - l1*l3 + l2**2 - l2*l3 + l3**2)/(self.c3*self.r**6) - 2*self.sq3*xi**6*(l2 - l3)*(l1**2 - l1*l2 - l1*l3 + l2**2 - l2*l3 + l3**2)**2/(self.c3*self.r**6) + self.sq3*xi**8*(l2 - l3)*(2*l1 - l2 - l3)*(6*l1 - 3*l2 - 3*l3)*(l1**2 - l1*l2 - l1*l3 + l2**2 - l2*l3 + l3**2)**2/(self.c2*self.r**8) + 2*self.sq3*xi**8*(l2 - l3)*(l1**2 - l1*l2 - l1*l3 + l2**2 - l2*l3 + l3**2)**3/(self.c2*self.r**8) + self.sq3*xi**10*(l2 - l3)*(2*l1 - l2 - l3)*(8*l1 - 4*l2 - 4*l3)*(l1**2 - l1*l2 - l1*l3 + l2**2 - l2*l3 + l3**2)**3/(self.c1*self.r**10) + 2*self.sq3*xi**10*(l2 - l3)*(l1**2 - l1*l2 - l1*l3 + l2**2 - l2*l3 + l3**2)**4/(self.c1*self.r**10),
            -2*xi/(3*self.r) + xi**3*(2*l1 - l2 - l3)*(4*l1 - 2*l2 - 2*l3)/(self.c9*self.r**3) + 4*xi**3*(l1**2 - l1*l2 - l1*l3 + l2**2 - l2*l3 + l3**2)/(self.c9*self.r**3) - xi**5*(4*l1 - 2*l2 - 2*l3)**2*(l1**2 - l1*l2 - l1*l3 + l2**2 - l2*l3 + l3**2)/(self.c8*self.r**5) - 4*xi**5*(l1**2 - l1*l2 - l1*l3 + l2**2 - l2*l3 + l3**2)**2/(self.c8*self.r**5) + xi**7*(6*l1 - 3*l2 - 3*l3)*(8*l1 - 4*l2 - 4*l3)*(l1**2 - l1*l2 - l1*l3 + l2**2 - l2*l3 + l3**2)**2/(self.c7*self.r**7) + 8*xi**7*(l1**2 - l1*l2 - l1*l3 + l2**2 - l2*l3 + l3**2)**3/(self.c7*self.r**7) - xi**9*(4*l1 - 2*l2 - 2*l3)*(8*l1 - 4*l2 - 4*l3)*(l1**2 - l1*l2 - l1*l3 + l2**2 - l2*l3 + l3**2)**3/(self.c6*self.r**9) - 4*xi**9*(l1**2 - l1*l2 - l1*l3 + l2**2 - l2*l3 + l3**2)**4/(self.c6*self.r**9)
        ],
        [
            -2*self.sq3*xi**2*(l2 - l3)/(self.c5*self.r**2) + self.sq3*xi**4*(l2 - l3)*(2*l1 - l2 - l3)**2/(self.c4*self.r**4) + 2*self.sq3*xi**4*(l2 - l3)*(l1**2 - l1*l2 - l1*l3 + l2**2 - l2*l3 + l3**2)/(self.c4*self.r**4) - self.sq3*xi**6*(l2 - l3)*(2*l1 - l2 - l3)*(4*l1 - 2*l2 - 2*l3)*(l1**2 - l1*l2 - l1*l3 + l2**2 - l2*l3 + l3**2)/(self.c3*self.r**6) - 2*self.sq3*xi**6*(l2 - l3)*(l1**2 - l1*l2 - l1*l3 + l2**2 - l2*l3 + l3**2)**2/(self.c3*self.r**6) + self.sq3*xi**8*(l2 - l3)*(2*l1 - l2 - l3)*(6*l1 - 3*l2 - 3*l3)*(l1**2 - l1*l2 - l1*l3 + l2**2 - l2*l3 + l3**2)**2/(self.c2*self.r**8) + 2*self.sq3*xi**8*(l2 - l3)*(l1**2 - l1*l2 - l1*l3 + l2**2 - l2*l3 + l3**2)**3/(self.c2*self.r**8) + self.sq3*xi**10*(l2 - l3)*(2*l1 - l2 - l3)*(8*l1 - 4*l2 - 4*l3)*(l1**2 - l1*l2 - l1*l3 + l2**2 - l2*l3 + l3**2)**3/(self.c1*self.r**10) + 2*self.sq3*xi**10*(l2 - l3)*(l1**2 - l1*l2 - l1*l3 + l2**2 - l2*l3 + l3**2)**4/(self.c1*self.r**10),
            xi**4*(l2 - l3)**2*(2*l1 - l2 - l3)/(self.c13*self.r**4) - xi**6*(l2 - l3)**2*(4*l1 - 2*l2 - 2*l3)*(l1**2 - l1*l2 - l1*l3 + l2**2 - l2*l3 + l3**2)/(self.c12*self.r**6) + xi**8*(l2 - l3)**2*(6*l1 - 3*l2 - 3*l3)*(l1**2 - l1*l2 - l1*l3 + l2**2 - l2*l3 + l3**2)**2/(self.c11*self.r**8) - xi**10*(l2 - l3)**2*(8*l1 - 4*l2 - 4*l3)*(l1**2 - l1*l2 - l1*l3 + l2**2 - l2*l3 + l3**2)**3/(self.c10*self.r**10),
            2*self.sq3*xi**3*(l2 - l3)*(2*l1 - l2 - l3)/(self.c9*self.r**3) - 2*self.sq3*xi**5*(l2 - l3)*(4*l1 - 2*l2 - 2*l3)*(l1**2 - l1*l2 - l1*l3 + l2**2 - l2*l3 + l3**2)/(self.c8*self.r**5) + 4*self.sq3*xi**7*(l2 - l3)*(6*l1 - 3*l2 - 3*l3)*(l1**2 - l1*l2 - l1*l3 + l2**2 - l2*l3 + l3**2)**2/(self.c7*self.r**7) - 2*self.sq3*xi**9*(l2 - l3)*(8*l1 - 4*l2 - 4*l3)*(l1**2 - l1*l2 - l1*l3 + l2**2 - l2*l3 + l3**2)**3/(self.c6*self.r**9)
        ],
        [
            2*xi/(3*self.r) - xi**3*(2*l1 - l2 - l3)*(4*l1 - 2*l2 - 2*l3)/(self.c9*self.r**3) - 4*xi**3*(l1**2 - l1*l2 - l1*l3 + l2**2 - l2*l3 + l3**2)/(self.c9*self.r**3) + xi**5*(4*l1 - 2*l2 - 2*l3)**2*(l1**2 - l1*l2 - l1*l3 + l2**2 - l2*l3 + l3**2)/(self.c8*self.r**5) + 4*xi**5*(l1**2 - l1*l2 - l1*l3 + l2**2 - l2*l3 + l3**2)**2/(self.c8*self.r**5) - xi**7*(6*l1 - 3*l2 - 3*l3)*(8*l1 - 4*l2 - 4*l3)*(l1**2 - l1*l2 - l1*l3 + l2**2 - l2*l3 + l3**2)**2/(self.c7*self.r**7) - 8*xi**7*(l1**2 - l1*l2 - l1*l3 + l2**2 - l2*l3 + l3**2)**3/(self.c7*self.r**7) + xi**9*(4*l1 - 2*l2 - 2*l3)*(8*l1 - 4*l2 - 4*l3)*(l1**2 - l1*l2 - l1*l3 + l2**2 - l2*l3 + l3**2)**3/(self.c6*self.r**9) + 4*xi**9*(l1**2 - l1*l2 - l1*l3 + l2**2 - l2*l3 + l3**2)**4/(self.c6*self.r**9),
            -2*self.sq3*xi**3*(l2 - l3)*(2*l1 - l2 - l3)/(self.c9*self.r**3) + 2*self.sq3*xi**5*(l2 - l3)*(4*l1 - 2*l2 - 2*l3)*(l1**2 - l1*l2 - l1*l3 + l2**2 - l2*l3 + l3**2)/(self.c8*self.r**5) - 4*self.sq3*xi**7*(l2 - l3)*(6*l1 - 3*l2 - 3*l3)*(l1**2 - l1*l2 - l1*l3 + l2**2 - l2*l3 + l3**2)**2/(self.c7*self.r**7) + 2*self.sq3*xi**9*(l2 - l3)*(8*l1 - 4*l2 - 4*l3)*(l1**2 - l1*l2 - l1*l3 + l2**2 - l2*l3 + l3**2)**3/(self.c6*self.r**9),
            -2*xi**2*(2*l1 - l2 - l3)/(9*self.r**2) - 4*xi**6*(6*l1 - 3*l2 - 3*l3)*(l1**2 - l1*l2 - l1*l3 + l2**2 - l2*l3 + l3**2)**2/(self.c3*self.r**6) + 2*xi**8*(8*l1 - 4*l2 - 4*l3)*(l1**2 - l1*l2 - l1*l3 + l2**2 - l2*l3 + l3**2)**3/(self.c15*self.r**8) + 2*xi**4*(4*l1 - 2*l2 - 2*l3)*(l1**2 - l1*l2 - l1*l3 + l2**2 - l2*l3 + l3**2)/(self.c14*self.r**4) - 4*xi**10*(10*l1 - 5*l2 - 5*l3)*(l1**2 - l1*l2 - l1*l3 + l2**2 - l2*l3 + l3**2)**4/(self.c1*self.r**10)
        ]
    ])
def calc_dRdl2(self, q, xi):
    """Partial derivative of the section rotation matrix R w.r.t. l2.

    Closed-form, machine-generated (sympy-style) expression; do not edit
    the polynomial terms by hand -- regenerate them instead.

    Parameters
    ----------
    q : np.ndarray
        (3, 1) column vector of the section's actuator lengths [l1, l2, l3].
    xi : float
        Normalized arc-length coordinate along the section.

    Returns
    -------
    np.ndarray
        3x3 matrix dR/dl2 evaluated at (q, xi); uses the series
        coefficients self.c1..self.c15, radius self.r and self.sq3.
    """
    l1 = q[0,0]
    l2 = q[1,0]
    l3 = q[2,0]
    return np.array([
        [
            -xi**2*(-4*l1 + 2*l2 + 2*l3)/(self.c5*self.r**2) + xi**4*(-4*l1 + 2*l2 + 2*l3)*(l1**2 - l1*l2 - l1*l3 + l2**2 - l2*l3 + l3**2)/(self.c4*self.r**4) + xi**4*(-l1 + 2*l2 - l3)*(2*l1 - l2 - l3)**2/(self.c4*self.r**4) - xi**6*(-4*l1 + 2*l2 + 2*l3)*(l1**2 - l1*l2 - l1*l3 + l2**2 - l2*l3 + l3**2)**2/(self.c3*self.r**6) - xi**6*(-2*l1 + 4*l2 - 2*l3)*(2*l1 - l2 - l3)**2*(l1**2 - l1*l2 - l1*l3 + l2**2 - l2*l3 + l3**2)/(self.c3*self.r**6) + xi**8*(-4*l1 + 2*l2 + 2*l3)*(l1**2 - l1*l2 - l1*l3 + l2**2 - l2*l3 + l3**2)**3/(self.c1*self.r**8) + xi**8*(-3*l1 + 6*l2 - 3*l3)*(2*l1 - l2 - l3)**2*(l1**2 - l1*l2 - l1*l3 + l2**2 - l2*l3 + l3**2)**2/(self.c1*self.r**8) - xi**10*(-4*l1 + 2*l2 + 2*l3)*(l1**2 - l1*l2 - l1*l3 + l2**2 - l2*l3 + l3**2)**4/(self.c1*self.r**10) - xi**10*(-4*l1 + 8*l2 - 4*l3)*(2*l1 - l2 - l3)**2*(l1**2 - l1*l2 - l1*l3 + l2**2 - l2*l3 + l3**2)**3/(self.c1*self.r**10),
            self.sq3*xi**2*(l2 - l3)/(self.c5*self.r**2) - self.sq3*xi**2*(2*l1 - l2 - l3)/(self.c5*self.r**2) + self.sq3*xi**4*(l2 - l3)*(-l1 + 2*l2 - l3)*(2*l1 - l2 - l3)/(self.c4*self.r**4) - self.sq3*xi**4*(l2 - l3)*(l1**2 - l1*l2 - l1*l3 + l2**2 - l2*l3 + l3**2)/(self.c4*self.r**4) + self.sq3*xi**4*(2*l1 - l2 - l3)*(l1**2 - l1*l2 - l1*l3 + l2**2 - l2*l3 + l3**2)/(self.c4*self.r**4) - self.sq3*xi**6*(l2 - l3)*(-2*l1 + 4*l2 - 2*l3)*(2*l1 - l2 - l3)*(l1**2 - l1*l2 - l1*l3 + l2**2 - l2*l3 + l3**2)/(self.c3*self.r**6) + self.sq3*xi**6*(l2 - l3)*(l1**2 - l1*l2 - l1*l3 + l2**2 - l2*l3 + l3**2)**2/(self.c3*self.r**6) - self.sq3*xi**6*(2*l1 - l2 - l3)*(l1**2 - l1*l2 - l1*l3 + l2**2 - l2*l3 + l3**2)**2/(self.c3*self.r**6) + self.sq3*xi**8*(l2 - l3)*(-3*l1 + 6*l2 - 3*l3)*(2*l1 - l2 - l3)*(l1**2 - l1*l2 - l1*l3 + l2**2 - l2*l3 + l3**2)**2/(self.c2*self.r**8) - self.sq3*xi**8*(l2 - l3)*(l1**2 - l1*l2 - l1*l3 + l2**2 - l2*l3 + l3**2)**3/(self.c2*self.r**8) + self.sq3*xi**8*(2*l1 - l2 - l3)*(l1**2 - l1*l2 - l1*l3 + l2**2 - l2*l3 + l3**2)**3/(self.c2*self.r**8) + self.sq3*xi**10*(l2 - l3)*(-4*l1 + 8*l2 - 4*l3)*(2*l1 - l2 - l3)*(l1**2 - l1*l2 - l1*l3 + l2**2 - l2*l3 + l3**2)**3/(self.c1*self.r**10) - self.sq3*xi**10*(l2 - l3)*(l1**2 - l1*l2 - l1*l3 + l2**2 - l2*l3 + l3**2)**4/(self.c1*self.r**10) + self.sq3*xi**10*(2*l1 - l2 - l3)*(l1**2 - l1*l2 - l1*l3 + l2**2 - l2*l3 + l3**2)**4/(self.c1*self.r**10),
            xi/(3*self.r) + xi**3*(-l1 + 2*l2 - l3)*(4*l1 - 2*l2 - 2*l3)/(self.c9*self.r**3) - 2*xi**3*(l1**2 - l1*l2 - l1*l3 + l2**2 - l2*l3 + l3**2)/(self.c9*self.r**3) - xi**5*(-2*l1 + 4*l2 - 2*l3)*(4*l1 - 2*l2 - 2*l3)*(l1**2 - l1*l2 - l1*l3 + l2**2 - l2*l3 + l3**2)/(self.c8*self.r**5) + 2*xi**5*(l1**2 - l1*l2 - l1*l3 + l2**2 - l2*l3 + l3**2)**2/(self.c8*self.r**5) + xi**7*(-3*l1 + 6*l2 - 3*l3)*(8*l1 - 4*l2 - 4*l3)*(l1**2 - l1*l2 - l1*l3 + l2**2 - l2*l3 + l3**2)**2/(self.c7*self.r**7) - 4*xi**7*(l1**2 - l1*l2 - l1*l3 + l2**2 - l2*l3 + l3**2)**3/(self.c7*self.r**7) - xi**9*(-4*l1 + 8*l2 - 4*l3)*(4*l1 - 2*l2 - 2*l3)*(l1**2 - l1*l2 - l1*l3 + l2**2 - l2*l3 + l3**2)**3/(self.c6*self.r**9) + 2*xi**9*(l1**2 - l1*l2 - l1*l3 + l2**2 - l2*l3 + l3**2)**4/(self.c6*self.r**9),
        ],
        [
            self.sq3*xi**2*(l2 - l3)/(self.c5*self.r**2) - self.sq3*xi**2*(2*l1 - l2 - l3)/(self.c5*self.r**2) + self.sq3*xi**4*(l2 - l3)*(-l1 + 2*l2 - l3)*(2*l1 - l2 - l3)/(self.c4*self.r**4) - self.sq3*xi**4*(l2 - l3)*(l1**2 - l1*l2 - l1*l3 + l2**2 - l2*l3 + l3**2)/(self.c4*self.r**4) + self.sq3*xi**4*(2*l1 - l2 - l3)*(l1**2 - l1*l2 - l1*l3 + l2**2 - l2*l3 + l3**2)/(self.c4*self.r**4) - self.sq3*xi**6*(l2 - l3)*(-2*l1 + 4*l2 - 2*l3)*(2*l1 - l2 - l3)*(l1**2 - l1*l2 - l1*l3 + l2**2 - l2*l3 + l3**2)/(self.c3*self.r**6) + self.sq3*xi**6*(l2 - l3)*(l1**2 - l1*l2 - l1*l3 + l2**2 - l2*l3 + l3**2)**2/(self.c3*self.r**6) - self.sq3*xi**6*(2*l1 - l2 - l3)*(l1**2 - l1*l2 - l1*l3 + l2**2 - l2*l3 + l3**2)**2/(self.c3*self.r**6) + self.sq3*xi**8*(l2 - l3)*(-3*l1 + 6*l2 - 3*l3)*(2*l1 - l2 - l3)*(l1**2 - l1*l2 - l1*l3 + l2**2 - l2*l3 + l3**2)**2/(self.c2*self.r**8) - self.sq3*xi**8*(l2 - l3)*(l1**2 - l1*l2 - l1*l3 + l2**2 - l2*l3 + l3**2)**3/(self.c2*self.r**8) + self.sq3*xi**8*(2*l1 - l2 - l3)*(l1**2 - l1*l2 - l1*l3 + l2**2 - l2*l3 + l3**2)**3/(self.c2*self.r**8) + self.sq3*xi**10*(l2 - l3)*(-4*l1 + 8*l2 - 4*l3)*(2*l1 - l2 - l3)*(l1**2 - l1*l2 - l1*l3 + l2**2 - l2*l3 + l3**2)**3/(self.c1*self.r**10) - self.sq3*xi**10*(l2 - l3)*(l1**2 - l1*l2 - l1*l3 + l2**2 - l2*l3 + l3**2)**4/(self.c1*self.r**10) + self.sq3*xi**10*(2*l1 - l2 - l3)*(l1**2 - l1*l2 - l1*l3 + l2**2 - l2*l3 + l3**2)**4/(self.c1*self.r**10),
            -xi**2*(2*l2 - 2*l3)/(6*self.r**2) + xi**4*(l2 - l3)**2*(-l1 + 2*l2 - l3)/(self.c13*self.r**4) + xi**4*(2*l2 - 2*l3)*(l1**2 - l1*l2 - l1*l3 + l2**2 - l2*l3 + l3**2)/(self.c13*self.r**4) - xi**6*(l2 - l3)**2*(-2*l1 + 4*l2 - 2*l3)*(l1**2 - l1*l2 - l1*l3 + l2**2 - l2*l3 + l3**2)/(self.c12*self.r**6) - xi**6*(2*l2 - 2*l3)*(l1**2 - l1*l2 - l1*l3 + l2**2 - l2*l3 + l3**2)**2/(self.c12*self.r**6) + xi**8*(l2 - l3)**2*(-3*l1 + 6*l2 - 3*l3)*(l1**2 - l1*l2 - l1*l3 + l2**2 - l2*l3 + l3**2)**2/(self.c11*self.r**8) + xi**8*(2*l2 - 2*l3)*(l1**2 - l1*l2 - l1*l3 + l2**2 - l2*l3 + l3**2)**3/(self.c11*self.r**8) - xi**10*(l2 - l3)**2*(-4*l1 + 8*l2 - 4*l3)*(l1**2 - l1*l2 - l1*l3 + l2**2 - l2*l3 + l3**2)**3/(self.c10*self.r**10) - xi**10*(2*l2 - 2*l3)*(l1**2 - l1*l2 - l1*l3 + l2**2 - l2*l3 + l3**2)**4/(self.c10*self.r**10),
            -self.sq3*xi/(3*self.r) + 2*self.sq3*xi**3*(l2 - l3)*(-l1 + 2*l2 - l3)/(self.c9*self.r**3) + 2*self.sq3*xi**3*(l1**2 - l1*l2 - l1*l3 + l2**2 - l2*l3 + l3**2)/(self.c9*self.r**3) - 2*self.sq3*xi**5*(l2 - l3)*(-2*l1 + 4*l2 - 2*l3)*(l1**2 - l1*l2 - l1*l3 + l2**2 - l2*l3 + l3**2)/(self.c8*self.r**5) - 2*self.sq3*xi**5*(l1**2 - l1*l2 - l1*l3 + l2**2 - l2*l3 + l3**2)**2/(self.c8*self.r**5) + 4*self.sq3*xi**7*(l2 - l3)*(-3*l1 + 6*l2 - 3*l3)*(l1**2 - l1*l2 - l1*l3 + l2**2 - l2*l3 + l3**2)**2/(self.c7*self.r**7) + 4*self.sq3*xi**7*(l1**2 - l1*l2 - l1*l3 + l2**2 - l2*l3 + l3**2)**3/(self.c7*self.r**7) - 2*self.sq3*xi**9*(l2 - l3)*(-4*l1 + 8*l2 - 4*l3)*(l1**2 - l1*l2 - l1*l3 + l2**2 - l2*l3 + l3**2)**3/(self.c6*self.r**9) - 2*self.sq3*xi**9*(l1**2 - l1*l2 - l1*l3 + l2**2 - l2*l3 + l3**2)**4/(self.c6*self.r**9),
        ],
        [
            -xi/(3*self.r) - xi**3*(-l1 + 2*l2 - l3)*(4*l1 - 2*l2 - 2*l3)/(self.c9*self.r**3) + 2*xi**3*(l1**2 - l1*l2 - l1*l3 + l2**2 - l2*l3 + l3**2)/(self.c9*self.r**3) + xi**5*(-2*l1 + 4*l2 - 2*l3)*(4*l1 - 2*l2 - 2*l3)*(l1**2 - l1*l2 - l1*l3 + l2**2 - l2*l3 + l3**2)/(self.c8*self.r**5) - 2*xi**5*(l1**2 - l1*l2 - l1*l3 + l2**2 - l2*l3 + l3**2)**2/(self.c8*self.r**5) - xi**7*(-3*l1 + 6*l2 - 3*l3)*(8*l1 - 4*l2 - 4*l3)*(l1**2 - l1*l2 - l1*l3 + l2**2 - l2*l3 + l3**2)**2/(self.c7*self.r**7) + 4*xi**7*(l1**2 - l1*l2 - l1*l3 + l2**2 - l2*l3 + l3**2)**3/(self.c7*self.r**7) + xi**9*(-4*l1 + 8*l2 - 4*l3)*(4*l1 - 2*l2 - 2*l3)*(l1**2 - l1*l2 - l1*l3 + l2**2 - l2*l3 + l3**2)**3/(self.c6*self.r**9) - 2*xi**9*(l1**2 - l1*l2 - l1*l3 + l2**2 - l2*l3 + l3**2)**4/(self.c6*self.r**9),
            self.sq3*xi/(3*self.r) - 2*self.sq3*xi**3*(l2 - l3)*(-l1 + 2*l2 - l3)/(self.c9*self.r**3) - 2*self.sq3*xi**3*(l1**2 - l1*l2 - l1*l3 + l2**2 - l2*l3 + l3**2)/(self.c9*self.r**3) + 2*self.sq3*xi**5*(l2 - l3)*(-2*l1 + 4*l2 - 2*l3)*(l1**2 - l1*l2 - l1*l3 + l2**2 - l2*l3 + l3**2)/(self.c8*self.r**5) + 2*self.sq3*xi**5*(l1**2 - l1*l2 - l1*l3 + l2**2 - l2*l3 + l3**2)**2/(self.c8*self.r**5) - 4*self.sq3*xi**7*(l2 - l3)*(-3*l1 + 6*l2 - 3*l3)*(l1**2 - l1*l2 - l1*l3 + l2**2 - l2*l3 + l3**2)**2/(self.c7*self.r**7) - 4*self.sq3*xi**7*(l1**2 - l1*l2 - l1*l3 + l2**2 - l2*l3 + l3**2)**3/(self.c7*self.r**7) + 2*self.sq3*xi**9*(l2 - l3)*(-4*l1 + 8*l2 - 4*l3)*(l1**2 - l1*l2 - l1*l3 + l2**2 - l2*l3 + l3**2)**3/(self.c6*self.r**9) + 2*self.sq3*xi**9*(l1**2 - l1*l2 - l1*l3 + l2**2 - l2*l3 + l3**2)**4/(self.c6*self.r**9),
            -2*xi**2*(-l1 + 2*l2 - l3)/(9*self.r**2) - 4*xi**6*(-3*l1 + 6*l2 - 3*l3)*(l1**2 - l1*l2 - l1*l3 + l2**2 - l2*l3 + l3**2)**2/(self.c3*self.r**6) + 2*xi**8*(-4*l1 + 8*l2 - 4*l3)*(l1**2 - l1*l2 - l1*l3 + l2**2 - l2*l3 + l3**2)**3/(self.c15*self.r**8) + 2*xi**4*(-2*l1 + 4*l2 - 2*l3)*(l1**2 - l1*l2 - l1*l3 + l2**2 - l2*l3 + l3**2)/(self.c14*self.r**4) - 4*xi**10*(-5*l1 + 10*l2 - 5*l3)*(l1**2 - l1*l2 - l1*l3 + l2**2 - l2*l3 + l3**2)**4/(self.c1*self.r**10)
        ]
    ])
def calc_dRdl3(self, q, xi):
    """Partial derivative of the section rotation matrix R w.r.t. l3.

    Closed-form, machine-generated (sympy-style) expression; do not edit
    the polynomial terms by hand -- regenerate them instead.

    Parameters
    ----------
    q : np.ndarray
        (3, 1) column vector of the section's actuator lengths [l1, l2, l3].
    xi : float
        Normalized arc-length coordinate along the section.

    Returns
    -------
    np.ndarray
        3x3 matrix dR/dl3 evaluated at (q, xi); uses the series
        coefficients self.c1..self.c15, radius self.r and self.sq3.
    """
    l1 = q[0,0]
    l2 = q[1,0]
    l3 = q[2,0]
    return np.array([
        [
            -xi**2*(-4*l1 + 2*l2 + 2*l3)/(self.c5*self.r**2) + xi**4*(-4*l1 + 2*l2 + 2*l3)*(l1**2 - l1*l2 - l1*l3 + l2**2 - l2*l3 + l3**2)/(self.c4*self.r**4) + xi**4*(-l1 - l2 + 2*l3)*(2*l1 - l2 - l3)**2/(self.c4*self.r**4) - xi**6*(-4*l1 + 2*l2 + 2*l3)*(l1**2 - l1*l2 - l1*l3 + l2**2 - l2*l3 + l3**2)**2/(self.c3*self.r**6) - xi**6*(-2*l1 - 2*l2 + 4*l3)*(2*l1 - l2 - l3)**2*(l1**2 - l1*l2 - l1*l3 + l2**2 - l2*l3 + l3**2)/(self.c3*self.r**6) + xi**8*(-4*l1 + 2*l2 + 2*l3)*(l1**2 - l1*l2 - l1*l3 + l2**2 - l2*l3 + l3**2)**3/(self.c1*self.r**8) + xi**8*(-3*l1 - 3*l2 + 6*l3)*(2*l1 - l2 - l3)**2*(l1**2 - l1*l2 - l1*l3 + l2**2 - l2*l3 + l3**2)**2/(self.c1*self.r**8) - xi**10*(-4*l1 - 4*l2 + 8*l3)*(2*l1 - l2 - l3)**2*(l1**2 - l1*l2 - l1*l3 + l2**2 - l2*l3 + l3**2)**3/(self.c1*self.r**10) - xi**10*(-4*l1 + 2*l2 + 2*l3)*(l1**2 - l1*l2 - l1*l3 + l2**2 - l2*l3 + l3**2)**4/(self.c1*self.r**10),
            self.sq3*xi**2*(l2 - l3)/(self.c5*self.r**2) + self.sq3*xi**2*(2*l1 - l2 - l3)/(self.c5*self.r**2) + self.sq3*xi**4*(l2 - l3)*(-l1 - l2 + 2*l3)*(2*l1 - l2 - l3)/(self.c4*self.r**4) - self.sq3*xi**4*(l2 - l3)*(l1**2 - l1*l2 - l1*l3 + l2**2 - l2*l3 + l3**2)/(self.c4*self.r**4) - self.sq3*xi**4*(2*l1 - l2 - l3)*(l1**2 - l1*l2 - l1*l3 + l2**2 - l2*l3 + l3**2)/(self.c4*self.r**4) - self.sq3*xi**6*(l2 - l3)*(-2*l1 - 2*l2 + 4*l3)*(2*l1 - l2 - l3)*(l1**2 - l1*l2 - l1*l3 + l2**2 - l2*l3 + l3**2)/(self.c3*self.r**6) + self.sq3*xi**6*(l2 - l3)*(l1**2 - l1*l2 - l1*l3 + l2**2 - l2*l3 + l3**2)**2/(self.c3*self.r**6) + self.sq3*xi**6*(2*l1 - l2 - l3)*(l1**2 - l1*l2 - l1*l3 + l2**2 - l2*l3 + l3**2)**2/(self.c3*self.r**6) + self.sq3*xi**8*(l2 - l3)*(-3*l1 - 3*l2 + 6*l3)*(2*l1 - l2 - l3)*(l1**2 - l1*l2 - l1*l3 + l2**2 - l2*l3 + l3**2)**2/(self.c2*self.r**8) - self.sq3*xi**8*(l2 - l3)*(l1**2 - l1*l2 - l1*l3 + l2**2 - l2*l3 + l3**2)**3/(self.c2*self.r**8) - self.sq3*xi**8*(2*l1 - l2 - l3)*(l1**2 - l1*l2 - l1*l3 + l2**2 - l2*l3 + l3**2)**3/(self.c2*self.r**8) + self.sq3*xi**10*(l2 - l3)*(-4*l1 - 4*l2 + 8*l3)*(2*l1 - l2 - l3)*(l1**2 - l1*l2 - l1*l3 + l2**2 - l2*l3 + l3**2)**3/(self.c1*self.r**10) - self.sq3*xi**10*(l2 - l3)*(l1**2 - l1*l2 - l1*l3 + l2**2 - l2*l3 + l3**2)**4/(self.c1*self.r**10) - self.sq3*xi**10*(2*l1 - l2 - l3)*(l1**2 - l1*l2 - l1*l3 + l2**2 - l2*l3 + l3**2)**4/(self.c1*self.r**10),
            xi/(3*self.r) + xi**3*(-l1 - l2 + 2*l3)*(4*l1 - 2*l2 - 2*l3)/(self.c9*self.r**3) - 2*xi**3*(l1**2 - l1*l2 - l1*l3 + l2**2 - l2*l3 + l3**2)/(self.c9*self.r**3) - xi**5*(-2*l1 - 2*l2 + 4*l3)*(4*l1 - 2*l2 - 2*l3)*(l1**2 - l1*l2 - l1*l3 + l2**2 - l2*l3 + l3**2)/(self.c8*self.r**5) + 2*xi**5*(l1**2 - l1*l2 - l1*l3 + l2**2 - l2*l3 + l3**2)**2/(self.c8*self.r**5) + xi**7*(-3*l1 - 3*l2 + 6*l3)*(8*l1 - 4*l2 - 4*l3)*(l1**2 - l1*l2 - l1*l3 + l2**2 - l2*l3 + l3**2)**2/(self.c7*self.r**7) - 4*xi**7*(l1**2 - l1*l2 - l1*l3 + l2**2 - l2*l3 + l3**2)**3/(self.c7*self.r**7) - xi**9*(-4*l1 - 4*l2 + 8*l3)*(4*l1 - 2*l2 - 2*l3)*(l1**2 - l1*l2 - l1*l3 + l2**2 - l2*l3 + l3**2)**3/(self.c6*self.r**9) + 2*xi**9*(l1**2 - l1*l2 - l1*l3 + l2**2 - l2*l3 + l3**2)**4/(self.c6*self.r**9)
        ],
        [
            self.sq3*xi**2*(l2 - l3)/(self.c5*self.r**2) + self.sq3*xi**2*(2*l1 - l2 - l3)/(self.c5*self.r**2) + self.sq3*xi**4*(l2 - l3)*(-l1 - l2 + 2*l3)*(2*l1 - l2 - l3)/(self.c4*self.r**4) - self.sq3*xi**4*(l2 - l3)*(l1**2 - l1*l2 - l1*l3 + l2**2 - l2*l3 + l3**2)/(self.c4*self.r**4) - self.sq3*xi**4*(2*l1 - l2 - l3)*(l1**2 - l1*l2 - l1*l3 + l2**2 - l2*l3 + l3**2)/(self.c4*self.r**4) - self.sq3*xi**6*(l2 - l3)*(-2*l1 - 2*l2 + 4*l3)*(2*l1 - l2 - l3)*(l1**2 - l1*l2 - l1*l3 + l2**2 - l2*l3 + l3**2)/(self.c3*self.r**6) + self.sq3*xi**6*(l2 - l3)*(l1**2 - l1*l2 - l1*l3 + l2**2 - l2*l3 + l3**2)**2/(self.c3*self.r**6) + self.sq3*xi**6*(2*l1 - l2 - l3)*(l1**2 - l1*l2 - l1*l3 + l2**2 - l2*l3 + l3**2)**2/(self.c3*self.r**6) + self.sq3*xi**8*(l2 - l3)*(-3*l1 - 3*l2 + 6*l3)*(2*l1 - l2 - l3)*(l1**2 - l1*l2 - l1*l3 + l2**2 - l2*l3 + l3**2)**2/(self.c2*self.r**8) - self.sq3*xi**8*(l2 - l3)*(l1**2 - l1*l2 - l1*l3 + l2**2 - l2*l3 + l3**2)**3/(self.c2*self.r**8) - self.sq3*xi**8*(2*l1 - l2 - l3)*(l1**2 - l1*l2 - l1*l3 + l2**2 - l2*l3 + l3**2)**3/(self.c2*self.r**8) + self.sq3*xi**10*(l2 - l3)*(-4*l1 - 4*l2 + 8*l3)*(2*l1 - l2 - l3)*(l1**2 - l1*l2 - l1*l3 + l2**2 - l2*l3 + l3**2)**3/(self.c1*self.r**10) - self.sq3*xi**10*(l2 - l3)*(l1**2 - l1*l2 - l1*l3 + l2**2 - l2*l3 + l3**2)**4/(self.c1*self.r**10) - self.sq3*xi**10*(2*l1 - l2 - l3)*(l1**2 - l1*l2 - l1*l3 + l2**2 - l2*l3 + l3**2)**4/(self.c1*self.r**10),
            -xi**2*(-2*l2 + 2*l3)/(6*self.r**2) + xi**4*(-2*l2 + 2*l3)*(l1**2 - l1*l2 - l1*l3 + l2**2 - l2*l3 + l3**2)/(self.c13*self.r**4) + xi**4*(l2 - l3)**2*(-l1 - l2 + 2*l3)/(self.c13*self.r**4) - xi**6*(-2*l2 + 2*l3)*(l1**2 - l1*l2 - l1*l3 + l2**2 - l2*l3 + l3**2)**2/(self.c12*self.r**6) - xi**6*(l2 - l3)**2*(-2*l1 - 2*l2 + 4*l3)*(l1**2 - l1*l2 - l1*l3 + l2**2 - l2*l3 + l3**2)/(self.c12*self.r**6) + xi**8*(-2*l2 + 2*l3)*(l1**2 - l1*l2 - l1*l3 + l2**2 - l2*l3 + l3**2)**3/(self.c11*self.r**8) + xi**8*(l2 - l3)**2*(-3*l1 - 3*l2 + 6*l3)*(l1**2 - l1*l2 - l1*l3 + l2**2 - l2*l3 + l3**2)**2/(self.c11*self.r**8) - xi**10*(-2*l2 + 2*l3)*(l1**2 - l1*l2 - l1*l3 + l2**2 - l2*l3 + l3**2)**4/(self.c10*self.r**10) - xi**10*(l2 - l3)**2*(-4*l1 - 4*l2 + 8*l3)*(l1**2 - l1*l2 - l1*l3 + l2**2 - l2*l3 + l3**2)**3/(self.c10*self.r**10),
            self.sq3*xi/(3*self.r) + 2*self.sq3*xi**3*(l2 - l3)*(-l1 - l2 + 2*l3)/(self.c9*self.r**3) - 2*self.sq3*xi**3*(l1**2 - l1*l2 - l1*l3 + l2**2 - l2*l3 + l3**2)/(self.c9*self.r**3) - 2*self.sq3*xi**5*(l2 - l3)*(-2*l1 - 2*l2 + 4*l3)*(l1**2 - l1*l2 - l1*l3 + l2**2 - l2*l3 + l3**2)/(self.c8*self.r**5) + 2*self.sq3*xi**5*(l1**2 - l1*l2 - l1*l3 + l2**2 - l2*l3 + l3**2)**2/(self.c8*self.r**5) + 4*self.sq3*xi**7*(l2 - l3)*(-3*l1 - 3*l2 + 6*l3)*(l1**2 - l1*l2 - l1*l3 + l2**2 - l2*l3 + l3**2)**2/(self.c7*self.r**7) - 4*self.sq3*xi**7*(l1**2 - l1*l2 - l1*l3 + l2**2 - l2*l3 + l3**2)**3/(self.c7*self.r**7) - 2*self.sq3*xi**9*(l2 - l3)*(-4*l1 - 4*l2 + 8*l3)*(l1**2 - l1*l2 - l1*l3 + l2**2 - l2*l3 + l3**2)**3/(self.c6*self.r**9) + 2*self.sq3*xi**9*(l1**2 - l1*l2 - l1*l3 + l2**2 - l2*l3 + l3**2)**4/(self.c6*self.r**9)],
        [
            -xi/(3*self.r) - xi**3*(-l1 - l2 + 2*l3)*(4*l1 - 2*l2 - 2*l3)/(self.c9*self.r**3) + 2*xi**3*(l1**2 - l1*l2 - l1*l3 + l2**2 - l2*l3 + l3**2)/(self.c9*self.r**3) + xi**5*(-2*l1 - 2*l2 + 4*l3)*(4*l1 - 2*l2 - 2*l3)*(l1**2 - l1*l2 - l1*l3 + l2**2 - l2*l3 + l3**2)/(self.c8*self.r**5) - 2*xi**5*(l1**2 - l1*l2 - l1*l3 + l2**2 - l2*l3 + l3**2)**2/(self.c8*self.r**5) - xi**7*(-3*l1 - 3*l2 + 6*l3)*(8*l1 - 4*l2 - 4*l3)*(l1**2 - l1*l2 - l1*l3 + l2**2 - l2*l3 + l3**2)**2/(self.c7*self.r**7) + 4*xi**7*(l1**2 - l1*l2 - l1*l3 + l2**2 - l2*l3 + l3**2)**3/(self.c7*self.r**7) + xi**9*(-4*l1 - 4*l2 + 8*l3)*(4*l1 - 2*l2 - 2*l3)*(l1**2 - l1*l2 - l1*l3 + l2**2 - l2*l3 + l3**2)**3/(self.c6*self.r**9) - 2*xi**9*(l1**2 - l1*l2 - l1*l3 + l2**2 - l2*l3 + l3**2)**4/(self.c6*self.r**9), -self.sq3*xi/(3*self.r) - 2*self.sq3*xi**3*(l2 - l3)*(-l1 - l2 + 2*l3)/(self.c9*self.r**3) + 2*self.sq3*xi**3*(l1**2 - l1*l2 - l1*l3 + l2**2 - l2*l3 + l3**2)/(self.c9*self.r**3) + 2*self.sq3*xi**5*(l2 - l3)*(-2*l1 - 2*l2 + 4*l3)*(l1**2 - l1*l2 - l1*l3 + l2**2 - l2*l3 + l3**2)/(self.c8*self.r**5) - 2*self.sq3*xi**5*(l1**2 - l1*l2 - l1*l3 + l2**2 - l2*l3 + l3**2)**2/(self.c8*self.r**5) - 4*self.sq3*xi**7*(l2 - l3)*(-3*l1 - 3*l2 + 6*l3)*(l1**2 - l1*l2 - l1*l3 + l2**2 - l2*l3 + l3**2)**2/(self.c7*self.r**7) + 4*self.sq3*xi**7*(l1**2 - l1*l2 - l1*l3 + l2**2 - l2*l3 + l3**2)**3/(self.c7*self.r**7) + 2*self.sq3*xi**9*(l2 - l3)*(-4*l1 - 4*l2 + 8*l3)*(l1**2 - l1*l2 - l1*l3 + l2**2 - l2*l3 + l3**2)**3/(self.c6*self.r**9) - 2*self.sq3*xi**9*(l1**2 - l1*l2 - l1*l3 + l2**2 - l2*l3 + l3**2)**4/(self.c6*self.r**9),
            -2*xi**2*(-l1 - l2 + 2*l3)/(9*self.r**2) - 4*xi**6*(-3*l1 - 3*l2 + 6*l3)*(l1**2 - l1*l2 - l1*l3 + l2**2 - l2*l3 + l3**2)**2/(self.c3*self.r**6) + 2*xi**8*(-4*l1 - 4*l2 + 8*l3)*(l1**2 - l1*l2 - l1*l3 + l2**2 - l2*l3 + l3**2)**3/(self.c15*self.r**8) + 2*xi**4*(-2*l1 - 2*l2 + 4*l3)*(l1**2 - l1*l2 - l1*l3 + l2**2 - l2*l3 + l3**2)/(self.c14*self.r**4) - 4*xi**10*(-5*l1 - 5*l2 + 10*l3)*(l1**2 - l1*l2 - l1*l3 + l2**2 - l2*l3 + l3**2)**4/(self.c1*self.r**10)
        ]
    ])
class OneSection(Base):
    """Local state for a single section of the arm."""

    def __init__(self, n, n_step):
        """
        Parameters
        ----------
        n : int
            Section index within the arm.
        n_step : int
            Number of xi sample points along the section.
        """
        self.n = n                # section index
        self.n_step = n_step      # bug fix: was accepted but never stored
        self.J_OMEGAs = None      # angular-velocity Jacobian blocks (set by AllSection)
        self.J_omegas = None      # per-sample angular-velocity Jacobians
        self.J_vs = None          # linear-velocity Jacobian blocks

    def P(self):
        """Placeholder, not yet implemented.

        Bug fix: the original omitted ``self``, so calling ``section.P()``
        raised TypeError.
        """
        pass
class AllSection:
    """Kinematics of the whole arm (all sections chained together)."""

    def __init__(self, N):
        """
        Parameters
        ----------
        N : int
            Number of sections in the arm.
        """
        self.N = N          # number of sections
        self.n_step = 10    # number of xi sample points per section
        self.set_section()

    def set_section(self):
        """Create the per-section local-state objects."""
        self.sections = [
            OneSection(i, self.n_step) for i in range(self.N)
        ]

    def update_all(self, q_all, q_dot_all):
        """Store the stacked configuration and its time derivative.

        Parameters
        ----------
        q_all, q_dot_all : np.ndarray
            (3*N, 1) column vectors: three actuator lengths (resp. their
            rates) per section, stacked section by section.
        """
        self.q_all = q_all
        self.q_dot_all = q_dot_all

    def update_local(self):
        """Update each section's local positions and rotation matrices."""
        for i in range(self.N):
            # Bug fix: section i owns rows 3*i .. 3*i+2 of the stacked
            # vector; the original sliced q_all[i:i+3], which made the
            # per-section length triplets overlap.
            self.sections[i].update_local_state(self.q_all[3*i:3*i + 3, :])

    def update_J_OMEGA_ij(self):
        """Update the angular-velocity Jacobian blocks J_OMEGA per section.

        For section k, every xi sample l gets a list (over i <= k) of
        3-element lists (over the three actuator lengths j) of 3x3 blocks.
        Debug prints from the original were removed.
        """
        for k in range(self.N):
            J_OMEGA_ijs_all = []
            for l in range(self.n_step):  # one entry per xi sample point
                J_OMEGA_ijs = []
                for i in range(k + 1):
                    Ri = self.sections[i].Rs[l]
                    J_OMEGA_ij = []
                    for j in range(3):
                        if i == k:
                            # Own section: differentiate R directly.
                            dRidlj = self.sections[i].dRdls[l][j]
                            J_OMEGA_ij.append(Ri.T @ dRidlj)
                        else:
                            # Earlier section: propagate the previously
                            # computed block through this section's rotation.
                            # NOTE(review): sections[i-1] wraps to the LAST
                            # section when i == 0 -- confirm the intended
                            # index (possibly sections[i]).
                            J_OMEGA_prev = self.sections[i-1].J_OMEGAs[-1][i][j]
                            J_OMEGA_ij.append(Ri.T @ J_OMEGA_prev @ Ri)
                    J_OMEGA_ijs.append(J_OMEGA_ij)
                J_OMEGA_ijs_all.append(J_OMEGA_ijs)
            self.sections[k].J_OMEGAs = J_OMEGA_ijs_all
        return

    def update_J_v_ij(self):
        """Update the linear-velocity Jacobian blocks (not yet implemented)."""
        pass
if __name__ == "__main__":
    # Smoke test: a three-section arm with all actuator lengths at zero.
    num_sections = 3
    q_all = np.zeros((3 * num_sections, 1))
    q_dot_all = np.zeros((3 * num_sections, 1))

    arm = AllSection(num_sections)
    arm.update_all(q_all, q_dot_all)
    arm.update_local()
    arm.update_J_OMEGA_ij()
| 120.932153
| 1,594
| 0.450361
| 9,230
| 40,996
| 1.989599
| 0.017443
| 0.120889
| 0.100468
| 0.101775
| 0.915705
| 0.906284
| 0.89169
| 0.874646
| 0.866914
| 0.857275
| 0
| 0.228173
| 0.222485
| 40,996
| 338
| 1,595
| 121.289941
| 0.347953
| 0.005464
| 0
| 0.247934
| 0
| 0
| 0.000566
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.070248
| false
| 0.008264
| 0.012397
| 0
| 0.210744
| 0.020661
| 0
| 0
| 1
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 11
|
e4267ee5d7be19604157f354de614036a3513084
| 4,467
|
py
|
Python
|
api/migrations/0005_disasterneighborhoodgrid.py
|
hackoregon/disaster-resilience-backend
|
7776ca37bc50ef79e8bbf0830b6ca4b798f0df9f
|
[
"MIT"
] | 2
|
2018-04-27T09:10:08.000Z
|
2018-05-01T08:38:29.000Z
|
api/migrations/0005_disasterneighborhoodgrid.py
|
hackoregon/disaster-resilience-backend
|
7776ca37bc50ef79e8bbf0830b6ca4b798f0df9f
|
[
"MIT"
] | 21
|
2018-05-27T23:51:40.000Z
|
2021-06-10T20:15:17.000Z
|
api/migrations/0005_disasterneighborhoodgrid.py
|
hackoregon/disaster-resilience-backend
|
7776ca37bc50ef79e8bbf0830b6ca4b798f0df9f
|
[
"MIT"
] | 3
|
2018-04-27T09:11:06.000Z
|
2019-03-10T19:32:26.000Z
|
# Generated by Django 2.0.1 on 2018-06-21 01:22
import django.contrib.gis.db.models.fields
from django.db import migrations, models
class Migration(migrations.Migration):
    """Declare the unmanaged DisasterNeighborhoodGrid model.

    Auto-generated by Django (see header comment); ``managed = False`` means
    Django will NOT create or alter the underlying table -- this only
    registers the existing ``disaster_neighborhood_grid`` table with the ORM.
    All statistic columns are stored as CharField -- presumably mirroring the
    source table's text columns; confirm against the actual DB schema.
    """

    dependencies = [
        ('api', '0004_disasterneighborhoods_disasterneighborhoodview'),
    ]

    operations = [
        migrations.CreateModel(
            name='DisasterNeighborhoodGrid',
            fields=[
                ('id', models.IntegerField(primary_key=True, serialize=False)),
                # Grid-cell centroid coordinates (stored as text).
                ('centroidx', models.CharField(blank=True, max_length=255, null=True)),
                ('centroidy', models.CharField(blank=True, max_length=255, null=True)),
                ('x_simple', models.CharField(blank=True, max_length=255, null=True)),
                ('y_simple', models.CharField(blank=True, max_length=255, null=True)),
                # Cell geometry in WGS84 (srid=4326).
                ('wkb_geometry', django.contrib.gis.db.models.fields.GeometryField(blank=True, null=True, srid=4326)),
                # Aggregated peak-ground-velocity statistics per cell.
                ('pgv_site_count', models.CharField(blank=True, max_length=255, null=True)),
                ('pgv_site_max', models.CharField(blank=True, max_length=255, null=True)),
                ('pgv_site_mean', models.CharField(blank=True, max_length=255, null=True)),
                ('pgv_site_min', models.CharField(blank=True, max_length=255, null=True)),
                ('pgv_site_std', models.CharField(blank=True, max_length=255, null=True)),
                # Aggregated permanent-ground-deformation statistics,
                # split by hazard type and dry/wet soil condition.
                ('pgd_landslide_dry_count', models.CharField(blank=True, max_length=255, null=True)),
                ('pgd_landslide_dry_max', models.CharField(blank=True, max_length=255, null=True)),
                ('pgd_landslide_dry_mean', models.CharField(blank=True, max_length=255, null=True)),
                ('pgd_landslide_dry_min', models.CharField(blank=True, max_length=255, null=True)),
                ('pgd_landslide_dry_std', models.CharField(blank=True, max_length=255, null=True)),
                ('pgd_landslide_wet_count', models.CharField(blank=True, max_length=255, null=True)),
                ('pgd_landslide_wet_max', models.CharField(blank=True, max_length=255, null=True)),
                ('pgd_landslide_wet_mean', models.CharField(blank=True, max_length=255, null=True)),
                ('pgd_landslide_wet_min', models.CharField(blank=True, max_length=255, null=True)),
                ('pgd_landslide_wet_std', models.CharField(blank=True, max_length=255, null=True)),
                ('pgd_liquefaction_wet_count', models.CharField(blank=True, max_length=255, null=True)),
                ('pgd_liquefaction_wet_max', models.CharField(blank=True, max_length=255, null=True)),
                ('pgd_liquefaction_wet_mean', models.CharField(blank=True, max_length=255, null=True)),
                ('pgd_liquefaction_wet_min', models.CharField(blank=True, max_length=255, null=True)),
                ('pgd_liquefaction_wet_std', models.CharField(blank=True, max_length=255, null=True)),
                ('pgd_total_wet_mean', models.CharField(blank=True, max_length=255, null=True)),
                # Derived intensity indices (MMI / damage index).
                ('pgv_site_min_mmi', models.IntegerField(blank=True, null=True)),
                ('pgv_site_max_mmi', models.IntegerField(blank=True, null=True)),
                ('pgv_site_mean_mmi', models.IntegerField(blank=True, null=True)),
                ('pgd_landslide_dry_min_di', models.CharField(blank=True, max_length=255, null=True)),
                ('pgd_landslide_dry_max_di', models.CharField(blank=True, max_length=255, null=True)),
                ('pgd_landslide_dry_mean_di', models.CharField(blank=True, max_length=255, null=True)),
                ('pgd_landslide_wet_min_di', models.CharField(blank=True, max_length=255, null=True)),
                ('pgd_landslide_wet_max_di', models.CharField(blank=True, max_length=255, null=True)),
                ('pgd_landslide_wet_mean_di', models.CharField(blank=True, max_length=255, null=True)),
                ('pgd_liquefaction_wet_min_di', models.CharField(blank=True, max_length=255, null=True)),
                ('pgd_liquefaction_wet_max_di', models.CharField(blank=True, max_length=255, null=True)),
                ('pgd_liquefaction_wet_mean_di', models.CharField(blank=True, max_length=255, null=True)),
                ('pgd_total_wet_mean_di', models.CharField(blank=True, max_length=255, null=True)),
            ],
            options={
                'db_table': 'disaster_neighborhood_grid',
                'managed': False,
            },
        ),
    ]
| 69.796875
| 118
| 0.645176
| 551
| 4,467
| 4.956443
| 0.134301
| 0.128524
| 0.256316
| 0.30758
| 0.849872
| 0.847309
| 0.819846
| 0.805932
| 0.805932
| 0.740754
| 0
| 0.036613
| 0.217372
| 4,467
| 63
| 119
| 70.904762
| 0.744565
| 0.010074
| 0
| 0
| 1
| 0
| 0.200226
| 0.155882
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.035088
| 0
| 0.087719
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
e4d55357f38e2ca42648fb7ea064b6a959bc2b42
| 55,657
|
py
|
Python
|
external/TriBITS/test/python_utils/gitdist_UnitTests.py
|
murraypurves/BootsOnTheGround
|
15acc4ed064e368f6af5114408f1be8a62749f32
|
[
"MIT"
] | 4
|
2017-02-01T00:39:29.000Z
|
2018-08-09T11:53:18.000Z
|
external/TriBITS/test/python_utils/gitdist_UnitTests.py
|
murraypurves/BootsOnTheGround
|
15acc4ed064e368f6af5114408f1be8a62749f32
|
[
"MIT"
] | 14
|
2017-01-19T17:56:04.000Z
|
2017-08-27T21:52:35.000Z
|
external/TriBITS/test/python_utils/gitdist_UnitTests.py
|
murraypurves/BootsOnTheGround
|
15acc4ed064e368f6af5114408f1be8a62749f32
|
[
"MIT"
] | 1
|
2019-10-03T12:13:36.000Z
|
2019-10-03T12:13:36.000Z
|
# @HEADER
# ************************************************************************
#
# TriBITS: Tribal Build, Integrate, and Test System
# Copyright 2013 Sandia Corporation
#
# Under the terms of Contract DE-AC04-94AL85000 with Sandia Corporation,
# the U.S. Government retains certain rights in this software.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the Corporation nor the names of the
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY SANDIA CORPORATION "AS IS" AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL SANDIA CORPORATION OR THE
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# ************************************************************************
# @HEADER
#################################
# Unit testing code for gitdist #
#################################
import sys
import imp
import shutil
from unittest_helpers import *
# ----------------------------------------------------------------------------
# Module-level paths and command strings shared by all tests below.
# ----------------------------------------------------------------------------

pythonDir = os.path.abspath(GeneralScriptSupport.getScriptBaseDir())
utilsDir = pythonDir+"/utils"
# Fix: 'pythonUtilsDir' is referenced throughout this file (sys.path setup,
# gitdistPath, mockGitPath) but was never defined, which raises NameError at
# import time.  Define it; keep 'utilsDir' for backward compatibility.
pythonUtilsDir = utilsDir
tribitsDir = os.path.abspath(pythonDir+"/..")

sys.path = [pythonUtilsDir] + sys.path

from gitdist import *

#
# Utility functions for testing
#

# Command lines for invoking gitdist in the various test modes.
gitdistPath = pythonUtilsDir+"/gitdist"
gitdistPathNoColor = gitdistPath+" --dist-no-color"
gitdistPathMock = gitdistPathNoColor+" --dist-use-git=mockgit --dist-no-opt"
mockGitPath = pythonUtilsDir+"/mockprogram.py"
# NOTE(review): 'testPythonUtilsDir' is assumed to be provided by
# 'from unittest_helpers import *' above -- confirm.
unitTestDataDir = testPythonUtilsDir
tempMockProjectDir = "MockProjectDir"
# Directory the test harness starts in; tests chdir back here when done.
testBaseDir = os.getcwd()
def getCmndOutputInMockProjectDir(cmnd):
  """Run cmnd from within the mock project dir and return its output.

  Fix: the original did not restore the working directory if
  getCmndOutput() raised, leaving later tests running in the wrong cwd.
  """
  os.chdir(mockProjectDir)
  try:
    return getCmndOutput(cmnd)
  finally:
    # Always return to the test base dir, even on error.
    os.chdir(testBaseDir)
def createAndMoveIntoTestDir(testDir):
  """Create a fresh scratch dir with a mock project subdir, chdir into it,
  and return the absolute path of the mock project dir."""
  # Start from a clean slate: wipe any leftovers from a previous run.
  if os.path.exists(testDir):
    shutil.rmtree(testDir)
  os.mkdir(testDir)
  os.chdir(testDir)
  # Create the mock project subdir and move into it.
  if not os.path.exists(tempMockProjectDir):
    os.mkdir(tempMockProjectDir)
  os.chdir(tempMockProjectDir)
  return os.path.join(testBaseDir, testDir, tempMockProjectDir)
class GitDistOptions:
  """Minimal stand-in for gitdist's parsed command-line options object."""

  def __init__(self, useGit):
    # Path of the git (or mock git) executable gitdist should invoke.
    self.useGit = useGit
#
# Unit tests for createAsciiTable
#
class test_createAsciiTable(unittest.TestCase):
  """Unit tests for gitdist's createAsciiTable() text-table formatter.

  Each test passes a list of column dicts (keys: 'label', 'align' ('R' or
  'L'), and 'fields', one entry per table row) and compares the rendered
  table against an exact expected string.

  NOTE(review): the expected-table strings below appear to have had runs of
  spaces collapsed (the header/data rows are narrower than the '---'
  separator rows imply) -- confirm against the original source before
  trusting the exact expected text.
  """

  def test_full_table(self):
    # Three rows, mixed alignments, some empty fields.
    tableData = [
      { "label" : "ID", "align" : "R",
        "fields" : ["0", "1", "2"] },
      { "label" : "Repo Dir", "align" : "L",
        "fields" : ["Base: BaseRepo", "ExtraRepo1", "Path/To/ExtraRepo2" ] },
      { "label":"Branch", "align":"L",
        "fields" : ["dummy", "master", "HEAD" ] },
      { "label" : "Tracking Branch", "align":"L",
        "fields" : ["", "origin/master", "" ] },
      { "label" : "C", "align":"R", "fields" : ["", "1", "" ] },
      { "label" : "M", "align":"R", "fields" : ["0", "2", "25" ] },
      { "label" : "?", "align":"R", "fields" : ["0", "0", "4" ] },
      ]
    asciiTable = createAsciiTable(tableData)
    #print(asciiTable)
    asciiTable_expected = \
      "-------------------------------------------------------------------\n" \
      "| ID | Repo Dir | Branch | Tracking Branch | C | M | ? |\n" \
      "|----|--------------------|--------|-----------------|---|----|---|\n" \
      "| 0 | Base: BaseRepo | dummy | | | 0 | 0 |\n" \
      "| 1 | ExtraRepo1 | master | origin/master | 1 | 2 | 0 |\n" \
      "| 2 | Path/To/ExtraRepo2 | HEAD | | | 25 | 4 |\n" \
      "-------------------------------------------------------------------\n"
    self.assertEqual(asciiTable, asciiTable_expected)

  def test_no_rows(self):
    # Zero rows: only the header and separators should be emitted.
    tableData = [
      { "label" : "ID", "align" : "R",
        "fields" : [] },
      { "label" : "Repo Dir", "align" : "L",
        "fields" : [] },
      { "label":"Branch", "align":"L",
        "fields" : [] },
      { "label" : "Tracking Branch", "align":"L",
        "fields" : [] },
      { "label" : "C", "align":"R", "fields" : [] },
      { "label" : "M", "align":"R", "fields" : [] },
      { "label" : "?", "align":"R", "fields" : [] },
      ]
    asciiTable = createAsciiTable(tableData)
    #print(asciiTable)
    asciiTable_expected = \
      "--------------------------------------------------------\n" \
      "| ID | Repo Dir | Branch | Tracking Branch | C | M | ? |\n" \
      "|----|----------|--------|-----------------|---|---|---|\n" \
      "--------------------------------------------------------\n"
    self.assertEqual(asciiTable, asciiTable_expected)

  def test_one_row(self):
    # Single data row; columns sized to the widest of label and field.
    tableData = [
      { "label" : "ID", "align" : "R",
        "fields" : ["0"] },
      { "label" : "Repo Dir", "align" : "L",
        "fields" : ["Base: BaseRepo"] },
      { "label":"Branch", "align":"L",
        "fields" : ["dummy"] },
      { "label" : "Tracking Branch", "align":"L",
        "fields" : ["origin/master"] },
      { "label" : "C", "align":"R", "fields" : ["24"] },
      { "label" : "M", "align":"R", "fields" : ["25"] },
      { "label" : "?", "align":"R", "fields" : ["4"] },
      ]
    asciiTable = createAsciiTable(tableData)
    #print(asciiTable)
    asciiTable_expected = \
      "----------------------------------------------------------------\n" \
      "| ID | Repo Dir | Branch | Tracking Branch | C | M | ? |\n" \
      "|----|----------------|--------|-----------------|----|----|---|\n" \
      "| 0 | Base: BaseRepo | dummy | origin/master | 24 | 25 | 4 |\n" \
      "----------------------------------------------------------------\n"
    self.assertEqual(asciiTable, asciiTable_expected)

  def test_row_mismatch(self):
    # Columns with differing numbers of fields must raise an Exception.
    tableData = [
      { "label" : "ID", "align" : "R",
        "fields" : ["0", "1"] },
      { "label" : "Repo Dir", "align" : "L",
        "fields" : ["Base: BaseRepo"] },
      ]
    #createAsciiTable(tableData)
    self.assertRaises(Exception, createAsciiTable, tableData)
#
# Unit tests for functions in gitdist
#
class test_gitdist_getRepoStats(unittest.TestCase):
  """Unit tests for gitdist's getRepoStats().

  Each test writes a '.mockprogram_inout.txt' script that drives
  mockprogram.py (standing in for git) through the exact command /
  return-code / output sequence getRepoStats() is expected to issue, then
  checks str() of the resulting stats object.  Every test chdirs into a
  scratch dir and restores testBaseDir in a finally block.
  """

  def test_no_change(self):
    # Clean repo on a tracking branch: all counters come back '0'.
    try:
      testDir = createAndMoveIntoTestDir("gitdist_getRepoStats_no_change")
      open(".mockprogram_inout.txt", "w").write(
        "MOCK_PROGRAM_INPUT: rev-parse --abbrev-ref HEAD\n" \
        "MOCK_PROGRAM_RETURN: 0\n" \
        "MOCK_PROGRAM_OUTPUT: local_branch\n" \
        "MOCK_PROGRAM_INPUT: rev-parse --abbrev-ref --symbolic-full-name @{u}\n" \
        "MOCK_PROGRAM_RETURN: 0\n" \
        "MOCK_PROGRAM_OUTPUT: origin_repo/remote_branch\n" \
        "MOCK_PROGRAM_INPUT: shortlog -s HEAD ^origin_repo/remote_branch\n" \
        "MOCK_PROGRAM_RETURN: 0\n" \
        "MOCK_PROGRAM_OUTPUT: \n" \
        "MOCK_PROGRAM_INPUT: status --porcelain\n" \
        "MOCK_PROGRAM_RETURN: 0\n" \
        "MOCK_PROGRAM_OUTPUT: \n" \
        )
      options = GitDistOptions(mockGitPath)
      repoStats = getRepoStats(options)
      repoStats_expected = "{branch='local_branch'," \
        " trackingBranch='origin_repo/remote_branch', numCommits='0'," \
        " numModified='0', numUntracked='0'}"
      self.assertEqual(str(repoStats), repoStats_expected)
    finally:
      os.chdir(testBaseDir)

  def test_all_changed_no_tracking_branch(self):
    # No upstream (rev-parse @{u} returns 55): trackingBranch and
    # numCommits are empty; modified/untracked counted from porcelain.
    try:
      testDir = createAndMoveIntoTestDir(
        "gitdist_getRepoStats_all_changed_no_tracking_branch")
      open(".mockprogram_inout.txt", "w").write(
        "MOCK_PROGRAM_INPUT: rev-parse --abbrev-ref HEAD\n" \
        "MOCK_PROGRAM_RETURN: 0\n" \
        "MOCK_PROGRAM_OUTPUT: local_branch\n" \
        "MOCK_PROGRAM_INPUT: rev-parse --abbrev-ref --symbolic-full-name @{u}\n" \
        "MOCK_PROGRAM_RETURN: 55\n" \
        "MOCK_PROGRAM_OUTPUT: error: blah blahh blah\n" \
        "MOCK_PROGRAM_INPUT: status --porcelain\n" \
        "MOCK_PROGRAM_RETURN: 0\n" \
        "MOCK_PROGRAM_OUTPUT: M file1\n" \
        " T file2\n" \
        " D file3\n" \
        "?? file4\n" \
        "?? file5\n" \
        )
      options = GitDistOptions(mockGitPath)
      repoStats = getRepoStats(options)
      repoStats_expected = "{branch='local_branch'," \
        " trackingBranch='', numCommits=''," \
        " numModified='3', numUntracked='2'}"
      self.assertEqual(str(repoStats), repoStats_expected)
    finally:
      os.chdir(testBaseDir)

  def test_modified_and_staged_no_tracking_branch(self):
    # Mix of staged/unstaged/merge-state porcelain codes: everything that
    # is not '??' counts as modified (11), '??' counts as untracked (3).
    try:
      testDir = createAndMoveIntoTestDir(
        "gitdist_getRepoStats_all_changed_no_tracking_branch")
      open(".mockprogram_inout.txt", "w").write(
        "MOCK_PROGRAM_INPUT: rev-parse --abbrev-ref HEAD\n" \
        "MOCK_PROGRAM_RETURN: 0\n" \
        "MOCK_PROGRAM_OUTPUT: local_branch\n" \
        "MOCK_PROGRAM_INPUT: rev-parse --abbrev-ref --symbolic-full-name @{u}\n" \
        "MOCK_PROGRAM_RETURN: 55\n" \
        "MOCK_PROGRAM_OUTPUT: error: blah blahh blah\n" \
        "MOCK_PROGRAM_INPUT: status --porcelain\n" \
        "MOCK_PROGRAM_RETURN: 0\n" \
        "MOCK_PROGRAM_OUTPUT: M file1\n" \
        "MM file1b\n" \
        " T file2\n" \
        "MT file2b\n" \
        " D file3\n" \
        "MD file3\n" \
        "?? file4\n" \
        "?? file5\n" \
        "?? file5b\n" \
        " A file6\n" \
        "A file6b\n" \
        " U file7\n" \
        "U file7b\n" \
        "R file8\n" \
        )
      options = GitDistOptions(mockGitPath)
      repoStats = getRepoStats(options)
      repoStats_expected = "{branch='local_branch'," \
        " trackingBranch='', numCommits=''," \
        " numModified='11', numUntracked='3'}"
      self.assertEqual(str(repoStats), repoStats_expected)
    finally:
      os.chdir(testBaseDir)

  def test_all_changed_detached_head(self):
    # Detached HEAD (rev-parse @{u} returns 128): branch is 'HEAD' with no
    # tracking branch or commit count.
    try:
      testDir = createAndMoveIntoTestDir("gitdist_getRepoStats_all_changed_detached_head")
      open(".mockprogram_inout.txt", "w").write(
        "MOCK_PROGRAM_INPUT: rev-parse --abbrev-ref HEAD\n" \
        "MOCK_PROGRAM_RETURN: 0\n" \
        "MOCK_PROGRAM_OUTPUT: HEAD\n" \
        "MOCK_PROGRAM_INPUT: rev-parse --abbrev-ref --symbolic-full-name @{u}\n" \
        "MOCK_PROGRAM_RETURN: 128\n" \
        "MOCK_PROGRAM_OUTPUT: fatal: blah blahh blah\n" \
        "MOCK_PROGRAM_INPUT: status --porcelain\n" \
        "MOCK_PROGRAM_RETURN: 0\n" \
        "MOCK_PROGRAM_OUTPUT: M file1\n" \
        " M file2\n" \
        "?? file3\n" \
        "?? file4\n" \
        "?? file5\n" \
        )
      options = GitDistOptions(mockGitPath)
      repoStats = getRepoStats(options)
      repoStats_expected = "{branch='HEAD'," \
        " trackingBranch='', numCommits=''," \
        " numModified='2', numUntracked='3'}"
      self.assertEqual(str(repoStats), repoStats_expected)
    finally:
      os.chdir(testBaseDir)

  def test_all_ambiguous_head(self):
    # A tag named 'HEAD' makes rev-parse warn but still return 0;
    # getRepoStats reports the branch as '<AMBIGUOUS-HEAD>'.
    try:
      testDir = createAndMoveIntoTestDir("gitdist_getRepoStats_all_changed_detached_head")
      open(".mockprogram_inout.txt", "w").write(
        "MOCK_PROGRAM_INPUT: rev-parse --abbrev-ref HEAD\n" \
        "MOCK_PROGRAM_RETURN: 0\n" \
        "MOCK_PROGRAM_OUTPUT: warning: refname 'HEAD' is ambiguous.\n" \
        "error: refname 'HEAD' is ambiguous\n" \
        "MOCK_PROGRAM_INPUT: rev-parse --abbrev-ref --symbolic-full-name @{u}\n" \
        "MOCK_PROGRAM_RETURN: 0\n" \
        "MOCK_PROGRAM_OUTPUT: remoterepo/trackingbranch\n" \
        "MOCK_PROGRAM_INPUT: shortlog -s HEAD ^remoterepo/trackingbranch\n" \
        "MOCK_PROGRAM_RETURN: 0\n" \
        "MOCK_PROGRAM_OUTPUT: 7\n" \
        "MOCK_PROGRAM_INPUT: status --porcelain\n" \
        "MOCK_PROGRAM_RETURN: 0\n" \
        "MOCK_PROGRAM_OUTPUT: M file1\n" \
        " M file2\n" \
        "?? file3\n" \
        "?? file4\n" \
        "?? file5\n" \
        )
      options = GitDistOptions(mockGitPath)
      repoStats = getRepoStats(options)
      repoStats_expected = "{branch='<AMBIGUOUS-HEAD>'," \
        " trackingBranch='remoterepo/trackingbranch', numCommits='7'," \
        " numModified='2', numUntracked='3'}"
      self.assertEqual(str(repoStats), repoStats_expected)
    finally:
      os.chdir(testBaseDir)
  # NOTE: Above is a very strange test case. It is what happens when
  # someone creates a tag called 'HEAD' using the command 'git tag HEAD'
  # (which was an accident obviously). But amazingly, 'git rev-parse
  # --abbrev HEAD' still returns 0 but returns no name! See TriBITS #100
  # for details.

  def test_all_changed_1_author(self):
    # One shortlog author line "1<tab>some author": numCommits is 1.
    try:
      testDir = createAndMoveIntoTestDir("gitdist_getRepoStats_all_changed_1_author")
      open(".mockprogram_inout.txt", "w").write(
        "MOCK_PROGRAM_INPUT: rev-parse --abbrev-ref HEAD\n" \
        "MOCK_PROGRAM_RETURN: 0\n" \
        "MOCK_PROGRAM_OUTPUT: local_branch\n" \
        "MOCK_PROGRAM_INPUT: rev-parse --abbrev-ref --symbolic-full-name @{u}\n" \
        "MOCK_PROGRAM_RETURN: 0\n" \
        "MOCK_PROGRAM_OUTPUT: origin_repo/remote_branch\n" \
        "MOCK_PROGRAM_INPUT: shortlog -s HEAD ^origin_repo/remote_branch\n" \
        "MOCK_PROGRAM_RETURN: 0\n" \
        "MOCK_PROGRAM_OUTPUT: 1\tsome author\n" \
        "MOCK_PROGRAM_INPUT: status --porcelain\n" \
        "MOCK_PROGRAM_RETURN: 0\n" \
        "MOCK_PROGRAM_OUTPUT: M file1\n" \
        " M file2\n" \
        "?? file3\n" \
        "?? file4\n" \
        "?? file5\n" \
        )
      options = GitDistOptions(mockGitPath)
      repoStats = getRepoStats(options)
      repoStats_expected = "{branch='local_branch'," \
        " trackingBranch='origin_repo/remote_branch', numCommits='1'," \
        " numModified='2', numUntracked='3'}"
      self.assertEqual(str(repoStats), repoStats_expected)
    finally:
      os.chdir(testBaseDir)

  def test_all_changed_3_authors(self):
    # Three shortlog author lines (1+2+3 commits): numCommits is the sum, 6.
    try:
      testDir = createAndMoveIntoTestDir("gitdist_getRepoStats_all_changed_3_authors")
      open(".mockprogram_inout.txt", "w").write(
        "MOCK_PROGRAM_INPUT: rev-parse --abbrev-ref HEAD\n" \
        "MOCK_PROGRAM_RETURN: 0\n" \
        "MOCK_PROGRAM_OUTPUT: local_branch\n" \
        "MOCK_PROGRAM_INPUT: rev-parse --abbrev-ref --symbolic-full-name @{u}\n" \
        "MOCK_PROGRAM_RETURN: 0\n" \
        "MOCK_PROGRAM_OUTPUT: origin_repo/remote_branch\n" \
        "MOCK_PROGRAM_INPUT: shortlog -s HEAD ^origin_repo/remote_branch\n" \
        "MOCK_PROGRAM_RETURN: 0\n" \
        "MOCK_PROGRAM_OUTPUT: 1 some author1\n" \
        "2 some author2\n" \
        "3 some author2\n" \
        "MOCK_PROGRAM_INPUT: status --porcelain\n" \
        "MOCK_PROGRAM_RETURN: 0\n" \
        "MOCK_PROGRAM_OUTPUT: M file1\n" \
        " M file2\n" \
        "?? file3\n" \
        "?? file4\n" \
        "?? file5\n" \
        )
      options = GitDistOptions(mockGitPath)
      repoStats = getRepoStats(options)
      repoStats_expected = "{branch='local_branch'," \
        " trackingBranch='origin_repo/remote_branch', numCommits='6'," \
        " numModified='2', numUntracked='3'}"
      self.assertEqual(str(repoStats), repoStats_expected)
    finally:
      os.chdir(testBaseDir)
# Sample repo-version-file contents: one '*** ... Repo:' header per repo,
# followed by 'sha1 [date] <author>' and (optionally) a commit summary line.
repoVersionFile_withSummary_1 = """*** Base Git Repo: MockTrilinos
sha1_1 [Mon Sep 23 11:34:59 2013 -0400] <author_1@ornl.gov>
First summary message
*** Git Repo: extraTrilinosRepo
sha1_2 [Fri Aug 30 09:55:07 2013 -0400] <author_2@ornl.gov>
Second summary message
*** Git Repo: extraRepoOnePackage
sha1_3 [Thu Dec 1 23:34:06 2011 -0500] <author_3@ornl.gov>
Third summary message
"""

# Same format but without the optional summary lines.
repoVersionFile_withoutSummary_1 = """*** Base Git Repo: MockTrilinos
sha1_1 [Mon Sep 23 11:34:59 2013 -0400] <author_1@ornl.gov>
*** Git Repo: extraRepoTwoPackages
sha1_2 [Fri Aug 30 09:55:07 2013 -0400] <author_2@ornl.gov>
*** Git Repo: extraRepoOnePackageThreeSubpackages
sha1_3 [Thu Dec 1 23:34:06 2011 -0500] <author_3@ornl.gov>
"""
def writeGitMockProgram_base_3_2_1_repo1_22_0_2_repo2_0_0_0():
  """Write mock-git scripts for the base repo, ExtraRepo1, and ExtraRepo2.

  The digits in the name presumably encode each repo's stats
  (numCommits_numModified_numUntracked) -- confirm against callers.
  Must be called from a dir that already contains ExtraRepo1/ ExtraRepo2/.
  """
  # Base repo: 3 commits ahead, 2 modified, 1 untracked.
  open(".mockprogram_inout.txt", "w").write(
    "MOCK_PROGRAM_INPUT: rev-parse --abbrev-ref HEAD\n" \
    "MOCK_PROGRAM_RETURN: 0\n" \
    "MOCK_PROGRAM_OUTPUT: local_branch0\n" \
    "MOCK_PROGRAM_INPUT: rev-parse --abbrev-ref --symbolic-full-name @{u}\n" \
    "MOCK_PROGRAM_RETURN: 0\n" \
    "MOCK_PROGRAM_OUTPUT: origin_repo0/remote_branch0\n" \
    "MOCK_PROGRAM_INPUT: shortlog -s HEAD ^origin_repo0/remote_branch0\n" \
    "MOCK_PROGRAM_RETURN: 0\n" \
    "MOCK_PROGRAM_OUTPUT: 3 some author\n" \
    "MOCK_PROGRAM_INPUT: status --porcelain\n" \
    "MOCK_PROGRAM_RETURN: 0\n" \
    "MOCK_PROGRAM_OUTPUT: M file1\n" \
    " M file2\n" \
    "?? file2\n" \
    "MOCK_PROGRAM_INPUT: status\n" \
    "MOCK_PROGRAM_RETURN: 0\n" \
    "MOCK_PROGRAM_OUTPUT: On branch local_branch0\n" \
    "Your branch is ahead of 'origin_repo0/remote_branch0' by 3 commits.\n" \
    )
  # ExtraRepo1: 22 commits ahead, untracked file(s) only.
  open("ExtraRepo1/.mockprogram_inout.txt", "w").write(
    "MOCK_PROGRAM_INPUT: rev-parse --abbrev-ref HEAD\n" \
    "MOCK_PROGRAM_RETURN: 0\n" \
    "MOCK_PROGRAM_OUTPUT: local_branch1\n" \
    "MOCK_PROGRAM_INPUT: rev-parse --abbrev-ref --symbolic-full-name @{u}\n" \
    "MOCK_PROGRAM_RETURN: 0\n" \
    "MOCK_PROGRAM_OUTPUT: origin_repo1/remote_branch1\n" \
    "MOCK_PROGRAM_INPUT: shortlog -s HEAD ^origin_repo1/remote_branch1\n" \
    "MOCK_PROGRAM_RETURN: 0\n" \
    "MOCK_PROGRAM_OUTPUT: 22 some author\n" \
    "MOCK_PROGRAM_INPUT: status --porcelain\n" \
    "MOCK_PROGRAM_RETURN: 0\n" \
    "MOCK_PROGRAM_OUTPUT: ?? file1\n" \
    "MOCK_PROGRAM_INPUT: status\n" \
    "MOCK_PROGRAM_RETURN: 0\n" \
    "MOCK_PROGRAM_OUTPUT: On branch local_branch1\n" \
    "Your branch is ahead of 'origin_repo1/remote_branch1' by 22 commits.\n" \
    )
  # ExtraRepo2: fully clean (no 'status' entry since gitdist skips it).
  open("ExtraRepo2/.mockprogram_inout.txt", "w").write(
    "MOCK_PROGRAM_INPUT: rev-parse --abbrev-ref HEAD\n" \
    "MOCK_PROGRAM_RETURN: 0\n" \
    "MOCK_PROGRAM_OUTPUT: local_branch2\n" \
    "MOCK_PROGRAM_INPUT: rev-parse --abbrev-ref --symbolic-full-name @{u}\n" \
    "MOCK_PROGRAM_RETURN: 0\n" \
    "MOCK_PROGRAM_OUTPUT: origin_repo2/remote_branch2\n" \
    "MOCK_PROGRAM_INPUT: shortlog -s HEAD ^origin_repo2/remote_branch2\n" \
    "MOCK_PROGRAM_RETURN: 0\n" \
    "MOCK_PROGRAM_OUTPUT: \n" \
    "MOCK_PROGRAM_INPUT: status --porcelain\n" \
    "MOCK_PROGRAM_RETURN: 0\n" \
    "MOCK_PROGRAM_OUTPUT: \n" \
    )
def writeGitMockProgram_base_3_2_1_repo1_0_0_0_repo2_4_0_2():
  """Write mock-git scripts: base repo dirty, ExtraRepo1 clean, ExtraRepo2
  4 commits ahead (3+1 across two shortlog authors) with 2 untracked files.

  Digits encode numCommits_numModified_numUntracked per repo (presumably --
  confirm against callers).  Requires ExtraRepo1/ ExtraRepo2/ to exist.
  """
  # Base repo: 3 commits ahead, 2 modified, 1 untracked.
  open(".mockprogram_inout.txt", "w").write(
    "MOCK_PROGRAM_INPUT: rev-parse --abbrev-ref HEAD\n" \
    "MOCK_PROGRAM_RETURN: 0\n" \
    "MOCK_PROGRAM_OUTPUT: local_branch0\n" \
    "MOCK_PROGRAM_INPUT: rev-parse --abbrev-ref --symbolic-full-name @{u}\n" \
    "MOCK_PROGRAM_RETURN: 0\n" \
    "MOCK_PROGRAM_OUTPUT: origin_repo0/remote_branch0\n" \
    "MOCK_PROGRAM_INPUT: shortlog -s HEAD ^origin_repo0/remote_branch0\n" \
    "MOCK_PROGRAM_RETURN: 0\n" \
    "MOCK_PROGRAM_OUTPUT: 3 some author\n" \
    "MOCK_PROGRAM_INPUT: status --porcelain\n" \
    "MOCK_PROGRAM_RETURN: 0\n" \
    "MOCK_PROGRAM_OUTPUT: M file1\n" \
    " M file2\n" \
    "?? file3\n" \
    "MOCK_PROGRAM_INPUT: status\n" \
    "MOCK_PROGRAM_RETURN: 0\n" \
    "MOCK_PROGRAM_OUTPUT: On branch local_branch0\n" \
    "Your branch is ahead of 'origin_repo0/remote_branch0' by 3 commits.\n" \
    )
  # ExtraRepo1: fully clean.
  open("ExtraRepo1/.mockprogram_inout.txt", "w").write(
    "MOCK_PROGRAM_INPUT: rev-parse --abbrev-ref HEAD\n" \
    "MOCK_PROGRAM_RETURN: 0\n" \
    "MOCK_PROGRAM_OUTPUT: local_branch1\n" \
    "MOCK_PROGRAM_INPUT: rev-parse --abbrev-ref --symbolic-full-name @{u}\n" \
    "MOCK_PROGRAM_RETURN: 0\n" \
    "MOCK_PROGRAM_OUTPUT: origin_repo1/remote_branch1\n" \
    "MOCK_PROGRAM_INPUT: shortlog -s HEAD ^origin_repo1/remote_branch1\n" \
    "MOCK_PROGRAM_RETURN: 0\n" \
    "MOCK_PROGRAM_OUTPUT: \n" \
    "MOCK_PROGRAM_INPUT: status --porcelain\n" \
    "MOCK_PROGRAM_RETURN: 0\n" \
    "MOCK_PROGRAM_OUTPUT: \n" \
    )
  # ExtraRepo2: 4 commits ahead (two authors), 2 untracked files.
  open("ExtraRepo2/.mockprogram_inout.txt", "w").write(
    "MOCK_PROGRAM_INPUT: rev-parse --abbrev-ref HEAD\n" \
    "MOCK_PROGRAM_RETURN: 0\n" \
    "MOCK_PROGRAM_OUTPUT: local_branch2\n" \
    "MOCK_PROGRAM_INPUT: rev-parse --abbrev-ref --symbolic-full-name @{u}\n" \
    "MOCK_PROGRAM_RETURN: 0\n" \
    "MOCK_PROGRAM_OUTPUT: origin_repo2/remote_branch2\n" \
    "MOCK_PROGRAM_INPUT: shortlog -s HEAD ^origin_repo2/remote_branch2\n" \
    "MOCK_PROGRAM_RETURN: 0\n" \
    "MOCK_PROGRAM_OUTPUT: 3 some author\n" \
    "1 some other author\n" \
    "MOCK_PROGRAM_INPUT: status --porcelain\n" \
    "MOCK_PROGRAM_RETURN: 0\n" \
    "MOCK_PROGRAM_OUTPUT: ?? file1\n" \
    "?? file3\n" \
    "MOCK_PROGRAM_INPUT: status\n" \
    "MOCK_PROGRAM_RETURN: 0\n" \
    "MOCK_PROGRAM_OUTPUT: On branch local_branch2\n" \
    "Your branch is ahead of 'origin_repo2/remote_branch2' by 4 commits.\n" \
    )
class test_gitdist_getRepoVersionDictFromRepoVersionFileString(unittest.TestCase):
  """Unit tests for gitdist's getRepoVersionDictFromRepoVersionFileString()."""

  def setUp(self):
    pass

  def test_repoVersionFile_withSummary_1(self):
    # Version-file text with commit summary lines: summaries are ignored
    # and only the repo-name -> sha1 mapping is returned.
    versionDict = \
      getRepoVersionDictFromRepoVersionFileString(repoVersionFile_withSummary_1)
    self.assertEqual(
      versionDict,
      { 'MockTrilinos': 'sha1_1',
        'extraTrilinosRepo': 'sha1_2',
        'extraRepoOnePackage': 'sha1_3' })

  def test_repoVersionFile_withoutSummary_1(self):
    # Version-file text without summary lines parses to the same mapping.
    versionDict = \
      getRepoVersionDictFromRepoVersionFileString(repoVersionFile_withoutSummary_1)
    self.assertEqual(
      versionDict,
      { 'MockTrilinos': 'sha1_1',
        'extraRepoTwoPackages': 'sha1_2',
        'extraRepoOnePackageThreeSubpackages': 'sha1_3' })
# ToDo: Add unit tests for requoteCmndLineArgsIntoArray!
#
# Test entire script gitdist
#
def assertContainsGitdistHelpHeader(testObj, cmndOut):
  """Assert that cmndOut starts with the gitdist usage synopsis line."""
  # The usage line is the first output line; everything after the first
  # ':' is the synopsis we compare against.
  firstLine = cmndOut.splitlines()[0]
  actualUsage = firstLine.split(s(":"))[1].strip()
  expectedUsage = s("gitdist [gitdist arguments] <raw-git-command> [git arguments]")
  testObj.assertEqual(actualUsage, expectedUsage)
def assertContainsAllGitdistHelpSections(testObj, cmndOut):
  """Assert that every gitdist long-help section header appears in cmndOut."""
  # Same checks as spelling each assertEqual out by hand, in the same order.
  sectionHeaders = [
    "OVERVIEW:",
    "REPO SELECTION AND SETUP:",
    "SUMMARY OF REPO STATUS:",
    "REPO VERSION FILES:",
    "USEFUL ALIASES:",
    "USAGE TIPS:",
    "SCRIPT DEPENDENCIES:",
    ]
  for header in sectionHeaders:
    testObj.assertEqual(
      GeneralScriptSupport.extractLinesMatchingRegex(cmndOut, "^"+header+"$"),
      header+"\n")
class test_gitdist(unittest.TestCase):
def setUp(self):
None
def test_default(self):
(cmndOut, errOut) = getCmndOutput(gitdistPathNoColor, rtnCode=True)
cmndOut_expected = "Must specify git command. See 'git --help' for options.\n"
self.assertEqual(s(cmndOut), s(cmndOut_expected))
self.assertEqual(errOut, 1)
# Make sure the default --help shows the section "OVERVIEW"
def test_help(self):
cmndOut = getCmndOutput(gitdistPath+" --help")
assertContainsGitdistHelpHeader(self, cmndOut)
self.assertEqual(
GeneralScriptSupport.extractLinesMatchingRegex(cmndOut,"^OVERVIEW:$"), "")
self.assertEqual(
GeneralScriptSupport.extractLinesMatchingRegex(cmndOut,"^REPO SELECTION AND SETUP:$"), "")
# Make sure --dist-help= does not print OVERVIEW section
def test_dist_help_none_help(self):
cmndOut = getCmndOutput(gitdistPath+" --dist-help= --help")
assertContainsGitdistHelpHeader(self, cmndOut)
self.assertEqual(
GeneralScriptSupport.extractLinesMatchingRegex(cmndOut,"^OVERVIEW:$"), "")
self.assertEqual(
GeneralScriptSupport.extractLinesMatchingRegex(cmndOut,"^Options:$"), "Options:\n")
# --dist-help=aliases --help
def test_dist_help_aliases_help(self):
cmndOut = getCmndOutput(gitdistPath+" --dist-help=aliases --help")
assertContainsGitdistHelpHeader(self, cmndOut)
self.assertEqual(
GeneralScriptSupport.extractLinesMatchingRegex(cmndOut,"^USEFUL ALIASES:$"), "USEFUL ALIASES:\n")
self.assertEqual(
GeneralScriptSupport.extractLinesMatchingRegex(cmndOut,"^REPO SELECTION AND SETUP:$"), "")
# Make sure --dist-help=all prints all the topic headers
def test_dist_help_all_help(self):
cmndOut = getCmndOutput(gitdistPath+" --dist-help=all --help")
assertContainsGitdistHelpHeader(self, cmndOut)
assertContainsAllGitdistHelpSections(self, cmndOut)
# Tet that --dist-help --help prints nice error message
def test_dist_help_help(self):
cmndOut = getCmndOutput(gitdistPath+" --dist-help --help")
cmndOut_expected = "gitdist: error: option --dist-help: invalid choice: '--help' (choose from '', 'overview', 'repo-selection-and-setup', 'dist-repo-status', 'repo-versions', 'aliases', 'usage-tips', 'script-dependencies', 'all')\n"
self.assertEqual(s(cmndOut), s(cmndOut_expected))
# Test --dist-helps=invalid-pick picked up as invalid value.
def test_dist_help_invalid_pick_help(self):
cmndOut = getCmndOutput(gitdistPath+" --dist-help=invalid-pick --help")
assertContainsGitdistHelpHeader(self, cmndOut)
errorToFind = "gitdist: error: option --dist-help: invalid choice: 'invalid-pick' (choose from '', 'overview', 'repo-selection-and-setup', 'dist-repo-status', 'repo-versions', 'aliases', 'usage-tips', 'script-dependencies', 'all')"
self.assertEqual(
GeneralScriptSupport.extractLinesMatchingSubstr(cmndOut,errorToFind), errorToFind+"\n")
# Test --dist-help (show error string)
def test_dist_help(self):
(cmndOut, errOut) = getCmndOutput(gitdistPath+" --dist-help", rtnCode=True)
if sys.version_info < (3,):
anOrOne = "an"
else:
anOrOne = "1"
self.assertEqual(
s(cmndOut), s("gitdist: error: --dist-help option requires "+anOrOne+" argument\n"))
self.assertEqual(errOut, 2)
# Test --dist-help= (show no-op string)
def test_dist_help_none(self):
(cmndOut, errOut) = getCmndOutput(gitdistPathNoColor+" --dist-help=", rtnCode=True)
self.assertEqual(
s(cmndOut), s("Must specify git command. See 'git --help' for options.\n"))
self.assertEqual(errOut, 1)
# Test --dist-help=overview
def test_dist_help_overview(self):
cmndOut = getCmndOutput(gitdistPath+" --dist-help=overview")
self.assertEqual(
GeneralScriptSupport.extractLinesMatchingRegex(cmndOut,"^OVERVIEW:$"), "OVERVIEW:\n")
self.assertEqual(
GeneralScriptSupport.extractLinesMatchingRegex(cmndOut,"^Options:$"), "")
# Test --dist-help=usage-tips
def test_dist_help_usage_tips(self):
cmndOut = getCmndOutput(gitdistPath+" --dist-help=usage-tips")
self.assertEqual(
GeneralScriptSupport.extractLinesMatchingRegex(cmndOut,"^USAGE TIPS:$"), "USAGE TIPS:\n")
self.assertEqual(
GeneralScriptSupport.extractLinesMatchingRegex(cmndOut,"^Options:$"), "")
# Test --dist-help=all
def test_dist_help_all(self):
cmndOut = getCmndOutput(gitdistPath+" --dist-help=all")
assertContainsAllGitdistHelpSections(self, cmndOut)
self.assertEqual(
GeneralScriptSupport.extractLinesMatchingRegex(cmndOut,"^Options:$"), "")
def test_noEgGit(self):
(cmndOut, errOut) = getCmndOutput(gitdistPathNoColor+" --dist-use-git= log",
rtnCode=True)
cmndOut_expected = "Can't find git, please set --dist-use-git\n"
self.assertEqual(s(cmndOut), s(cmndOut_expected))
self.assertEqual(errOut, 1)
def test_log_args(self):
cmndOut = getCmndOutputInMockProjectDir(gitdistPathMock+" log HEAD -1")
cmndOut_expected = \
"\n*** Base Git Repo: MockTrilinos\n" \
"['mockgit', 'log', 'HEAD', '-1']\n\n"
self.assertEqual(s(cmndOut), s(cmndOut_expected))
  def test_dot_gitdist(self):
    # Verify repo selection precedence: .gitdist.default is read when
    # present, .gitdist overrides it, and --dist-repos overrides both.
    os.chdir(testBaseDir)
    try:
      # Create a mock git meta-project
      testDir = createAndMoveIntoTestDir("gitdist_dot_gitdist")
      os.mkdir("ExtraRepo1")
      os.makedirs("Path/To/ExtraRepo2")
      os.mkdir("ExtraRepo3")
      # Make sure .gitdist.default is found and read correctly
      open(".gitdist.default", "w").write(
        ".\n" \
        "ExtraRepo1\n" \
        "Path/To/ExtraRepo2\n" \
        "MissingExtraRep\n" \
        "ExtraRepo3\n"
        )
      cmndOut = GeneralScriptSupport.getCmndOutput(gitdistPathMock+" status",
        workingDir=testDir)
      cmndOut_expected = \
        "\n*** Base Git Repo: MockProjectDir\n" \
        "['mockgit', 'status']\n\n" \
        "*** Git Repo: ExtraRepo1\n" \
        "['mockgit', 'status']\n\n" \
        "*** Git Repo: Path/To/ExtraRepo2\n" \
        "['mockgit', 'status']\n\n" \
        "*** Git Repo: ExtraRepo3\n" \
        "['mockgit', 'status']\n\n"
      self.assertEqual(s(cmndOut), s(cmndOut_expected))
      # NOTE: Above ensures that all of the paths are read correctly and that
      # missing paths (MissingExtraRepo) are ignored.
      # Make sure that .gitdist overrides .gitdist.default
      open(".gitdist", "w").write(
        ".\n" \
        "ExtraRepo1\n" \
        "ExtraRepo3\n"
        )
      cmndOut = GeneralScriptSupport.getCmndOutput(gitdistPathMock+" status",
        workingDir=testDir)
      cmndOut_expected = \
        "\n*** Base Git Repo: MockProjectDir\n" \
        "['mockgit', 'status']\n\n" \
        "*** Git Repo: ExtraRepo1\n" \
        "['mockgit', 'status']\n\n" \
        "*** Git Repo: ExtraRepo3\n" \
        "['mockgit', 'status']\n\n"
      self.assertEqual(s(cmndOut), s(cmndOut_expected))
      # Make sure that --dist-repos overrides all files
      cmndOut = GeneralScriptSupport.getCmndOutput(
        gitdistPathMock+" --dist-repos=.,ExtraRepo1,Path/To/ExtraRepo2 status",
        workingDir=testDir)
      cmndOut_expected = \
        "\n*** Base Git Repo: MockProjectDir\n" \
        "['mockgit', 'status']\n\n" \
        "*** Git Repo: ExtraRepo1\n" \
        "['mockgit', 'status']\n\n" \
        "*** Git Repo: Path/To/ExtraRepo2\n" \
        "['mockgit', 'status']\n\n"
      self.assertEqual(s(cmndOut), s(cmndOut_expected))
    finally:
      os.chdir(testBaseDir)
def test_log_args_extra_repo_1(self):
cmndOut = getCmndOutputInMockProjectDir(
gitdistPathMock+" --dist-repos=.,extraTrilinosRepo log HEAD -1")
cmndOut_expected = \
"\n*** Base Git Repo: MockTrilinos\n" \
"['mockgit', 'log', 'HEAD', '-1']\n\n" \
"*** Git Repo: extraTrilinosRepo\n" \
"['mockgit', 'log', 'HEAD', '-1']\n\n"
self.assertEqual(s(cmndOut), s(cmndOut_expected))
def test_log_args_extra_repo_2_not_first(self):
cmndOut = getCmndOutputInMockProjectDir(
gitdistPathMock+\
" --dist-repos=.,extraTrilinosRepo,extraRepoOnePackage "+\
" --dist-not-repos=extraTrilinosRepo "+\
" log HEAD -1"
)
cmndOut_expected = \
"\n*** Base Git Repo: MockTrilinos\n" \
"['mockgit', 'log', 'HEAD', '-1']\n\n" \
"*** Git Repo: extraRepoOnePackage\n" \
"['mockgit', 'log', 'HEAD', '-1']\n\n"
self.assertEqual(s(cmndOut), s(cmndOut_expected))
def test_log_args_extra_repo_2_not_second(self):
cmndOut = getCmndOutputInMockProjectDir(
gitdistPathMock+\
" --dist-repos=.,extraTrilinosRepo,extraRepoOnePackage "+\
" --dist-not-repos=extraTrilinosRepo "+\
" log HEAD -1"
)
cmndOut_expected = \
"\n*** Base Git Repo: MockTrilinos\n" \
"['mockgit', 'log', 'HEAD', '-1']\n\n" \
"*** Git Repo: extraRepoOnePackage\n" \
"['mockgit', 'log', 'HEAD', '-1']\n\n"
self.assertEqual(s(cmndOut), s(cmndOut_expected))
def test_log_args_extra_repo_1_not_base(self):
cmndOut = getCmndOutputInMockProjectDir(
gitdistPathMock+\
" --dist-repos=.,extraTrilinosRepo "+\
" --dist-not-repos=. "+\
" log HEAD -1"
)
cmndOut_expected = \
"\n*** Git Repo: extraTrilinosRepo\n" \
"['mockgit', 'log', 'HEAD', '-1']\n\n"
self.assertEqual(s(cmndOut), s(cmndOut_expected))
  def test_dist_mod_only_1_change_base(self):
    # --dist-mod-only: only the base repo has local changes, so 'status'
    # output appears for the base repo alone; clean repos are skipped.
    os.chdir(testBaseDir)
    try:
      # Create a mock git meta-project
      testDir = createAndMoveIntoTestDir("gitdist_dist_mod_only_1_change_base")
      os.mkdir("ExtraRepo1")
      os.mkdir("ExtraRepo2")
      # Base repo: 3 commits ahead and one modified file => "modified".
      open(".mockprogram_inout.txt", "w").write(
        "MOCK_PROGRAM_INPUT: rev-parse --abbrev-ref HEAD\n" \
        "MOCK_PROGRAM_RETURN: 0\n" \
        "MOCK_PROGRAM_OUTPUT: local_branch0\n" \
        "MOCK_PROGRAM_INPUT: rev-parse --abbrev-ref --symbolic-full-name @{u}\n" \
        "MOCK_PROGRAM_RETURN: 0\n" \
        "MOCK_PROGRAM_OUTPUT: origin_repo0/remote_branch0\n" \
        "MOCK_PROGRAM_INPUT: shortlog -s HEAD ^origin_repo0/remote_branch0\n" \
        "MOCK_PROGRAM_RETURN: 0\n" \
        "MOCK_PROGRAM_OUTPUT: 3 some author\n" \
        "MOCK_PROGRAM_INPUT: status --porcelain\n" \
        "MOCK_PROGRAM_RETURN: 0\n" \
        "MOCK_PROGRAM_OUTPUT: M file1\n" \
        "MOCK_PROGRAM_INPUT: status\n" \
        "MOCK_PROGRAM_RETURN: 0\n" \
        "MOCK_PROGRAM_OUTPUT: On branch local_branch0\n" \
        "Your branch is ahead of 'origin_repo0/remote_branch0' by 3 commits.\n" \
        )
      # ExtraRepo1: fully clean (no 'status' entry is ever requested).
      open("ExtraRepo1/.mockprogram_inout.txt", "w").write(
        "MOCK_PROGRAM_INPUT: rev-parse --abbrev-ref HEAD\n" \
        "MOCK_PROGRAM_RETURN: 0\n" \
        "MOCK_PROGRAM_OUTPUT: local_branch1\n" \
        "MOCK_PROGRAM_INPUT: rev-parse --abbrev-ref --symbolic-full-name @{u}\n" \
        "MOCK_PROGRAM_RETURN: 0\n" \
        "MOCK_PROGRAM_OUTPUT: origin_repo1/remote_branch1\n" \
        "MOCK_PROGRAM_INPUT: shortlog -s HEAD ^origin_repo1/remote_branch1\n" \
        "MOCK_PROGRAM_RETURN: 0\n" \
        "MOCK_PROGRAM_OUTPUT: \n" \
        "MOCK_PROGRAM_INPUT: status --porcelain\n" \
        "MOCK_PROGRAM_RETURN: 0\n" \
        "MOCK_PROGRAM_OUTPUT: \n" \
        )
      # ExtraRepo2: also fully clean.
      open("ExtraRepo2/.mockprogram_inout.txt", "w").write(
        "MOCK_PROGRAM_INPUT: rev-parse --abbrev-ref HEAD\n" \
        "MOCK_PROGRAM_RETURN: 0\n" \
        "MOCK_PROGRAM_OUTPUT: local_branch2\n" \
        "MOCK_PROGRAM_INPUT: rev-parse --abbrev-ref --symbolic-full-name @{u}\n" \
        "MOCK_PROGRAM_RETURN: 0\n" \
        "MOCK_PROGRAM_OUTPUT: origin_repo2/remote_branch2\n" \
        "MOCK_PROGRAM_INPUT: shortlog -s HEAD ^origin_repo2/remote_branch2\n" \
        "MOCK_PROGRAM_RETURN: 0\n" \
        "MOCK_PROGRAM_OUTPUT: \n" \
        "MOCK_PROGRAM_INPUT: status --porcelain\n" \
        "MOCK_PROGRAM_RETURN: 0\n" \
        "MOCK_PROGRAM_OUTPUT: \n" \
        )
      cmndOut = GeneralScriptSupport.getCmndOutput(
        gitdistPath + " --dist-no-color --dist-use-git="+mockGitPath \
        +" --dist-mod-only --dist-repos=.,ExtraRepo1,ExtraRepo2 status",
        workingDir=testDir)
      cmndOut_expected = \
        "\n*** Base Git Repo: MockProjectDir\n" \
        "On branch local_branch0\n" \
        "Your branch is ahead of 'origin_repo0/remote_branch0' by 3 commits.\n\n\n"
      self.assertEqual(s(cmndOut), s(cmndOut_expected))
    finally:
      os.chdir(testBaseDir)
  def test_dist_mod_only_1_change_extrarepo1(self):
    # --dist-mod-only: only ExtraRepo1 is ahead of its tracking branch, so
    # 'status' output appears for ExtraRepo1 alone.
    os.chdir(testBaseDir)
    try:
      # Create a mock git meta-project
      testDir = createAndMoveIntoTestDir("gitdist_dist_mod_only_1_change_extrarepo1")
      os.mkdir("ExtraRepo1")
      os.mkdir("ExtraRepo2")
      # Base repo: fully clean.
      open(".mockprogram_inout.txt", "w").write(
        "MOCK_PROGRAM_INPUT: rev-parse --abbrev-ref HEAD\n" \
        "MOCK_PROGRAM_RETURN: 0\n" \
        "MOCK_PROGRAM_OUTPUT: local_branch0\n" \
        "MOCK_PROGRAM_INPUT: rev-parse --abbrev-ref --symbolic-full-name @{u}\n" \
        "MOCK_PROGRAM_RETURN: 0\n" \
        "MOCK_PROGRAM_OUTPUT: origin_repo0/remote_branch0\n" \
        "MOCK_PROGRAM_INPUT: shortlog -s HEAD ^origin_repo0/remote_branch0\n" \
        "MOCK_PROGRAM_RETURN: 0\n" \
        "MOCK_PROGRAM_OUTPUT: \n" \
        "MOCK_PROGRAM_INPUT: status --porcelain\n" \
        "MOCK_PROGRAM_RETURN: 0\n" \
        "MOCK_PROGRAM_OUTPUT: \n" \
        )
      # ExtraRepo1: 1 commit ahead => "modified", so status gets requested.
      open("ExtraRepo1/.mockprogram_inout.txt", "w").write(
        "MOCK_PROGRAM_INPUT: rev-parse --abbrev-ref HEAD\n" \
        "MOCK_PROGRAM_RETURN: 0\n" \
        "MOCK_PROGRAM_OUTPUT: local_branch1\n" \
        "MOCK_PROGRAM_INPUT: rev-parse --abbrev-ref --symbolic-full-name @{u}\n" \
        "MOCK_PROGRAM_RETURN: 0\n" \
        "MOCK_PROGRAM_OUTPUT: origin_repo1/remote_branch1\n" \
        "MOCK_PROGRAM_INPUT: shortlog -s HEAD ^origin_repo1/remote_branch1\n" \
        "MOCK_PROGRAM_RETURN: 0\n" \
        "MOCK_PROGRAM_OUTPUT: 1 some author\n" \
        "MOCK_PROGRAM_INPUT: status --porcelain\n" \
        "MOCK_PROGRAM_RETURN: 0\n" \
        "MOCK_PROGRAM_OUTPUT: \n" \
        "MOCK_PROGRAM_INPUT: status\n" \
        "MOCK_PROGRAM_RETURN: 0\n" \
        "MOCK_PROGRAM_OUTPUT: On branch local_branch1\n" \
        "Your branch is ahead of 'origin_repo1/remote_branch1' by 1 commits.\n" \
        )
      # ExtraRepo2: fully clean.
      open("ExtraRepo2/.mockprogram_inout.txt", "w").write(
        "MOCK_PROGRAM_INPUT: rev-parse --abbrev-ref HEAD\n" \
        "MOCK_PROGRAM_RETURN: 0\n" \
        "MOCK_PROGRAM_OUTPUT: local_branch2\n" \
        "MOCK_PROGRAM_INPUT: rev-parse --abbrev-ref --symbolic-full-name @{u}\n" \
        "MOCK_PROGRAM_RETURN: 0\n" \
        "MOCK_PROGRAM_OUTPUT: origin_repo2/remote_branch2\n" \
        "MOCK_PROGRAM_INPUT: shortlog -s HEAD ^origin_repo2/remote_branch2\n" \
        "MOCK_PROGRAM_RETURN: 0\n" \
        "MOCK_PROGRAM_OUTPUT: \n" \
        "MOCK_PROGRAM_INPUT: status --porcelain\n" \
        "MOCK_PROGRAM_RETURN: 0\n" \
        "MOCK_PROGRAM_OUTPUT: \n" \
        )
      cmndOut = GeneralScriptSupport.getCmndOutput(
        gitdistPath + " --dist-no-color --dist-use-git="+mockGitPath \
        +" --dist-mod-only --dist-repos=.,ExtraRepo1,ExtraRepo2 status",
        workingDir=testDir)
      cmndOut_expected = \
        "\n*** Git Repo: ExtraRepo1\nOn branch local_branch1\n" \
        "Your branch is ahead of 'origin_repo1/remote_branch1' by 1 commits.\n\n\n"
      self.assertEqual(s(cmndOut), s(cmndOut_expected))
    finally:
      os.chdir(testBaseDir)
def test_dist_mod_only_1_extrarepo1_not_tracking_branch(self):
os.chdir(testBaseDir)
try:
# Create a mock git meta-project
testDir = createAndMoveIntoTestDir("dist_mod_only_1_extrarepo1_not_tracking_branch")
os.mkdir("ExtraRepo1")
open(".mockprogram_inout.txt", "w").write(
"MOCK_PROGRAM_INPUT: rev-parse --abbrev-ref HEAD\n" \
"MOCK_PROGRAM_RETURN: 0\n" \
"MOCK_PROGRAM_OUTPUT: local_branch0\n" \
"MOCK_PROGRAM_INPUT: rev-parse --abbrev-ref --symbolic-full-name @{u}\n" \
"MOCK_PROGRAM_RETURN: 0\n" \
"MOCK_PROGRAM_OUTPUT: origin_repo0/remote_branch0\n" \
"MOCK_PROGRAM_INPUT: shortlog -s HEAD ^origin_repo0/remote_branch0\n" \
"MOCK_PROGRAM_RETURN: 0\n" \
"MOCK_PROGRAM_OUTPUT: 3 some author\n" \
"MOCK_PROGRAM_INPUT: status --porcelain\n" \
"MOCK_PROGRAM_RETURN: 0\n" \
"MOCK_PROGRAM_OUTPUT: M file1\n" \
"MOCK_PROGRAM_INPUT: status\n" \
"MOCK_PROGRAM_RETURN: 0\n" \
"MOCK_PROGRAM_OUTPUT: On branch local_branch0\n" \
"Your branch is ahead of 'origin_repo0/remote_branch0' by 3 commits.\n" \
)
open("ExtraRepo1/.mockprogram_inout.txt", "w").write(
"MOCK_PROGRAM_INPUT: rev-parse --abbrev-ref HEAD\n" \
"MOCK_PROGRAM_RETURN: 0\n" \
"MOCK_PROGRAM_OUTPUT: local_branch1\n" \
"MOCK_PROGRAM_INPUT: rev-parse --abbrev-ref --symbolic-full-name @{u}\n" \
"MOCK_PROGRAM_RETURN: 128\n" \
"MOCK_PROGRAM_OUTPUT: error: No upstream branch found for ''\n" \
"MOCK_PROGRAM_INPUT: status --porcelain\n" \
"MOCK_PROGRAM_RETURN: 0\n" \
"MOCK_PROGRAM_OUTPUT: \n" \
)
cmndOut = GeneralScriptSupport.getCmndOutput(
gitdistPath + " --dist-no-color --dist-use-git="+mockGitPath \
+" --dist-mod-only --dist-repos=.,ExtraRepo1,ExtraRepo2 status",
workingDir=testDir)
cmndOut_expected = \
"\n*** Base Git Repo: MockProjectDir\n" \
"On branch local_branch0\n" \
"Your branch is ahead of 'origin_repo0/remote_branch0' by 3 commits.\n\n\n"
self.assertEqual(s(cmndOut), s(cmndOut_expected))
finally:
os.chdir(testBaseDir)
def test_dist_mod_only_1_extrarepo1_not_tracking_branch_with_mods(self):
os.chdir(testBaseDir)
try:
# Create a mock git meta-project
testDir = createAndMoveIntoTestDir("dist_mod_only_1_extrarepo1_not_tracking_branch_with_mods")
os.mkdir("ExtraRepo1")
open(".mockprogram_inout.txt", "w").write(
"MOCK_PROGRAM_INPUT: rev-parse --abbrev-ref HEAD\n" \
"MOCK_PROGRAM_RETURN: 0\n" \
"MOCK_PROGRAM_OUTPUT: local_branch0\n" \
"MOCK_PROGRAM_INPUT: rev-parse --abbrev-ref --symbolic-full-name @{u}\n" \
"MOCK_PROGRAM_RETURN: 0\n" \
"MOCK_PROGRAM_OUTPUT: origin_repo0/remote_branch0\n" \
"MOCK_PROGRAM_INPUT: shortlog -s HEAD ^origin_repo0/remote_branch0\n" \
"MOCK_PROGRAM_RETURN: 0\n" \
"MOCK_PROGRAM_OUTPUT: \n" \
"MOCK_PROGRAM_INPUT: status --porcelain\n" \
"MOCK_PROGRAM_RETURN: 0\n" \
"MOCK_PROGRAM_OUTPUT: \n" \
)
open("ExtraRepo1/.mockprogram_inout.txt", "w").write(
"MOCK_PROGRAM_INPUT: rev-parse --abbrev-ref HEAD\n" \
"MOCK_PROGRAM_RETURN: 0\n" \
"MOCK_PROGRAM_OUTPUT: local_branch1\n" \
"MOCK_PROGRAM_INPUT: rev-parse --abbrev-ref --symbolic-full-name @{u}\n" \
"MOCK_PROGRAM_RETURN: 128\n" \
"MOCK_PROGRAM_OUTPUT: error: No upstream branch found for ''\n" \
"MOCK_PROGRAM_INPUT: status --porcelain\n" \
"MOCK_PROGRAM_RETURN: 0\n" \
"MOCK_PROGRAM_OUTPUT: M file1\n" \
"MOCK_PROGRAM_INPUT: status\n" \
"MOCK_PROGRAM_RETURN: 0\n" \
"MOCK_PROGRAM_OUTPUT: On branch local_branch1\n" \
"Your branch is ahead of 'origin_repo1/remote_branch1' by 1 commits.\n" \
)
# Make sure that --dist-repos overrides all files
cmndOut = GeneralScriptSupport.getCmndOutput(
gitdistPath + " --dist-no-color --dist-use-git="+mockGitPath \
+" --dist-mod-only --dist-repos=.,ExtraRepo1,ExtraRepo2 status",
workingDir=testDir)
cmndOut_expected = \
"\n*** Git Repo: ExtraRepo1\n" \
"On branch local_branch1\n" \
"Your branch is ahead of 'origin_repo1/remote_branch1' by 1 commits.\n\n\n"
self.assertEqual(s(cmndOut), s(cmndOut_expected))
finally:
os.chdir(testBaseDir)
def test_log_version_file(self):
cmndOut = getCmndOutputInMockProjectDir(
gitdistPathMock+ \
" --dist-version-file="+unitTestDataDir+"/versionFile_withSummary_1.txt"+\
" log _VERSION_ --some -other args")
cmndOut_expected = \
"\n*** Base Git Repo: MockTrilinos\n" \
"['mockgit', 'log', 'sha1_1', '--some', '-other', 'args']\n\n"
self.assertEqual(s(cmndOut), s(cmndOut_expected))
def test_log_version_file_extra_repo_1(self):
cmndOut = getCmndOutputInMockProjectDir(
gitdistPathMock+ \
" --dist-version-file="+unitTestDataDir+"/versionFile_withSummary_1.txt"+ \
" --dist-repos=.,extraTrilinosRepo"+ \
" log _VERSION_")
cmndOut_expected = \
"\n*** Base Git Repo: MockTrilinos\n" \
"['mockgit', 'log', 'sha1_1']\n" \
"\n*** Git Repo: extraTrilinosRepo\n['mockgit', 'log', 'sha1_2']\n\n"
self.assertEqual(s(cmndOut), s(cmndOut_expected))
def test_log_version_file_extra_repo_2(self):
cmndOut = getCmndOutputInMockProjectDir(
gitdistPathMock+ \
" --dist-version-file="+unitTestDataDir+"/versionFile_withSummary_1.txt"+ \
" --dist-repos=.,extraRepoOnePackage,extraTrilinosRepo"+ \
" log _VERSION_")
cmndOut_expected = \
"\n*** Base Git Repo: MockTrilinos\n" \
"['mockgit', 'log', 'sha1_1']\n" \
"\n*** Git Repo: extraRepoOnePackage\n['mockgit', 'log', 'sha1_3']\n" \
"\n*** Git Repo: extraTrilinosRepo\n['mockgit', 'log', 'sha1_2']\n\n"
self.assertEqual(s(cmndOut), s(cmndOut_expected))
def test_log_HEAD_version_file_extra_repo_1(self):
cmndOut = getCmndOutputInMockProjectDir(
gitdistPathMock+ \
" --dist-version-file="+unitTestDataDir+"/versionFile_withSummary_1.txt"+ \
" --dist-repos=.,extraTrilinosRepo"+ \
" log HEAD ^_VERSION_")
cmndOut_expected = \
"\n*** Base Git Repo: MockTrilinos\n" \
"['mockgit', 'log', 'HEAD', '^sha1_1']\n" \
"\n*** Git Repo: extraTrilinosRepo\n['mockgit', 'log', 'HEAD', '^sha1_2']\n\n"
self.assertEqual(s(cmndOut), s(cmndOut_expected))
def test_version_file_invalid_extra_repo(self):
cmndOut = getCmndOutputInMockProjectDir(
gitdistPathMock+ \
" --dist-version-file="+unitTestDataDir+"/versionFile_withSummary_1.txt"+ \
" --dist-repos=.,extraRepoTwoPackages"+ \
" log _VERSION_")
cmndOut_expected = \
"\n*** Base Git Repo: MockTrilinos\n['mockgit', 'log', 'sha1_1']\n" \
"\n*** Git Repo: extraRepoTwoPackages\nRepo 'extraRepoTwoPackages' is not in the list of repos ['.', 'extraRepoOnePackage', 'extraTrilinosRepo'] read in from the version file.\n"
self.assertEqual(s(cmndOut), s(cmndOut_expected))
def test_log_not_version_file_2(self):
cmndOut = getCmndOutputInMockProjectDir(
gitdistPathMock+ \
" --dist-version-file="+unitTestDataDir+"/versionFile_withSummary_1.txt"+ \
" --dist-version-file2="+unitTestDataDir+"/versionFile_withSummary_1_2.txt"+ \
" log _VERSION_ ^_VERSION2_")
cmndOut_expected = \
"\n*** Base Git Repo: MockTrilinos\n" \
"['mockgit', 'log', 'sha1_1', '^sha1_1_2']\n\n"
self.assertEqual(s(cmndOut), s(cmndOut_expected))
def test_log_not_version_file_2_extra_repo_1(self):
cmndOut = getCmndOutputInMockProjectDir(
gitdistPathMock+ \
" --dist-version-file="+unitTestDataDir+"/versionFile_withSummary_1.txt"+ \
" --dist-version-file2="+unitTestDataDir+"/versionFile_withSummary_1_2.txt"+ \
" --dist-repos=.,extraTrilinosRepo"+ \
" log _VERSION_ ^_VERSION2_")
cmndOut_expected = \
"\n*** Base Git Repo: MockTrilinos\n" \
"['mockgit', 'log', 'sha1_1', '^sha1_1_2']\n" \
"\n*** Git Repo: extraTrilinosRepo\n['mockgit', 'log', 'sha1_2', '^sha1_2_2']\n\n"
self.assertEqual(s(cmndOut), s(cmndOut_expected))
def test_log_since_until_version_file_2_extra_repo_1(self):
cmndOut = getCmndOutputInMockProjectDir(
gitdistPathMock+ \
" --dist-version-file="+unitTestDataDir+"/versionFile_withSummary_1.txt"+ \
" --dist-version-file2="+unitTestDataDir+"/versionFile_withSummary_1_2.txt"+ \
" --dist-repos=.,extraTrilinosRepo"+ \
" log _VERSION2_.._VERSION_")
cmndOut_expected = \
"\n*** Base Git Repo: MockTrilinos\n" \
"['mockgit', 'log', 'sha1_1_2..sha1_1']\n" \
"\n*** Git Repo: extraTrilinosRepo\n['mockgit', 'log', 'sha1_2_2..sha1_2']\n\n"
self.assertEqual(s(cmndOut), s(cmndOut_expected))
# The above test ensures that it repalces the SHA1s for in the same cmndline args
def test_dist_repo_status_all(self):
os.chdir(testBaseDir)
try:
# Create a mock git meta-project
testDir = createAndMoveIntoTestDir("gitdist_dist_repo_status_all")
os.mkdir("ExtraRepo1")
os.mkdir("ExtraRepo2")
writeGitMockProgram_base_3_2_1_repo1_22_0_2_repo2_0_0_0()
cmndOut = GeneralScriptSupport.getCmndOutput(
gitdistPath + " --dist-no-color --dist-use-git="+mockGitPath \
+" --dist-repos=.,ExtraRepo1,ExtraRepo2 dist-repo-status",
workingDir=testDir)
#print(cmndOut)
cmndOut_expected = \
"-----------------------------------------------------------------------------------------\n" \
"| ID | Repo Dir | Branch | Tracking Branch | C | M | ? |\n" \
"|----|-----------------------|---------------|-----------------------------|----|---|---|\n" \
"| 0 | MockProjectDir (Base) | local_branch0 | origin_repo0/remote_branch0 | 3 | 2 | 1 |\n" \
"| 1 | ExtraRepo1 | local_branch1 | origin_repo1/remote_branch1 | 22 | | 1 |\n" \
"| 2 | ExtraRepo2 | local_branch2 | origin_repo2/remote_branch2 | | | |\n" \
"-----------------------------------------------------------------------------------------\n" \
"\n" \
"(tip: to see a legend, pass in --dist-legend.)\n"
self.assertEqual(s(cmndOut), s(cmndOut_expected))
finally:
os.chdir(testBaseDir)
def test_dist_repo_status_mod_only_first(self):
os.chdir(testBaseDir)
try:
# Create a mock git meta-project
testDir = createAndMoveIntoTestDir("gitdist_dist_repo_status_mod_only_first")
os.mkdir("ExtraRepo1")
os.mkdir("ExtraRepo2")
writeGitMockProgram_base_3_2_1_repo1_22_0_2_repo2_0_0_0()
cmndOut = GeneralScriptSupport.getCmndOutput(
gitdistPath + " --dist-no-color --dist-use-git="+mockGitPath \
+" --dist-repos=.,ExtraRepo1,ExtraRepo2 --dist-mod-only dist-repo-status",
workingDir=testDir)
#print(cmndOut)
cmndOut_expected = \
"-----------------------------------------------------------------------------------------\n" \
"| ID | Repo Dir | Branch | Tracking Branch | C | M | ? |\n" \
"|----|-----------------------|---------------|-----------------------------|----|---|---|\n" \
"| 0 | MockProjectDir (Base) | local_branch0 | origin_repo0/remote_branch0 | 3 | 2 | 1 |\n" \
"| 1 | ExtraRepo1 | local_branch1 | origin_repo1/remote_branch1 | 22 | | 1 |\n" \
"-----------------------------------------------------------------------------------------\n" \
"\n" \
"(tip: to see a legend, pass in --dist-legend.)\n"
self.assertEqual(s(cmndOut), s(cmndOut_expected))
finally:
os.chdir(testBaseDir)
def test_dist_repo_status_mod_only_first_legend(self):
os.chdir(testBaseDir)
try:
# Create a mock git meta-project
testDir = createAndMoveIntoTestDir("gitdist_dist_repo_status_mod_only_first_legend")
os.mkdir("ExtraRepo1")
os.mkdir("ExtraRepo2")
writeGitMockProgram_base_3_2_1_repo1_22_0_2_repo2_0_0_0()
cmndOut = GeneralScriptSupport.getCmndOutput(
gitdistPath + " --dist-no-color --dist-use-git="+mockGitPath \
+" --dist-repos=.,ExtraRepo1,ExtraRepo2 --dist-mod-only" \
+" --dist-legend dist-repo-status",
workingDir=testDir)
#print("+++++++++\n" + cmndOut + "+++++++\n")
cmndOut_expected = \
"-----------------------------------------------------------------------------------------\n" \
"| ID | Repo Dir | Branch | Tracking Branch | C | M | ? |\n" \
"|----|-----------------------|---------------|-----------------------------|----|---|---|\n" \
"| 0 | MockProjectDir (Base) | local_branch0 | origin_repo0/remote_branch0 | 3 | 2 | 1 |\n" \
"| 1 | ExtraRepo1 | local_branch1 | origin_repo1/remote_branch1 | 22 | | 1 |\n" \
"-----------------------------------------------------------------------------------------\n" \
"\n" \
"Legend:\n" \
"* ID: Repository ID, zero based (order git commands are run)\n" \
"* Repo Dir: Relative to base repo (base repo shown first with '(Base)')\n" \
"* Branch: Current branch (or detached HEAD)\n" \
"* Tracking Branch: Tracking branch (or empty if no tracking branch exists)\n" \
"* C: Number local commits w.r.t. tracking branch (empty if zero or no TB)\n" \
"* M: Number of tracked modified (uncommitted) files (empty if zero)\n" \
"* ?: Number of untracked, non-ignored files (empty if zero)\n\n"
self.assertEqual(s(cmndOut), s(cmndOut_expected))
finally:
os.chdir(testBaseDir)
def test_dist_repo_status_mod_only_first_last(self):
os.chdir(testBaseDir)
try:
# Create a mock git meta-project
testDir = createAndMoveIntoTestDir("gitdist_dist_repo_status_mod_only_first_last")
os.mkdir("ExtraRepo1")
os.mkdir("ExtraRepo2")
writeGitMockProgram_base_3_2_1_repo1_0_0_0_repo2_4_0_2()
cmndOut = GeneralScriptSupport.getCmndOutput(
gitdistPath + " --dist-no-color --dist-use-git="+mockGitPath \
+" --dist-repos=.,ExtraRepo1,ExtraRepo2 --dist-mod-only dist-repo-status",
workingDir=testDir)
#print(cmndOut)
cmndOut_expected = \
"----------------------------------------------------------------------------------------\n" \
"| ID | Repo Dir | Branch | Tracking Branch | C | M | ? |\n" \
"|----|-----------------------|---------------|-----------------------------|---|---|---|\n" \
"| 0 | MockProjectDir (Base) | local_branch0 | origin_repo0/remote_branch0 | 3 | 2 | 1 |\n" \
"| 2 | ExtraRepo2 | local_branch2 | origin_repo2/remote_branch2 | 4 | | 2 |\n" \
"----------------------------------------------------------------------------------------\n" \
"\n" \
"(tip: to see a legend, pass in --dist-legend.)\n"
self.assertEqual(s(cmndOut), s(cmndOut_expected))
finally:
os.chdir(testBaseDir)
def test_dist_repo_status_extra_args_fail(self):
os.chdir(testBaseDir)
try:
# Create a mock git meta-project
testDir = createAndMoveIntoTestDir("dist_repo_status_extra_args_fail")
(cmndOut, errOut) = getCmndOutput(
gitdistPath + " --dist-no-color --dist-use-git="+mockGitPath \
+" --dist-repos=.,ExtraRepo1,ExtraRepo2 --dist-mod-only" \
+" --dist-legend dist-repo-status --name-status",
rtnCode=True)
#print(cmndOut)
cmndOut_expected = \
"Error, passing in extra git commands/args ='--name-status' with special comamnd 'dist-repo-status is not allowed!\n"
self.assertEqual(cmndOut, s(cmndOut_expected))
self.assertEqual(errOut, 1)
finally:
os.chdir(testBaseDir)
if __name__ == '__main__':
unittest.main()
| 39.613523
| 236
| 0.623282
| 6,303
| 55,657
| 5.291766
| 0.080279
| 0.093992
| 0.094262
| 0.051268
| 0.807279
| 0.778228
| 0.75823
| 0.733915
| 0.713258
| 0.695899
| 0
| 0.018414
| 0.207683
| 55,657
| 1,404
| 237
| 39.641738
| 0.737947
| 0.067341
| 0
| 0.721659
| 0
| 0.019355
| 0.510436
| 0.130392
| 0
| 0
| 0
| 0.000712
| 0.070968
| 1
| 0.053456
| false
| 0.003687
| 0.004608
| 0
| 0.064516
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
e4e2f7c240033460fd897db29fd259c8392ba838
| 205
|
py
|
Python
|
config.py
|
vortex-17/file-split-Cryptography
|
621673ad0aa327f97f0e38d9d94216cfd12f8e0e
|
[
"MIT"
] | null | null | null |
config.py
|
vortex-17/file-split-Cryptography
|
621673ad0aa327f97f0e38d9d94216cfd12f8e0e
|
[
"MIT"
] | null | null | null |
config.py
|
vortex-17/file-split-Cryptography
|
621673ad0aa327f97f0e38d9d94216cfd12f8e0e
|
[
"MIT"
] | null | null | null |
{
"data_storage" : ["/Users/vivek/Desktop/file-split/storage1/", "/Users/vivek/Desktop/file-split/storage2/", "/Users/vivek/Desktop/file-split/storage3/", "/Users/vivek/Desktop/file-split/storage4/"]
}
| 68.333333
| 201
| 0.721951
| 26
| 205
| 5.653846
| 0.423077
| 0.272109
| 0.462585
| 0.571429
| 0.707483
| 0
| 0
| 0
| 0
| 0
| 0
| 0.020619
| 0.053659
| 205
| 3
| 202
| 68.333333
| 0.737113
| 0
| 0
| 0
| 0
| 0
| 0.854369
| 0.796117
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
5f514cc095606ba5c49259cd8152b6de6b3a2b0c
| 12,071
|
py
|
Python
|
NiaPy/tests/test_pso.py
|
lucijabrezocnik/NiaPy
|
1582d1af835c022c77224ea0234178a399efc106
|
[
"MIT"
] | null | null | null |
NiaPy/tests/test_pso.py
|
lucijabrezocnik/NiaPy
|
1582d1af835c022c77224ea0234178a399efc106
|
[
"MIT"
] | null | null | null |
NiaPy/tests/test_pso.py
|
lucijabrezocnik/NiaPy
|
1582d1af835c022c77224ea0234178a399efc106
|
[
"MIT"
] | 1
|
2018-06-13T08:10:23.000Z
|
2018-06-13T08:10:23.000Z
|
# encoding=utf8
from NiaPy.algorithms.basic import ParticleSwarmOptimization, ParticleSwarmAlgorithm, OppositionVelocityClampingParticleSwarmOptimization, CenterParticleSwarmOptimization, MutatedParticleSwarmOptimization, MutatedCenterParticleSwarmOptimization, ComprehensiveLearningParticleSwarmOptimizer, MutatedCenterUnifiedParticleSwarmOptimization
from NiaPy.tests.test_algorithm import AlgorithmTestCase, MyBenchmark
class PSOTestCase(AlgorithmTestCase):
def setUp(self):
AlgorithmTestCase.setUp(self)
self.algo = ParticleSwarmOptimization
def test_algorithm_info(self):
al = self.algo.algorithmInfo()
self.assertIsNotNone(al)
def test_parameter_type(self):
d = self.algo.typeParameters()
self.assertTrue(d['C1'](10))
self.assertTrue(d['C2'](10))
self.assertTrue(d['C1'](0))
self.assertTrue(d['C2'](0))
self.assertFalse(d['C1'](-10))
self.assertFalse(d['C2'](-10))
self.assertTrue(d['NP'](10))
self.assertFalse(d['NP'](-10))
self.assertFalse(d['NP'](0))
def test_custom_works_fine(self):
pso_custom = self.algo(NP=40, C1=2.0, C2=2.0, w=0.7, vMin=-4, vMax=4, seed=self.seed)
pso_customc = self.algo(NP=40, C1=2.0, C2=2.0, w=0.7, vMin=-4, vMax=4, seed=self.seed)
AlgorithmTestCase.test_algorithm_run(self, pso_custom, pso_customc, MyBenchmark())
def test_griewank_works_fine(self):
pso_griewank = self.algo(NP=40, C1=2.0, C2=2.0, w=0.7, vMin=-4, vMax=4, seed=self.seed)
pso_griewankc = self.algo(NP=40, C1=2.0, C2=2.0, w=0.7, vMin=-4, vMax=4, seed=self.seed)
AlgorithmTestCase.test_algorithm_run(self, pso_griewank, pso_griewankc)
class PSATestCase(AlgorithmTestCase):
def setUp(self):
AlgorithmTestCase.setUp(self)
self.algo = ParticleSwarmAlgorithm
def test_algorithm_info(self):
al = ParticleSwarmAlgorithm.algorithmInfo()
self.assertIsNotNone(al)
def test_parameter_type(self):
d = ParticleSwarmAlgorithm.typeParameters()
self.assertTrue(d['C1'](10))
self.assertTrue(d['C2'](10))
self.assertTrue(d['C1'](0))
self.assertTrue(d['C2'](0))
self.assertFalse(d['C1'](-10))
self.assertFalse(d['C2'](-10))
self.assertTrue(d['vMax'](10))
self.assertTrue(d['vMin'](10))
self.assertTrue(d['NP'](10))
self.assertFalse(d['NP'](-10))
self.assertFalse(d['NP'](0))
self.assertFalse(d['vMin'](None))
self.assertFalse(d['vMax'](None))
self.assertFalse(d['w'](None))
self.assertFalse(d['w'](-.1))
self.assertFalse(d['w'](-10))
self.assertTrue(d['w'](.01))
self.assertTrue(d['w'](10.01))
def test_custom_works_fine(self):
wvcpso_custom = ParticleSwarmAlgorithm(NP=40, C1=2.0, C2=2.0, w=0.7, vMin=-4, vMax=4, seed=self.seed)
wvcpso_customc = ParticleSwarmAlgorithm(NP=40, C1=2.0, C2=2.0, w=0.7, vMin=-4, vMax=4, seed=self.seed)
AlgorithmTestCase.test_algorithm_run(self, wvcpso_custom, wvcpso_customc, MyBenchmark())
def test_griewank_works_fine(self):
wvcpso_griewank = ParticleSwarmAlgorithm(NP=40, C1=2.0, C2=2.0, w=0.7, vMin=-4, vMax=4, seed=self.seed)
wvcpso_griewankc = ParticleSwarmAlgorithm(NP=40, C1=2.0, C2=2.0, w=0.7, vMin=-4, vMax=4, seed=self.seed)
AlgorithmTestCase.test_algorithm_run(self, wvcpso_griewank, wvcpso_griewankc)
class OVCPSOTestCase(AlgorithmTestCase):
def setUp(self):
AlgorithmTestCase.setUp(self)
self.algo = OppositionVelocityClampingParticleSwarmOptimization
def test_algorithm_info(self):
al = self.algo.algorithmInfo()
self.assertIsNotNone(al)
def test_parameter_type(self):
d = self.algo.typeParameters()
self.assertTrue(d['C1'](10))
self.assertTrue(d['C2'](10))
self.assertTrue(d['C1'](0))
self.assertTrue(d['C2'](0))
self.assertFalse(d['C1'](-10))
self.assertFalse(d['C2'](-10))
self.assertTrue(d['vMax'](10))
self.assertTrue(d['vMin'](10))
self.assertTrue(d['NP'](10))
self.assertFalse(d['NP'](-10))
self.assertFalse(d['NP'](0))
self.assertFalse(d['vMin'](None))
self.assertFalse(d['vMax'](None))
self.assertFalse(d['w'](None))
self.assertFalse(d['w'](-.1))
self.assertFalse(d['w'](-10))
self.assertTrue(d['w'](.01))
self.assertTrue(d['w'](10.01))
def test_custom_works_fine(self):
wvcpso_custom = self.algo(NP=40, C1=2.0, C2=2.0, w=0.7, vMin=-4, vMax=4, seed=self.seed)
wvcpso_customc = self.algo(NP=40, C1=2.0, C2=2.0, w=0.7, vMin=-4, vMax=4, seed=self.seed)
AlgorithmTestCase.test_algorithm_run(self, wvcpso_custom, wvcpso_customc, MyBenchmark())
def test_griewank_works_fine(self):
wvcpso_griewank = self.algo(NP=40, C1=2.0, C2=2.0, w=0.7, vMin=-4, vMax=4, seed=self.seed)
wvcpso_griewankc = self.algo(NP=40, C1=2.0, C2=2.0, w=0.7, vMin=-4, vMax=4, seed=self.seed)
AlgorithmTestCase.test_algorithm_run(self, wvcpso_griewank, wvcpso_griewankc)
class CPSOTestCase(AlgorithmTestCase):
def setUp(self):
AlgorithmTestCase.setUp(self)
self.algo = CenterParticleSwarmOptimization
def test_algorithm_info(self):
al = self.algo.algorithmInfo()
self.assertIsNotNone(al)
def test_parameter_type(self):
d = self.algo.typeParameters()
self.assertTrue(d['C1'](10))
self.assertTrue(d['C2'](10))
self.assertTrue(d['C1'](0))
self.assertTrue(d['C2'](0))
self.assertFalse(d['C1'](-10))
self.assertFalse(d['C2'](-10))
self.assertTrue(d['vMax'](10))
self.assertTrue(d['vMin'](10))
self.assertTrue(d['NP'](10))
self.assertFalse(d['NP'](-10))
self.assertFalse(d['NP'](0))
self.assertFalse(d['vMin'](None))
self.assertFalse(d['vMax'](None))
self.assertFalse(d['w'](None))
self.assertFalse(d['w'](-.1))
self.assertFalse(d['w'](-10))
self.assertTrue(d['w'](.01))
self.assertTrue(d['w'](10.01))
def test_custom_works_fine(self):
cpso_custom = self.algo(NP=40, C1=2.0, C2=2.0, w=0.7, vMin=-4, vMax=4, seed=self.seed)
cpso_customc = self.algo(NP=40, C1=2.0, C2=2.0, w=0.7, vMin=-4, vMax=4, seed=self.seed)
AlgorithmTestCase.test_algorithm_run(self, cpso_custom, cpso_customc, MyBenchmark())
def test_griewank_works_fine(self):
cpso_griewank = self.algo(NP=40, C1=2.0, C2=2.0, w=0.7, vMin=-4, vMax=4, seed=self.seed)
cpso_griewankc = self.algo(NP=40, C1=2.0, C2=2.0, w=0.7, vMin=-4, vMax=4, seed=self.seed)
AlgorithmTestCase.test_algorithm_run(self, cpso_griewank, cpso_griewankc)
class MPSOTestCase(AlgorithmTestCase):
def setUp(self):
AlgorithmTestCase.setUp(self)
self.algo = MutatedParticleSwarmOptimization
def test_algorithm_info(self):
al = MutatedParticleSwarmOptimization.algorithmInfo()
self.assertIsNotNone(al)
def test_parameter_type(self):
d = MutatedParticleSwarmOptimization.typeParameters()
self.assertTrue(d['C1'](10))
self.assertTrue(d['C2'](10))
self.assertTrue(d['C1'](0))
self.assertTrue(d['C2'](0))
self.assertFalse(d['C1'](-10))
self.assertFalse(d['C2'](-10))
self.assertTrue(d['vMax'](10))
self.assertTrue(d['vMin'](10))
self.assertTrue(d['NP'](10))
self.assertFalse(d['NP'](-10))
self.assertFalse(d['NP'](0))
self.assertFalse(d['vMin'](None))
self.assertFalse(d['vMax'](None))
self.assertFalse(d['w'](None))
self.assertFalse(d['w'](-.1))
self.assertFalse(d['w'](-10))
self.assertTrue(d['w'](.01))
self.assertTrue(d['w'](10.01))
def test_custom_works_fine(self):
mpso_custom = MutatedParticleSwarmOptimization(NP=40, C1=2.0, C2=2.0, w=0.7, vMin=-4, vMax=4, seed=self.seed)
mpso_customc = MutatedParticleSwarmOptimization(NP=40, C1=2.0, C2=2.0, w=0.7, vMin=-4, vMax=4, seed=self.seed)
AlgorithmTestCase.test_algorithm_run(self, mpso_custom, mpso_customc, MyBenchmark())
def test_griewank_works_fine(self):
mpso_griewank = MutatedParticleSwarmOptimization(NP=40, C1=2.0, C2=2.0, w=0.7, vMin=-4, vMax=4, seed=self.seed)
mpso_griewankc = MutatedParticleSwarmOptimization(NP=40, C1=2.0, C2=2.0, w=0.7, vMin=-4, vMax=4, seed=self.seed)
AlgorithmTestCase.test_algorithm_run(self, mpso_griewank, mpso_griewankc)
class MCPSOTestCase(AlgorithmTestCase):
def setUp(self):
AlgorithmTestCase.setUp(self)
self.algo = MutatedCenterParticleSwarmOptimization
def test_algorithm_info(self):
al = self.algo.algorithmInfo()
self.assertIsNotNone(al)
def test_parameter_type(self):
d = self.algo.typeParameters()
self.assertTrue(d['C1'](10))
self.assertTrue(d['C2'](10))
self.assertTrue(d['C1'](0))
self.assertTrue(d['C2'](0))
self.assertFalse(d['C1'](-10))
self.assertFalse(d['C2'](-10))
self.assertTrue(d['vMax'](10))
self.assertTrue(d['vMin'](10))
self.assertTrue(d['NP'](10))
self.assertFalse(d['NP'](-10))
self.assertFalse(d['NP'](0))
self.assertFalse(d['vMin'](None))
self.assertFalse(d['vMax'](None))
self.assertFalse(d['w'](None))
self.assertFalse(d['w'](-.1))
self.assertFalse(d['w'](-10))
self.assertTrue(d['w'](.01))
self.assertTrue(d['w'](10.01))
def test_custom_works_fine(self):
mcpso_custom = self.algo(NP=40, C1=2.0, C2=2.0, w=0.7, vMin=-4, vMax=4, seed=self.seed)
mcpso_customc = self.algo(NP=40, C1=2.0, C2=2.0, w=0.7, vMin=-4, vMax=4, seed=self.seed)
AlgorithmTestCase.test_algorithm_run(self, mcpso_custom, mcpso_customc, MyBenchmark())
def test_griewank_works_fine(self):
mcpso_griewank = self.algo(NP=40, C1=2.0, C2=2.0, w=0.7, vMin=-4, vMax=4, seed=self.seed)
mcpso_griewankc = self.algo(NP=40, C1=2.0, C2=2.0, w=0.7, vMin=-4, vMax=4, seed=self.seed)
AlgorithmTestCase.test_algorithm_run(self, mcpso_griewank, mcpso_griewankc)
class MCUPSOTestCase(AlgorithmTestCase):
def setUp(self):
AlgorithmTestCase.setUp(self)
self.algo = MutatedCenterUnifiedParticleSwarmOptimization
def test_algorithm_info(self):
al = self.algo.algorithmInfo()
self.assertIsNotNone(al)
def test_parameter_type(self):
d = self.algo.typeParameters()
self.assertTrue(d['C1'](10))
self.assertTrue(d['C2'](10))
self.assertTrue(d['C1'](0))
self.assertTrue(d['C2'](0))
self.assertFalse(d['C1'](-10))
self.assertFalse(d['C2'](-10))
self.assertTrue(d['vMax'](10))
self.assertTrue(d['vMin'](10))
self.assertTrue(d['NP'](10))
self.assertFalse(d['NP'](-10))
self.assertFalse(d['NP'](0))
self.assertFalse(d['vMin'](None))
self.assertFalse(d['vMax'](None))
self.assertFalse(d['w'](None))
self.assertFalse(d['w'](-.1))
self.assertFalse(d['w'](-10))
self.assertTrue(d['w'](.01))
self.assertTrue(d['w'](10.01))
def test_custom_works_fine(self):
mcupso_custom = self.algo(NP=40, C1=2.0, C2=2.0, w=0.7, vMin=-4, vMax=4, seed=self.seed)
mcupso_customc = self.algo(NP=40, C1=2.0, C2=2.0, w=0.7, vMin=-4, vMax=4, seed=self.seed)
AlgorithmTestCase.test_algorithm_run(self, mcupso_custom, mcupso_customc, MyBenchmark())
def test_griewank_works_fine(self):
mcupso_griewank = self.algo(NP=40, C1=2.0, C2=2.0, w=0.7, vMin=-4, vMax=4, seed=self.seed)
mcupso_griewankc = self.algo(NP=40, C1=2.0, C2=2.0, w=0.7, vMin=-4, vMax=4, seed=self.seed)
AlgorithmTestCase.test_algorithm_run(self, mcupso_griewank, mcupso_griewankc)
class CLPSOTestCase(AlgorithmTestCase):
    """Test case for ComprehensiveLearningParticleSwarmOptimizer."""

    def setUp(self):
        AlgorithmTestCase.setUp(self)
        self.algo = ComprehensiveLearningParticleSwarmOptimizer

    def test_algorithm_info(self):
        # The algorithm must expose a non-None info string.
        self.assertIsNotNone(self.algo.algorithmInfo())

    def test_parameter_type(self):
        checks = self.algo.typeParameters()
        # Acceleration coefficients C1/C2 accept zero and positive values only.
        for coeff in ('C1', 'C2'):
            self.assertTrue(checks[coeff](10))
            self.assertTrue(checks[coeff](0))
            self.assertFalse(checks[coeff](-10))
        # Velocity bounds accept plain numeric values.
        self.assertTrue(checks['vMax'](10))
        self.assertTrue(checks['vMin'](10))
        # Population size must be strictly positive.
        self.assertTrue(checks['NP'](10))
        self.assertFalse(checks['NP'](-10))
        self.assertFalse(checks['NP'](0))

    def test_custom_works_fine(self):
        """Run two identically-seeded instances on MyBenchmark and compare them."""
        kwargs = dict(NP=40, C1=2.0, C2=2.0, w=0.7, vMin=-4, vMax=4, seed=self.seed)
        AlgorithmTestCase.test_algorithm_run(self, self.algo(**kwargs), self.algo(**kwargs), MyBenchmark())

    def test_griewank_works_fine(self):
        """Run two identically-seeded instances on the default (Griewank) benchmark."""
        kwargs = dict(NP=40, C1=2.0, C2=2.0, w=0.7, vMin=-4, vMax=4, seed=self.seed)
        AlgorithmTestCase.test_algorithm_run(self, self.algo(**kwargs), self.algo(**kwargs))
# vim: tabstop=3 noexpandtab shiftwidth=3 softtabstop=3
| 38.93871
| 336
| 0.709883
| 1,881
| 12,071
| 4.461457
| 0.041999
| 0.048618
| 0.117969
| 0.089133
| 0.852717
| 0.852717
| 0.846521
| 0.846521
| 0.813632
| 0.753575
| 0
| 0.05865
| 0.098832
| 12,071
| 309
| 337
| 39.064725
| 0.712815
| 0.00555
| 0
| 0.75188
| 0
| 0
| 0.023165
| 0
| 0
| 0
| 0
| 0
| 0.511278
| 1
| 0.150376
| false
| 0
| 0.007519
| 0
| 0.18797
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
39c56c5758a7476eba61e1c186c655d329a1c6a8
| 17,781
|
py
|
Python
|
fonts/romfonts/vga2_8x16.py
|
slabua/st7789py_mpy
|
31e6f94592563e2b5ad716c48486e605ca3911bb
|
[
"MIT"
] | 153
|
2020-02-02T11:03:14.000Z
|
2022-03-30T05:47:07.000Z
|
fonts/bitmap/vga2_8x16.py
|
skylin008/st7789_mpy
|
f304991fc5558be653df5f0de928494b85cbc60d
|
[
"MIT"
] | 58
|
2020-04-11T23:23:02.000Z
|
2022-03-26T20:45:23.000Z
|
fonts/bitmap/vga2_8x16.py
|
skylin008/st7789_mpy
|
f304991fc5558be653df5f0de928494b85cbc60d
|
[
"MIT"
] | 50
|
2020-02-02T11:05:23.000Z
|
2022-03-22T15:24:42.000Z
|
"""converted from vga_8x16.bin """
WIDTH = 8
HEIGHT = 16
FIRST = 0x00
LAST = 0xff
_FONT =\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x7e\x81\xa5\x81\x81\xbd\x99\x81\x81\x7e\x00\x00\x00\x00'\
b'\x00\x00\x7e\xff\xdb\xff\xff\xc3\xe7\xff\xff\x7e\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x6c\xfe\xfe\xfe\xfe\x7c\x38\x10\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x10\x38\x7c\xfe\x7c\x38\x10\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x18\x3c\x3c\xe7\xe7\xe7\x18\x18\x3c\x00\x00\x00\x00'\
b'\x00\x00\x00\x18\x3c\x7e\xff\xff\x7e\x18\x18\x3c\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x18\x3c\x3c\x18\x00\x00\x00\x00\x00\x00'\
b'\xff\xff\xff\xff\xff\xff\xe7\xc3\xc3\xe7\xff\xff\xff\xff\xff\xff'\
b'\x00\x00\x00\x00\x00\x3c\x66\x42\x42\x66\x3c\x00\x00\x00\x00\x00'\
b'\xff\xff\xff\xff\xff\xc3\x99\xbd\xbd\x99\xc3\xff\xff\xff\xff\xff'\
b'\x00\x00\x1e\x0e\x1a\x32\x78\xcc\xcc\xcc\xcc\x78\x00\x00\x00\x00'\
b'\x00\x00\x3c\x66\x66\x66\x66\x3c\x18\x7e\x18\x18\x00\x00\x00\x00'\
b'\x00\x00\x3f\x33\x3f\x30\x30\x30\x30\x70\xf0\xe0\x00\x00\x00\x00'\
b'\x00\x00\x7f\x63\x7f\x63\x63\x63\x63\x67\xe7\xe6\xc0\x00\x00\x00'\
b'\x00\x00\x00\x18\x18\xdb\x3c\xe7\x3c\xdb\x18\x18\x00\x00\x00\x00'\
b'\x00\x80\xc0\xe0\xf0\xf8\xfe\xf8\xf0\xe0\xc0\x80\x00\x00\x00\x00'\
b'\x00\x02\x06\x0e\x1e\x3e\xfe\x3e\x1e\x0e\x06\x02\x00\x00\x00\x00'\
b'\x00\x00\x18\x3c\x7e\x18\x18\x18\x7e\x3c\x18\x00\x00\x00\x00\x00'\
b'\x00\x00\x66\x66\x66\x66\x66\x66\x66\x00\x66\x66\x00\x00\x00\x00'\
b'\x00\x00\x7f\xdb\xdb\xdb\x7b\x1b\x1b\x1b\x1b\x1b\x00\x00\x00\x00'\
b'\x00\x7c\xc6\x60\x38\x6c\xc6\xc6\x6c\x38\x0c\xc6\x7c\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\xfe\xfe\xfe\xfe\x00\x00\x00\x00'\
b'\x00\x00\x18\x3c\x7e\x18\x18\x18\x7e\x3c\x18\x7e\x00\x00\x00\x00'\
b'\x00\x00\x18\x3c\x7e\x18\x18\x18\x18\x18\x18\x18\x00\x00\x00\x00'\
b'\x00\x00\x18\x18\x18\x18\x18\x18\x18\x7e\x3c\x18\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x18\x0c\xfe\x0c\x18\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x30\x60\xfe\x60\x30\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\xc0\xc0\xc0\xfe\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x28\x6c\xfe\x6c\x28\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x10\x38\x38\x7c\x7c\xfe\xfe\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\xfe\xfe\x7c\x7c\x38\x38\x10\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x18\x3c\x3c\x3c\x18\x18\x18\x00\x18\x18\x00\x00\x00\x00'\
b'\x00\x66\x66\x66\x24\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x6c\x6c\xfe\x6c\x6c\x6c\xfe\x6c\x6c\x00\x00\x00\x00'\
b'\x18\x18\x7c\xc6\xc2\xc0\x7c\x06\x06\x86\xc6\x7c\x18\x18\x00\x00'\
b'\x00\x00\x00\x00\xc2\xc6\x0c\x18\x30\x60\xc6\x86\x00\x00\x00\x00'\
b'\x00\x00\x38\x6c\x6c\x38\x76\xdc\xcc\xcc\xcc\x76\x00\x00\x00\x00'\
b'\x00\x30\x30\x30\x60\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x0c\x18\x30\x30\x30\x30\x30\x30\x18\x0c\x00\x00\x00\x00'\
b'\x00\x00\x30\x18\x0c\x0c\x0c\x0c\x0c\x0c\x18\x30\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x66\x3c\xff\x3c\x66\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x18\x18\x7e\x18\x18\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x18\x18\x18\x30\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\xfe\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x18\x18\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x02\x06\x0c\x18\x30\x60\xc0\x80\x00\x00\x00\x00'\
b'\x00\x00\x38\x6c\xc6\xc6\xd6\xd6\xc6\xc6\x6c\x38\x00\x00\x00\x00'\
b'\x00\x00\x18\x38\x78\x18\x18\x18\x18\x18\x18\x7e\x00\x00\x00\x00'\
b'\x00\x00\x7c\xc6\x06\x0c\x18\x30\x60\xc0\xc6\xfe\x00\x00\x00\x00'\
b'\x00\x00\x7c\xc6\x06\x06\x3c\x06\x06\x06\xc6\x7c\x00\x00\x00\x00'\
b'\x00\x00\x0c\x1c\x3c\x6c\xcc\xfe\x0c\x0c\x0c\x1e\x00\x00\x00\x00'\
b'\x00\x00\xfe\xc0\xc0\xc0\xfc\x06\x06\x06\xc6\x7c\x00\x00\x00\x00'\
b'\x00\x00\x38\x60\xc0\xc0\xfc\xc6\xc6\xc6\xc6\x7c\x00\x00\x00\x00'\
b'\x00\x00\xfe\xc6\x06\x06\x0c\x18\x30\x30\x30\x30\x00\x00\x00\x00'\
b'\x00\x00\x7c\xc6\xc6\xc6\x7c\xc6\xc6\xc6\xc6\x7c\x00\x00\x00\x00'\
b'\x00\x00\x7c\xc6\xc6\xc6\x7e\x06\x06\x06\x0c\x78\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x18\x18\x00\x00\x00\x18\x18\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x18\x18\x00\x00\x00\x18\x18\x30\x00\x00\x00\x00'\
b'\x00\x00\x00\x06\x0c\x18\x30\x60\x30\x18\x0c\x06\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x7e\x00\x00\x7e\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x60\x30\x18\x0c\x06\x0c\x18\x30\x60\x00\x00\x00\x00'\
b'\x00\x00\x7c\xc6\xc6\x0c\x18\x18\x18\x00\x18\x18\x00\x00\x00\x00'\
b'\x00\x00\x00\x7c\xc6\xc6\xde\xde\xde\xdc\xc0\x7c\x00\x00\x00\x00'\
b'\x00\x00\x10\x38\x6c\xc6\xc6\xfe\xc6\xc6\xc6\xc6\x00\x00\x00\x00'\
b'\x00\x00\xfc\x66\x66\x66\x7c\x66\x66\x66\x66\xfc\x00\x00\x00\x00'\
b'\x00\x00\x3c\x66\xc2\xc0\xc0\xc0\xc0\xc2\x66\x3c\x00\x00\x00\x00'\
b'\x00\x00\xf8\x6c\x66\x66\x66\x66\x66\x66\x6c\xf8\x00\x00\x00\x00'\
b'\x00\x00\xfe\x66\x62\x68\x78\x68\x60\x62\x66\xfe\x00\x00\x00\x00'\
b'\x00\x00\xfe\x66\x62\x68\x78\x68\x60\x60\x60\xf0\x00\x00\x00\x00'\
b'\x00\x00\x3c\x66\xc2\xc0\xc0\xde\xc6\xc6\x66\x3a\x00\x00\x00\x00'\
b'\x00\x00\xc6\xc6\xc6\xc6\xfe\xc6\xc6\xc6\xc6\xc6\x00\x00\x00\x00'\
b'\x00\x00\x3c\x18\x18\x18\x18\x18\x18\x18\x18\x3c\x00\x00\x00\x00'\
b'\x00\x00\x1e\x0c\x0c\x0c\x0c\x0c\xcc\xcc\xcc\x78\x00\x00\x00\x00'\
b'\x00\x00\xe6\x66\x66\x6c\x78\x78\x6c\x66\x66\xe6\x00\x00\x00\x00'\
b'\x00\x00\xf0\x60\x60\x60\x60\x60\x60\x62\x66\xfe\x00\x00\x00\x00'\
b'\x00\x00\xc6\xee\xfe\xfe\xd6\xc6\xc6\xc6\xc6\xc6\x00\x00\x00\x00'\
b'\x00\x00\xc6\xe6\xf6\xfe\xde\xce\xc6\xc6\xc6\xc6\x00\x00\x00\x00'\
b'\x00\x00\x7c\xc6\xc6\xc6\xc6\xc6\xc6\xc6\xc6\x7c\x00\x00\x00\x00'\
b'\x00\x00\xfc\x66\x66\x66\x7c\x60\x60\x60\x60\xf0\x00\x00\x00\x00'\
b'\x00\x00\x7c\xc6\xc6\xc6\xc6\xc6\xc6\xd6\xde\x7c\x0c\x0e\x00\x00'\
b'\x00\x00\xfc\x66\x66\x66\x7c\x6c\x66\x66\x66\xe6\x00\x00\x00\x00'\
b'\x00\x00\x7c\xc6\xc6\x60\x38\x0c\x06\xc6\xc6\x7c\x00\x00\x00\x00'\
b'\x00\x00\x7e\x7e\x5a\x18\x18\x18\x18\x18\x18\x3c\x00\x00\x00\x00'\
b'\x00\x00\xc6\xc6\xc6\xc6\xc6\xc6\xc6\xc6\xc6\x7c\x00\x00\x00\x00'\
b'\x00\x00\xc6\xc6\xc6\xc6\xc6\xc6\xc6\x6c\x38\x10\x00\x00\x00\x00'\
b'\x00\x00\xc6\xc6\xc6\xc6\xd6\xd6\xd6\xfe\xee\x6c\x00\x00\x00\x00'\
b'\x00\x00\xc6\xc6\x6c\x7c\x38\x38\x7c\x6c\xc6\xc6\x00\x00\x00\x00'\
b'\x00\x00\x66\x66\x66\x66\x3c\x18\x18\x18\x18\x3c\x00\x00\x00\x00'\
b'\x00\x00\xfe\xc6\x86\x0c\x18\x30\x60\xc2\xc6\xfe\x00\x00\x00\x00'\
b'\x00\x00\x3c\x30\x30\x30\x30\x30\x30\x30\x30\x3c\x00\x00\x00\x00'\
b'\x00\x00\x00\x80\xc0\xe0\x70\x38\x1c\x0e\x06\x02\x00\x00\x00\x00'\
b'\x00\x00\x3c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x3c\x00\x00\x00\x00'\
b'\x10\x38\x6c\xc6\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xff\x00\x00'\
b'\x00\x30\x18\x0c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x78\x0c\x7c\xcc\xcc\xcc\x76\x00\x00\x00\x00'\
b'\x00\x00\xe0\x60\x60\x78\x6c\x66\x66\x66\x66\x7c\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x7c\xc6\xc0\xc0\xc0\xc6\x7c\x00\x00\x00\x00'\
b'\x00\x00\x1c\x0c\x0c\x3c\x6c\xcc\xcc\xcc\xcc\x76\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x7c\xc6\xfe\xc0\xc0\xc6\x7c\x00\x00\x00\x00'\
b'\x00\x00\x1c\x36\x32\x30\x78\x30\x30\x30\x30\x78\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x76\xcc\xcc\xcc\xcc\xcc\x7c\x0c\xcc\x78\x00'\
b'\x00\x00\xe0\x60\x60\x6c\x76\x66\x66\x66\x66\xe6\x00\x00\x00\x00'\
b'\x00\x00\x18\x18\x00\x38\x18\x18\x18\x18\x18\x3c\x00\x00\x00\x00'\
b'\x00\x00\x06\x06\x00\x0e\x06\x06\x06\x06\x06\x06\x66\x66\x3c\x00'\
b'\x00\x00\xe0\x60\x60\x66\x6c\x78\x78\x6c\x66\xe6\x00\x00\x00\x00'\
b'\x00\x00\x38\x18\x18\x18\x18\x18\x18\x18\x18\x3c\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\xec\xfe\xd6\xd6\xd6\xd6\xc6\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\xdc\x66\x66\x66\x66\x66\x66\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x7c\xc6\xc6\xc6\xc6\xc6\x7c\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\xdc\x66\x66\x66\x66\x66\x7c\x60\x60\xf0\x00'\
b'\x00\x00\x00\x00\x00\x76\xcc\xcc\xcc\xcc\xcc\x7c\x0c\x0c\x1e\x00'\
b'\x00\x00\x00\x00\x00\xdc\x76\x66\x60\x60\x60\xf0\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x7c\xc6\x60\x38\x0c\xc6\x7c\x00\x00\x00\x00'\
b'\x00\x00\x10\x30\x30\xfc\x30\x30\x30\x30\x36\x1c\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\xcc\xcc\xcc\xcc\xcc\xcc\x76\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\xc6\xc6\xc6\xc6\xc6\x6c\x38\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\xc6\xc6\xd6\xd6\xd6\xfe\x6c\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\xc6\x6c\x38\x38\x38\x6c\xc6\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\xc6\xc6\xc6\xc6\xc6\xc6\x7e\x06\x0c\xf8\x00'\
b'\x00\x00\x00\x00\x00\xfe\xcc\x18\x30\x60\xc6\xfe\x00\x00\x00\x00'\
b'\x00\x00\x0e\x18\x18\x18\x70\x18\x18\x18\x18\x0e\x00\x00\x00\x00'\
b'\x00\x00\x18\x18\x18\x18\x18\x18\x18\x18\x18\x18\x00\x00\x00\x00'\
b'\x00\x00\x70\x18\x18\x18\x0e\x18\x18\x18\x18\x70\x00\x00\x00\x00'\
b'\x00\x76\xdc\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x10\x38\x6c\xc6\xc6\xc6\xfe\x00\x00\x00\x00\x00'\
b'\x00\x00\x3c\x66\xc2\xc0\xc0\xc0\xc0\xc2\x66\x3c\x18\x70\x00\x00'\
b'\x00\x00\xcc\x00\x00\xcc\xcc\xcc\xcc\xcc\xcc\x76\x00\x00\x00\x00'\
b'\x00\x0c\x18\x30\x00\x7c\xc6\xfe\xc0\xc0\xc6\x7c\x00\x00\x00\x00'\
b'\x00\x10\x38\x6c\x00\x78\x0c\x7c\xcc\xcc\xcc\x76\x00\x00\x00\x00'\
b'\x00\x00\xcc\x00\x00\x78\x0c\x7c\xcc\xcc\xcc\x76\x00\x00\x00\x00'\
b'\x00\x60\x30\x18\x00\x78\x0c\x7c\xcc\xcc\xcc\x76\x00\x00\x00\x00'\
b'\x00\x38\x6c\x38\x00\x78\x0c\x7c\xcc\xcc\xcc\x76\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x7c\xc6\xc0\xc0\xc0\xc6\x7c\x18\x70\x00\x00'\
b'\x00\x10\x38\x6c\x00\x7c\xc6\xfe\xc0\xc0\xc6\x7c\x00\x00\x00\x00'\
b'\x00\x00\xc6\x00\x00\x7c\xc6\xfe\xc0\xc0\xc6\x7c\x00\x00\x00\x00'\
b'\x00\x60\x30\x18\x00\x7c\xc6\xfe\xc0\xc0\xc6\x7c\x00\x00\x00\x00'\
b'\x00\x00\x66\x00\x00\x38\x18\x18\x18\x18\x18\x3c\x00\x00\x00\x00'\
b'\x00\x18\x3c\x66\x00\x38\x18\x18\x18\x18\x18\x3c\x00\x00\x00\x00'\
b'\x00\x60\x30\x18\x00\x38\x18\x18\x18\x18\x18\x3c\x00\x00\x00\x00'\
b'\x00\xc6\x00\x10\x38\x6c\xc6\xc6\xfe\xc6\xc6\xc6\x00\x00\x00\x00'\
b'\x38\x6c\x38\x10\x38\x6c\xc6\xfe\xc6\xc6\xc6\xc6\x00\x00\x00\x00'\
b'\x0c\x18\x00\xfe\x66\x62\x68\x78\x68\x62\x66\xfe\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\xec\x36\x36\x7e\xd8\xd8\x6e\x00\x00\x00\x00'\
b'\x00\x00\x3e\x6c\xcc\xcc\xfe\xcc\xcc\xcc\xcc\xce\x00\x00\x00\x00'\
b'\x00\x10\x38\x6c\x00\x7c\xc6\xc6\xc6\xc6\xc6\x7c\x00\x00\x00\x00'\
b'\x00\x00\xc6\x00\x00\x7c\xc6\xc6\xc6\xc6\xc6\x7c\x00\x00\x00\x00'\
b'\x00\x60\x30\x18\x00\x7c\xc6\xc6\xc6\xc6\xc6\x7c\x00\x00\x00\x00'\
b'\x00\x30\x78\xcc\x00\xcc\xcc\xcc\xcc\xcc\xcc\x76\x00\x00\x00\x00'\
b'\x00\x60\x30\x18\x00\xcc\xcc\xcc\xcc\xcc\xcc\x76\x00\x00\x00\x00'\
b'\x00\x00\xc6\x00\x00\xc6\xc6\xc6\xc6\xc6\xc6\x7e\x06\x0c\x78\x00'\
b'\x00\xc6\x00\x7c\xc6\xc6\xc6\xc6\xc6\xc6\xc6\x7c\x00\x00\x00\x00'\
b'\x00\xc6\x00\xc6\xc6\xc6\xc6\xc6\xc6\xc6\xc6\x7c\x00\x00\x00\x00'\
b'\x00\x18\x18\x7c\xc6\xc0\xc0\xc0\xc6\x7c\x18\x18\x00\x00\x00\x00'\
b'\x00\x38\x6c\x64\x60\xf0\x60\x60\x60\x60\xe6\xfc\x00\x00\x00\x00'\
b'\x00\x00\x66\x66\x3c\x18\x7e\x18\x7e\x18\x18\x18\x00\x00\x00\x00'\
b'\x00\xf8\xcc\xcc\xf8\xc4\xcc\xde\xcc\xcc\xcc\xc6\x00\x00\x00\x00'\
b'\x00\x0e\x1b\x18\x18\x18\x7e\x18\x18\x18\xd8\x70\x00\x00\x00\x00'\
b'\x00\x18\x30\x60\x00\x78\x0c\x7c\xcc\xcc\xcc\x76\x00\x00\x00\x00'\
b'\x00\x0c\x18\x30\x00\x38\x18\x18\x18\x18\x18\x3c\x00\x00\x00\x00'\
b'\x00\x18\x30\x60\x00\x7c\xc6\xc6\xc6\xc6\xc6\x7c\x00\x00\x00\x00'\
b'\x00\x18\x30\x60\x00\xcc\xcc\xcc\xcc\xcc\xcc\x76\x00\x00\x00\x00'\
b'\x00\x00\x76\xdc\x00\xdc\x66\x66\x66\x66\x66\x66\x00\x00\x00\x00'\
b'\x76\xdc\x00\xc6\xe6\xf6\xfe\xde\xce\xc6\xc6\xc6\x00\x00\x00\x00'\
b'\x00\x00\x3c\x6c\x6c\x3e\x00\x7e\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x38\x6c\x6c\x38\x00\x7c\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x30\x30\x00\x30\x30\x60\xc0\xc6\xc6\x7c\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\xfe\xc0\xc0\xc0\xc0\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\xfe\x06\x06\x06\x06\x00\x00\x00\x00\x00'\
b'\x00\x60\xe0\x62\x66\x6c\x18\x30\x60\xdc\x86\x0c\x18\x3e\x00\x00'\
b'\x00\x60\xe0\x62\x66\x6c\x18\x30\x66\xce\x9a\x3f\x06\x06\x00\x00'\
b'\x00\x00\x18\x18\x00\x18\x18\x18\x3c\x3c\x3c\x18\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x36\x6c\xd8\x6c\x36\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\xd8\x6c\x36\x6c\xd8\x00\x00\x00\x00\x00\x00'\
b'\x11\x44\x11\x44\x11\x44\x11\x44\x11\x44\x11\x44\x11\x44\x11\x44'\
b'\x55\xaa\x55\xaa\x55\xaa\x55\xaa\x55\xaa\x55\xaa\x55\xaa\x55\xaa'\
b'\xdd\x77\xdd\x77\xdd\x77\xdd\x77\xdd\x77\xdd\x77\xdd\x77\xdd\x77'\
b'\x18\x18\x18\x18\x18\x18\x18\x18\x18\x18\x18\x18\x18\x18\x18\x18'\
b'\x18\x18\x18\x18\x18\x18\x18\xf8\x18\x18\x18\x18\x18\x18\x18\x18'\
b'\x18\x18\x18\x18\x18\xf8\x18\xf8\x18\x18\x18\x18\x18\x18\x18\x18'\
b'\x36\x36\x36\x36\x36\x36\x36\xf6\x36\x36\x36\x36\x36\x36\x36\x36'\
b'\x00\x00\x00\x00\x00\x00\x00\xfe\x36\x36\x36\x36\x36\x36\x36\x36'\
b'\x00\x00\x00\x00\x00\xf8\x18\xf8\x18\x18\x18\x18\x18\x18\x18\x18'\
b'\x36\x36\x36\x36\x36\xf6\x06\xf6\x36\x36\x36\x36\x36\x36\x36\x36'\
b'\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36'\
b'\x00\x00\x00\x00\x00\xfe\x06\xf6\x36\x36\x36\x36\x36\x36\x36\x36'\
b'\x36\x36\x36\x36\x36\xf6\x06\xfe\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x36\x36\x36\x36\x36\x36\x36\xfe\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x18\x18\x18\x18\x18\xf8\x18\xf8\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\xf8\x18\x18\x18\x18\x18\x18\x18\x18'\
b'\x18\x18\x18\x18\x18\x18\x18\x1f\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x18\x18\x18\x18\x18\x18\x18\xff\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\xff\x18\x18\x18\x18\x18\x18\x18\x18'\
b'\x18\x18\x18\x18\x18\x18\x18\x1f\x18\x18\x18\x18\x18\x18\x18\x18'\
b'\x00\x00\x00\x00\x00\x00\x00\xff\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x18\x18\x18\x18\x18\x18\x18\xff\x18\x18\x18\x18\x18\x18\x18\x18'\
b'\x18\x18\x18\x18\x18\x1f\x18\x1f\x18\x18\x18\x18\x18\x18\x18\x18'\
b'\x36\x36\x36\x36\x36\x36\x36\x37\x36\x36\x36\x36\x36\x36\x36\x36'\
b'\x36\x36\x36\x36\x36\x37\x30\x3f\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x3f\x30\x37\x36\x36\x36\x36\x36\x36\x36\x36'\
b'\x36\x36\x36\x36\x36\xf7\x00\xff\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\xff\x00\xf7\x36\x36\x36\x36\x36\x36\x36\x36'\
b'\x36\x36\x36\x36\x36\x37\x30\x37\x36\x36\x36\x36\x36\x36\x36\x36'\
b'\x00\x00\x00\x00\x00\xff\x00\xff\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x36\x36\x36\x36\x36\xf7\x00\xf7\x36\x36\x36\x36\x36\x36\x36\x36'\
b'\x18\x18\x18\x18\x18\xff\x00\xff\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x36\x36\x36\x36\x36\x36\x36\xff\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\xff\x00\xff\x18\x18\x18\x18\x18\x18\x18\x18'\
b'\x00\x00\x00\x00\x00\x00\x00\xff\x36\x36\x36\x36\x36\x36\x36\x36'\
b'\x36\x36\x36\x36\x36\x36\x36\x3f\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x18\x18\x18\x18\x18\x1f\x18\x1f\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x1f\x18\x1f\x18\x18\x18\x18\x18\x18\x18\x18'\
b'\x00\x00\x00\x00\x00\x00\x00\x3f\x36\x36\x36\x36\x36\x36\x36\x36'\
b'\x36\x36\x36\x36\x36\x36\x36\xff\x36\x36\x36\x36\x36\x36\x36\x36'\
b'\x18\x18\x18\x18\x18\xff\x18\xff\x18\x18\x18\x18\x18\x18\x18\x18'\
b'\x18\x18\x18\x18\x18\x18\x18\xf8\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x1f\x18\x18\x18\x18\x18\x18\x18\x18'\
b'\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff'\
b'\x00\x00\x00\x00\x00\x00\x00\xff\xff\xff\xff\xff\xff\xff\xff\xff'\
b'\xf0\xf0\xf0\xf0\xf0\xf0\xf0\xf0\xf0\xf0\xf0\xf0\xf0\xf0\xf0\xf0'\
b'\x0f\x0f\x0f\x0f\x0f\x0f\x0f\x0f\x0f\x0f\x0f\x0f\x0f\x0f\x0f\x0f'\
b'\xff\xff\xff\xff\xff\xff\xff\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x76\xdc\xd8\xd8\xd8\xdc\x76\x00\x00\x00\x00'\
b'\x00\x00\x78\xcc\xcc\xcc\xd8\xcc\xc6\xc6\xc6\xcc\x00\x00\x00\x00'\
b'\x00\x00\xfe\xc6\xc6\xc0\xc0\xc0\xc0\xc0\xc0\xc0\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\xfe\x6c\x6c\x6c\x6c\x6c\x6c\x00\x00\x00\x00'\
b'\x00\x00\xfe\xc6\x60\x30\x18\x18\x30\x60\xc6\xfe\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x7e\xd8\xd8\xd8\xd8\xd8\x70\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x66\x66\x66\x66\x66\x66\x7c\x60\x60\xc0\x00'\
b'\x00\x00\x00\x00\x76\xdc\x18\x18\x18\x18\x18\x18\x00\x00\x00\x00'\
b'\x00\x00\x7e\x18\x3c\x66\x66\x66\x66\x3c\x18\x7e\x00\x00\x00\x00'\
b'\x00\x00\x38\x6c\xc6\xc6\xfe\xc6\xc6\xc6\x6c\x38\x00\x00\x00\x00'\
b'\x00\x00\x38\x6c\xc6\xc6\xc6\x6c\x6c\x6c\x6c\xee\x00\x00\x00\x00'\
b'\x00\x00\x1e\x30\x18\x0c\x3e\x66\x66\x66\x66\x3c\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x7e\xdb\xdb\xdb\x7e\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x03\x06\x7e\xdb\xdb\xf3\x7e\x60\xc0\x00\x00\x00\x00'\
b'\x00\x00\x1c\x30\x60\x60\x7c\x60\x60\x60\x30\x1c\x00\x00\x00\x00'\
b'\x00\x00\x00\x7c\xc6\xc6\xc6\xc6\xc6\xc6\xc6\xc6\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\xfe\x00\x00\xfe\x00\x00\xfe\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x18\x18\x7e\x18\x18\x00\x00\x7e\x00\x00\x00\x00'\
b'\x00\x00\x00\x30\x18\x0c\x06\x0c\x18\x30\x00\x7e\x00\x00\x00\x00'\
b'\x00\x00\x00\x0c\x18\x30\x60\x30\x18\x0c\x00\x7e\x00\x00\x00\x00'\
b'\x00\x00\x0e\x1b\x1b\x18\x18\x18\x18\x18\x18\x18\x18\x18\x18\x18'\
b'\x18\x18\x18\x18\x18\x18\x18\x18\x18\xd8\xd8\xd8\x70\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x18\x00\x7e\x00\x18\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x76\xdc\x00\x76\xdc\x00\x00\x00\x00\x00\x00'\
b'\x00\x38\x6c\x6c\x38\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x18\x18\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x18\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x0f\x0c\x0c\x0c\x0c\x0c\xec\x6c\x6c\x3c\x1c\x00\x00\x00\x00'\
b'\x00\x6c\x36\x36\x36\x36\x36\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x3c\x66\x0c\x18\x32\x7e\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x7e\x7e\x7e\x7e\x7e\x7e\x7e\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
# Expose the bitmap as a zero-copy (and, since _FONT is bytes, read-only)
# view so renderers can slice glyphs without copying.
FONT = memoryview(_FONT)
| 67.098113
| 68
| 0.709915
| 4,369
| 17,781
| 2.888533
| 0.022431
| 0.612361
| 0.632567
| 0.563867
| 0.889144
| 0.867353
| 0.840491
| 0.80103
| 0.746513
| 0.704913
| 0
| 0.384677
| 0.015635
| 17,781
| 264
| 69
| 67.352273
| 0.336342
| 0.001518
| 0
| 0.01145
| 0
| 0.977099
| 0.923198
| 0.923198
| 0
| 1
| 0.000451
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 15
|
39db27b1d77d155111d280f5ec244c06ca9b69cf
| 5,108
|
py
|
Python
|
tests/test_api.py
|
ProjetPP/PPP-Logger
|
dc529fea565e795a93a160c9415724b9f4cda24c
|
[
"MIT"
] | 1
|
2015-02-26T21:07:24.000Z
|
2015-02-26T21:07:24.000Z
|
tests/test_api.py
|
ProjetPP/PPP-Logger
|
dc529fea565e795a93a160c9415724b9f4cda24c
|
[
"MIT"
] | 4
|
2015-03-01T09:05:08.000Z
|
2015-03-01T09:58:19.000Z
|
tests/test_api.py
|
ProjetPP/PPP-Logger
|
dc529fea565e795a93a160c9415724b9f4cda24c
|
[
"MIT"
] | null | null | null |
import sqlite3
import json
import tempfile
from ppp_logger import app
from ppp_libmodule.tests import PPPTestCase
class HttpTest(PPPTestCase(app)):
    """HTTP-level tests for the logger app's '/' listing endpoint."""

    # Name of the environment variable that points at the config file;
    # presumably consumed by the PPPTestCase base during setUp — TODO confirm.
    config_var = 'PPP_LOGGER_CONFIG'

    def setUp(self):
        # Back the app with a throw-away sqlite file so every test starts
        # from an empty database.
        self.fd = tempfile.NamedTemporaryFile('w+')
        self.config = '{"database_url": "sqlite:///%s"}' % self.fd.name
        super(HttpTest, self).setUp()

    def tearDown(self):
        super(HttpTest, self).tearDown()
        # Closing the NamedTemporaryFile also removes the sqlite file on disk.
        self.fd.close()

    def testEmpty(self):
        # A fresh database must list no questions.
        r = self.app.get('/')
        self.assertEqual(r.content_type, 'application/json')
        r = json.loads(r.body.decode())
        self.assertEqual(r, [])

    def testUnknownOrder(self):
        # An unsupported 'order' value must be rejected with HTTP 405.
        self.assertEqual(self.app.get('/', {'order': 'foobar'}, status='*').status_int, 405)

    def testLast(self):
        # Default ordering lists the most recently logged question first.
        # NOTE(review): assertStatusInt appears to submit `q` to the service
        # and assert the response status — defined in the PPPTestCase base;
        # confirm against ppp_libmodule.
        q = {'id': 'foo', 'question': 'Foo bar?', 'responses': []}
        self.assertStatusInt(q, 200)
        q = {'id': 'foo', 'question': 'Baz qux?', 'responses': []}
        self.assertStatusInt(q, 200)
        r = self.app.get('/')
        self.assertEqual(r.content_type, 'application/json')
        r = json.loads(r.body.decode())
        self.assertEqual(len(r), 2, r)
        self.assertEqual(r[0][0], 'Baz qux?')
        self.assertEqual(r[1][0], 'Foo bar?')

    def testLimitLast(self):
        # 'limit' caps the number of returned rows, keeping newest-first order.
        q = {'id': 'foo', 'question': 'Foo bar?', 'responses': []}
        self.assertStatusInt(q, 200)
        q = {'id': 'foo', 'question': 'Baz qux?', 'responses': []}
        self.assertStatusInt(q, 200)
        r = self.app.get('/', params={'limit': 1})
        self.assertEqual(r.content_type, 'application/json')
        r = json.loads(r.body.decode())
        self.assertEqual(len(r), 1, r)
        self.assertEqual(r[0][0], 'Baz qux?')

    def testFirst(self):
        # order=first lists oldest entries first; 'offset' pages through them.
        q = {'id': 'foo', 'question': 'Foo bar?', 'responses': []}
        self.assertStatusInt(q, 200)
        q = {'id': 'foo', 'question': 'Baz qux?', 'responses': []}
        self.assertStatusInt(q, 200)
        r = self.app.get('/', {'order': 'first', 'limit': '1'})
        self.assertEqual(r.content_type, 'application/json')
        r = json.loads(r.body.decode())
        self.assertEqual(len(r), 1, r)
        self.assertEqual(r[0][0], 'Foo bar?')
        r = self.app.get('/', {'order': 'first', 'limit': '1', 'offset': '1'})
        self.assertEqual(r.content_type, 'application/json')
        r = json.loads(r.body.decode())
        self.assertEqual(len(r), 1, r)
        self.assertEqual(r[0][0], 'Baz qux?')

    def testTop(self):
        # order=top ranks questions by how often they were logged:
        # 'Baz qux?' x4, 'Foo bar?' x3, 'quux?' x1.
        q = {'id': 'foo', 'question': 'Foo bar?', 'responses': []}
        self.assertStatusInt(q, 200)
        q = {'id': 'foo', 'question': 'Baz qux?', 'responses': []}
        self.assertStatusInt(q, 200)
        q = {'id': 'foo', 'question': 'Baz qux?', 'responses': []}
        self.assertStatusInt(q, 200)
        q = {'id': 'foo', 'question': 'Baz qux?', 'responses': []}
        self.assertStatusInt(q, 200)
        q = {'id': 'foo', 'question': 'Foo bar?', 'responses': []}
        self.assertStatusInt(q, 200)
        q = {'id': 'foo', 'question': 'Baz qux?', 'responses': []}
        self.assertStatusInt(q, 200)
        q = {'id': 'foo', 'question': 'quux?', 'responses': []}
        self.assertStatusInt(q, 200)
        q = {'id': 'foo', 'question': 'Foo bar?', 'responses': []}
        self.assertStatusInt(q, 200)
        r = self.app.get('/', {'order': 'top'})
        self.assertEqual(r.content_type, 'application/json')
        r = json.loads(r.body.decode())
        self.assertEqual(len(r), 3, r)
        self.assertEqual(r[0][0], 'Baz qux?', r)
        self.assertEqual(r[1][0], 'Foo bar?', r)
        self.assertEqual(r[2][0], 'quux?', r)

    # NOTE(review): the string below is a disabled test for the 'among'
    # parameter, kept as a bare string literal (dead code). Consider either
    # re-enabling it or deleting it instead of shipping it as dark code.
    """
    def testTopAmong(self):
        q = {'id': 'foo', 'question': 'Foo bar?', 'responses': []}
        self.assertStatusInt(q, 200)
        q = {'id': 'foo', 'question': 'Baz qux?', 'responses': []}
        self.assertStatusInt(q, 200)
        q = {'id': 'foo', 'question': 'Baz qux?', 'responses': []}
        self.assertStatusInt(q, 200)
        q = {'id': 'foo', 'question': 'Baz qux?', 'responses': []}
        self.assertStatusInt(q, 200)
        q = {'id': 'foo', 'question': 'Baz qux?', 'responses': []}
        self.assertStatusInt(q, 200)
        q = {'id': 'foo', 'question': 'Foo bar?', 'responses': []}
        self.assertStatusInt(q, 200)
        q = {'id': 'foo', 'question': 'Baz qux?', 'responses': []}
        self.assertStatusInt(q, 200)
        q = {'id': 'foo', 'question': 'quux?', 'responses': []}
        self.assertStatusInt(q, 200)
        q = {'id': 'foo', 'question': 'Foo bar?', 'responses': []}
        self.assertStatusInt(q, 200)
        q = {'id': 'foo', 'question': 'Foo bar?', 'responses': []}
        self.assertStatusInt(q, 200)
        r = self.app.get('/', {'order': 'top', 'among': 6})
        self.assertEqual(r.content_type, 'application/json')
        r = json.loads(r.body.decode())
        self.assertEqual(len(r), 3, r)
        self.assertEqual(r[0][0], 'Foo bar?', r)
        self.assertEqual(r[1][0], 'Baz qux?', r)
        self.assertEqual(r[2][0], 'quux?', r)
    """
| 42.214876
| 92
| 0.536022
| 609
| 5,108
| 4.472906
| 0.119869
| 0.143172
| 0.052863
| 0.123348
| 0.825624
| 0.825624
| 0.825257
| 0.800294
| 0.76138
| 0.76138
| 0
| 0.028297
| 0.245889
| 5,108
| 120
| 93
| 42.566667
| 0.678868
| 0
| 0
| 0.580247
| 0
| 0
| 0.188976
| 0
| 0
| 0
| 0
| 0
| 0.432099
| 1
| 0.098765
| false
| 0
| 0.061728
| 0
| 0.185185
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
f2e6ef9b0f9999ef0cf5f83497ce17ad9dca6b12
| 8,769
|
py
|
Python
|
tests/test_registration_page.py
|
PopkovS/Testing_assistant_lk
|
8e935a5709570deb9e7c459cb3b8cbcd81f587e0
|
[
"Apache-2.0"
] | null | null | null |
tests/test_registration_page.py
|
PopkovS/Testing_assistant_lk
|
8e935a5709570deb9e7c459cb3b8cbcd81f587e0
|
[
"Apache-2.0"
] | 1
|
2021-06-02T00:59:36.000Z
|
2021-06-02T00:59:36.000Z
|
tests/test_registration_page.py
|
PopkovS/Testing_assistant_lk
|
8e935a5709570deb9e7c459cb3b8cbcd81f587e0
|
[
"Apache-2.0"
] | null | null | null |
from time import sleep
import pytest
from pages.locators import Links, TestData, BaseLocators
from pages.registration_page import RegistrationPage
from pages.mailforforspam_page import MailForSpamPage
@pytest.fixture(scope="module", autouse=True)
def setup_for_module(browser):
global page
page = RegistrationPage(browser, Links.LOGIN_LINK)
page.check_new_user_exist()
page.change_sys_paran(auth_ad="False")
page.open()
page.go_to_reg_page()
yield page
class TestsRegNegative():
    """Negative registration tests: every scenario must be rejected and leave
    no user behind in the database."""

    @pytest.fixture(scope="function", autouse=True)
    def setUp_for_login_neg_function(self, browser) if False else None  # placeholder removed
class TestsNegativeAfterReg():
    """Registration succeeds but the account is not activated: logging in
    before confirmation must be rejected."""

    @pytest.fixture(scope="function", autouse=True)
    def setup_for_login_neg_function(self, browser):
        browser.refresh()
        page.go_to_reg_page()
        yield page
        # Teardown: remove the user created during the test.
        page.check_new_user_exist()

    def test_reg_link_from_letters(self):
        # Register a new user, then try to log in WITHOUT following the
        # activation link from the confirmation e-mail.
        page.registration(email=TestData.NEW_USER_EMAIL,
                          name=TestData.NEW_USER_NAME,
                          password=TestData.PASSWORD_USER_NORMAL,
                          conf_password=TestData.PASSWORD_USER_NORMAL)
        page.should_be_success_reg_page()
        page.go_to_login_page_from_confirm_reg()
        page.login(email=TestData.NEW_USER_EMAIL,
                   password=TestData.PASSWORD_USER_NORMAL)
        # The unconfirmed account must be rejected with the "not confirmed"
        # alert, while the user record already exists in the database.
        page.should_be_alert("acc_not_conf")
        page.should_be_user_in_bd()
class TestsRegPositive():
    """Happy-path registration tests: register, activate via the e-mailed
    link, and log in as the new user."""

    @pytest.fixture(scope="function", autouse=True)
    def setup_for_login_neg_function(self, browser):
        browser.refresh()
        # Remember how many letters the spam mailbox already holds so the
        # activation step can find only the newly arrived one.
        global mail_num
        mail_num = page.old_letters_count()
        page.go_to_reg_page()
        yield page
        # Teardown: the user must exist in the DB, then gets cleaned up.
        page.should_be_user_in_bd()
        page.check_new_user_exist()

    def test_reg_link_from_letters(self):
        # Register, activate through the link in the e-mail, then log in.
        page.registration(email=TestData.NEW_USER_EMAIL,
                          name=TestData.NEW_USER_NAME,
                          password=TestData.PASSWORD_USER_NORMAL,
                          conf_password=TestData.PASSWORD_USER_NORMAL)
        page.should_be_success_reg_page()
        page.go_to_account_activation(old_lett=mail_num)
        page.should_be_reg_confirm_page()
        page.go_to_login_page_from_confirm_reg()
        page.login_new_user()
        page.close_tab()

    def test_reg_link_from_reg_page(self):
        # Same flow, but the confirmation tab is closed before logging in.
        page.registration(email=TestData.NEW_USER_EMAIL,
                          name=TestData.NEW_USER_NAME,
                          password=TestData.PASSWORD_USER_NORMAL,
                          conf_password=TestData.PASSWORD_USER_NORMAL)
        page.should_be_success_reg_page()
        page.go_to_account_activation(old_lett=mail_num)
        page.should_be_reg_confirm_page()
        page.close_tab()
        page.go_to_login_page_from_confirm_reg()
        page.login_new_user()
# TODO(cleanup): the commented-out class below ("TestsDeleteMe") is dead code —
# delete it once the registration-with-AD flows are covered by live tests.
# class TestsDeleteMe():
# def test_registr2(self):
# page.change_sys_paran(auth_ad="True", dir_control="False")
# mail_num = page.old_letters_count(link=Links.MAIL_FOR_SPAM_NEW_US + "2")
# page.registration(email=TestData.NEW_USER.replace("ser@", "ser2@"),
# name=TestData.NEW_USER_NAME + "2",
# password=TestData.PASSWORD_USER_NORMAL,
# conf_password=TestData.PASSWORD_USER_NORMAL)
# page.go_to_account_activation(old_lett=mail_num, link=Links.MAIL_FOR_SPAM_NEW_US + "2")
# page.change_sys_paran(auth_ad="True", dir_control="True")
#
# def test_registr1(self):
# page.change_sys_paran(auth_ad="True", dir_control="False")
# mail_num = page.old_letters_count(link=Links.MAIL_FOR_SPAM_NEW_US)
# page.registration(email=TestData.NEW_USER,
# name=TestData.NEW_USER_NAME,
# password=TestData.PASSWORD_USER_NORMAL,
# conf_password=TestData.PASSWORD_USER_NORMAL)
# page.go_to_account_activation(old_lett=mail_num, link=Links.MAIL_FOR_SPAM_NEW_US)
# page.change_sys_paran(auth_ad="True", dir_control="True")
# sleep(6)
| 44.51269
| 97
| 0.64192
| 1,054
| 8,769
| 4.883302
| 0.110057
| 0.044881
| 0.067612
| 0.146882
| 0.873907
| 0.83777
| 0.798329
| 0.771517
| 0.760637
| 0.723334
| 0
| 0.001099
| 0.273691
| 8,769
| 197
| 98
| 44.51269
| 0.807034
| 0.138442
| 0
| 0.653333
| 0
| 0
| 0.062384
| 0.026812
| 0
| 0
| 0
| 0
| 0
| 1
| 0.12
| false
| 0.286667
| 0.033333
| 0
| 0.173333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 7
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.